Move architecture-dependent files

Bug: v8:9247
Change-Id: I2f999ed3a8cc0931e5092f2ac6e709b8ff3f9e42
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1630678
Commit-Queue: Yang Guo <yangguo@chromium.org>
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#61896}
This commit is contained in:
Yang Guo 2019-05-28 15:17:52 +02:00 committed by Commit Bot
parent b60fd1fa91
commit f455f86d89
209 changed files with 8927 additions and 12104 deletions

282
BUILD.gn
View File

@ -2982,31 +2982,41 @@ v8_source_set("v8_base_without_compiler") {
if (v8_current_cpu == "x86") {
sources += [ ### gcmole(arch:ia32) ###
"src/codegen/ia32/assembler-ia32-inl.h",
"src/codegen/ia32/assembler-ia32.cc",
"src/codegen/ia32/assembler-ia32.h",
"src/codegen/ia32/constants-ia32.h",
"src/codegen/ia32/cpu-ia32.cc",
"src/codegen/ia32/interface-descriptors-ia32.cc",
"src/codegen/ia32/macro-assembler-ia32.cc",
"src/codegen/ia32/macro-assembler-ia32.h",
"src/codegen/ia32/register-ia32.h",
"src/codegen/ia32/sse-instr.h",
"src/compiler/backend/ia32/code-generator-ia32.cc",
"src/compiler/backend/ia32/instruction-codes-ia32.h",
"src/compiler/backend/ia32/instruction-scheduler-ia32.cc",
"src/compiler/backend/ia32/instruction-selector-ia32.cc",
"src/debug/ia32/debug-ia32.cc",
"src/ia32/assembler-ia32-inl.h",
"src/ia32/assembler-ia32.cc",
"src/ia32/assembler-ia32.h",
"src/ia32/constants-ia32.h",
"src/ia32/cpu-ia32.cc",
"src/ia32/deoptimizer-ia32.cc",
"src/ia32/disasm-ia32.cc",
"src/ia32/frame-constants-ia32.cc",
"src/ia32/frame-constants-ia32.h",
"src/ia32/interface-descriptors-ia32.cc",
"src/ia32/macro-assembler-ia32.cc",
"src/ia32/macro-assembler-ia32.h",
"src/ia32/register-ia32.h",
"src/ia32/sse-instr.h",
"src/deoptimizer/ia32/deoptimizer-ia32.cc",
"src/diagnostics/ia32/disasm-ia32.cc",
"src/execution/ia32/frame-constants-ia32.cc",
"src/execution/ia32/frame-constants-ia32.h",
"src/regexp/ia32/regexp-macro-assembler-ia32.cc",
"src/regexp/ia32/regexp-macro-assembler-ia32.h",
"src/wasm/baseline/ia32/liftoff-assembler-ia32.h",
]
} else if (v8_current_cpu == "x64") {
sources += [ ### gcmole(arch:x64) ###
"src/codegen/x64/assembler-x64-inl.h",
"src/codegen/x64/assembler-x64.cc",
"src/codegen/x64/assembler-x64.h",
"src/codegen/x64/constants-x64.h",
"src/codegen/x64/cpu-x64.cc",
"src/codegen/x64/interface-descriptors-x64.cc",
"src/codegen/x64/macro-assembler-x64.cc",
"src/codegen/x64/macro-assembler-x64.h",
"src/codegen/x64/register-x64.h",
"src/codegen/x64/sse-instr.h",
"src/compiler/backend/x64/code-generator-x64.cc",
"src/compiler/backend/x64/instruction-codes-x64.h",
"src/compiler/backend/x64/instruction-scheduler-x64.cc",
@ -3014,25 +3024,15 @@ v8_source_set("v8_base_without_compiler") {
"src/compiler/backend/x64/unwinding-info-writer-x64.cc",
"src/compiler/backend/x64/unwinding-info-writer-x64.h",
"src/debug/x64/debug-x64.cc",
"src/deoptimizer/x64/deoptimizer-x64.cc",
"src/diagnostics/x64/disasm-x64.cc",
"src/diagnostics/x64/eh-frame-x64.cc",
"src/execution/x64/frame-constants-x64.cc",
"src/execution/x64/frame-constants-x64.h",
"src/regexp/x64/regexp-macro-assembler-x64.cc",
"src/regexp/x64/regexp-macro-assembler-x64.h",
"src/third_party/valgrind/valgrind.h",
"src/wasm/baseline/x64/liftoff-assembler-x64.h",
"src/x64/assembler-x64-inl.h",
"src/x64/assembler-x64.cc",
"src/x64/assembler-x64.h",
"src/x64/constants-x64.h",
"src/x64/cpu-x64.cc",
"src/x64/deoptimizer-x64.cc",
"src/x64/disasm-x64.cc",
"src/x64/eh-frame-x64.cc",
"src/x64/frame-constants-x64.cc",
"src/x64/frame-constants-x64.h",
"src/x64/interface-descriptors-x64.cc",
"src/x64/macro-assembler-x64.cc",
"src/x64/macro-assembler-x64.h",
"src/x64/register-x64.h",
"src/x64/sse-instr.h",
]
# iOS Xcode simulator builds run on an x64 target. iOS and macOS are both
@ -3055,23 +3055,16 @@ v8_source_set("v8_base_without_compiler") {
}
} else if (v8_current_cpu == "arm") {
sources += [ ### gcmole(arch:arm) ###
"src/arm/assembler-arm-inl.h",
"src/arm/assembler-arm.cc",
"src/arm/assembler-arm.h",
"src/arm/constants-arm.cc",
"src/arm/constants-arm.h",
"src/arm/cpu-arm.cc",
"src/arm/deoptimizer-arm.cc",
"src/arm/disasm-arm.cc",
"src/arm/eh-frame-arm.cc",
"src/arm/frame-constants-arm.cc",
"src/arm/frame-constants-arm.h",
"src/arm/interface-descriptors-arm.cc",
"src/arm/macro-assembler-arm.cc",
"src/arm/macro-assembler-arm.h",
"src/arm/register-arm.h",
"src/arm/simulator-arm.cc",
"src/arm/simulator-arm.h",
"src/codegen/arm/assembler-arm-inl.h",
"src/codegen/arm/assembler-arm.cc",
"src/codegen/arm/assembler-arm.h",
"src/codegen/arm/constants-arm.cc",
"src/codegen/arm/constants-arm.h",
"src/codegen/arm/cpu-arm.cc",
"src/codegen/arm/interface-descriptors-arm.cc",
"src/codegen/arm/macro-assembler-arm.cc",
"src/codegen/arm/macro-assembler-arm.h",
"src/codegen/arm/register-arm.h",
"src/compiler/backend/arm/code-generator-arm.cc",
"src/compiler/backend/arm/instruction-codes-arm.h",
"src/compiler/backend/arm/instruction-scheduler-arm.cc",
@ -3079,42 +3072,40 @@ v8_source_set("v8_base_without_compiler") {
"src/compiler/backend/arm/unwinding-info-writer-arm.cc",
"src/compiler/backend/arm/unwinding-info-writer-arm.h",
"src/debug/arm/debug-arm.cc",
"src/deoptimizer/arm/deoptimizer-arm.cc",
"src/diagnostics/arm/disasm-arm.cc",
"src/diagnostics/arm/eh-frame-arm.cc",
"src/execution/arm/frame-constants-arm.cc",
"src/execution/arm/frame-constants-arm.h",
"src/execution/arm/simulator-arm.cc",
"src/execution/arm/simulator-arm.h",
"src/regexp/arm/regexp-macro-assembler-arm.cc",
"src/regexp/arm/regexp-macro-assembler-arm.h",
"src/wasm/baseline/arm/liftoff-assembler-arm.h",
]
} else if (v8_current_cpu == "arm64") {
sources += [ ### gcmole(arch:arm64) ###
"src/arm64/assembler-arm64-inl.h",
"src/arm64/assembler-arm64.cc",
"src/arm64/assembler-arm64.h",
"src/arm64/constants-arm64.h",
"src/arm64/cpu-arm64.cc",
"src/arm64/decoder-arm64-inl.h",
"src/arm64/decoder-arm64.cc",
"src/arm64/decoder-arm64.h",
"src/arm64/deoptimizer-arm64.cc",
"src/arm64/disasm-arm64.cc",
"src/arm64/disasm-arm64.h",
"src/arm64/eh-frame-arm64.cc",
"src/arm64/frame-constants-arm64.cc",
"src/arm64/frame-constants-arm64.h",
"src/arm64/instructions-arm64-constants.cc",
"src/arm64/instructions-arm64.cc",
"src/arm64/instructions-arm64.h",
"src/arm64/instrument-arm64.cc",
"src/arm64/instrument-arm64.h",
"src/arm64/interface-descriptors-arm64.cc",
"src/arm64/macro-assembler-arm64-inl.h",
"src/arm64/macro-assembler-arm64.cc",
"src/arm64/macro-assembler-arm64.h",
"src/arm64/register-arm64.cc",
"src/arm64/register-arm64.h",
"src/arm64/simulator-arm64.cc",
"src/arm64/simulator-arm64.h",
"src/arm64/simulator-logic-arm64.cc",
"src/arm64/utils-arm64.cc",
"src/arm64/utils-arm64.h",
"src/codegen/arm64/assembler-arm64-inl.h",
"src/codegen/arm64/assembler-arm64.cc",
"src/codegen/arm64/assembler-arm64.h",
"src/codegen/arm64/constants-arm64.h",
"src/codegen/arm64/cpu-arm64.cc",
"src/codegen/arm64/decoder-arm64-inl.h",
"src/codegen/arm64/decoder-arm64.cc",
"src/codegen/arm64/decoder-arm64.h",
"src/codegen/arm64/instructions-arm64-constants.cc",
"src/codegen/arm64/instructions-arm64.cc",
"src/codegen/arm64/instructions-arm64.h",
"src/codegen/arm64/instrument-arm64.cc",
"src/codegen/arm64/instrument-arm64.h",
"src/codegen/arm64/interface-descriptors-arm64.cc",
"src/codegen/arm64/macro-assembler-arm64-inl.h",
"src/codegen/arm64/macro-assembler-arm64.cc",
"src/codegen/arm64/macro-assembler-arm64.h",
"src/codegen/arm64/register-arm64.cc",
"src/codegen/arm64/register-arm64.h",
"src/codegen/arm64/utils-arm64.cc",
"src/codegen/arm64/utils-arm64.h",
"src/compiler/backend/arm64/code-generator-arm64.cc",
"src/compiler/backend/arm64/instruction-codes-arm64.h",
"src/compiler/backend/arm64/instruction-scheduler-arm64.cc",
@ -3122,6 +3113,15 @@ v8_source_set("v8_base_without_compiler") {
"src/compiler/backend/arm64/unwinding-info-writer-arm64.cc",
"src/compiler/backend/arm64/unwinding-info-writer-arm64.h",
"src/debug/arm64/debug-arm64.cc",
"src/deoptimizer/arm64/deoptimizer-arm64.cc",
"src/diagnostics/arm64/disasm-arm64.cc",
"src/diagnostics/arm64/disasm-arm64.h",
"src/diagnostics/arm64/eh-frame-arm64.cc",
"src/execution/arm64/frame-constants-arm64.cc",
"src/execution/arm64/frame-constants-arm64.h",
"src/execution/arm64/simulator-arm64.cc",
"src/execution/arm64/simulator-arm64.h",
"src/execution/arm64/simulator-logic-arm64.cc",
"src/regexp/arm64/regexp-macro-assembler-arm64.cc",
"src/regexp/arm64/regexp-macro-assembler-arm64.h",
"src/wasm/baseline/arm64/liftoff-assembler-arm64.h",
@ -3129,114 +3129,114 @@ v8_source_set("v8_base_without_compiler") {
jumbo_excluded_sources += [
# TODO(mostynb@vewd.com): fix this code so it doesn't need
# to be excluded, see the comments inside.
"src/arm64/instructions-arm64-constants.cc",
"src/codegen/arm64/instructions-arm64-constants.cc",
]
} else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
sources += [ ### gcmole(arch:mipsel) ###
"src/codegen/mips/assembler-mips-inl.h",
"src/codegen/mips/assembler-mips.cc",
"src/codegen/mips/assembler-mips.h",
"src/codegen/mips/constants-mips.cc",
"src/codegen/mips/constants-mips.h",
"src/codegen/mips/cpu-mips.cc",
"src/codegen/mips/interface-descriptors-mips.cc",
"src/codegen/mips/macro-assembler-mips.cc",
"src/codegen/mips/macro-assembler-mips.h",
"src/codegen/mips/register-mips.h",
"src/compiler/backend/mips/code-generator-mips.cc",
"src/compiler/backend/mips/instruction-codes-mips.h",
"src/compiler/backend/mips/instruction-scheduler-mips.cc",
"src/compiler/backend/mips/instruction-selector-mips.cc",
"src/debug/mips/debug-mips.cc",
"src/mips/assembler-mips-inl.h",
"src/mips/assembler-mips.cc",
"src/mips/assembler-mips.h",
"src/mips/constants-mips.cc",
"src/mips/constants-mips.h",
"src/mips/cpu-mips.cc",
"src/mips/deoptimizer-mips.cc",
"src/mips/disasm-mips.cc",
"src/mips/frame-constants-mips.cc",
"src/mips/frame-constants-mips.h",
"src/mips/interface-descriptors-mips.cc",
"src/mips/macro-assembler-mips.cc",
"src/mips/macro-assembler-mips.h",
"src/mips/register-mips.h",
"src/mips/simulator-mips.cc",
"src/mips/simulator-mips.h",
"src/deoptimizer/mips/deoptimizer-mips.cc",
"src/diagnostics/mips/disasm-mips.cc",
"src/execution/mips/frame-constants-mips.cc",
"src/execution/mips/frame-constants-mips.h",
"src/execution/mips/simulator-mips.cc",
"src/execution/mips/simulator-mips.h",
"src/regexp/mips/regexp-macro-assembler-mips.cc",
"src/regexp/mips/regexp-macro-assembler-mips.h",
"src/wasm/baseline/mips/liftoff-assembler-mips.h",
]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
sources += [ ### gcmole(arch:mips64el) ###
"src/codegen/mips64/assembler-mips64-inl.h",
"src/codegen/mips64/assembler-mips64.cc",
"src/codegen/mips64/assembler-mips64.h",
"src/codegen/mips64/constants-mips64.cc",
"src/codegen/mips64/constants-mips64.h",
"src/codegen/mips64/cpu-mips64.cc",
"src/codegen/mips64/interface-descriptors-mips64.cc",
"src/codegen/mips64/macro-assembler-mips64.cc",
"src/codegen/mips64/macro-assembler-mips64.h",
"src/codegen/mips64/register-mips64.h",
"src/compiler/backend/mips64/code-generator-mips64.cc",
"src/compiler/backend/mips64/instruction-codes-mips64.h",
"src/compiler/backend/mips64/instruction-scheduler-mips64.cc",
"src/compiler/backend/mips64/instruction-selector-mips64.cc",
"src/debug/mips64/debug-mips64.cc",
"src/mips64/assembler-mips64-inl.h",
"src/mips64/assembler-mips64.cc",
"src/mips64/assembler-mips64.h",
"src/mips64/constants-mips64.cc",
"src/mips64/constants-mips64.h",
"src/mips64/cpu-mips64.cc",
"src/mips64/deoptimizer-mips64.cc",
"src/mips64/disasm-mips64.cc",
"src/mips64/frame-constants-mips64.cc",
"src/mips64/frame-constants-mips64.h",
"src/mips64/interface-descriptors-mips64.cc",
"src/mips64/macro-assembler-mips64.cc",
"src/mips64/macro-assembler-mips64.h",
"src/mips64/register-mips64.h",
"src/mips64/simulator-mips64.cc",
"src/mips64/simulator-mips64.h",
"src/deoptimizer/mips64/deoptimizer-mips64.cc",
"src/diagnostics/mips64/disasm-mips64.cc",
"src/execution/mips64/frame-constants-mips64.cc",
"src/execution/mips64/frame-constants-mips64.h",
"src/execution/mips64/simulator-mips64.cc",
"src/execution/mips64/simulator-mips64.h",
"src/regexp/mips64/regexp-macro-assembler-mips64.cc",
"src/regexp/mips64/regexp-macro-assembler-mips64.h",
"src/wasm/baseline/mips64/liftoff-assembler-mips64.h",
]
} else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
sources += [ ### gcmole(arch:ppc) ###
"src/codegen/ppc/assembler-ppc-inl.h",
"src/codegen/ppc/assembler-ppc.cc",
"src/codegen/ppc/assembler-ppc.h",
"src/codegen/ppc/constants-ppc.cc",
"src/codegen/ppc/constants-ppc.h",
"src/codegen/ppc/cpu-ppc.cc",
"src/codegen/ppc/interface-descriptors-ppc.cc",
"src/codegen/ppc/macro-assembler-ppc.cc",
"src/codegen/ppc/macro-assembler-ppc.h",
"src/codegen/ppc/register-ppc.h",
"src/compiler/backend/ppc/code-generator-ppc.cc",
"src/compiler/backend/ppc/instruction-codes-ppc.h",
"src/compiler/backend/ppc/instruction-scheduler-ppc.cc",
"src/compiler/backend/ppc/instruction-selector-ppc.cc",
"src/debug/ppc/debug-ppc.cc",
"src/ppc/assembler-ppc-inl.h",
"src/ppc/assembler-ppc.cc",
"src/ppc/assembler-ppc.h",
"src/ppc/constants-ppc.cc",
"src/ppc/constants-ppc.h",
"src/ppc/cpu-ppc.cc",
"src/ppc/deoptimizer-ppc.cc",
"src/ppc/disasm-ppc.cc",
"src/ppc/frame-constants-ppc.cc",
"src/ppc/frame-constants-ppc.h",
"src/ppc/interface-descriptors-ppc.cc",
"src/ppc/macro-assembler-ppc.cc",
"src/ppc/macro-assembler-ppc.h",
"src/ppc/register-ppc.h",
"src/ppc/simulator-ppc.cc",
"src/ppc/simulator-ppc.h",
"src/deoptimizer/ppc/deoptimizer-ppc.cc",
"src/diagnostics/ppc/disasm-ppc.cc",
"src/execution/ppc/frame-constants-ppc.cc",
"src/execution/ppc/frame-constants-ppc.h",
"src/execution/ppc/simulator-ppc.cc",
"src/execution/ppc/simulator-ppc.h",
"src/regexp/ppc/regexp-macro-assembler-ppc.cc",
"src/regexp/ppc/regexp-macro-assembler-ppc.h",
"src/wasm/baseline/ppc/liftoff-assembler-ppc.h",
]
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
sources += [ ### gcmole(arch:s390) ###
"src/codegen/s390/assembler-s390-inl.h",
"src/codegen/s390/assembler-s390.cc",
"src/codegen/s390/assembler-s390.h",
"src/codegen/s390/constants-s390.cc",
"src/codegen/s390/constants-s390.h",
"src/codegen/s390/cpu-s390.cc",
"src/codegen/s390/interface-descriptors-s390.cc",
"src/codegen/s390/macro-assembler-s390.cc",
"src/codegen/s390/macro-assembler-s390.h",
"src/codegen/s390/register-s390.h",
"src/compiler/backend/s390/code-generator-s390.cc",
"src/compiler/backend/s390/instruction-codes-s390.h",
"src/compiler/backend/s390/instruction-scheduler-s390.cc",
"src/compiler/backend/s390/instruction-selector-s390.cc",
"src/debug/s390/debug-s390.cc",
"src/deoptimizer/s390/deoptimizer-s390.cc",
"src/diagnostics/s390/disasm-s390.cc",
"src/execution/s390/frame-constants-s390.cc",
"src/execution/s390/frame-constants-s390.h",
"src/execution/s390/simulator-s390.cc",
"src/execution/s390/simulator-s390.h",
"src/regexp/s390/regexp-macro-assembler-s390.cc",
"src/regexp/s390/regexp-macro-assembler-s390.h",
"src/s390/assembler-s390-inl.h",
"src/s390/assembler-s390.cc",
"src/s390/assembler-s390.h",
"src/s390/constants-s390.cc",
"src/s390/constants-s390.h",
"src/s390/cpu-s390.cc",
"src/s390/deoptimizer-s390.cc",
"src/s390/disasm-s390.cc",
"src/s390/frame-constants-s390.cc",
"src/s390/frame-constants-s390.h",
"src/s390/interface-descriptors-s390.cc",
"src/s390/macro-assembler-s390.cc",
"src/s390/macro-assembler-s390.h",
"src/s390/register-s390.h",
"src/s390/simulator-s390.cc",
"src/s390/simulator-s390.h",
"src/wasm/baseline/s390/liftoff-assembler-s390.h",
]
}

5
OWNERS
View File

@ -22,5 +22,10 @@ per-file codereview.settings=file://INFRA_OWNERS
per-file AUTHORS=file://COMMON_OWNERS
per-file WATCHLIST=file://COMMON_OWNERS
per-file *-mips*=file://MIPS_OWNERS
per-file *-mips64*=file://MIPS_OWNERS
per-file *-ppc*=file://PPC_OWNERS
per-file *-s390*=file://S390_OWNERS
# TEAM: v8-dev@googlegroups.com
# COMPONENT: Blink>JavaScript

View File

@ -1,120 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// CPU specific code for arm64 independent of OS goes here.
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/utils-arm64.h"
#include "src/codegen/cpu-features.h"
namespace v8 {
namespace internal {
class CacheLineSizes {
public:
CacheLineSizes() {
#if !defined(V8_HOST_ARCH_ARM64) || defined(V8_OS_WIN)
cache_type_register_ = 0;
#else
// Copy the content of the cache type register to a core register.
__asm__ __volatile__("mrs %x[ctr], ctr_el0" // NOLINT
: [ctr] "=r"(cache_type_register_));
#endif
}
uint32_t icache_line_size() const { return ExtractCacheLineSize(0); }
uint32_t dcache_line_size() const { return ExtractCacheLineSize(16); }
private:
uint32_t ExtractCacheLineSize(int cache_line_size_shift) const {
// The cache type register holds the size of cache lines in words as a
// power of two.
return 4 << ((cache_type_register_ >> cache_line_size_shift) & 0xF);
}
uint32_t cache_type_register_;
};
// Makes freshly generated code in [address, address + length) visible to the
// instruction side of the CPU: cleans the covering D-cache lines, invalidates
// the covering I-cache lines, and issues the barriers required between and
// after those steps. No-op when not running on an arm64 host.
void CpuFeatures::FlushICache(void* address, size_t length) {
#if defined(V8_HOST_ARCH_ARM64)
#if defined(V8_OS_WIN)
// Windows provides a system call that performs the whole sequence.
::FlushInstructionCache(GetCurrentProcess(), address, length);
#else
// The code below assumes user space cache operations are allowed. The goal
// of this routine is to make sure the code generated is visible to the I
// side of the CPU.
uintptr_t start = reinterpret_cast<uintptr_t>(address);
// Sizes will be used to generate a mask big enough to cover a pointer.
CacheLineSizes sizes;
uintptr_t dsize = sizes.dcache_line_size();
uintptr_t isize = sizes.icache_line_size();
// Cache line sizes are always a power of 2.
DCHECK_EQ(CountSetBits(dsize, 64), 1);
DCHECK_EQ(CountSetBits(isize, 64), 1);
// Round the start address down to the containing D- and I-cache lines.
uintptr_t dstart = start & ~(dsize - 1);
uintptr_t istart = start & ~(isize - 1);
uintptr_t end = start + length;
__asm__ __volatile__ ( // NOLINT
// Clean every line of the D cache containing the target data.
"0: \n\t"
// dc : Data Cache maintenance
// c : Clean
// i : Invalidate
// va : by (Virtual) Address
// c : to the point of Coherency
// See ARM DDI 0406B page B2-12 for more information.
// NOTE(review): DDI 0406B is the ARMv7-AR manual; for AArch64 the
// corresponding reference is ARM DDI 0487 — confirm.
// We would prefer to use "cvau" (clean to the point of unification) here
// but we use "civac" to work around Cortex-A53 errata 819472, 826319,
// 827319 and 824069.
"dc civac, %[dline] \n\t"
"add %[dline], %[dline], %[dsize] \n\t"
"cmp %[dline], %[end] \n\t"
"b.lt 0b \n\t"
// Barrier to make sure the effect of the code above is visible to the rest
// of the world.
// dsb : Data Synchronisation Barrier
// ish : Inner SHareable domain
// The point of unification for an Inner Shareable shareability domain is
// the point by which the instruction and data caches of all the processors
// in that Inner Shareable shareability domain are guaranteed to see the
// same copy of a memory location. See ARM DDI 0406B page B2-12 for more
// information.
"dsb ish \n\t"
// Invalidate every line of the I cache containing the target data.
"1: \n\t"
// ic : instruction cache maintenance
// i : invalidate
// va : by address
// u : to the point of unification
"ic ivau, %[iline] \n\t"
"add %[iline], %[iline], %[isize] \n\t"
"cmp %[iline], %[end] \n\t"
"b.lt 1b \n\t"
// Barrier to make sure the effect of the code above is visible to the rest
// of the world.
"dsb ish \n\t"
// Barrier to ensure any prefetching which happened before this code is
// discarded.
// isb : Instruction Synchronisation Barrier
"isb \n\t"
: [dline] "+r" (dstart),
[iline] "+r" (istart)
: [dsize] "r" (dsize),
[isize] "r" (isize),
[end] "r" (end)
// This code does not write to memory but without the dependency gcc might
// move this code before the code is generated.
: "cc", "memory"
); // NOLINT
#endif // V8_OS_WIN
#endif // V8_HOST_ARCH_ARM64
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64

View File

@ -13,9 +13,9 @@
#include "src/logging/counters.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/mips/constants-mips.h"
#include "src/codegen/register-configuration.h"
#include "src/heap/heap-inl.h"
#include "src/mips/constants-mips.h"
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"

View File

@ -1 +0,0 @@
xwafish@gmail.com

View File

@ -13,9 +13,9 @@
#include "src/logging/counters.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/mips64/constants-mips64.h"
#include "src/codegen/register-configuration.h"
#include "src/heap/heap-inl.h"
#include "src/mips64/constants-mips64.h"
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"

View File

@ -34,10 +34,10 @@
// significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
#ifndef V8_ARM_ASSEMBLER_ARM_INL_H_
#define V8_ARM_ASSEMBLER_ARM_INL_H_
#ifndef V8_CODEGEN_ARM_ASSEMBLER_ARM_INL_H_
#define V8_CODEGEN_ARM_ASSEMBLER_ARM_INL_H_
#include "src/arm/assembler-arm.h"
#include "src/codegen/arm/assembler-arm.h"
#include "src/codegen/assembler.h"
#include "src/debug/debug.h"
@ -55,7 +55,6 @@ int DoubleRegister::NumRegisters() {
return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
}
void RelocInfo::apply(intptr_t delta) {
if (RelocInfo::IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
@ -68,7 +67,6 @@ void RelocInfo::apply(intptr_t delta) {
}
}
Address RelocInfo::target_address() {
DCHECK(IsCodeTargetMode(rmode_) || IsRuntimeEntry(rmode_) ||
IsWasmCall(rmode_));
@ -88,16 +86,12 @@ Address RelocInfo::target_address_address() {
}
}
Address RelocInfo::constant_pool_entry_address() {
DCHECK(IsInConstantPool());
return Assembler::constant_pool_entry_address(pc_, constant_pool_);
}
int RelocInfo::target_address_size() {
return kPointerSize;
}
int RelocInfo::target_address_size() { return kPointerSize; }
HeapObject RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
@ -146,7 +140,6 @@ Address RelocInfo::target_internal_reference() {
return Memory<Address>(pc_);
}
Address RelocInfo::target_internal_reference_address() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
return pc_;
@ -208,7 +201,6 @@ void Assembler::CheckBuffer() {
MaybeCheckConstPool();
}
void Assembler::emit(Instr x) {
CheckBuffer();
*reinterpret_cast<Instr*>(pc_) = x;
@ -230,12 +222,10 @@ void Assembler::deserialization_set_target_internal_reference_at(
Memory<Address>(pc) = target;
}
bool Assembler::is_constant_pool_load(Address pc) {
return IsLdrPcImmediateOffset(Memory<int32_t>(pc));
}
Address Assembler::constant_pool_entry_address(Address pc,
Address constant_pool) {
DCHECK(Assembler::IsLdrPcImmediateOffset(Memory<int32_t>(pc)));
@ -243,7 +233,6 @@ Address Assembler::constant_pool_entry_address(Address pc,
return pc + GetLdrRegisterImmediateOffset(instr) + Instruction::kPcLoadDelta;
}
Address Assembler::target_address_at(Address pc, Address constant_pool) {
if (is_constant_pool_load(pc)) {
// This is a constant pool lookup. Return the value in the constant pool.
@ -369,4 +358,4 @@ T UseScratchRegisterScope::AcquireVfp() {
} // namespace internal
} // namespace v8
#endif // V8_ARM_ASSEMBLER_ARM_INL_H_
#endif // V8_CODEGEN_ARM_ASSEMBLER_ARM_INL_H_

File diff suppressed because it is too large Load Diff

View File

@ -37,14 +37,14 @@
// A light-weight ARM Assembler
// Generates user mode instructions for the ARM architecture up to version 5
#ifndef V8_ARM_ASSEMBLER_ARM_H_
#define V8_ARM_ASSEMBLER_ARM_H_
#ifndef V8_CODEGEN_ARM_ASSEMBLER_ARM_H_
#define V8_CODEGEN_ARM_ASSEMBLER_ARM_H_
#include <stdio.h>
#include <vector>
#include "src/arm/constants-arm.h"
#include "src/arm/register-arm.h"
#include "src/codegen/arm/constants-arm.h"
#include "src/codegen/arm/register-arm.h"
#include "src/codegen/assembler.h"
#include "src/codegen/constant-pool.h"
#include "src/numbers/double.h"
@ -57,16 +57,16 @@ class SafepointTableBuilder;
// Coprocessor number
enum Coprocessor {
p0 = 0,
p1 = 1,
p2 = 2,
p3 = 3,
p4 = 4,
p5 = 5,
p6 = 6,
p7 = 7,
p8 = 8,
p9 = 9,
p0 = 0,
p1 = 1,
p2 = 2,
p3 = 3,
p4 = 4,
p5 = 5,
p6 = 6,
p7 = 7,
p8 = 8,
p9 = 9,
p10 = 10,
p11 = 11,
p12 = 12,
@ -148,9 +148,7 @@ class V8_EXPORT_PRIVATE Operand {
DCHECK(!IsHeapObjectRequest());
return value_.immediate;
}
bool IsImmediate() const {
return !rm_.is_valid();
}
bool IsImmediate() const { return !rm_.is_valid(); }
HeapObjectRequest heap_object_request() const {
DCHECK(IsHeapObjectRequest());
@ -168,12 +166,11 @@ class V8_EXPORT_PRIVATE Operand {
Register rs() const { return rs_; }
ShiftOp shift_op() const { return shift_op_; }
private:
Register rm_ = no_reg;
Register rs_ = no_reg;
ShiftOp shift_op_;
int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
union Value {
Value() {}
HeapObjectRequest heap_object_request; // if is_heap_object_request_
@ -204,8 +201,8 @@ class V8_EXPORT_PRIVATE MemOperand {
// [rn +/- rm <shift_op> shift_imm] Offset/NegOffset
// [rn +/- rm <shift_op> shift_imm]! PreIndex/NegPreIndex
// [rn], +/- rm <shift_op> shift_imm PostIndex/NegPostIndex
explicit MemOperand(Register rn, Register rm,
ShiftOp shift_op, int shift_imm, AddrMode am = Offset);
explicit MemOperand(Register rn, Register rm, ShiftOp shift_op, int shift_imm,
AddrMode am = Offset);
V8_INLINE static MemOperand PointerAddressFromSmiKey(Register array,
Register key,
AddrMode am = Offset) {
@ -232,12 +229,12 @@ class V8_EXPORT_PRIVATE MemOperand {
}
private:
Register rn_; // base
Register rm_; // register offset
Register rn_; // base
Register rm_; // register offset
int32_t offset_; // valid if rm_ == no_reg
ShiftOp shift_op_;
int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
AddrMode am_; // bits P, U, and W
AddrMode am_; // bits P, U, and W
friend class Assembler;
};
@ -269,22 +266,28 @@ class V8_EXPORT_PRIVATE NeonMemOperand {
class NeonListOperand {
public:
explicit NeonListOperand(DoubleRegister base, int register_count = 1)
: base_(base), register_count_(register_count) {}
: base_(base), register_count_(register_count) {}
explicit NeonListOperand(QwNeonRegister q_reg)
: base_(q_reg.low()), register_count_(2) {}
: base_(q_reg.low()), register_count_(2) {}
DoubleRegister base() const { return base_; }
int register_count() { return register_count_; }
int length() const { return register_count_ - 1; }
NeonListType type() const {
switch (register_count_) {
default: UNREACHABLE();
default:
UNREACHABLE();
// Fall through.
case 1: return nlt_1;
case 2: return nlt_2;
case 3: return nlt_3;
case 4: return nlt_4;
case 1:
return nlt_1;
case 2:
return nlt_2;
case 3:
return nlt_3;
case 4:
return nlt_4;
}
}
private:
DoubleRegister base_;
int register_count_;
@ -304,9 +307,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
virtual ~Assembler();
virtual void AbortedCodeGeneration() {
pending_32_bit_constants_.clear();
}
virtual void AbortedCodeGeneration() { pending_32_bit_constants_.clear(); }
// GetCode emits any pending (non-emitted) code and fills the descriptor desc.
static constexpr int kNoHandlerTable = 0;
@ -399,9 +400,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
RelocInfo::Mode rmode = RelocInfo::NONE);
void bl(int branch_offset, Condition cond = al,
RelocInfo::Mode rmode = RelocInfo::NONE);
void blx(int branch_offset); // v5 and above
void blx(int branch_offset); // v5 and above
void blx(Register target, Condition cond = al); // v5 and above
void bx(Register target, Condition cond = al); // v5 and above, plus v4t
void bx(Register target, Condition cond = al); // v5 and above, plus v4t
// Convenience branch instructions using labels
void b(Label* L, Condition cond = al);
@ -412,37 +413,37 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Data-processing instructions
void and_(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
void and_(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
Condition cond = al);
void and_(Register dst, Register src1, Register src2, SBit s = LeaveCC,
Condition cond = al);
void eor(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
void eor(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
Condition cond = al);
void eor(Register dst, Register src1, Register src2, SBit s = LeaveCC,
Condition cond = al);
void sub(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
void sub(Register dst, Register src1, Register src2,
SBit s = LeaveCC, Condition cond = al);
void sub(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
Condition cond = al);
void sub(Register dst, Register src1, Register src2, SBit s = LeaveCC,
Condition cond = al);
void rsb(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
void rsb(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
Condition cond = al);
void add(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
void add(Register dst, Register src1, Register src2,
SBit s = LeaveCC, Condition cond = al);
void add(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
Condition cond = al);
void add(Register dst, Register src1, Register src2, SBit s = LeaveCC,
Condition cond = al);
void adc(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
void adc(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
Condition cond = al);
void sbc(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
void sbc(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
Condition cond = al);
void rsc(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
void rsc(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
Condition cond = al);
void tst(Register src1, const Operand& src2, Condition cond = al);
void tst(Register src1, Register src2, Condition cond = al);
@ -456,13 +457,13 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void cmn(Register src1, const Operand& src2, Condition cond = al);
void orr(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
void orr(Register dst, Register src1, Register src2,
SBit s = LeaveCC, Condition cond = al);
void orr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
Condition cond = al);
void orr(Register dst, Register src1, Register src2, SBit s = LeaveCC,
Condition cond = al);
void mov(Register dst, const Operand& src,
SBit s = LeaveCC, Condition cond = al);
void mov(Register dst, const Operand& src, SBit s = LeaveCC,
Condition cond = al);
void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al);
// Load the position of the label relative to the generated code object
@ -474,11 +475,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void movw(Register reg, uint32_t immediate, Condition cond = al);
void movt(Register reg, uint32_t immediate, Condition cond = al);
void bic(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
void bic(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
Condition cond = al);
void mvn(Register dst, const Operand& src,
SBit s = LeaveCC, Condition cond = al);
void mvn(Register dst, const Operand& src, SBit s = LeaveCC,
Condition cond = al);
// Shift instructions
@ -499,13 +500,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void mls(Register dst, Register src1, Register src2, Register srcA,
Condition cond = al);
void sdiv(Register dst, Register src1, Register src2,
Condition cond = al);
void sdiv(Register dst, Register src1, Register src2, Condition cond = al);
void udiv(Register dst, Register src1, Register src2, Condition cond = al);
void mul(Register dst, Register src1, Register src2,
SBit s = LeaveCC, Condition cond = al);
void mul(Register dst, Register src1, Register src2, SBit s = LeaveCC,
Condition cond = al);
void smmla(Register dst, Register src1, Register src2, Register srcA,
Condition cond = al);
@ -557,8 +557,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void bfc(Register dst, int lsb, int width, Condition cond = al);
void bfi(Register dst, Register src, int lsb, int width,
Condition cond = al);
void bfi(Register dst, Register src, int lsb, int width, Condition cond = al);
void pkhbt(Register dst, Register src1, const Operand& src2,
Condition cond = al);
@ -599,12 +598,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void strh(Register src, const MemOperand& dst, Condition cond = al);
void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
void ldrd(Register dst1,
Register dst2,
const MemOperand& src, Condition cond = al);
void strd(Register src1,
Register src2,
const MemOperand& dst, Condition cond = al);
void ldrd(Register dst1, Register dst2, const MemOperand& src,
Condition cond = al);
void strd(Register src1, Register src2, const MemOperand& dst,
Condition cond = al);
// Load literal from a pc relative address.
void ldr_pcrel(Register dst, int imm12, Condition cond = al);
@ -628,8 +625,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
// Exception-generating instructions and debugging support
void stop(const char* msg,
Condition cond = al,
void stop(const char* msg, Condition cond = al,
int32_t code = kDefaultStopCode);
void bkpt(uint32_t imm16); // v5 and above
@ -646,28 +642,25 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Coprocessor instructions
void cdp(Coprocessor coproc, int opcode_1,
CRegister crd, CRegister crn, CRegister crm,
int opcode_2, Condition cond = al);
void cdp(Coprocessor coproc, int opcode_1, CRegister crd, CRegister crn,
CRegister crm, int opcode_2, Condition cond = al);
void cdp2(Coprocessor coproc, int opcode_1,
CRegister crd, CRegister crn, CRegister crm,
void cdp2(Coprocessor coproc, int opcode_1, CRegister crd, CRegister crn,
CRegister crm,
int opcode_2); // v5 and above
void mcr(Coprocessor coproc, int opcode_1,
Register rd, CRegister crn, CRegister crm,
int opcode_2 = 0, Condition cond = al);
void mcr(Coprocessor coproc, int opcode_1, Register rd, CRegister crn,
CRegister crm, int opcode_2 = 0, Condition cond = al);
void mcr2(Coprocessor coproc, int opcode_1,
Register rd, CRegister crn, CRegister crm,
void mcr2(Coprocessor coproc, int opcode_1, Register rd, CRegister crn,
CRegister crm,
int opcode_2 = 0); // v5 and above
void mrc(Coprocessor coproc, int opcode_1,
Register rd, CRegister crn, CRegister crm,
int opcode_2 = 0, Condition cond = al);
void mrc(Coprocessor coproc, int opcode_1, Register rd, CRegister crn,
CRegister crm, int opcode_2 = 0, Condition cond = al);
void mrc2(Coprocessor coproc, int opcode_1,
Register rd, CRegister crn, CRegister crm,
void mrc2(Coprocessor coproc, int opcode_1, Register rd, CRegister crn,
CRegister crm,
int opcode_2 = 0); // v5 and above
void ldc(Coprocessor coproc, CRegister crd, const MemOperand& src,
@ -683,215 +676,146 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Support for VFP.
// All these APIs support S0 to S31 and D0 to D31.
void vldr(const DwVfpRegister dst,
const Register base,
int offset,
void vldr(const DwVfpRegister dst, const Register base, int offset,
const Condition cond = al);
void vldr(const DwVfpRegister dst,
const MemOperand& src,
void vldr(const DwVfpRegister dst, const MemOperand& src,
const Condition cond = al);
void vldr(const SwVfpRegister dst,
const Register base,
int offset,
void vldr(const SwVfpRegister dst, const Register base, int offset,
const Condition cond = al);
void vldr(const SwVfpRegister dst,
const MemOperand& src,
void vldr(const SwVfpRegister dst, const MemOperand& src,
const Condition cond = al);
void vstr(const DwVfpRegister src,
const Register base,
int offset,
void vstr(const DwVfpRegister src, const Register base, int offset,
const Condition cond = al);
void vstr(const DwVfpRegister src,
const MemOperand& dst,
void vstr(const DwVfpRegister src, const MemOperand& dst,
const Condition cond = al);
void vstr(const SwVfpRegister src,
const Register base,
int offset,
void vstr(const SwVfpRegister src, const Register base, int offset,
const Condition cond = al);
void vstr(const SwVfpRegister src,
const MemOperand& dst,
void vstr(const SwVfpRegister src, const MemOperand& dst,
const Condition cond = al);
void vldm(BlockAddrMode am,
Register base,
DwVfpRegister first,
DwVfpRegister last,
Condition cond = al);
void vldm(BlockAddrMode am, Register base, DwVfpRegister first,
DwVfpRegister last, Condition cond = al);
void vstm(BlockAddrMode am,
Register base,
DwVfpRegister first,
DwVfpRegister last,
Condition cond = al);
void vstm(BlockAddrMode am, Register base, DwVfpRegister first,
DwVfpRegister last, Condition cond = al);
void vldm(BlockAddrMode am,
Register base,
SwVfpRegister first,
SwVfpRegister last,
Condition cond = al);
void vldm(BlockAddrMode am, Register base, SwVfpRegister first,
SwVfpRegister last, Condition cond = al);
void vstm(BlockAddrMode am,
Register base,
SwVfpRegister first,
SwVfpRegister last,
Condition cond = al);
void vstm(BlockAddrMode am, Register base, SwVfpRegister first,
SwVfpRegister last, Condition cond = al);
void vmov(const SwVfpRegister dst, Float32 imm);
void vmov(const DwVfpRegister dst,
Double imm,
void vmov(const DwVfpRegister dst, Double imm,
const Register extra_scratch = no_reg);
void vmov(const SwVfpRegister dst,
const SwVfpRegister src,
void vmov(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond = al);
void vmov(const DwVfpRegister dst,
const DwVfpRegister src,
void vmov(const DwVfpRegister dst, const DwVfpRegister src,
const Condition cond = al);
void vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
void vmov(const DwVfpRegister dst, const Register src1, const Register src2,
const Condition cond = al);
void vmov(const Register dst1,
const Register dst2,
const DwVfpRegister src,
void vmov(const Register dst1, const Register dst2, const DwVfpRegister src,
const Condition cond = al);
void vmov(const SwVfpRegister dst,
const Register src,
void vmov(const SwVfpRegister dst, const Register src,
const Condition cond = al);
void vmov(const Register dst,
const SwVfpRegister src,
void vmov(const Register dst, const SwVfpRegister src,
const Condition cond = al);
void vcvt_f64_s32(const DwVfpRegister dst,
const SwVfpRegister src,
void vcvt_f64_s32(const DwVfpRegister dst, const SwVfpRegister src,
VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_f32_s32(const SwVfpRegister dst,
const SwVfpRegister src,
void vcvt_f32_s32(const SwVfpRegister dst, const SwVfpRegister src,
VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_f64_u32(const DwVfpRegister dst,
const SwVfpRegister src,
void vcvt_f64_u32(const DwVfpRegister dst, const SwVfpRegister src,
VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_f32_u32(const SwVfpRegister dst,
const SwVfpRegister src,
void vcvt_f32_u32(const SwVfpRegister dst, const SwVfpRegister src,
VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_s32_f32(const SwVfpRegister dst,
const SwVfpRegister src,
void vcvt_s32_f32(const SwVfpRegister dst, const SwVfpRegister src,
VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_u32_f32(const SwVfpRegister dst,
const SwVfpRegister src,
void vcvt_u32_f32(const SwVfpRegister dst, const SwVfpRegister src,
VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_s32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
void vcvt_s32_f64(const SwVfpRegister dst, const DwVfpRegister src,
VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_u32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
void vcvt_u32_f64(const SwVfpRegister dst, const DwVfpRegister src,
VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_f64_f32(const DwVfpRegister dst,
const SwVfpRegister src,
void vcvt_f64_f32(const DwVfpRegister dst, const SwVfpRegister src,
VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_f32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
void vcvt_f32_f64(const SwVfpRegister dst, const DwVfpRegister src,
VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_f64_s32(const DwVfpRegister dst,
int fraction_bits,
void vcvt_f64_s32(const DwVfpRegister dst, int fraction_bits,
const Condition cond = al);
void vmrs(const Register dst, const Condition cond = al);
void vmsr(const Register dst, const Condition cond = al);
void vneg(const DwVfpRegister dst,
const DwVfpRegister src,
void vneg(const DwVfpRegister dst, const DwVfpRegister src,
const Condition cond = al);
void vneg(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond = al);
void vabs(const DwVfpRegister dst,
const DwVfpRegister src,
void vabs(const DwVfpRegister dst, const DwVfpRegister src,
const Condition cond = al);
void vabs(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond = al);
void vadd(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond = al);
void vadd(const DwVfpRegister dst, const DwVfpRegister src1,
const DwVfpRegister src2, const Condition cond = al);
void vadd(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond = al);
void vsub(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond = al);
void vsub(const DwVfpRegister dst, const DwVfpRegister src1,
const DwVfpRegister src2, const Condition cond = al);
void vsub(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond = al);
void vmul(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond = al);
void vmul(const DwVfpRegister dst, const DwVfpRegister src1,
const DwVfpRegister src2, const Condition cond = al);
void vmul(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond = al);
void vmla(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond = al);
void vmla(const DwVfpRegister dst, const DwVfpRegister src1,
const DwVfpRegister src2, const Condition cond = al);
void vmla(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond = al);
void vmls(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond = al);
void vmls(const DwVfpRegister dst, const DwVfpRegister src1,
const DwVfpRegister src2, const Condition cond = al);
void vmls(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond = al);
void vdiv(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond = al);
void vdiv(const DwVfpRegister dst, const DwVfpRegister src1,
const DwVfpRegister src2, const Condition cond = al);
void vdiv(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond = al);
void vcmp(const DwVfpRegister src1,
const DwVfpRegister src2,
void vcmp(const DwVfpRegister src1, const DwVfpRegister src2,
const Condition cond = al);
void vcmp(const SwVfpRegister src1, const SwVfpRegister src2,
const Condition cond = al);
void vcmp(const DwVfpRegister src1,
const double src2,
void vcmp(const DwVfpRegister src1, const double src2,
const Condition cond = al);
void vcmp(const SwVfpRegister src1, const float src2,
const Condition cond = al);
void vmaxnm(const DwVfpRegister dst,
const DwVfpRegister src1,
void vmaxnm(const DwVfpRegister dst, const DwVfpRegister src1,
const DwVfpRegister src2);
void vmaxnm(const SwVfpRegister dst,
const SwVfpRegister src1,
void vmaxnm(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2);
void vminnm(const DwVfpRegister dst,
const DwVfpRegister src1,
void vminnm(const DwVfpRegister dst, const DwVfpRegister src1,
const DwVfpRegister src2);
void vminnm(const SwVfpRegister dst,
const SwVfpRegister src1,
void vminnm(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2);
// VSEL supports cond in {eq, ne, ge, lt, gt, le, vs, vc}.
void vsel(const Condition cond,
const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2);
void vsel(const Condition cond,
const SwVfpRegister dst,
const SwVfpRegister src1,
const SwVfpRegister src2);
void vsel(const Condition cond, const DwVfpRegister dst,
const DwVfpRegister src1, const DwVfpRegister src2);
void vsel(const Condition cond, const SwVfpRegister dst,
const SwVfpRegister src1, const SwVfpRegister src2);
void vsqrt(const DwVfpRegister dst,
const DwVfpRegister src,
void vsqrt(const DwVfpRegister dst, const DwVfpRegister src,
const Condition cond = al);
void vsqrt(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond = al);
@ -913,11 +837,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Support for NEON.
// All these APIs support D0 to D31 and Q0 to Q15.
void vld1(NeonSize size,
const NeonListOperand& dst,
void vld1(NeonSize size, const NeonListOperand& dst,
const NeonMemOperand& src);
void vst1(NeonSize size,
const NeonListOperand& src,
void vst1(NeonSize size, const NeonListOperand& src,
const NeonMemOperand& dst);
// dt represents the narrower type
void vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src);
@ -961,16 +883,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
QwNeonRegister src2);
void vqsub(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2);
void vmul(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2);
void vmul(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vmul(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2);
void vmin(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vmin(NeonDataType dt, QwNeonRegister dst,
QwNeonRegister src1, QwNeonRegister src2);
void vmin(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2);
void vmax(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vmax(NeonDataType dt, QwNeonRegister dst,
QwNeonRegister src1, QwNeonRegister src2);
void vmax(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2);
void vpadd(DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2);
void vpadd(NeonSize size, DwVfpRegister dst, DwVfpRegister src1,
DwVfpRegister src2);
@ -1030,7 +951,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
};
void nop(int type = 0); // 0 is the default non-marking type.
void nop(int type = 0); // 0 is the default non-marking type.
void push(Register src, Condition cond = al) {
str(src, MemOperand(sp, 4, NegPreIndex), cond);
@ -1083,9 +1004,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
assem_->StartBlockConstPool();
}
~BlockConstPoolScope() {
assem_->EndBlockConstPool();
}
~BlockConstPoolScope() { assem_->EndBlockConstPool(); }
private:
Assembler* assem_;
@ -1322,7 +1241,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
int no_const_pool_before_; // Block emission before this pc offset.
int no_const_pool_before_; // Block emission before this pc offset.
// Keep track of the first instruction requiring a constant pool entry
// since the previous constant pool was emitted.
@ -1434,4 +1353,4 @@ class V8_EXPORT_PRIVATE UseScratchRegisterScope {
} // namespace internal
} // namespace v8
#endif // V8_ARM_ASSEMBLER_ARM_H_
#endif // V8_CODEGEN_ARM_ASSEMBLER_ARM_H_

View File

@ -4,8 +4,7 @@
#if V8_TARGET_ARCH_ARM
#include "src/arm/constants-arm.h"
#include "src/codegen/arm/constants-arm.h"
namespace v8 {
namespace internal {
@ -19,24 +18,22 @@ Float64 Instruction::DoubleImmedVmov() const {
//
// where B = ~b. Only the high 16 bits are affected.
uint64_t high16;
high16 = (Bits(17, 16) << 4) | Bits(3, 0); // xxxxxxxx,xxcdefgh.
high16 |= (0xFF * Bit(18)) << 6; // xxbbbbbb,bbxxxxxx.
high16 |= (Bit(18) ^ 1) << 14; // xBxxxxxx,xxxxxxxx.
high16 |= Bit(19) << 15; // axxxxxxx,xxxxxxxx.
high16 = (Bits(17, 16) << 4) | Bits(3, 0); // xxxxxxxx,xxcdefgh.
high16 |= (0xFF * Bit(18)) << 6; // xxbbbbbb,bbxxxxxx.
high16 |= (Bit(18) ^ 1) << 14; // xBxxxxxx,xxxxxxxx.
high16 |= Bit(19) << 15; // axxxxxxx,xxxxxxxx.
uint64_t imm = high16 << 48;
return Float64::FromBits(imm);
}
// These register names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
const char* Registers::names_[kNumRegisters] = {
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
"r8", "r9", "r10", "fp", "ip", "sp", "lr", "pc",
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
"r8", "r9", "r10", "fp", "ip", "sp", "lr", "pc",
};
// List of alias names which can be used when referring to ARM registers.
const Registers::RegisterAlias Registers::aliases_[] = {
{10, "sl"}, {11, "r11"}, {12, "r12"}, {13, "r13"},
@ -47,23 +44,18 @@ const Registers::RegisterAlias Registers::aliases_[] = {
// These register names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
const char* VFPRegisters::names_[kNumVFPRegisters] = {
"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
"s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
"s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
"s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
"d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
"d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"
};
"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10",
"s11", "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21",
"s22", "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31", "d0",
"d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
"d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
"d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
const char* VFPRegisters::Name(int reg, bool is_double) {
DCHECK((0 <= reg) && (reg < kNumVFPRegisters));
return names_[reg + (is_double ? kNumVFPSingleRegisters : 0)];
}
int VFPRegisters::Number(const char* name, bool* is_double) {
for (int i = 0; i < kNumVFPRegisters; i++) {
if (strcmp(names_[i], name) == 0) {
@ -81,7 +73,6 @@ int VFPRegisters::Number(const char* name, bool* is_double) {
return kNoRegister;
}
int Registers::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumRegisters; i++) {
@ -103,7 +94,6 @@ int Registers::Number(const char* name) {
return kNoRegister;
}
} // namespace internal
} // namespace v8

View File

@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM_CONSTANTS_ARM_H_
#define V8_ARM_CONSTANTS_ARM_H_
#ifndef V8_CODEGEN_ARM_CONSTANTS_ARM_H_
#define V8_CODEGEN_ARM_CONSTANTS_ARM_H_
#include <stdint.h>
@ -76,37 +76,35 @@ constexpr int kRootRegisterBias = 4095;
enum Condition {
kNoCondition = -1,
eq = 0 << 28, // Z set Equal.
ne = 1 << 28, // Z clear Not equal.
cs = 2 << 28, // C set Unsigned higher or same.
cc = 3 << 28, // C clear Unsigned lower.
mi = 4 << 28, // N set Negative.
pl = 5 << 28, // N clear Positive or zero.
vs = 6 << 28, // V set Overflow.
vc = 7 << 28, // V clear No overflow.
hi = 8 << 28, // C set, Z clear Unsigned higher.
ls = 9 << 28, // C clear or Z set Unsigned lower or same.
ge = 10 << 28, // N == V Greater or equal.
lt = 11 << 28, // N != V Less than.
gt = 12 << 28, // Z clear, N == V Greater than.
le = 13 << 28, // Z set or N != V Less then or equal
al = 14 << 28, // Always.
eq = 0 << 28, // Z set Equal.
ne = 1 << 28, // Z clear Not equal.
cs = 2 << 28, // C set Unsigned higher or same.
cc = 3 << 28, // C clear Unsigned lower.
mi = 4 << 28, // N set Negative.
pl = 5 << 28, // N clear Positive or zero.
vs = 6 << 28, // V set Overflow.
vc = 7 << 28, // V clear No overflow.
hi = 8 << 28, // C set, Z clear Unsigned higher.
ls = 9 << 28, // C clear or Z set Unsigned lower or same.
ge = 10 << 28, // N == V Greater or equal.
lt = 11 << 28, // N != V Less than.
gt = 12 << 28, // Z clear, N == V Greater than.
le = 13 << 28, // Z set or N != V Less then or equal
al = 14 << 28, // Always.
kSpecialCondition = 15 << 28, // Special condition (refer to section A3.2.1).
kNumberOfConditions = 16,
// Aliases.
hs = cs, // C set Unsigned higher or same.
lo = cc // C clear Unsigned lower.
hs = cs, // C set Unsigned higher or same.
lo = cc // C clear Unsigned lower.
};
inline Condition NegateCondition(Condition cond) {
DCHECK(cond != al);
return static_cast<Condition>(cond ^ ne);
}
// -----------------------------------------------------------------------------
// Instructions encoding.
@ -119,16 +117,16 @@ using Instr = int32_t;
// Opcodes for Data-processing instructions (instructions with a type 0 and 1)
// as defined in section A3.4
enum Opcode {
AND = 0 << 21, // Logical AND.
EOR = 1 << 21, // Logical Exclusive OR.
SUB = 2 << 21, // Subtract.
RSB = 3 << 21, // Reverse Subtract.
ADD = 4 << 21, // Add.
ADC = 5 << 21, // Add with Carry.
SBC = 6 << 21, // Subtract with Carry.
RSC = 7 << 21, // Reverse Subtract with Carry.
TST = 8 << 21, // Test.
TEQ = 9 << 21, // Test Equivalence.
AND = 0 << 21, // Logical AND.
EOR = 1 << 21, // Logical Exclusive OR.
SUB = 2 << 21, // Subtract.
RSB = 3 << 21, // Reverse Subtract.
ADD = 4 << 21, // Add.
ADC = 5 << 21, // Add with Carry.
SBC = 6 << 21, // Subtract with Carry.
RSC = 7 << 21, // Reverse Subtract with Carry.
TST = 8 << 21, // Test.
TEQ = 9 << 21, // Test Equivalence.
CMP = 10 << 21, // Compare.
CMN = 11 << 21, // Compare Negated.
ORR = 12 << 21, // Logical (inclusive) OR.
@ -137,20 +135,18 @@ enum Opcode {
MVN = 15 << 21 // Move Not.
};
// The bits for bit 7-4 for some type 0 miscellaneous instructions.
enum MiscInstructionsBits74 {
// With bits 22-21 01.
BX = 1 << 4,
BXJ = 2 << 4,
BLX = 3 << 4,
BKPT = 7 << 4,
BX = 1 << 4,
BXJ = 2 << 4,
BLX = 3 << 4,
BKPT = 7 << 4,
// With bits 22-21 11.
CLZ = 1 << 4
CLZ = 1 << 4
};
// Instruction encoding bits and masks.
enum {
H = 1 << 5, // Halfword (or byte).
@ -215,30 +211,24 @@ enum BarrierOption {
SY = 0xf,
};
// -----------------------------------------------------------------------------
// Addressing modes and instruction variants.
// Condition code updating mode.
enum SBit {
SetCC = 1 << 20, // Set condition code.
LeaveCC = 0 << 20 // Leave condition code unchanged.
SetCC = 1 << 20, // Set condition code.
LeaveCC = 0 << 20 // Leave condition code unchanged.
};
// Status register selection.
enum SRegister {
CPSR = 0 << 22,
SPSR = 1 << 22
};
enum SRegister { CPSR = 0 << 22, SPSR = 1 << 22 };
// Shifter types for Data-processing operands as defined in section A5.1.2.
enum ShiftOp {
LSL = 0 << 5, // Logical shift left.
LSR = 1 << 5, // Logical shift right.
ASR = 2 << 5, // Arithmetic shift right.
ROR = 3 << 5, // Rotate right.
LSL = 0 << 5, // Logical shift left.
LSR = 1 << 5, // Logical shift right.
ASR = 2 << 5, // Arithmetic shift right.
ROR = 3 << 5, // Rotate right.
// RRX is encoded as ROR with shift_imm == 0.
// Use a special code to make the distinction. The RRX ShiftOp is only used
@ -248,7 +238,6 @@ enum ShiftOp {
kNumberOfShifts = 4
};
// Status register fields.
enum SRegisterField {
CPSR_c = CPSR | 1 << 16,
@ -267,41 +256,40 @@ using SRegisterFieldMask = uint32_t;
// Memory operand addressing mode.
enum AddrMode {
// Bit encoding P U W.
Offset = (8|4|0) << 21, // Offset (without writeback to base).
PreIndex = (8|4|1) << 21, // Pre-indexed addressing with writeback.
PostIndex = (0|4|0) << 21, // Post-indexed addressing with writeback.
NegOffset = (8|0|0) << 21, // Negative offset (without writeback to base).
NegPreIndex = (8|0|1) << 21, // Negative pre-indexed with writeback.
NegPostIndex = (0|0|0) << 21 // Negative post-indexed with writeback.
Offset = (8 | 4 | 0) << 21, // Offset (without writeback to base).
PreIndex = (8 | 4 | 1) << 21, // Pre-indexed addressing with writeback.
PostIndex = (0 | 4 | 0) << 21, // Post-indexed addressing with writeback.
NegOffset =
(8 | 0 | 0) << 21, // Negative offset (without writeback to base).
NegPreIndex = (8 | 0 | 1) << 21, // Negative pre-indexed with writeback.
NegPostIndex = (0 | 0 | 0) << 21 // Negative post-indexed with writeback.
};
// Load/store multiple addressing mode.
enum BlockAddrMode {
// Bit encoding P U W .
da = (0|0|0) << 21, // Decrement after.
ia = (0|4|0) << 21, // Increment after.
db = (8|0|0) << 21, // Decrement before.
ib = (8|4|0) << 21, // Increment before.
da_w = (0|0|1) << 21, // Decrement after with writeback to base.
ia_w = (0|4|1) << 21, // Increment after with writeback to base.
db_w = (8|0|1) << 21, // Decrement before with writeback to base.
ib_w = (8|4|1) << 21, // Increment before with writeback to base.
da = (0 | 0 | 0) << 21, // Decrement after.
ia = (0 | 4 | 0) << 21, // Increment after.
db = (8 | 0 | 0) << 21, // Decrement before.
ib = (8 | 4 | 0) << 21, // Increment before.
da_w = (0 | 0 | 1) << 21, // Decrement after with writeback to base.
ia_w = (0 | 4 | 1) << 21, // Increment after with writeback to base.
db_w = (8 | 0 | 1) << 21, // Decrement before with writeback to base.
ib_w = (8 | 4 | 1) << 21, // Increment before with writeback to base.
// Alias modes for comparison when writeback does not matter.
da_x = (0|0|0) << 21, // Decrement after.
ia_x = (0|4|0) << 21, // Increment after.
db_x = (8|0|0) << 21, // Decrement before.
ib_x = (8|4|0) << 21, // Increment before.
da_x = (0 | 0 | 0) << 21, // Decrement after.
ia_x = (0 | 4 | 0) << 21, // Increment after.
db_x = (8 | 0 | 0) << 21, // Decrement before.
ib_x = (8 | 4 | 0) << 21, // Increment before.
kBlockAddrModeMask = (8|4|1) << 21
kBlockAddrModeMask = (8 | 4 | 1) << 21
};
// Coprocessor load/store operand size.
enum LFlag {
Long = 1 << 22, // Long load/store coprocessor.
Short = 0 << 22 // Short load/store coprocessor.
Long = 1 << 22, // Long load/store coprocessor.
Short = 0 << 22 // Short load/store coprocessor.
};
// Neon sizes.
@ -331,12 +319,7 @@ inline NeonSize NeonDataTypeToSize(NeonDataType dt) {
return static_cast<NeonSize>(NeonSz(dt));
}
enum NeonListType {
nlt_1 = 0x7,
nlt_2 = 0xA,
nlt_3 = 0x6,
nlt_4 = 0x2
};
enum NeonListType { nlt_1 = 0x7, nlt_2 = 0xA, nlt_3 = 0x6, nlt_4 = 0x2 };
// -----------------------------------------------------------------------------
// Supervisor Call (svc) specific support.
@ -355,8 +338,7 @@ enum SoftwareInterruptCodes {
};
const uint32_t kStopCodeMask = kStopCode - 1;
const uint32_t kMaxStopCode = kStopCode - 1;
const int32_t kDefaultStopCode = -1;
const int32_t kDefaultStopCode = -1;
// Type of VFP register. Determines register encoding.
enum VFPRegPrecision {
@ -366,10 +348,7 @@ enum VFPRegPrecision {
};
// VFP FPSCR constants.
enum VFPConversionMode {
kFPSCRRounding = 0,
kDefaultRoundToZero = 1
};
enum VFPConversionMode { kFPSCRRounding = 0, kDefaultRoundToZero = 1 };
// This mask does not include the "inexact" or "input denormal" cumulative
// exceptions flags, because we usually don't want to check for it.
@ -386,13 +365,12 @@ const uint32_t kVFPZConditionFlagBit = 1 << 30;
const uint32_t kVFPCConditionFlagBit = 1 << 29;
const uint32_t kVFPVConditionFlagBit = 1 << 28;
// VFP rounding modes. See ARM DDI 0406B Page A2-29.
enum VFPRoundingMode {
RN = 0 << 22, // Round to Nearest.
RP = 1 << 22, // Round towards Plus Infinity.
RM = 2 << 22, // Round towards Minus Infinity.
RZ = 3 << 22, // Round towards zero.
RN = 0 << 22, // Round to Nearest.
RP = 1 << 22, // Round towards Plus Infinity.
RM = 2 << 22, // Round towards Minus Infinity.
RZ = 3 << 22, // Round towards zero.
// Aliases.
kRoundToNearest = RN,
@ -419,7 +397,6 @@ enum Hint { no_hint };
// Hints are not used on the arm. Negating is trivial.
inline Hint NegateHint(Hint ignored) { return no_hint; }
// -----------------------------------------------------------------------------
// Instruction abstraction.
@ -468,9 +445,7 @@ class Instruction {
// Extract a single bit from the instruction bits and return it as bit 0 in
// the result.
inline int Bit(int nr) const {
return (InstructionBits() >> nr) & 1;
}
inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; }
// Extract a bit field <hi:lo> from the instruction bits and return it in the
// least-significant bits of the result.
@ -487,9 +462,7 @@ class Instruction {
// Extract a single bit from the instruction bits and return it as bit 0 in
// the result.
static inline int Bit(Instr instr, int nr) {
return (instr >> nr) & 1;
}
static inline int Bit(Instr instr, int nr) { return (instr >> nr) & 1; }
// Extract a bit field <hi:lo> from the instruction bits and return it in the
// least-significant bits of the result.
@ -514,7 +487,6 @@ class Instruction {
// e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as
// 0xC0810002 ConditionField(instr) will return 0xC.
// Generally applicable fields
inline int ConditionValue() const { return Bits(31, 28); }
inline Condition ConditionField() const {
@ -562,14 +534,12 @@ class Instruction {
}
// Fields used in Data processing instructions
inline int OpcodeValue() const {
return static_cast<Opcode>(Bits(24, 21));
}
inline int OpcodeValue() const { return static_cast<Opcode>(Bits(24, 21)); }
inline Opcode OpcodeField() const {
return static_cast<Opcode>(BitField(24, 21));
}
inline int SValue() const { return Bit(20); }
// with register
// with register
inline int RmValue() const { return Bits(3, 0); }
DECLARE_STATIC_ACCESSOR(RmValue)
inline int ShiftValue() const { return static_cast<ShiftOp>(Bits(6, 5)); }
@ -579,28 +549,29 @@ class Instruction {
inline int RegShiftValue() const { return Bit(4); }
inline int RsValue() const { return Bits(11, 8); }
inline int ShiftAmountValue() const { return Bits(11, 7); }
// with immediate
// with immediate
inline int RotateValue() const { return Bits(11, 8); }
DECLARE_STATIC_ACCESSOR(RotateValue)
inline int Immed8Value() const { return Bits(7, 0); }
DECLARE_STATIC_ACCESSOR(Immed8Value)
inline int Immed4Value() const { return Bits(19, 16); }
inline int ImmedMovwMovtValue() const {
return Immed4Value() << 12 | Offset12Value(); }
return Immed4Value() << 12 | Offset12Value();
}
DECLARE_STATIC_ACCESSOR(ImmedMovwMovtValue)
// Fields used in Load/Store instructions
inline int PUValue() const { return Bits(24, 23); }
inline int PUField() const { return BitField(24, 23); }
inline int BValue() const { return Bit(22); }
inline int WValue() const { return Bit(21); }
inline int LValue() const { return Bit(20); }
// with register uses same fields as Data processing instructions above
// with immediate
inline int BValue() const { return Bit(22); }
inline int WValue() const { return Bit(21); }
inline int LValue() const { return Bit(20); }
// with register uses same fields as Data processing instructions above
// with immediate
inline int Offset12Value() const { return Bits(11, 0); }
// multiple
// multiple
inline int RlistValue() const { return Bits(15, 0); }
// extra loads and stores
// extra loads and stores
inline int SignValue() const { return Bit(6); }
inline int HValue() const { return Bit(5); }
inline int ImmedHValue() const { return Bits(11, 8); }
@ -638,10 +609,10 @@ class Instruction {
inline bool IsSpecialType0() const { return (Bit(7) == 1) && (Bit(4) == 1); }
// Test for miscellaneous instructions encodings of type 0 instructions.
inline bool IsMiscType0() const { return (Bit(24) == 1)
&& (Bit(23) == 0)
&& (Bit(20) == 0)
&& ((Bit(7) == 0)); }
inline bool IsMiscType0() const {
return (Bit(24) == 1) && (Bit(23) == 0) && (Bit(20) == 0) &&
((Bit(7) == 0));
}
// Test for nop-like instructions which fall under type 1.
inline bool IsNopLikeType1() const { return Bits(24, 8) == 0x120F0; }
@ -652,13 +623,13 @@ class Instruction {
}
// Special accessors that test for existence of a value.
inline bool HasS() const { return SValue() == 1; }
inline bool HasB() const { return BValue() == 1; }
inline bool HasW() const { return WValue() == 1; }
inline bool HasL() const { return LValue() == 1; }
inline bool HasU() const { return UValue() == 1; }
inline bool HasS() const { return SValue() == 1; }
inline bool HasB() const { return BValue() == 1; }
inline bool HasW() const { return WValue() == 1; }
inline bool HasL() const { return LValue() == 1; }
inline bool HasU() const { return UValue() == 1; }
inline bool HasSign() const { return SignValue() == 1; }
inline bool HasH() const { return HValue() == 1; }
inline bool HasH() const { return HValue() == 1; }
inline bool HasLink() const { return LinkValue() == 1; }
// Decode the double immediate from a vmov instruction.
@ -672,7 +643,6 @@ class Instruction {
return reinterpret_cast<Instruction*>(pc);
}
private:
// Join split register codes, depending on register precision.
// four_bit is the position of the least-significant bit of the four
@ -696,7 +666,6 @@ class Instruction {
DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
};
// Helper functions for converting between register numbers and names.
class Registers {
public:
@ -737,4 +706,4 @@ constexpr size_t kMaxPCRelativeCodeRangeInMB = 32;
} // namespace internal
} // namespace v8
#endif // V8_ARM_CONSTANTS_ARM_H_
#endif // V8_CODEGEN_ARM_CONSTANTS_ARM_H_

View File

@ -6,7 +6,7 @@
#ifdef __arm__
#ifdef __QNXNTO__
#include <sys/mman.h> // for cache flushing.
#undef MAP_TYPE
#undef MAP_TYPE // NOLINT
#else
#include <sys/syscall.h> // for cache flushing.
#endif
@ -43,21 +43,21 @@ V8_NOINLINE void CpuFeatures::FlushICache(void* start, size_t size) {
// Use a different variant of the asm with GCC because some versions doesn't
// support r7 as an asm input.
asm volatile(
// This assembly works for both ARM and Thumb targets.
// This assembly works for both ARM and Thumb targets.
// Preserve r7; it is callee-saved, and GCC uses it as a frame pointer for
// Thumb targets.
" push {r7}\n"
// r0 = beg
// r1 = end
// r2 = flags (0)
" ldr r7, =%c[scno]\n" // r7 = syscall number
" svc 0\n"
// Preserve r7; it is callee-saved, and GCC uses it as a frame pointer for
// Thumb targets.
" push {r7}\n"
// r0 = beg
// r1 = end
// r2 = flags (0)
" ldr r7, =%c[scno]\n" // r7 = syscall number
" svc 0\n"
" pop {r7}\n"
:
: "r" (beg), "r" (end), "r" (flg), [scno] "i" (__ARM_NR_cacheflush)
: "memory");
" pop {r7}\n"
:
: "r"(beg), "r"(end), "r"(flg), [scno] "i"(__ARM_NR_cacheflush)
: "memory");
#endif
#endif
#endif // !USE_SIMULATOR

View File

@ -74,7 +74,6 @@ const Register ApiGetterDescriptor::CallbackRegister() { return r3; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return r0; }

View File

@ -30,7 +30,7 @@
// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
#if 0
#include "src/arm/macro-assembler-arm.h"
#include "src/codegen/arm/macro-assembler-arm.h"
#endif
namespace v8 {
@ -535,7 +535,6 @@ void MacroAssembler::Mls(Register dst, Register src1, Register src2,
}
}
void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
Condition cond) {
if (!src2.IsRegister() && !src2.MustOutputRelocInfo(this) &&
@ -547,13 +546,12 @@ void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
base::bits::IsPowerOfTwo(src2.immediate() + 1)) {
CpuFeatureScope scope(this, ARMv7);
ubfx(dst, src1, 0,
WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
} else {
and_(dst, src1, src2, LeaveCC, cond);
}
}
void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
DCHECK_LT(lsb, 32);
@ -569,7 +567,6 @@ void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
}
}
void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
DCHECK_LT(lsb, 32);
@ -590,7 +587,6 @@ void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
}
}
void TurboAssembler::Bfc(Register dst, Register src, int lsb, int width,
Condition cond) {
DCHECK_LT(lsb, 32);
@ -843,8 +839,9 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) {
void TurboAssembler::PushStandardFrame(Register function_reg) {
DCHECK(!function_reg.is_valid() || function_reg.code() < cp.code());
stm(db_w, sp, (function_reg.is_valid() ? function_reg.bit() : 0) | cp.bit() |
fp.bit() | lr.bit());
stm(db_w, sp,
(function_reg.is_valid() ? function_reg.bit() : 0) | cp.bit() | fp.bit() |
lr.bit());
int offset = -StandardFrameConstants::kContextOffset;
offset += function_reg.is_valid() ? kPointerSize : 0;
add(fp, sp, Operand(offset));
@ -1400,7 +1397,7 @@ int TurboAssembler::ActivationFrameAlignment() {
// Note: This will break if we ever start generating snapshots on one ARM
// platform for another ARM platform with a different alignment.
return base::OS::ActivationFrameAlignment();
#else // V8_HOST_ARCH_ARM
#else // V8_HOST_ARCH_ARM
// If we are using the simulator then we should always align to the expected
// alignment. As the simulator is used to generate snapshots we do not know
// if the target platform will need alignment, so this is controlled from a
@ -1460,7 +1457,6 @@ void TurboAssembler::MovFromFloatResult(const DwVfpRegister dst) {
}
}
// On ARM this is just a synonym to make the purpose clear.
void TurboAssembler::MovFromFloatParameter(DwVfpRegister dst) {
MovFromFloatResult(dst);
@ -1744,7 +1740,6 @@ void MacroAssembler::PushStackHandler() {
str(sp, MemOperand(r6));
}
void MacroAssembler::PopStackHandler() {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@ -1756,11 +1751,8 @@ void MacroAssembler::PopStackHandler() {
add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
}
void MacroAssembler::CompareObjectType(Register object,
Register map,
Register type_reg,
InstanceType type) {
void MacroAssembler::CompareObjectType(Register object, Register map,
Register type_reg, InstanceType type) {
UseScratchRegisterScope temps(this);
const Register temp = type_reg == no_reg ? temps.Acquire() : type_reg;
@ -1768,9 +1760,7 @@ void MacroAssembler::CompareObjectType(Register object,
CompareInstanceType(map, temp, type);
}
void MacroAssembler::CompareInstanceType(Register map,
Register type_reg,
void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
InstanceType type) {
ldrh(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
cmp(type_reg, Operand(type));
@ -1862,8 +1852,7 @@ void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
CallCodeObject(centry);
}
void MacroAssembler::CallRuntime(const Runtime::Function* f,
int num_arguments,
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles) {
// All parameters are on the stack. r0 has the return value after call.
@ -1932,7 +1921,6 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
}
}
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK_GT(value, 0);
@ -1945,8 +1933,7 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
void TurboAssembler::Assert(Condition cond, AbortReason reason) {
if (emit_debug_code())
Check(cond, reason);
if (emit_debug_code()) Check(cond, reason);
}
void TurboAssembler::AssertUnreachable(AbortReason reason) {
@ -2011,7 +1998,6 @@ void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
ldr(dst, ContextMemOperand(dst, index));
}
void TurboAssembler::InitializeRootRegister() {
ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
mov(kRootRegister, Operand(isolate_root));
@ -2057,7 +2043,6 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
}
void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@ -2092,7 +2077,6 @@ void MacroAssembler::AssertFunction(Register object) {
}
}
void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@ -2148,7 +2132,6 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
}
void TurboAssembler::CheckFor32DRegs(Register scratch) {
Move(scratch, ExternalReference::cpu_features());
ldr(scratch, MemOperand(scratch));
@ -2338,8 +2321,8 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
int frame_alignment = ActivationFrameAlignment();
int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments);
int stack_passed_arguments =
CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
if (frame_alignment > kPointerSize) {
UseScratchRegisterScope temps(this);
if (!scratch.is_valid()) scratch = temps.Acquire();
@ -2362,7 +2345,6 @@ void TurboAssembler::MovToFloatParameter(DwVfpRegister src) {
}
}
// On ARM this is just a synonym to make the purpose clear.
void TurboAssembler::MovToFloatResult(DwVfpRegister src) {
MovToFloatParameter(src);
@ -2457,8 +2439,8 @@ void TurboAssembler::CallCFunctionHelper(Register function,
Pop(scratch1);
}
int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments);
int stack_passed_arguments =
CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
if (ActivationFrameAlignment() > kPointerSize) {
ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
@ -2477,11 +2459,8 @@ void TurboAssembler::CheckPageFlag(Register object, int mask, Condition cc,
b(cc, condition_met);
}
Register GetRegisterThatIsNotOneOf(Register reg1,
Register reg2,
Register reg3,
Register reg4,
Register reg5,
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
Register reg4, Register reg5,
Register reg6) {
RegList regs = 0;
if (reg1.is_valid()) regs |= reg1.bit();

View File

@ -6,10 +6,10 @@
#error This header must be included via macro-assembler.h
#endif
#ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_
#ifndef V8_CODEGEN_ARM_MACRO_ASSEMBLER_ARM_H_
#define V8_CODEGEN_ARM_MACRO_ASSEMBLER_ARM_H_
#include "src/arm/assembler-arm.h"
#include "src/codegen/arm/assembler-arm.h"
#include "src/codegen/bailout-reason.h"
#include "src/common/globals.h"
#include "src/objects/contexts.h"
@ -29,9 +29,7 @@ enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
Register GetRegisterThatIsNotOneOf(Register reg1,
Register reg2 = no_reg,
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
Register reg3 = no_reg,
Register reg4 = no_reg,
Register reg5 = no_reg,
@ -666,17 +664,13 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// register unless the heap_object register is the same register as one of the
// other registers.
// Type_reg can be no_reg. In that case a scratch register is used.
void CompareObjectType(Register heap_object,
Register map,
Register type_reg,
void CompareObjectType(Register heap_object, Register map, Register type_reg,
InstanceType type);
// Compare instance type in a map. map contains a valid map object whose
// object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
void CompareInstanceType(Register map,
Register type_reg,
InstanceType type);
void CompareInstanceType(Register map, Register type_reg, InstanceType type);
// Compare the object in a register to a value from the root list.
// Acquires a scratch register.
@ -709,8 +703,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Runtime calls
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f,
int num_arguments,
void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
// Convenience function: Same as above, but takes the fid instead.
@ -743,10 +736,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// StatsCounter support
void IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2);
void DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2);
void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
Register scratch2);
void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
Register scratch2);
// ---------------------------------------------------------------------------
// Smi utilities
@ -781,12 +774,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
template<typename Field>
template <typename Field>
void DecodeField(Register dst, Register src) {
Ubfx(dst, src, Field::kShift, Field::kSize);
}
template<typename Field>
template <typename Field>
void DecodeField(Register reg) {
DecodeField<Field>(reg, reg);
}
@ -814,7 +807,6 @@ inline MemOperand ContextMemOperand(Register context, int index = 0) {
return MemOperand(context, Context::SlotOffset(index));
}
inline MemOperand NativeContextMemOperand() {
return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
@ -824,4 +816,4 @@ inline MemOperand NativeContextMemOperand() {
} // namespace internal
} // namespace v8
#endif // V8_ARM_MACRO_ASSEMBLER_ARM_H_
#endif // V8_CODEGEN_ARM_MACRO_ASSEMBLER_ARM_H_

View File

@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM_REGISTER_ARM_H_
#define V8_ARM_REGISTER_ARM_H_
#ifndef V8_CODEGEN_ARM_REGISTER_ARM_H_
#define V8_CODEGEN_ARM_REGISTER_ARM_H_
#include "src/codegen/register.h"
#include "src/codegen/reglist.h"
@ -366,4 +366,4 @@ constexpr Register kRootRegister = r10; // Roots array pointer.
} // namespace internal
} // namespace v8
#endif // V8_ARM_REGISTER_ARM_H_
#endif // V8_CODEGEN_ARM_REGISTER_ARM_H_

View File

@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM64_ASSEMBLER_ARM64_INL_H_
#define V8_ARM64_ASSEMBLER_ARM64_INL_H_
#ifndef V8_CODEGEN_ARM64_ASSEMBLER_ARM64_INL_H_
#define V8_CODEGEN_ARM64_ASSEMBLER_ARM64_INL_H_
#include "src/arm64/assembler-arm64.h"
#include "src/codegen/arm64/assembler-arm64.h"
#include "src/codegen/assembler.h"
#include "src/debug/debug.h"
#include "src/objects/objects-inl.h"
@ -35,24 +35,20 @@ void RelocInfo::apply(intptr_t delta) {
}
}
inline bool CPURegister::IsSameSizeAndType(const CPURegister& other) const {
return (reg_size_ == other.reg_size_) && (reg_type_ == other.reg_type_);
}
inline bool CPURegister::IsZero() const {
DCHECK(IsValid());
return IsRegister() && (reg_code_ == kZeroRegCode);
}
inline bool CPURegister::IsSP() const {
DCHECK(IsValid());
return IsRegister() && (reg_code_ == kSPRegInternalCode);
}
inline void CPURegList::Combine(const CPURegList& other) {
DCHECK(IsValid());
DCHECK(other.type() == type_);
@ -60,7 +56,6 @@ inline void CPURegList::Combine(const CPURegList& other) {
list_ |= other.list();
}
inline void CPURegList::Remove(const CPURegList& other) {
DCHECK(IsValid());
if (other.type() == type_) {
@ -68,14 +63,12 @@ inline void CPURegList::Remove(const CPURegList& other) {
}
}
inline void CPURegList::Combine(const CPURegister& other) {
DCHECK(other.type() == type_);
DCHECK(other.SizeInBits() == size_);
Combine(other.code());
}
inline void CPURegList::Remove(const CPURegister& other1,
const CPURegister& other2,
const CPURegister& other3,
@ -86,21 +79,18 @@ inline void CPURegList::Remove(const CPURegister& other1,
if (!other4.IsNone() && (other4.type() == type_)) Remove(other4.code());
}
inline void CPURegList::Combine(int code) {
DCHECK(IsValid());
DCHECK(CPURegister::Create(code, size_, type_).IsValid());
list_ |= (1ULL << code);
}
inline void CPURegList::Remove(int code) {
DCHECK(IsValid());
DCHECK(CPURegister::Create(code, size_, type_).IsValid());
list_ &= ~(1ULL << code);
}
inline Register Register::XRegFromCode(unsigned code) {
if (code == kSPRegInternalCode) {
return sp;
@ -110,7 +100,6 @@ inline Register Register::XRegFromCode(unsigned code) {
}
}
inline Register Register::WRegFromCode(unsigned code) {
if (code == kSPRegInternalCode) {
return wsp;
@ -200,10 +189,9 @@ inline VRegister CPURegister::Q() const {
return VRegister::QRegFromCode(reg_code_);
}
// Immediate.
// Default initializer is for int types
template<typename T>
template <typename T>
struct ImmediateInitializer {
static const bool kIsIntType = true;
static inline RelocInfo::Mode rmode_for(T) { return RelocInfo::NONE; }
@ -222,51 +210,43 @@ struct ImmediateInitializer<Smi> {
}
};
template<>
template <>
struct ImmediateInitializer<ExternalReference> {
static const bool kIsIntType = false;
static inline RelocInfo::Mode rmode_for(ExternalReference t) {
return RelocInfo::EXTERNAL_REFERENCE;
}
static inline int64_t immediate_for(ExternalReference t) {;
static inline int64_t immediate_for(ExternalReference t) {
return static_cast<int64_t>(t.address());
}
};
template<typename T>
template <typename T>
Immediate::Immediate(Handle<T> value) {
InitializeHandle(value);
}
template<typename T>
template <typename T>
Immediate::Immediate(T t)
: value_(ImmediateInitializer<T>::immediate_for(t)),
rmode_(ImmediateInitializer<T>::rmode_for(t)) {}
template<typename T>
template <typename T>
Immediate::Immediate(T t, RelocInfo::Mode rmode)
: value_(ImmediateInitializer<T>::immediate_for(t)),
rmode_(rmode) {
: value_(ImmediateInitializer<T>::immediate_for(t)), rmode_(rmode) {
STATIC_ASSERT(ImmediateInitializer<T>::kIsIntType);
}
// Operand.
template<typename T>
template <typename T>
Operand::Operand(Handle<T> value) : immediate_(value), reg_(NoReg) {}
template<typename T>
template <typename T>
Operand::Operand(T t) : immediate_(t), reg_(NoReg) {}
template<typename T>
template <typename T>
Operand::Operand(T t, RelocInfo::Mode rmode)
: immediate_(t, rmode),
reg_(NoReg) {}
: immediate_(t, rmode), reg_(NoReg) {}
Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
: immediate_(0),
@ -279,7 +259,6 @@ Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
DCHECK_IMPLIES(reg.IsSP(), shift_amount == 0);
}
Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
: immediate_(0),
reg_(reg),
@ -311,17 +290,14 @@ bool Operand::IsImmediate() const {
return reg_.Is(NoReg) && !IsHeapObjectRequest();
}
bool Operand::IsShiftedRegister() const {
return reg_.IsValid() && (shift_ != NO_SHIFT);
}
bool Operand::IsExtendedRegister() const {
return reg_.IsValid() && (extend_ != NO_EXTEND);
}
bool Operand::IsZero() const {
if (IsImmediate()) {
return ImmediateValue() == 0;
@ -330,7 +306,6 @@ bool Operand::IsZero() const {
}
}
Operand Operand::ToExtendedRegister() const {
DCHECK(IsShiftedRegister());
DCHECK((shift_ == LSL) && (shift_amount_ <= 4));
@ -350,7 +325,6 @@ Immediate Operand::immediate() const {
return immediate_;
}
int64_t Operand::ImmediateValue() const {
DCHECK(IsImmediate());
return immediate_.value();
@ -366,43 +340,50 @@ Register Operand::reg() const {
return reg_;
}
Shift Operand::shift() const {
DCHECK(IsShiftedRegister());
return shift_;
}
Extend Operand::extend() const {
DCHECK(IsExtendedRegister());
return extend_;
}
unsigned Operand::shift_amount() const {
DCHECK(IsShiftedRegister() || IsExtendedRegister());
return shift_amount_;
}
MemOperand::MemOperand()
: base_(NoReg), regoffset_(NoReg), offset_(0), addrmode_(Offset),
shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
}
: base_(NoReg),
regoffset_(NoReg),
offset_(0),
addrmode_(Offset),
shift_(NO_SHIFT),
extend_(NO_EXTEND),
shift_amount_(0) {}
MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode)
: base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode),
shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
: base_(base),
regoffset_(NoReg),
offset_(offset),
addrmode_(addrmode),
shift_(NO_SHIFT),
extend_(NO_EXTEND),
shift_amount_(0) {
DCHECK(base.Is64Bits() && !base.IsZero());
}
MemOperand::MemOperand(Register base,
Register regoffset,
Extend extend,
MemOperand::MemOperand(Register base, Register regoffset, Extend extend,
unsigned shift_amount)
: base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) {
: base_(base),
regoffset_(regoffset),
offset_(0),
addrmode_(Offset),
shift_(NO_SHIFT),
extend_(extend),
shift_amount_(shift_amount) {
DCHECK(base.Is64Bits() && !base.IsZero());
DCHECK(!regoffset.IsSP());
DCHECK((extend == UXTW) || (extend == SXTW) || (extend == SXTX));
@ -411,13 +392,15 @@ MemOperand::MemOperand(Register base,
DCHECK(regoffset.Is64Bits() || (extend != SXTX));
}
MemOperand::MemOperand(Register base,
Register regoffset,
Shift shift,
MemOperand::MemOperand(Register base, Register regoffset, Shift shift,
unsigned shift_amount)
: base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) {
: base_(base),
regoffset_(regoffset),
offset_(0),
addrmode_(Offset),
shift_(shift),
extend_(NO_EXTEND),
shift_amount_(shift_amount) {
DCHECK(base.Is64Bits() && !base.IsZero());
DCHECK(regoffset.Is64Bits() && !regoffset.IsSP());
DCHECK(shift == LSL);
@ -464,20 +447,13 @@ bool MemOperand::IsImmediateOffset() const {
return (addrmode_ == Offset) && regoffset_.Is(NoReg);
}
bool MemOperand::IsRegisterOffset() const {
return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
}
bool MemOperand::IsPreIndex() const { return addrmode_ == PreIndex; }
bool MemOperand::IsPreIndex() const {
return addrmode_ == PreIndex;
}
bool MemOperand::IsPostIndex() const {
return addrmode_ == PostIndex;
}
bool MemOperand::IsPostIndex() const { return addrmode_ == PostIndex; }
Operand MemOperand::OffsetAsOperand() const {
if (IsImmediateOffset()) {
@ -492,7 +468,6 @@ Operand MemOperand::OffsetAsOperand() const {
}
}
void Assembler::Unreachable() {
#ifdef USE_SIMULATOR
debug("UNREACHABLE", __LINE__, BREAK);
@ -502,14 +477,12 @@ void Assembler::Unreachable() {
#endif
}
Address Assembler::target_pointer_address_at(Address pc) {
Instruction* instr = reinterpret_cast<Instruction*>(pc);
DCHECK(instr->IsLdrLiteralX());
return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
}
// Read/Modify the code target address in the branch/call instruction at pc.
Address Assembler::target_address_at(Address pc, Address constant_pool) {
Instruction* instr = reinterpret_cast<Instruction*>(pc);
@ -621,7 +594,6 @@ int RelocInfo::target_address_size() {
}
}
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
@ -651,7 +623,6 @@ Address RelocInfo::target_address_address() {
}
}
Address RelocInfo::constant_pool_entry_address() {
DCHECK(IsInConstantPool());
return Assembler::target_pointer_address_at(pc_);
@ -705,7 +676,6 @@ Address RelocInfo::target_internal_reference() {
return Memory<Address>(pc_);
}
Address RelocInfo::target_internal_reference_address() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
return pc_;
@ -763,7 +733,6 @@ LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
}
}
LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
DCHECK(rt.IsValid());
if (rt.IsRegister()) {
@ -813,7 +782,6 @@ LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
}
}
LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
if (rt.IsRegister()) {
return rt.Is64Bits() ? LDR_x_lit : LDR_w_lit;
@ -823,7 +791,6 @@ LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
}
}
int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
DCHECK_EQ(kStartOfLabelLinkChain, 0);
int offset = LinkAndGetByteOffsetTo(label);
@ -831,7 +798,6 @@ int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
return offset >> kInstrSizeLog2;
}
Instr Assembler::Flags(FlagsUpdate S) {
if (S == SetFlags) {
return 1 << FlagsUpdate_offset;
@ -841,11 +807,7 @@ Instr Assembler::Flags(FlagsUpdate S) {
UNREACHABLE();
}
Instr Assembler::Cond(Condition cond) {
return cond << Condition_offset;
}
Instr Assembler::Cond(Condition cond) { return cond << Condition_offset; }
Instr Assembler::ImmPCRelAddress(int imm21) {
CHECK(is_int21(imm21));
@ -855,31 +817,26 @@ Instr Assembler::ImmPCRelAddress(int imm21) {
return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
}
Instr Assembler::ImmUncondBranch(int imm26) {
CHECK(is_int26(imm26));
return truncate_to_int26(imm26) << ImmUncondBranch_offset;
}
Instr Assembler::ImmCondBranch(int imm19) {
CHECK(is_int19(imm19));
return truncate_to_int19(imm19) << ImmCondBranch_offset;
}
Instr Assembler::ImmCmpBranch(int imm19) {
CHECK(is_int19(imm19));
return truncate_to_int19(imm19) << ImmCmpBranch_offset;
}
Instr Assembler::ImmTestBranch(int imm14) {
CHECK(is_int14(imm14));
return truncate_to_int14(imm14) << ImmTestBranch_offset;
}
Instr Assembler::ImmTestBranchBit(unsigned bit_pos) {
DCHECK(is_uint6(bit_pos));
// Subtract five from the shift offset, as we need bit 5 from bit_pos.
@ -890,12 +847,10 @@ Instr Assembler::ImmTestBranchBit(unsigned bit_pos) {
return b5 | b40;
}
Instr Assembler::SF(Register rd) {
return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
}
Instr Assembler::ImmAddSub(int imm) {
DCHECK(IsImmAddSub(imm));
if (is_uint12(imm)) { // No shift required.
@ -906,7 +861,6 @@ Instr Assembler::ImmAddSub(int imm) {
return imm;
}
Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {
DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(imms)) ||
((reg_size == kWRegSizeInBits) && is_uint5(imms)));
@ -914,7 +868,6 @@ Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {
return imms << ImmS_offset;
}
Instr Assembler::ImmR(unsigned immr, unsigned reg_size) {
DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
((reg_size == kWRegSizeInBits) && is_uint5(immr)));
@ -923,7 +876,6 @@ Instr Assembler::ImmR(unsigned immr, unsigned reg_size) {
return immr << ImmR_offset;
}
Instr Assembler::ImmSetBits(unsigned imms, unsigned reg_size) {
DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
DCHECK(is_uint6(imms));
@ -932,7 +884,6 @@ Instr Assembler::ImmSetBits(unsigned imms, unsigned reg_size) {
return imms << ImmSetBits_offset;
}
Instr Assembler::ImmRotate(unsigned immr, unsigned reg_size) {
DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
@ -941,13 +892,11 @@ Instr Assembler::ImmRotate(unsigned immr, unsigned reg_size) {
return immr << ImmRotate_offset;
}
Instr Assembler::ImmLLiteral(int imm19) {
CHECK(is_int19(imm19));
return truncate_to_int19(imm19) << ImmLLiteral_offset;
}
Instr Assembler::BitN(unsigned bitn, unsigned reg_size) {
DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
DCHECK((reg_size == kXRegSizeInBits) || (bitn == 0));
@ -955,47 +904,39 @@ Instr Assembler::BitN(unsigned bitn, unsigned reg_size) {
return bitn << BitN_offset;
}
Instr Assembler::ShiftDP(Shift shift) {
DCHECK(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
return shift << ShiftDP_offset;
}
Instr Assembler::ImmDPShift(unsigned amount) {
DCHECK(is_uint6(amount));
return amount << ImmDPShift_offset;
}
Instr Assembler::ExtendMode(Extend extend) {
return extend << ExtendMode_offset;
}
Instr Assembler::ImmExtendShift(unsigned left_shift) {
DCHECK_LE(left_shift, 4);
return left_shift << ImmExtendShift_offset;
}
Instr Assembler::ImmCondCmp(unsigned imm) {
DCHECK(is_uint5(imm));
return imm << ImmCondCmp_offset;
}
Instr Assembler::Nzcv(StatusFlags nzcv) {
return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
}
Instr Assembler::ImmLSUnsigned(int imm12) {
DCHECK(is_uint12(imm12));
return imm12 << ImmLSUnsigned_offset;
}
Instr Assembler::ImmLS(int imm9) {
DCHECK(is_int9(imm9));
return truncate_to_int9(imm9) << ImmLS_offset;
@ -1008,37 +949,31 @@ Instr Assembler::ImmLSPair(int imm7, unsigned size) {
return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
}
Instr Assembler::ImmShiftLS(unsigned shift_amount) {
DCHECK(is_uint1(shift_amount));
return shift_amount << ImmShiftLS_offset;
}
Instr Assembler::ImmException(int imm16) {
DCHECK(is_uint16(imm16));
return imm16 << ImmException_offset;
}
Instr Assembler::ImmSystemRegister(int imm15) {
DCHECK(is_uint15(imm15));
return imm15 << ImmSystemRegister_offset;
}
Instr Assembler::ImmHint(int imm7) {
DCHECK(is_uint7(imm7));
return imm7 << ImmHint_offset;
}
Instr Assembler::ImmBarrierDomain(int imm2) {
DCHECK(is_uint2(imm2));
return imm2 << ImmBarrierDomain_offset;
}
Instr Assembler::ImmBarrierType(int imm2) {
DCHECK(is_uint2(imm2));
return imm2 << ImmBarrierType_offset;
@ -1057,13 +992,11 @@ unsigned Assembler::CalcLSDataSize(LoadStoreOp op) {
return size;
}
Instr Assembler::ImmMoveWide(int imm) {
DCHECK(is_uint16(imm));
return imm << ImmMoveWide_offset;
}
Instr Assembler::ShiftMoveWide(int shift) {
DCHECK(is_uint2(shift));
return shift << ShiftMoveWide_offset;
@ -1076,12 +1009,10 @@ Instr Assembler::FPScale(unsigned scale) {
return scale << FPScale_offset;
}
const Register& Assembler::AppropriateZeroRegFor(const CPURegister& reg) const {
return reg.Is64Bits() ? xzr : wzr;
}
inline void Assembler::CheckBufferSpace() {
DCHECK_LT(pc_, buffer_start_ + buffer_->size());
if (buffer_space() < kGap) {
@ -1089,7 +1020,6 @@ inline void Assembler::CheckBufferSpace() {
}
}
inline void Assembler::CheckBuffer() {
CheckBufferSpace();
if (pc_offset() >= next_veneer_pool_check_) {
@ -1103,4 +1033,4 @@ inline void Assembler::CheckBuffer() {
} // namespace internal
} // namespace v8
#endif // V8_ARM64_ASSEMBLER_ARM64_INL_H_
#endif // V8_CODEGEN_ARM64_ASSEMBLER_ARM64_INL_H_

View File

@ -2,18 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM64_ASSEMBLER_ARM64_H_
#define V8_ARM64_ASSEMBLER_ARM64_H_
#ifndef V8_CODEGEN_ARM64_ASSEMBLER_ARM64_H_
#define V8_CODEGEN_ARM64_ASSEMBLER_ARM64_H_
#include <deque>
#include <list>
#include <map>
#include <vector>
#include "src/arm64/constants-arm64.h"
#include "src/arm64/instructions-arm64.h"
#include "src/arm64/register-arm64.h"
#include "src/base/optional.h"
#include "src/codegen/arm64/constants-arm64.h"
#include "src/codegen/arm64/instructions-arm64.h"
#include "src/codegen/arm64/register-arm64.h"
#include "src/codegen/assembler.h"
#include "src/codegen/constant-pool.h"
#include "src/common/globals.h"
@ -34,15 +34,15 @@ class SafepointTableBuilder;
// Immediates.
class Immediate {
public:
template<typename T>
template <typename T>
inline explicit Immediate(Handle<T> handle);
// This is allowed to be an implicit constructor because Immediate is
// a wrapper class that doesn't normally perform any type conversion.
template<typename T>
template <typename T>
inline Immediate(T value); // NOLINT(runtime/explicit)
template<typename T>
template <typename T>
inline Immediate(T value, RelocInfo::Mode rmode);
int64_t value() const { return value_; }
@ -55,7 +55,6 @@ class Immediate {
RelocInfo::Mode rmode_;
};
// -----------------------------------------------------------------------------
// Operands.
constexpr int kSmiShift = kSmiTagSize + kSmiShiftSize;
@ -71,16 +70,13 @@ class Operand {
// <shift_amount> is uint6_t.
// This is allowed to be an implicit constructor because Operand is
// a wrapper class that doesn't normally perform any type conversion.
inline Operand(Register reg,
Shift shift = LSL,
inline Operand(Register reg, Shift shift = LSL,
unsigned shift_amount = 0); // NOLINT(runtime/explicit)
// rm, <extend> {#<shift_amount>}
// where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
// <shift_amount> is uint2_t.
inline Operand(Register reg,
Extend extend,
unsigned shift_amount = 0);
inline Operand(Register reg, Extend extend, unsigned shift_amount = 0);
static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
static Operand EmbeddedStringConstant(const StringConstantBase* str);
@ -89,15 +85,15 @@ class Operand {
inline HeapObjectRequest heap_object_request() const;
inline Immediate immediate_for_heap_object_request() const;
template<typename T>
template <typename T>
inline explicit Operand(Handle<T> handle);
// Implicit constructor for all int types, ExternalReference, and Smi.
template<typename T>
template <typename T>
inline Operand(T t); // NOLINT(runtime/explicit)
// Implicit constructor for int types.
template<typename T>
template <typename T>
inline Operand(T t, RelocInfo::Mode rmode);
inline bool IsImmediate() const;
@ -129,24 +125,17 @@ class Operand {
unsigned shift_amount_;
};
// MemOperand represents a memory operand in a load or store instruction.
class MemOperand {
public:
inline MemOperand();
inline explicit MemOperand(Register base,
int64_t offset = 0,
inline explicit MemOperand(Register base, int64_t offset = 0,
AddrMode addrmode = Offset);
inline explicit MemOperand(Register base,
Register regoffset,
Shift shift = LSL,
inline explicit MemOperand(Register base, Register regoffset,
Shift shift = LSL, unsigned shift_amount = 0);
inline explicit MemOperand(Register base, Register regoffset, Extend extend,
unsigned shift_amount = 0);
inline explicit MemOperand(Register base,
Register regoffset,
Extend extend,
unsigned shift_amount = 0);
inline explicit MemOperand(Register base,
const Operand& offset,
inline explicit MemOperand(Register base, const Operand& offset,
AddrMode addrmode = Offset);
const Register& base() const { return base_; }
@ -166,9 +155,9 @@ class MemOperand {
inline Operand OffsetAsOperand() const;
enum PairResult {
kNotPair, // Can't use a pair instruction.
kPairAB, // Can use a pair instruction (operandA has lower address).
kPairBA // Can use a pair instruction (operandB has lower address).
kNotPair, // Can't use a pair instruction.
kPairAB, // Can use a pair instruction (operandA has lower address).
kPairBA // Can use a pair instruction (operandB has lower address).
};
// Check if two MemOperand are consistent for stp/ldp use.
static PairResult AreConsistentForPair(const MemOperand& operandA,
@ -185,7 +174,6 @@ class MemOperand {
unsigned shift_amount_;
};
class ConstPool {
public:
explicit ConstPool(Assembler* assm) : assm_(assm), first_use_(-1) {}
@ -240,7 +228,6 @@ class ConstPool {
std::vector<std::pair<uint64_t, std::vector<int> > > entries_;
};
// -----------------------------------------------------------------------------
// Assembler.
@ -298,7 +285,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// before they go out of scope.
void bind(Label* label);
// RelocInfo and pools ------------------------------------------------------
// Record relocation information for current pc_.
@ -502,90 +488,62 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Data Processing instructions.
// Add.
void add(const Register& rd,
const Register& rn,
const Operand& operand);
void add(const Register& rd, const Register& rn, const Operand& operand);
// Add and update status flags.
void adds(const Register& rd,
const Register& rn,
const Operand& operand);
void adds(const Register& rd, const Register& rn, const Operand& operand);
// Compare negative.
void cmn(const Register& rn, const Operand& operand);
// Subtract.
void sub(const Register& rd,
const Register& rn,
const Operand& operand);
void sub(const Register& rd, const Register& rn, const Operand& operand);
// Subtract and update status flags.
void subs(const Register& rd,
const Register& rn,
const Operand& operand);
void subs(const Register& rd, const Register& rn, const Operand& operand);
// Compare.
void cmp(const Register& rn, const Operand& operand);
// Negate.
void neg(const Register& rd,
const Operand& operand);
void neg(const Register& rd, const Operand& operand);
// Negate and update status flags.
void negs(const Register& rd,
const Operand& operand);
void negs(const Register& rd, const Operand& operand);
// Add with carry bit.
void adc(const Register& rd,
const Register& rn,
const Operand& operand);
void adc(const Register& rd, const Register& rn, const Operand& operand);
// Add with carry bit and update status flags.
void adcs(const Register& rd,
const Register& rn,
const Operand& operand);
void adcs(const Register& rd, const Register& rn, const Operand& operand);
// Subtract with carry bit.
void sbc(const Register& rd,
const Register& rn,
const Operand& operand);
void sbc(const Register& rd, const Register& rn, const Operand& operand);
// Subtract with carry bit and update status flags.
void sbcs(const Register& rd,
const Register& rn,
const Operand& operand);
void sbcs(const Register& rd, const Register& rn, const Operand& operand);
// Negate with carry bit.
void ngc(const Register& rd,
const Operand& operand);
void ngc(const Register& rd, const Operand& operand);
// Negate with carry bit and update status flags.
void ngcs(const Register& rd,
const Operand& operand);
void ngcs(const Register& rd, const Operand& operand);
// Logical instructions.
// Bitwise and (A & B).
void and_(const Register& rd,
const Register& rn,
const Operand& operand);
void and_(const Register& rd, const Register& rn, const Operand& operand);
// Bitwise and (A & B) and update status flags.
void ands(const Register& rd,
const Register& rn,
const Operand& operand);
void ands(const Register& rd, const Register& rn, const Operand& operand);
// Bit test, and set flags.
void tst(const Register& rn, const Operand& operand);
// Bit clear (A & ~B).
void bic(const Register& rd,
const Register& rn,
const Operand& operand);
void bic(const Register& rd, const Register& rn, const Operand& operand);
// Bit clear (A & ~B) and update status flags.
void bics(const Register& rd,
const Register& rn,
const Operand& operand);
void bics(const Register& rd, const Register& rn, const Operand& operand);
// Bitwise and.
void and_(const VRegister& vd, const VRegister& vn, const VRegister& vm);
@ -750,19 +708,13 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
// Signed extend byte.
void sxtb(const Register& rd, const Register& rn) {
sbfm(rd, rn, 0, 7);
}
void sxtb(const Register& rd, const Register& rn) { sbfm(rd, rn, 0, 7); }
// Signed extend halfword.
void sxth(const Register& rd, const Register& rn) {
sbfm(rd, rn, 0, 15);
}
void sxth(const Register& rd, const Register& rn) { sbfm(rd, rn, 0, 15); }
// Signed extend word.
void sxtw(const Register& rd, const Register& rn) {
sbfm(rd, rn, 0, 31);
}
void sxtw(const Register& rd, const Register& rn) { sbfm(rd, rn, 0, 31); }
// Ubfm aliases.
// Logical shift left.
@ -793,46 +745,32 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
// Unsigned extend byte.
void uxtb(const Register& rd, const Register& rn) {
ubfm(rd, rn, 0, 7);
}
void uxtb(const Register& rd, const Register& rn) { ubfm(rd, rn, 0, 7); }
// Unsigned extend halfword.
void uxth(const Register& rd, const Register& rn) {
ubfm(rd, rn, 0, 15);
}
void uxth(const Register& rd, const Register& rn) { ubfm(rd, rn, 0, 15); }
// Unsigned extend word.
void uxtw(const Register& rd, const Register& rn) {
ubfm(rd, rn, 0, 31);
}
void uxtw(const Register& rd, const Register& rn) { ubfm(rd, rn, 0, 31); }
// Extract.
void extr(const Register& rd, const Register& rn, const Register& rm,
int lsb);
// Conditional select: rd = cond ? rn : rm.
void csel(const Register& rd,
const Register& rn,
const Register& rm,
void csel(const Register& rd, const Register& rn, const Register& rm,
Condition cond);
// Conditional select increment: rd = cond ? rn : rm + 1.
void csinc(const Register& rd,
const Register& rn,
const Register& rm,
void csinc(const Register& rd, const Register& rn, const Register& rm,
Condition cond);
// Conditional select inversion: rd = cond ? rn : ~rm.
void csinv(const Register& rd,
const Register& rn,
const Register& rm,
void csinv(const Register& rd, const Register& rn, const Register& rm,
Condition cond);
// Conditional select negation: rd = cond ? rn : -rm.
void csneg(const Register& rd,
const Register& rn,
const Register& rm,
void csneg(const Register& rd, const Register& rn, const Register& rm,
Condition cond);
// Conditional set: rd = cond ? 1 : 0.
@ -857,15 +795,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Conditional comparison.
// Conditional compare negative.
void ccmn(const Register& rn,
const Operand& operand,
StatusFlags nzcv,
void ccmn(const Register& rn, const Operand& operand, StatusFlags nzcv,
Condition cond);
// Conditional compare.
void ccmp(const Register& rn,
const Operand& operand,
StatusFlags nzcv,
void ccmp(const Register& rn, const Operand& operand, StatusFlags nzcv,
Condition cond);
// Multiplication.
@ -873,18 +807,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void mul(const Register& rd, const Register& rn, const Register& rm);
// 32 + 32 x 32 -> 32-bit and 64 + 64 x 64 -> 64-bit multiply accumulate.
void madd(const Register& rd,
const Register& rn,
const Register& rm,
void madd(const Register& rd, const Register& rn, const Register& rm,
const Register& ra);
// -(32 x 32) -> 32-bit and -(64 x 64) -> 64-bit multiply.
void mneg(const Register& rd, const Register& rn, const Register& rm);
// 32 - 32 x 32 -> 32-bit and 64 - 64 x 64 -> 64-bit multiply subtract.
void msub(const Register& rd,
const Register& rn,
const Register& rm,
void msub(const Register& rd, const Register& rn, const Register& rm,
const Register& ra);
// 32 x 32 -> 64-bit multiply.
@ -894,27 +824,19 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void smulh(const Register& rd, const Register& rn, const Register& rm);
// Signed 32 x 32 -> 64-bit multiply and accumulate.
void smaddl(const Register& rd,
const Register& rn,
const Register& rm,
void smaddl(const Register& rd, const Register& rn, const Register& rm,
const Register& ra);
// Unsigned 32 x 32 -> 64-bit multiply and accumulate.
void umaddl(const Register& rd,
const Register& rn,
const Register& rm,
void umaddl(const Register& rd, const Register& rn, const Register& rm,
const Register& ra);
// Signed 32 x 32 -> 64-bit multiply and subtract.
void smsubl(const Register& rd,
const Register& rn,
const Register& rm,
void smsubl(const Register& rd, const Register& rn, const Register& rm,
const Register& ra);
// Unsigned 32 x 32 -> 64-bit multiply and subtract.
void umsubl(const Register& rd,
const Register& rn,
const Register& rm,
void umsubl(const Register& rd, const Register& rn, const Register& rm,
const Register& ra);
// Signed integer divide.
@ -2295,11 +2217,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
inline static Instr Nzcv(StatusFlags nzcv);
static bool IsImmAddSub(int64_t immediate);
static bool IsImmLogical(uint64_t value,
unsigned width,
unsigned* n,
unsigned* imm_s,
unsigned* imm_r);
static bool IsImmLogical(uint64_t value, unsigned width, unsigned* n,
unsigned* imm_s, unsigned* imm_r);
// MemOperand offset encoding.
inline static Instr ImmLSUnsigned(int imm12);
@ -2492,9 +2411,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
assem_->StartBlockConstPool();
}
~BlockConstPoolScope() {
assem_->EndBlockConstPool();
}
~BlockConstPoolScope() { assem_->EndBlockConstPool(); }
private:
Assembler* assem_;
@ -2538,9 +2455,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
assem_->StartBlockPools();
}
~BlockPoolsScope() {
assem_->EndBlockPools();
}
~BlockPoolsScope() { assem_->EndBlockPools(); }
private:
Assembler* assem_;
@ -2551,9 +2466,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
protected:
inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;
void LoadStore(const CPURegister& rt,
const MemOperand& addr,
LoadStoreOp op);
void LoadStore(const CPURegister& rt, const MemOperand& addr, LoadStoreOp op);
void LoadStorePair(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& addr, LoadStorePairOp op);
void LoadStoreStruct(const VRegister& vt, const MemOperand& addr,
@ -2571,46 +2484,29 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static bool IsImmLSPair(int64_t offset, unsigned size);
void Logical(const Register& rd,
const Register& rn,
const Operand& operand,
void Logical(const Register& rd, const Register& rn, const Operand& operand,
LogicalOp op);
void LogicalImmediate(const Register& rd,
const Register& rn,
unsigned n,
unsigned imm_s,
unsigned imm_r,
LogicalOp op);
void LogicalImmediate(const Register& rd, const Register& rn, unsigned n,
unsigned imm_s, unsigned imm_r, LogicalOp op);
void ConditionalCompare(const Register& rn,
const Operand& operand,
StatusFlags nzcv,
Condition cond,
void ConditionalCompare(const Register& rn, const Operand& operand,
StatusFlags nzcv, Condition cond,
ConditionalCompareOp op);
static bool IsImmConditionalCompare(int64_t immediate);
void AddSubWithCarry(const Register& rd,
const Register& rn,
const Operand& operand,
FlagsUpdate S,
void AddSubWithCarry(const Register& rd, const Register& rn,
const Operand& operand, FlagsUpdate S,
AddSubWithCarryOp op);
// Functions for emulating operands not directly supported by the instruction
// set.
void EmitShift(const Register& rd,
const Register& rn,
Shift shift,
void EmitShift(const Register& rd, const Register& rn, Shift shift,
unsigned amount);
void EmitExtendShift(const Register& rd,
const Register& rn,
Extend extend,
void EmitExtendShift(const Register& rd, const Register& rn, Extend extend,
unsigned left_shift);
void AddSub(const Register& rd,
const Register& rn,
const Operand& operand,
FlagsUpdate S,
AddSubOp op);
void AddSub(const Register& rd, const Register& rn, const Operand& operand,
FlagsUpdate S, AddSubOp op);
static bool IsImmFP32(float imm);
static bool IsImmFP64(double imm);
@ -2636,32 +2532,20 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static uint32_t FPToImm8(double imm);
// Instruction helpers.
void MoveWide(const Register& rd,
uint64_t imm,
int shift,
void MoveWide(const Register& rd, uint64_t imm, int shift,
MoveWideImmediateOp mov_op);
void DataProcShiftedRegister(const Register& rd,
const Register& rn,
const Operand& operand,
FlagsUpdate S,
Instr op);
void DataProcExtendedRegister(const Register& rd,
const Register& rn,
const Operand& operand,
FlagsUpdate S,
void DataProcShiftedRegister(const Register& rd, const Register& rn,
const Operand& operand, FlagsUpdate S, Instr op);
void DataProcExtendedRegister(const Register& rd, const Register& rn,
const Operand& operand, FlagsUpdate S,
Instr op);
void ConditionalSelect(const Register& rd,
const Register& rn,
const Register& rm,
Condition cond,
void ConditionalSelect(const Register& rd, const Register& rn,
const Register& rm, Condition cond,
ConditionalSelectOp op);
void DataProcessing1Source(const Register& rd,
const Register& rn,
void DataProcessing1Source(const Register& rd, const Register& rn,
DataProcessing1SourceOp op);
void DataProcessing3Source(const Register& rd,
const Register& rn,
const Register& rm,
const Register& ra,
void DataProcessing3Source(const Register& rd, const Register& rn,
const Register& rm, const Register& ra,
DataProcessing3SourceOp op);
void FPDataProcessing1Source(const VRegister& fd, const VRegister& fn,
FPDataProcessing1SourceOp op);
@ -2736,7 +2620,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static constexpr int kStartOfLabelLinkChain = 0;
// Verify that a label's link chain is intact.
void CheckLabelLinkChain(Label const * label);
void CheckLabelLinkChain(Label const* label);
// Postpone the generation of the constant pool for the specified number of
// instructions.
@ -2759,7 +2643,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
// Emit data inline in the instruction stream.
void EmitData(void const * data, unsigned size) {
void EmitData(void const* data, unsigned size) {
DCHECK_EQ(sizeof(*pc_), 1);
DCHECK_LE(pc_ + size, buffer_start_ + buffer_->size());
@ -2806,7 +2690,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
int no_const_pool_before_; // Block emission before this pc offset.
int no_const_pool_before_; // Block emission before this pc offset.
// Emission of the veneer pools may be blocked in some code sequences.
int veneer_pool_blocked_nesting_; // Block emission if this is not zero.
@ -2962,12 +2846,10 @@ class PatchingAssembler : public Assembler {
class EnsureSpace {
public:
explicit EnsureSpace(Assembler* assembler) {
assembler->CheckBufferSpace();
}
explicit EnsureSpace(Assembler* assembler) { assembler->CheckBufferSpace(); }
};
} // namespace internal
} // namespace v8
#endif // V8_ARM64_ASSEMBLER_ARM64_H_
#endif // V8_CODEGEN_ARM64_ASSEMBLER_ARM64_H_

View File

@ -0,0 +1,116 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// CPU specific code for arm independent of OS goes here.
#if V8_TARGET_ARCH_ARM64
#include "src/codegen/arm64/utils-arm64.h"
#include "src/codegen/cpu-features.h"
namespace v8 {
namespace internal {
class CacheLineSizes {
public:
CacheLineSizes() {
#if !defined(V8_HOST_ARCH_ARM64) || defined(V8_OS_WIN)
cache_type_register_ = 0;
#else
// Copy the content of the cache type register to a core register.
__asm__ __volatile__("mrs %x[ctr], ctr_el0" // NOLINT
: [ctr] "=r"(cache_type_register_));
#endif
}
uint32_t icache_line_size() const { return ExtractCacheLineSize(0); }
uint32_t dcache_line_size() const { return ExtractCacheLineSize(16); }
private:
uint32_t ExtractCacheLineSize(int cache_line_size_shift) const {
// The cache type register holds the size of cache lines in words as a
// power of two.
return 4 << ((cache_type_register_ >> cache_line_size_shift) & 0xF);
}
uint32_t cache_type_register_;
};
void CpuFeatures::FlushICache(void* address, size_t length) {
#if defined(V8_HOST_ARCH_ARM64)
#if defined(V8_OS_WIN)
::FlushInstructionCache(GetCurrentProcess(), address, length);
#else
// The code below assumes user space cache operations are allowed. The goal
// of this routine is to make sure the code generated is visible to the I
// side of the CPU.
uintptr_t start = reinterpret_cast<uintptr_t>(address);
// Sizes will be used to generate a mask big enough to cover a pointer.
CacheLineSizes sizes;
uintptr_t dsize = sizes.dcache_line_size();
uintptr_t isize = sizes.icache_line_size();
// Cache line sizes are always a power of 2.
DCHECK_EQ(CountSetBits(dsize, 64), 1);
DCHECK_EQ(CountSetBits(isize, 64), 1);
uintptr_t dstart = start & ~(dsize - 1);
uintptr_t istart = start & ~(isize - 1);
uintptr_t end = start + length;
__asm__ __volatile__( // NOLINT
// Clean every line of the D cache containing the
// target data.
"0: \n\t"
// dc : Data Cache maintenance
// c : Clean
// i : Invalidate
// va : by (Virtual) Address
// c : to the point of Coherency
// See ARM DDI 0406B page B2-12 for more information.
// We would prefer to use "cvau" (clean to the point of unification) here
// but we use "civac" to work around Cortex-A53 errata 819472, 826319,
// 827319 and 824069.
"dc civac, %[dline] \n\t"
"add %[dline], %[dline], %[dsize] \n\t"
"cmp %[dline], %[end] \n\t"
"b.lt 0b \n\t"
// Barrier to make sure the effect of the code above is visible to the
// rest of the world. dsb : Data Synchronisation Barrier
// ish : Inner SHareable domain
// The point of unification for an Inner Shareable shareability domain is
// the point by which the instruction and data caches of all the
// processors in that Inner Shareable shareability domain are guaranteed
// to see the same copy of a memory location. See ARM DDI 0406B page
// B2-12 for more information.
"dsb ish \n\t"
// Invalidate every line of the I cache containing the target data.
"1: \n\t"
// ic : instruction cache maintenance
// i : invalidate
// va : by address
// u : to the point of unification
"ic ivau, %[iline] \n\t"
"add %[iline], %[iline], %[isize] \n\t"
"cmp %[iline], %[end] \n\t"
"b.lt 1b \n\t"
// Barrier to make sure the effect of the code above is visible to the
// rest of the world.
"dsb ish \n\t"
// Barrier to ensure any prefetching which happened before this code is
// discarded.
// isb : Instruction Synchronisation Barrier
"isb \n\t"
: [dline] "+r"(dstart), [iline] "+r"(istart)
: [dsize] "r"(dsize), [isize] "r"(isize), [end] "r"(end)
// This code does not write to memory but without the dependency gcc might
// move this code before the code is generated.
: "cc", "memory"); // NOLINT
#endif // V8_OS_WIN
#endif // V8_HOST_ARCH_ARM64
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64

View File

@ -2,29 +2,32 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM64_DECODER_ARM64_INL_H_
#define V8_ARM64_DECODER_ARM64_INL_H_
#ifndef V8_CODEGEN_ARM64_DECODER_ARM64_INL_H_
#define V8_CODEGEN_ARM64_DECODER_ARM64_INL_H_
#include "src/arm64/decoder-arm64.h"
#include "src/codegen/arm64/decoder-arm64.h"
#include "src/common/globals.h"
#include "src/utils/utils.h"
namespace v8 {
namespace internal {
// Top-level instruction decode function.
template<typename V>
void Decoder<V>::Decode(Instruction *instr) {
template <typename V>
void Decoder<V>::Decode(Instruction* instr) {
if (instr->Bits(28, 27) == 0) {
V::VisitUnallocated(instr);
} else {
switch (instr->Bits(27, 24)) {
// 0: PC relative addressing.
case 0x0: DecodePCRelAddressing(instr); break;
case 0x0:
DecodePCRelAddressing(instr);
break;
// 1: Add/sub immediate.
case 0x1: DecodeAddSubImmediate(instr); break;
case 0x1:
DecodeAddSubImmediate(instr);
break;
// A: Logical shifted register.
// Add/sub with carry.
@ -37,15 +40,21 @@ void Decoder<V>::Decode(Instruction *instr) {
// Add/sub extended register.
// Data processing 3 source.
case 0xA:
case 0xB: DecodeDataProcessing(instr); break;
case 0xB:
DecodeDataProcessing(instr);
break;
// 2: Logical immediate.
// Move wide immediate.
case 0x2: DecodeLogical(instr); break;
case 0x2:
DecodeLogical(instr);
break;
// 3: Bitfield.
// Extract.
case 0x3: DecodeBitfieldExtract(instr); break;
case 0x3:
DecodeBitfieldExtract(instr);
break;
// 4: Unconditional branch immediate.
// Exception generation.
@ -58,7 +67,9 @@ void Decoder<V>::Decode(Instruction *instr) {
case 0x4:
case 0x5:
case 0x6:
case 0x7: DecodeBranchSystemException(instr); break;
case 0x7:
DecodeBranchSystemException(instr);
break;
// 8,9: Load/store register pair post-index.
// Load register literal.
@ -73,7 +84,9 @@ void Decoder<V>::Decode(Instruction *instr) {
case 0x8:
case 0x9:
case 0xC:
case 0xD: DecodeLoadStore(instr); break;
case 0xD:
DecodeLoadStore(instr);
break;
// E: FP fixed point conversion.
// FP integer conversion.
@ -87,13 +100,14 @@ void Decoder<V>::Decode(Instruction *instr) {
// F: FP data processing 3 source.
// Advanced SIMD.
case 0xE:
case 0xF: DecodeFP(instr); break;
case 0xF:
DecodeFP(instr);
break;
}
}
}
template<typename V>
template <typename V>
void Decoder<V>::DecodePCRelAddressing(Instruction* instr) {
DCHECK_EQ(0x0, instr->Bits(27, 24));
// We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level
@ -102,8 +116,7 @@ void Decoder<V>::DecodePCRelAddressing(Instruction* instr) {
V::VisitPCRelAddressing(instr);
}
template<typename V>
template <typename V>
void Decoder<V>::DecodeBranchSystemException(Instruction* instr) {
DCHECK_EQ(0x4, instr->Bits(27, 24) & 0xC); // 0x4, 0x5, 0x6, 0x7
@ -175,12 +188,9 @@ void Decoder<V>::DecodeBranchSystemException(Instruction* instr) {
}
}
} else {
if ((instr->Bit(24) == 0x1) ||
(instr->Bits(20, 16) != 0x1F) ||
(instr->Bits(15, 10) != 0) ||
(instr->Bits(4, 0) != 0) ||
(instr->Bits(24, 21) == 0x3) ||
(instr->Bits(24, 22) == 0x3)) {
if ((instr->Bit(24) == 0x1) || (instr->Bits(20, 16) != 0x1F) ||
(instr->Bits(15, 10) != 0) || (instr->Bits(4, 0) != 0) ||
(instr->Bits(24, 21) == 0x3) || (instr->Bits(24, 22) == 0x3)) {
V::VisitUnallocated(instr);
} else {
V::VisitUnconditionalBranchToRegister(instr);
@ -196,8 +206,7 @@ void Decoder<V>::DecodeBranchSystemException(Instruction* instr) {
}
}
template<typename V>
template <typename V>
void Decoder<V>::DecodeLoadStore(Instruction* instr) {
DCHECK_EQ(0x8, instr->Bits(27, 24) & 0xA); // 0x8, 0x9, 0xC, 0xD
@ -324,8 +333,7 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) {
}
}
template<typename V>
template <typename V>
void Decoder<V>::DecodeLogical(Instruction* instr) {
DCHECK_EQ(0x2, instr->Bits(27, 24));
@ -344,8 +352,7 @@ void Decoder<V>::DecodeLogical(Instruction* instr) {
}
}
template<typename V>
template <typename V>
void Decoder<V>::DecodeBitfieldExtract(Instruction* instr) {
DCHECK_EQ(0x3, instr->Bits(27, 24));
@ -370,8 +377,7 @@ void Decoder<V>::DecodeBitfieldExtract(Instruction* instr) {
}
}
template<typename V>
template <typename V>
void Decoder<V>::DecodeAddSubImmediate(Instruction* instr) {
DCHECK_EQ(0x1, instr->Bits(27, 24));
if (instr->Bit(23) == 1) {
@ -381,11 +387,9 @@ void Decoder<V>::DecodeAddSubImmediate(Instruction* instr) {
}
}
template<typename V>
template <typename V>
void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
DCHECK((instr->Bits(27, 24) == 0xA) ||
(instr->Bits(27, 24) == 0xB) );
DCHECK((instr->Bits(27, 24) == 0xA) || (instr->Bits(27, 24) == 0xB));
if (instr->Bit(24) == 0) {
if (instr->Bit(28) == 0) {
@ -405,8 +409,7 @@ void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
break;
}
case 2: {
if ((instr->Bit(29) == 0) ||
(instr->Mask(0x00000410) != 0)) {
if ((instr->Bit(29) == 0) || (instr->Mask(0x00000410) != 0)) {
V::VisitUnallocated(instr);
} else {
if (instr->Bit(11) == 0) {
@ -430,8 +433,7 @@ void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
V::VisitUnallocated(instr);
} else {
if (instr->Bit(30) == 0) {
if ((instr->Bit(15) == 0x1) ||
(instr->Bits(15, 11) == 0) ||
if ((instr->Bit(15) == 0x1) || (instr->Bits(15, 11) == 0) ||
(instr->Bits(15, 12) == 0x1) ||
(instr->Bits(15, 12) == 0x3) ||
(instr->Bits(15, 13) == 0x3) ||
@ -443,8 +445,7 @@ void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
V::VisitDataProcessing2Source(instr);
}
} else {
if ((instr->Bit(13) == 1) ||
(instr->Bits(20, 16) != 0) ||
if ((instr->Bit(13) == 1) || (instr->Bits(20, 16) != 0) ||
(instr->Bits(15, 14) != 0) ||
(instr->Mask(0xA01FFC00) == 0x00000C00) ||
(instr->Mask(0x201FF800) == 0x00001800)) {
@ -460,12 +461,14 @@ void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
case 1:
case 3:
case 5:
case 7: V::VisitUnallocated(instr); break;
case 7:
V::VisitUnallocated(instr);
break;
}
}
} else {
if (instr->Bit(28) == 0) {
if (instr->Bit(21) == 0) {
if (instr->Bit(21) == 0) {
if ((instr->Bits(23, 22) == 0x3) ||
(instr->Mask(0x80008000) == 0x00008000)) {
V::VisitUnallocated(instr);
@ -482,8 +485,7 @@ void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
}
}
} else {
if ((instr->Bit(30) == 0x1) ||
(instr->Bits(30, 29) == 0x1) ||
if ((instr->Bit(30) == 0x1) || (instr->Bits(30, 29) == 0x1) ||
(instr->Mask(0xE0600000) == 0x00200000) ||
(instr->Mask(0xE0608000) == 0x00400000) ||
(instr->Mask(0x60608000) == 0x00408000) ||
@ -498,11 +500,9 @@ void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
}
}
template<typename V>
template <typename V>
void Decoder<V>::DecodeFP(Instruction* instr) {
DCHECK((instr->Bits(27, 24) == 0xE) ||
(instr->Bits(27, 24) == 0xF) );
DCHECK((instr->Bits(27, 24) == 0xE) || (instr->Bits(27, 24) == 0xF));
if (instr->Bit(28) == 0) {
DecodeNEONVectorDataProcessing(instr);
@ -515,8 +515,7 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
if (instr->Bit(29) == 0) {
if (instr->Bit(24) == 0) {
if (instr->Bit(21) == 0) {
if ((instr->Bit(23) == 1) ||
(instr->Bit(18) == 1) ||
if ((instr->Bit(23) == 1) || (instr->Bit(18) == 1) ||
(instr->Mask(0x80008000) == 0x00000000) ||
(instr->Mask(0x000E0000) == 0x00000000) ||
(instr->Mask(0x000E0000) == 0x000A0000) ||
@ -568,8 +567,7 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
V::VisitFPDataProcessing1Source(instr);
}
} else if (instr->Bits(13, 10) == 8) {
if ((instr->Bits(15, 14) != 0) ||
(instr->Bits(2, 0) != 0) ||
if ((instr->Bits(15, 14) != 0) || (instr->Bits(2, 0) != 0) ||
(instr->Mask(0x80800000) != 0x00000000)) {
V::VisitUnallocated(instr);
} else {
@ -605,7 +603,8 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
V::VisitFPConditionalSelect(instr);
break;
}
default: UNREACHABLE();
default:
UNREACHABLE();
}
}
}
@ -805,8 +804,7 @@ void Decoder<V>::DecodeNEONScalarDataProcessing(Instruction* instr) {
}
}
} // namespace internal
} // namespace v8
#endif // V8_ARM64_DECODER_ARM64_INL_H_
#endif // V8_CODEGEN_ARM64_DECODER_ARM64_INL_H_

View File

@ -4,26 +4,23 @@
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/decoder-arm64.h"
#include "src/codegen/arm64/decoder-arm64.h"
#include "src/common/globals.h"
#include "src/utils/utils.h"
namespace v8 {
namespace internal {
void DispatchingDecoderVisitor::AppendVisitor(DecoderVisitor* new_visitor) {
visitors_.remove(new_visitor);
visitors_.push_back(new_visitor);
}
void DispatchingDecoderVisitor::PrependVisitor(DecoderVisitor* new_visitor) {
visitors_.remove(new_visitor);
visitors_.push_front(new_visitor);
}
void DispatchingDecoderVisitor::InsertVisitorBefore(
DecoderVisitor* new_visitor, DecoderVisitor* registered_visitor) {
visitors_.remove(new_visitor);
@ -40,7 +37,6 @@ void DispatchingDecoderVisitor::InsertVisitorBefore(
visitors_.insert(it, new_visitor);
}
void DispatchingDecoderVisitor::InsertVisitorAfter(
DecoderVisitor* new_visitor, DecoderVisitor* registered_visitor) {
visitors_.remove(new_visitor);
@ -58,12 +54,10 @@ void DispatchingDecoderVisitor::InsertVisitorAfter(
visitors_.push_back(new_visitor);
}
void DispatchingDecoderVisitor::RemoveVisitor(DecoderVisitor* visitor) {
visitors_.remove(visitor);
}
#define DEFINE_VISITOR_CALLERS(A) \
void DispatchingDecoderVisitor::Visit##A(Instruction* instr) { \
if (!(instr->Mask(A##FMask) == A##Fixed)) { \
@ -77,7 +71,6 @@ void DispatchingDecoderVisitor::RemoveVisitor(DecoderVisitor* visitor) {
VISITOR_LIST(DEFINE_VISITOR_CALLERS)
#undef DEFINE_VISITOR_CALLERS
} // namespace internal
} // namespace v8

View File

@ -2,18 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM64_DECODER_ARM64_H_
#define V8_ARM64_DECODER_ARM64_H_
#ifndef V8_CODEGEN_ARM64_DECODER_ARM64_H_
#define V8_CODEGEN_ARM64_DECODER_ARM64_H_
#include <list>
#include "src/arm64/instructions-arm64.h"
#include "src/codegen/arm64/instructions-arm64.h"
#include "src/common/globals.h"
namespace v8 {
namespace internal {
// List macro containing all visitors needed by the decoder class.
#define VISITOR_LIST(V) \
@ -90,9 +89,9 @@ class V8_EXPORT_PRIVATE DecoderVisitor {
public:
virtual ~DecoderVisitor() {}
#define DECLARE(A) virtual void Visit##A(Instruction* instr) = 0;
#define DECLARE(A) virtual void Visit##A(Instruction* instr) = 0;
VISITOR_LIST(DECLARE)
#undef DECLARE
#undef DECLARE
};
// A visitor that dispatches to a list of visitors.
@ -132,16 +131,16 @@ class V8_EXPORT_PRIVATE DispatchingDecoderVisitor : public DecoderVisitor {
void VisitNEONShiftImmediate(const Instruction* instr);
#define DECLARE(A) void Visit##A(Instruction* instr);
#define DECLARE(A) void Visit##A(Instruction* instr);
VISITOR_LIST(DECLARE)
#undef DECLARE
#undef DECLARE
private:
// Visitors are registered in a list.
std::list<DecoderVisitor*> visitors_;
};
template<typename V>
template <typename V>
class Decoder : public V {
public:
Decoder() {}
@ -149,7 +148,7 @@ class Decoder : public V {
// Top-level instruction decoder function. Decodes an instruction and calls
// the visitor functions registered with the Decoder class.
virtual void Decode(Instruction *instr);
virtual void Decode(Instruction* instr);
private:
// Decode the PC relative addressing instruction, and call the corresponding
@ -208,8 +207,7 @@ class Decoder : public V {
void DecodeNEONScalarDataProcessing(Instruction* instr);
};
} // namespace internal
} // namespace v8
#endif // V8_ARM64_DECODER_ARM64_H_
#endif // V8_CODEGEN_ARM64_DECODER_ARM64_H_

View File

@ -4,8 +4,8 @@
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/instructions-arm64.h"
#include "src/codegen/arm64/instructions-arm64.h"
#include "src/codegen/arm64/assembler-arm64-inl.h"
namespace v8 {
namespace internal {
@ -35,12 +35,12 @@ bool Instruction::IsLoad() const {
case LDR_d:
case LDR_q:
return true;
default: return false;
default:
return false;
}
}
}
bool Instruction::IsStore() const {
if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
return false;
@ -61,14 +61,13 @@ bool Instruction::IsStore() const {
case STR_d:
case STR_q:
return true;
default: return false;
default:
return false;
}
}
}
static uint64_t RotateRight(uint64_t value,
unsigned int rotate,
static uint64_t RotateRight(uint64_t value, unsigned int rotate,
unsigned int width) {
DCHECK_LE(width, 64);
rotate &= 63;
@ -76,9 +75,7 @@ static uint64_t RotateRight(uint64_t value,
(value >> rotate);
}
static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
uint64_t value,
static uint64_t RepeatBitsAcrossReg(unsigned reg_size, uint64_t value,
unsigned width) {
DCHECK((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
(width == 32));
@ -90,7 +87,6 @@ static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
return result;
}
// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case. Specifically, where the constraints on imm_s are not
// met.
@ -134,9 +130,8 @@ uint64_t Instruction::ImmLogical() {
return 0;
}
uint64_t bits = (1ULL << ((imm_s & mask) + 1)) - 1;
return RepeatBitsAcrossReg(reg_size,
RotateRight(bits, imm_r & mask, width),
width);
return RepeatBitsAcrossReg(
reg_size, RotateRight(bits, imm_r & mask, width), width);
}
}
}
@ -188,7 +183,6 @@ unsigned CalcLSPairDataSize(LoadStorePairOp op) {
}
}
int64_t Instruction::ImmPCOffset() {
int64_t offset;
if (IsPCRelAddressing()) {
@ -211,18 +205,15 @@ int64_t Instruction::ImmPCOffset() {
return offset;
}
Instruction* Instruction::ImmPCOffsetTarget() {
return InstructionAtOffset(ImmPCOffset());
}
bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
ptrdiff_t offset) {
return is_intn(offset, ImmBranchRangeBitwidth(branch_type));
}
bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
return IsValidImmPCOffset(BranchType(), DistanceTo(target));
}
@ -258,7 +249,6 @@ void Instruction::SetPCRelImmTarget(const AssemblerOptions& options,
}
}
void Instruction::SetBranchImmTarget(Instruction* target) {
DCHECK(IsAligned(DistanceTo(target), kInstrSize));
DCHECK(
@ -287,7 +277,8 @@ void Instruction::SetBranchImmTarget(Instruction* target) {
imm_mask = ImmTestBranch_mask;
break;
}
default: UNREACHABLE();
default:
UNREACHABLE();
}
SetInstructionBits(Mask(~imm_mask) | branch_imm);
}
@ -307,7 +298,6 @@ void Instruction::SetUnresolvedInternalReferenceImmTarget(
patcher.brk(low16);
}
void Instruction::SetImmLLiteral(Instruction* source) {
DCHECK(IsLdrLiteral());
DCHECK(IsAligned(DistanceTo(source), kInstrSize));
@ -319,7 +309,6 @@ void Instruction::SetImmLLiteral(Instruction* source) {
SetInstructionBits(Mask(~mask) | imm);
}
// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-arm64-inl.h to work around this.
@ -331,7 +320,6 @@ bool InstructionSequence::IsInlineData() const {
// to update this method too.
}
// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-arm64-inl.h to work around this.

View File

@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM64_INSTRUCTIONS_ARM64_H_
#define V8_ARM64_INSTRUCTIONS_ARM64_H_
#ifndef V8_CODEGEN_ARM64_INSTRUCTIONS_ARM64_H_
#define V8_CODEGEN_ARM64_INSTRUCTIONS_ARM64_H_
#include "src/arm64/constants-arm64.h"
#include "src/arm64/register-arm64.h"
#include "src/arm64/utils-arm64.h"
#include "src/codegen/arm64/constants-arm64.h"
#include "src/codegen/arm64/register-arm64.h"
#include "src/codegen/arm64/utils-arm64.h"
#include "src/common/globals.h"
#include "src/utils/utils.h"
@ -54,17 +54,13 @@ unsigned CalcLSPairDataSize(LoadStorePairOp op);
enum ImmBranchType {
UnknownBranchType = 0,
CondBranchType = 1,
UncondBranchType = 2,
CondBranchType = 1,
UncondBranchType = 2,
CompareBranchType = 3,
TestBranchType = 4
TestBranchType = 4
};
enum AddrMode {
Offset,
PreIndex,
PostIndex
};
enum AddrMode { Offset, PreIndex, PostIndex };
enum FPRounding {
// The first four values are encodable directly by FPCR<RMode>.
@ -79,10 +75,7 @@ enum FPRounding {
FPRoundOdd
};
enum Reg31Mode {
Reg31IsStackPointer,
Reg31IsZeroRegister
};
enum Reg31Mode { Reg31IsStackPointer, Reg31IsZeroRegister };
// Instructions. ---------------------------------------------------------------
@ -96,9 +89,7 @@ class Instruction {
*reinterpret_cast<Instr*>(this) = new_instr;
}
int Bit(int pos) const {
return (InstructionBits() >> pos) & 1;
}
int Bit(int pos) const { return (InstructionBits() >> pos) & 1; }
uint32_t Bits(int msb, int lsb) const {
return unsigned_bitextract_32(msb, lsb, InstructionBits());
@ -109,9 +100,7 @@ class Instruction {
return signed_bitextract_32(msb, lsb, bits);
}
Instr Mask(uint32_t mask) const {
return InstructionBits() & mask;
}
Instr Mask(uint32_t mask) const { return InstructionBits() & mask; }
V8_INLINE const Instruction* following(int count = 1) const {
return InstructionAtOffset(count * static_cast<int>(kInstrSize));
@ -125,14 +114,12 @@ class Instruction {
return following(-count);
}
V8_INLINE Instruction* preceding(int count = 1) {
return following(-count);
}
V8_INLINE Instruction* preceding(int count = 1) { return following(-count); }
#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
int32_t Name() const { return Func(HighBit, LowBit); }
INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
#undef DEFINE_GETTER
#undef DEFINE_GETTER
// ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
// formed from ImmPCRelLo and ImmPCRelHi.
@ -180,13 +167,9 @@ class Instruction {
return Mask(CompareBranchFMask) == CompareBranchFixed;
}
bool IsTestBranch() const {
return Mask(TestBranchFMask) == TestBranchFixed;
}
bool IsTestBranch() const { return Mask(TestBranchFMask) == TestBranchFixed; }
bool IsImmBranch() const {
return BranchType() != UnknownBranchType;
}
bool IsImmBranch() const { return BranchType() != UnknownBranchType; }
static float Imm8ToFP32(uint32_t imm8) {
// Imm8: abcdefgh (8 bits)
@ -219,17 +202,13 @@ class Instruction {
return Mask(LoadLiteralFMask) == LoadLiteralFixed;
}
bool IsLdrLiteralX() const {
return Mask(LoadLiteralMask) == LDR_x_lit;
}
bool IsLdrLiteralX() const { return Mask(LoadLiteralMask) == LDR_x_lit; }
bool IsPCRelAddressing() const {
return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
}
bool IsAdr() const {
return Mask(PCRelAddressingMask) == ADR;
}
bool IsAdr() const { return Mask(PCRelAddressingMask) == ADR; }
bool IsBrk() const { return Mask(ExceptionMask) == BRK; }
@ -345,11 +324,16 @@ class Instruction {
int ImmBranch() const {
switch (BranchType()) {
case CondBranchType: return ImmCondBranch();
case UncondBranchType: return ImmUncondBranch();
case CompareBranchType: return ImmCmpBranch();
case TestBranchType: return ImmTestBranch();
default: UNREACHABLE();
case CondBranchType:
return ImmCondBranch();
case UncondBranchType:
return ImmUncondBranch();
case CompareBranchType:
return ImmCmpBranch();
case TestBranchType:
return ImmTestBranch();
default:
UNREACHABLE();
}
return 0;
}
@ -393,9 +377,7 @@ class Instruction {
// mov r<n>, r<n>
// which is encoded as
// orr r<n>, xzr, r<n>
return (Mask(LogicalShiftedMask) == ORR_x) &&
(Rd() == Rm()) &&
(Rd() == n);
return (Mask(LogicalShiftedMask) == ORR_x) && (Rd() == Rm()) && (Rd() == n);
}
// Find the PC offset encoded in this instruction. 'this' may be a branch or
@ -439,7 +421,8 @@ class Instruction {
return this + offset;
}
template<typename T> V8_INLINE static Instruction* Cast(T src) {
template <typename T>
V8_INLINE static Instruction* Cast(T src) {
return reinterpret_cast<Instruction*>(src);
}
@ -447,7 +430,6 @@ class Instruction {
return reinterpret_cast<Address>(target) - reinterpret_cast<Address>(this);
}
static const int ImmPCRelRangeBitwidth = 21;
static bool IsValidPCRelOffset(ptrdiff_t offset) { return is_int21(offset); }
void SetPCRelImmTarget(const AssemblerOptions& options, Instruction* target);
@ -468,7 +450,6 @@ class InstructionSequence : public Instruction {
uint64_t InlineData() const;
};
// Simulator/Debugger debug instructions ---------------------------------------
// Each debug marker is represented by a HLT instruction. The immediate comment
// field in the instruction is used to identify the type of debug marker. Each
@ -751,5 +732,4 @@ class NEONFormatDecoder {
} // namespace internal
} // namespace v8
#endif // V8_ARM64_INSTRUCTIONS_ARM64_H_
#endif // V8_CODEGEN_ARM64_INSTRUCTIONS_ARM64_H_

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/arm64/instrument-arm64.h"
#include "src/codegen/arm64/instrument-arm64.h"
namespace v8 {
namespace internal {
@ -13,21 +13,11 @@ Counter::Counter(const char* name, CounterType type)
strncpy(name_, name, kCounterNameMaxLength);
}
void Counter::Enable() { enabled_ = true; }
void Counter::Enable() {
enabled_ = true;
}
void Counter::Disable() {
enabled_ = false;
}
bool Counter::IsEnabled() {
return enabled_;
}
void Counter::Disable() { enabled_ = false; }
bool Counter::IsEnabled() { return enabled_; }
void Counter::Increment() {
if (enabled_) {
@ -35,7 +25,6 @@ void Counter::Increment() {
}
}
uint64_t Counter::count() {
uint64_t result = count_;
if (type_ == Gauge) {
@ -45,15 +34,9 @@ uint64_t Counter::count() {
return result;
}
const char* Counter::name() { return name_; }
const char* Counter::name() {
return name_;
}
CounterType Counter::type() {
return type_;
}
CounterType Counter::type() { return type_; }
struct CounterDescriptor {
const char* name;
@ -120,7 +103,6 @@ Instrument::Instrument(const char* datafile, uint64_t sample_period)
DumpCounterNames();
}
Instrument::~Instrument() {
// Dump any remaining instruction data to the output file.
DumpCounters();
@ -136,7 +118,6 @@ Instrument::~Instrument() {
}
}
void Instrument::Update() {
// Increment the instruction counter, and dump all counters if a sample period
// has elapsed.
@ -149,7 +130,6 @@ void Instrument::Update() {
}
}
void Instrument::DumpCounters() {
// Iterate through the counter objects, dumping their values to the output
// stream.
@ -161,7 +141,6 @@ void Instrument::DumpCounters() {
fflush(output_stream_);
}
void Instrument::DumpCounterNames() {
// Iterate through the counter objects, dumping the counter names to the
// output stream.
@ -173,16 +152,19 @@ void Instrument::DumpCounterNames() {
fflush(output_stream_);
}
void Instrument::HandleInstrumentationEvent(unsigned event) {
switch (event) {
case InstrumentStateEnable: Enable(); break;
case InstrumentStateDisable: Disable(); break;
default: DumpEventMarker(event);
case InstrumentStateEnable:
Enable();
break;
case InstrumentStateDisable:
Disable();
break;
default:
DumpEventMarker(event);
}
}
void Instrument::DumpEventMarker(unsigned marker) {
// Dumpan event marker to the output stream as a specially formatted comment
// line.
@ -192,7 +174,6 @@ void Instrument::DumpEventMarker(unsigned marker) {
(marker >> 8) & 0xFF, counter->count());
}
Counter* Instrument::GetCounter(const char* name) {
// Get a Counter object by name from the counter list.
std::list<Counter*>::const_iterator it;
@ -205,13 +186,12 @@ Counter* Instrument::GetCounter(const char* name) {
// A Counter by that name does not exist: print an error message to stderr
// and the output file, and exit.
static const char* error_message =
"# Error: Unknown counter \"%s\". Exiting.\n";
"# Error: Unknown counter \"%s\". Exiting.\n";
fprintf(stderr, error_message, name);
fprintf(output_stream_, error_message, name);
exit(1);
}
void Instrument::Enable() {
std::list<Counter*>::iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
@ -219,7 +199,6 @@ void Instrument::Enable() {
}
}
void Instrument::Disable() {
std::list<Counter*>::iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
@ -227,28 +206,24 @@ void Instrument::Disable() {
}
}
void Instrument::VisitPCRelAddressing(Instruction* instr) {
Update();
static Counter* counter = GetCounter("PC Addressing");
counter->Increment();
}
void Instrument::VisitAddSubImmediate(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Add/Sub DP");
counter->Increment();
}
void Instrument::VisitLogicalImmediate(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Logical DP");
counter->Increment();
}
void Instrument::VisitMoveWideImmediate(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Move Immediate");
@ -261,70 +236,60 @@ void Instrument::VisitMoveWideImmediate(Instruction* instr) {
}
}
void Instrument::VisitBitfield(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitExtract(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitUnconditionalBranch(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Unconditional Branch");
counter->Increment();
}
void Instrument::VisitUnconditionalBranchToRegister(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Unconditional Branch");
counter->Increment();
}
void Instrument::VisitCompareBranch(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Compare and Branch");
counter->Increment();
}
void Instrument::VisitTestBranch(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Test and Branch");
counter->Increment();
}
void Instrument::VisitConditionalBranch(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Branch");
counter->Increment();
}
void Instrument::VisitSystem(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
void Instrument::VisitException(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
void Instrument::InstrumentLoadStorePair(Instruction* instr) {
static Counter* load_pair_counter = GetCounter("Load Pair");
static Counter* store_pair_counter = GetCounter("Store Pair");
@ -335,32 +300,27 @@ void Instrument::InstrumentLoadStorePair(Instruction* instr) {
}
}
void Instrument::VisitLoadStorePairPostIndex(Instruction* instr) {
Update();
InstrumentLoadStorePair(instr);
}
void Instrument::VisitLoadStorePairOffset(Instruction* instr) {
Update();
InstrumentLoadStorePair(instr);
}
void Instrument::VisitLoadStorePairPreIndex(Instruction* instr) {
Update();
InstrumentLoadStorePair(instr);
}
void Instrument::VisitLoadLiteral(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Load Literal");
counter->Increment();
}
void Instrument::InstrumentLoadStore(Instruction* instr) {
static Counter* load_int_counter = GetCounter("Load Integer");
static Counter* store_int_counter = GetCounter("Store Integer");
@ -368,52 +328,56 @@ void Instrument::InstrumentLoadStore(Instruction* instr) {
static Counter* store_fp_counter = GetCounter("Store FP");
switch (instr->Mask(LoadStoreMask)) {
case STRB_w: // Fall through.
case STRH_w: // Fall through.
case STR_w: // Fall through.
case STR_x: store_int_counter->Increment(); break;
case STR_s: // Fall through.
case STR_d: store_fp_counter->Increment(); break;
case LDRB_w: // Fall through.
case LDRH_w: // Fall through.
case LDR_w: // Fall through.
case LDR_x: // Fall through.
case LDRSB_x: // Fall through.
case LDRSH_x: // Fall through.
case LDRSW_x: // Fall through.
case LDRSB_w: // Fall through.
case LDRSH_w: load_int_counter->Increment(); break;
case LDR_s: // Fall through.
case LDR_d: load_fp_counter->Increment(); break;
default: UNREACHABLE();
case STRB_w: // Fall through.
case STRH_w: // Fall through.
case STR_w: // Fall through.
case STR_x:
store_int_counter->Increment();
break;
case STR_s: // Fall through.
case STR_d:
store_fp_counter->Increment();
break;
case LDRB_w: // Fall through.
case LDRH_w: // Fall through.
case LDR_w: // Fall through.
case LDR_x: // Fall through.
case LDRSB_x: // Fall through.
case LDRSH_x: // Fall through.
case LDRSW_x: // Fall through.
case LDRSB_w: // Fall through.
case LDRSH_w:
load_int_counter->Increment();
break;
case LDR_s: // Fall through.
case LDR_d:
load_fp_counter->Increment();
break;
default:
UNREACHABLE();
}
}
void Instrument::VisitLoadStoreUnscaledOffset(Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLoadStorePostIndex(Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLoadStorePreIndex(Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLoadStoreRegisterOffset(Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLoadStoreUnsignedOffset(Instruction* instr) {
Update();
InstrumentLoadStore(instr);
@ -432,7 +396,9 @@ void Instrument::VisitLoadStoreAcquireRelease(Instruction* instr) {
case LDAXR_b: // Fall-through.
case LDAXR_h: // Fall-through.
case LDAXR_w: // Fall-through.
case LDAXR_x: load_counter->Increment(); break;
case LDAXR_x:
load_counter->Increment();
break;
case STLR_b: // Fall-through.
case STLR_h: // Fall-through.
case STLR_w: // Fall-through.
@ -440,8 +406,11 @@ void Instrument::VisitLoadStoreAcquireRelease(Instruction* instr) {
case STLXR_b: // Fall-through.
case STLXR_h: // Fall-through.
case STLXR_w: // Fall-through.
case STLXR_x: store_counter->Increment(); break;
default: UNREACHABLE();
case STLXR_x:
store_counter->Increment();
break;
default:
UNREACHABLE();
}
}
@ -451,126 +420,108 @@ void Instrument::VisitLogicalShifted(Instruction* instr) {
counter->Increment();
}
void Instrument::VisitAddSubShifted(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Add/Sub DP");
counter->Increment();
}
void Instrument::VisitAddSubExtended(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Add/Sub DP");
counter->Increment();
}
void Instrument::VisitAddSubWithCarry(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Add/Sub DP");
counter->Increment();
}
void Instrument::VisitConditionalCompareRegister(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Compare");
counter->Increment();
}
void Instrument::VisitConditionalCompareImmediate(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Compare");
counter->Increment();
}
void Instrument::VisitConditionalSelect(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Select");
counter->Increment();
}
void Instrument::VisitDataProcessing1Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitDataProcessing2Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitDataProcessing3Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitFPCompare(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPConditionalCompare(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Compare");
counter->Increment();
}
void Instrument::VisitFPConditionalSelect(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Select");
counter->Increment();
}
void Instrument::VisitFPImmediate(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPDataProcessing1Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPDataProcessing2Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPDataProcessing3Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPIntegerConvert(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPFixedPointConvert(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
@ -737,13 +688,11 @@ void Instrument::VisitUnallocated(Instruction* instr) {
counter->Increment();
}
void Instrument::VisitUnimplemented(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
} // namespace internal
} // namespace v8

View File

@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM64_INSTRUMENT_ARM64_H_
#define V8_ARM64_INSTRUMENT_ARM64_H_
#ifndef V8_CODEGEN_ARM64_INSTRUMENT_ARM64_H_
#define V8_CODEGEN_ARM64_INSTRUMENT_ARM64_H_
#include "src/common/globals.h"
#include "src/utils/utils.h"
#include "src/arm64/constants-arm64.h"
#include "src/arm64/decoder-arm64.h"
#include "src/codegen/arm64/constants-arm64.h"
#include "src/codegen/arm64/decoder-arm64.h"
namespace v8 {
namespace internal {
@ -17,19 +17,13 @@ namespace internal {
const int kCounterNameMaxLength = 256;
const uint64_t kDefaultInstrumentationSamplingPeriod = 1 << 22;
enum InstrumentState {
InstrumentStateDisable = 0,
InstrumentStateEnable = 1
};
enum InstrumentState { InstrumentStateDisable = 0, InstrumentStateEnable = 1 };
enum CounterType {
Gauge = 0, // Gauge counters reset themselves after reading.
Cumulative = 1 // Cumulative counters keep their value after reading.
};
class Counter {
public:
explicit Counter(const char* name, CounterType type = Gauge);
@ -49,18 +43,17 @@ class Counter {
CounterType type_;
};
class Instrument: public DecoderVisitor {
class Instrument : public DecoderVisitor {
public:
explicit Instrument(
const char* datafile = nullptr,
uint64_t sample_period = kDefaultInstrumentationSamplingPeriod);
~Instrument();
// Declare all Visitor functions.
#define DECLARE(A) void Visit##A(Instruction* instr);
// Declare all Visitor functions.
#define DECLARE(A) void Visit##A(Instruction* instr);
VISITOR_LIST(DECLARE)
#undef DECLARE
#undef DECLARE
private:
void Update();
@ -77,11 +70,11 @@ class Instrument: public DecoderVisitor {
std::list<Counter*> counters_;
FILE *output_stream_;
FILE* output_stream_;
uint64_t sample_period_;
};
} // namespace internal
} // namespace v8
#endif // V8_ARM64_INSTRUMENT_ARM64_H_
#endif // V8_CODEGEN_ARM64_INSTRUMENT_ARM64_H_

View File

@ -74,7 +74,6 @@ const Register ApiGetterDescriptor::CallbackRegister() { return x3; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return x0; }

View File

@ -2,28 +2,26 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
#define V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
#ifndef V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
#define V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
#include <ctype.h>
#include "src/common/globals.h"
#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/assembler-arm64.h"
#include "src/arm64/instrument-arm64.h"
#include "src/base/bits.h"
#include "src/codegen/arm64/assembler-arm64-inl.h"
#include "src/codegen/arm64/assembler-arm64.h"
#include "src/codegen/arm64/instrument-arm64.h"
#include "src/codegen/macro-assembler.h"
namespace v8 {
namespace internal {
MemOperand FieldMemOperand(Register object, int offset) {
return MemOperand(object, offset - kHeapObjectTag);
}
void TurboAssembler::And(const Register& rd, const Register& rn,
const Operand& operand) {
DCHECK(allow_macro_instructions());
@ -50,9 +48,7 @@ void TurboAssembler::Bic(const Register& rd, const Register& rn,
LogicalMacro(rd, rn, operand, BIC);
}
void MacroAssembler::Bics(const Register& rd,
const Register& rn,
void MacroAssembler::Bics(const Register& rd, const Register& rn,
const Operand& operand) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@ -97,11 +93,8 @@ void TurboAssembler::Ccmp(const Register& rn, const Operand& operand,
}
}
void MacroAssembler::Ccmn(const Register& rn,
const Operand& operand,
StatusFlags nzcv,
Condition cond) {
void MacroAssembler::Ccmn(const Register& rn, const Operand& operand,
StatusFlags nzcv, Condition cond) {
DCHECK(allow_macro_instructions());
if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
ConditionalCompareMacro(rn, -operand.ImmediateValue(), nzcv, cond, CCMP);
@ -186,45 +179,35 @@ void TurboAssembler::Adc(const Register& rd, const Register& rn,
AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC);
}
void MacroAssembler::Adcs(const Register& rd,
const Register& rn,
void MacroAssembler::Adcs(const Register& rd, const Register& rn,
const Operand& operand) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC);
}
void MacroAssembler::Sbc(const Register& rd,
const Register& rn,
void MacroAssembler::Sbc(const Register& rd, const Register& rn,
const Operand& operand) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC);
}
void MacroAssembler::Sbcs(const Register& rd,
const Register& rn,
void MacroAssembler::Sbcs(const Register& rd, const Register& rn,
const Operand& operand) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC);
}
void MacroAssembler::Ngc(const Register& rd,
const Operand& operand) {
void MacroAssembler::Ngc(const Register& rd, const Operand& operand) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
Register zr = AppropriateZeroRegFor(rd);
Sbc(rd, zr, operand);
}
void MacroAssembler::Ngcs(const Register& rd,
const Operand& operand) {
void MacroAssembler::Ngcs(const Register& rd, const Operand& operand) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
Register zr = AppropriateZeroRegFor(rd);
@ -303,10 +286,7 @@ void TurboAssembler::Bfi(const Register& rd, const Register& rn, unsigned lsb,
bfi(rd, rn, lsb, width);
}
void MacroAssembler::Bfxil(const Register& rd,
const Register& rn,
unsigned lsb,
void MacroAssembler::Bfxil(const Register& rd, const Register& rn, unsigned lsb,
unsigned width) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@ -340,9 +320,7 @@ void TurboAssembler::Brk(int code) {
brk(code);
}
void MacroAssembler::Cinc(const Register& rd,
const Register& rn,
void MacroAssembler::Cinc(const Register& rd, const Register& rn,
Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@ -350,9 +328,7 @@ void MacroAssembler::Cinc(const Register& rd,
cinc(rd, rn, cond);
}
void MacroAssembler::Cinv(const Register& rd,
const Register& rn,
void MacroAssembler::Cinv(const Register& rd, const Register& rn,
Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@ -380,18 +356,15 @@ void TurboAssembler::Cneg(const Register& rd, const Register& rn,
cneg(rd, rn, cond);
}
// Conditionally zero the destination register. Only X registers are supported
// due to the truncation side-effect when used on W registers.
void MacroAssembler::CzeroX(const Register& rd,
Condition cond) {
void MacroAssembler::CzeroX(const Register& rd, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsSP() && rd.Is64Bits());
DCHECK((cond != al) && (cond != nv));
csel(rd, xzr, rd, cond);
}
// Conditionally move a value into the destination register. Only X registers
// are supported due to the truncation side-effect when used on W registers.
void TurboAssembler::CmovX(const Register& rd, const Register& rn,
@ -432,22 +405,16 @@ void TurboAssembler::Csinc(const Register& rd, const Register& rn,
csinc(rd, rn, rm, cond);
}
void MacroAssembler::Csinv(const Register& rd,
const Register& rn,
const Register& rm,
Condition cond) {
void MacroAssembler::Csinv(const Register& rd, const Register& rn,
const Register& rm, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
DCHECK((cond != al) && (cond != nv));
csinv(rd, rn, rm, cond);
}
void MacroAssembler::Csneg(const Register& rd,
const Register& rn,
const Register& rm,
Condition cond) {
void MacroAssembler::Csneg(const Register& rd, const Register& rn,
const Register& rm, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
DCHECK((cond != al) && (cond != nv));
@ -469,11 +436,8 @@ void TurboAssembler::Debug(const char* message, uint32_t code, Instr params) {
debug(message, code, params);
}
void MacroAssembler::Extr(const Register& rd,
const Register& rn,
const Register& rm,
unsigned lsb) {
void MacroAssembler::Extr(const Register& rd, const Register& rn,
const Register& rm, unsigned lsb) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
extr(rd, rn, rm, lsb);
@ -716,13 +680,11 @@ void TurboAssembler::Fsub(const VRegister& fd, const VRegister& fn,
fsub(fd, fn, fm);
}
void MacroAssembler::Hint(SystemHint code) {
DCHECK(allow_macro_instructions());
hint(code);
}
void MacroAssembler::Hlt(int code) {
DCHECK(allow_macro_instructions());
hlt(code);
@ -792,7 +754,6 @@ void TurboAssembler::Mrs(const Register& rt, SystemRegister sysreg) {
mrs(rt, sysreg);
}
void MacroAssembler::Msr(SystemRegister sysreg, const Register& rt) {
DCHECK(allow_macro_instructions());
msr(sysreg, rt);
@ -831,7 +792,6 @@ void TurboAssembler::Ret(const Register& xn) {
CheckVeneerPool(false, false);
}
void MacroAssembler::Rev(const Register& rd, const Register& rn) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@ -864,10 +824,7 @@ void TurboAssembler::Ror(const Register& rd, const Register& rn,
rorv(rd, rn, rm);
}
void MacroAssembler::Sbfiz(const Register& rd,
const Register& rn,
unsigned lsb,
void MacroAssembler::Sbfiz(const Register& rd, const Register& rn, unsigned lsb,
unsigned width) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@ -894,21 +851,15 @@ void TurboAssembler::Sdiv(const Register& rd, const Register& rn,
sdiv(rd, rn, rm);
}
void MacroAssembler::Smaddl(const Register& rd,
const Register& rn,
const Register& rm,
const Register& ra) {
void MacroAssembler::Smaddl(const Register& rd, const Register& rn,
const Register& rm, const Register& ra) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
smaddl(rd, rn, rm, ra);
}
void MacroAssembler::Smsubl(const Register& rd,
const Register& rn,
const Register& rm,
const Register& ra) {
void MacroAssembler::Smsubl(const Register& rd, const Register& rn,
const Register& rm, const Register& ra) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
smsubl(rd, rn, rm, ra);
@ -921,9 +872,7 @@ void TurboAssembler::Smull(const Register& rd, const Register& rn,
smull(rd, rn, rm);
}
void MacroAssembler::Smulh(const Register& rd,
const Register& rn,
void MacroAssembler::Smulh(const Register& rd, const Register& rn,
const Register& rm) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@ -982,21 +931,15 @@ void TurboAssembler::Udiv(const Register& rd, const Register& rn,
udiv(rd, rn, rm);
}
void MacroAssembler::Umaddl(const Register& rd,
const Register& rn,
const Register& rm,
const Register& ra) {
void MacroAssembler::Umaddl(const Register& rd, const Register& rn,
const Register& rm, const Register& ra) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
umaddl(rd, rn, rm, ra);
}
void MacroAssembler::Umsubl(const Register& rd,
const Register& rn,
const Register& rm,
const Register& ra) {
void MacroAssembler::Umsubl(const Register& rd, const Register& rn,
const Register& rm, const Register& ra) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
umsubl(rd, rn, rm, ra);
@ -1025,7 +968,6 @@ void TurboAssembler::InitializeRootRegister() {
Mov(kRootRegister, Operand(isolate_root));
}
void MacroAssembler::SmiTag(Register dst, Register src) {
DCHECK(dst.Is64Bits() && src.Is64Bits());
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
@ -1233,10 +1175,8 @@ void TurboAssembler::DropSlots(int64_t count) {
void TurboAssembler::PushArgument(const Register& arg) { Push(padreg, arg); }
void MacroAssembler::CompareAndBranch(const Register& lhs,
const Operand& rhs,
Condition cond,
Label* label) {
void MacroAssembler::CompareAndBranch(const Register& lhs, const Operand& rhs,
Condition cond, Label* label) {
if (rhs.IsImmediate() && (rhs.ImmediateValue() == 0) &&
((cond == eq) || (cond == ne))) {
if (cond == eq) {
@ -1276,26 +1216,22 @@ void TurboAssembler::TestAndBranchIfAllClear(const Register& reg,
}
}
void MacroAssembler::InlineData(uint64_t data) {
DCHECK(is_uint16(data));
InstructionAccurateScope scope(this, 1);
movz(xzr, data);
}
void MacroAssembler::EnableInstrumentation() {
InstructionAccurateScope scope(this, 1);
movn(xzr, InstrumentStateEnable);
}
void MacroAssembler::DisableInstrumentation() {
InstructionAccurateScope scope(this, 1);
movn(xzr, InstrumentStateDisable);
}
void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
DCHECK_EQ(strlen(marker_name), 2);
@ -1310,4 +1246,4 @@ void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
} // namespace internal
} // namespace v8
#endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
#endif // V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_

View File

@ -26,7 +26,7 @@
// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
#if 0
#include "src/arm64/macro-assembler-arm64.h"
#include "src/codegen/arm64/macro-assembler-arm64.h"
#endif
namespace v8 {
@ -324,8 +324,8 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
// this case, the instruction is discarded.
//
// If sp is an operand, add #0 is emitted, otherwise, orr #0.
if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
(discard_mode == kDontDiscardForSameWReg))) {
if (!rd.Is(operand.reg()) ||
(rd.Is32Bits() && (discard_mode == kDontDiscardForSameWReg))) {
Assembler::mov(rd, operand.reg());
}
// This case can handle writes into the system stack pointer directly.
@ -544,7 +544,6 @@ unsigned TurboAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
return count;
}
// The movz instruction can generate immediates containing an arbitrary 16-bit
// half-word, with remaining bits clear, eg. 0x00001234, 0x0000123400000000.
bool TurboAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
@ -700,7 +699,7 @@ void TurboAssembler::AddSubMacro(const Register& rd, const Register& rn,
Ldr(temp, operand.immediate());
AddSubMacro(rd, rn, temp, S, op);
} else if ((operand.IsImmediate() &&
!IsImmAddSub(operand.ImmediateValue())) ||
!IsImmAddSub(operand.ImmediateValue())) ||
(rn.IsZero() && !operand.IsShiftedRegister()) ||
(operand.IsShiftedRegister() && (operand.shift() == ROR))) {
UseScratchRegisterScope temps(this);
@ -754,9 +753,9 @@ void TurboAssembler::AddSubWithCarryMacro(const Register& rd,
// Add/sub with carry (shifted register).
DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
DCHECK(operand.shift() != ROR);
DCHECK(is_uintn(operand.shift_amount(),
rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
: kWRegSizeInBitsLog2));
DCHECK(is_uintn(operand.shift_amount(), rd.SizeInBits() == kXRegSizeInBits
? kXRegSizeInBitsLog2
: kWRegSizeInBitsLog2));
Register temp = temps.AcquireSameSizeAs(rn);
EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
AddSubWithCarry(rd, rn, temp, S, op);
@ -853,17 +852,15 @@ bool TurboAssembler::NeedExtraInstructionsOrRegisterBranch(
// instruction in the chain is too far away.
if (label->is_bound() || label->is_linked()) {
need_longer_range =
!Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());
!Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());
}
if (!need_longer_range && !label->is_bound()) {
int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type);
unresolved_branches_.insert(
std::pair<int, FarBranchInfo>(max_reachable_pc,
FarBranchInfo(pc_offset(), label)));
unresolved_branches_.insert(std::pair<int, FarBranchInfo>(
max_reachable_pc, FarBranchInfo(pc_offset(), label)));
// Also maintain the next pool check.
next_veneer_pool_check_ =
Min(next_veneer_pool_check_,
max_reachable_pc - kVeneerDistanceCheckMargin);
next_veneer_pool_check_ = Min(
next_veneer_pool_check_, max_reachable_pc - kVeneerDistanceCheckMargin);
}
return need_longer_range;
}
@ -892,8 +889,8 @@ void TurboAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
InstructionAccurateScope scope(
this, PatchingAssembler::kAdrFarPatchableNInstrs);
InstructionAccurateScope scope(this,
PatchingAssembler::kAdrFarPatchableNInstrs);
adr(rd, label);
for (int i = 0; i < PatchingAssembler::kAdrFarPatchableNNops; ++i) {
nop(ADR_FAR_NOP);
@ -909,12 +906,23 @@ void TurboAssembler::B(Label* label, BranchType type, Register reg, int bit) {
B(static_cast<Condition>(type), label);
} else {
switch (type) {
case always: B(label); break;
case never: break;
case reg_zero: Cbz(reg, label); break;
case reg_not_zero: Cbnz(reg, label); break;
case reg_bit_clear: Tbz(reg, bit, label); break;
case reg_bit_set: Tbnz(reg, bit, label); break;
case always:
B(label);
break;
case never:
break;
case reg_zero:
Cbz(reg, label);
break;
case reg_not_zero:
Cbnz(reg, label);
break;
case reg_bit_clear:
Tbz(reg, bit, label);
break;
case reg_bit_set:
Tbnz(reg, bit, label);
break;
default:
UNREACHABLE();
}
@ -927,7 +935,7 @@ void TurboAssembler::B(Label* label, Condition cond) {
Label done;
bool need_extra_instructions =
NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);
NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);
if (need_extra_instructions) {
b(&done, NegateCondition(cond));
@ -943,7 +951,7 @@ void TurboAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
Label done;
bool need_extra_instructions =
NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
if (need_extra_instructions) {
tbz(rt, bit_pos, &done);
@ -959,7 +967,7 @@ void TurboAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
Label done;
bool need_extra_instructions =
NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
if (need_extra_instructions) {
tbnz(rt, bit_pos, &done);
@ -975,7 +983,7 @@ void TurboAssembler::Cbnz(const Register& rt, Label* label) {
Label done;
bool need_extra_instructions =
NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
if (need_extra_instructions) {
cbz(rt, &done);
@ -991,7 +999,7 @@ void TurboAssembler::Cbz(const Register& rt, Label* label) {
Label done;
bool need_extra_instructions =
NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
if (need_extra_instructions) {
cbnz(rt, &done);
@ -1002,7 +1010,6 @@ void TurboAssembler::Cbz(const Register& rt, Label* label) {
bind(&done);
}
// Pseudo-instructions.
void TurboAssembler::Abs(const Register& rd, const Register& rm,
@ -1026,7 +1033,6 @@ void TurboAssembler::Abs(const Register& rd, const Register& rm,
}
}
// Abstracted stack operations.
void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1,
@ -1277,16 +1283,13 @@ void TurboAssembler::PokePair(const CPURegister& src1, const CPURegister& src2,
Stp(src1, src2, MemOperand(sp, offset));
}
void MacroAssembler::PeekPair(const CPURegister& dst1,
const CPURegister& dst2,
void MacroAssembler::PeekPair(const CPURegister& dst1, const CPURegister& dst2,
int offset) {
DCHECK(AreSameSizeAndType(dst1, dst2));
DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
Ldp(dst1, dst2, MemOperand(sp, offset));
}
void MacroAssembler::PushCalleeSavedRegisters() {
// Ensure that the macro-assembler doesn't use any scratch registers.
InstructionAccurateScope scope(this);
@ -1306,7 +1309,6 @@ void MacroAssembler::PushCalleeSavedRegisters() {
stp(x19, x20, tos);
}
void MacroAssembler::PopCalleeSavedRegisters() {
// Ensure that the macro-assembler doesn't use any scratch registers.
InstructionAccurateScope scope(this);
@ -1461,7 +1463,6 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}
void MacroAssembler::LoadObject(Register result, Handle<Object> object) {
AllowDeferredHandleDereference heap_object_check;
if (object->IsHeapObject()) {
@ -1561,7 +1562,6 @@ void MacroAssembler::AssertFunction(Register object) {
}
}
void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotABoundFunction);
@ -1638,8 +1638,7 @@ void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
CallCodeObject(centry);
}
void MacroAssembler::CallRuntime(const Runtime::Function* f,
int num_arguments,
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles) {
// All arguments must be on the stack before this function is called.
// x0 holds the return value after the call.
@ -1690,7 +1689,7 @@ int TurboAssembler::ActivationFrameAlignment() {
// Note: This will break if we ever start generating snapshots on one ARM
// platform for another ARM platform with a different alignment.
return base::OS::ActivationFrameAlignment();
#else // V8_HOST_ARCH_ARM64
#else // V8_HOST_ARCH_ARM64
// If we are using the simulator then we should always align to the expected
// alignment. As the simulator is used to generate snapshots we do not know
// if the target platform will need alignment, so this is controlled from a
@ -2440,13 +2439,11 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
Pop(fp, lr);
}
void MacroAssembler::ExitFramePreserveFPRegs() {
DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
PushCPURegList(kCallerSavedV);
}
void MacroAssembler::ExitFrameRestoreFPRegs() {
// Read the registers from the stack without popping them. The stack pointer
// will be reset as part of the unwinding process.
@ -2522,7 +2519,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
// Leave the current exit frame.
void MacroAssembler::LeaveExitFrame(bool restore_doubles,
const Register& scratch,
@ -2582,7 +2578,6 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
}
}
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
IncrementCounter(counter, -value, scratch1, scratch2);
@ -2597,36 +2592,27 @@ void MacroAssembler::MaybeDropFrames() {
ne);
}
void MacroAssembler::JumpIfObjectType(Register object,
Register map,
Register type_reg,
InstanceType type,
Label* if_cond_pass,
Condition cond) {
void MacroAssembler::JumpIfObjectType(Register object, Register map,
Register type_reg, InstanceType type,
Label* if_cond_pass, Condition cond) {
CompareObjectType(object, map, type_reg, type);
B(cond, if_cond_pass);
}
// Sets condition flags based on comparison, and returns type in type_reg.
void MacroAssembler::CompareObjectType(Register object,
Register map,
Register type_reg,
InstanceType type) {
void MacroAssembler::CompareObjectType(Register object, Register map,
Register type_reg, InstanceType type) {
LoadTaggedPointerField(map, FieldMemOperand(object, HeapObject::kMapOffset));
CompareInstanceType(map, type_reg, type);
}
// Sets condition flags based on comparison, and returns type in type_reg.
void MacroAssembler::CompareInstanceType(Register map,
Register type_reg,
void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
InstanceType type) {
Ldrh(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
Cmp(type_reg, type);
}
void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
// Load the map's "bit field 2".
Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
@ -3108,10 +3094,9 @@ void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
LoadTaggedPointerField(dst, ContextMemOperand(dst, index));
}
// This is the main Printf implementation. All other Printf variants call
// PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
void MacroAssembler::PrintfNoPreserve(const char * format,
void MacroAssembler::PrintfNoPreserve(const char* format,
const CPURegister& arg0,
const CPURegister& arg1,
const CPURegister& arg2,
@ -3136,7 +3121,7 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
// We can use caller-saved registers as scratch values, except for the
// arguments and the PCS registers where they might need to go.
CPURegList tmp_list = kCallerSaved;
tmp_list.Remove(x0); // Used to pass the format string.
tmp_list.Remove(x0); // Used to pass the format string.
tmp_list.Remove(kPCSVarargs);
tmp_list.Remove(arg0, arg1, arg2, arg3);
@ -3221,7 +3206,8 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
Adr(x0, &format_address);
// Emit the format string directly in the instruction stream.
{ BlockPoolsScope scope(this);
{
BlockPoolsScope scope(this);
Label after_data;
B(&after_data);
Bind(&format_address);
@ -3241,7 +3227,7 @@ void TurboAssembler::CallPrintf(int arg_count, const CPURegister* args) {
{
InstructionAccurateScope scope(this, kPrintfLength / kInstrSize);
hlt(kImmExceptionIsPrintf);
dc32(arg_count); // kPrintfArgCountOffset
dc32(arg_count); // kPrintfArgCountOffset
// Determine the argument pattern.
uint32_t arg_pattern_list = 0;
@ -3256,18 +3242,15 @@ void TurboAssembler::CallPrintf(int arg_count, const CPURegister* args) {
DCHECK(arg_pattern < (1 << kPrintfArgPatternBits));
arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i));
}
dc32(arg_pattern_list); // kPrintfArgPatternListOffset
dc32(arg_pattern_list); // kPrintfArgPatternListOffset
}
#else
Call(ExternalReference::printf_function());
#endif
}
void MacroAssembler::Printf(const char * format,
CPURegister arg0,
CPURegister arg1,
CPURegister arg2,
void MacroAssembler::Printf(const char* format, CPURegister arg0,
CPURegister arg1, CPURegister arg2,
CPURegister arg3) {
// Printf is expected to preserve all registers, so make sure that none are
// available as scratch registers until we've preserved them.
@ -3293,7 +3276,8 @@ void MacroAssembler::Printf(const char * format,
TmpList()->set_list(tmp_list.list());
FPTmpList()->set_list(fp_tmp_list.list());
{ UseScratchRegisterScope temps(this);
{
UseScratchRegisterScope temps(this);
// If any of the arguments are the current stack pointer, allocate a new
// register for them, and adjust the value to compensate for pushing the
// caller-saved registers.
@ -3315,7 +3299,8 @@ void MacroAssembler::Printf(const char * format,
}
// Preserve NZCV.
{ UseScratchRegisterScope temps(this);
{
UseScratchRegisterScope temps(this);
Register tmp = temps.AcquireX();
Mrs(tmp, NZCV);
Push(tmp, xzr);
@ -3324,7 +3309,8 @@ void MacroAssembler::Printf(const char * format,
PrintfNoPreserve(format, arg0, arg1, arg2, arg3);
// Restore NZCV.
{ UseScratchRegisterScope temps(this);
{
UseScratchRegisterScope temps(this);
Register tmp = temps.AcquireX();
Pop(xzr, tmp);
Msr(NZCV, tmp);
@ -3343,7 +3329,6 @@ UseScratchRegisterScope::~UseScratchRegisterScope() {
availablefp_->set_list(old_availablefp_);
}
Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
int code = AcquireNextAvailable(available_).code();
return Register::Create(code, reg.SizeInBits());
@ -3354,7 +3339,6 @@ VRegister UseScratchRegisterScope::AcquireSameSizeAs(const VRegister& reg) {
return VRegister::Create(code, reg.SizeInBits());
}
CPURegister UseScratchRegisterScope::AcquireNextAvailable(
CPURegList* available) {
CHECK(!available->IsEmpty());
@ -3363,7 +3347,6 @@ CPURegister UseScratchRegisterScope::AcquireNextAvailable(
return result;
}
MemOperand ContextMemOperand(Register context, int index) {
return MemOperand(context, Context::SlotOffset(index));
}

View File

@ -6,24 +6,23 @@
#error This header must be included via macro-assembler.h
#endif
#ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
#define V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
#ifndef V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_H_
#define V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_H_
#include <vector>
#include "src/arm64/assembler-arm64.h"
#include "src/base/bits.h"
#include "src/codegen/arm64/assembler-arm64.h"
#include "src/codegen/bailout-reason.h"
#include "src/common/globals.h"
// Simulator specific helpers.
#if USE_SIMULATOR
// TODO(all): If possible automatically prepend an indicator like
// UNIMPLEMENTED or LOCATION.
#define ASM_UNIMPLEMENTED(message) \
__ Debug(message, __LINE__, NO_PARAM)
#define ASM_UNIMPLEMENTED_BREAK(message) \
__ Debug(message, __LINE__, \
// TODO(all): If possible automatically prepend an indicator like
// UNIMPLEMENTED or LOCATION.
#define ASM_UNIMPLEMENTED(message) __ Debug(message, __LINE__, NO_PARAM)
#define ASM_UNIMPLEMENTED_BREAK(message) \
__ Debug(message, __LINE__, \
FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK)
#if DEBUG
#define ASM_LOCATION(message) __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
@ -40,7 +39,6 @@
#define ASM_LOCATION_IN_ASSEMBLER(message)
#endif
namespace v8 {
namespace internal {
@ -110,11 +108,14 @@ enum BranchType {
// 'always' is used to generate unconditional branches.
// 'never' is used to not generate a branch (generally as the inverse
// branch type of 'always).
always, never,
always,
never,
// cbz and cbnz
reg_zero, reg_not_zero,
reg_zero,
reg_not_zero,
// tbz and tbnz
reg_bit_clear, reg_bit_set,
reg_bit_clear,
reg_bit_set,
// Aliases.
kBranchTypeFirstCondition = eq,
@ -1547,11 +1548,13 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void LoadObject(Register result, Handle<Object> object);
inline void PushSizeRegList(RegList registers, unsigned reg_size,
inline void PushSizeRegList(
RegList registers, unsigned reg_size,
CPURegister::RegisterType type = CPURegister::kRegister) {
PushCPURegList(CPURegList(type, reg_size, registers));
}
inline void PopSizeRegList(RegList registers, unsigned reg_size,
inline void PopSizeRegList(
RegList registers, unsigned reg_size,
CPURegister::RegisterType type = CPURegister::kRegister) {
PopCPURegList(CPURegList(type, reg_size, registers));
}
@ -1591,10 +1594,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Compare a register with an operand, and branch to label depending on the
// condition. May corrupt the status flags.
inline void CompareAndBranch(const Register& lhs,
const Operand& rhs,
Condition cond,
Label* label);
inline void CompareAndBranch(const Register& lhs, const Operand& rhs,
Condition cond, Label* label);
// Insert one or more instructions into the instruction stream that encode
// some caller-defined data. The instructions used will be executable with no
@ -1636,14 +1637,14 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
static int SafepointRegisterStackIndex(int reg_code);
template<typename Field>
template <typename Field>
void DecodeField(Register dst, Register src) {
static const int shift = Field::kShift;
static const int setbits = CountSetBits(Field::kMask, 32);
Ubfx(dst, src, shift, setbits);
}
template<typename Field>
template <typename Field>
void DecodeField(Register reg) {
DecodeField<Field>(reg, reg);
}
@ -1679,8 +1680,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// ---- Calling / Jumping helpers ----
void CallRuntime(const Runtime::Function* f,
int num_arguments,
void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
// Convenience function: Same as above, but takes the fid instead.
@ -1745,12 +1745,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// are the same register). It leaves the heap object in the heap_object
// register unless the heap_object register is the same register as one of the
// other registers.
void CompareObjectType(Register heap_object,
Register map,
Register type_reg,
void CompareObjectType(Register heap_object, Register map, Register type_reg,
InstanceType type);
// Compare object type for heap object, and branch if equal (or not.)
// heap_object contains a non-Smi whose object type should be compared with
// the given type. This both sets the flags and leaves the object type in
@ -1758,19 +1755,14 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// type_reg and map register are the same register). It leaves the heap
// object in the heap_object register unless the heap_object register is the
// same register as one of the other registers.
void JumpIfObjectType(Register object,
Register map,
Register type_reg,
InstanceType type,
Label* if_cond_pass,
void JumpIfObjectType(Register object, Register map, Register type_reg,
InstanceType type, Label* if_cond_pass,
Condition cond = eq);
// Compare instance type in a map. map contains a valid map object whose
// object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
void CompareInstanceType(Register map,
Register type_reg,
InstanceType type);
void CompareInstanceType(Register map, Register type_reg, InstanceType type);
// Load the elements kind field from a map, and return it in the result
// register.
@ -1893,17 +1885,14 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// a problem, preserve the important registers manually and then call
// PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
// implicitly preserved.
void Printf(const char * format,
CPURegister arg0 = NoCPUReg,
CPURegister arg1 = NoCPUReg,
CPURegister arg2 = NoCPUReg,
void Printf(const char* format, CPURegister arg0 = NoCPUReg,
CPURegister arg1 = NoCPUReg, CPURegister arg2 = NoCPUReg,
CPURegister arg3 = NoCPUReg);
// Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
//
// The return code from the system printf call will be returned in x0.
void PrintfNoPreserve(const char * format,
const CPURegister& arg0 = NoCPUReg,
void PrintfNoPreserve(const char* format, const CPURegister& arg0 = NoCPUReg,
const CPURegister& arg1 = NoCPUReg,
const CPURegister& arg2 = NoCPUReg,
const CPURegister& arg3 = NoCPUReg);
@ -1924,7 +1913,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
// Use this scope when you need a one-to-one mapping between methods and
// instructions. This scope prevents the MacroAssembler from being called and
// literal pools from being emitted. It also asserts the number of instructions
@ -2008,16 +1996,15 @@ class V8_EXPORT_PRIVATE UseScratchRegisterScope {
VRegister AcquireSameSizeAs(const VRegister& reg);
private:
static CPURegister AcquireNextAvailable(
CPURegList* available);
static CPURegister AcquireNextAvailable(CPURegList* available);
// Available scratch registers.
CPURegList* available_; // kRegister
CPURegList* availablefp_; // kVRegister
CPURegList* available_; // kRegister
CPURegList* availablefp_; // kVRegister
// The state of the available lists at the start of this scope.
RegList old_available_; // kRegister
RegList old_availablefp_; // kVRegister
RegList old_available_; // kRegister
RegList old_availablefp_; // kVRegister
};
MemOperand ContextMemOperand(Register context, int index = 0);
@ -2028,4 +2015,4 @@ MemOperand NativeContextMemOperand();
#define ACCESS_MASM(masm) masm->
#endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
#endif // V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_H_

View File

@ -4,7 +4,7 @@
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/register-arm64.h"
#include "src/codegen/arm64/register-arm64.h"
namespace v8 {
namespace internal {

View File

@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM64_REGISTER_ARM64_H_
#define V8_ARM64_REGISTER_ARM64_H_
#ifndef V8_CODEGEN_ARM64_REGISTER_ARM64_H_
#define V8_CODEGEN_ARM64_REGISTER_ARM64_H_
#include "src/arm64/utils-arm64.h"
#include "src/codegen/arm64/utils-arm64.h"
#include "src/codegen/register.h"
#include "src/codegen/reglist.h"
#include "src/common/globals.h"
@ -748,4 +748,4 @@ constexpr Register kWasmCompileLazyFuncIndexRegister = x8;
} // namespace internal
} // namespace v8
#endif // V8_ARM64_REGISTER_ARM64_H_
#endif // V8_CODEGEN_ARM64_REGISTER_ARM64_H_

View File

@ -4,8 +4,7 @@
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/utils-arm64.h"
#include "src/codegen/arm64/utils-arm64.h"
namespace v8 {
namespace internal {
@ -81,7 +80,6 @@ int CountLeadingZeros(uint64_t value, int width) {
return base::bits::CountLeadingZeros64(value << (64 - width));
}
int CountLeadingSignBits(int64_t value, int width) {
DCHECK(base::bits::IsPowerOfTwo(width) && (width <= 64));
if (value >= 0) {
@ -91,7 +89,6 @@ int CountLeadingSignBits(int64_t value, int width) {
}
}
int CountTrailingZeros(uint64_t value, int width) {
DCHECK((width == 32) || (width == 64));
if (width == 64) {
@ -101,7 +98,6 @@ int CountTrailingZeros(uint64_t value, int width) {
static_cast<uint32_t>(value & 0xFFFFFFFFF)));
}
int CountSetBits(uint64_t value, int width) {
DCHECK((width == 32) || (width == 64));
if (width == 64) {
@ -121,12 +117,10 @@ int HighestSetBitPosition(uint64_t value) {
return 63 - CountLeadingZeros(value, 64);
}
uint64_t LargestPowerOf2Divisor(uint64_t value) {
return value & (-(int64_t)value);
}
int MaskToBit(uint64_t mask) {
DCHECK_EQ(CountSetBits(mask, 64), 1);
return CountTrailingZeros(mask, 64);

View File

@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM64_UTILS_ARM64_H_
#define V8_ARM64_UTILS_ARM64_H_
#ifndef V8_CODEGEN_ARM64_UTILS_ARM64_H_
#define V8_CODEGEN_ARM64_UTILS_ARM64_H_
#include <cmath>
#include "src/arm64/constants-arm64.h"
#include "src/codegen/arm64/constants-arm64.h"
#include "src/utils/utils.h"
namespace v8 {
@ -40,7 +40,6 @@ int HighestSetBitPosition(uint64_t value);
uint64_t LargestPowerOf2Divisor(uint64_t value);
int MaskToBit(uint64_t mask);
template <typename T>
T ReverseBytes(T value, int block_bytes_log2) {
DCHECK((sizeof(value) == 4) || (sizeof(value) == 8));
@ -70,7 +69,6 @@ T ReverseBytes(T value, int block_bytes_log2) {
return result;
}
// NaN tests.
inline bool IsSignallingNaN(double num) {
uint64_t raw = bit_cast<uint64_t>(num);
@ -80,7 +78,6 @@ inline bool IsSignallingNaN(double num) {
return false;
}
inline bool IsSignallingNaN(float num) {
uint32_t raw = bit_cast<uint32_t>(num);
if (std::isnan(num) && ((raw & kSQuietNanMask) == 0)) {
@ -99,27 +96,23 @@ inline bool IsQuietNaN(T num) {
return std::isnan(num) && !IsSignallingNaN(num);
}
// Convert the NaN in 'num' to a quiet NaN.
inline double ToQuietNaN(double num) {
DCHECK(std::isnan(num));
return bit_cast<double>(bit_cast<uint64_t>(num) | kDQuietNanMask);
}
inline float ToQuietNaN(float num) {
DCHECK(std::isnan(num));
return bit_cast<float>(bit_cast<uint32_t>(num) |
static_cast<uint32_t>(kSQuietNanMask));
}
// Fused multiply-add.
inline double FusedMultiplyAdd(double op1, double op2, double a) {
return fma(op1, op2, a);
}
inline float FusedMultiplyAdd(float op1, float op2, float a) {
return fmaf(op1, op2, a);
}
@ -127,4 +120,4 @@ inline float FusedMultiplyAdd(float op1, float op2, float a) {
} // namespace internal
} // namespace v8
#endif // V8_ARM64_UTILS_ARM64_H_
#endif // V8_CODEGEN_ARM64_UTILS_ARM64_H_

View File

@ -8,21 +8,21 @@
#include "src/codegen/assembler.h"
#if V8_TARGET_ARCH_IA32
#include "src/ia32/assembler-ia32.h"
#include "src/codegen/ia32/assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "src/x64/assembler-x64.h"
#include "src/codegen/x64/assembler-x64.h"
#elif V8_TARGET_ARCH_ARM64
#include "src/arm64/assembler-arm64.h"
#include "src/codegen/arm64/assembler-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "src/arm/assembler-arm.h"
#include "src/codegen/arm/assembler-arm.h"
#elif V8_TARGET_ARCH_PPC
#include "src/ppc/assembler-ppc.h"
#include "src/codegen/ppc/assembler-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/assembler-mips.h"
#include "src/codegen/mips/assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/assembler-mips64.h"
#include "src/codegen/mips64/assembler-mips64.h"
#elif V8_TARGET_ARCH_S390
#include "src/s390/assembler-s390.h"
#include "src/codegen/s390/assembler-s390.h"
#else
#error Unknown architecture.
#endif

View File

@ -8,21 +8,21 @@
#include "src/codegen/assembler.h"
#if V8_TARGET_ARCH_IA32
#include "src/ia32/assembler-ia32-inl.h"
#include "src/codegen/ia32/assembler-ia32-inl.h"
#elif V8_TARGET_ARCH_X64
#include "src/x64/assembler-x64-inl.h"
#include "src/codegen/x64/assembler-x64-inl.h"
#elif V8_TARGET_ARCH_ARM64
#include "src/arm64/assembler-arm64-inl.h"
#include "src/codegen/arm64/assembler-arm64-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "src/arm/assembler-arm-inl.h"
#include "src/codegen/arm/assembler-arm-inl.h"
#elif V8_TARGET_ARCH_PPC
#include "src/ppc/assembler-ppc-inl.h"
#include "src/codegen/ppc/assembler-ppc-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/assembler-mips-inl.h"
#include "src/codegen/mips/assembler-mips-inl.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/assembler-mips64-inl.h"
#include "src/codegen/mips64/assembler-mips64-inl.h"
#elif V8_TARGET_ARCH_S390
#include "src/s390/assembler-s390-inl.h"
#include "src/codegen/s390/assembler-s390-inl.h"
#else
#error Unknown architecture.
#endif

View File

@ -6,21 +6,21 @@
#define V8_CODEGEN_CONSTANTS_ARCH_H_
#if V8_TARGET_ARCH_ARM
#include "src/arm/constants-arm.h" // NOLINT
#include "src/codegen/arm/constants-arm.h" // NOLINT
#elif V8_TARGET_ARCH_ARM64
#include "src/arm64/constants-arm64.h" // NOLINT
#include "src/codegen/arm64/constants-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_IA32
#include "src/ia32/constants-ia32.h" // NOLINT
#include "src/codegen/ia32/constants-ia32.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/constants-mips.h" // NOLINT
#include "src/codegen/mips/constants-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/constants-mips64.h" // NOLINT
#include "src/codegen/mips64/constants-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
#include "src/ppc/constants-ppc.h" // NOLINT
#include "src/codegen/ppc/constants-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_S390
#include "src/s390/constants-s390.h" // NOLINT
#include "src/codegen/s390/constants-s390.h" // NOLINT
#elif V8_TARGET_ARCH_X64
#include "src/x64/constants-x64.h" // NOLINT
#include "src/codegen/x64/constants-x64.h" // NOLINT
#else
#error Unsupported target architecture.
#endif

View File

@ -34,10 +34,10 @@
// A light-weight IA32 Assembler.
#ifndef V8_IA32_ASSEMBLER_IA32_INL_H_
#define V8_IA32_ASSEMBLER_IA32_INL_H_
#ifndef V8_CODEGEN_IA32_ASSEMBLER_IA32_INL_H_
#define V8_CODEGEN_IA32_ASSEMBLER_IA32_INL_H_
#include "src/ia32/assembler-ia32.h"
#include "src/codegen/ia32/assembler-ia32.h"
#include "src/codegen/assembler.h"
#include "src/debug/debug.h"
@ -50,7 +50,6 @@ bool CpuFeatures::SupportsOptimizer() { return true; }
bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(SSE4_1); }
// The modes possibly affected by apply must be in kApplyMask.
void RelocInfo::apply(intptr_t delta) {
DCHECK_EQ(kApplyMask, (RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
@ -68,7 +67,6 @@ void RelocInfo::apply(intptr_t delta) {
}
}
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
@ -79,15 +77,9 @@ Address RelocInfo::target_address_address() {
return pc_;
}
Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); }
Address RelocInfo::constant_pool_entry_address() {
UNREACHABLE();
}
int RelocInfo::target_address_size() {
return Assembler::kSpecialTargetSize;
}
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
HeapObject RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
@ -135,7 +127,6 @@ Address RelocInfo::target_internal_reference() {
return ReadUnalignedValue<Address>(pc_);
}
Address RelocInfo::target_internal_reference_address() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
return pc_;
@ -179,7 +170,6 @@ void Assembler::emit(uint32_t x) {
pc_ += sizeof(uint32_t);
}
void Assembler::emit_q(uint64_t x) {
WriteUnalignedValue(reinterpret_cast<Address>(pc_), x);
pc_ += sizeof(uint64_t);
@ -200,7 +190,6 @@ void Assembler::emit(Handle<Code> code, RelocInfo::Mode rmode) {
emit(code.address(), rmode);
}
void Assembler::emit(const Immediate& x) {
if (x.rmode_ == RelocInfo::INTERNAL_REFERENCE) {
Label* label = reinterpret_cast<Label*>(x.immediate());
@ -216,7 +205,6 @@ void Assembler::emit(const Immediate& x) {
}
}
void Assembler::emit_code_relative_offset(Label* label) {
if (label->is_bound()) {
int32_t pos;
@ -240,7 +228,6 @@ void Assembler::emit_w(const Immediate& x) {
pc_ += sizeof(uint16_t);
}
Address Assembler::target_address_at(Address pc, Address constant_pool) {
return pc + sizeof(int32_t) + ReadUnalignedValue<int32_t>(pc);
}
@ -270,19 +257,16 @@ Displacement Assembler::disp_at(Label* L) {
return Displacement(long_at(L->pos()));
}
void Assembler::disp_at_put(Label* L, Displacement disp) {
long_at_put(L->pos(), disp.data());
}
void Assembler::emit_disp(Label* L, Displacement::Type type) {
Displacement disp(L, type);
L->link_to(pc_offset());
emit(static_cast<int>(disp.data()));
}
void Assembler::emit_near_disp(Label* L) {
byte disp = 0x00;
if (L->is_near_linked()) {
@ -299,7 +283,6 @@ void Assembler::deserialization_set_target_internal_reference_at(
WriteUnalignedValue(pc, target);
}
void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
DCHECK_EQ(len_, 1);
DCHECK_EQ(scale & -4, 0);
@ -309,7 +292,6 @@ void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
len_ = 2;
}
void Operand::set_disp8(int8_t disp) {
DCHECK(len_ == 1 || len_ == 2);
*reinterpret_cast<int8_t*>(&buf_[len_++]) = disp;
@ -318,4 +300,4 @@ void Operand::set_disp8(int8_t disp) {
} // namespace internal
} // namespace v8
#endif // V8_IA32_ASSEMBLER_IA32_INL_H_
#endif // V8_CODEGEN_IA32_ASSEMBLER_IA32_INL_H_

View File

@ -34,7 +34,7 @@
// significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
#include "src/ia32/assembler-ia32.h"
#include "src/codegen/ia32/assembler-ia32.h"
#include <cstring>
@ -97,7 +97,6 @@ V8_INLINE uint64_t _xgetbv(unsigned int xcr) {
#endif // !V8_LIBC_MSVCRT
bool OSHasAVXSupport() {
#if V8_OS_MACOSX
// Mac OS X up to 10.9 has a bug where AVX transitions were indeed being
@ -121,8 +120,9 @@ bool OSHasAVXSupport() {
return (feature_mask & 0x6) == 0x6;
}
} // namespace
#undef _XCR_XFEATURE_ENABLED_MASK
} // namespace
void CpuFeatures::ProbeImpl(bool cross_compile) {
base::CPU cpu;
@ -154,8 +154,7 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
}
}
void CpuFeatures::PrintTarget() { }
void CpuFeatures::PrintTarget() {}
void CpuFeatures::PrintFeatures() {
printf(
"SSE3=%d SSSE3=%d SSE4_1=%d AVX=%d FMA3=%d BMI1=%d BMI2=%d LZCNT=%d "
@ -167,7 +166,6 @@ void CpuFeatures::PrintFeatures() {
CpuFeatures::IsSupported(POPCNT), CpuFeatures::IsSupported(ATOM));
}
// -----------------------------------------------------------------------------
// Implementation of Displacement
@ -183,7 +181,6 @@ void Displacement::init(Label* L, Type type) {
data_ = NextField::encode(next) | TypeField::encode(type);
}
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
@ -201,9 +198,7 @@ bool RelocInfo::IsCodedSpecially() {
return RelocInfo::ModeMask(rmode_) & kApplyMask;
}
bool RelocInfo::IsInConstantPool() {
return false;
}
bool RelocInfo::IsInConstantPool() { return false; }
uint32_t RelocInfo::wasm_call_tag() const {
DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
@ -232,11 +227,7 @@ Operand::Operand(Register base, int32_t disp, RelocInfo::Mode rmode) {
}
}
Operand::Operand(Register base,
Register index,
ScaleFactor scale,
int32_t disp,
Operand::Operand(Register base, Register index, ScaleFactor scale, int32_t disp,
RelocInfo::Mode rmode) {
DCHECK(index != esp); // illegal addressing mode
// [base + index*scale + disp/r]
@ -257,10 +248,7 @@ Operand::Operand(Register base,
}
}
Operand::Operand(Register index,
ScaleFactor scale,
int32_t disp,
Operand::Operand(Register index, ScaleFactor scale, int32_t disp,
RelocInfo::Mode rmode) {
DCHECK(index != esp); // illegal addressing mode
// [index*scale + disp/r]
@ -269,12 +257,10 @@ Operand::Operand(Register index,
set_dispr(disp, rmode);
}
bool Operand::is_reg_only() const {
return (buf_[0] & 0xF8) == 0xC0; // Addressing mode is register only.
}
Register Operand::reg() const {
DCHECK(is_reg_only());
return Register::from_code(buf_[0] & 0x07);
@ -305,8 +291,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
// Implementation of Assembler.
// Emit a single byte. Must always be inlined.
#define EMIT(x) \
*pc_++ = (x)
#define EMIT(x) *pc_++ = (x)
Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
@ -379,7 +364,6 @@ void Assembler::Align(int m) {
Nop((m - (addr & mask)) & mask);
}
bool Assembler::IsNop(Address addr) {
byte* a = reinterpret_cast<byte*>(addr);
while (*a == 0x66) a++;
@ -388,7 +372,6 @@ bool Assembler::IsNop(Address addr) {
return false;
}
void Assembler::Nop(int bytes) {
EnsureSpace ensure_space(this);
// Multi byte nops from http://support.amd.com/us/Processor_TechDocs/40546.pdf
@ -457,43 +440,36 @@ void Assembler::Nop(int bytes) {
}
}
void Assembler::CodeTargetAlign() {
Align(16); // Preferred alignment of jump targets on ia32.
}
void Assembler::cpuid() {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xA2);
}
void Assembler::pushad() {
EnsureSpace ensure_space(this);
EMIT(0x60);
}
void Assembler::popad() {
EnsureSpace ensure_space(this);
EMIT(0x61);
}
void Assembler::pushfd() {
EnsureSpace ensure_space(this);
EMIT(0x9C);
}
void Assembler::popfd() {
EnsureSpace ensure_space(this);
EMIT(0x9D);
}
void Assembler::push(const Immediate& x) {
EnsureSpace ensure_space(this);
if (x.is_int8()) {
@ -505,14 +481,12 @@ void Assembler::push(const Immediate& x) {
}
}
void Assembler::push_imm32(int32_t imm32) {
EnsureSpace ensure_space(this);
EMIT(0x68);
emit(imm32);
}
void Assembler::push(Register src) {
EnsureSpace ensure_space(this);
EMIT(0x50 | src.code());
@ -524,7 +498,6 @@ void Assembler::push(Operand src) {
emit_operand(esi, src);
}
void Assembler::pop(Register dst) {
DCHECK_NOT_NULL(reloc_info_writer.last_pc());
EnsureSpace ensure_space(this);
@ -537,7 +510,6 @@ void Assembler::pop(Operand dst) {
emit_operand(eax, dst);
}
void Assembler::enter(const Immediate& size) {
EnsureSpace ensure_space(this);
EMIT(0xC8);
@ -545,7 +517,6 @@ void Assembler::enter(const Immediate& size) {
EMIT(0);
}
void Assembler::leave() {
EnsureSpace ensure_space(this);
EMIT(0xC9);
@ -595,14 +566,12 @@ void Assembler::mov_w(Operand dst, const Immediate& src) {
EMIT(static_cast<int8_t>(src.immediate() >> 8));
}
void Assembler::mov(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
EMIT(0xB8 | dst.code());
emit(imm32);
}
void Assembler::mov(Register dst, const Immediate& x) {
EnsureSpace ensure_space(this);
EMIT(0xB8 | dst.code());
@ -621,7 +590,6 @@ void Assembler::mov(Register dst, Operand src) {
emit_operand(dst, src);
}
void Assembler::mov(Register dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x89);
@ -701,33 +669,28 @@ void Assembler::cmov(Condition cc, Register dst, Operand src) {
emit_operand(dst, src);
}
void Assembler::cld() {
EnsureSpace ensure_space(this);
EMIT(0xFC);
}
void Assembler::rep_movs() {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0xA5);
}
void Assembler::rep_stos() {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0xAB);
}
void Assembler::stos() {
EnsureSpace ensure_space(this);
EMIT(0xAB);
}
void Assembler::xchg(Register dst, Register src) {
EnsureSpace ensure_space(this);
if (src == eax || dst == eax) { // Single-byte encoding.
@ -835,12 +798,10 @@ void Assembler::add(Operand dst, const Immediate& x) {
emit_arith(0, dst, x);
}
void Assembler::and_(Register dst, int32_t imm32) {
and_(dst, Immediate(imm32));
}
void Assembler::and_(Register dst, const Immediate& x) {
EnsureSpace ensure_space(this);
emit_arith(4, Operand(dst), x);
@ -946,18 +907,17 @@ void Assembler::cmp(Operand op, Handle<HeapObject> handle) {
void Assembler::cmpb_al(Operand op) {
EnsureSpace ensure_space(this);
EMIT(0x38); // CMP r/m8, r8
EMIT(0x38); // CMP r/m8, r8
emit_operand(eax, op); // eax has same code as register al.
}
void Assembler::cmpw_ax(Operand op) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x39); // CMP r/m16, r16
EMIT(0x39); // CMP r/m16, r16
emit_operand(eax, op); // eax has same code as register ax.
}
void Assembler::dec_b(Register dst) {
CHECK(dst.is_byte_register());
EnsureSpace ensure_space(this);
@ -971,7 +931,6 @@ void Assembler::dec_b(Operand dst) {
emit_operand(ecx, dst);
}
void Assembler::dec(Register dst) {
EnsureSpace ensure_space(this);
EMIT(0x48 | dst.code());
@ -983,7 +942,6 @@ void Assembler::dec(Operand dst) {
emit_operand(ecx, dst);
}
void Assembler::cdq() {
EnsureSpace ensure_space(this);
EMIT(0x99);
@ -1001,7 +959,6 @@ void Assembler::div(Operand src) {
emit_operand(esi, src);
}
void Assembler::imul(Register reg) {
EnsureSpace ensure_space(this);
EMIT(0xF7);
@ -1015,7 +972,6 @@ void Assembler::imul(Register dst, Operand src) {
emit_operand(dst, src);
}
void Assembler::imul(Register dst, Register src, int32_t imm32) {
imul(dst, Operand(src), imm32);
}
@ -1033,7 +989,6 @@ void Assembler::imul(Register dst, Operand src, int32_t imm32) {
}
}
void Assembler::inc(Register dst) {
EnsureSpace ensure_space(this);
EMIT(0x40 | dst.code());
@ -1051,14 +1006,12 @@ void Assembler::lea(Register dst, Operand src) {
emit_operand(dst, src);
}
void Assembler::mul(Register src) {
EnsureSpace ensure_space(this);
EMIT(0xF7);
EMIT(0xE0 | src.code());
}
void Assembler::neg(Register dst) {
EnsureSpace ensure_space(this);
EMIT(0xF7);
@ -1071,7 +1024,6 @@ void Assembler::neg(Operand dst) {
emit_operand(ebx, dst);
}
void Assembler::not_(Register dst) {
EnsureSpace ensure_space(this);
EMIT(0xF7);
@ -1084,7 +1036,6 @@ void Assembler::not_(Operand dst) {
emit_operand(edx, dst);
}
void Assembler::or_(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
emit_arith(1, Operand(dst), Immediate(imm32));
@ -1107,7 +1058,6 @@ void Assembler::or_(Operand dst, Register src) {
emit_operand(src, dst);
}
void Assembler::rcl(Register dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
DCHECK(is_uint5(imm8)); // illegal shift count
@ -1121,7 +1071,6 @@ void Assembler::rcl(Register dst, uint8_t imm8) {
}
}
void Assembler::rcr(Register dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
DCHECK(is_uint5(imm8)); // illegal shift count
@ -1438,25 +1387,21 @@ void Assembler::bsf(Register dst, Operand src) {
emit_operand(dst, src);
}
void Assembler::hlt() {
EnsureSpace ensure_space(this);
EMIT(0xF4);
}
void Assembler::int3() {
EnsureSpace ensure_space(this);
EMIT(0xCC);
}
void Assembler::nop() {
EnsureSpace ensure_space(this);
EMIT(0x90);
}
void Assembler::ret(int imm16) {
EnsureSpace ensure_space(this);
DCHECK(is_uint16(imm16));
@ -1469,14 +1414,12 @@ void Assembler::ret(int imm16) {
}
}
void Assembler::ud2() {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x0B);
}
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
@ -1508,7 +1451,6 @@ void Assembler::print(const Label* L) {
}
}
void Assembler::bind_to(Label* L, int pos) {
EnsureSpace ensure_space(this);
DCHECK(0 <= pos && pos <= pc_offset()); // must have a valid binding position
@ -1564,7 +1506,6 @@ void Assembler::bind_to(Label* L, int pos) {
L->bind_to(pos);
}
void Assembler::bind(Label* L) {
EnsureSpace ensure_space(this);
DCHECK(!L->is_bound()); // label can only be bound once
@ -1696,7 +1637,6 @@ void Assembler::jmp(Operand adr) {
emit_operand(esp, adr);
}
void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
DCHECK(RelocInfo::IsCodeTarget(rmode));
@ -1704,13 +1644,12 @@ void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) {
emit(code, rmode);
}
void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
EnsureSpace ensure_space(this);
DCHECK(0 <= cc && static_cast<int>(cc) < 16);
if (L->is_bound()) {
const int short_size = 2;
const int long_size = 6;
const int long_size = 6;
int offs = L->pos() - pc_offset();
DCHECK_LE(offs, 0);
if (is_int8(offs - short_size)) {
@ -1749,7 +1688,6 @@ void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
}
}
void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
DCHECK((0 <= cc) && (static_cast<int>(cc) < 16));
@ -1763,7 +1701,6 @@ void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) {
}
}
void Assembler::j(Condition cc, Handle<Code> code, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
// 0000 1111 1000 tttn #32-bit disp
@ -1772,7 +1709,6 @@ void Assembler::j(Condition cc, Handle<Code> code, RelocInfo::Mode rmode) {
emit(code, rmode);
}
// FPU instructions.
void Assembler::fld(int i) {
@ -1780,34 +1716,29 @@ void Assembler::fld(int i) {
emit_farith(0xD9, 0xC0, i);
}
void Assembler::fstp(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDD, 0xD8, i);
}
void Assembler::fld1() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xE8);
}
void Assembler::fldpi() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xEB);
}
void Assembler::fldz() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xEE);
}
void Assembler::fldln2() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
@ -1894,88 +1825,75 @@ void Assembler::fistp_d(Operand adr) {
emit_operand(edi, adr);
}
void Assembler::fabs() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xE1);
}
void Assembler::fchs() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xE0);
}
void Assembler::fcos() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xFF);
}
void Assembler::fsin() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xFE);
}
void Assembler::fptan() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xF2);
}
void Assembler::fyl2x() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xF1);
}
void Assembler::f2xm1() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xF0);
}
void Assembler::fscale() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xFD);
}
void Assembler::fninit() {
EnsureSpace ensure_space(this);
EMIT(0xDB);
EMIT(0xE3);
}
void Assembler::fadd(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xC0, i);
}
void Assembler::fadd_i(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xD8, 0xC0, i);
}
void Assembler::fsub(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xE8, i);
}
void Assembler::fsub_i(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xD8, 0xE0, i);
@ -1987,168 +1905,142 @@ void Assembler::fisub_s(Operand adr) {
emit_operand(esp, adr);
}
void Assembler::fmul_i(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xD8, 0xC8, i);
}
void Assembler::fmul(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xC8, i);
}
void Assembler::fdiv(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xF8, i);
}
void Assembler::fdiv_i(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xD8, 0xF0, i);
}
void Assembler::faddp(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDE, 0xC0, i);
}
void Assembler::fsubp(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDE, 0xE8, i);
}
void Assembler::fsubrp(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDE, 0xE0, i);
}
void Assembler::fmulp(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDE, 0xC8, i);
}
void Assembler::fdivp(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDE, 0xF8, i);
}
void Assembler::fprem() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xF8);
}
void Assembler::fprem1() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xF5);
}
void Assembler::fxch(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xD9, 0xC8, i);
}
void Assembler::fincstp() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xF7);
}
void Assembler::ffree(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDD, 0xC0, i);
}
void Assembler::ftst() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xE4);
}
void Assembler::fucomp(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDD, 0xE8, i);
}
void Assembler::fucompp() {
EnsureSpace ensure_space(this);
EMIT(0xDA);
EMIT(0xE9);
}
void Assembler::fucomi(int i) {
EnsureSpace ensure_space(this);
EMIT(0xDB);
EMIT(0xE8 + i);
}
void Assembler::fucomip() {
EnsureSpace ensure_space(this);
EMIT(0xDF);
EMIT(0xE9);
}
void Assembler::fcompp() {
EnsureSpace ensure_space(this);
EMIT(0xDE);
EMIT(0xD9);
}
void Assembler::fnstsw_ax() {
EnsureSpace ensure_space(this);
EMIT(0xDF);
EMIT(0xE0);
}
void Assembler::fwait() {
EnsureSpace ensure_space(this);
EMIT(0x9B);
}
void Assembler::frndint() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xFC);
}
void Assembler::fnclex() {
EnsureSpace ensure_space(this);
EMIT(0xDB);
EMIT(0xE2);
}
void Assembler::sahf() {
EnsureSpace ensure_space(this);
EMIT(0x9E);
}
void Assembler::setcc(Condition cc, Register reg) {
DCHECK(reg.is_byte_register());
EnsureSpace ensure_space(this);
@ -2177,7 +2069,6 @@ void Assembler::cvttsd2si(Register dst, Operand src) {
emit_operand(dst, src);
}
void Assembler::cvtsd2si(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
@ -2406,7 +2297,6 @@ void Assembler::ucomisd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
void Assembler::roundss(XMMRegister dst, XMMRegister src, RoundingMode mode) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
@ -2419,7 +2309,6 @@ void Assembler::roundss(XMMRegister dst, XMMRegister src, RoundingMode mode) {
EMIT(static_cast<byte>(mode) | 0x8);
}
void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
@ -2432,7 +2321,6 @@ void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
EMIT(static_cast<byte>(mode) | 0x8);
}
void Assembler::movmskpd(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
@ -2441,7 +2329,6 @@ void Assembler::movmskpd(Register dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
void Assembler::movmskps(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
@ -2465,7 +2352,6 @@ void Assembler::minsd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
@ -2595,7 +2481,6 @@ void Assembler::movd(Operand dst, XMMRegister src) {
emit_sse_operand(src, dst);
}
void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
DCHECK(IsEnabled(SSE4_1));
DCHECK(is_uint8(imm8));
@ -2671,7 +2556,6 @@ void Assembler::psllq(XMMRegister reg, uint8_t shift) {
EMIT(shift);
}
void Assembler::psllq(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
@ -2689,7 +2573,6 @@ void Assembler::psrlq(XMMRegister reg, uint8_t shift) {
EMIT(shift);
}
void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
@ -2886,7 +2769,6 @@ void Assembler::minss(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
// AVX instructions
void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
Operand src2) {
@ -3137,22 +3019,18 @@ void Assembler::emit_sse_operand(XMMRegister reg, Operand adr) {
emit_operand(ireg, adr);
}
void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
EMIT(0xC0 | dst.code() << 3 | src.code());
}
void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
EMIT(0xC0 | dst.code() << 3 | src.code());
}
void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
EMIT(0xC0 | (dst.code() << 3) | src.code());
}
void Assembler::emit_vex_prefix(XMMRegister vreg, VectorLength l, SIMDPrefix pp,
LeadingOpcode mm, VexW w) {
if (mm != k0F || w != kW0) {
@ -3166,14 +3044,12 @@ void Assembler::emit_vex_prefix(XMMRegister vreg, VectorLength l, SIMDPrefix pp,
}
}
void Assembler::emit_vex_prefix(Register vreg, VectorLength l, SIMDPrefix pp,
LeadingOpcode mm, VexW w) {
XMMRegister ivreg = XMMRegister::from_code(vreg.code());
emit_vex_prefix(ivreg, l, pp, mm, w);
}
void Assembler::GrowBuffer() {
DCHECK(buffer_overflow());
DCHECK_EQ(buffer_start_, buffer_->start());
@ -3227,7 +3103,6 @@ void Assembler::GrowBuffer() {
DCHECK(!buffer_overflow());
}
void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
DCHECK(is_uint8(op1) && is_uint8(op2)); // wrong opcode
DCHECK(is_uint8(imm8));
@ -3237,7 +3112,6 @@ void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
EMIT(imm8);
}
void Assembler::emit_arith(int sel, Operand dst, const Immediate& x) {
DCHECK((0 <= sel) && (sel <= 7));
Register ireg = Register::from_code(sel);
@ -3295,7 +3169,6 @@ void Assembler::emit_operand(int code, Operand adr) {
}
}
void Assembler::emit_label(Label* label) {
if (label->is_bound()) {
internal_reference_positions_.push_back(pc_offset());
@ -3305,46 +3178,42 @@ void Assembler::emit_label(Label* label) {
}
}
void Assembler::emit_farith(int b1, int b2, int i) {
DCHECK(is_uint8(b1) && is_uint8(b2)); // wrong opcode
DCHECK(0 <= i && i < 8); // illegal stack offset
DCHECK(0 <= i && i < 8); // illegal stack offset
EMIT(b1);
EMIT(b2 + i);
}
void Assembler::db(uint8_t data) {
EnsureSpace ensure_space(this);
EMIT(data);
}
void Assembler::dd(uint32_t data) {
EnsureSpace ensure_space(this);
emit(data);
}
void Assembler::dq(uint64_t data) {
EnsureSpace ensure_space(this);
emit_q(data);
}
void Assembler::dd(Label* label) {
EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
emit_label(label);
}
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (!ShouldRecordRelocInfo(rmode)) return;
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
reloc_info_writer.Write(&rinfo);
}
#undef EMIT
} // namespace internal
} // namespace v8

View File

@ -34,17 +34,17 @@
// A light-weight IA32 Assembler.
#ifndef V8_IA32_ASSEMBLER_IA32_H_
#define V8_IA32_ASSEMBLER_IA32_H_
#ifndef V8_CODEGEN_IA32_ASSEMBLER_IA32_H_
#define V8_CODEGEN_IA32_ASSEMBLER_IA32_H_
#include <deque>
#include "src/codegen/assembler.h"
#include "src/codegen/ia32/constants-ia32.h"
#include "src/codegen/ia32/register-ia32.h"
#include "src/codegen/ia32/sse-instr.h"
#include "src/codegen/label.h"
#include "src/execution/isolate.h"
#include "src/ia32/constants-ia32.h"
#include "src/ia32/register-ia32.h"
#include "src/ia32/sse-instr.h"
#include "src/objects/smi.h"
#include "src/utils/utils.h"
@ -55,35 +55,34 @@ class SafepointTableBuilder;
enum Condition {
// any value < 0 is considered no_condition
no_condition = -1,
no_condition = -1,
overflow = 0,
no_overflow = 1,
below = 2,
above_equal = 3,
equal = 4,
not_equal = 5,
below_equal = 6,
above = 7,
negative = 8,
positive = 9,
parity_even = 10,
parity_odd = 11,
less = 12,
overflow = 0,
no_overflow = 1,
below = 2,
above_equal = 3,
equal = 4,
not_equal = 5,
below_equal = 6,
above = 7,
negative = 8,
positive = 9,
parity_even = 10,
parity_odd = 11,
less = 12,
greater_equal = 13,
less_equal = 14,
greater = 15,
less_equal = 14,
greater = 15,
// aliases
carry = below,
not_carry = above_equal,
zero = equal,
not_zero = not_equal,
sign = negative,
not_sign = positive
carry = below,
not_carry = above_equal,
zero = equal,
not_zero = not_equal,
sign = negative,
not_sign = positive
};
// Returns the equivalent of !cc.
// Negation of the default no_condition (-1) results in a non-default
// no_condition value (-2). As long as tests for no_condition check
@ -92,7 +91,6 @@ inline Condition NegateCondition(Condition cc) {
return static_cast<Condition>(cc ^ 1);
}
enum RoundingMode {
kRoundToNearest = 0x0,
kRoundDown = 0x1,
@ -120,9 +118,7 @@ class Immediate {
static Immediate EmbeddedNumber(double number); // Smi or HeapNumber.
static Immediate EmbeddedStringConstant(const StringConstantBase* str);
static Immediate CodeRelativeOffset(Label* label) {
return Immediate(label);
}
static Immediate CodeRelativeOffset(Label* label) { return Immediate(label); }
bool is_heap_object_request() const {
DCHECK_IMPLIES(is_heap_object_request_,
@ -195,7 +191,6 @@ class Immediate {
friend class MacroAssembler;
};
// -----------------------------------------------------------------------------
// Machine instruction Operands
@ -341,14 +336,14 @@ class Displacement {
void print() {
PrintF("%s (%x) ", (type() == UNCONDITIONAL_JUMP ? "jmp" : "[other]"),
NextField::decode(data_));
NextField::decode(data_));
}
private:
int data_;
class TypeField: public BitField<Type, 0, 2> {};
class NextField: public BitField<int, 2, 32-2> {};
class TypeField : public BitField<Type, 0, 2> {};
class NextField : public BitField<int, 2, 32 - 2> {};
void init(Label* L, Type type);
};
@ -607,7 +602,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void div(Operand src);
// Signed multiply instructions.
void imul(Register src); // edx:eax = eax * src.
void imul(Register src); // edx:eax = eax * src.
void imul(Register dst, Register src) { imul(dst, Operand(src)); }
void imul(Register dst, Operand src); // dst = dst * src.
void imul(Register dst, Register src, int32_t imm32); // dst = src * imm32.
@ -619,7 +614,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void lea(Register dst, Operand src);
// Unsigned multiply instruction.
void mul(Register src); // edx:eax = eax * reg.
void mul(Register src); // edx:eax = eax * reg.
void neg(Register dst);
void neg(Operand dst);
@ -749,9 +744,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void jmp_rel(int offset);
// Conditional jumps
void j(Condition cc,
Label* L,
Label::Distance distance = Label::kFar);
void j(Condition cc, Label* L, Label::Distance distance = Label::kFar);
void j(Condition cc, byte* entry, RelocInfo::Mode rmode);
void j(Condition cc, Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
@ -1671,10 +1664,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
private:
uint32_t long_at(int pos) {
uint32_t long_at(int pos) {
return ReadUnalignedValue<uint32_t>(addr_at(pos));
}
void long_at_put(int pos, uint32_t x) {
void long_at_put(int pos, uint32_t x) {
WriteUnalignedValue(addr_at(pos), x);
}
@ -1769,7 +1762,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
std::map<Label*, std::vector<int>> label_farjmp_maps_;
};
// Helper class that ensures that there is enough space for generating
// instructions and relocation information. The constructor makes
// sure that there is enough space and (in debug mode) the destructor
@ -1800,4 +1792,4 @@ class EnsureSpace {
} // namespace internal
} // namespace v8
#endif // V8_IA32_ASSEMBLER_IA32_H_
#endif // V8_CODEGEN_IA32_ASSEMBLER_IA32_H_

View File

@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_IA32_CONSTANTS_IA32_H_
#define V8_IA32_CONSTANTS_IA32_H_
#ifndef V8_CODEGEN_IA32_CONSTANTS_IA32_H_
#define V8_CODEGEN_IA32_CONSTANTS_IA32_H_
#include "src/common/globals.h"
@ -20,4 +20,4 @@ constexpr size_t kMaxPCRelativeCodeRangeInMB = 0;
} // namespace internal
} // namespace v8
#endif // V8_IA32_CONSTANTS_IA32_H_
#endif // V8_CODEGEN_IA32_CONSTANTS_IA32_H_

View File

@ -10,12 +10,12 @@
#include "src/codegen/callable.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/external-reference-table.h"
#include "src/codegen/ia32/assembler-ia32-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/debug/debug.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames-inl.h"
#include "src/heap/heap-inl.h" // For MemoryChunk.
#include "src/ia32/assembler-ia32-inl.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/runtime/runtime.h"
@ -25,7 +25,7 @@
// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
#if 0
#include "src/ia32/macro-assembler-ia32.h"
#include "src/codegen/ia32/macro-assembler-ia32.h"
#endif
namespace v8 {
@ -493,9 +493,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
Label::kNear);
CheckPageFlag(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask,
zero,
&done,
MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
Label::kNear);
CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
@ -649,14 +647,12 @@ void TurboAssembler::SarPair_cl(Register high, Register low) {
bind(&done);
}
void MacroAssembler::CmpObjectType(Register heap_object,
InstanceType type,
void MacroAssembler::CmpObjectType(Register heap_object, InstanceType type,
Register map) {
mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
CmpInstanceType(map, type);
}
void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
cmpw(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
}
@ -692,7 +688,6 @@ void MacroAssembler::AssertFunction(Register object) {
}
}
void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
@ -750,7 +745,6 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
}
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
@ -852,7 +846,6 @@ void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type,
mov(ExternalReferenceAsOperand(c_function_address, scratch), edx);
}
void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
// Optionally save all XMM registers.
if (save_doubles) {
@ -897,7 +890,6 @@ void MacroAssembler::EnterApiExitFrame(int argc, Register scratch) {
EnterExitFrameEpilogue(argc, false);
}
void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
// Optionally restore all XMM registers.
if (save_doubles) {
@ -975,8 +967,7 @@ void MacroAssembler::PopStackHandler(Register scratch) {
add(esp, Immediate(StackHandlerConstants::kSize - kSystemPointerSize));
}
void MacroAssembler::CallRuntime(const Runtime::Function* f,
int num_arguments,
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles) {
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
@ -1802,7 +1793,6 @@ void TurboAssembler::Abort(AbortReason reason) {
int3();
}
void TurboAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
int frame_alignment = base::OS::ActivationFrameAlignment();
if (frame_alignment != 0) {

View File

@ -6,13 +6,13 @@
#error This header must be included via macro-assembler.h
#endif
#ifndef V8_IA32_MACRO_ASSEMBLER_IA32_H_
#define V8_IA32_MACRO_ASSEMBLER_IA32_H_
#ifndef V8_CODEGEN_IA32_MACRO_ASSEMBLER_IA32_H_
#define V8_CODEGEN_IA32_MACRO_ASSEMBLER_IA32_H_
#include "src/codegen/assembler.h"
#include "src/codegen/bailout-reason.h"
#include "src/codegen/ia32/assembler-ia32.h"
#include "src/common/globals.h"
#include "src/ia32/assembler-ia32.h"
namespace v8 {
namespace internal {
@ -519,7 +519,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// JavaScript invokes
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
@ -564,7 +563,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
j(not_zero, smi_label, distance);
}
template<typename Field>
template <typename Field>
void DecodeField(Register reg) {
static const int shift = Field::kShift;
static const int mask = Field::kMask >> Field::kShift;
@ -716,4 +715,4 @@ inline Operand NativeContextOperand() {
} // namespace internal
} // namespace v8
#endif // V8_IA32_MACRO_ASSEMBLER_IA32_H_
#endif // V8_CODEGEN_IA32_MACRO_ASSEMBLER_IA32_H_

View File

@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_IA32_REGISTER_IA32_H_
#define V8_IA32_REGISTER_IA32_H_
#ifndef V8_CODEGEN_IA32_REGISTER_IA32_H_
#define V8_CODEGEN_IA32_REGISTER_IA32_H_
#include "src/codegen/register.h"
#include "src/codegen/reglist.h"
@ -163,4 +163,4 @@ constexpr Register kSpeculationPoisonRegister = no_reg;
} // namespace internal
} // namespace v8
#endif // V8_IA32_REGISTER_IA32_H_
#endif // V8_CODEGEN_IA32_REGISTER_IA32_H_

View File

@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_IA32_SSE_INSTR_H_
#define V8_IA32_SSE_INSTR_H_
#ifndef V8_CODEGEN_IA32_SSE_INSTR_H_
#define V8_CODEGEN_IA32_SSE_INSTR_H_
#define SSE2_INSTRUCTION_LIST(V) \
V(packsswb, 66, 0F, 63) \
@ -79,4 +79,4 @@
V(pmovzxwd, 66, 0F, 38, 33) \
V(ptest, 66, 0F, 38, 17)
#endif // V8_IA32_SSE_INSTR_H_
#endif // V8_CODEGEN_IA32_SSE_INSTR_H_

View File

@ -9,7 +9,7 @@
#include "src/codegen/macro-assembler.h"
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
#endif
#endif // V8_CODEGEN_MACRO_ASSEMBLER_INL_H_

View File

@ -31,27 +31,27 @@ enum AllocationFlags {
// This is the only place allowed to include the platform-specific headers.
#define INCLUDED_FROM_MACRO_ASSEMBLER_H
#if V8_TARGET_ARCH_IA32
#include "src/ia32/macro-assembler-ia32.h"
#include "src/codegen/ia32/macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "src/x64/macro-assembler-x64.h"
#include "src/codegen/x64/macro-assembler-x64.h"
#elif V8_TARGET_ARCH_ARM64
#include "src/arm64/constants-arm64.h"
#include "src/arm64/macro-assembler-arm64.h"
#include "src/codegen/arm64/constants-arm64.h"
#include "src/codegen/arm64/macro-assembler-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "src/arm/constants-arm.h"
#include "src/arm/macro-assembler-arm.h"
#include "src/codegen/arm/constants-arm.h"
#include "src/codegen/arm/macro-assembler-arm.h"
#elif V8_TARGET_ARCH_PPC
#include "src/ppc/constants-ppc.h"
#include "src/ppc/macro-assembler-ppc.h"
#include "src/codegen/ppc/constants-ppc.h"
#include "src/codegen/ppc/macro-assembler-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/constants-mips.h"
#include "src/mips/macro-assembler-mips.h"
#include "src/codegen/mips/constants-mips.h"
#include "src/codegen/mips/macro-assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/constants-mips64.h"
#include "src/mips64/macro-assembler-mips64.h"
#include "src/codegen/mips64/constants-mips64.h"
#include "src/codegen/mips64/macro-assembler-mips64.h"
#elif V8_TARGET_ARCH_S390
#include "src/s390/constants-s390.h"
#include "src/s390/macro-assembler-s390.h"
#include "src/codegen/s390/constants-s390.h"
#include "src/codegen/s390/macro-assembler-s390.h"
#else
#error Unsupported target architecture.
#endif

View File

@ -33,11 +33,10 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
#ifndef V8_CODEGEN_MIPS_ASSEMBLER_MIPS_INL_H_
#define V8_CODEGEN_MIPS_ASSEMBLER_MIPS_INL_H_
#ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
#define V8_MIPS_ASSEMBLER_MIPS_INL_H_
#include "src/mips/assembler-mips.h"
#include "src/codegen/mips/assembler-mips.h"
#include "src/codegen/assembler.h"
#include "src/debug/debug.h"
@ -53,9 +52,7 @@ bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(MIPS_SIMD); }
// -----------------------------------------------------------------------------
// Operand and MemOperand.
bool Operand::is_reg() const {
return rm_.is_valid();
}
bool Operand::is_reg() const { return rm_.is_valid(); }
int32_t Operand::immediate() const {
DCHECK(!is_reg());
@ -75,7 +72,6 @@ void RelocInfo::apply(intptr_t delta) {
}
}
Address RelocInfo::target_address() {
DCHECK(IsCodeTargetMode(rmode_) || IsRuntimeEntry(rmode_) ||
IsWasmCall(rmode_));
@ -108,14 +104,9 @@ Address RelocInfo::target_address_address() {
}
}
Address RelocInfo::constant_pool_entry_address() {
UNREACHABLE();
}
Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); }
int RelocInfo::target_address_size() {
return Assembler::kSpecialTargetSize;
}
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
void Assembler::deserialization_set_special_target_at(
Address instruction_payload, Code code, Address target) {
@ -228,7 +219,6 @@ Address RelocInfo::target_internal_reference() {
}
}
Address RelocInfo::target_internal_reference_address() {
DCHECK(IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
return pc_;
@ -284,14 +274,12 @@ Handle<Code> Assembler::relative_code_target_object_handle_at(
// -----------------------------------------------------------------------------
// Assembler.
void Assembler::CheckBuffer() {
if (buffer_space() <= kGap) {
GrowBuffer();
}
}
void Assembler::CheckForEmitInForbiddenSlot() {
if (!is_buffer_growth_blocked()) {
CheckBuffer();
@ -306,7 +294,6 @@ void Assembler::CheckForEmitInForbiddenSlot() {
}
}
void Assembler::EmitHelper(Instr x, CompactBranchType is_compact_branch) {
if (IsPrevInstrCompactBranch()) {
if (Instruction::IsForbiddenAfterBranchInstr(x)) {
@ -356,4 +343,4 @@ EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
} // namespace internal
} // namespace v8
#endif // V8_MIPS_ASSEMBLER_MIPS_INL_H_
#endif // V8_CODEGEN_MIPS_ASSEMBLER_MIPS_INL_H_

View File

@ -32,9 +32,8 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
#ifndef V8_MIPS_ASSEMBLER_MIPS_H_
#define V8_MIPS_ASSEMBLER_MIPS_H_
#ifndef V8_CODEGEN_MIPS_ASSEMBLER_MIPS_H_
#define V8_CODEGEN_MIPS_ASSEMBLER_MIPS_H_
#include <stdio.h>
@ -43,8 +42,8 @@
#include "src/codegen/assembler.h"
#include "src/codegen/external-reference.h"
#include "src/codegen/label.h"
#include "src/mips/constants-mips.h"
#include "src/mips/register-mips.h"
#include "src/codegen/mips/constants-mips.h"
#include "src/codegen/mips/register-mips.h"
#include "src/objects/smi.h"
namespace v8 {
@ -121,25 +120,19 @@ class Operand {
// friend class MacroAssembler;
};
// On MIPS we have only one addressing mode with base_reg + offset.
// Class MemOperand represents a memory operand in load and store instructions.
class MemOperand : public Operand {
public:
// Immediate value attached to offset.
enum OffsetAddend {
offset_minus_one = -1,
offset_zero = 0
};
enum OffsetAddend { offset_minus_one = -1, offset_zero = 0 };
explicit MemOperand(Register rn, int32_t offset = 0);
explicit MemOperand(Register rn, int32_t unit, int32_t multiplier,
OffsetAddend offset_addend = offset_zero);
int32_t offset() const { return offset_; }
bool OffsetIsInt16Encodable() const {
return is_int16(offset_);
}
bool OffsetIsInt16Encodable() const { return is_int16(offset_); }
private:
int32_t offset_;
@ -159,7 +152,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
explicit Assembler(const AssemblerOptions&,
std::unique_ptr<AssemblerBuffer> = {});
virtual ~Assembler() { }
virtual ~Assembler() {}
// GetCode emits any pending (non-emitted) code and fills the descriptor desc.
static constexpr int kNoHandlerTable = 0;
@ -361,7 +354,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
sll(zero_reg, nop_rt_reg, type, true);
}
// --------Branch-and-jump-instructions----------
// We don't use likely variant of instructions.
void b(int16_t offset);
@ -484,7 +476,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void jic(Register rt, int16_t offset);
void jialc(Register rt, int16_t offset);
// -------Data-processing-instructions---------
// Arithmetic.
@ -559,12 +550,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void auipc(Register rs, int16_t imm16);
void aluipc(Register rs, int16_t imm16);
// ----------------Prefetch--------------------
void pref(int32_t hint, const MemOperand& rs);
// -------------Misc-instructions--------------
// Break / Trap instructions.
@ -725,8 +714,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void cvt_d_s(FPURegister fd, FPURegister fs);
// Conditions and branches for MIPSr6.
void cmp(FPUCondition cond, SecondaryField fmt,
FPURegister fd, FPURegister ft, FPURegister fs);
void cmp(FPUCondition cond, SecondaryField fmt, FPURegister fd,
FPURegister ft, FPURegister fs);
void cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
void cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
@ -740,8 +729,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
// Conditions and branches for non MIPSr6.
void c(FPUCondition cond, SecondaryField fmt,
FPURegister ft, FPURegister fs, uint16_t cc = 0);
void c(FPUCondition cond, SecondaryField fmt, FPURegister ft, FPURegister fs,
uint16_t cc = 0);
void c_s(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
void c_d(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
@ -1358,9 +1347,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
assem_->StartBlockTrampolinePool();
}
~BlockTrampolinePoolScope() {
assem_->EndBlockTrampolinePool();
}
~BlockTrampolinePoolScope() { assem_->EndBlockTrampolinePool(); }
private:
Assembler* assem_;
@ -1377,9 +1364,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
assem_->StartBlockGrowBuffer();
}
~BlockGrowBufferScope() {
assem_->EndBlockGrowBuffer();
}
~BlockGrowBufferScope() { assem_->EndBlockGrowBuffer(); }
private:
Assembler* assem_;
@ -1555,9 +1540,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
no_trampoline_pool_before_ = pc_offset;
}
void StartBlockTrampolinePool() {
trampoline_pool_blocked_nesting_++;
}
void StartBlockTrampolinePool() { trampoline_pool_blocked_nesting_++; }
void EndBlockTrampolinePool() {
trampoline_pool_blocked_nesting_--;
@ -1570,13 +1553,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
return trampoline_pool_blocked_nesting_ > 0;
}
bool has_exception() const {
return internal_trampoline_exception_;
}
bool has_exception() const { return internal_trampoline_exception_; }
bool is_trampoline_emitted() const {
return trampoline_emitted_;
}
bool is_trampoline_emitted() const { return trampoline_emitted_; }
// Temporarily block automatic assembly buffer growth.
void StartBlockGrowBuffer() {
@ -1589,9 +1568,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
block_buffer_growth_ = false;
}
bool is_buffer_growth_blocked() const {
return block_buffer_growth_;
}
bool is_buffer_growth_blocked() const { return block_buffer_growth_; }
void EmitForbiddenSlotInstruction() {
if (IsPrevInstrCompactBranch()) {
@ -1684,12 +1661,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void GenInstrRegister(Opcode opcode, Register rs, Register rt, Register rd,
uint16_t sa = 0, SecondaryField func = nullptrSF);
void GenInstrRegister(Opcode opcode,
Register rs,
Register rt,
uint16_t msb,
uint16_t lsb,
SecondaryField func);
void GenInstrRegister(Opcode opcode, Register rs, Register rt, uint16_t msb,
uint16_t lsb, SecondaryField func);
void GenInstrRegister(Opcode opcode, SecondaryField fmt, FPURegister ft,
FPURegister fs, FPURegister fd,
@ -1725,9 +1698,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Opcode opcode, int32_t offset26,
CompactBranchType is_compact_branch = CompactBranchType::NO);
void GenInstrJump(Opcode opcode,
uint32_t address);
void GenInstrJump(Opcode opcode, uint32_t address);
// MSA
void GenInstrMsaI8(SecondaryField operation, uint32_t imm8, MSARegister ws,
@ -1833,12 +1804,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
free_slot_count_ = slot_count;
end_ = start + slot_count * kTrampolineSlotsSize;
}
int start() {
return start_;
}
int end() {
return end_;
}
int start() { return start_; }
int end() { return end_; }
int take_slot() {
int trampoline_slot = kInvalidSlotPos;
if (free_slot_count_ <= 0) {
@ -1916,8 +1883,7 @@ class UseScratchRegisterScope {
RegList old_available_;
};
} // namespace internal
} // namespace v8
#endif // V8_MIPS_ASSEMBLER_MIPS_H_
#endif // V8_CODEGEN_MIPS_ASSEMBLER_MIPS_H_

View File

@ -4,35 +4,21 @@
#if V8_TARGET_ARCH_MIPS
#include "src/mips/constants-mips.h"
#include "src/codegen/mips/constants-mips.h"
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
// Registers.
// These register names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
const char* Registers::names_[kNumSimuRegisters] = {
"zero_reg",
"at",
"v0", "v1",
"a0", "a1", "a2", "a3",
"t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
"t8", "t9",
"k0", "k1",
"gp",
"sp",
"fp",
"ra",
"LO", "HI",
"pc"
};
"zero_reg", "at", "v0", "v1", "a0", "a1", "a2", "a3", "t0",
"t1", "t2", "t3", "t4", "t5", "t6", "t7", "s0", "s1",
"s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0",
"k1", "gp", "sp", "fp", "ra", "LO", "HI", "pc"};
// List of alias names which can be used when referring to MIPS registers.
const Registers::RegisterAlias Registers::aliases_[] = {
@ -52,7 +38,6 @@ const char* Registers::Name(int reg) {
return result;
}
int Registers::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumSimuRegisters; i++) {
@ -74,13 +59,10 @@ int Registers::Number(const char* name) {
return kInvalidRegister;
}
const char* FPURegisters::names_[kNumFPURegisters] = {
"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11",
"f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
"f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
};
"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10",
"f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
"f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"};
// List of alias names which can be used when referring to MIPS registers.
const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
@ -96,7 +78,6 @@ const char* FPURegisters::Name(int creg) {
return result;
}
int FPURegisters::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumFPURegisters; i++) {

View File

@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MIPS_CONSTANTS_MIPS_H_
#define V8_MIPS_CONSTANTS_MIPS_H_
#ifndef V8_CODEGEN_MIPS_CONSTANTS_MIPS_H_
#define V8_CODEGEN_MIPS_CONSTANTS_MIPS_H_
#include "src/codegen/cpu-features.h"
// UNIMPLEMENTED_ macro for MIPS.
#ifdef DEBUG
#define UNIMPLEMENTED_MIPS() \
v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
#define UNIMPLEMENTED_MIPS() \
v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
__FILE__, __LINE__, __func__)
#else
#define UNIMPLEMENTED_MIPS()
@ -24,45 +24,38 @@ enum ArchVariants {
};
#ifdef _MIPS_ARCH_MIPS32R2
static const ArchVariants kArchVariant = kMips32r2;
static const ArchVariants kArchVariant = kMips32r2;
#elif _MIPS_ARCH_MIPS32R6
static const ArchVariants kArchVariant = kMips32r6;
static const ArchVariants kArchVariant = kMips32r6;
#elif _MIPS_ARCH_LOONGSON
// The loongson flag refers to the LOONGSON architectures based on MIPS-III,
// which predates (and is a subset of) the mips32r2 and r1 architectures.
static const ArchVariants kArchVariant = kLoongson;
static const ArchVariants kArchVariant = kLoongson;
#elif _MIPS_ARCH_MIPS32RX
// This flags referred to compatibility mode that creates universal code that
// can run on any MIPS32 architecture revision. The dynamically generated code
// by v8 is specialized for the MIPS host detected in runtime probing.
static const ArchVariants kArchVariant = kMips32r1;
static const ArchVariants kArchVariant = kMips32r1;
#else
static const ArchVariants kArchVariant = kMips32r1;
static const ArchVariants kArchVariant = kMips32r1;
#endif
enum Endianness {
kLittle,
kBig
};
enum Endianness { kLittle, kBig };
#if defined(V8_TARGET_LITTLE_ENDIAN)
static const Endianness kArchEndian = kLittle;
static const Endianness kArchEndian = kLittle;
#elif defined(V8_TARGET_BIG_ENDIAN)
static const Endianness kArchEndian = kBig;
static const Endianness kArchEndian = kBig;
#else
#error Unknown endianness
#endif
enum FpuMode {
kFP32,
kFP64,
kFPXX
};
enum FpuMode { kFP32, kFP64, kFPXX };
#if defined(FPU_MODE_FP32)
static const FpuMode kFpuMode = kFP32;
static const FpuMode kFpuMode = kFP32;
#elif defined(FPU_MODE_FP64)
static const FpuMode kFpuMode = kFP64;
static const FpuMode kFpuMode = kFP64;
#elif defined(FPU_MODE_FPXX)
#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS32R6)
static const FpuMode kFpuMode = kFPXX;
@ -73,11 +66,11 @@ static const FpuMode kFpuMode = kFPXX;
static const FpuMode kFpuMode = kFP32;
#endif
#if(defined(__mips_hard_float) && __mips_hard_float != 0)
#if defined(__mips_hard_float) && __mips_hard_float != 0
// Use floating-point coprocessor instructions. This flag is raised when
// -mhard-float is passed to the compiler.
const bool IsMipsSoftFloatABI = false;
#elif(defined(__mips_soft_float) && __mips_soft_float != 0)
#elif defined(__mips_soft_float) && __mips_soft_float != 0
// This flag is raised when -msoft-float is passed to the compiler.
// Although FPU is a base requirement for v8, soft-float ABI is used
// on soft-float systems with FPU kernel emulation.
@ -101,8 +94,7 @@ const uint32_t kHoleNanLower32Offset = 4;
#define IsFpxxMode() (kFpuMode == kFPXX)
#ifndef _MIPS_ARCH_MIPS32RX
#define IsMipsArchVariant(check) \
(kArchVariant == check)
#define IsMipsArchVariant(check) (kArchVariant == check)
#else
#define IsMipsArchVariant(check) \
(CpuFeatures::IsSupported(static_cast<CpuFeature>(check)))
@ -203,11 +195,8 @@ const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
const uint32_t kFCSRNaN2008FlagMask = 1 << kFCSRNaN2008FlagBit;
const uint32_t kFCSRFlagMask =
kFCSRInexactFlagMask |
kFCSRUnderflowFlagMask |
kFCSROverflowFlagMask |
kFCSRDivideByZeroFlagMask |
kFCSRInvalidOpFlagMask;
kFCSRInexactFlagMask | kFCSRUnderflowFlagMask | kFCSROverflowFlagMask |
kFCSRDivideByZeroFlagMask | kFCSRInvalidOpFlagMask;
const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
@ -310,22 +299,21 @@ const uint32_t kMaxWatchpointCode = 31;
const uint32_t kMaxStopCode = 127;
STATIC_ASSERT(kMaxWatchpointCode < kMaxStopCode);
// ----- Fields offset and length.
const int kOpcodeShift = 26;
const int kOpcodeBits = 6;
const int kRsShift = 21;
const int kRsBits = 5;
const int kRtShift = 16;
const int kRtBits = 5;
const int kRdShift = 11;
const int kRdBits = 5;
const int kSaShift = 6;
const int kSaBits = 5;
const int kOpcodeShift = 26;
const int kOpcodeBits = 6;
const int kRsShift = 21;
const int kRsBits = 5;
const int kRtShift = 16;
const int kRtBits = 5;
const int kRdShift = 11;
const int kRdBits = 5;
const int kSaShift = 6;
const int kSaBits = 5;
const int kLsaSaBits = 2;
const int kFunctionShift = 0;
const int kFunctionBits = 6;
const int kLuiShift = 16;
const int kFunctionBits = 6;
const int kLuiShift = 16;
const int kBp2Shift = 6;
const int kBp2Bits = 2;
const int kBaseShift = 21;
@ -336,19 +324,19 @@ const int kBit6Bits = 1;
const int kImm9Shift = 7;
const int kImm9Bits = 9;
const int kImm16Shift = 0;
const int kImm16Bits = 16;
const int kImm16Bits = 16;
const int kImm18Shift = 0;
const int kImm18Bits = 18;
const int kImm19Shift = 0;
const int kImm19Bits = 19;
const int kImm21Shift = 0;
const int kImm21Bits = 21;
const int kImm21Bits = 21;
const int kImm26Shift = 0;
const int kImm26Bits = 26;
const int kImm26Bits = 26;
const int kImm28Shift = 0;
const int kImm28Bits = 28;
const int kImm28Bits = 28;
const int kImm32Shift = 0;
const int kImm32Bits = 32;
const int kImm32Bits = 32;
const int kMsaImm8Shift = 16;
const int kMsaImm8Bits = 8;
const int kMsaImm5Shift = 16;
@ -362,20 +350,20 @@ const int kMsaImmMI10Bits = 10;
// and are therefore shifted by 2.
const int kImmFieldShift = 2;
const int kFrBits = 5;
const int kFrShift = 21;
const int kFsShift = 11;
const int kFsBits = 5;
const int kFtShift = 16;
const int kFtBits = 5;
const int kFdShift = 6;
const int kFdBits = 5;
const int kFCccShift = 8;
const int kFCccBits = 3;
const int kFBccShift = 18;
const int kFBccBits = 3;
const int kFBtrueShift = 16;
const int kFBtrueBits = 1;
const int kFrBits = 5;
const int kFrShift = 21;
const int kFsShift = 11;
const int kFsBits = 5;
const int kFtShift = 16;
const int kFtBits = 5;
const int kFdShift = 6;
const int kFdBits = 5;
const int kFCccShift = 8;
const int kFCccBits = 3;
const int kFBccShift = 18;
const int kFBccBits = 3;
const int kFBtrueShift = 16;
const int kFBtrueBits = 1;
const int kWtBits = 5;
const int kWtShift = 16;
const int kWsBits = 5;
@ -1045,7 +1033,6 @@ enum Condition {
cc_default = kNoCondition
};
// Returns the equivalent of !cc.
// Negation of the default kNoCondition (-1) results in a non-default
// no_condition value (-2). As long as tests for no_condition check
@ -1055,7 +1042,6 @@ inline Condition NegateCondition(Condition cc) {
return static_cast<Condition>(cc ^ 1);
}
inline Condition NegateFpuCondition(Condition cc) {
DCHECK(cc != cc_always);
switch (cc) {
@ -1119,7 +1105,6 @@ enum MSABranchDF {
MSA_BRANCH_V
};
// ----- Coprocessor conditions.
enum FPUCondition {
kNoFPUCondition = -1,
@ -1141,7 +1126,6 @@ enum FPUCondition {
NE = 0x13, // Ordered Greater Than or Less Than. on Mips >= 6 only.
};
// FPU rounding modes.
enum FPURoundingMode {
RN = 0 << 0, // Round to Nearest.
@ -1176,15 +1160,9 @@ enum class MaxMinKind : int { kMin = 0, kMax = 1 };
// Branch hints are not used on the MIPS. They are defined so that they can
// appear in shared function signatures, but will be ignored in MIPS
// implementations.
enum Hint {
no_hint = 0
};
inline Hint NegateHint(Hint hint) {
return no_hint;
}
enum Hint { no_hint = 0 };
inline Hint NegateHint(Hint hint) { return no_hint; }
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
@ -1244,16 +1222,13 @@ class InstructionBase {
}
// Read one particular bit out of the instruction bits.
inline int Bit(int nr) const {
return (InstructionBits() >> nr) & 1;
}
inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; }
// Read a bit field out of the instruction bits.
inline int Bits(int hi, int lo) const {
return (InstructionBits() >> lo) & ((2U << (hi - lo)) - 1);
}
static constexpr uint64_t kOpcodeImmediateTypeMask =
OpcodeToBitNumber(REGIMM) | OpcodeToBitNumber(BEQ) |
OpcodeToBitNumber(BNE) | OpcodeToBitNumber(BLEZ) |
@ -1677,7 +1652,6 @@ class Instruction : public InstructionGetters<InstructionBase> {
DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
};
// -----------------------------------------------------------------------------
// MIPS assembly various constants.
@ -1793,7 +1767,7 @@ InstructionBase::Type InstructionBase::InstructionType() const {
}
default:
return kImmediateType;
return kImmediateType;
}
}
@ -1924,4 +1898,4 @@ bool InstructionGetters<T>::IsForbiddenAfterBranchInstr(Instr instr) {
} // namespace internal
} // namespace v8
#endif // V8_MIPS_CONSTANTS_MIPS_H_
#endif // V8_CODEGEN_MIPS_CONSTANTS_MIPS_H_

View File

@ -18,7 +18,6 @@
namespace v8 {
namespace internal {
void CpuFeatures::FlushICache(void* start, size_t size) {
#if !defined(USE_SIMULATOR)
// Nothing to do, flushing no instructions.
@ -28,10 +27,10 @@ void CpuFeatures::FlushICache(void* start, size_t size) {
#if defined(ANDROID)
// Bionic cacheflush can typically run in userland, avoiding kernel call.
char *end = reinterpret_cast<char *>(start) + size;
cacheflush(
reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end), 0);
#else // ANDROID
char* end = reinterpret_cast<char*>(start) + size;
cacheflush(reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end),
0);
#else // ANDROID
int res;
// See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
res = syscall(__NR_cacheflush, start, size, ICACHE);

View File

@ -101,7 +101,6 @@ const Register ApiGetterDescriptor::CallbackRegister() { return a3; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
@ -251,10 +250,10 @@ void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
a1, // kApiFunctionAddress
a2, // kArgc
a3, // kCallData
a0, // kHolder
a1, // kApiFunctionAddress
a2, // kArgc
a3, // kCallData
a0, // kHolder
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}

View File

@ -28,7 +28,7 @@
// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
#if 0
#include "src/mips/macro-assembler-mips.h"
#include "src/codegen/mips/macro-assembler-mips.h"
#endif
namespace v8 {
@ -133,7 +133,6 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index,
MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}
void TurboAssembler::PushCommonFrame(Register marker_reg) {
if (marker_reg.is_valid()) {
Push(ra, fp, marker_reg);
@ -161,7 +160,6 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
return kSafepointRegisterStackIndexMap[reg_code];
}
// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
// The register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
@ -351,9 +349,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
CheckPageFlag(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask,
eq,
&done);
MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
// Record the actual write.
if (ra_status == kRAHasNotBeenSaved) {
@ -943,7 +939,6 @@ void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
}
}
void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
if (IsMipsArchVariant(kLoongson)) {
lw(zero_reg, rs);
@ -1410,7 +1405,6 @@ void TurboAssembler::MultiPush(RegList regs) {
}
}
void TurboAssembler::MultiPop(RegList regs) {
int16_t stack_offset = 0;
@ -1423,7 +1417,6 @@ void TurboAssembler::MultiPop(RegList regs) {
addiu(sp, sp, stack_offset);
}
void TurboAssembler::MultiPushFPU(RegList regs) {
int16_t num_to_push = base::bits::CountPopulation(regs);
int16_t stack_offset = num_to_push * kDoubleSize;
@ -1437,7 +1430,6 @@ void TurboAssembler::MultiPushFPU(RegList regs) {
}
}
void TurboAssembler::MultiPopFPU(RegList regs) {
int16_t stack_offset = 0;
@ -1464,8 +1456,7 @@ void TurboAssembler::AddPair(Register dst_low, Register dst_high,
}
void TurboAssembler::AddPair(Register dst_low, Register dst_high,
Register left_low, Register left_high,
int32_t imm,
Register left_low, Register left_high, int32_t imm,
Register scratch1, Register scratch2) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch3 = t8;
@ -2680,13 +2671,10 @@ void TurboAssembler::Popcnt(Register rd, Register rs) {
srl(rd, rd, shift);
}
void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
Register result,
DoubleRegister double_input,
Register scratch,
DoubleRegister double_scratch,
Register except_flag,
CheckForInexactConversion check_inexact) {
void MacroAssembler::EmitFPUTruncate(
FPURoundingMode rounding_mode, Register result, DoubleRegister double_input,
Register scratch, DoubleRegister double_scratch, Register except_flag,
CheckForInexactConversion check_inexact) {
DCHECK(result != scratch);
DCHECK(double_input != double_scratch);
DCHECK(except_flag != scratch);
@ -2763,8 +2751,7 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
cfc1(scratch, FCSR);
ctc1(scratch2, FCSR);
// Check for overflow and NaNs.
And(scratch,
scratch,
And(scratch, scratch,
kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
// If we had no exceptions we are done.
Branch(done, eq, scratch, Operand(zero_reg));
@ -2877,8 +2864,7 @@ void TurboAssembler::BranchShortHelper(int16_t offset, Label* L,
b(offset);
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
if (bdslot == PROTECT) nop();
}
void TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
@ -2905,7 +2891,6 @@ void TurboAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
}
}
int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
if (L) {
offset = branch_offset_helper(L, bits) >> 2;
@ -3285,8 +3270,7 @@ bool TurboAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
}
}
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
if (bdslot == PROTECT) nop();
return true;
}
@ -3381,8 +3365,7 @@ void TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
bal(offset);
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
if (bdslot == PROTECT) nop();
}
void TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
@ -3514,7 +3497,6 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
}
break;
// Unsigned comparison.
case Ugreater:
// rs > r2
@ -3638,8 +3620,7 @@ bool TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
}
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
if (bdslot == PROTECT) nop();
return true;
}
@ -4106,7 +4087,7 @@ void TurboAssembler::Drop(int count, Condition cond, Register reg,
Label skip;
if (cond != al) {
Branch(&skip, NegateCondition(cond), reg, op);
Branch(&skip, NegateCondition(cond), reg, op);
}
Addu(sp, sp, Operand(count * kPointerSize));
@ -4116,11 +4097,7 @@ void TurboAssembler::Drop(int count, Condition cond, Register reg,
}
}
void MacroAssembler::Swap(Register reg1,
Register reg2,
Register scratch) {
void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
if (scratch == no_reg) {
Xor(reg1, reg1, Operand(reg2));
Xor(reg2, reg2, Operand(reg1));
@ -4176,7 +4153,6 @@ void MacroAssembler::PushStackHandler() {
sw(sp, MemOperand(t2));
}
void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(a1);
@ -4263,7 +4239,6 @@ void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
}
}
// -----------------------------------------------------------------------------
// JavaScript invokes.
@ -4513,18 +4488,15 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFunctionCode(a1, no_reg, expected, actual, flag);
}
// ---------------------------------------------------------------------------
// Support functions.
void MacroAssembler::GetObjectType(Register object,
Register map,
void MacroAssembler::GetObjectType(Register object, Register map,
Register type_reg) {
lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
}
// -----------------------------------------------------------------------------
// Runtime calls.
@ -4691,7 +4663,6 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
}
}
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK_GT(value, 0);
@ -4703,14 +4674,12 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
}
// -----------------------------------------------------------------------------
// Debugging.
void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
Operand rt) {
if (emit_debug_code())
Check(cc, reason, rs, rt);
if (emit_debug_code()) Check(cc, reason, rs, rt);
}
void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
@ -4947,7 +4916,7 @@ int TurboAssembler::ActivationFrameAlignment() {
// Note: This will break if we ever start generating snapshots on one Mips
// platform for another Mips platform with a different alignment.
return base::OS::ActivationFrameAlignment();
#else // V8_HOST_ARCH_MIPS
#else // V8_HOST_ARCH_MIPS
// If we are using the simulator then we should always align to the expected
// alignment. As the simulator is used to generate snapshots we do not know
// if the target platform will need alignment, so this is controlled from a
@ -4956,24 +4925,23 @@ int TurboAssembler::ActivationFrameAlignment() {
#endif // V8_HOST_ARCH_MIPS
}
void MacroAssembler::AssertStackIsAligned() {
if (emit_debug_code()) {
const int frame_alignment = ActivationFrameAlignment();
const int frame_alignment_mask = frame_alignment - 1;
const int frame_alignment = ActivationFrameAlignment();
const int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
Label alignment_as_expected;
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
andi(scratch, sp, frame_alignment_mask);
Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
// Don't use Check here, as it will call Runtime_Abort re-entering here.
stop("Unexpected stack alignment");
bind(&alignment_as_expected);
}
if (frame_alignment > kPointerSize) {
Label alignment_as_expected;
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
andi(scratch, sp, frame_alignment_mask);
Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
// Don't use Check here, as it will call Runtime_Abort re-entering here.
stop("Unexpected stack alignment");
bind(&alignment_as_expected);
}
}
}
void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
@ -4983,10 +4951,8 @@ void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
}
void MacroAssembler::JumpIfNotSmi(Register value,
Label* not_smi_label,
Register scratch,
BranchDelaySlot bd) {
void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
Register scratch, BranchDelaySlot bd) {
DCHECK_EQ(0, kSmiTag);
andi(scratch, value, kSmiTagMask);
Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
@ -5002,7 +4968,6 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
}
void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@ -5041,7 +5006,6 @@ void MacroAssembler::AssertFunction(Register object) {
}
}
void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
BlockTrampolinePoolScope block_trampoline_pool(this);
@ -5095,7 +5059,6 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
}
void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
if (src1 == src2) {
@ -5319,8 +5282,8 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
// mips, even though those argument slots are not normally used.
// Remaining arguments are pushed on the stack, above (higher address than)
// the argument slots.
int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments);
int stack_passed_arguments =
CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
if (frame_alignment > kPointerSize) {
// Make stack end at alignment and make room for num_arguments - 4 words
// and the original value of sp.
@ -5443,8 +5406,8 @@ void TurboAssembler::CallCFunctionHelper(Register function_base,
}
}
int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments);
int stack_passed_arguments =
CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
if (base::OS::ActivationFrameAlignment() > kPointerSize) {
lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
@ -5453,7 +5416,6 @@ void TurboAssembler::CallCFunctionHelper(Register function_base,
}
}
#undef BRANCH_ARGS_CHECK
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
@ -5464,11 +5426,8 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Branch(condition_met, cc, scratch, Operand(zero_reg));
}
Register GetRegisterThatIsNotOneOf(Register reg1,
Register reg2,
Register reg3,
Register reg4,
Register reg5,
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
Register reg4, Register reg5,
Register reg6) {
RegList regs = 0;
if (reg1.is_valid()) regs |= reg1.bit();

View File

@ -6,12 +6,12 @@
#error This header must be included via macro-assembler.h
#endif
#ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
#define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
#ifndef V8_CODEGEN_MIPS_MACRO_ASSEMBLER_MIPS_H_
#define V8_CODEGEN_MIPS_MACRO_ASSEMBLER_MIPS_H_
#include "src/codegen/assembler.h"
#include "src/codegen/mips/assembler-mips.h"
#include "src/common/globals.h"
#include "src/mips/assembler-mips.h"
#include "src/objects/contexts.h"
namespace v8 {
@ -32,12 +32,8 @@ enum class AbortReason : uint8_t;
// trying to update gp register for position-independent-code. Whenever
// MIPS generated code calls C code, it must be via t9 register.
// Flags used for LeaveExitFrame function.
enum LeaveExitFrameMode {
EMIT_RETURN = true,
NO_EMIT_RETURN = false
};
enum LeaveExitFrameMode { EMIT_RETURN = true, NO_EMIT_RETURN = false };
// Flags used for the li macro-assembler function.
enum LiFlags {
@ -49,13 +45,11 @@ enum LiFlags {
CONSTANT_SIZE = 1
};
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
Register GetRegisterThatIsNotOneOf(Register reg1,
Register reg2 = no_reg,
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
Register reg3 = no_reg,
Register reg4 = no_reg,
Register reg5 = no_reg,
@ -68,18 +62,15 @@ inline MemOperand ContextMemOperand(Register context, int index) {
return MemOperand(context, Context::SlotOffset(index));
}
inline MemOperand NativeContextMemOperand() {
return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset) {
return MemOperand(object, offset - kHeapObjectTag);
}
// Generate a MemOperand for storing arguments 5..N on the stack
// when calling CallCFunction().
inline MemOperand CFunctionArgumentOperand(int index) {
@ -131,22 +122,19 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Abort(AbortReason msg);
// Arguments macros.
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand &r2
#define COND_ARGS cond, r1, r2
// Cases when relocation is not needed.
#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
void Name(target_type target, BranchDelaySlot bd = PROTECT); \
inline void Name(BranchDelaySlot bd, target_type target) { \
Name(target, bd); \
} \
void Name(target_type target, \
COND_TYPED_ARGS, \
BranchDelaySlot bd = PROTECT); \
inline void Name(BranchDelaySlot bd, \
target_type target, \
COND_TYPED_ARGS) { \
Name(target, COND_ARGS, bd); \
#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
void Name(target_type target, BranchDelaySlot bd = PROTECT); \
inline void Name(BranchDelaySlot bd, target_type target) { \
Name(target, bd); \
} \
void Name(target_type target, COND_TYPED_ARGS, \
BranchDelaySlot bd = PROTECT); \
inline void Name(BranchDelaySlot bd, target_type target, COND_TYPED_ARGS) { \
Name(target, COND_ARGS, bd); \
}
#define DECLARE_BRANCH_PROTOTYPES(Name) \
@ -207,8 +195,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadRootRelative(Register destination, int32_t offset) override;
// Jump, Call, and Ret pseudo instructions implementing inter-working.
#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
#define COND_ARGS \
Condition cond = al, Register rs = zero_reg, \
const Operand &rt = Operand(zero_reg), \
BranchDelaySlot bd = PROTECT
void Jump(Register target, int16_t offset = 0, COND_ARGS);
void Jump(Register target, Register base, int16_t offset = 0, COND_ARGS);
@ -219,8 +209,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Call(Register target, int16_t offset = 0, COND_ARGS);
void Call(Register target, Register base, int16_t offset = 0, COND_ARGS);
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Call(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
COND_ARGS);
void Call(Label* target);
@ -249,25 +238,21 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Ret(COND_ARGS);
inline void Ret(BranchDelaySlot bd, Condition cond = al,
Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
Register rs = zero_reg,
const Operand& rt = Operand(zero_reg)) {
Ret(cond, rs, rt, bd);
}
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
void Drop(int count,
Condition cond = cc_always,
Register reg = no_reg,
void Drop(int count, Condition cond = cc_always, Register reg = no_reg,
const Operand& op = Operand(no_reg));
// Trivial case of DropAndRet that utilizes the delay slot and only emits
// 2 instructions.
void DropAndRet(int drop);
void DropAndRet(int drop,
Condition cond,
Register reg,
const Operand& op);
void DropAndRet(int drop, Condition cond, Register reg, const Operand& op);
void push(Register src) {
Addu(sp, sp, Operand(-kPointerSize));
@ -404,13 +389,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void instr(Register rs, Register rt) { instr(rs, Operand(rt)); } \
void instr(Register rs, int32_t j) { instr(rs, Operand(j)); }
#define DEFINE_INSTRUCTION3(instr) \
void instr(Register rd_hi, Register rd_lo, Register rs, const Operand& rt); \
void instr(Register rd_hi, Register rd_lo, Register rs, Register rt) { \
instr(rd_hi, rd_lo, rs, Operand(rt)); \
} \
void instr(Register rd_hi, Register rd_lo, Register rs, int32_t j) { \
instr(rd_hi, rd_lo, rs, Operand(j)); \
#define DEFINE_INSTRUCTION3(instr) \
void instr(Register rd_hi, Register rd_lo, Register rs, const Operand& rt); \
void instr(Register rd_hi, Register rd_lo, Register rs, Register rt) { \
instr(rd_hi, rd_lo, rs, Operand(rt)); \
} \
void instr(Register rd_hi, Register rd_lo, Register rs, int32_t j) { \
instr(rd_hi, rd_lo, rs, Operand(j)); \
}
DEFINE_INSTRUCTION(Addu)
@ -552,8 +537,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register scratch1, Register scratch2);
void AddPair(Register dst_low, Register dst_high, Register left_low,
Register left_high, int32_t imm,
Register scratch1, Register scratch2);
Register left_high, int32_t imm, Register scratch1,
Register scratch2);
void SubPair(Register dst_low, Register dst_high, Register left_low,
Register left_high, Register right_low, Register right_high,
@ -1032,9 +1017,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// -------------------------------------------------------------------------
// Support functions.
void GetObjectType(Register function,
Register map,
Register type_reg);
void GetObjectType(Register function, Register map, Register type_reg);
// -------------------------------------------------------------------------
// Runtime calls.
@ -1074,17 +1057,15 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// -------------------------------------------------------------------------
// StatsCounter support.
void IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2);
void DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2);
void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
Register scratch2);
void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
Register scratch2);
// -------------------------------------------------------------------------
// Smi utilities.
void SmiTag(Register reg) {
Addu(reg, reg, reg);
}
void SmiTag(Register reg) { Addu(reg, reg, reg); }
void SmiTag(Register dst, Register src) { Addu(dst, src, src); }
@ -1094,9 +1075,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
}
// Jump if the register contains a non-smi.
void JumpIfNotSmi(Register value,
Label* not_smi_label,
Register scratch = at,
void JumpIfNotSmi(Register value, Label* not_smi_label, Register scratch = at,
BranchDelaySlot bd = PROTECT);
// Abort execution if argument is a smi, enabled via --debug-code.
@ -1121,12 +1100,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
template<typename Field>
template <typename Field>
void DecodeField(Register dst, Register src) {
Ext(dst, src, Field::kShift, Field::kSize);
}
template<typename Field>
template <typename Field>
void DecodeField(Register reg) {
DecodeField<Field>(reg, reg);
}
@ -1179,4 +1158,4 @@ void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
} // namespace internal
} // namespace v8
#endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
#endif // V8_CODEGEN_MIPS_MACRO_ASSEMBLER_MIPS_H_

View File

@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MIPS_REGISTER_MIPS_H_
#define V8_MIPS_REGISTER_MIPS_H_
#ifndef V8_CODEGEN_MIPS_REGISTER_MIPS_H_
#define V8_CODEGEN_MIPS_REGISTER_MIPS_H_
#include "src/codegen/mips/constants-mips.h"
#include "src/codegen/register.h"
#include "src/codegen/reglist.h"
#include "src/mips/constants-mips.h"
namespace v8 {
namespace internal {
@ -379,4 +379,4 @@ constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
} // namespace internal
} // namespace v8
#endif // V8_MIPS_REGISTER_MIPS_H_
#endif // V8_CODEGEN_MIPS_REGISTER_MIPS_H_

View File

@ -33,10 +33,10 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
#ifndef V8_MIPS64_ASSEMBLER_MIPS64_INL_H_
#define V8_MIPS64_ASSEMBLER_MIPS64_INL_H_
#ifndef V8_CODEGEN_MIPS64_ASSEMBLER_MIPS64_INL_H_
#define V8_CODEGEN_MIPS64_ASSEMBLER_MIPS64_INL_H_
#include "src/mips64/assembler-mips64.h"
#include "src/codegen/mips64/assembler-mips64.h"
#include "src/codegen/assembler.h"
#include "src/debug/debug.h"
@ -52,9 +52,7 @@ bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(MIPS_SIMD); }
// -----------------------------------------------------------------------------
// Operand and MemOperand.
bool Operand::is_reg() const {
return rm_.is_valid();
}
bool Operand::is_reg() const { return rm_.is_valid(); }
int64_t Operand::immediate() const {
DCHECK(!is_reg());
@ -72,7 +70,6 @@ void RelocInfo::apply(intptr_t delta) {
}
}
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
@ -97,15 +94,9 @@ Address RelocInfo::target_address_address() {
return pc_ + Assembler::kInstructionsFor64BitConstant * kInstrSize;
}
Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); }
Address RelocInfo::constant_pool_entry_address() {
UNREACHABLE();
}
int RelocInfo::target_address_size() {
return Assembler::kSpecialTargetSize;
}
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
void Assembler::deserialization_set_special_target_at(
Address instruction_payload, Code code, Address target) {
@ -199,7 +190,6 @@ Address RelocInfo::target_internal_reference() {
}
}
Address RelocInfo::target_internal_reference_address() {
DCHECK(rmode_ == INTERNAL_REFERENCE || rmode_ == INTERNAL_REFERENCE_ENCODED);
return pc_;
@ -240,14 +230,12 @@ void RelocInfo::WipeOut() {
// -----------------------------------------------------------------------------
// Assembler.
void Assembler::CheckBuffer() {
if (buffer_space() <= kGap) {
GrowBuffer();
}
}
void Assembler::CheckForEmitInForbiddenSlot() {
if (!is_buffer_growth_blocked()) {
CheckBuffer();
@ -262,7 +250,6 @@ void Assembler::CheckForEmitInForbiddenSlot() {
}
}
void Assembler::EmitHelper(Instr x, CompactBranchType is_compact_branch) {
if (IsPrevInstrCompactBranch()) {
if (Instruction::IsForbiddenAfterBranchInstr(x)) {
@ -307,7 +294,6 @@ void Assembler::emit(Instr x, CompactBranchType is_compact_branch) {
EmitHelper(x, is_compact_branch);
}
void Assembler::emit(uint64_t data) {
CheckForEmitInForbiddenSlot();
EmitHelper(data);
@ -318,4 +304,4 @@ EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
} // namespace internal
} // namespace v8
#endif // V8_MIPS64_ASSEMBLER_MIPS64_INL_H_
#endif // V8_CODEGEN_MIPS64_ASSEMBLER_MIPS64_INL_H_

View File

@ -32,8 +32,8 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
#ifndef V8_MIPS64_ASSEMBLER_MIPS64_H_
#define V8_MIPS64_ASSEMBLER_MIPS64_H_
#ifndef V8_CODEGEN_MIPS64_ASSEMBLER_MIPS64_H_
#define V8_CODEGEN_MIPS64_ASSEMBLER_MIPS64_H_
#include <stdio.h>
@ -42,8 +42,8 @@
#include "src/codegen/assembler.h"
#include "src/codegen/external-reference.h"
#include "src/codegen/label.h"
#include "src/mips64/constants-mips64.h"
#include "src/mips64/register-mips64.h"
#include "src/codegen/mips64/constants-mips64.h"
#include "src/codegen/mips64/register-mips64.h"
#include "src/objects/contexts.h"
#include "src/objects/smi.h"
@ -119,25 +119,19 @@ class Operand {
friend class MacroAssembler;
};
// On MIPS we have only one addressing mode with base_reg + offset.
// Class MemOperand represents a memory operand in load and store instructions.
class MemOperand : public Operand {
public:
// Immediate value attached to offset.
enum OffsetAddend {
offset_minus_one = -1,
offset_zero = 0
};
enum OffsetAddend { offset_minus_one = -1, offset_zero = 0 };
explicit MemOperand(Register rn, int32_t offset = 0);
explicit MemOperand(Register rn, int32_t unit, int32_t multiplier,
OffsetAddend offset_addend = offset_zero);
int32_t offset() const { return offset_; }
bool OffsetIsInt16Encodable() const {
return is_int16(offset_);
}
bool OffsetIsInt16Encodable() const { return is_int16(offset_); }
private:
int32_t offset_;
@ -157,7 +151,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
explicit Assembler(const AssemblerOptions&,
std::unique_ptr<AssemblerBuffer> = {});
virtual ~Assembler() { }
virtual ~Assembler() {}
// GetCode emits any pending (non-emitted) code and fills the descriptor desc.
static constexpr int kNoHandlerTable = 0;
@ -358,7 +352,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
sll(zero_reg, nop_rt_reg, type, true);
}
// --------Branch-and-jump-instructions----------
// We don't use likely variant of instructions.
void b(int16_t offset);
@ -600,12 +593,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void auipc(Register rs, int16_t imm16);
void aluipc(Register rs, int16_t imm16);
// ----------------Prefetch--------------------
void pref(int32_t hint, const MemOperand& rs);
// -------------Misc-instructions--------------
// Break / Trap instructions.
@ -746,7 +737,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void rint_d(FPURegister fd, FPURegister fs);
void rint(SecondaryField fmt, FPURegister fd, FPURegister fs);
void cvt_l_s(FPURegister fd, FPURegister fs);
void cvt_l_d(FPURegister fd, FPURegister fs);
void trunc_l_s(FPURegister fd, FPURegister fs);
@ -783,8 +773,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void cvt_d_s(FPURegister fd, FPURegister fs);
// Conditions and branches for MIPSr6.
void cmp(FPUCondition cond, SecondaryField fmt,
FPURegister fd, FPURegister ft, FPURegister fs);
void cmp(FPUCondition cond, SecondaryField fmt, FPURegister fd,
FPURegister ft, FPURegister fs);
void cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
void cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
@ -798,8 +788,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
// Conditions and branches for non MIPSr6.
void c(FPUCondition cond, SecondaryField fmt,
FPURegister ft, FPURegister fs, uint16_t cc = 0);
void c(FPUCondition cond, SecondaryField fmt, FPURegister ft, FPURegister fs,
uint16_t cc = 0);
void c_s(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
void c_d(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
@ -1419,9 +1409,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
assem_->StartBlockTrampolinePool();
}
~BlockTrampolinePoolScope() {
assem_->EndBlockTrampolinePool();
}
~BlockTrampolinePoolScope() { assem_->EndBlockTrampolinePool(); }
private:
Assembler* assem_;
@ -1438,9 +1426,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
assem_->StartBlockGrowBuffer();
}
~BlockGrowBufferScope() {
assem_->EndBlockGrowBuffer();
}
~BlockGrowBufferScope() { assem_->EndBlockGrowBuffer(); }
private:
Assembler* assem_;
@ -1504,7 +1490,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static bool IsBeqc(Instr instr);
static bool IsBnec(Instr instr);
static bool IsJump(Instr instr);
static bool IsJ(Instr instr);
static bool IsLui(Instr instr);
@ -1602,9 +1587,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
no_trampoline_pool_before_ = pc_offset;
}
void StartBlockTrampolinePool() {
trampoline_pool_blocked_nesting_++;
}
void StartBlockTrampolinePool() { trampoline_pool_blocked_nesting_++; }
void EndBlockTrampolinePool() {
trampoline_pool_blocked_nesting_--;
@ -1617,13 +1600,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
return trampoline_pool_blocked_nesting_ > 0;
}
bool has_exception() const {
return internal_trampoline_exception_;
}
bool has_exception() const { return internal_trampoline_exception_; }
bool is_trampoline_emitted() const {
return trampoline_emitted_;
}
bool is_trampoline_emitted() const { return trampoline_emitted_; }
// Temporarily block automatic assembly buffer growth.
void StartBlockGrowBuffer() {
@ -1636,9 +1615,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
block_buffer_growth_ = false;
}
bool is_buffer_growth_blocked() const {
return block_buffer_growth_;
}
bool is_buffer_growth_blocked() const { return block_buffer_growth_; }
void EmitForbiddenSlotInstruction() {
if (IsPrevInstrCompactBranch()) {
@ -1719,12 +1696,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void GenInstrRegister(Opcode opcode, Register rs, Register rt, Register rd,
uint16_t sa = 0, SecondaryField func = nullptrSF);
void GenInstrRegister(Opcode opcode,
Register rs,
Register rt,
uint16_t msb,
uint16_t lsb,
SecondaryField func);
void GenInstrRegister(Opcode opcode, Register rs, Register rt, uint16_t msb,
uint16_t lsb, SecondaryField func);
void GenInstrRegister(Opcode opcode, SecondaryField fmt, FPURegister ft,
FPURegister fs, FPURegister fd,
@ -1760,8 +1733,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Opcode opcode, int32_t offset26,
CompactBranchType is_compact_branch = CompactBranchType::NO);
void GenInstrJump(Opcode opcode,
uint32_t address);
void GenInstrJump(Opcode opcode, uint32_t address);
// MSA
void GenInstrMsaI8(SecondaryField operation, uint32_t imm8, MSARegister ws,
@ -1859,12 +1831,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
free_slot_count_ = slot_count;
end_ = start + slot_count * kTrampolineSlotsSize;
}
int start() {
return start_;
}
int end() {
return end_;
}
int start() { return start_; }
int end() { return end_; }
int take_slot() {
int trampoline_slot = kInvalidSlotPos;
if (free_slot_count_ <= 0) {
@ -1947,4 +1915,4 @@ class UseScratchRegisterScope {
} // namespace internal
} // namespace v8
#endif // V8_MIPS64_ASSEMBLER_MIPS64_H_
#endif // V8_CODEGEN_MIPS64_ASSEMBLER_MIPS64_H_

View File

@ -4,35 +4,21 @@
#if V8_TARGET_ARCH_MIPS64
#include "src/mips64/constants-mips64.h"
#include "src/codegen/mips64/constants-mips64.h"
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
// Registers.
// These register names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
const char* Registers::names_[kNumSimuRegisters] = {
"zero_reg",
"at",
"v0", "v1",
"a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
"t0", "t1", "t2", "t3",
"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
"t8", "t9",
"k0", "k1",
"gp",
"sp",
"fp",
"ra",
"LO", "HI",
"pc"
};
"zero_reg", "at", "v0", "v1", "a0", "a1", "a2", "a3", "a4",
"a5", "a6", "a7", "t0", "t1", "t2", "t3", "s0", "s1",
"s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0",
"k1", "gp", "sp", "fp", "ra", "LO", "HI", "pc"};
// List of alias names which can be used when referring to MIPS registers.
const Registers::RegisterAlias Registers::aliases_[] = {
@ -52,7 +38,6 @@ const char* Registers::Name(int reg) {
return result;
}
int Registers::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumSimuRegisters; i++) {
@ -74,13 +59,10 @@ int Registers::Number(const char* name) {
return kInvalidRegister;
}
const char* FPURegisters::names_[kNumFPURegisters] = {
"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11",
"f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
"f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
};
"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10",
"f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
"f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"};
// List of alias names which can be used when referring to MIPS registers.
const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
@ -96,7 +78,6 @@ const char* FPURegisters::Name(int creg) {
return result;
}
int FPURegisters::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumFPURegisters; i++) {

View File

@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MIPS64_CONSTANTS_MIPS64_H_
#define V8_MIPS64_CONSTANTS_MIPS64_H_
#ifndef V8_CODEGEN_MIPS64_CONSTANTS_MIPS64_H_
#define V8_CODEGEN_MIPS64_CONSTANTS_MIPS64_H_
#include "src/base/logging.h"
#include "src/base/macros.h"
@ -11,8 +11,8 @@
// UNIMPLEMENTED_ macro for MIPS.
#ifdef DEBUG
#define UNIMPLEMENTED_MIPS() \
v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
#define UNIMPLEMENTED_MIPS() \
v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
__FILE__, __LINE__, __func__)
#else
#define UNIMPLEMENTED_MIPS()
@ -20,38 +20,32 @@
#define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
enum ArchVariants {
kMips64r2,
kMips64r6
};
enum ArchVariants { kMips64r2, kMips64r6 };
#ifdef _MIPS_ARCH_MIPS64R2
static const ArchVariants kArchVariant = kMips64r2;
#elif _MIPS_ARCH_MIPS64R6
static const ArchVariants kArchVariant = kMips64r6;
static const ArchVariants kArchVariant = kMips64r2;
#elif _MIPS_ARCH_MIPS64R6
static const ArchVariants kArchVariant = kMips64r6;
#else
static const ArchVariants kArchVariant = kMips64r2;
static const ArchVariants kArchVariant = kMips64r2;
#endif
enum Endianness { kLittle, kBig };
enum Endianness { kLittle, kBig };
#if defined(V8_TARGET_LITTLE_ENDIAN)
static const Endianness kArchEndian = kLittle;
static const Endianness kArchEndian = kLittle;
#elif defined(V8_TARGET_BIG_ENDIAN)
static const Endianness kArchEndian = kBig;
static const Endianness kArchEndian = kBig;
#else
#error Unknown endianness
#endif
// TODO(plind): consider renaming these ...
#if(defined(__mips_hard_float) && __mips_hard_float != 0)
#if defined(__mips_hard_float) && __mips_hard_float != 0
// Use floating-point coprocessor instructions. This flag is raised when
// -mhard-float is passed to the compiler.
const bool IsMipsSoftFloatABI = false;
#elif(defined(__mips_soft_float) && __mips_soft_float != 0)
#elif defined(__mips_soft_float) && __mips_soft_float != 0
// This flag is raised when -msoft-float is passed to the compiler.
// Although FPU is a base requirement for v8, soft-float ABI is used
// on soft-float systems with FPU kernel emulation.
@ -166,11 +160,8 @@ const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
const uint32_t kFCSRNaN2008FlagMask = 1 << kFCSRNaN2008FlagBit;
const uint32_t kFCSRFlagMask =
kFCSRInexactFlagMask |
kFCSRUnderflowFlagMask |
kFCSROverflowFlagMask |
kFCSRDivideByZeroFlagMask |
kFCSRInvalidOpFlagMask;
kFCSRInexactFlagMask | kFCSRUnderflowFlagMask | kFCSROverflowFlagMask |
kFCSRDivideByZeroFlagMask | kFCSRInvalidOpFlagMask;
const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
@ -273,22 +264,21 @@ const uint32_t kMaxWatchpointCode = 31;
const uint32_t kMaxStopCode = 127;
STATIC_ASSERT(kMaxWatchpointCode < kMaxStopCode);
// ----- Fields offset and length.
const int kOpcodeShift = 26;
const int kOpcodeBits = 6;
const int kRsShift = 21;
const int kRsBits = 5;
const int kRtShift = 16;
const int kRtBits = 5;
const int kRdShift = 11;
const int kRdBits = 5;
const int kSaShift = 6;
const int kSaBits = 5;
const int kOpcodeShift = 26;
const int kOpcodeBits = 6;
const int kRsShift = 21;
const int kRsBits = 5;
const int kRtShift = 16;
const int kRtBits = 5;
const int kRdShift = 11;
const int kRdBits = 5;
const int kSaShift = 6;
const int kSaBits = 5;
const int kLsaSaBits = 2;
const int kFunctionShift = 0;
const int kFunctionBits = 6;
const int kLuiShift = 16;
const int kFunctionBits = 6;
const int kLuiShift = 16;
const int kBp2Shift = 6;
const int kBp2Bits = 2;
const int kBp3Shift = 6;
@ -327,20 +317,20 @@ const int kMsaImmMI10Bits = 10;
// and are therefore shifted by 2.
const int kImmFieldShift = 2;
const int kFrBits = 5;
const int kFrShift = 21;
const int kFsShift = 11;
const int kFsBits = 5;
const int kFtShift = 16;
const int kFtBits = 5;
const int kFdShift = 6;
const int kFdBits = 5;
const int kFCccShift = 8;
const int kFCccBits = 3;
const int kFBccShift = 18;
const int kFBccBits = 3;
const int kFBtrueShift = 16;
const int kFBtrueBits = 1;
const int kFrBits = 5;
const int kFrShift = 21;
const int kFsShift = 11;
const int kFsBits = 5;
const int kFtShift = 16;
const int kFtBits = 5;
const int kFdShift = 6;
const int kFdBits = 5;
const int kFCccShift = 8;
const int kFCccBits = 3;
const int kFBccShift = 18;
const int kFBccBits = 3;
const int kFBtrueShift = 16;
const int kFBtrueBits = 1;
const int kWtBits = 5;
const int kWtShift = 16;
const int kWsBits = 5;
@ -1080,7 +1070,6 @@ enum Condition {
cc_default = kNoCondition
};
// Returns the equivalent of !cc.
// Negation of the default kNoCondition (-1) results in a non-default
// no_condition value (-2). As long as tests for no_condition check
@ -1090,7 +1079,6 @@ inline Condition NegateCondition(Condition cc) {
return static_cast<Condition>(cc ^ 1);
}
inline Condition NegateFpuCondition(Condition cc) {
DCHECK(cc != cc_always);
switch (cc) {
@ -1154,7 +1142,6 @@ enum MSABranchDF {
MSA_BRANCH_V
};
// ----- Coprocessor conditions.
enum FPUCondition {
kNoFPUCondition = -1,
@ -1176,7 +1163,6 @@ enum FPUCondition {
NE = 0x13, // Ordered Greater Than or Less Than. on Mips >= 6 only.
};
// FPU rounding modes.
enum FPURoundingMode {
RN = 0 << 0, // Round to Nearest.
@ -1211,15 +1197,9 @@ enum class MaxMinKind : int { kMin = 0, kMax = 1 };
// Branch hints are not used on the MIPS. They are defined so that they can
// appear in shared function signatures, but will be ignored in MIPS
// implementations.
enum Hint {
no_hint = 0
};
inline Hint NegateHint(Hint hint) {
return no_hint;
}
enum Hint { no_hint = 0 };
inline Hint NegateHint(Hint hint) { return no_hint; }
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
@ -1279,9 +1259,7 @@ class InstructionBase {
}
// Read one particular bit out of the instruction bits.
inline int Bit(int nr) const {
return (InstructionBits() >> nr) & 1;
}
inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; }
// Read a bit field out of the instruction bits.
inline int Bits(int hi, int lo) const {
@ -1344,7 +1322,6 @@ class InstructionBase {
FunctionFieldToBitNumber(MOVCI) | FunctionFieldToBitNumber(SELEQZ_S) |
FunctionFieldToBitNumber(SELNEZ_S) | FunctionFieldToBitNumber(SYNC);
// Accessors for the different named fields used in the MIPS encoding.
inline Opcode OpcodeValue() const {
return static_cast<Opcode>(
@ -1735,7 +1712,6 @@ class Instruction : public InstructionGetters<InstructionBase> {
DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
};
// -----------------------------------------------------------------------------
// MIPS assembly various constants.
@ -2007,4 +1983,4 @@ bool InstructionGetters<T>::IsForbiddenAfterBranchInstr(Instr instr) {
} // namespace internal
} // namespace v8
#endif // V8_MIPS64_CONSTANTS_MIPS64_H_
#endif // V8_CODEGEN_MIPS64_CONSTANTS_MIPS64_H_

View File

@ -18,7 +18,6 @@
namespace v8 {
namespace internal {
void CpuFeatures::FlushICache(void* start, size_t size) {
#if !defined(USE_SIMULATOR)
// Nothing to do, flushing no instructions.
@ -28,10 +27,10 @@ void CpuFeatures::FlushICache(void* start, size_t size) {
#if defined(ANDROID) && !defined(__LP64__)
// Bionic cacheflush can typically run in userland, avoiding kernel call.
char *end = reinterpret_cast<char *>(start) + size;
cacheflush(
reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end), 0);
#else // ANDROID
char* end = reinterpret_cast<char*>(start) + size;
cacheflush(reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end),
0);
#else // ANDROID
long res; // NOLINT(runtime/int)
// See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
res = syscall(__NR_cacheflush, start, size, ICACHE);

View File

@ -101,7 +101,6 @@ const Register ApiGetterDescriptor::CallbackRegister() { return a3; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
@ -251,10 +250,10 @@ void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
a1, // kApiFunctionAddress
a2, // kArgc
a3, // kCallData
a0, // kHolder
a1, // kApiFunctionAddress
a2, // kArgc
a3, // kCallData
a0, // kHolder
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}

View File

@ -28,7 +28,7 @@
// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
#if 0
#include "src/mips64/macro-assembler-mips64.h"
#include "src/codegen/mips64/macro-assembler-mips64.h"
#endif
namespace v8 {
@ -131,7 +131,6 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index,
Ld(destination, MemOperand(s6, RootRegisterOffsetForRootIndex(index)));
}
void TurboAssembler::PushCommonFrame(Register marker_reg) {
if (marker_reg.is_valid()) {
Push(ra, fp, marker_reg);
@ -159,7 +158,6 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
return kSafepointRegisterStackIndexMap[reg_code];
}
// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
// The register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
@ -349,9 +347,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
CheckPageFlag(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask,
eq,
&done);
MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
// Record the actual write.
if (ra_status == kRAHasNotBeenSaved) {
@ -1075,9 +1071,8 @@ void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) {
}
}
void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
pref(hint, rs);
pref(hint, rs);
}
void TurboAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
@ -1332,7 +1327,6 @@ void TurboAssembler::Uld(Register rd, const MemOperand& rs) {
}
}
// Load consequent 32-bit word pair in 64-bit reg. and put first word in low
// bits,
// second word in high bits.
@ -1360,7 +1354,6 @@ void TurboAssembler::Usd(Register rd, const MemOperand& rs) {
}
}
// Do 64-bit store as two consequent 32-bit stores to unaligned address.
void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
Register scratch) {
@ -1926,7 +1919,7 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
ori(rd, rd, (immediate >> 16) & kImm16Mask);
dsll(rd, rd, 16);
ori(rd, rd, immediate & kImm16Mask);
} else if (mode == ADDRESS_LOAD) {
} else if (mode == ADDRESS_LOAD) {
// We always need the same number of instructions as we may need to patch
// this code to load another value which may need all 4 instructions.
lui(rd, (j.immediate() >> 32) & kImm16Mask);
@ -1967,7 +1960,6 @@ void TurboAssembler::MultiPush(RegList regs) {
}
}
void TurboAssembler::MultiPop(RegList regs) {
int16_t stack_offset = 0;
@ -1980,7 +1972,6 @@ void TurboAssembler::MultiPop(RegList regs) {
daddiu(sp, sp, stack_offset);
}
void TurboAssembler::MultiPushFPU(RegList regs) {
int16_t num_to_push = base::bits::CountPopulation(regs);
int16_t stack_offset = num_to_push * kDoubleSize;
@ -1994,7 +1985,6 @@ void TurboAssembler::MultiPushFPU(RegList regs) {
}
}
void TurboAssembler::MultiPopFPU(RegList regs) {
int16_t stack_offset = 0;
@ -2007,7 +1997,6 @@ void TurboAssembler::MultiPopFPU(RegList regs) {
daddiu(sp, sp, stack_offset);
}
void TurboAssembler::Ext(Register rt, Register rs, uint16_t pos,
uint16_t size) {
DCHECK_LT(pos, 32);
@ -2242,29 +2231,23 @@ void TurboAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
bind(&conversion_done);
}
void MacroAssembler::Round_l_d(FPURegister fd, FPURegister fs) {
round_l_d(fd, fs);
}
void MacroAssembler::Floor_l_d(FPURegister fd, FPURegister fs) {
floor_l_d(fd, fs);
}
void MacroAssembler::Ceil_l_d(FPURegister fd, FPURegister fs) {
ceil_l_d(fd, fs);
}
void MacroAssembler::Trunc_l_d(FPURegister fd, FPURegister fs) {
trunc_l_d(fd, fs);
}
void MacroAssembler::Trunc_l_ud(FPURegister fd,
FPURegister fs,
void MacroAssembler::Trunc_l_ud(FPURegister fd, FPURegister fs,
FPURegister scratch) {
BlockTrampolinePoolScope block_trampoline_pool(this);
// Load to GPR.
@ -2308,22 +2291,18 @@ void TurboAssembler::Trunc_ul_s(FPURegister fd, FPURegister fs,
dmtc1(t8, fd);
}
void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
trunc_w_d(fd, fs);
}
void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
round_w_d(fd, fs);
}
void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
floor_w_d(fd, fs);
}
void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
ceil_w_d(fd, fs);
}
@ -2638,7 +2617,7 @@ void MacroAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
}
void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
FPURegister ft, FPURegister scratch) {
DCHECK(fr != scratch && fs != scratch && ft != scratch);
mul_d(scratch, fs, ft);
add_d(fd, fr, scratch);
@ -3140,13 +3119,10 @@ void TurboAssembler::Dpopcnt(Register rd, Register rs) {
dsrl32(rd, rd, shift);
}
void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
Register result,
DoubleRegister double_input,
Register scratch,
DoubleRegister double_scratch,
Register except_flag,
CheckForInexactConversion check_inexact) {
void MacroAssembler::EmitFPUTruncate(
FPURoundingMode rounding_mode, Register result, DoubleRegister double_input,
Register scratch, DoubleRegister double_scratch, Register except_flag,
CheckForInexactConversion check_inexact) {
DCHECK(result != scratch);
DCHECK(double_input != double_scratch);
DCHECK(except_flag != scratch);
@ -3223,8 +3199,7 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
cfc1(scratch, FCSR);
ctc1(scratch2, FCSR);
// Check for overflow and NaNs.
And(scratch,
scratch,
And(scratch, scratch,
kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
// If we had no exceptions we are done.
Branch(done, eq, scratch, Operand(zero_reg));
@ -3337,8 +3312,7 @@ void TurboAssembler::BranchShortHelper(int16_t offset, Label* L,
b(offset);
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
if (bdslot == PROTECT) nop();
}
void TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
@ -3365,7 +3339,6 @@ void TurboAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
}
}
int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
if (L) {
offset = branch_offset_helper(L, bits) >> 2;
@ -3748,8 +3721,7 @@ bool TurboAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
}
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
if (bdslot == PROTECT) nop();
return true;
}
@ -3845,8 +3817,7 @@ void TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
bal(offset);
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
if (bdslot == PROTECT) nop();
}
void TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
@ -3978,7 +3949,6 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
}
break;
// Unsigned comparison.
case Ugreater:
// rs > r2
@ -4102,8 +4072,7 @@ bool TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
}
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
if (bdslot == PROTECT) nop();
return true;
}
@ -4440,7 +4409,7 @@ void TurboAssembler::Drop(int count, Condition cond, Register reg,
Label skip;
if (cond != al) {
Branch(&skip, NegateCondition(cond), reg, op);
Branch(&skip, NegateCondition(cond), reg, op);
}
Daddu(sp, sp, Operand(count * kPointerSize));
@ -4450,11 +4419,7 @@ void TurboAssembler::Drop(int count, Condition cond, Register reg,
}
}
void MacroAssembler::Swap(Register reg1,
Register reg2,
Register scratch) {
void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
if (scratch == no_reg) {
Xor(reg1, reg1, Operand(reg2));
Xor(reg2, reg2, Operand(reg1));
@ -4510,12 +4475,12 @@ void MacroAssembler::PushStackHandler() {
Sd(sp, MemOperand(t2));
}
void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(a1);
Daddu(sp, sp, Operand(static_cast<int64_t>(StackHandlerConstants::kSize -
kPointerSize)));
Daddu(sp, sp,
Operand(
static_cast<int64_t>(StackHandlerConstants::kSize - kPointerSize)));
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch,
@ -4599,7 +4564,6 @@ void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
}
}
// -----------------------------------------------------------------------------
// JavaScript invokes.
@ -4849,18 +4813,15 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFunctionCode(a1, no_reg, expected, actual, flag);
}
// ---------------------------------------------------------------------------
// Support functions.
void MacroAssembler::GetObjectType(Register object,
Register map,
void MacroAssembler::GetObjectType(Register object, Register map,
Register type_reg) {
Ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
Lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
}
// -----------------------------------------------------------------------------
// Runtime calls.
@ -5031,7 +4992,6 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
}
}
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK_GT(value, 0);
@ -5046,14 +5006,12 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
}
// -----------------------------------------------------------------------------
// Debugging.
void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
Operand rt) {
if (emit_debug_code())
Check(cc, reason, rs, rt);
if (emit_debug_code()) Check(cc, reason, rs, rt);
}
void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
@ -5121,7 +5079,6 @@ void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
Ld(dst, ContextMemOperand(dst, index));
}
void TurboAssembler::StubPrologue(StackFrame::Type type) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@ -5240,8 +5197,9 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
if (save_doubles) {
// Remember: we only need to restore every 2nd double FPU value.
int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2;
Dsubu(t8, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp +
kNumOfSavedRegisters * kDoubleSize));
Dsubu(t8, fp,
Operand(ExitFrameConstants::kFixedFrameSizeFromFp +
kNumOfSavedRegisters * kDoubleSize));
for (int i = 0; i < kNumOfSavedRegisters; i++) {
FPURegister reg = FPURegister::from_code(2 * i);
Ldc1(reg, MemOperand(t8, i * kDoubleSize));
@ -5291,7 +5249,7 @@ int TurboAssembler::ActivationFrameAlignment() {
// Note: This will break if we ever start generating snapshots on one Mips
// platform for another Mips platform with a different alignment.
return base::OS::ActivationFrameAlignment();
#else // V8_HOST_ARCH_MIPS
#else // V8_HOST_ARCH_MIPS
// If we are using the simulator then we should always align to the expected
// alignment. As the simulator is used to generate snapshots we do not know
// if the target platform will need alignment, so this is controlled from a
@ -5300,26 +5258,25 @@ int TurboAssembler::ActivationFrameAlignment() {
#endif // V8_HOST_ARCH_MIPS
}
void MacroAssembler::AssertStackIsAligned() {
if (emit_debug_code()) {
const int frame_alignment = ActivationFrameAlignment();
const int frame_alignment_mask = frame_alignment - 1;
const int frame_alignment = ActivationFrameAlignment();
const int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
Label alignment_as_expected;
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
{
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
andi(scratch, sp, frame_alignment_mask);
Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
}
// Don't use Check here, as it will call Runtime_Abort re-entering here.
stop("Unexpected stack alignment");
bind(&alignment_as_expected);
if (frame_alignment > kPointerSize) {
Label alignment_as_expected;
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
{
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
andi(scratch, sp, frame_alignment_mask);
Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
}
// Don't use Check here, as it will call Runtime_Abort re-entering here.
stop("Unexpected stack alignment");
bind(&alignment_as_expected);
}
}
}
void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
@ -5339,10 +5296,8 @@ void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
}
void MacroAssembler::JumpIfNotSmi(Register value,
Label* not_smi_label,
Register scratch,
BranchDelaySlot bd) {
void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
Register scratch, BranchDelaySlot bd) {
DCHECK_EQ(0, kSmiTag);
andi(scratch, value, kSmiTagMask);
Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
@ -5358,7 +5313,6 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
}
void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@ -5397,7 +5351,6 @@ void MacroAssembler::AssertFunction(Register object) {
}
}
void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
BlockTrampolinePoolScope block_trampoline_pool(this);
@ -5451,7 +5404,6 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
}
void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
if (src1 == src2) {
@ -5678,8 +5630,8 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
// Both ABIs: Remaining arguments are pushed on the stack, above (higher
// address than) the (O32) argument slots. (arg slot calculation handled by
// CalculateStackPassedWords()).
int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments);
int stack_passed_arguments =
CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
if (frame_alignment > kPointerSize) {
// Make stack end at alignment and make room for num_arguments - 4 words
// and the original value of sp.
@ -5794,8 +5746,8 @@ void TurboAssembler::CallCFunctionHelper(Register function,
}
}
int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments);
int stack_passed_arguments =
CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
if (base::OS::ActivationFrameAlignment() > kPointerSize) {
Ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
@ -5804,7 +5756,6 @@ void TurboAssembler::CallCFunctionHelper(Register function,
}
}
#undef BRANCH_ARGS_CHECK
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
@ -5815,12 +5766,8 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Branch(condition_met, cc, scratch, Operand(zero_reg));
}
Register GetRegisterThatIsNotOneOf(Register reg1,
Register reg2,
Register reg3,
Register reg4,
Register reg5,
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
Register reg4, Register reg5,
Register reg6) {
RegList regs = 0;
if (reg1.is_valid()) regs |= reg1.bit();

View File

@ -6,12 +6,12 @@
#error This header must be included via macro-assembler.h
#endif
#ifndef V8_MIPS64_MACRO_ASSEMBLER_MIPS64_H_
#define V8_MIPS64_MACRO_ASSEMBLER_MIPS64_H_
#ifndef V8_CODEGEN_MIPS64_MACRO_ASSEMBLER_MIPS64_H_
#define V8_CODEGEN_MIPS64_MACRO_ASSEMBLER_MIPS64_H_
#include "src/codegen/assembler.h"
#include "src/codegen/mips64/assembler-mips64.h"
#include "src/common/globals.h"
#include "src/mips64/assembler-mips64.h"
namespace v8 {
namespace internal {
@ -31,18 +31,11 @@ enum class AbortReason : uint8_t;
// trying to update gp register for position-independent-code. Whenever
// MIPS generated code calls C code, it must be via t9 register.
// Flags used for LeaveExitFrame function.
enum LeaveExitFrameMode {
EMIT_RETURN = true,
NO_EMIT_RETURN = false
};
enum LeaveExitFrameMode { EMIT_RETURN = true, NO_EMIT_RETURN = false };
// Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
enum BranchDelaySlot {
USE_DELAY_SLOT,
PROTECT
};
enum BranchDelaySlot { USE_DELAY_SLOT, PROTECT };
// Flags used for the li macro-assembler function.
enum LiFlags {
@ -66,8 +59,7 @@ enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
Register GetRegisterThatIsNotOneOf(Register reg1,
Register reg2 = no_reg,
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
Register reg3 = no_reg,
Register reg4 = no_reg,
Register reg5 = no_reg,
@ -82,23 +74,19 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
#define SmiWordOffset(offset) offset
#endif
inline MemOperand ContextMemOperand(Register context, int index) {
return MemOperand(context, Context::SlotOffset(index));
}
inline MemOperand NativeContextMemOperand() {
return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset) {
return MemOperand(object, offset - kHeapObjectTag);
}
// Generate a MemOperand for storing arguments 5..N on the stack
// when calling CallCFunction().
// TODO(plind): Currently ONLY used for O32. Should be fixed for
@ -153,22 +141,19 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Abort(AbortReason msg);
// Arguments macros.
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand &r2
#define COND_ARGS cond, r1, r2
// Cases when relocation is not needed.
#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
void Name(target_type target, BranchDelaySlot bd = PROTECT); \
inline void Name(BranchDelaySlot bd, target_type target) { \
Name(target, bd); \
} \
void Name(target_type target, \
COND_TYPED_ARGS, \
BranchDelaySlot bd = PROTECT); \
inline void Name(BranchDelaySlot bd, \
target_type target, \
COND_TYPED_ARGS) { \
Name(target, COND_ARGS, bd); \
#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
void Name(target_type target, BranchDelaySlot bd = PROTECT); \
inline void Name(BranchDelaySlot bd, target_type target) { \
Name(target, bd); \
} \
void Name(target_type target, COND_TYPED_ARGS, \
BranchDelaySlot bd = PROTECT); \
inline void Name(BranchDelaySlot bd, target_type target, COND_TYPED_ARGS) { \
Name(target, COND_ARGS, bd); \
}
#define DECLARE_BRANCH_PROTOTYPES(Name) \
@ -235,8 +220,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadRootRelative(Register destination, int32_t offset) override;
// Jump, Call, and Ret pseudo instructions implementing inter-working.
#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
#define COND_ARGS \
Condition cond = al, Register rs = zero_reg, \
const Operand &rt = Operand(zero_reg), \
BranchDelaySlot bd = PROTECT
void Jump(Register target, COND_ARGS);
void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
@ -244,8 +231,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
void Call(Register target, COND_ARGS);
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Call(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
COND_ARGS);
void Call(Label* target);
@ -274,25 +260,21 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Ret(COND_ARGS);
inline void Ret(BranchDelaySlot bd, Condition cond = al,
Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
Register rs = zero_reg,
const Operand& rt = Operand(zero_reg)) {
Ret(cond, rs, rt, bd);
}
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
void Drop(int count,
Condition cond = cc_always,
Register reg = no_reg,
void Drop(int count, Condition cond = cc_always, Register reg = no_reg,
const Operand& op = Operand(no_reg));
// Trivial case of DropAndRet that utilizes the delay slot and only emits
// 2 instructions.
void DropAndRet(int drop);
void DropAndRet(int drop,
Condition cond,
Register reg,
const Operand& op);
void DropAndRet(int drop, Condition cond, Register reg, const Operand& op);
void Ld(Register rd, const MemOperand& rs);
void Sd(Register rd, const MemOperand& rs);
@ -357,7 +339,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode, Address wasm_target);
void CallEphemeronKeyBarrier(Register object, Register address,
SaveFPRegsMode fp_mode);
SaveFPRegsMode fp_mode);
// Push multiple registers on the stack.
// Registers are saved in numerical order, with higher numbered registers
@ -1039,8 +1021,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
void LoadGlobalFunctionInitialMap(Register function,
Register map,
void LoadGlobalFunctionInitialMap(Register function, Register map,
Register scratch);
// -------------------------------------------------------------------------
@ -1079,9 +1060,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// -------------------------------------------------------------------------
// Support functions.
void GetObjectType(Register function,
Register map,
Register type_reg);
void GetObjectType(Register function, Register map, Register type_reg);
// -------------------------------------------------------------------------
// Runtime calls.
@ -1121,10 +1100,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// -------------------------------------------------------------------------
// StatsCounter support.
void IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2);
void DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2);
void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
Register scratch2);
void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
Register scratch2);
// -------------------------------------------------------------------------
// Smi utilities.
@ -1139,9 +1118,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
}
}
void SmiTag(Register reg) {
SmiTag(reg, reg);
}
void SmiTag(Register reg) { SmiTag(reg, reg); }
// Left-shifted from int32 equivalent of Smi.
void SmiScale(Register dst, Register src, int scale) {
@ -1161,9 +1138,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
}
// Jump if the register contains a non-smi.
void JumpIfNotSmi(Register value,
Label* not_smi_label,
Register scratch = at,
void JumpIfNotSmi(Register value, Label* not_smi_label, Register scratch = at,
BranchDelaySlot bd = PROTECT);
// Abort execution if argument is a smi, enabled via --debug-code.
@ -1188,12 +1163,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
template<typename Field>
template <typename Field>
void DecodeField(Register dst, Register src) {
Ext(dst, src, Field::kShift, Field::kSize);
}
template<typename Field>
template <typename Field>
void DecodeField(Register reg) {
DecodeField<Field>(reg, reg);
}
@ -1254,4 +1229,4 @@ void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
} // namespace internal
} // namespace v8
#endif // V8_MIPS64_MACRO_ASSEMBLER_MIPS64_H_
#endif // V8_CODEGEN_MIPS64_MACRO_ASSEMBLER_MIPS64_H_

View File

@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MIPS64_REGISTER_MIPS64_H_
#define V8_MIPS64_REGISTER_MIPS64_H_
#ifndef V8_CODEGEN_MIPS64_REGISTER_MIPS64_H_
#define V8_CODEGEN_MIPS64_REGISTER_MIPS64_H_
#include "src/codegen/mips64/constants-mips64.h"
#include "src/codegen/register.h"
#include "src/codegen/reglist.h"
#include "src/mips64/constants-mips64.h"
namespace v8 {
namespace internal {
@ -386,4 +386,4 @@ constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
} // namespace internal
} // namespace v8
#endif // V8_MIPS64_REGISTER_MIPS64_H_
#endif // V8_CODEGEN_MIPS64_REGISTER_MIPS64_H_

View File

@ -34,10 +34,10 @@
// significantly by Google Inc.
// Copyright 2014 the V8 project authors. All rights reserved.
#ifndef V8_PPC_ASSEMBLER_PPC_INL_H_
#define V8_PPC_ASSEMBLER_PPC_INL_H_
#ifndef V8_CODEGEN_PPC_ASSEMBLER_PPC_INL_H_
#define V8_CODEGEN_PPC_ASSEMBLER_PPC_INL_H_
#include "src/ppc/assembler-ppc.h"
#include "src/codegen/ppc/assembler-ppc.h"
#include "src/codegen/assembler.h"
#include "src/debug/debug.h"
@ -65,7 +65,6 @@ void RelocInfo::apply(intptr_t delta) {
}
}
Address RelocInfo::target_internal_reference() {
if (IsInternalReference(rmode_)) {
// Jump table entry
@ -77,13 +76,11 @@ Address RelocInfo::target_internal_reference() {
}
}
Address RelocInfo::target_internal_reference_address() {
DCHECK(IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
return pc_;
}
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
@ -112,7 +109,6 @@ Address RelocInfo::target_address_address() {
return pc_;
}
Address RelocInfo::constant_pool_entry_address() {
if (FLAG_enable_embedded_constant_pool) {
DCHECK(constant_pool_);
@ -124,7 +120,6 @@ Address RelocInfo::constant_pool_entry_address() {
UNREACHABLE();
}
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
HeapObject RelocInfo::target_object() {
@ -248,7 +243,6 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
UNREACHABLE();
}
#if V8_TARGET_ARCH_PPC64
const uint32_t kLoadIntptrOpcode = LD;
#else
@ -281,7 +275,6 @@ bool Assembler::IsConstantPoolLoadStart(Address pc,
return true;
}
bool Assembler::IsConstantPoolLoadEnd(Address pc,
ConstantPoolEntry::Access* access) {
Instr instr = instr_at(pc);
@ -303,7 +296,6 @@ bool Assembler::IsConstantPoolLoadEnd(Address pc,
return true;
}
int Assembler::GetConstantPoolOffset(Address pc,
ConstantPoolEntry::Access access,
ConstantPoolEntry::Type type) {
@ -325,7 +317,6 @@ int Assembler::GetConstantPoolOffset(Address pc,
return offset;
}
void Assembler::PatchConstantPoolAccessInstruction(
int pc_offset, int offset, ConstantPoolEntry::Access access,
ConstantPoolEntry::Type type) {
@ -359,7 +350,6 @@ void Assembler::PatchConstantPoolAccessInstruction(
}
}
Address Assembler::target_constant_pool_address_at(
Address pc, Address constant_pool, ConstantPoolEntry::Access access,
ConstantPoolEntry::Type type) {
@ -369,7 +359,6 @@ Address Assembler::target_constant_pool_address_at(
return addr;
}
// This sets the branch destination (which gets loaded at the call address).
// This is for calls and branches within generated code. The serializer
// has already deserialized the mov instructions etc.
@ -395,7 +384,6 @@ void Assembler::deserialization_set_target_internal_reference_at(
}
}
// This code assumes the FIXED_SEQUENCE of lis/ori
void Assembler::set_target_address_at(Address pc, Address constant_pool,
Address target,
@ -466,4 +454,4 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
} // namespace internal
} // namespace v8
#endif // V8_PPC_ASSEMBLER_PPC_INL_H_
#endif // V8_CODEGEN_PPC_ASSEMBLER_PPC_INL_H_

File diff suppressed because it is too large Load Diff

View File

@ -37,8 +37,8 @@
// A light-weight PPC Assembler
// Generates user mode instructions for the PPC architecture up
#ifndef V8_PPC_ASSEMBLER_PPC_H_
#define V8_PPC_ASSEMBLER_PPC_H_
#ifndef V8_CODEGEN_PPC_ASSEMBLER_PPC_H_
#define V8_CODEGEN_PPC_ASSEMBLER_PPC_H_
#include <stdio.h>
#include <vector>
@ -47,10 +47,10 @@
#include "src/codegen/constant-pool.h"
#include "src/codegen/external-reference.h"
#include "src/codegen/label.h"
#include "src/codegen/ppc/constants-ppc.h"
#include "src/codegen/ppc/register-ppc.h"
#include "src/numbers/double.h"
#include "src/objects/smi.h"
#include "src/ppc/constants-ppc.h"
#include "src/ppc/register-ppc.h"
namespace v8 {
namespace internal {
@ -135,18 +135,12 @@ class V8_EXPORT_PRIVATE MemOperand {
explicit MemOperand(Register ra, Register rb);
int32_t offset() const {
return offset_;
}
int32_t offset() const { return offset_; }
// PowerPC - base register
Register ra() const {
return ra_;
}
Register ra() const { return ra_; }
Register rb() const {
return rb_;
}
Register rb() const { return rb_; }
private:
Register ra_; // base
@ -172,7 +166,6 @@ class DeferredRelocInfo {
intptr_t data_;
};
class Assembler : public AssemblerBase {
public:
// Create an assembler. Instructions and relocation information are emitted
@ -309,21 +302,21 @@ class Assembler : public AssemblerBase {
}
#define DECLARE_PPC_X_INSTRUCTIONS_A_FORM(name, instr_name, instr_value) \
inline void name(const Register rt, const Register ra, \
const Register rb, const RCBit rc = LeaveRC) { \
inline void name(const Register rt, const Register ra, const Register rb, \
const RCBit rc = LeaveRC) { \
x_form(instr_name, rt, ra, rb, rc); \
}
#define DECLARE_PPC_X_INSTRUCTIONS_B_FORM(name, instr_name, instr_value) \
inline void name(const Register ra, const Register rs, \
const Register rb, const RCBit rc = LeaveRC) { \
inline void name(const Register ra, const Register rs, const Register rb, \
const RCBit rc = LeaveRC) { \
x_form(instr_name, rs, ra, rb, rc); \
}
#define DECLARE_PPC_X_INSTRUCTIONS_C_FORM(name, instr_name, instr_value) \
inline void name(const Register dst, const Register src, \
const RCBit rc = LeaveRC) { \
x_form(instr_name, src, dst, r0, rc); \
#define DECLARE_PPC_X_INSTRUCTIONS_C_FORM(name, instr_name, instr_value) \
inline void name(const Register dst, const Register src, \
const RCBit rc = LeaveRC) { \
x_form(instr_name, src, dst, r0, rc); \
}
#define DECLARE_PPC_X_INSTRUCTIONS_D_FORM(name, instr_name, instr_value) \
@ -337,10 +330,10 @@ class Assembler : public AssemblerBase {
name(dst, src.ra(), src.rb()); \
}
#define DECLARE_PPC_X_INSTRUCTIONS_E_FORM(name, instr_name, instr_value) \
inline void name(const Register dst, const Register src, \
const int sh, const RCBit rc = LeaveRC) { \
x_form(instr_name, src.code(), dst.code(), sh, rc); \
#define DECLARE_PPC_X_INSTRUCTIONS_E_FORM(name, instr_name, instr_value) \
inline void name(const Register dst, const Register src, const int sh, \
const RCBit rc = LeaveRC) { \
x_form(instr_name, src.code(), dst.code(), sh, rc); \
}
#define DECLARE_PPC_X_INSTRUCTIONS_F_FORM(name, instr_name, instr_value) \
@ -381,8 +374,8 @@ class Assembler : public AssemblerBase {
#else
int L = 0;
#endif
emit(instr | cr.code() * B23 | L * B21 | s1.code() * B16 |
s2.code() * B11 | rc);
emit(instr | cr.code() * B23 | L * B21 | s1.code() * B16 | s2.code() * B11 |
rc);
}
PPC_X_OPCODE_A_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_A_FORM)
@ -1321,4 +1314,4 @@ class PatchingAssembler : public Assembler {
} // namespace internal
} // namespace v8
#endif // V8_PPC_ASSEMBLER_PPC_H_
#endif // V8_CODEGEN_PPC_ASSEMBLER_PPC_H_

View File

@ -22,9 +22,7 @@
#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
} // namespace internal
namespace internal {} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_PPC

View File

@ -4,8 +4,7 @@
#if V8_TARGET_ARCH_PPC
#include "src/ppc/constants-ppc.h"
#include "src/codegen/ppc/constants-ppc.h"
namespace v8 {
namespace internal {
@ -17,13 +16,11 @@ const char* Registers::names_[kNumRegisters] = {
"r11", "ip", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", "r21",
"r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", "r30", "fp"};
const char* DoubleRegisters::names_[kNumDoubleRegisters] = {
"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10",
"d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21",
"d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
int DoubleRegisters::Number(const char* name) {
for (int i = 0; i < kNumDoubleRegisters; i++) {
if (strcmp(names_[i], name) == 0) {
@ -35,7 +32,6 @@ int DoubleRegisters::Number(const char* name) {
return kNoRegister;
}
int Registers::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumRegisters; i++) {

File diff suppressed because it is too large Load Diff

View File

@ -28,10 +28,10 @@ void CpuFeatures::FlushICache(void* buffer, size_t size) {
const int kCacheLineSize = CpuFeatures::icache_line_size();
intptr_t mask = kCacheLineSize - 1;
byte *start =
reinterpret_cast<byte *>(reinterpret_cast<intptr_t>(buffer) & ~mask);
byte *end = static_cast<byte *>(buffer) + size;
for (byte *pointer = start; pointer < end; pointer += kCacheLineSize) {
byte* start =
reinterpret_cast<byte*>(reinterpret_cast<intptr_t>(buffer) & ~mask);
byte* end = static_cast<byte*>(buffer) + size;
for (byte* pointer = start; pointer < end; pointer += kCacheLineSize) {
__asm__(
"dcbf 0, %0 \n"
"sync \n"

View File

@ -74,7 +74,6 @@ const Register ApiGetterDescriptor::CallbackRegister() { return r6; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r3; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r6; }
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return r3; }
@ -203,7 +202,6 @@ void CompareDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4, r3};

View File

@ -27,7 +27,7 @@
// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
#if 0
#include "src/ppc/macro-assembler-ppc.h"
#include "src/codegen/ppc/macro-assembler-ppc.h"
#endif
namespace v8 {
@ -673,7 +673,6 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
return index;
}
void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
// Turn potential sNaN into qNaN.
@ -1087,7 +1086,6 @@ int TurboAssembler::ActivationFrameAlignment() {
#endif
}
void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
bool argument_count_is_length) {
ConstantPoolUnavailableScope constant_pool_unavailable(this);
@ -1424,20 +1422,18 @@ void MacroAssembler::PushStackHandler() {
StoreP(sp, MemOperand(r3));
}
void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(r4);
Move(ip, ExternalReference::Create(IsolateAddressId::kHandlerAddress,
isolate()));
Move(ip,
ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
StoreP(r4, MemOperand(ip));
Drop(1); // Drop padding.
}
void MacroAssembler::CompareObjectType(Register object, Register map,
Register type_reg, InstanceType type) {
const Register temp = type_reg == no_reg ? r0 : type_reg;
@ -1446,7 +1442,6 @@ void MacroAssembler::CompareObjectType(Register object, Register map,
CompareInstanceType(map, temp, type);
}
void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
InstanceType type) {
STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
@ -1476,13 +1471,13 @@ void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left,
// C = A+B; C overflows if A/B have same sign and C has diff sign than A
if (dst == left) {
mr(scratch, left); // Preserve left.
add(dst, left, right); // Left is overwritten.
mr(scratch, left); // Preserve left.
add(dst, left, right); // Left is overwritten.
xor_(overflow_dst, dst, scratch, xorRC); // Original left.
if (!left_is_right) xor_(scratch, dst, right);
} else if (dst == right) {
mr(scratch, right); // Preserve right.
add(dst, left, right); // Right is overwritten.
mr(scratch, right); // Preserve right.
add(dst, left, right); // Right is overwritten.
xor_(overflow_dst, dst, left, xorRC);
if (!left_is_right) xor_(scratch, dst, scratch); // Original right.
} else {
@ -1661,7 +1656,6 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
JumpToExternalReference(ExternalReference::Create(fid));
}
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
Move(r4, builtin);
@ -1698,7 +1692,6 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
}
}
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK_GT(value, 0);
@ -1713,8 +1706,7 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
}
void TurboAssembler::Assert(Condition cond, AbortReason reason,
CRegister cr) {
void TurboAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) {
if (emit_debug_code()) Check(cond, reason, cr);
}
@ -1777,7 +1769,6 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
}
void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@ -1812,7 +1803,6 @@ void MacroAssembler::AssertFunction(Register object) {
}
}
void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@ -1868,7 +1858,6 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
}
static const int kRegisterPassedArguments = 8;
int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
@ -2015,7 +2004,6 @@ void TurboAssembler::CallCFunctionHelper(Register function,
}
}
void TurboAssembler::CheckPageFlag(
Register object,
Register scratch, // scratch may be same register as object
@ -2041,7 +2029,6 @@ void TurboAssembler::ResetRoundingMode() {
mtfsfi(7, kRoundToNearest); // reset (default is kRoundToNearest)
}
////////////////////////////////////////////////////////////////////////////////
//
// New MacroAssembler Interfaces added for PPC
@ -2178,7 +2165,6 @@ void TurboAssembler::MovInt64ToDouble(DoubleRegister dst,
addi(sp, sp, Operand(kDoubleSize));
}
#if V8_TARGET_ARCH_PPC64
void TurboAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
Register src_hi,
@ -2319,7 +2305,6 @@ void TurboAssembler::Add(Register dst, Register src, intptr_t value,
}
}
void TurboAssembler::Cmpi(Register src1, const Operand& src2, Register scratch,
CRegister cr) {
intptr_t value = src2.immediate();
@ -2353,7 +2338,6 @@ void TurboAssembler::Cmpwi(Register src1, const Operand& src2, Register scratch,
}
}
void MacroAssembler::Cmplwi(Register src1, const Operand& src2,
Register scratch, CRegister cr) {
intptr_t value = src2.immediate();
@ -2365,7 +2349,6 @@ void MacroAssembler::Cmplwi(Register src1, const Operand& src2,
}
}
void MacroAssembler::And(Register ra, Register rs, const Operand& rb,
RCBit rc) {
if (rb.is_reg()) {
@ -2383,7 +2366,6 @@ void MacroAssembler::And(Register ra, Register rs, const Operand& rb,
}
}
void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) {
if (rb.is_reg()) {
orx(ra, rs, rb.rm(), rc);
@ -2400,7 +2382,6 @@ void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) {
}
}
void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
RCBit rc) {
if (rb.is_reg()) {
@ -2468,7 +2449,6 @@ void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi,
#endif
}
// Load a "pointer" sized value from the memory location
void TurboAssembler::LoadP(Register dst, const MemOperand& mem,
Register scratch) {
@ -2593,7 +2573,6 @@ void TurboAssembler::LoadWordArith(Register dst, const MemOperand& mem,
}
}
// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
void MacroAssembler::LoadWord(Register dst, const MemOperand& mem,
@ -2609,7 +2588,6 @@ void MacroAssembler::LoadWord(Register dst, const MemOperand& mem,
}
}
// Variable length depending on whether offset fits into immediate field
// MemOperand current only supports d-form
void MacroAssembler::StoreWord(Register src, const MemOperand& mem,
@ -2625,7 +2603,6 @@ void MacroAssembler::StoreWord(Register src, const MemOperand& mem,
}
}
void MacroAssembler::LoadHalfWordArith(Register dst, const MemOperand& mem,
Register scratch) {
int offset = mem.offset();
@ -2639,7 +2616,6 @@ void MacroAssembler::LoadHalfWordArith(Register dst, const MemOperand& mem,
}
}
// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem,
@ -2656,7 +2632,6 @@ void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem,
}
}
// Variable length depending on whether offset fits into immediate field
// MemOperand current only supports d-form
void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
@ -2672,7 +2647,6 @@ void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
}
}
// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
void MacroAssembler::LoadByte(Register dst, const MemOperand& mem,
@ -2688,7 +2662,6 @@ void MacroAssembler::LoadByte(Register dst, const MemOperand& mem,
}
}
// Variable length depending on whether offset fits into immediate field
// MemOperand current only supports d-form
void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
@ -2718,7 +2691,7 @@ void TurboAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem,
}
void TurboAssembler::LoadFloat32(DoubleRegister dst, const MemOperand& mem,
Register scratch) {
Register scratch) {
Register base = mem.ra();
int offset = mem.offset();
@ -2731,7 +2704,7 @@ void TurboAssembler::LoadFloat32(DoubleRegister dst, const MemOperand& mem,
}
void MacroAssembler::LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
Register scratch) {
Register scratch) {
Register base = mem.ra();
int offset = mem.offset();
@ -2972,7 +2945,7 @@ void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
ShiftRightArithImm(builtin_pointer, builtin_pointer,
kSmiShift - kSystemPointerSizeLog2);
addi(builtin_pointer, builtin_pointer,
Operand(IsolateData::builtin_entry_table_offset()));
Operand(IsolateData::builtin_entry_table_offset()));
LoadPX(builtin_pointer, MemOperand(kRootRegister, builtin_pointer));
Call(builtin_pointer);
}
@ -3018,7 +2991,8 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
ShiftLeftImm(destination, scratch, Operand(kSystemPointerSizeLog2));
add(destination, destination, kRootRegister);
LoadP(destination,
MemOperand(destination, IsolateData::builtin_entry_table_offset()), r0);
MemOperand(destination, IsolateData::builtin_entry_table_offset()),
r0);
bind(&out);
} else {

View File

@ -6,14 +6,14 @@
#error This header must be included via macro-assembler.h
#endif
#ifndef V8_PPC_MACRO_ASSEMBLER_PPC_H_
#define V8_PPC_MACRO_ASSEMBLER_PPC_H_
#ifndef V8_CODEGEN_PPC_MACRO_ASSEMBLER_PPC_H_
#define V8_CODEGEN_PPC_MACRO_ASSEMBLER_PPC_H_
#include "src/codegen/bailout-reason.h"
#include "src/codegen/ppc/assembler-ppc.h"
#include "src/common/globals.h"
#include "src/numbers/double.h"
#include "src/objects/contexts.h"
#include "src/ppc/assembler-ppc.h"
namespace v8 {
namespace internal {
@ -30,7 +30,6 @@ enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
Register reg3 = no_reg,
Register reg4 = no_reg,
@ -152,7 +151,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadDouble(DoubleRegister dst, const MemOperand& mem,
Register scratch = no_reg);
void LoadFloat32(DoubleRegister dst, const MemOperand& mem,
Register scratch = no_reg);
Register scratch = no_reg);
void LoadDoubleLiteral(DoubleRegister result, Double value, Register scratch);
// load a literal signed int value <value> to GPR <dst>
@ -844,8 +843,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
@ -894,8 +891,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void AssertNotSmi(Register object);
void AssertSmi(Register object);
#if V8_TARGET_ARCH_PPC64
// Ensure it is permissible to read/write int value directly from
// upper half of the smi.
@ -965,7 +960,6 @@ inline MemOperand ContextMemOperand(Register context, int index = 0) {
return MemOperand(context, Context::SlotOffset(index));
}
inline MemOperand NativeContextMemOperand() {
return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
@ -975,4 +969,4 @@ inline MemOperand NativeContextMemOperand() {
} // namespace internal
} // namespace v8
#endif // V8_PPC_MACRO_ASSEMBLER_PPC_H_
#endif // V8_CODEGEN_PPC_MACRO_ASSEMBLER_PPC_H_

View File

@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_PPC_REGISTER_PPC_H_
#define V8_PPC_REGISTER_PPC_H_
#ifndef V8_CODEGEN_PPC_REGISTER_PPC_H_
#define V8_CODEGEN_PPC_REGISTER_PPC_H_
#include "src/codegen/register.h"
#include "src/codegen/reglist.h"
@ -145,8 +145,9 @@ const int kNumSafepointRegisters = 32;
// The following constants describe the stack frame linkage area as
// defined by the ABI. Note that kNumRequiredStackFrameSlots must
// satisfy alignment requirements (rounding up if required).
#if V8_TARGET_ARCH_PPC64 && (V8_TARGET_LITTLE_ENDIAN || \
(defined(_CALL_ELF) && _CALL_ELF == 2)) // ELFv2 ABI
#if V8_TARGET_ARCH_PPC64 && \
(V8_TARGET_LITTLE_ENDIAN || \
(defined(_CALL_ELF) && _CALL_ELF == 2)) // ELFv2 ABI
// [0] back chain
// [1] condition register save area
// [2] link register save area
@ -319,4 +320,4 @@ constexpr Register kWasmCompileLazyFuncIndexRegister = r15;
} // namespace internal
} // namespace v8
#endif // V8_PPC_REGISTER_PPC_H_
#endif // V8_CODEGEN_PPC_REGISTER_PPC_H_

View File

@ -9,21 +9,21 @@
#include "src/codegen/reglist.h"
#if V8_TARGET_ARCH_IA32
#include "src/ia32/register-ia32.h"
#include "src/codegen/ia32/register-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "src/x64/register-x64.h"
#include "src/codegen/x64/register-x64.h"
#elif V8_TARGET_ARCH_ARM64
#include "src/arm64/register-arm64.h"
#include "src/codegen/arm64/register-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "src/arm/register-arm.h"
#include "src/codegen/arm/register-arm.h"
#elif V8_TARGET_ARCH_PPC
#include "src/ppc/register-ppc.h"
#include "src/codegen/ppc/register-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/register-mips.h"
#include "src/codegen/mips/register-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/register-mips64.h"
#include "src/codegen/mips64/register-mips64.h"
#elif V8_TARGET_ARCH_S390
#include "src/s390/register-s390.h"
#include "src/codegen/s390/register-s390.h"
#else
#error Unknown architecture.
#endif

View File

@ -34,10 +34,10 @@
// significantly by Google Inc.
// Copyright 2014 the V8 project authors. All rights reserved.
#ifndef V8_S390_ASSEMBLER_S390_INL_H_
#define V8_S390_ASSEMBLER_S390_INL_H_
#ifndef V8_CODEGEN_S390_ASSEMBLER_S390_INL_H_
#define V8_CODEGEN_S390_ASSEMBLER_S390_INL_H_
#include "src/s390/assembler-s390.h"
#include "src/codegen/s390/assembler-s390.h"
#include "src/codegen/assembler.h"
#include "src/debug/debug.h"
@ -112,9 +112,7 @@ Address RelocInfo::target_address_address() {
return pc_;
}
Address RelocInfo::constant_pool_entry_address() {
UNREACHABLE();
}
Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); }
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
@ -342,4 +340,4 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
} // namespace internal
} // namespace v8
#endif // V8_S390_ASSEMBLER_S390_INL_H_
#endif // V8_CODEGEN_S390_ASSEMBLER_S390_INL_H_

View File

@ -34,7 +34,7 @@
// modified significantly by Google Inc.
// Copyright 2014 the V8 project authors. All rights reserved.
#include "src/s390/assembler-s390.h"
#include "src/codegen/s390/assembler-s390.h"
#include <sys/auxv.h>
#include <set>
#include <string>
@ -48,9 +48,9 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/s390/assembler-s390-inl.h"
#include "src/codegen/string-constants.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/s390/assembler-s390-inl.h"
namespace v8 {
namespace internal {
@ -99,6 +99,9 @@ static bool supportsCPUFeature(const char* feature) {
return features.find(feature) != features.end();
}
#undef CHECK_AVAILABILITY_FOR
#undef HWCAP_S390_VX
// Check whether Store Facility STFLE instruction is available on the platform.
// Instruction returns a bit vector of the enabled hardware facilities.
static bool supportsSTFLE() {
@ -474,9 +477,9 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
Opcode opcode = Instruction::S390OpcodeValue(buffer_start_ + pos);
if (is_branch != nullptr) {
*is_branch = (opcode == BRC || opcode == BRCT || opcode == BRCTG ||
opcode == BRCL || opcode == BRASL || opcode == BRXH ||
opcode == BRXHG);
*is_branch =
(opcode == BRC || opcode == BRCT || opcode == BRCTG || opcode == BRCL ||
opcode == BRASL || opcode == BRXH || opcode == BRXHG);
}
if (BRC == opcode || BRCT == opcode || BRCTG == opcode || BRXH == opcode) {
@ -502,8 +505,8 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
} else if (BRXHG == opcode) {
// Immediate is in bits 16-31 of 48 bit instruction
int32_t imm16 = target_pos - pos;
instr &= (0xFFFF0000FFFF); // clear bits 16-31
imm16 &= 0xFFFF; // clear high halfword
instr &= (0xFFFF0000FFFF); // clear bits 16-31
imm16 &= 0xFFFF; // clear high halfword
imm16 <<= 16;
// Immediate is in # of halfwords
instr_at_put<SixByteInstr>(pos, instr | (imm16 >> 1));
@ -517,7 +520,7 @@ int Assembler::max_reach_from(int pos) {
Opcode opcode = Instruction::S390OpcodeValue(buffer_start_ + pos);
// Check which type of instr. In theory, we can return
// the values below + 1, given offset is # of halfwords
if (BRC == opcode || BRCT == opcode || BRCTG == opcode|| BRXH == opcode ||
if (BRC == opcode || BRCT == opcode || BRCTG == opcode || BRXH == opcode ||
BRXHG == opcode) {
return 16;
} else if (LLILF == opcode || BRCL == opcode || LARL == opcode ||

View File

@ -37,8 +37,8 @@
// A light-weight S390 Assembler
// Generates user mode instructions for z/Architecture
#ifndef V8_S390_ASSEMBLER_S390_H_
#define V8_S390_ASSEMBLER_S390_H_
#ifndef V8_CODEGEN_S390_ASSEMBLER_S390_H_
#define V8_CODEGEN_S390_ASSEMBLER_S390_H_
#include <stdio.h>
#if V8_HOST_ARCH_S390
// elf.h include is required for auxv check for STFLE facility used
@ -53,9 +53,9 @@
#include "src/codegen/assembler.h"
#include "src/codegen/external-reference.h"
#include "src/codegen/label.h"
#include "src/codegen/s390/constants-s390.h"
#include "src/codegen/s390/register-s390.h"
#include "src/objects/smi.h"
#include "src/s390/constants-s390.h"
#include "src/s390/register-s390.h"
#define ABI_USES_FUNCTION_DESCRIPTORS 0
@ -402,8 +402,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
#define DECLARE_S390_RX_INSTRUCTIONS(name, op_name, op_value) \
template <class R1> \
inline void name(R1 r1, Register x2, Register b2, const Operand& d2) { \
rx_format(op_name, r1.code(), x2.code(), b2.code(), \
d2.immediate()); \
rx_format(op_name, r1.code(), x2.code(), b2.code(), d2.immediate()); \
} \
template <class R1> \
inline void name(R1 r1, const MemOperand& opnd) { \
@ -414,17 +413,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
inline void rx_format(Opcode opcode, int f1, int f2, int f3, int f4) {
DCHECK(is_uint8(opcode));
DCHECK(is_uint12(f4));
emit4bytes(getfield<uint32_t, 4, 0, 8>(opcode) |
getfield<uint32_t, 4, 8, 12>(f1) |
getfield<uint32_t, 4, 12, 16>(f2) |
getfield<uint32_t, 4, 16, 20>(f3) |
getfield<uint32_t, 4, 20, 32>(f4));
emit4bytes(
getfield<uint32_t, 4, 0, 8>(opcode) | getfield<uint32_t, 4, 8, 12>(f1) |
getfield<uint32_t, 4, 12, 16>(f2) | getfield<uint32_t, 4, 16, 20>(f3) |
getfield<uint32_t, 4, 20, 32>(f4));
}
S390_RX_A_OPCODE_LIST(DECLARE_S390_RX_INSTRUCTIONS)
void bc(Condition cond, const MemOperand& opnd) {
bc(cond, opnd.getIndexRegister(),
opnd.getBaseRegister(), Operand(opnd.getDisplacement()));
bc(cond, opnd.getIndexRegister(), opnd.getBaseRegister(),
Operand(opnd.getDisplacement()));
}
void bc(Condition cond, Register x2, Register b2, const Operand& d2) {
rx_format(BC, cond, x2.code(), b2.code(), d2.immediate());
@ -456,90 +454,84 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
S390_RXY_A_OPCODE_LIST(DECLARE_S390_RXY_INSTRUCTIONS)
void pfd(Condition cond, const MemOperand& opnd) {
pfd(cond, opnd.getIndexRegister(),
opnd.getBaseRegister(), Operand(opnd.getDisplacement()));
pfd(cond, opnd.getIndexRegister(), opnd.getBaseRegister(),
Operand(opnd.getDisplacement()));
}
void pfd(Condition cond, Register x2, Register b2, const Operand& d2) {
rxy_format(PFD, cond, x2.code(), b2.code(), d2.immediate());
}
#undef DECLARE_S390_RXY_INSTRUCTIONS
inline void rsy_format(Opcode op, int f1, int f2, int f3, int f4) {
DCHECK(is_int20(f4));
DCHECK(is_uint16(op));
uint64_t code =
(getfield<uint64_t, 6, 0, 8>(op >> 8) |
getfield<uint64_t, 6, 8, 12>(f1) | getfield<uint64_t, 6, 12, 16>(f2) |
getfield<uint64_t, 6, 16, 20>(f3) |
getfield<uint64_t, 6, 20, 32>(f4 & 0x0fff) |
getfield<uint64_t, 6, 32, 40>(f4 >> 12) |
getfield<uint64_t, 6, 40, 48>(op & 0xff));
emit6bytes(code);
}
inline void rsy_format(Opcode op, int f1, int f2, int f3, int f4) {
DCHECK(is_int20(f4));
DCHECK(is_uint16(op));
uint64_t code = (getfield<uint64_t, 6, 0, 8>(op >> 8) |
getfield<uint64_t, 6, 8, 12>(f1) |
getfield<uint64_t, 6, 12, 16>(f2) |
getfield<uint64_t, 6, 16, 20>(f3) |
getfield<uint64_t, 6, 20, 32>(f4 & 0x0fff) |
getfield<uint64_t, 6, 32, 40>(f4 >> 12) |
getfield<uint64_t, 6, 40, 48>(op & 0xff));
emit6bytes(code);
}
#define DECLARE_S390_RSY_A_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r1, Register r3, Register b2, \
const Operand& d2 = Operand::Zero()) { \
rsy_format(op_name, r1.code(), r3.code(), b2.code(), d2.immediate()); \
} \
void name(Register r1, Register r3, Operand d2) { \
name(r1, r3, r0, d2); \
} \
void name(Register r1, Register r3, const MemOperand& opnd) { \
name(r1, r3, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
#define DECLARE_S390_RSY_A_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r1, Register r3, Register b2, \
const Operand& d2 = Operand::Zero()) { \
rsy_format(op_name, r1.code(), r3.code(), b2.code(), d2.immediate()); \
} \
void name(Register r1, Register r3, Operand d2) { name(r1, r3, r0, d2); } \
void name(Register r1, Register r3, const MemOperand& opnd) { \
name(r1, r3, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
}
S390_RSY_A_OPCODE_LIST(DECLARE_S390_RSY_A_INSTRUCTIONS)
#undef DECLARE_S390_RSY_A_INSTRUCTIONS
#define DECLARE_S390_RSY_B_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r1, Condition m3, Register b2, const Operand& d2) { \
rsy_format(op_name, r1.code(), m3, b2.code(), d2.immediate()); \
} \
void name(Register r1, Condition m3, const MemOperand& opnd) { \
name(r1, m3, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
#define DECLARE_S390_RSY_B_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r1, Condition m3, Register b2, const Operand& d2) { \
rsy_format(op_name, r1.code(), m3, b2.code(), d2.immediate()); \
} \
void name(Register r1, Condition m3, const MemOperand& opnd) { \
name(r1, m3, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
}
S390_RSY_B_OPCODE_LIST(DECLARE_S390_RSY_B_INSTRUCTIONS)
#undef DECLARE_S390_RSY_B_INSTRUCTIONS
inline void rs_format(Opcode op, int f1, int f2, int f3, const int f4) {
uint32_t code =
getfield<uint32_t, 4, 0, 8>(op) | getfield<uint32_t, 4, 8, 12>(f1) |
getfield<uint32_t, 4, 12, 16>(f2) | getfield<uint32_t, 4, 16, 20>(f3) |
getfield<uint32_t, 4, 20, 32>(f4);
emit4bytes(code);
}
inline void rs_format(Opcode op, int f1, int f2, int f3, const int f4) {
uint32_t code = getfield<uint32_t, 4, 0, 8>(op) |
getfield<uint32_t, 4, 8, 12>(f1) |
getfield<uint32_t, 4, 12, 16>(f2) |
getfield<uint32_t, 4, 16, 20>(f3) |
getfield<uint32_t, 4, 20, 32>(f4);
emit4bytes(code);
}
#define DECLARE_S390_RS_A_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r1, Register r3, Register b2, const Operand& d2) { \
rs_format(op_name, r1.code(), r3.code(), b2.code(), d2.immediate()); \
} \
void name(Register r1, Register r3, const MemOperand& opnd) { \
name(r1, r3, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
#define DECLARE_S390_RS_A_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r1, Register r3, Register b2, const Operand& d2) { \
rs_format(op_name, r1.code(), r3.code(), b2.code(), d2.immediate()); \
} \
void name(Register r1, Register r3, const MemOperand& opnd) { \
name(r1, r3, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
}
S390_RS_A_OPCODE_LIST(DECLARE_S390_RS_A_INSTRUCTIONS)
#undef DECLARE_S390_RS_A_INSTRUCTIONS
#define DECLARE_S390_RS_B_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r1, Condition m3, Register b2, const Operand& d2) { \
rs_format(op_name, r1.code(), m3, b2.code(), d2.immediate()); \
} \
void name(Register r1, Condition m3, const MemOperand& opnd) { \
name(r1, m3, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
#define DECLARE_S390_RS_B_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r1, Condition m3, Register b2, const Operand& d2) { \
rs_format(op_name, r1.code(), m3, b2.code(), d2.immediate()); \
} \
void name(Register r1, Condition m3, const MemOperand& opnd) { \
name(r1, m3, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
}
S390_RS_B_OPCODE_LIST(DECLARE_S390_RS_B_INSTRUCTIONS)
#undef DECLARE_S390_RS_B_INSTRUCTIONS
#define DECLARE_S390_RS_SHIFT_FORMAT(name, opcode) \
void name(Register r1, Register r2, const Operand& opnd = \
Operand::Zero()) { \
DCHECK(r2 != r0); \
rs_format(opcode, r1.code(), r0.code(), r2.code(), opnd.immediate()); \
} \
void name(Register r1, const Operand& opnd) { \
rs_format(opcode, r1.code(), r0.code(), r0.code(), opnd.immediate()); \
#define DECLARE_S390_RS_SHIFT_FORMAT(name, opcode) \
void name(Register r1, Register r2, const Operand& opnd = Operand::Zero()) { \
DCHECK(r2 != r0); \
rs_format(opcode, r1.code(), r0.code(), r2.code(), opnd.immediate()); \
} \
void name(Register r1, const Operand& opnd) { \
rs_format(opcode, r1.code(), r0.code(), r0.code(), opnd.immediate()); \
}
DECLARE_S390_RS_SHIFT_FORMAT(sll, SLL)
DECLARE_S390_RS_SHIFT_FORMAT(srl, SRL)
@ -550,448 +542,408 @@ inline void rs_format(Opcode op, int f1, int f2, int f3, const int f4) {
DECLARE_S390_RS_SHIFT_FORMAT(srdl, SRDL)
#undef DECLARE_S390_RS_SHIFT_FORMAT
inline void rxe_format(Opcode op, int f1, int f2, int f3, int f4,
int f5 = 0) {
DCHECK(is_uint12(f4));
DCHECK(is_uint16(op));
uint64_t code =
(getfield<uint64_t, 6, 0, 8>(op >> 8) |
getfield<uint64_t, 6, 8, 12>(f1) | getfield<uint64_t, 6, 12, 16>(f2) |
getfield<uint64_t, 6, 16, 20>(f3) |
getfield<uint64_t, 6, 20, 32>(f4 & 0x0fff) |
getfield<uint64_t, 6, 32, 36>(f5) |
getfield<uint64_t, 6, 40, 48>(op & 0xff));
emit6bytes(code);
}
inline void rxe_format(Opcode op, int f1, int f2, int f3, int f4, int f5 = 0) {
DCHECK(is_uint12(f4));
DCHECK(is_uint16(op));
uint64_t code = (getfield<uint64_t, 6, 0, 8>(op >> 8) |
getfield<uint64_t, 6, 8, 12>(f1) |
getfield<uint64_t, 6, 12, 16>(f2) |
getfield<uint64_t, 6, 16, 20>(f3) |
getfield<uint64_t, 6, 20, 32>(f4 & 0x0fff) |
getfield<uint64_t, 6, 32, 36>(f5) |
getfield<uint64_t, 6, 40, 48>(op & 0xff));
emit6bytes(code);
}
#define DECLARE_S390_RXE_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r1, Register x2, Register b2, const Operand& d2, \
Condition m3 = static_cast<Condition>(0)) { \
rxe_format(op_name, r1.code(), x2.code(), b2.code(), d2.immediate(), \
m3); \
} \
template<class _R1Type> \
void name(_R1Type r1, const MemOperand& opnd) { \
name(Register::from_code(r1.code()), opnd.rx(), opnd.rb(), \
Operand(opnd.offset())); \
#define DECLARE_S390_RXE_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r1, Register x2, Register b2, const Operand& d2, \
Condition m3 = static_cast<Condition>(0)) { \
rxe_format(op_name, r1.code(), x2.code(), b2.code(), d2.immediate(), m3); \
} \
template <class _R1Type> \
void name(_R1Type r1, const MemOperand& opnd) { \
name(Register::from_code(r1.code()), opnd.rx(), opnd.rb(), \
Operand(opnd.offset())); \
}
S390_RXE_OPCODE_LIST(DECLARE_S390_RXE_INSTRUCTIONS)
#undef DECLARE_S390_RXE_INSTRUCTIONS
inline void ri_format(Opcode opcode, int f1, int f2) {
uint32_t op1 = opcode >> 4;
uint32_t op2 = opcode & 0xf;
emit4bytes(
getfield<uint32_t, 4, 0, 8>(op1) | getfield<uint32_t, 4, 8, 12>(f1) |
getfield<uint32_t, 4, 12, 16>(op2) | getfield<uint32_t, 4, 16, 32>(f2));
}
inline void ri_format(Opcode opcode, int f1, int f2) {
uint32_t op1 = opcode >> 4;
uint32_t op2 = opcode & 0xf;
emit4bytes(getfield<uint32_t, 4, 0, 8>(op1) |
getfield<uint32_t, 4, 8, 12>(f1) |
getfield<uint32_t, 4, 12, 16>(op2) |
getfield<uint32_t, 4, 16, 32>(f2));
}
#define DECLARE_S390_RI_A_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r, const Operand& i2) { \
DCHECK(is_uint12(op_name)); \
DCHECK(is_uint16(i2.immediate()) || is_int16(i2.immediate())); \
ri_format(op_name, r.code(), i2.immediate()); \
#define DECLARE_S390_RI_A_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r, const Operand& i2) { \
DCHECK(is_uint12(op_name)); \
DCHECK(is_uint16(i2.immediate()) || is_int16(i2.immediate())); \
ri_format(op_name, r.code(), i2.immediate()); \
}
S390_RI_A_OPCODE_LIST(DECLARE_S390_RI_A_INSTRUCTIONS)
#undef DECLARE_S390_RI_A_INSTRUCTIONS
#define DECLARE_S390_RI_B_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r1, const Operand& imm) { \
/* 2nd argument encodes # of halfwords, so divide by 2. */ \
int16_t numHalfwords = static_cast<int16_t>(imm.immediate()) / 2; \
Operand halfwordOp = Operand(numHalfwords); \
halfwordOp.setBits(16); \
ri_format(op_name, r1.code(), halfwordOp.immediate()); \
#define DECLARE_S390_RI_B_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r1, const Operand& imm) { \
/* 2nd argument encodes # of halfwords, so divide by 2. */ \
int16_t numHalfwords = static_cast<int16_t>(imm.immediate()) / 2; \
Operand halfwordOp = Operand(numHalfwords); \
halfwordOp.setBits(16); \
ri_format(op_name, r1.code(), halfwordOp.immediate()); \
}
S390_RI_B_OPCODE_LIST(DECLARE_S390_RI_B_INSTRUCTIONS)
#undef DECLARE_S390_RI_B_INSTRUCTIONS
#define DECLARE_S390_RI_C_INSTRUCTIONS(name, op_name, op_value) \
void name(Condition m, const Operand& i2) { \
DCHECK(is_uint12(op_name)); \
DCHECK(is_uint4(m)); \
DCHECK(op_name == BRC ? \
is_int16(i2.immediate()) : is_uint16(i2.immediate())); \
ri_format(op_name, m, i2.immediate()); \
#define DECLARE_S390_RI_C_INSTRUCTIONS(name, op_name, op_value) \
void name(Condition m, const Operand& i2) { \
DCHECK(is_uint12(op_name)); \
DCHECK(is_uint4(m)); \
DCHECK(op_name == BRC ? is_int16(i2.immediate()) \
: is_uint16(i2.immediate())); \
ri_format(op_name, m, i2.immediate()); \
}
S390_RI_C_OPCODE_LIST(DECLARE_S390_RI_C_INSTRUCTIONS)
#undef DECLARE_S390_RI_C_INSTRUCTIONS
inline void rrf_format(Opcode op, int f1, int f2, int f3, int f4) {
uint32_t code =
getfield<uint32_t, 4, 0, 16>(op) | getfield<uint32_t, 4, 16, 20>(f1) |
getfield<uint32_t, 4, 20, 24>(f2) | getfield<uint32_t, 4, 24, 28>(f3) |
getfield<uint32_t, 4, 28, 32>(f4);
emit4bytes(code);
}
inline void rrf_format(Opcode op, int f1, int f2, int f3, int f4) {
uint32_t code = getfield<uint32_t, 4, 0, 16>(op) |
getfield<uint32_t, 4, 16, 20>(f1) |
getfield<uint32_t, 4, 20, 24>(f2) |
getfield<uint32_t, 4, 24, 28>(f3) |
getfield<uint32_t, 4, 28, 32>(f4);
emit4bytes(code);
}
#define DECLARE_S390_RRF_A_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r1, Condition m4, Register r2, Register r3) { \
rrf_format(op_name, r3.code(), m4, r1.code(), r2.code()); \
} \
void name(Register r1, Register r2, Register r3) { \
name(r1, Condition(0), r2, r3); \
#define DECLARE_S390_RRF_A_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r1, Condition m4, Register r2, Register r3) { \
rrf_format(op_name, r3.code(), m4, r1.code(), r2.code()); \
} \
void name(Register r1, Register r2, Register r3) { \
name(r1, Condition(0), r2, r3); \
}
S390_RRF_A_OPCODE_LIST(DECLARE_S390_RRF_A_INSTRUCTIONS)
#undef DECLARE_S390_RRF_A_INSTRUCTIONS
#define DECLARE_S390_RRF_B_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r1, Condition m4, Register r2, Register r3) { \
rrf_format(op_name, r3.code(), m4, r1.code(), r2.code()); \
} \
void name(Register r1, Register r2, Register r3) { \
name(r1, Condition(0), r2, r3); \
#define DECLARE_S390_RRF_B_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r1, Condition m4, Register r2, Register r3) { \
rrf_format(op_name, r3.code(), m4, r1.code(), r2.code()); \
} \
void name(Register r1, Register r2, Register r3) { \
name(r1, Condition(0), r2, r3); \
}
S390_RRF_B_OPCODE_LIST(DECLARE_S390_RRF_B_INSTRUCTIONS)
#undef DECLARE_S390_RRF_B_INSTRUCTIONS
#define DECLARE_S390_RRF_C_INSTRUCTIONS(name, op_name, op_value) \
template <class R1, class R2> \
void name(Condition m3, Condition m4, R1 r1, R2 r2) { \
rrf_format(op_name, m3, m4, r1.code(), r2.code()); \
} \
template <class R1, class R2> \
void name(Condition m3, R1 r1, R2 r2) { \
name(m3, Condition(0), r1, r2); \
#define DECLARE_S390_RRF_C_INSTRUCTIONS(name, op_name, op_value) \
template <class R1, class R2> \
void name(Condition m3, Condition m4, R1 r1, R2 r2) { \
rrf_format(op_name, m3, m4, r1.code(), r2.code()); \
} \
template <class R1, class R2> \
void name(Condition m3, R1 r1, R2 r2) { \
name(m3, Condition(0), r1, r2); \
}
S390_RRF_C_OPCODE_LIST(DECLARE_S390_RRF_C_INSTRUCTIONS)
#undef DECLARE_S390_RRF_C_INSTRUCTIONS
#define DECLARE_S390_RRF_D_INSTRUCTIONS(name, op_name, op_value) \
template <class R1, class R2> \
void name(Condition m3, Condition m4, R1 r1, R2 r2) { \
rrf_format(op_name, m3, m4, r1.code(), r2.code()); \
} \
template <class R1, class R2> \
void name(Condition m3, R1 r1, R2 r2) { \
name(m3, Condition(0), r1, r2); \
#define DECLARE_S390_RRF_D_INSTRUCTIONS(name, op_name, op_value) \
template <class R1, class R2> \
void name(Condition m3, Condition m4, R1 r1, R2 r2) { \
rrf_format(op_name, m3, m4, r1.code(), r2.code()); \
} \
template <class R1, class R2> \
void name(Condition m3, R1 r1, R2 r2) { \
name(m3, Condition(0), r1, r2); \
}
S390_RRF_D_OPCODE_LIST(DECLARE_S390_RRF_D_INSTRUCTIONS)
#undef DECLARE_S390_RRF_D_INSTRUCTIONS
#define DECLARE_S390_RRF_E_INSTRUCTIONS(name, op_name, op_value) \
template <class M3, class M4, class R1, class R2> \
void name(M3 m3, M4 m4, R1 r1, R2 r2) { \
rrf_format(op_name, m3, m4, r1.code(), r2.code()); \
} \
template <class M3, class R1, class R2> \
void name(M3 m3, R1 r1, R2 r2) { \
name(m3, Condition(0), r1, r2); \
#define DECLARE_S390_RRF_E_INSTRUCTIONS(name, op_name, op_value) \
template <class M3, class M4, class R1, class R2> \
void name(M3 m3, M4 m4, R1 r1, R2 r2) { \
rrf_format(op_name, m3, m4, r1.code(), r2.code()); \
} \
template <class M3, class R1, class R2> \
void name(M3 m3, R1 r1, R2 r2) { \
name(m3, Condition(0), r1, r2); \
}
S390_RRF_E_OPCODE_LIST(DECLARE_S390_RRF_E_INSTRUCTIONS)
#undef DECLARE_S390_RRF_E_INSTRUCTIONS
enum FIDBRA_FLAGS {
FIDBRA_CURRENT_ROUNDING_MODE = 0,
FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0 = 1,
// ...
FIDBRA_ROUND_TOWARD_0 = 5,
FIDBRA_ROUND_TOWARD_POS_INF = 6,
FIDBRA_ROUND_TOWARD_NEG_INF = 7
};
enum FIDBRA_FLAGS {
FIDBRA_CURRENT_ROUNDING_MODE = 0,
FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0 = 1,
// ...
FIDBRA_ROUND_TOWARD_0 = 5,
FIDBRA_ROUND_TOWARD_POS_INF = 6,
FIDBRA_ROUND_TOWARD_NEG_INF = 7
};
inline void rsi_format(Opcode op, int f1, int f2, int f3) {
DCHECK(is_uint8(op));
DCHECK(is_uint16(f3) || is_int16(f3));
uint32_t code =
getfield<uint32_t, 4, 0, 8>(op) | getfield<uint32_t, 4, 8, 12>(f1) |
getfield<uint32_t, 4, 12, 16>(f2) | getfield<uint32_t, 4, 16, 32>(f3);
emit4bytes(code);
}
inline void rsi_format(Opcode op, int f1, int f2, int f3) {
DCHECK(is_uint8(op));
DCHECK(is_uint16(f3) || is_int16(f3));
uint32_t code = getfield<uint32_t, 4, 0, 8>(op) |
getfield<uint32_t, 4, 8, 12>(f1) |
getfield<uint32_t, 4, 12, 16>(f2) |
getfield<uint32_t, 4, 16, 32>(f3);
emit4bytes(code);
}
#define DECLARE_S390_RSI_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r1, Register r3, const Operand& i2) { \
rsi_format(op_name, r1.code(), r3.code(), i2.immediate()); \
#define DECLARE_S390_RSI_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r1, Register r3, const Operand& i2) { \
rsi_format(op_name, r1.code(), r3.code(), i2.immediate()); \
}
S390_RSI_OPCODE_LIST(DECLARE_S390_RSI_INSTRUCTIONS)
#undef DECLARE_S390_RSI_INSTRUCTIONS
inline void rsl_format(Opcode op, uint16_t f1, int f2, int f3, int f4,
int f5) {
DCHECK(is_uint16(op));
uint64_t code =
getfield<uint64_t, 6, 0, 8>(op >> 8) |
getfield<uint64_t, 6, 8, 16>(f1) | getfield<uint64_t, 6, 16, 20>(f2) |
getfield<uint64_t, 6, 20, 32>(f3) | getfield<uint64_t, 6, 32, 36>(f4) |
getfield<uint64_t, 6, 36, 40>(f5) |
getfield<uint64_t, 6, 40, 48>(op & 0x00FF);
emit6bytes(code);
}
inline void rsl_format(Opcode op, uint16_t f1, int f2, int f3, int f4,
int f5) {
DCHECK(is_uint16(op));
uint64_t code = getfield<uint64_t, 6, 0, 8>(op >> 8) |
getfield<uint64_t, 6, 8, 16>(f1) |
getfield<uint64_t, 6, 16, 20>(f2) |
getfield<uint64_t, 6, 20, 32>(f3) |
getfield<uint64_t, 6, 32, 36>(f4) |
getfield<uint64_t, 6, 36, 40>(f5) |
getfield<uint64_t, 6, 40, 48>(op & 0x00FF);
emit6bytes(code);
}
#define DECLARE_S390_RSL_A_INSTRUCTIONS(name, op_name, op_value) \
void name(const Operand& l1, Register b1, const Operand& d1) { \
uint16_t L = static_cast<uint16_t>(l1.immediate() << 8); \
rsl_format(op_name, L, b1.code(), d1.immediate(), 0, 0); \
#define DECLARE_S390_RSL_A_INSTRUCTIONS(name, op_name, op_value) \
void name(const Operand& l1, Register b1, const Operand& d1) { \
uint16_t L = static_cast<uint16_t>(l1.immediate() << 8); \
rsl_format(op_name, L, b1.code(), d1.immediate(), 0, 0); \
}
S390_RSL_A_OPCODE_LIST(DECLARE_S390_RSL_A_INSTRUCTIONS)
#undef DECLARE_S390_RSL_A_INSTRUCTIONS
#define DECLARE_S390_RSL_B_INSTRUCTIONS(name, op_name, op_value) \
void name(const Operand& l2, Register b2, const Operand& d2, \
Register r1, Condition m3) { \
uint16_t L = static_cast<uint16_t>(l2.immediate()); \
rsl_format(op_name, L, b2.code(), d2.immediate(), r1.code(), m3); \
#define DECLARE_S390_RSL_B_INSTRUCTIONS(name, op_name, op_value) \
void name(const Operand& l2, Register b2, const Operand& d2, Register r1, \
Condition m3) { \
uint16_t L = static_cast<uint16_t>(l2.immediate()); \
rsl_format(op_name, L, b2.code(), d2.immediate(), r1.code(), m3); \
}
S390_RSL_B_OPCODE_LIST(DECLARE_S390_RSL_B_INSTRUCTIONS)
#undef DECLARE_S390_RSL_B_INSTRUCTIONS
inline void s_format(Opcode op, int f1, int f2) {
DCHECK_NE(op & 0xff00, 0);
DCHECK(is_uint12(f2));
uint32_t code = getfield<uint32_t, 4, 0, 16>(op) |
getfield<uint32_t, 4, 16, 20>(f1) |
getfield<uint32_t, 4, 20, 32>(f2);
emit4bytes(code);
}
inline void s_format(Opcode op, int f1, int f2) {
DCHECK_NE(op & 0xff00, 0);
DCHECK(is_uint12(f2));
uint32_t code = getfield<uint32_t, 4, 0, 16>(op) |
getfield<uint32_t, 4, 16, 20>(f1) |
getfield<uint32_t, 4, 20, 32>(f2);
emit4bytes(code);
}
#define DECLARE_S390_S_INSTRUCTIONS(name, op_name, op_value) \
void name(Register b1, const Operand& d2) { \
Opcode op = op_name; \
if ((op & 0xFF00) == 0) { \
op = (Opcode)(op << 8); \
} \
s_format(op, b1.code(), d2.immediate()); \
} \
void name(const MemOperand& opnd) { \
Operand d2 = Operand(opnd.getDisplacement()); \
name(opnd.getBaseRegister(), d2); \
#define DECLARE_S390_S_INSTRUCTIONS(name, op_name, op_value) \
void name(Register b1, const Operand& d2) { \
Opcode op = op_name; \
if ((op & 0xFF00) == 0) { \
op = (Opcode)(op << 8); \
} \
s_format(op, b1.code(), d2.immediate()); \
} \
void name(const MemOperand& opnd) { \
Operand d2 = Operand(opnd.getDisplacement()); \
name(opnd.getBaseRegister(), d2); \
}
S390_S_OPCODE_LIST(DECLARE_S390_S_INSTRUCTIONS)
#undef DECLARE_S390_S_INSTRUCTIONS
inline void si_format(Opcode op, int f1, int f2, int f3) {
uint32_t code =
getfield<uint32_t, 4, 0, 8>(op) | getfield<uint32_t, 4, 8, 16>(f1) |
getfield<uint32_t, 4, 16, 20>(f2) | getfield<uint32_t, 4, 20, 32>(f3);
emit4bytes(code);
}
inline void si_format(Opcode op, int f1, int f2, int f3) {
uint32_t code = getfield<uint32_t, 4, 0, 8>(op) |
getfield<uint32_t, 4, 8, 16>(f1) |
getfield<uint32_t, 4, 16, 20>(f2) |
getfield<uint32_t, 4, 20, 32>(f3);
emit4bytes(code);
}
#define DECLARE_S390_SI_INSTRUCTIONS(name, op_name, op_value) \
void name(const Operand& i2, Register b1, const Operand& d1) { \
si_format(op_name, i2.immediate(), b1.code(), d1.immediate()); \
} \
void name(const MemOperand& opnd, const Operand& i2) { \
name(i2, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
#define DECLARE_S390_SI_INSTRUCTIONS(name, op_name, op_value) \
void name(const Operand& i2, Register b1, const Operand& d1) { \
si_format(op_name, i2.immediate(), b1.code(), d1.immediate()); \
} \
void name(const MemOperand& opnd, const Operand& i2) { \
name(i2, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
}
S390_SI_OPCODE_LIST(DECLARE_S390_SI_INSTRUCTIONS)
#undef DECLARE_S390_SI_INSTRUCTIONS
inline void siy_format(Opcode op, int f1, int f2, int f3) {
DCHECK(is_uint20(f3) || is_int20(f3));
DCHECK(is_uint16(op));
DCHECK(is_uint8(f1) || is_int8(f1));
uint64_t code = getfield<uint64_t, 6, 0, 8>(op >> 8) |
getfield<uint64_t, 6, 8, 16>(f1) |
getfield<uint64_t, 6, 16, 20>(f2) |
getfield<uint64_t, 6, 20, 32>(f3) |
getfield<uint64_t, 6, 32, 40>(f3 >> 12) |
getfield<uint64_t, 6, 40, 48>(op & 0x00FF);
emit6bytes(code);
}
inline void siy_format(Opcode op, int f1, int f2, int f3) {
DCHECK(is_uint20(f3) || is_int20(f3));
DCHECK(is_uint16(op));
DCHECK(is_uint8(f1) || is_int8(f1));
uint64_t code = getfield<uint64_t, 6, 0, 8>(op >> 8) |
getfield<uint64_t, 6, 8, 16>(f1) |
getfield<uint64_t, 6, 16, 20>(f2) |
getfield<uint64_t, 6, 20, 32>(f3) |
getfield<uint64_t, 6, 32, 40>(f3 >> 12) |
getfield<uint64_t, 6, 40, 48>(op & 0x00FF);
emit6bytes(code);
}
#define DECLARE_S390_SIY_INSTRUCTIONS(name, op_name, op_value) \
void name(const Operand& i2, Register b1, const Operand& d1) { \
siy_format(op_name, i2.immediate(), b1.code(), d1.immediate()); \
} \
void name(const MemOperand& opnd, const Operand& i2) { \
name(i2, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
#define DECLARE_S390_SIY_INSTRUCTIONS(name, op_name, op_value) \
void name(const Operand& i2, Register b1, const Operand& d1) { \
siy_format(op_name, i2.immediate(), b1.code(), d1.immediate()); \
} \
void name(const MemOperand& opnd, const Operand& i2) { \
name(i2, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
}
S390_SIY_OPCODE_LIST(DECLARE_S390_SIY_INSTRUCTIONS)
#undef DECLARE_S390_SIY_INSTRUCTIONS
inline void rrs_format(Opcode op, int f1, int f2, int f3, int f4, int f5) {
DCHECK(is_uint12(f4));
DCHECK(is_uint16(op));
uint64_t code =
getfield<uint64_t, 6, 0, 8>(op >> 8) |
getfield<uint64_t, 6, 8, 12>(f1) | getfield<uint64_t, 6, 12, 16>(f2) |
getfield<uint64_t, 6, 16, 20>(f3) | getfield<uint64_t, 6, 20, 32>(f4) |
getfield<uint64_t, 6, 32, 36>(f5) |
getfield<uint64_t, 6, 40, 48>(op & 0x00FF);
emit6bytes(code);
}
inline void rrs_format(Opcode op, int f1, int f2, int f3, int f4, int f5) {
DCHECK(is_uint12(f4));
DCHECK(is_uint16(op));
uint64_t code = getfield<uint64_t, 6, 0, 8>(op >> 8) |
getfield<uint64_t, 6, 8, 12>(f1) |
getfield<uint64_t, 6, 12, 16>(f2) |
getfield<uint64_t, 6, 16, 20>(f3) |
getfield<uint64_t, 6, 20, 32>(f4) |
getfield<uint64_t, 6, 32, 36>(f5) |
getfield<uint64_t, 6, 40, 48>(op & 0x00FF);
emit6bytes(code);
}
#define DECLARE_S390_RRS_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r1, Register r2, Register b4, const Operand& d4, \
Condition m3) { \
rrs_format(op_name, r1.code(), r2.code(), b4.code(), d4.immediate(), \
m3); \
} \
void name(Register r1, Register r2, Condition m3, \
const MemOperand& opnd) { \
name(r1, r2, opnd.getBaseRegister(), \
Operand(opnd.getDisplacement()), m3); \
#define DECLARE_S390_RRS_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r1, Register r2, Register b4, const Operand& d4, \
Condition m3) { \
rrs_format(op_name, r1.code(), r2.code(), b4.code(), d4.immediate(), m3); \
} \
void name(Register r1, Register r2, Condition m3, const MemOperand& opnd) { \
name(r1, r2, opnd.getBaseRegister(), Operand(opnd.getDisplacement()), m3); \
}
S390_RRS_OPCODE_LIST(DECLARE_S390_RRS_INSTRUCTIONS)
#undef DECLARE_S390_RRS_INSTRUCTIONS
inline void ris_format(Opcode op, int f1, int f2, int f3, int f4, int f5) {
DCHECK(is_uint12(f3));
DCHECK(is_uint16(op));
DCHECK(is_uint8(f5));
uint64_t code =
getfield<uint64_t, 6, 0, 8>(op >> 8) |
getfield<uint64_t, 6, 8, 12>(f1) | getfield<uint64_t, 6, 12, 16>(f2) |
getfield<uint64_t, 6, 16, 20>(f3) | getfield<uint64_t, 6, 20, 32>(f4) |
getfield<uint64_t, 6, 32, 40>(f5) |
getfield<uint64_t, 6, 40, 48>(op & 0x00FF);
emit6bytes(code);
}
inline void ris_format(Opcode op, int f1, int f2, int f3, int f4, int f5) {
DCHECK(is_uint12(f3));
DCHECK(is_uint16(op));
DCHECK(is_uint8(f5));
uint64_t code = getfield<uint64_t, 6, 0, 8>(op >> 8) |
getfield<uint64_t, 6, 8, 12>(f1) |
getfield<uint64_t, 6, 12, 16>(f2) |
getfield<uint64_t, 6, 16, 20>(f3) |
getfield<uint64_t, 6, 20, 32>(f4) |
getfield<uint64_t, 6, 32, 40>(f5) |
getfield<uint64_t, 6, 40, 48>(op & 0x00FF);
emit6bytes(code);
}
#define DECLARE_S390_RIS_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r1, Condition m3, Register b4, const Operand& d4, \
const Operand& i2) { \
ris_format(op_name, r1.code(), m3, b4.code(), d4.immediate(), \
i2.immediate()); \
} \
void name(Register r1, const Operand& i2, Condition m3, \
const MemOperand& opnd) { \
name(r1, m3, opnd.getBaseRegister(), \
Operand(opnd.getDisplacement()), i2); \
#define DECLARE_S390_RIS_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r1, Condition m3, Register b4, const Operand& d4, \
const Operand& i2) { \
ris_format(op_name, r1.code(), m3, b4.code(), d4.immediate(), \
i2.immediate()); \
} \
void name(Register r1, const Operand& i2, Condition m3, \
const MemOperand& opnd) { \
name(r1, m3, opnd.getBaseRegister(), Operand(opnd.getDisplacement()), i2); \
}
S390_RIS_OPCODE_LIST(DECLARE_S390_RIS_INSTRUCTIONS)
#undef DECLARE_S390_RIS_INSTRUCTIONS
inline void sil_format(Opcode op, int f1, int f2, int f3) {
DCHECK(is_uint12(f2));
DCHECK(is_uint16(op));
DCHECK(is_uint16(f3));
uint64_t code =
getfield<uint64_t, 6, 0, 16>(op) | getfield<uint64_t, 6, 16, 20>(f1) |
getfield<uint64_t, 6, 20, 32>(f2) | getfield<uint64_t, 6, 32, 48>(f3);
emit6bytes(code);
}
inline void sil_format(Opcode op, int f1, int f2, int f3) {
DCHECK(is_uint12(f2));
DCHECK(is_uint16(op));
DCHECK(is_uint16(f3));
uint64_t code = getfield<uint64_t, 6, 0, 16>(op) |
getfield<uint64_t, 6, 16, 20>(f1) |
getfield<uint64_t, 6, 20, 32>(f2) |
getfield<uint64_t, 6, 32, 48>(f3);
emit6bytes(code);
}
#define DECLARE_S390_SIL_INSTRUCTIONS(name, op_name, op_value) \
void name(Register b1, const Operand& d1, const Operand& i2) { \
sil_format(op_name, b1.code(), d1.immediate(), i2.immediate()); \
} \
void name(const MemOperand& opnd, const Operand& i2) { \
name(opnd.getBaseRegister(), Operand(opnd.getDisplacement()), i2); \
#define DECLARE_S390_SIL_INSTRUCTIONS(name, op_name, op_value) \
void name(Register b1, const Operand& d1, const Operand& i2) { \
sil_format(op_name, b1.code(), d1.immediate(), i2.immediate()); \
} \
void name(const MemOperand& opnd, const Operand& i2) { \
name(opnd.getBaseRegister(), Operand(opnd.getDisplacement()), i2); \
}
S390_SIL_OPCODE_LIST(DECLARE_S390_SIL_INSTRUCTIONS)
#undef DECLARE_S390_SIL_INSTRUCTIONS
inline void rie_d_format(Opcode opcode, int f1, int f2, int f3, int f4) {
uint32_t op1 = opcode >> 8;
uint32_t op2 = opcode & 0xff;
uint64_t code =
getfield<uint64_t, 6, 0, 8>(op1) | getfield<uint64_t, 6, 8, 12>(f1) |
getfield<uint64_t, 6, 12, 16>(f2) | getfield<uint64_t, 6, 16, 32>(f3) |
getfield<uint64_t, 6, 32, 40>(f4) | getfield<uint64_t, 6, 40, 48>(op2);
emit6bytes(code);
}
inline void rie_d_format(Opcode opcode, int f1, int f2, int f3, int f4) {
uint32_t op1 = opcode >> 8;
uint32_t op2 = opcode & 0xff;
uint64_t code = getfield<uint64_t, 6, 0, 8>(op1) |
getfield<uint64_t, 6, 8, 12>(f1) |
getfield<uint64_t, 6, 12, 16>(f2) |
getfield<uint64_t, 6, 16, 32>(f3) |
getfield<uint64_t, 6, 32, 40>(f4) |
getfield<uint64_t, 6, 40, 48>(op2);
emit6bytes(code);
}
#define DECLARE_S390_RIE_D_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r1, Register r3, const Operand& i2) { \
rie_d_format(op_name, r1.code(), r3.code(), i2.immediate(), 0); \
#define DECLARE_S390_RIE_D_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r1, Register r3, const Operand& i2) { \
rie_d_format(op_name, r1.code(), r3.code(), i2.immediate(), 0); \
}
S390_RIE_D_OPCODE_LIST(DECLARE_S390_RIE_D_INSTRUCTIONS)
#undef DECLARE_S390_RIE_D_INSTRUCTIONS
inline void rie_e_format(Opcode opcode, int f1, int f2, int f3) {
uint32_t op1 = opcode >> 8;
uint32_t op2 = opcode & 0xff;
uint64_t code =
getfield<uint64_t, 6, 0, 8>(op1) | getfield<uint64_t, 6, 8, 12>(f1) |
getfield<uint64_t, 6, 12, 16>(f2) | getfield<uint64_t, 6, 16, 32>(f3) |
getfield<uint64_t, 6, 40, 48>(op2);
emit6bytes(code);
}
inline void rie_e_format(Opcode opcode, int f1, int f2, int f3) {
uint32_t op1 = opcode >> 8;
uint32_t op2 = opcode & 0xff;
uint64_t code = getfield<uint64_t, 6, 0, 8>(op1) |
getfield<uint64_t, 6, 8, 12>(f1) |
getfield<uint64_t, 6, 12, 16>(f2) |
getfield<uint64_t, 6, 16, 32>(f3) |
getfield<uint64_t, 6, 40, 48>(op2);
emit6bytes(code);
}
#define DECLARE_S390_RIE_E_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r1, Register r3, const Operand& i2) { \
rie_e_format(op_name, r1.code(), r3.code(), i2.immediate()); \
#define DECLARE_S390_RIE_E_INSTRUCTIONS(name, op_name, op_value) \
void name(Register r1, Register r3, const Operand& i2) { \
rie_e_format(op_name, r1.code(), r3.code(), i2.immediate()); \
}
S390_RIE_E_OPCODE_LIST(DECLARE_S390_RIE_E_INSTRUCTIONS)
#undef DECLARE_S390_RIE_E_INSTRUCTIONS
inline void rie_f_format(Opcode opcode, int f1, int f2, int f3, int f4,
int f5) {
uint32_t op1 = opcode >> 8;
uint32_t op2 = opcode & 0xff;
uint64_t code =
getfield<uint64_t, 6, 0, 8>(op1) | getfield<uint64_t, 6, 8, 12>(f1) |
getfield<uint64_t, 6, 12, 16>(f2) | getfield<uint64_t, 6, 16, 24>(f3) |
getfield<uint64_t, 6, 24, 32>(f4) | getfield<uint64_t, 6, 32, 40>(f5) |
getfield<uint64_t, 6, 40, 48>(op2);
emit6bytes(code);
}
inline void rie_f_format(Opcode opcode, int f1, int f2, int f3, int f4,
int f5) {
uint32_t op1 = opcode >> 8;
uint32_t op2 = opcode & 0xff;
uint64_t code = getfield<uint64_t, 6, 0, 8>(op1) |
getfield<uint64_t, 6, 8, 12>(f1) |
getfield<uint64_t, 6, 12, 16>(f2) |
getfield<uint64_t, 6, 16, 24>(f3) |
getfield<uint64_t, 6, 24, 32>(f4) |
getfield<uint64_t, 6, 32, 40>(f5) |
getfield<uint64_t, 6, 40, 48>(op2);
emit6bytes(code);
}
#define DECLARE_S390_RIE_F_INSTRUCTIONS(name, op_name, op_value) \
void name(Register dst, Register src, const Operand& startBit, \
const Operand& endBit, const Operand& shiftAmt) { \
DCHECK(is_uint8(startBit.immediate())); \
DCHECK(is_uint8(endBit.immediate())); \
DCHECK(is_uint8(shiftAmt.immediate())); \
rie_f_format(op_name, dst.code(), src.code(), startBit.immediate(), \
endBit.immediate(), shiftAmt.immediate()); \
#define DECLARE_S390_RIE_F_INSTRUCTIONS(name, op_name, op_value) \
void name(Register dst, Register src, const Operand& startBit, \
const Operand& endBit, const Operand& shiftAmt) { \
DCHECK(is_uint8(startBit.immediate())); \
DCHECK(is_uint8(endBit.immediate())); \
DCHECK(is_uint8(shiftAmt.immediate())); \
rie_f_format(op_name, dst.code(), src.code(), startBit.immediate(), \
endBit.immediate(), shiftAmt.immediate()); \
}
S390_RIE_F_OPCODE_LIST(DECLARE_S390_RIE_F_INSTRUCTIONS)
#undef DECLARE_S390_RIE_F_INSTRUCTIONS
inline void ss_a_format(Opcode op, int f1, int f2, int f3, int f4, int f5) {
DCHECK(is_uint12(f5));
DCHECK(is_uint12(f3));
DCHECK(is_uint8(f1));
DCHECK(is_uint8(op));
uint64_t code =
getfield<uint64_t, 6, 0, 8>(op) | getfield<uint64_t, 6, 8, 16>(f1) |
getfield<uint64_t, 6, 16, 20>(f2) | getfield<uint64_t, 6, 20, 32>(f3) |
getfield<uint64_t, 6, 32, 36>(f4) | getfield<uint64_t, 6, 36, 48>(f5);
emit6bytes(code);
}
inline void ss_a_format(Opcode op, int f1, int f2, int f3, int f4, int f5) {
DCHECK(is_uint12(f5));
DCHECK(is_uint12(f3));
DCHECK(is_uint8(f1));
DCHECK(is_uint8(op));
uint64_t code = getfield<uint64_t, 6, 0, 8>(op) |
getfield<uint64_t, 6, 8, 16>(f1) |
getfield<uint64_t, 6, 16, 20>(f2) |
getfield<uint64_t, 6, 20, 32>(f3) |
getfield<uint64_t, 6, 32, 36>(f4) |
getfield<uint64_t, 6, 36, 48>(f5);
emit6bytes(code);
}
#define DECLARE_S390_SS_A_INSTRUCTIONS(name, op_name, op_value) \
void name(Register b1, const Operand& d1, Register b2, \
const Operand& d2, const Operand& length) { \
ss_a_format(op_name, length.immediate(), b1.code(), d1.immediate(), \
b2.code(), d2.immediate()); \
} \
void name(const MemOperand& opnd1, const MemOperand& opnd2, \
const Operand& length) { \
ss_a_format(op_name, length.immediate(), \
opnd1.getBaseRegister().code(), \
opnd1.getDisplacement(), opnd2.getBaseRegister().code(), \
opnd2.getDisplacement()); \
#define DECLARE_S390_SS_A_INSTRUCTIONS(name, op_name, op_value) \
void name(Register b1, const Operand& d1, Register b2, const Operand& d2, \
const Operand& length) { \
ss_a_format(op_name, length.immediate(), b1.code(), d1.immediate(), \
b2.code(), d2.immediate()); \
} \
void name(const MemOperand& opnd1, const MemOperand& opnd2, \
const Operand& length) { \
ss_a_format(op_name, length.immediate(), opnd1.getBaseRegister().code(), \
opnd1.getDisplacement(), opnd2.getBaseRegister().code(), \
opnd2.getDisplacement()); \
}
S390_SS_A_OPCODE_LIST(DECLARE_S390_SS_A_INSTRUCTIONS)
#undef DECLARE_S390_SS_A_INSTRUCTIONS
// Helper for unconditional branch to Label with update to save register
void b(Register r, Label* l) {
int32_t halfwords = branch_offset(l) / 2;
@ -1513,4 +1465,4 @@ class EnsureSpace {
} // namespace internal
} // namespace v8
#endif // V8_S390_ASSEMBLER_S390_H_
#endif // V8_CODEGEN_S390_ASSEMBLER_S390_H_

View File

@ -21,9 +21,7 @@
#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
} // namespace internal
namespace internal {} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_S390

View File

@ -4,7 +4,7 @@
#if V8_TARGET_ARCH_S390
#include "src/s390/constants-s390.h"
#include "src/codegen/s390/constants-s390.h"
namespace v8 {
namespace internal {

View File

@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_S390_CONSTANTS_S390_H_
#define V8_S390_CONSTANTS_S390_H_
#ifndef V8_CODEGEN_S390_CONSTANTS_S390_H_
#define V8_CODEGEN_S390_CONSTANTS_S390_H_
// Get the standard printf format macros for C99 stdint types.
#ifndef __STDC_FORMAT_MACROS
@ -1159,7 +1159,7 @@ using SixByteInstr = uint64_t;
V(sth, STH, 0x40) /* type = RX_A STORE HALFWORD (16) */
#define S390_RX_B_OPCODE_LIST(V) \
V(bc, BC, 0x47) /* type = RX_B BRANCH ON CONDITION */
V(bc, BC, 0x47) /* type = RX_B BRANCH ON CONDITION */
#define S390_RIE_A_OPCODE_LIST(V) \
V(cgit, CGIT, 0xEC70) /* type = RIE_A COMPARE IMMEDIATE AND TRAP (64<-16) */ \
@ -2384,4 +2384,4 @@ class DoubleRegisters {
} // namespace internal
} // namespace v8
#endif // V8_S390_CONSTANTS_S390_H_
#endif // V8_CODEGEN_S390_CONSTANTS_S390_H_

View File

@ -28,7 +28,7 @@
// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
#if 0
#include "src/s390/macro-assembler-s390.h"
#include "src/codegen/s390/macro-assembler-s390.h"
#endif
namespace v8 {
@ -175,7 +175,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Builtins::IsIsolateIndependent(builtin_index);
if (options().inline_offheap_trampolines &&
target_is_isolate_independent_builtin) {
target_is_isolate_independent_builtin) {
Label skip;
if (cond != al) {
b(NegateCondition(cond), &skip, Label::kNear);
@ -236,7 +236,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Builtins::IsIsolateIndependent(builtin_index);
if (options().inline_offheap_trampolines &&
target_is_isolate_independent_builtin) {
target_is_isolate_independent_builtin) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
@ -316,8 +316,7 @@ void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) {
}
// Wrapper around Assembler::mvc (SS-a format)
void TurboAssembler::MoveChar(const MemOperand& opnd1,
const MemOperand& opnd2,
void TurboAssembler::MoveChar(const MemOperand& opnd1, const MemOperand& opnd2,
const Operand& length) {
mvc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
}
@ -338,8 +337,10 @@ void TurboAssembler::ExclusiveOrChar(const MemOperand& opnd1,
// Wrapper around Assembler::risbg(n) (RIE-f)
void TurboAssembler::RotateInsertSelectBits(Register dst, Register src,
const Operand& startBit, const Operand& endBit,
const Operand& shiftAmt, bool zeroBits) {
const Operand& startBit,
const Operand& endBit,
const Operand& shiftAmt,
bool zeroBits) {
if (zeroBits)
// High tag the top bit of I4/EndBit to zero out any unselected bits
risbg(dst, src, startBit,
@ -354,7 +355,7 @@ void TurboAssembler::BranchRelativeOnIdxHighP(Register dst, Register inc,
brxhg(dst, inc, L);
#else
brxh(dst, inc, L);
#endif // V8_TARGET_ARCH_S390X
#endif // V8_TARGET_ARCH_S390X
}
void TurboAssembler::MultiPush(RegList regs, Register location) {
@ -1465,8 +1466,8 @@ void MacroAssembler::PushStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
// Link the current handler as the next handler.
Move(r7, ExternalReference::Create(IsolateAddressId::kHandlerAddress,
isolate()));
Move(r7,
ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
// Buy the full stack frame for 5 slots.
lay(sp, MemOperand(sp, -StackHandlerConstants::kSize));
@ -1477,7 +1478,7 @@ void MacroAssembler::PushStackHandler() {
// Copy the old handler into the next handler slot.
MoveChar(MemOperand(sp, StackHandlerConstants::kNextOffset), MemOperand(r7),
Operand(kPointerSize));
Operand(kPointerSize));
// Set this new handler as the current one.
StoreP(sp, MemOperand(r7));
}
@ -1488,8 +1489,8 @@ void MacroAssembler::PopStackHandler() {
// Pop the Next Handler into r3 and store it into Handler Address reference.
Pop(r3);
Move(ip, ExternalReference::Create(IsolateAddressId::kHandlerAddress,
isolate()));
Move(ip,
ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
StoreP(r3, MemOperand(ip));
Drop(1); // Drop padding.
@ -1917,7 +1918,6 @@ void TurboAssembler::CallCFunctionHelper(Register function,
Call(dest);
if (isolate() != nullptr) {
// We don't unset the PC; the FP is the source of truth.
Register scratch1 = r6;
@ -2193,7 +2193,7 @@ void TurboAssembler::Div32(Register dst, Register src1, Register src2) {
#define Generate_DivU32(instr) \
{ \
lr(r0, src1); \
srdl(r0, Operand(32)); \
srdl(r0, Operand(32)); \
instr(r0, src2); \
LoadlW(dst, r1); \
}
@ -2267,7 +2267,7 @@ void TurboAssembler::Mod32(Register dst, Register src1, Register src2) {
#define Generate_ModU32(instr) \
{ \
lr(r0, src1); \
srdl(r0, Operand(32)); \
srdl(r0, Operand(32)); \
instr(r0, src2); \
LoadlW(dst, r0); \
}
@ -2968,14 +2968,14 @@ void TurboAssembler::AndP(Register dst, Register src, const Operand& opnd) {
int endBit = 63 - trailing_zeros;
// Start: startBit, End: endBit, Shift = 0, true = zero unselected bits.
RotateInsertSelectBits(dst, src, Operand(startBit), Operand(endBit),
Operand::Zero(), true);
Operand::Zero(), true);
return;
} else if (-1 == shifted_value) {
// A Special case in which all top bits up to MSB are 1's. In this case,
// we can set startBit to be 0.
int endBit = 63 - trailing_zeros;
RotateInsertSelectBits(dst, src, Operand::Zero(), Operand(endBit),
Operand::Zero(), true);
Operand::Zero(), true);
return;
}
}
@ -3729,7 +3729,6 @@ void TurboAssembler::LoadLogicalReversedHalfWordP(Register dst,
LoadLogicalHalfWordP(dst, dst);
}
// Load And Test (Reg <- Reg)
void TurboAssembler::LoadAndTest32(Register dst, Register src) {
ltr(dst, src);
@ -4153,7 +4152,7 @@ void TurboAssembler::ClearRightImm(Register dst, Register src,
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
int endBit = 63 - numBitsToClear;
RotateInsertSelectBits(dst, src, Operand::Zero(), Operand(endBit),
Operand::Zero(), true);
Operand::Zero(), true);
return;
}
@ -4344,7 +4343,7 @@ void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
ShiftRightArithP(builtin_pointer, builtin_pointer,
Operand(kSmiShift - kSystemPointerSizeLog2));
AddP(builtin_pointer, builtin_pointer,
Operand(IsolateData::builtin_entry_table_offset()));
Operand(IsolateData::builtin_entry_table_offset()));
LoadP(builtin_pointer, MemOperand(kRootRegister, builtin_pointer));
Call(builtin_pointer);
}
@ -4388,7 +4387,7 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
ShiftLeftP(destination, scratch, Operand(kSystemPointerSizeLog2));
AddP(destination, destination, kRootRegister);
LoadP(destination,
MemOperand(destination, IsolateData::builtin_entry_table_offset()));
MemOperand(destination, IsolateData::builtin_entry_table_offset()));
bind(&out);
} else {

View File

@ -6,13 +6,13 @@
#error This header must be included via macro-assembler.h
#endif
#ifndef V8_S390_MACRO_ASSEMBLER_S390_H_
#define V8_S390_MACRO_ASSEMBLER_S390_H_
#ifndef V8_CODEGEN_S390_MACRO_ASSEMBLER_S390_H_
#define V8_CODEGEN_S390_MACRO_ASSEMBLER_S390_H_
#include "src/codegen/bailout-reason.h"
#include "src/codegen/s390/assembler-s390.h"
#include "src/common/globals.h"
#include "src/objects/contexts.h"
#include "src/s390/assembler-s390.h"
namespace v8 {
namespace internal {
@ -180,17 +180,17 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Move(DoubleRegister dst, DoubleRegister src);
void MoveChar(const MemOperand& opnd1, const MemOperand& opnd2,
const Operand& length);
const Operand& length);
void CompareLogicalChar(const MemOperand& opnd1, const MemOperand& opnd2,
const Operand& length);
const Operand& length);
void ExclusiveOrChar(const MemOperand& opnd1, const MemOperand& opnd2,
const Operand& length);
const Operand& length);
void RotateInsertSelectBits(Register dst, Register src,
const Operand& startBit, const Operand& endBit,
const Operand& shiftAmt, bool zeroBits);
const Operand& startBit, const Operand& endBit,
const Operand& shiftAmt, bool zeroBits);
void BranchRelativeOnIdxHighP(Register dst, Register inc, Label* L);
@ -886,7 +886,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
int endBit = 63; // End is always LSB after shifting.
int startBit = 63 - rangeStart + rangeEnd;
RotateInsertSelectBits(dst, src, Operand(startBit), Operand(endBit),
Operand(shiftAmount), true);
Operand(shiftAmount), true);
} else {
if (rangeEnd > 0) // Don't need to shift if rangeEnd is zero.
ShiftRightP(dst, src, Operand(rangeEnd));
@ -1300,4 +1300,4 @@ inline MemOperand NativeContextMemOperand() {
} // namespace internal
} // namespace v8
#endif // V8_S390_MACRO_ASSEMBLER_S390_H_
#endif // V8_CODEGEN_S390_MACRO_ASSEMBLER_S390_H_

View File

@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_S390_REGISTER_S390_H_
#define V8_S390_REGISTER_S390_H_
#ifndef V8_CODEGEN_S390_REGISTER_S390_H_
#define V8_CODEGEN_S390_REGISTER_S390_H_
#include "src/codegen/register.h"
#include "src/codegen/reglist.h"
@ -278,4 +278,4 @@ constexpr Register kWasmCompileLazyFuncIndexRegister = r7;
} // namespace internal
} // namespace v8
#endif // V8_S390_REGISTER_S390_H_
#endif // V8_CODEGEN_S390_REGISTER_S390_H_

View File

@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_X64_ASSEMBLER_X64_INL_H_
#define V8_X64_ASSEMBLER_X64_INL_H_
#ifndef V8_CODEGEN_X64_ASSEMBLER_X64_INL_H_
#define V8_CODEGEN_X64_ASSEMBLER_X64_INL_H_
#include "src/x64/assembler-x64.h"
#include "src/codegen/x64/assembler-x64.h"
#include "src/base/cpu.h"
#include "src/common/v8memory.h"
@ -22,7 +22,6 @@ bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(SSE4_1); }
// -----------------------------------------------------------------------------
// Implementation of Assembler
void Assembler::emitl(uint32_t x) {
WriteUnalignedValue(reinterpret_cast<Address>(pc_), x);
pc_ += sizeof(uint32_t);
@ -82,7 +81,6 @@ void Assembler::emit_rex_64(XMMRegister reg, Operand op) {
emit(0x48 | (reg.code() & 0x8) >> 1 | op.data().rex);
}
void Assembler::emit_rex_64(Register rm_reg) {
DCHECK_EQ(rm_reg.code() & 0xf, rm_reg.code());
emit(0x48 | rm_reg.high_bit());
@ -98,10 +96,7 @@ void Assembler::emit_rex_32(Register reg, Operand op) {
emit(0x40 | reg.high_bit() << 2 | op.data().rex);
}
void Assembler::emit_rex_32(Register rm_reg) {
emit(0x40 | rm_reg.high_bit());
}
void Assembler::emit_rex_32(Register rm_reg) { emit(0x40 | rm_reg.high_bit()); }
void Assembler::emit_rex_32(Operand op) { emit(0x40 | op.data().rex); }
@ -120,25 +115,21 @@ void Assembler::emit_optional_rex_32(XMMRegister reg, Operand op) {
if (rex_bits != 0) emit(0x40 | rex_bits);
}
void Assembler::emit_optional_rex_32(XMMRegister reg, XMMRegister base) {
byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
if (rex_bits != 0) emit(0x40 | rex_bits);
}
void Assembler::emit_optional_rex_32(XMMRegister reg, Register base) {
byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
if (rex_bits != 0) emit(0x40 | rex_bits);
}
void Assembler::emit_optional_rex_32(Register reg, XMMRegister base) {
byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
if (rex_bits != 0) emit(0x40 | rex_bits);
}
void Assembler::emit_optional_rex_32(Register rm_reg) {
if (rm_reg.high_bit()) emit(0x41);
}
@ -151,7 +142,6 @@ void Assembler::emit_optional_rex_32(Operand op) {
if (op.data().rex != 0) emit(0x40 | op.data().rex);
}
// byte 1 of 3-byte VEX
void Assembler::emit_vex3_byte1(XMMRegister reg, XMMRegister rm,
LeadingOpcode m) {
@ -159,14 +149,12 @@ void Assembler::emit_vex3_byte1(XMMRegister reg, XMMRegister rm,
emit(rxb | m);
}
// byte 1 of 3-byte VEX
void Assembler::emit_vex3_byte1(XMMRegister reg, Operand rm, LeadingOpcode m) {
byte rxb = static_cast<byte>(~((reg.high_bit() << 2) | rm.data().rex)) << 5;
emit(rxb | m);
}
// byte 1 of 2-byte VEX
void Assembler::emit_vex2_byte1(XMMRegister reg, XMMRegister v, VectorLength l,
SIMDPrefix pp) {
@ -174,14 +162,12 @@ void Assembler::emit_vex2_byte1(XMMRegister reg, XMMRegister v, VectorLength l,
emit(rv | l | pp);
}
// byte 2 of 3-byte VEX
void Assembler::emit_vex3_byte2(VexW w, XMMRegister v, VectorLength l,
SIMDPrefix pp) {
emit(w | ((~v.code() & 0xf) << 3) | l | pp);
}
void Assembler::emit_vex_prefix(XMMRegister reg, XMMRegister vreg,
XMMRegister rm, VectorLength l, SIMDPrefix pp,
LeadingOpcode mm, VexW w) {
@ -195,7 +181,6 @@ void Assembler::emit_vex_prefix(XMMRegister reg, XMMRegister vreg,
}
}
void Assembler::emit_vex_prefix(Register reg, Register vreg, Register rm,
VectorLength l, SIMDPrefix pp, LeadingOpcode mm,
VexW w) {
@ -226,7 +211,6 @@ void Assembler::emit_vex_prefix(Register reg, Register vreg, Operand rm,
emit_vex_prefix(ireg, ivreg, rm, l, pp, mm, w);
}
Address Assembler::target_address_at(Address pc, Address constant_pool) {
return ReadUnalignedValue<int32_t>(pc) + pc + 4;
}
@ -283,7 +267,6 @@ void RelocInfo::apply(intptr_t delta) {
}
}
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
@ -297,11 +280,7 @@ Address RelocInfo::target_address_address() {
return pc_;
}
Address RelocInfo::constant_pool_entry_address() {
UNREACHABLE();
}
Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); }
int RelocInfo::target_address_size() {
if (IsCodedSpecially()) {
@ -365,7 +344,6 @@ Address RelocInfo::target_internal_reference() {
return ReadUnalignedValue<Address>(pc_);
}
Address RelocInfo::target_internal_reference_address() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
return pc_;
@ -428,4 +406,4 @@ void RelocInfo::WipeOut() {
} // namespace internal
} // namespace v8
#endif // V8_X64_ASSEMBLER_X64_INL_H_
#endif // V8_CODEGEN_X64_ASSEMBLER_X64_INL_H_

File diff suppressed because it is too large Load Diff

Some files were not shown because too many files have changed in this diff Show More