Rename A64 port to ARM64 port

BUG=354405
R=ulan@chromium.org, rodolph.perfetta@arm.com
LOG=y

Review URL: https://codereview.chromium.org/207823003

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@20148 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Author: jochen@chromium.org
Date:   2014-03-21 09:28:26 +00:00
Parent: 9fccfc37c8
Commit: 2ce0bebba1

101 changed files with 526 additions and 530 deletions


@ -223,11 +223,11 @@ endif
# Architectures and modes to be compiled. Consider these to be internal
# variables, don't override them (use the targets instead).
ARCHES = ia32 x64 arm a64 mipsel
ARCHES = ia32 x64 arm arm64 mipsel
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug optdebug
DEFAULT_MODES = release debug
ANDROID_ARCHES = android_ia32 android_arm android_a64 android_mipsel
ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel
NACL_ARCHES = nacl_ia32 nacl_x64
# List of files that trigger Makefile regeneration:
@ -373,8 +373,8 @@ native.check: native
--arch-and-mode=. $(TESTFLAGS)
SUPERFASTTESTMODES = ia32.release
FASTTESTMODES = $(SUPERFASTTESTMODES),x64.release,ia32.optdebug,x64.optdebug,arm.optdebug,a64.release
FASTCOMPILEMODES = $(FASTTESTMODES),a64.optdebug
FASTTESTMODES = $(SUPERFASTTESTMODES),x64.release,ia32.optdebug,x64.optdebug,arm.optdebug,arm64.release
FASTCOMPILEMODES = $(FASTTESTMODES),arm64.optdebug
COMMA = ,
EMPTY =
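
For reference, the renamed targets are driven exactly like the old a64 ones; a hypothetical smoke-test session (target names follow the ARCHES, ANDROID_ARCHES and MODES lists above; the -j flag is illustrative):

  # formerly: make a64.release
  make arm64.release -j8
  make arm64.release.check

  # Android cross-build, formerly android_a64:
  make android_arm64.release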


@ -26,7 +26,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Those definitions should be consistent with the main Makefile
ANDROID_ARCHES = android_ia32 android_arm android_a64 android_mipsel
ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel
MODES = release debug
# Generates all combinations of ANDROID ARCHES and MODES,
@ -53,8 +53,8 @@ ifeq ($(ARCH), android_arm)
TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
TOOLCHAIN_VER = 4.6
else
ifeq ($(ARCH), android_a64)
DEFINES = target_arch=a64 v8_target_arch=a64 android_target_arch=arm64
ifeq ($(ARCH), android_arm64)
DEFINES = target_arch=arm64 v8_target_arch=arm64 android_target_arch=arm64
TOOLCHAIN_ARCH = aarch64-linux-android
TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
TOOLCHAIN_VER = 4.8


@ -184,7 +184,7 @@
'-L<(android_stlport_libs)/x86',
],
}],
['target_arch=="a64"', {
['target_arch=="arm64"', {
'ldflags': [
'-L<(android_stlport_libs)/arm64',
],
@ -214,7 +214,7 @@
'target_conditions': [
['_type=="executable"', {
'conditions': [
['target_arch=="a64"', {
['target_arch=="arm64"', {
'ldflags': [
'-Wl,-dynamic-linker,/system/bin/linker64',
],


@ -55,7 +55,7 @@
'<!(uname -m | sed -e "s/i.86/ia32/;\
s/x86_64/x64/;\
s/amd64/x64/;\
s/aarch64/a64/;\
s/aarch64/arm64/;\
s/arm.*/arm/;\
s/mips.*/mipsel/")',
}, {
@ -102,7 +102,6 @@
'conditions': [
['(v8_target_arch=="arm" and host_arch!="arm") or \
(v8_target_arch=="a64" and host_arch!="a64") or \
(v8_target_arch=="arm64" and host_arch!="arm64") or \
(v8_target_arch=="mipsel" and host_arch!="mipsel") or \
(v8_target_arch=="x64" and host_arch!="x64") or \


@ -268,9 +268,9 @@
}], # _toolset=="target"
],
}], # v8_target_arch=="arm"
['v8_target_arch=="a64" or v8_target_arch=="arm64"', {
['v8_target_arch=="arm64"', {
'defines': [
'V8_TARGET_ARCH_A64',
'V8_TARGET_ARCH_ARM64',
],
}],
['v8_target_arch=="ia32"', {
@ -413,8 +413,7 @@
],
}],
['(OS=="linux" or OS=="android") and \
(v8_target_arch=="x64" or v8_target_arch=="a64" or \
v8_target_arch=="arm64")', {
(v8_target_arch=="x64" or v8_target_arch=="arm64")', {
# Check whether the host compiler and target compiler support the
# '-m64' option and set it if so.
'target_conditions': [


@ -25,10 +25,10 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_ASSEMBLER_A64_INL_H_
#define V8_A64_ASSEMBLER_A64_INL_H_
#ifndef V8_ARM64_ASSEMBLER_ARM64_INL_H_
#define V8_ARM64_ASSEMBLER_ARM64_INL_H_
#include "a64/assembler-a64.h"
#include "arm64/assembler-arm64.h"
#include "cpu.h"
#include "debug.h"
@ -573,7 +573,7 @@ Address Assembler::target_address_at(Address pc, Code* code) {
Address Assembler::target_address_from_return_address(Address pc) {
// Returns the address of the call target from the return address that will
// be returned to after a call.
// Call sequence on A64 is:
// Call sequence on ARM64 is:
// ldr ip0, #... @ load from literal pool
// blr ip0
Address candidate = pc - 2 * kInstructionSize;
@ -745,7 +745,7 @@ static const int kCodeAgeStubEntryOffset = 3 * kInstructionSize;
Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
UNREACHABLE(); // This should never be reached on A64.
UNREACHABLE(); // This should never be reached on ARM64.
return Handle<Object>();
}
@ -803,7 +803,7 @@ bool RelocInfo::IsPatchedReturnSequence() {
// The sequence must be:
// ldr ip0, [pc, #offset]
// blr ip0
// See a64/debug-a64.cc BreakLocationIterator::SetDebugBreakAtReturn().
// See arm64/debug-arm64.cc BreakLocationIterator::SetDebugBreakAtReturn().
Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
Instruction* i2 = i1->following();
return i1->IsLdrLiteralX() && (i1->Rt() == ip0.code()) &&
@ -1228,4 +1228,4 @@ void Assembler::ClearRecordedAstId() {
} } // namespace v8::internal
#endif // V8_A64_ASSEMBLER_A64_INL_H_
#endif // V8_ARM64_ASSEMBLER_ARM64_INL_H_
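
The same mechanical pattern recurs throughout the C++ part of the diff: the directory, the include paths, the header guards, and the target macro all move from a64 to arm64. A minimal sketch of the resulting shape of a port file (illustrative names, not a file from the tree):

  // example-arm64.cc (hypothetical)
  #include "v8.h"

  #if V8_TARGET_ARCH_ARM64            // was: V8_TARGET_ARCH_A64

  #include "arm64/assembler-arm64.h"  // was: "a64/assembler-a64.h"

  // arm64-specific implementation goes here.

  #endif  // V8_TARGET_ARCH_ARM64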


@ -28,11 +28,11 @@
#include "v8.h"
#if V8_TARGET_ARCH_A64
#if V8_TARGET_ARCH_ARM64
#define A64_DEFINE_REG_STATICS
#define ARM64_DEFINE_REG_STATICS
#include "a64/assembler-a64-inl.h"
#include "arm64/assembler-arm64-inl.h"
namespace v8 {
namespace internal {
@ -155,7 +155,7 @@ const int RelocInfo::kApplyMask = 0;
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded. Being
// specially coded on A64 means that it is a movz/movk sequence. We don't
// specially coded on ARM64 means that it is a movz/movk sequence. We don't
// generate those for relocatable pointers.
return false;
}
@ -1944,7 +1944,7 @@ void Assembler::debug(const char* message, uint32_t code, Instr params) {
Label start;
bind(&start);
// Refer to instructions-a64.h for a description of the marker and its
// Refer to instructions-arm64.h for a description of the marker and its
// arguments.
hlt(kImmExceptionIsDebug);
ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset);
@ -2594,7 +2594,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// 1) Encode the size of the constant pool, for use by the disassembler.
// 2) Terminate the program, to try to prevent execution from accidentally
// flowing into the constant pool.
// The header is therefore made of two a64 instructions:
// The header is therefore made of two arm64 instructions:
// ldr xzr, #<size of the constant pool in 32-bit words>
// blr xzr
// If executed the code will likely segfault and lr will point to the
@ -2810,4 +2810,4 @@ void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64
#endif // V8_TARGET_ARCH_ARM64


@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_ASSEMBLER_A64_H_
#define V8_A64_ASSEMBLER_A64_H_
#ifndef V8_ARM64_ASSEMBLER_ARM64_H_
#define V8_ARM64_ASSEMBLER_ARM64_H_
#include <list>
#include <map>
@ -35,8 +35,8 @@
#include "utils.h"
#include "assembler.h"
#include "serialize.h"
#include "a64/instructions-a64.h"
#include "a64/cpu-a64.h"
#include "arm64/instructions-arm64.h"
#include "arm64/cpu-arm64.h"
namespace v8 {
@ -332,7 +332,7 @@ STATIC_ASSERT(sizeof(CPURegister) == sizeof(Register));
STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister));
#if defined(A64_DEFINE_REG_STATICS)
#if defined(ARM64_DEFINE_REG_STATICS)
#define INITIALIZE_REGISTER(register_class, name, code, size, type) \
const CPURegister init_##register_class##_##name = {code, size, type}; \
const register_class& name = *reinterpret_cast<const register_class*>( \
@ -345,7 +345,7 @@ STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister));
extern const register_class& name
#define ALIAS_REGISTER(register_class, alias, name) \
extern const register_class& alias
#endif // defined(A64_DEFINE_REG_STATICS)
#endif // defined(ARM64_DEFINE_REG_STATICS)
// No*Reg is used to indicate an unused argument, or an error case. Note that
// these all compare equal (using the Is() method). The Register and FPRegister
@ -1663,7 +1663,7 @@ class Assembler : public AssemblerBase {
// Pseudo-instructions ------------------------------------------------------
// Parameters are described in a64/instructions-a64.h.
// Parameters are described in arm64/instructions-arm64.h.
void debug(const char* message, uint32_t code, Instr params = BREAK);
// Required by V8.
@ -2220,4 +2220,4 @@ class EnsureSpace BASE_EMBEDDED {
} } // namespace v8::internal
#endif // V8_A64_ASSEMBLER_A64_H_
#endif // V8_ARM64_ASSEMBLER_ARM64_H_


@ -27,7 +27,7 @@
#include "v8.h"
#if V8_TARGET_ARCH_A64
#if V8_TARGET_ARCH_ARM64
#include "codegen.h"
#include "debug.h"


@ -27,7 +27,7 @@
#include "v8.h"
#if V8_TARGET_ARCH_A64
#if V8_TARGET_ARCH_ARM64
#include "bootstrapper.h"
#include "code-stubs.h"
@ -1237,7 +1237,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Fadd(base_double, base_double, fp_zero);
// The operation -0+0 results in +0 in all cases except where the
// FPCR rounding mode is 'round towards minus infinity' (RM). The
// A64 simulator does not currently simulate FPCR (where the rounding
// ARM64 simulator does not currently simulate FPCR (where the rounding
// mode is set), so test the operation with some debug code.
if (masm->emit_debug_code()) {
UseScratchRegisterScope temps(masm);
@ -1259,7 +1259,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// If base is -INFINITY, make it +INFINITY.
// * Calculate base - base: All infinities will become NaNs since both
// -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in A64.
// -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in ARM64.
// * If the result is NaN, calculate abs(base).
__ Fsub(scratch0_double, base_double, base_double);
__ Fcmp(scratch0_double, 0.0);
@ -1399,7 +1399,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
void CodeStub::GenerateFPStubs(Isolate* isolate) {
// Floating-point code doesn't get special handling in A64, so there's
// Floating-point code doesn't get special handling in ARM64, so there's
// nothing to do here.
USE(isolate);
}
@ -4635,7 +4635,7 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
bool CodeStub::CanUseFPRegisters() {
// FP registers always available on A64.
// FP registers always available on ARM64.
return true;
}
@ -4933,7 +4933,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// Compute the function's address as the first argument.
__ Sub(x0, lr, kReturnAddressDistanceFromFunctionStart);
#if V8_HOST_ARCH_A64
#if V8_HOST_ARCH_ARM64
uintptr_t entry_hook =
reinterpret_cast<uintptr_t>(masm->isolate()->function_entry_hook());
__ Mov(x10, entry_hook);
@ -5726,4 +5726,4 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64
#endif // V8_TARGET_ARCH_ARM64


@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_CODE_STUBS_A64_H_
#define V8_A64_CODE_STUBS_A64_H_
#ifndef V8_ARM64_CODE_STUBS_ARM64_H_
#define V8_ARM64_CODE_STUBS_ARM64_H_
#include "ic-inl.h"
@ -466,4 +466,4 @@ struct PlatformCallInterfaceDescriptor {
} } // namespace v8::internal
#endif // V8_A64_CODE_STUBS_A64_H_
#endif // V8_ARM64_CODE_STUBS_ARM64_H_


@ -27,11 +27,11 @@
#include "v8.h"
#if V8_TARGET_ARCH_A64
#if V8_TARGET_ARCH_ARM64
#include "codegen.h"
#include "macro-assembler.h"
#include "simulator-a64.h"
#include "simulator-arm64.h"
namespace v8 {
namespace internal {
@ -39,14 +39,14 @@ namespace internal {
#define __ ACCESS_MASM(masm)
#if defined(USE_SIMULATOR)
byte* fast_exp_a64_machine_code = NULL;
byte* fast_exp_arm64_machine_code = NULL;
double fast_exp_simulator(double x) {
Simulator * simulator = Simulator::current(Isolate::Current());
Simulator::CallArgument args[] = {
Simulator::CallArgument(x),
Simulator::CallArgument::End()
};
return simulator->CallDouble(fast_exp_a64_machine_code, args);
return simulator->CallDouble(fast_exp_arm64_machine_code, args);
}
#endif
@ -92,7 +92,7 @@ UnaryMathFunction CreateExpFunction() {
#if !defined(USE_SIMULATOR)
return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
fast_exp_a64_machine_code = buffer;
fast_exp_arm64_machine_code = buffer;
return &fast_exp_simulator;
#endif
}
@ -546,7 +546,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
// Continue the common case first. 'mi' tests N == 1.
__ B(&result_is_finite_non_zero, mi);
// TODO(jbramley): Consider adding a +infinity register for A64.
// TODO(jbramley): Consider adding a +infinity register for ARM64.
__ Ldr(double_temp2, ExpConstant(constants, 2)); // Synthesize +infinity.
// Select between +0.0 and +infinity. 'lo' tests C == 0.
@ -612,4 +612,4 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64
#endif // V8_TARGET_ARCH_ARM64


@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_CODEGEN_A64_H_
#define V8_A64_CODEGEN_A64_H_
#ifndef V8_ARM64_CODEGEN_ARM64_H_
#define V8_ARM64_CODEGEN_ARM64_H_
#include "ast.h"
#include "ic-inl.h"
@ -68,4 +68,4 @@ class MathExpGenerator : public AllStatic {
} } // namespace v8::internal
#endif // V8_A64_CODEGEN_A64_H_
#endif // V8_ARM64_CODEGEN_ARM64_H_


@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_CONSTANTS_A64_H_
#define V8_A64_CONSTANTS_A64_H_
#ifndef V8_ARM64_CONSTANTS_ARM64_H_
#define V8_ARM64_CONSTANTS_ARM64_H_
// Assert that this is an LP64 system.
@ -308,7 +308,7 @@ inline Condition ReverseConditionForCmp(Condition cond) {
return eq;
default:
// In practice this function is only used with a condition coming from
// TokenToCondition in lithium-codegen-a64.cc. Any other condition is
// TokenToCondition in lithium-codegen-arm64.cc. Any other condition is
// invalid as it doesn't necessarily make sense to reverse it (consider
// 'mi' for instance).
UNREACHABLE();
@ -1268,4 +1268,4 @@ enum UnallocatedOp {
} } // namespace v8::internal
#endif // V8_A64_CONSTANTS_A64_H_
#endif // V8_ARM64_CONSTANTS_ARM64_H_


@ -29,10 +29,10 @@
#include "v8.h"
#if V8_TARGET_ARCH_A64
#if V8_TARGET_ARCH_ARM64
#include "a64/cpu-a64.h"
#include "a64/utils-a64.h"
#include "arm64/cpu-arm64.h"
#include "arm64/utils-arm64.h"
namespace v8 {
namespace internal {
@ -196,4 +196,4 @@ uint32_t CpuFeatures::GetCacheType() {
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64
#endif // V8_TARGET_ARCH_ARM64


@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_CPU_A64_H_
#define V8_A64_CPU_A64_H_
#ifndef V8_ARM64_CPU_ARM64_H_
#define V8_ARM64_CPU_ARM64_H_
#include <stdio.h>
#include "serialize.h"
@ -47,13 +47,13 @@ class CpuFeatures : public AllStatic {
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
// There are no optional features for A64.
// There are no optional features for ARM64.
return false;
};
static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
ASSERT(initialized_);
// There are no optional features for A64.
// There are no optional features for ARM64.
return false;
}
@ -69,13 +69,13 @@ class CpuFeatures : public AllStatic {
static unsigned supported_;
static bool VerifyCrossCompiling() {
// There are no optional features for A64.
// There are no optional features for ARM64.
ASSERT(cross_compile_ == 0);
return true;
}
static bool VerifyCrossCompiling(CpuFeature f) {
// There are no optional features for A64.
// There are no optional features for ARM64.
USE(f);
ASSERT(cross_compile_ == 0);
return true;
@ -104,4 +104,4 @@ class CpuFeatures : public AllStatic {
} } // namespace v8::internal
#endif // V8_A64_CPU_A64_H_
#endif // V8_ARM64_CPU_ARM64_H_


@ -27,7 +27,7 @@
#include "v8.h"
#if V8_TARGET_ARCH_A64
#if V8_TARGET_ARCH_ARM64
#include "codegen.h"
#include "debug.h"
@ -314,7 +314,7 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-a64.cc).
// Register state for CallFunctionStub (from code-stubs-arm64.cc).
// ----------- S t a t e -------------
// -- x1 : function
// -----------------------------------
@ -323,7 +323,7 @@ void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-a64.cc).
// Register state for CallFunctionStub (from code-stubs-arm64.cc).
// ----------- S t a t e -------------
// -- x1 : function
// -- x2 : feedback array
@ -334,7 +334,7 @@ void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-a64.cc).
// Calling convention for CallConstructStub (from code-stubs-arm64.cc).
// ----------- S t a t e -------------
// -- x0 : number of arguments (not smi)
// -- x1 : constructor function
@ -344,7 +344,7 @@ void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-a64.cc).
// Calling convention for CallConstructStub (from code-stubs-arm64.cc).
// ----------- S t a t e -------------
// -- x0 : number of arguments (not smi)
// -- x1 : constructor function
@ -376,12 +376,12 @@ void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnA64);
masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnARM64);
}
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnA64);
masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnARM64);
}
const bool Debug::kFrameDropperSupported = false;
@ -390,4 +390,4 @@ const bool Debug::kFrameDropperSupported = false;
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64
#endif // V8_TARGET_ARCH_ARM64


@ -25,10 +25,10 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_DECODER_A64_INL_H_
#define V8_A64_DECODER_A64_INL_H_
#ifndef V8_ARM64_DECODER_ARM64_INL_H_
#define V8_ARM64_DECODER_ARM64_INL_H_
#include "a64/decoder-a64.h"
#include "arm64/decoder-arm64.h"
#include "globals.h"
#include "utils.h"
@ -668,4 +668,4 @@ void Decoder<V>::DecodeAdvSIMDDataProcessing(Instruction* instr) {
} } // namespace v8::internal
#endif // V8_A64_DECODER_A64_INL_H_
#endif // V8_ARM64_DECODER_ARM64_INL_H_


@ -27,11 +27,11 @@
#include "v8.h"
#if V8_TARGET_ARCH_A64
#if V8_TARGET_ARCH_ARM64
#include "globals.h"
#include "utils.h"
#include "a64/decoder-a64.h"
#include "arm64/decoder-arm64.h"
namespace v8 {
@ -106,4 +106,4 @@ VISITOR_LIST(DEFINE_VISITOR_CALLERS)
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64
#endif // V8_TARGET_ARCH_ARM64


@ -25,13 +25,13 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_DECODER_A64_H_
#define V8_A64_DECODER_A64_H_
#ifndef V8_ARM64_DECODER_ARM64_H_
#define V8_ARM64_DECODER_ARM64_H_
#include <list>
#include "globals.h"
#include "a64/instructions-a64.h"
#include "arm64/instructions-arm64.h"
namespace v8 {
namespace internal {
@ -207,4 +207,4 @@ class Decoder : public V {
} } // namespace v8::internal
#endif // V8_A64_DECODER_A64_H_
#endif // V8_ARM64_DECODER_ARM64_H_


@ -109,7 +109,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
// There is no dynamic alignment padding on A64 in the input frame.
// There is no dynamic alignment padding on ARM64 in the input frame.
return false;
}


@ -32,11 +32,11 @@
#include "v8.h"
#if V8_TARGET_ARCH_A64
#if V8_TARGET_ARCH_ARM64
#include "disasm.h"
#include "a64/decoder-a64-inl.h"
#include "a64/disasm-a64.h"
#include "arm64/decoder-arm64-inl.h"
#include "arm64/disasm-arm64.h"
#include "macro-assembler.h"
#include "platform.h"
@ -1782,13 +1782,13 @@ const char* NameConverter::NameOfCPURegister(int reg) const {
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // A64 does not have the concept of a byte register
UNREACHABLE(); // ARM64 does not have the concept of a byte register
return "nobytereg";
}
const char* NameConverter::NameOfXMMRegister(int reg) const {
UNREACHABLE(); // A64 does not have any XMM registers
UNREACHABLE(); // ARM64 does not have any XMM registers
return "noxmmreg";
}
@ -1853,4 +1853,4 @@ void Disassembler::Disassemble(FILE* file, byte* start, byte* end) {
} // namespace disasm
#endif // V8_TARGET_ARCH_A64
#endif // V8_TARGET_ARCH_ARM64


@ -25,15 +25,15 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_DISASM_A64_H
#define V8_A64_DISASM_A64_H
#ifndef V8_ARM64_DISASM_ARM64_H
#define V8_ARM64_DISASM_ARM64_H
#include "v8.h"
#include "globals.h"
#include "utils.h"
#include "instructions-a64.h"
#include "decoder-a64.h"
#include "instructions-arm64.h"
#include "decoder-arm64.h"
namespace v8 {
namespace internal {
@ -112,4 +112,4 @@ class PrintDisassembler: public Disassembler {
} } // namespace v8::internal
#endif // V8_A64_DISASM_A64_H
#endif // V8_ARM64_DISASM_ARM64_H


@ -27,11 +27,11 @@
#include "v8.h"
#if V8_TARGET_ARCH_A64
#if V8_TARGET_ARCH_ARM64
#include "assembler.h"
#include "assembler-a64.h"
#include "assembler-a64-inl.h"
#include "assembler-arm64.h"
#include "assembler-arm64-inl.h"
#include "frames.h"
namespace v8 {
@ -62,4 +62,4 @@ Object*& ExitFrame::constant_pool_slot() const {
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64
#endif // V8_TARGET_ARCH_ARM64


@ -25,11 +25,11 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "a64/constants-a64.h"
#include "a64/assembler-a64.h"
#include "arm64/constants-arm64.h"
#include "arm64/assembler-arm64.h"
#ifndef V8_A64_FRAMES_A64_H_
#define V8_A64_FRAMES_A64_H_
#ifndef V8_ARM64_FRAMES_ARM64_H_
#define V8_ARM64_FRAMES_ARM64_H_
namespace v8 {
namespace internal {
@ -130,4 +130,4 @@ inline void StackHandler::SetFp(Address slot, Address fp) {
} } // namespace v8::internal
#endif // V8_A64_FRAMES_A64_H_
#endif // V8_ARM64_FRAMES_ARM64_H_


@ -27,7 +27,7 @@
#include "v8.h"
#if V8_TARGET_ARCH_A64
#if V8_TARGET_ARCH_ARM64
#include "code-stubs.h"
#include "codegen.h"
@ -39,8 +39,8 @@
#include "scopes.h"
#include "stub-cache.h"
#include "a64/code-stubs-a64.h"
#include "a64/macro-assembler-a64.h"
#include "arm64/code-stubs-arm64.h"
#include "arm64/macro-assembler-arm64.h"
namespace v8 {
namespace internal {
@ -64,7 +64,7 @@ class JumpPatchSite BASE_EMBEDDED {
}
void EmitJumpIfNotSmi(Register reg, Label* target) {
// This code will be patched by PatchInlinedSmiCode, in ic-a64.cc.
// This code will be patched by PatchInlinedSmiCode, in ic-arm64.cc.
InstructionAccurateScope scope(masm_, 1);
ASSERT(!info_emitted_);
ASSERT(reg.Is64Bits());
@ -75,7 +75,7 @@ class JumpPatchSite BASE_EMBEDDED {
}
void EmitJumpIfSmi(Register reg, Label* target) {
// This code will be patched by PatchInlinedSmiCode, in ic-a64.cc.
// This code will be patched by PatchInlinedSmiCode, in ic-arm64.cc.
InstructionAccurateScope scope(masm_, 1);
ASSERT(!info_emitted_);
ASSERT(reg.Is64Bits());
@ -414,7 +414,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// Make sure that the constant pool is not emitted inside of the return
// sequence. This sequence can get patched when the debugger is used. See
// debug-a64.cc:BreakLocationIterator::SetDebugBreakAtReturn().
// debug-arm64.cc:BreakLocationIterator::SetDebugBreakAtReturn().
{
InstructionAccurateScope scope(masm_,
Assembler::kJSRetSequenceInstructions);
@ -4300,7 +4300,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Try to generate an optimized comparison with a literal value.
// TODO(jbramley): This only checks common values like NaN or undefined.
// Should it also handle A64 immediate operands?
// Should it also handle ARM64 immediate operands?
if (TryLiteralCompare(expr)) {
return;
}
@ -4979,4 +4979,4 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64
#endif // V8_TARGET_ARCH_ARM64


@ -27,9 +27,9 @@
#include "v8.h"
#if V8_TARGET_ARCH_A64
#if V8_TARGET_ARCH_ARM64
#include "a64/assembler-a64.h"
#include "arm64/assembler-arm64.h"
#include "code-stubs.h"
#include "codegen.h"
#include "disasm.h"
@ -1404,4 +1404,4 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64
#endif // V8_TARGET_ARCH_ARM64


@ -27,12 +27,12 @@
#include "v8.h"
#if V8_TARGET_ARCH_A64
#if V8_TARGET_ARCH_ARM64
#define A64_DEFINE_FP_STATICS
#define ARM64_DEFINE_FP_STATICS
#include "a64/instructions-a64.h"
#include "a64/assembler-a64-inl.h"
#include "arm64/instructions-arm64.h"
#include "arm64/assembler-arm64-inl.h"
namespace v8 {
namespace internal {
@ -306,7 +306,7 @@ void Instruction::SetImmLLiteral(Instruction* source) {
// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-a64-inl.h to work around this.
// instructions-arm64-inl.h to work around this.
bool InstructionSequence::IsInlineData() const {
// Inline data is encoded as a single movz instruction which writes to xzr
// (x31).
@ -318,7 +318,7 @@ bool InstructionSequence::IsInlineData() const {
// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-a64-inl.h to work around this.
// instructions-arm64-inl.h to work around this.
uint64_t InstructionSequence::InlineData() const {
ASSERT(IsInlineData());
uint64_t payload = ImmMoveWide();
@ -330,4 +330,4 @@ uint64_t InstructionSequence::InlineData() const {
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64
#endif // V8_TARGET_ARCH_ARM64


@ -25,13 +25,13 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_INSTRUCTIONS_A64_H_
#define V8_A64_INSTRUCTIONS_A64_H_
#ifndef V8_ARM64_INSTRUCTIONS_ARM64_H_
#define V8_ARM64_INSTRUCTIONS_ARM64_H_
#include "globals.h"
#include "utils.h"
#include "a64/constants-a64.h"
#include "a64/utils-a64.h"
#include "arm64/constants-arm64.h"
#include "arm64/utils-arm64.h"
namespace v8 {
namespace internal {
@ -42,16 +42,16 @@ namespace internal {
typedef uint32_t Instr;
// The following macros initialize a float/double variable with a bit pattern
// without using static initializers: If A64_DEFINE_FP_STATICS is defined, the
// without using static initializers: If ARM64_DEFINE_FP_STATICS is defined, the
// symbol is defined as uint32_t/uint64_t initialized with the desired bit
// pattern. Otherwise, the same symbol is declared as an external float/double.
#if defined(A64_DEFINE_FP_STATICS)
#if defined(ARM64_DEFINE_FP_STATICS)
#define DEFINE_FLOAT(name, value) extern const uint32_t name = value
#define DEFINE_DOUBLE(name, value) extern const uint64_t name = value
#else
#define DEFINE_FLOAT(name, value) extern const float name
#define DEFINE_DOUBLE(name, value) extern const double name
#endif // defined(A64_DEFINE_FP_STATICS)
#endif // defined(ARM64_DEFINE_FP_STATICS)
DEFINE_FLOAT(kFP32PositiveInfinity, 0x7f800000);
DEFINE_FLOAT(kFP32NegativeInfinity, 0xff800000);
@ -422,7 +422,7 @@ const Instr kImmExceptionIsUnreachable = 0xdebf;
// A pseudo 'printf' instruction. The arguments will be passed to the platform
// printf method.
const Instr kImmExceptionIsPrintf = 0xdeb1;
// Parameters are stored in A64 registers as if the printf pseudo-instruction
// Parameters are stored in ARM64 registers as if the printf pseudo-instruction
// was a call to the real printf method:
//
// x0: The format string, then either of:
@ -498,4 +498,4 @@ enum DebugParameters {
} } // namespace v8::internal
#endif // V8_A64_INSTRUCTIONS_A64_H_
#endif // V8_ARM64_INSTRUCTIONS_ARM64_H_
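
ARM64_DEFINE_FP_STATICS here and ARM64_DEFINE_REG_STATICS in assembler-arm64.h are the same define-once idiom: exactly one translation unit defines the switch before including the header (instructions-arm64.cc, earlier in the diff, does this for the FP constants) and thereby emits the storage; every other includer only sees an extern declaration. A self-contained sketch of the idiom with hypothetical names:

  // fp-constants.h (hypothetical)
  #if defined(ARM64_DEFINE_FP_STATICS)
  #define DEFINE_FLOAT(name, value) extern const uint32_t name = value
  #else
  #define DEFINE_FLOAT(name, value) extern const float name
  #endif
  DEFINE_FLOAT(kExampleInfinity, 0x7f800000);  // bit pattern of float +infinity

  // fp-constants.cc (hypothetical): the single place the bits are emitted,
  // mirroring what instructions-arm64.cc does for the real constants.
  #define ARM64_DEFINE_FP_STATICS
  #include "fp-constants.h"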


@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "a64/instrument-a64.h"
#include "arm64/instrument-arm64.h"
namespace v8 {
namespace internal {


@ -25,13 +25,13 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_INSTRUMENT_A64_H_
#define V8_A64_INSTRUMENT_A64_H_
#ifndef V8_ARM64_INSTRUMENT_ARM64_H_
#define V8_ARM64_INSTRUMENT_ARM64_H_
#include "globals.h"
#include "utils.h"
#include "a64/decoder-a64.h"
#include "a64/constants-a64.h"
#include "arm64/decoder-arm64.h"
#include "arm64/constants-arm64.h"
namespace v8 {
namespace internal {
@ -104,4 +104,4 @@ class Instrument: public DecoderVisitor {
} } // namespace v8::internal
#endif // V8_A64_INSTRUMENT_A64_H_
#endif // V8_ARM64_INSTRUMENT_ARM64_H_


@ -28,8 +28,8 @@
#include "v8.h"
#include "lithium-allocator-inl.h"
#include "a64/lithium-a64.h"
#include "a64/lithium-codegen-a64.h"
#include "arm64/lithium-arm64.h"
#include "arm64/lithium-codegen-arm64.h"
#include "hydrogen-osr.h"
namespace v8 {
@ -2456,7 +2456,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathFloor: {
ASSERT(instr->representation().IsInteger32());
ASSERT(instr->value()->representation().IsDouble());
// TODO(jbramley): A64 can easily handle a double argument with frintm,
// TODO(jbramley): ARM64 can easily handle a double argument with frintm,
// but we're never asked for it here. At the moment, we fall back to the
// runtime if the result doesn't fit, like the other architectures.
LOperand* input = UseRegisterAtStart(instr->value());


@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_LITHIUM_A64_H_
#define V8_A64_LITHIUM_A64_H_
#ifndef V8_ARM64_LITHIUM_ARM64_H_
#define V8_ARM64_LITHIUM_ARM64_H_
#include "hydrogen.h"
#include "lithium-allocator.h"
@ -3018,8 +3018,8 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
// register by the instruction implementation.
//
// This behaves identically to ARM's UseTempRegister. However, it is renamed
// to discourage its use in A64, since in most cases it is better to allocate
// a temporary register for the Lithium instruction.
// to discourage its use in ARM64, since in most cases it is better to
// allocate a temporary register for the Lithium instruction.
MUST_USE_RESULT LOperand* UseRegisterAndClobber(HValue* value);
// The operand created by UseRegisterAtStart is guaranteed to be live only at
@ -3097,4 +3097,4 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
} } // namespace v8::internal
#endif // V8_A64_LITHIUM_A64_H_
#endif // V8_ARM64_LITHIUM_ARM64_H_


@ -27,8 +27,8 @@
#include "v8.h"
#include "a64/lithium-codegen-a64.h"
#include "a64/lithium-gap-resolver-a64.h"
#include "arm64/lithium-codegen-arm64.h"
#include "arm64/lithium-gap-resolver-arm64.h"
#include "code-stubs.h"
#include "stub-cache.h"
#include "hydrogen-osr.h"
@ -1698,8 +1698,8 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
// calling a helper function. With frintz (to produce the intermediate
// quotient) and fmsub (to calculate the remainder without loss of
// precision), it should be possible. However, we would need support for
// fdiv in round-towards-zero mode, and the A64 simulator doesn't support
// that yet.
// fdiv in round-towards-zero mode, and the ARM64 simulator doesn't
// support that yet.
ASSERT(left.Is(d0));
ASSERT(right.Is(d1));
__ CallCFunction(
@ -2000,7 +2000,7 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
// TODO(all): on ARM we use a call descriptor to specify a storage mode
// but on A64 we only have one storage mode so it isn't necessary. Check
// but on ARM64 we only have one storage mode so it isn't necessary. Check
// this understanding is correct.
__ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
} else {


@ -25,12 +25,12 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_LITHIUM_CODEGEN_A64_H_
#define V8_A64_LITHIUM_CODEGEN_A64_H_
#ifndef V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
#define V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
#include "a64/lithium-a64.h"
#include "arm64/lithium-arm64.h"
#include "a64/lithium-gap-resolver-a64.h"
#include "arm64/lithium-gap-resolver-arm64.h"
#include "deoptimizer.h"
#include "lithium-codegen.h"
#include "safepoint-table.h"
@ -454,7 +454,7 @@ class LDeferredCode: public ZoneObject {
// the branch when the inverted condition is verified.
//
// For actual examples of condition see the concrete implementation in
// lithium-codegen-a64.cc (e.g. BranchOnCondition, CompareAndBranch).
// lithium-codegen-arm64.cc (e.g. BranchOnCondition, CompareAndBranch).
class BranchGenerator BASE_EMBEDDED {
public:
explicit BranchGenerator(LCodeGen* codegen)
@ -473,4 +473,4 @@ class BranchGenerator BASE_EMBEDDED {
} } // namespace v8::internal
#endif // V8_A64_LITHIUM_CODEGEN_A64_H_
#endif // V8_ARM64_LITHIUM_CODEGEN_ARM64_H_


@ -27,8 +27,8 @@
#include "v8.h"
#include "a64/lithium-gap-resolver-a64.h"
#include "a64/lithium-codegen-a64.h"
#include "arm64/lithium-gap-resolver-arm64.h"
#include "arm64/lithium-codegen-arm64.h"
namespace v8 {
namespace internal {


@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_LITHIUM_GAP_RESOLVER_A64_H_
#define V8_A64_LITHIUM_GAP_RESOLVER_A64_H_
#ifndef V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
#define V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
#include "v8.h"
@ -87,4 +87,4 @@ class LGapResolver BASE_EMBEDDED {
} } // namespace v8::internal
#endif // V8_A64_LITHIUM_GAP_RESOLVER_A64_H_
#endif // V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_


@ -25,18 +25,18 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_MACRO_ASSEMBLER_A64_INL_H_
#define V8_A64_MACRO_ASSEMBLER_A64_INL_H_
#ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
#define V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
#include <ctype.h>
#include "v8globals.h"
#include "globals.h"
#include "a64/assembler-a64.h"
#include "a64/assembler-a64-inl.h"
#include "a64/macro-assembler-a64.h"
#include "a64/instrument-a64.h"
#include "arm64/assembler-arm64.h"
#include "arm64/assembler-arm64-inl.h"
#include "arm64/macro-assembler-arm64.h"
#include "arm64/instrument-arm64.h"
namespace v8 {
@ -1674,4 +1674,4 @@ void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
} } // namespace v8::internal
#endif // V8_A64_MACRO_ASSEMBLER_A64_INL_H_
#endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_


@ -27,7 +27,7 @@
#include "v8.h"
#if V8_TARGET_ARCH_A64
#if V8_TARGET_ARCH_ARM64
#include "bootstrapper.h"
#include "codegen.h"
@ -1854,19 +1854,19 @@ void MacroAssembler::InitializeNewString(Register string,
int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_A64
#if V8_HOST_ARCH_ARM64
// Running on the real platform. Use the alignment as mandated by the local
// environment.
// Note: This will break if we ever start generating snapshots on one ARM
// platform for another ARM platform with a different alignment.
return OS::ActivationFrameAlignment();
#else // V8_HOST_ARCH_A64
#else // V8_HOST_ARCH_ARM64
// If we are using the simulator then we should always align to the expected
// alignment. As the simulator is used to generate snapshots we do not know
// if the target platform will need alignment, so this is controlled from a
// flag.
return FLAG_sim_stack_alignment;
#endif // V8_HOST_ARCH_A64
#endif // V8_HOST_ARCH_ARM64
}
@ -3252,7 +3252,7 @@ void MacroAssembler::Allocate(int object_size,
}
// We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
// the same alignment on A64.
// the same alignment on ARM64.
STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
// Calculate new top and bail out if new space is exhausted.
@ -3324,7 +3324,7 @@ void MacroAssembler::Allocate(Register object_size,
}
// We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
// the same alignment on A64.
// the same alignment on ARM64.
STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
// Calculate new top and bail out if new space is exhausted
@ -3839,7 +3839,7 @@ void MacroAssembler::CheckFastObjectElements(Register map,
// Note: The ARM version of this clobbers elements_reg, but this version does
// not. Some uses of this in A64 assume that elements_reg will be preserved.
// not. Some uses of this in ARM64 assume that elements_reg will be preserved.
void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
Register elements_reg,
@ -5025,7 +5025,7 @@ void MacroAssembler::EmitCodeAgeSequence(Assembler * assm,
__ LoadLiteral(ip0, kCodeAgeStubEntryOffset);
__ adr(x0, &start);
__ br(ip0);
// IsCodeAgeSequence in codegen-a64.cc assumes that the code generated up
// IsCodeAgeSequence in codegen-arm64.cc assumes that the code generated up
// until now (kCodeAgeStubEntryOffset) is the same for all code age sequences.
__ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);
if (stub) {
@ -5168,4 +5168,4 @@ InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64
#endif // V8_TARGET_ARCH_ARM64


@ -25,15 +25,15 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_MACRO_ASSEMBLER_A64_H_
#define V8_A64_MACRO_ASSEMBLER_A64_H_
#ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
#define V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
#include <vector>
#include "v8globals.h"
#include "globals.h"
#include "a64/assembler-a64-inl.h"
#include "arm64/assembler-arm64-inl.h"
namespace v8 {
namespace internal {
@ -1212,7 +1212,7 @@ class MacroAssembler : public Assembler {
// On successful conversion, the least significant 32 bits of the result are
// equivalent to the ECMA-262 operation "ToInt32".
//
// Only public for the test code in test-code-stubs-a64.cc.
// Only public for the test code in test-code-stubs-arm64.cc.
void TryConvertDoubleToInt64(Register result,
DoubleRegister input,
Label* done);
@ -1963,7 +1963,7 @@ class MacroAssembler : public Assembler {
// Code ageing support functions.
// Code ageing on A64 works similarly to on ARM. When V8 wants to mark a
// Code ageing on ARM64 works similarly to on ARM. When V8 wants to mark a
// function as old, it replaces some of the function prologue (generated by
// FullCodeGenerator::Generate) with a call to a special stub (ultimately
// generated by GenerateMakeCodeYoungAgainCommon). The stub restores the
@ -2300,4 +2300,4 @@ class InlineSmiCheckInfo {
#define ACCESS_MASM(masm) masm->
#endif
#endif // V8_A64_MACRO_ASSEMBLER_A64_H_
#endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_H_


@ -27,7 +27,7 @@
#include "v8.h"
#if V8_TARGET_ARCH_A64
#if V8_TARGET_ARCH_ARM64
#include "cpu-profiler.h"
#include "unicode.h"
@ -36,7 +36,7 @@
#include "regexp-stack.h"
#include "macro-assembler.h"
#include "regexp-macro-assembler.h"
#include "a64/regexp-macro-assembler-a64.h"
#include "arm64/regexp-macro-assembler-arm64.h"
namespace v8 {
namespace internal {
@ -124,7 +124,7 @@ namespace internal {
* Isolate* isolate)
* The call is performed by NativeRegExpMacroAssembler::Execute()
* (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
* in a64/simulator-a64.h.
* in arm64/simulator-arm64.h.
* When calling as a non-direct call (i.e., from C++ code), the return address
* area is overwritten with the LR register by the RegExp code. When doing a
* direct call from generated code, the return address is placed there by
@ -133,7 +133,7 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
RegExpMacroAssemblerA64::RegExpMacroAssemblerA64(
RegExpMacroAssemblerARM64::RegExpMacroAssemblerARM64(
Mode mode,
int registers_to_save,
Zone* zone)
@ -157,7 +157,7 @@ RegExpMacroAssemblerA64::RegExpMacroAssemblerA64(
}
RegExpMacroAssemblerA64::~RegExpMacroAssemblerA64() {
RegExpMacroAssemblerARM64::~RegExpMacroAssemblerARM64() {
delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
entry_label_.Unuse();
@ -169,12 +169,12 @@ RegExpMacroAssemblerA64::~RegExpMacroAssemblerA64() {
stack_overflow_label_.Unuse();
}
int RegExpMacroAssemblerA64::stack_limit_slack() {
int RegExpMacroAssemblerARM64::stack_limit_slack() {
return RegExpStack::kStackLimitSlack;
}
void RegExpMacroAssemblerA64::AdvanceCurrentPosition(int by) {
void RegExpMacroAssemblerARM64::AdvanceCurrentPosition(int by) {
if (by != 0) {
__ Add(current_input_offset(),
current_input_offset(), by * char_size());
@ -182,7 +182,7 @@ void RegExpMacroAssemblerA64::AdvanceCurrentPosition(int by) {
}
void RegExpMacroAssemblerA64::AdvanceRegister(int reg, int by) {
void RegExpMacroAssemblerARM64::AdvanceRegister(int reg, int by) {
ASSERT((reg >= 0) && (reg < num_registers_));
if (by != 0) {
Register to_advance;
@ -210,7 +210,7 @@ void RegExpMacroAssemblerA64::AdvanceRegister(int reg, int by) {
}
void RegExpMacroAssemblerA64::Backtrack() {
void RegExpMacroAssemblerARM64::Backtrack() {
CheckPreemption();
Pop(w10);
__ Add(x10, code_pointer(), Operand(w10, UXTW));
@ -218,22 +218,23 @@ void RegExpMacroAssemblerA64::Backtrack() {
}
void RegExpMacroAssemblerA64::Bind(Label* label) {
void RegExpMacroAssemblerARM64::Bind(Label* label) {
__ Bind(label);
}
void RegExpMacroAssemblerA64::CheckCharacter(uint32_t c, Label* on_equal) {
void RegExpMacroAssemblerARM64::CheckCharacter(uint32_t c, Label* on_equal) {
CompareAndBranchOrBacktrack(current_character(), c, eq, on_equal);
}
void RegExpMacroAssemblerA64::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerARM64::CheckCharacterGT(uc16 limit,
Label* on_greater) {
CompareAndBranchOrBacktrack(current_character(), limit, hi, on_greater);
}
void RegExpMacroAssemblerA64::CheckAtStart(Label* on_at_start) {
void RegExpMacroAssemblerARM64::CheckAtStart(Label* on_at_start) {
Label not_at_start;
// Did we start the match at the start of the input string?
CompareAndBranchOrBacktrack(start_offset(), 0, ne, &not_at_start);
@ -245,7 +246,7 @@ void RegExpMacroAssemblerA64::CheckAtStart(Label* on_at_start) {
}
void RegExpMacroAssemblerA64::CheckNotAtStart(Label* on_not_at_start) {
void RegExpMacroAssemblerARM64::CheckNotAtStart(Label* on_not_at_start) {
// Did we start the match at the start of the input string?
CompareAndBranchOrBacktrack(start_offset(), 0, ne, on_not_at_start);
// If we did, are we still at the start of the input string?
@ -255,12 +256,12 @@ void RegExpMacroAssemblerA64::CheckNotAtStart(Label* on_not_at_start) {
}
void RegExpMacroAssemblerA64::CheckCharacterLT(uc16 limit, Label* on_less) {
void RegExpMacroAssemblerARM64::CheckCharacterLT(uc16 limit, Label* on_less) {
CompareAndBranchOrBacktrack(current_character(), limit, lo, on_less);
}
void RegExpMacroAssemblerA64::CheckCharacters(Vector<const uc16> str,
void RegExpMacroAssemblerARM64::CheckCharacters(Vector<const uc16> str,
int cp_offset,
Label* on_failure,
bool check_end_of_string) {
@ -292,7 +293,7 @@ void RegExpMacroAssemblerA64::CheckCharacters(Vector<const uc16> str,
}
void RegExpMacroAssemblerA64::CheckGreedyLoop(Label* on_equal) {
void RegExpMacroAssemblerARM64::CheckGreedyLoop(Label* on_equal) {
__ Ldr(w10, MemOperand(backtrack_stackpointer()));
__ Cmp(current_input_offset(), w10);
__ Cset(x11, eq);
@ -301,7 +302,7 @@ void RegExpMacroAssemblerA64::CheckGreedyLoop(Label* on_equal) {
BranchOrBacktrack(eq, on_equal);
}
void RegExpMacroAssemblerA64::CheckNotBackReferenceIgnoreCase(
void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
int start_reg,
Label* on_no_match) {
Label fallthrough;
@ -428,7 +429,7 @@ void RegExpMacroAssemblerA64::CheckNotBackReferenceIgnoreCase(
__ Bind(&fallthrough);
}
void RegExpMacroAssemblerA64::CheckNotBackReference(
void RegExpMacroAssemblerARM64::CheckNotBackReference(
int start_reg,
Label* on_no_match) {
Label fallthrough;
@ -490,29 +491,29 @@ void RegExpMacroAssemblerA64::CheckNotBackReference(
}
void RegExpMacroAssemblerA64::CheckNotCharacter(unsigned c,
Label* on_not_equal) {
void RegExpMacroAssemblerARM64::CheckNotCharacter(unsigned c,
Label* on_not_equal) {
CompareAndBranchOrBacktrack(current_character(), c, ne, on_not_equal);
}
void RegExpMacroAssemblerA64::CheckCharacterAfterAnd(uint32_t c,
uint32_t mask,
Label* on_equal) {
void RegExpMacroAssemblerARM64::CheckCharacterAfterAnd(uint32_t c,
uint32_t mask,
Label* on_equal) {
__ And(w10, current_character(), mask);
CompareAndBranchOrBacktrack(w10, c, eq, on_equal);
}
void RegExpMacroAssemblerA64::CheckNotCharacterAfterAnd(unsigned c,
unsigned mask,
Label* on_not_equal) {
void RegExpMacroAssemblerARM64::CheckNotCharacterAfterAnd(unsigned c,
unsigned mask,
Label* on_not_equal) {
__ And(w10, current_character(), mask);
CompareAndBranchOrBacktrack(w10, c, ne, on_not_equal);
}
void RegExpMacroAssemblerA64::CheckNotCharacterAfterMinusAnd(
void RegExpMacroAssemblerARM64::CheckNotCharacterAfterMinusAnd(
uc16 c,
uc16 minus,
uc16 mask,
@ -524,7 +525,7 @@ void RegExpMacroAssemblerA64::CheckNotCharacterAfterMinusAnd(
}
void RegExpMacroAssemblerA64::CheckCharacterInRange(
void RegExpMacroAssemblerARM64::CheckCharacterInRange(
uc16 from,
uc16 to,
Label* on_in_range) {
@ -534,7 +535,7 @@ void RegExpMacroAssemblerA64::CheckCharacterInRange(
}
void RegExpMacroAssemblerA64::CheckCharacterNotInRange(
void RegExpMacroAssemblerARM64::CheckCharacterNotInRange(
uc16 from,
uc16 to,
Label* on_not_in_range) {
@ -544,7 +545,7 @@ void RegExpMacroAssemblerA64::CheckCharacterNotInRange(
}
void RegExpMacroAssemblerA64::CheckBitInTable(
void RegExpMacroAssemblerARM64::CheckBitInTable(
Handle<ByteArray> table,
Label* on_bit_set) {
__ Mov(x11, Operand(table));
@ -559,8 +560,8 @@ void RegExpMacroAssemblerA64::CheckBitInTable(
}
bool RegExpMacroAssemblerA64::CheckSpecialCharacterClass(uc16 type,
Label* on_no_match) {
bool RegExpMacroAssemblerARM64::CheckSpecialCharacterClass(uc16 type,
Label* on_no_match) {
// Range checks (c in min..max) are generally implemented by an unsigned
// (c - min) <= (max - min) check
switch (type) {
@ -663,13 +664,13 @@ bool RegExpMacroAssemblerA64::CheckSpecialCharacterClass(uc16 type,
}
void RegExpMacroAssemblerA64::Fail() {
void RegExpMacroAssemblerARM64::Fail() {
__ Mov(w0, FAILURE);
__ B(&exit_label_);
}
Handle<HeapObject> RegExpMacroAssemblerA64::GetCode(Handle<String> source) {
Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
Label return_w0;
// Finalize code - write the entry point code now we know how many
// registers we need.
@ -1065,43 +1066,40 @@ Handle<HeapObject> RegExpMacroAssemblerA64::GetCode(Handle<String> source) {
}
void RegExpMacroAssemblerA64::GoTo(Label* to) {
void RegExpMacroAssemblerARM64::GoTo(Label* to) {
BranchOrBacktrack(al, to);
}
void RegExpMacroAssemblerA64::IfRegisterGE(int reg,
int comparand,
Label* if_ge) {
void RegExpMacroAssemblerARM64::IfRegisterGE(int reg, int comparand,
Label* if_ge) {
Register to_compare = GetRegister(reg, w10);
CompareAndBranchOrBacktrack(to_compare, comparand, ge, if_ge);
}
void RegExpMacroAssemblerA64::IfRegisterLT(int reg,
int comparand,
Label* if_lt) {
void RegExpMacroAssemblerARM64::IfRegisterLT(int reg, int comparand,
Label* if_lt) {
Register to_compare = GetRegister(reg, w10);
CompareAndBranchOrBacktrack(to_compare, comparand, lt, if_lt);
}
void RegExpMacroAssemblerA64::IfRegisterEqPos(int reg,
Label* if_eq) {
void RegExpMacroAssemblerARM64::IfRegisterEqPos(int reg, Label* if_eq) {
Register to_compare = GetRegister(reg, w10);
__ Cmp(to_compare, current_input_offset());
BranchOrBacktrack(eq, if_eq);
}
RegExpMacroAssembler::IrregexpImplementation
RegExpMacroAssemblerA64::Implementation() {
return kA64Implementation;
RegExpMacroAssemblerARM64::Implementation() {
return kARM64Implementation;
}
void RegExpMacroAssemblerA64::LoadCurrentCharacter(int cp_offset,
Label* on_end_of_input,
bool check_bounds,
int characters) {
void RegExpMacroAssemblerARM64::LoadCurrentCharacter(int cp_offset,
Label* on_end_of_input,
bool check_bounds,
int characters) {
// TODO(pielan): Make sure long strings are caught before this, and not
// just asserted in debug mode.
ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
@ -1114,18 +1112,18 @@ void RegExpMacroAssemblerA64::LoadCurrentCharacter(int cp_offset,
}
void RegExpMacroAssemblerA64::PopCurrentPosition() {
void RegExpMacroAssemblerARM64::PopCurrentPosition() {
Pop(current_input_offset());
}
void RegExpMacroAssemblerA64::PopRegister(int register_index) {
void RegExpMacroAssemblerARM64::PopRegister(int register_index) {
Pop(w10);
StoreRegister(register_index, w10);
}
void RegExpMacroAssemblerA64::PushBacktrack(Label* label) {
void RegExpMacroAssemblerARM64::PushBacktrack(Label* label) {
if (label->is_bound()) {
int target = label->pos();
__ Mov(w10, target + Code::kHeaderSize - kHeapObjectTag);
@ -1143,20 +1141,20 @@ void RegExpMacroAssemblerA64::PushBacktrack(Label* label) {
}
void RegExpMacroAssemblerA64::PushCurrentPosition() {
void RegExpMacroAssemblerARM64::PushCurrentPosition() {
Push(current_input_offset());
}
void RegExpMacroAssemblerA64::PushRegister(int register_index,
StackCheckFlag check_stack_limit) {
void RegExpMacroAssemblerARM64::PushRegister(int register_index,
StackCheckFlag check_stack_limit) {
Register to_push = GetRegister(register_index, w10);
Push(to_push);
if (check_stack_limit) CheckStackLimit();
}
void RegExpMacroAssemblerA64::ReadCurrentPositionFromRegister(int reg) {
void RegExpMacroAssemblerARM64::ReadCurrentPositionFromRegister(int reg) {
Register cached_register;
RegisterState register_state = GetRegisterState(reg);
switch (register_state) {
@ -1178,14 +1176,14 @@ void RegExpMacroAssemblerA64::ReadCurrentPositionFromRegister(int reg) {
}
void RegExpMacroAssemblerA64::ReadStackPointerFromRegister(int reg) {
void RegExpMacroAssemblerARM64::ReadStackPointerFromRegister(int reg) {
Register read_from = GetRegister(reg, w10);
__ Ldr(x11, MemOperand(frame_pointer(), kStackBase));
__ Add(backtrack_stackpointer(), x11, Operand(read_from, SXTW));
}
void RegExpMacroAssemblerA64::SetCurrentPositionFromEnd(int by) {
void RegExpMacroAssemblerARM64::SetCurrentPositionFromEnd(int by) {
Label after_position;
__ Cmp(current_input_offset(), -by * char_size());
__ B(ge, &after_position);
@ -1198,7 +1196,7 @@ void RegExpMacroAssemblerA64::SetCurrentPositionFromEnd(int by) {
}
void RegExpMacroAssemblerA64::SetRegister(int register_index, int to) {
void RegExpMacroAssemblerARM64::SetRegister(int register_index, int to) {
ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
Register set_to = wzr;
if (to != 0) {
@ -1209,14 +1207,14 @@ void RegExpMacroAssemblerA64::SetRegister(int register_index, int to) {
}
bool RegExpMacroAssemblerA64::Succeed() {
bool RegExpMacroAssemblerARM64::Succeed() {
__ B(&success_label_);
return global();
}
void RegExpMacroAssemblerA64::WriteCurrentPositionToRegister(int reg,
int cp_offset) {
void RegExpMacroAssemblerARM64::WriteCurrentPositionToRegister(int reg,
int cp_offset) {
Register position = current_input_offset();
if (cp_offset != 0) {
position = w10;
@ -1226,7 +1224,7 @@ void RegExpMacroAssemblerA64::WriteCurrentPositionToRegister(int reg,
}
void RegExpMacroAssemblerA64::ClearRegisters(int reg_from, int reg_to) {
void RegExpMacroAssemblerARM64::ClearRegisters(int reg_from, int reg_to) {
ASSERT(reg_from <= reg_to);
int num_registers = reg_to - reg_from + 1;
@ -1287,7 +1285,7 @@ void RegExpMacroAssemblerA64::ClearRegisters(int reg_from, int reg_to) {
}
void RegExpMacroAssemblerA64::WriteStackPointerToRegister(int reg) {
void RegExpMacroAssemblerARM64::WriteStackPointerToRegister(int reg) {
__ Ldr(x10, MemOperand(frame_pointer(), kStackBase));
__ Sub(x10, backtrack_stackpointer(), x10);
if (masm_->emit_debug_code()) {
@ -1306,7 +1304,7 @@ static T& frame_entry(Address re_frame, int frame_offset) {
}
int RegExpMacroAssemblerA64::CheckStackGuardState(Address* return_address,
int RegExpMacroAssemblerARM64::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame,
int start_offset,
@ -1406,8 +1404,8 @@ int RegExpMacroAssemblerA64::CheckStackGuardState(Address* return_address,
}
void RegExpMacroAssemblerA64::CheckPosition(int cp_offset,
Label* on_outside_input) {
void RegExpMacroAssemblerARM64::CheckPosition(int cp_offset,
Label* on_outside_input) {
CompareAndBranchOrBacktrack(current_input_offset(),
-cp_offset * char_size(),
ge,
@ -1415,7 +1413,7 @@ void RegExpMacroAssemblerA64::CheckPosition(int cp_offset,
}
bool RegExpMacroAssemblerA64::CanReadUnaligned() {
bool RegExpMacroAssemblerARM64::CanReadUnaligned() {
// TODO(pielan): See whether or not we should disable unaligned accesses.
return !slow_safe();
}
@ -1423,7 +1421,7 @@ bool RegExpMacroAssemblerA64::CanReadUnaligned() {
// Private methods:
void RegExpMacroAssemblerA64::CallCheckStackGuardState(Register scratch) {
void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
// Allocate space on the stack to store the return address. The
// CheckStackGuardState C++ function will override it if the code
// moved. Allocate extra space for 2 arguments passed by pointers.
@ -1470,8 +1468,8 @@ void RegExpMacroAssemblerA64::CallCheckStackGuardState(Register scratch) {
__ Mov(code_pointer(), Operand(masm_->CodeObject()));
}
void RegExpMacroAssemblerA64::BranchOrBacktrack(Condition condition,
Label* to) {
void RegExpMacroAssemblerARM64::BranchOrBacktrack(Condition condition,
Label* to) {
if (condition == al) { // Unconditional.
if (to == NULL) {
Backtrack();
@ -1491,10 +1489,10 @@ void RegExpMacroAssemblerA64::BranchOrBacktrack(Condition condition,
__ Bind(&no_branch);
}
void RegExpMacroAssemblerA64::CompareAndBranchOrBacktrack(Register reg,
int immediate,
Condition condition,
Label* to) {
void RegExpMacroAssemblerARM64::CompareAndBranchOrBacktrack(Register reg,
int immediate,
Condition condition,
Label* to) {
if ((immediate == 0) && ((condition == eq) || (condition == ne))) {
if (to == NULL) {
to = &backtrack_label_;
@ -1515,7 +1513,7 @@ void RegExpMacroAssemblerA64::CompareAndBranchOrBacktrack(Register reg,
}
void RegExpMacroAssemblerA64::CheckPreemption() {
void RegExpMacroAssemblerARM64::CheckPreemption() {
// Check for preemption.
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
@ -1527,7 +1525,7 @@ void RegExpMacroAssemblerA64::CheckPreemption() {
}
void RegExpMacroAssemblerA64::CheckStackLimit() {
void RegExpMacroAssemblerARM64::CheckStackLimit() {
ExternalReference stack_limit =
ExternalReference::address_of_regexp_stack_limit(isolate());
__ Mov(x10, stack_limit);
@ -1537,7 +1535,7 @@ void RegExpMacroAssemblerA64::CheckStackLimit() {
}
void RegExpMacroAssemblerA64::Push(Register source) {
void RegExpMacroAssemblerARM64::Push(Register source) {
ASSERT(source.Is32Bits());
ASSERT(!source.is(backtrack_stackpointer()));
__ Str(source,
@ -1547,7 +1545,7 @@ void RegExpMacroAssemblerA64::Push(Register source) {
}
void RegExpMacroAssemblerA64::Pop(Register target) {
void RegExpMacroAssemblerARM64::Pop(Register target) {
ASSERT(target.Is32Bits());
ASSERT(!target.is(backtrack_stackpointer()));
__ Ldr(target,
@ -1555,14 +1553,14 @@ void RegExpMacroAssemblerA64::Pop(Register target) {
}
Register RegExpMacroAssemblerA64::GetCachedRegister(int register_index) {
Register RegExpMacroAssemblerARM64::GetCachedRegister(int register_index) {
ASSERT(register_index < kNumCachedRegisters);
return Register::Create(register_index / 2, kXRegSizeInBits);
}
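
As a hedged aside on the mapping above: the division by two reflects that two 32-bit regexp registers share each 64-bit X register. A minimal standalone C++ sketch of that layout follows (a simplified model, not V8 code; the even/odd low/high-half assignment is an assumption for illustration):

#include <cstdio>

// Sketch: cached regexp register i lives in hardware register X(i / 2);
// assumed here: even indices in the low 32 bits, odd indices in the high half.
static int HardwareRegisterFor(int register_index) { return register_index / 2; }
static bool InHighHalf(int register_index) { return (register_index & 1) != 0; }

int main() {
  for (int i = 0; i < 4; ++i) {
    std::printf("regexp reg %d -> x%d (%s half)\n", i, HardwareRegisterFor(i),
                InHighHalf(i) ? "high" : "low");
  }
  return 0;
}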
Register RegExpMacroAssemblerA64::GetRegister(int register_index,
Register maybe_result) {
Register RegExpMacroAssemblerARM64::GetRegister(int register_index,
Register maybe_result) {
ASSERT(maybe_result.Is32Bits());
ASSERT(register_index >= 0);
if (num_registers_ <= register_index) {
@ -1592,8 +1590,8 @@ Register RegExpMacroAssemblerA64::GetRegister(int register_index,
}
void RegExpMacroAssemblerA64::StoreRegister(int register_index,
Register source) {
void RegExpMacroAssemblerARM64::StoreRegister(int register_index,
Register source) {
ASSERT(source.Is32Bits());
ASSERT(register_index >= 0);
if (num_registers_ <= register_index) {
@ -1623,7 +1621,7 @@ void RegExpMacroAssemblerA64::StoreRegister(int register_index,
}
void RegExpMacroAssemblerA64::CallIf(Label* to, Condition condition) {
void RegExpMacroAssemblerARM64::CallIf(Label* to, Condition condition) {
Label skip_call;
if (condition != al) __ B(&skip_call, InvertCondition(condition));
__ Bl(to);
@ -1631,21 +1629,21 @@ void RegExpMacroAssemblerA64::CallIf(Label* to, Condition condition) {
}
void RegExpMacroAssemblerA64::RestoreLinkRegister() {
void RegExpMacroAssemblerARM64::RestoreLinkRegister() {
ASSERT(csp.Is(__ StackPointer()));
__ Pop(lr, xzr);
__ Add(lr, lr, Operand(masm_->CodeObject()));
}
void RegExpMacroAssemblerA64::SaveLinkRegister() {
void RegExpMacroAssemblerARM64::SaveLinkRegister() {
ASSERT(csp.Is(__ StackPointer()));
__ Sub(lr, lr, Operand(masm_->CodeObject()));
__ Push(xzr, lr);
}
MemOperand RegExpMacroAssemblerA64::register_location(int register_index) {
MemOperand RegExpMacroAssemblerARM64::register_location(int register_index) {
ASSERT(register_index < (1<<30));
ASSERT(register_index >= kNumCachedRegisters);
if (num_registers_ <= register_index) {
@ -1656,7 +1654,7 @@ MemOperand RegExpMacroAssemblerA64::register_location(int register_index) {
return MemOperand(frame_pointer(), offset);
}
MemOperand RegExpMacroAssemblerA64::capture_location(int register_index,
MemOperand RegExpMacroAssemblerARM64::capture_location(int register_index,
Register scratch) {
ASSERT(register_index < (1<<30));
ASSERT(register_index < num_saved_registers_);
@ -1674,8 +1672,8 @@ MemOperand RegExpMacroAssemblerA64::capture_location(int register_index,
}
}
void RegExpMacroAssemblerA64::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
void RegExpMacroAssemblerARM64::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
Register offset = current_input_offset();
// The ldr, str, ldrh, strh instructions can do unaligned accesses, if the CPU
@ -1727,4 +1725,4 @@ void RegExpMacroAssemblerA64::LoadCurrentCharacterUnchecked(int cp_offset,
}} // namespace v8::internal
#endif // V8_TARGET_ARCH_A64
#endif // V8_TARGET_ARCH_ARM64
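
The truncated comment in LoadCurrentCharacterUnchecked above concerns ARM64's tolerance for unaligned ldr/ldrh accesses when several characters are read at once. A minimal standalone C++ sketch of the same idea (not V8 code): memcpy is the portable spelling of an unaligned load, and on targets that permit unaligned accesses the compiler lowers it to a single load instruction.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Load two consecutive UC16 characters from a possibly unaligned address in
// one 32-bit access.
static uint32_t LoadTwoCharsUnaligned(const uint8_t* p) {
  uint32_t chars;
  std::memcpy(&chars, p, sizeof(chars));
  return chars;
}

int main() {
  // 'a', 'b', 'c' as little-endian UC16, offset by one byte to misalign.
  uint8_t buf[8] = {0, 'a', 0, 'b', 0, 'c', 0, 0};
  uint32_t two = LoadTwoCharsUnaligned(buf + 1);
  std::printf("0x%08x\n", two);  // Low half 'a', high half 'b': 0x00620061.
  return 0;
}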

View File

@ -25,11 +25,11 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_REGEXP_MACRO_ASSEMBLER_A64_H_
#define V8_A64_REGEXP_MACRO_ASSEMBLER_A64_H_
#ifndef V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
#define V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
#include "a64/assembler-a64.h"
#include "a64/assembler-a64-inl.h"
#include "arm64/assembler-arm64.h"
#include "arm64/assembler-arm64-inl.h"
#include "macro-assembler.h"
namespace v8 {
@ -37,10 +37,10 @@ namespace internal {
#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerA64: public NativeRegExpMacroAssembler {
class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerA64(Mode mode, int registers_to_save, Zone* zone);
virtual ~RegExpMacroAssemblerA64();
RegExpMacroAssemblerARM64(Mode mode, int registers_to_save, Zone* zone);
virtual ~RegExpMacroAssemblerARM64();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
virtual void AdvanceRegister(int reg, int by);
@ -312,4 +312,4 @@ class RegExpMacroAssemblerA64: public NativeRegExpMacroAssembler {
}} // namespace v8::internal
#endif // V8_A64_REGEXP_MACRO_ASSEMBLER_A64_H_
#endif // V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_

View File

@ -30,12 +30,12 @@
#include <cstdarg>
#include "v8.h"
#if V8_TARGET_ARCH_A64
#if V8_TARGET_ARCH_ARM64
#include "disasm.h"
#include "assembler.h"
#include "a64/decoder-a64-inl.h"
#include "a64/simulator-a64.h"
#include "arm64/decoder-arm64-inl.h"
#include "arm64/simulator-arm64.h"
#include "macro-assembler.h"
namespace v8 {
@ -3643,4 +3643,4 @@ void Simulator::VisitException(Instruction* instr) {
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64
#endif // V8_TARGET_ARCH_ARM64

View File

@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_SIMULATOR_A64_H_
#define V8_A64_SIMULATOR_A64_H_
#ifndef V8_ARM64_SIMULATOR_ARM64_H_
#define V8_ARM64_SIMULATOR_ARM64_H_
#include <stdarg.h>
#include <vector>
@ -37,10 +37,10 @@
#include "utils.h"
#include "allocation.h"
#include "assembler.h"
#include "a64/assembler-a64.h"
#include "a64/decoder-a64.h"
#include "a64/disasm-a64.h"
#include "a64/instrument-a64.h"
#include "arm64/assembler-arm64.h"
#include "arm64/decoder-arm64.h"
#include "arm64/disasm-arm64.h"
#include "arm64/instrument-arm64.h"
#define REGISTER_CODE_LIST(R) \
R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
@ -53,28 +53,28 @@ namespace internal {
#if !defined(USE_SIMULATOR)
// Running without a simulator on a native A64 platform.
// Running without a simulator on a native ARM64 platform.
// When running without a simulator we call the entry directly.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
typedef int (*a64_regexp_matcher)(String* input,
int64_t start_offset,
const byte* input_start,
const byte* input_end,
int* output,
int64_t output_size,
Address stack_base,
int64_t direct_call,
void* return_address,
Isolate* isolate);
typedef int (*arm64_regexp_matcher)(String* input,
int64_t start_offset,
const byte* input_start,
const byte* input_end,
int* output,
int64_t output_size,
Address stack_base,
int64_t direct_call,
void* return_address,
Isolate* isolate);
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type a64_regexp_matcher.
// should act as a function matching the type arm64_regexp_matcher.
// The ninth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
(FUNCTION_CAST<a64_regexp_matcher>(entry)( \
(FUNCTION_CAST<arm64_regexp_matcher>(entry)( \
p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
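
Since this macro is the heart of the native-call path, a hedged illustration may help. The standalone C++ sketch below (simplified types and arity, not the real ten-argument signature) shows the FUNCTION_CAST pattern of reinterpreting an entry address as a typed function pointer, with a placeholder argument standing in for the reserved return-address slot.

#include <cstdint>
#include <cstdio>

// Simplified matcher type; the extra void* mirrors the dummy return-address
// slot described above.
typedef int (*regexp_matcher)(const char* input, int64_t start_offset,
                              void* return_address_slot);

static int FakeGeneratedCode(const char* input, int64_t start_offset,
                             void* /*return_address_slot*/) {
  return input[start_offset] == 'a' ? 1 : 0;
}

int main() {
  // In V8 the entry would point at generated machine code; here an ordinary
  // function keeps the sketch runnable. The round-trip through uintptr_t
  // mirrors what FUNCTION_CAST does.
  uintptr_t entry = reinterpret_cast<uintptr_t>(&FakeGeneratedCode);
  regexp_matcher matcher = reinterpret_cast<regexp_matcher>(entry);
  std::printf("match: %d\n", matcher("abc", 0, nullptr));  // match: 1
  return 0;
}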
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
@ -905,4 +905,4 @@ class SimulatorStack : public v8::internal::AllStatic {
} } // namespace v8::internal
#endif // V8_A64_SIMULATOR_A64_H_
#endif // V8_ARM64_SIMULATOR_ARM64_H_

View File

@ -27,7 +27,7 @@
#include "v8.h"
#if V8_TARGET_ARCH_A64
#if V8_TARGET_ARCH_ARM64
#include "ic-inl.h"
#include "codegen.h"
@ -1496,4 +1496,4 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64
#endif // V8_TARGET_ARCH_ARM64

View File

@ -25,9 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#if V8_TARGET_ARCH_A64
#if V8_TARGET_ARCH_ARM64
#include "a64/utils-a64.h"
#include "arm64/utils-arm64.h"
namespace v8 {
@ -37,7 +37,7 @@ namespace internal {
int CountLeadingZeros(uint64_t value, int width) {
// TODO(jbramley): Optimize this for A64 hosts.
// TODO(jbramley): Optimize this for ARM64 hosts.
ASSERT((width == 32) || (width == 64));
int count = 0;
uint64_t bit_test = 1UL << (width - 1);
@ -50,7 +50,7 @@ int CountLeadingZeros(uint64_t value, int width) {
int CountLeadingSignBits(int64_t value, int width) {
// TODO(jbramley): Optimize this for A64 hosts.
// TODO(jbramley): Optimize this for ARM64 hosts.
ASSERT((width == 32) || (width == 64));
if (value >= 0) {
return CountLeadingZeros(value, width) - 1;
@ -61,7 +61,7 @@ int CountLeadingSignBits(int64_t value, int width) {
int CountTrailingZeros(uint64_t value, int width) {
// TODO(jbramley): Optimize this for A64 hosts.
// TODO(jbramley): Optimize this for ARM64 hosts.
ASSERT((width == 32) || (width == 64));
int count = 0;
while ((count < width) && (((value >> count) & 1) == 0)) {
@ -109,4 +109,4 @@ int MaskToBit(uint64_t mask) {
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64
#endif // V8_TARGET_ARCH_ARM64

View File

@ -25,12 +25,12 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_UTILS_A64_H_
#define V8_A64_UTILS_A64_H_
#ifndef V8_ARM64_UTILS_ARM64_H_
#define V8_ARM64_UTILS_ARM64_H_
#include <cmath>
#include "v8.h"
#include "a64/constants-a64.h"
#include "arm64/constants-arm64.h"
#define REGISTER_CODE_LIST(R) \
R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
@ -132,4 +132,4 @@ inline float FusedMultiplyAdd(float op1, float op2, float a) {
} } // namespace v8::internal
#endif // V8_A64_UTILS_A64_H_
#endif // V8_ARM64_UTILS_ARM64_H_

View File

@ -59,8 +59,8 @@
#include "ia32/assembler-ia32-inl.h"
#elif V8_TARGET_ARCH_X64
#include "x64/assembler-x64-inl.h"
#elif V8_TARGET_ARCH_A64
#include "a64/assembler-a64-inl.h"
#elif V8_TARGET_ARCH_ARM64
#include "arm64/assembler-arm64-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/assembler-arm-inl.h"
#elif V8_TARGET_ARCH_MIPS
@ -75,8 +75,8 @@
#include "ia32/regexp-macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/regexp-macro-assembler-x64.h"
#elif V8_TARGET_ARCH_A64
#include "a64/regexp-macro-assembler-a64.h"
#elif V8_TARGET_ARCH_ARM64
#include "arm64/regexp-macro-assembler-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS
@ -286,7 +286,7 @@ int Label::pos() const {
// 00 [4 bit middle_tag] 11 followed by
// 00 [6 bit pc delta]
//
// 1101: constant or veneer pool. Used only on ARM and A64 for now.
// 1101: constant or veneer pool. Used only on ARM and ARM64 for now.
// The format is: [2-bit sub-type] 1101 11
// signed int (size of the pool).
// The 2-bit sub-types are:
@ -1343,8 +1343,8 @@ ExternalReference ExternalReference::re_check_stack_guard_state(
function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState);
#elif V8_TARGET_ARCH_IA32
function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
#elif V8_TARGET_ARCH_A64
function = FUNCTION_ADDR(RegExpMacroAssemblerA64::CheckStackGuardState);
#elif V8_TARGET_ARCH_ARM64
function = FUNCTION_ADDR(RegExpMacroAssemblerARM64::CheckStackGuardState);
#elif V8_TARGET_ARCH_ARM
function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS

View File

@ -215,9 +215,9 @@ class Label BASE_EMBEDDED {
friend class Displacement;
friend class RegExpMacroAssemblerIrregexp;
#if V8_TARGET_ARCH_A64
// On A64, the Assembler keeps track of pointers to Labels to resolve branches
// to distant targets. Copying labels would confuse the Assembler.
#if V8_TARGET_ARCH_ARM64
// On ARM64, the Assembler keeps track of pointers to Labels to resolve
// branches to distant targets. Copying labels would confuse the Assembler.
DISALLOW_COPY_AND_ASSIGN(Label); // NOLINT
#endif
};
@ -286,7 +286,7 @@ class RelocInfo BASE_EMBEDDED {
EXTERNAL_REFERENCE, // The address of an external C++ function.
INTERNAL_REFERENCE, // An address inside the same function.
// Marks constant and veneer pools. Only used on ARM and A64.
// Marks constant and veneer pools. Only used on ARM and ARM64.
// They use a custom noncompact encoding.
CONST_POOL,
VENEER_POOL,

View File

@ -160,8 +160,8 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
#include "atomicops_internals_x86_msvc.h"
#elif defined(__APPLE__)
#include "atomicops_internals_mac.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_A64
#include "atomicops_internals_a64_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM64
#include "atomicops_internals_arm64_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM
#include "atomicops_internals_arm_gcc.h"
#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)

View File

@ -1653,7 +1653,7 @@ void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
// buffer, before copying it into individual code objects. Be careful
// with alignment, some platforms don't like unaligned code.
// TODO(jbramley): I had to increase the size of this buffer from 8KB because
// we can generate a lot of debug code on A64.
// we can generate a lot of debug code on ARM64.
union { int force_alignment; byte buffer[16*KB]; } u;
// Traverse the list of builtins and generate an adaptor in a
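
The union on display here is the classic pre-C++11 trick for forcing buffer alignment: a union is aligned to its most strictly aligned member, so the byte buffer inherits int alignment. A standalone sketch of the technique next to its modern alignas equivalent (illustration only, not V8 code):

#include <cstdint>
#include <cstdio>

int main() {
  // Classic trick, as in the line above: 'buffer' inherits int alignment.
  union { int force_alignment; uint8_t buffer[16 * 1024]; } u;

  // C++11 spelling of the same intent.
  alignas(int) static uint8_t buffer2[16 * 1024];

  std::printf("union buffer at %p, alignas buffer at %p\n",
              static_cast<void*>(u.buffer), static_cast<void*>(buffer2));
  return 0;
}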

View File

@ -53,7 +53,7 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
#endif
// Simulator specific helpers.
#if defined(USE_SIMULATOR) && defined(V8_TARGET_ARCH_A64)
#if defined(USE_SIMULATOR) && defined(V8_TARGET_ARCH_ARM64)
// TODO(all): If possible automatically prepend an indicator like
// UNIMPLEMENTED or LOCATION.
#define ASM_UNIMPLEMENTED(message) \

View File

@ -101,7 +101,7 @@ namespace internal {
V(KeyedStringLength)
// List of code stubs only used on ARM platforms.
#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_A64)
#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_ARM64)
#define CODE_STUB_LIST_ARM(V) \
V(GetProperty) \
V(SetProperty) \
@ -441,8 +441,8 @@ class RuntimeCallHelper {
#include "ia32/code-stubs-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/code-stubs-x64.h"
#elif V8_TARGET_ARCH_A64
#include "a64/code-stubs-a64.h"
#elif V8_TARGET_ARCH_ARM64
#include "arm64/code-stubs-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/code-stubs-arm.h"
#elif V8_TARGET_ARCH_MIPS

View File

@ -72,8 +72,8 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
#include "ia32/codegen-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/codegen-x64.h"
#elif V8_TARGET_ARCH_A64
#include "a64/codegen-a64.h"
#elif V8_TARGET_ARCH_ARM64
#include "arm64/codegen-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/codegen-arm.h"
#elif V8_TARGET_ARCH_MIPS

View File

@ -282,7 +282,7 @@ class StackGuard {
void EnableInterrupts();
void DisableInterrupts();
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_A64
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);
#else

View File

@ -585,13 +585,13 @@ DEFINE_bool(use_verbose_printer, true, "allows verbose printing")
DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
DEFINE_bool(trace_parse, false, "trace parsing and preparsing")
// simulator-arm.cc, simulator-a64.cc and simulator-mips.cc
// simulator-arm.cc, simulator-arm64.cc and simulator-mips.cc
DEFINE_bool(trace_sim, false, "Trace simulator execution")
DEFINE_bool(debug_sim, false, "Enable debugging the simulator")
DEFINE_bool(check_icache, false,
"Check icache flushes in ARM and MIPS simulator")
DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
#ifdef V8_TARGET_ARCH_A64
#ifdef V8_TARGET_ARCH_ARM64
DEFINE_int(sim_stack_alignment, 16,
"Stack alignment in bytes in simulator. This must be a power of two "
"and it must be at least 16. 16 is default.")
@ -600,7 +600,7 @@ DEFINE_int(sim_stack_alignment, 8,
"Stack alignment in bytes in simulator (4 or 8, 8 is default)")
"Stack alingment in bytes in simulator (4 or 8, 8 is default)")
#endif
DEFINE_int(sim_stack_size, 2 * MB / KB,
"Stack size of the A64 simulator in kBytes (default is 2 MB)")
"Stack size of the ARM64 simulator in kBytes (default is 2 MB)")
DEFINE_bool(log_regs_modified, true,
"When logging register values, only print modified registers.")
DEFINE_bool(log_colour, true,
@ -819,7 +819,7 @@ DEFINE_bool(log_timer_events, false,
DEFINE_implication(log_timer_events, log_internal_timer_events)
DEFINE_implication(log_internal_timer_events, prof)
DEFINE_bool(log_instruction_stats, false, "Log AArch64 instruction statistics.")
DEFINE_string(log_instruction_file, "a64_inst.csv",
DEFINE_string(log_instruction_file, "arm64_inst.csv",
"AArch64 instruction statistics log file.")
DEFINE_int(log_instruction_period, 1 << 22,
"AArch64 instruction statistics logging period.")

View File

@ -36,8 +36,8 @@
#include "ia32/frames-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/frames-x64.h"
#elif V8_TARGET_ARCH_A64
#include "a64/frames-a64.h"
#elif V8_TARGET_ARCH_ARM64
#include "arm64/frames-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/frames-arm.h"
#elif V8_TARGET_ARCH_MIPS

View File

@ -35,7 +35,7 @@
namespace v8 {
namespace internal {
#if V8_TARGET_ARCH_A64
#if V8_TARGET_ARCH_ARM64
typedef uint64_t RegList;
#else
typedef uint32_t RegList;
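
RegList widens to 64 bits on ARM64 because the port has more registers to track than a 32-bit mask can hold. A hedged standalone C++ sketch of a RegList-style bitmask (simplified helpers, not V8's API):

#include <cstdint>
#include <cstdio>

typedef uint64_t RegList;

// Assumed register codes 0..63; each bit records whether a register is in
// the list.
static RegList Add(RegList list, int code) { return list | (RegList{1} << code); }
static bool Has(RegList list, int code) { return ((list >> code) & 1) != 0; }

int main() {
  RegList saved = 0;
  saved = Add(saved, 19);  // e.g. x19
  saved = Add(saved, 20);  // e.g. x20
  std::printf("x19 saved: %d, x21 saved: %d\n", Has(saved, 19), Has(saved, 21));
  return 0;
}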

View File

@ -127,8 +127,8 @@ class FullCodeGenerator: public AstVisitor {
static const int kCodeSizeMultiplier = 162;
#elif V8_TARGET_ARCH_ARM
static const int kCodeSizeMultiplier = 142;
#elif V8_TARGET_ARCH_A64
// TODO(all): Copied ARM value. Check this is sensible for A64.
#elif V8_TARGET_ARCH_ARM64
// TODO(all): Copied ARM value. Check this is sensible for ARM64.
static const int kCodeSizeMultiplier = 142;
#elif V8_TARGET_ARCH_MIPS
static const int kCodeSizeMultiplier = 142;

View File

@ -72,7 +72,7 @@ namespace internal {
#define V8_HOST_ARCH_32_BIT 1
#define V8_HOST_CAN_READ_UNALIGNED 1
#elif defined(__AARCH64EL__)
#define V8_HOST_ARCH_A64 1
#define V8_HOST_ARCH_ARM64 1
#define V8_HOST_ARCH_64_BIT 1
#define V8_HOST_CAN_READ_UNALIGNED 1
#elif defined(__ARMEL__)
@ -99,13 +99,13 @@ namespace internal {
// in the same way as the host architecture, that is, target the native
// environment as presented by the compiler.
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && \
!V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_A64 && !V8_TARGET_ARCH_MIPS
!V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
#define V8_TARGET_ARCH_IA32 1
#elif defined(__AARCH64EL__)
#define V8_TARGET_ARCH_A64 1
#define V8_TARGET_ARCH_ARM64 1
#elif defined(__ARMEL__)
#define V8_TARGET_ARCH_ARM 1
#elif defined(__MIPSEL__)
@ -125,8 +125,8 @@ namespace internal {
#if (V8_TARGET_ARCH_ARM && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_ARM))
#error Target architecture arm is only supported on arm and ia32 host
#endif
#if (V8_TARGET_ARCH_A64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_A64))
#error Target architecture a64 is only supported on a64 and x64 host
#if (V8_TARGET_ARCH_ARM64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_ARM64))
#error Target architecture arm64 is only supported on arm64 and x64 host
#endif
#if (V8_TARGET_ARCH_MIPS && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_MIPS))
#error Target architecture mips is only supported on mips and ia32 host
@ -136,7 +136,7 @@ namespace internal {
// Setting USE_SIMULATOR explicitly from the build script will force
// the use of a simulated environment.
#if !defined(USE_SIMULATOR)
#if (V8_TARGET_ARCH_A64 && !V8_HOST_ARCH_A64)
#if (V8_TARGET_ARCH_ARM64 && !V8_HOST_ARCH_ARM64)
#define USE_SIMULATOR 1
#endif
#if (V8_TARGET_ARCH_ARM && !V8_HOST_ARCH_ARM)
@ -154,7 +154,7 @@ namespace internal {
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_ARM
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_A64
#elif V8_TARGET_ARCH_ARM64
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_MIPS
#define V8_TARGET_LITTLE_ENDIAN 1
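
The detection scheme above maps compiler-predefined architecture macros (for instance __AARCH64EL__ for little-endian AArch64) onto V8-style V8_HOST_ARCH_* defines. A minimal runnable C++ sketch of the same cascade, using only the macros the diff itself names:

#include <cstdio>

int main() {
#if defined(_M_X64) || defined(__x86_64__)
  std::puts("host: x64");
#elif defined(_M_IX86) || defined(__i386__)
  std::puts("host: ia32");
#elif defined(__AARCH64EL__)
  std::puts("host: arm64");
#elif defined(__ARMEL__)
  std::puts("host: arm");
#elif defined(__MIPSEL__)
  std::puts("host: mipsel");
#else
  std::puts("host: unknown");
#endif
  return 0;
}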

View File

@ -36,8 +36,8 @@
#include "ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-x64.h"
#elif V8_TARGET_ARCH_A64
#include "a64/lithium-a64.h"
#elif V8_TARGET_ARCH_ARM64
#include "arm64/lithium-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS

View File

@ -67,8 +67,8 @@
#include "ia32/lithium-codegen-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-codegen-x64.h"
#elif V8_TARGET_ARCH_A64
#include "a64/lithium-codegen-a64.h"
#elif V8_TARGET_ARCH_ARM64
#include "arm64/lithium-codegen-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-codegen-arm.h"
#elif V8_TARGET_ARCH_MIPS

View File

@ -1972,7 +1972,7 @@ bool Isolate::Init(Deserializer* des) {
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_A64 || V8_TARGET_ARCH_MIPS
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS
Simulator::Initialize(this);
#endif
#endif

View File

@ -102,7 +102,7 @@ class DebuggerAgent;
#endif
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
!defined(__aarch64__) && V8_TARGET_ARCH_A64 || \
!defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
class Redirection;
class Simulator;
@ -310,7 +310,7 @@ class ThreadLocalTop BASE_EMBEDDED {
#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
V8_TARGET_ARCH_A64 && !defined(__aarch64__) || \
V8_TARGET_ARCH_ARM64 && !defined(__aarch64__) || \
V8_TARGET_ARCH_MIPS && !defined(__mips__)
#define ISOLATE_INIT_SIMULATOR_LIST(V) \
@ -408,7 +408,7 @@ class Isolate {
stack_limit_(0),
thread_state_(NULL),
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
!defined(__aarch64__) && V8_TARGET_ARCH_A64 || \
!defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
simulator_(NULL),
#endif
@ -422,7 +422,7 @@ class Isolate {
FIELD_ACCESSOR(ThreadState*, thread_state)
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
!defined(__aarch64__) && V8_TARGET_ARCH_A64 || \
!defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
FIELD_ACCESSOR(Simulator*, simulator)
#endif
@ -438,7 +438,7 @@ class Isolate {
ThreadState* thread_state_;
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
!defined(__aarch64__) && V8_TARGET_ARCH_A64 || \
!defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
Simulator* simulator_;
#endif

View File

@ -49,8 +49,8 @@
#include "ia32/regexp-macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/regexp-macro-assembler-x64.h"
#elif V8_TARGET_ARCH_A64
#include "a64/regexp-macro-assembler-a64.h"
#elif V8_TARGET_ARCH_ARM64
#include "arm64/regexp-macro-assembler-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS
@ -6090,9 +6090,9 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
#elif V8_TARGET_ARCH_ARM
RegExpMacroAssemblerARM macro_assembler(mode, (data->capture_count + 1) * 2,
zone);
#elif V8_TARGET_ARCH_A64
RegExpMacroAssemblerA64 macro_assembler(mode, (data->capture_count + 1) * 2,
zone);
#elif V8_TARGET_ARCH_ARM64
RegExpMacroAssemblerARM64 macro_assembler(mode, (data->capture_count + 1) * 2,
zone);
#elif V8_TARGET_ARCH_MIPS
RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2,
zone);

View File

@ -34,8 +34,8 @@
#include "ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-x64.h"
#elif V8_TARGET_ARCH_A64
#include "a64/lithium-a64.h"
#elif V8_TARGET_ARCH_ARM64
#include "arm64/lithium-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS

View File

@ -35,8 +35,8 @@
#include "ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-x64.h"
#elif V8_TARGET_ARCH_A64
#include "a64/lithium-a64.h"
#elif V8_TARGET_ARCH_ARM64
#include "arm64/lithium-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS

View File

@ -38,9 +38,9 @@
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#include "arm/lithium-codegen-arm.h"
#elif V8_TARGET_ARCH_A64
#include "a64/lithium-a64.h"
#include "a64/lithium-codegen-a64.h"
#elif V8_TARGET_ARCH_ARM64
#include "arm64/lithium-arm64.h"
#include "arm64/lithium-codegen-arm64.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-mips.h"
#include "mips/lithium-codegen-mips.h"

View File

@ -41,9 +41,9 @@
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-mips.h"
#include "mips/lithium-codegen-mips.h"
#elif V8_TARGET_ARCH_A64
#include "a64/lithium-a64.h"
#include "a64/lithium-codegen-a64.h"
#elif V8_TARGET_ARCH_ARM64
#include "arm64/lithium-arm64.h"
#include "arm64/lithium-codegen-arm64.h"
#else
#error "Unknown architecture."
#endif

View File

@ -72,14 +72,14 @@ const int kInvalidProtoDepth = -1;
#include "x64/assembler-x64-inl.h"
#include "code.h" // must be after assembler_*.h
#include "x64/macro-assembler-x64.h"
#elif V8_TARGET_ARCH_A64
#include "a64/constants-a64.h"
#elif V8_TARGET_ARCH_ARM64
#include "arm64/constants-arm64.h"
#include "assembler.h"
#include "a64/assembler-a64.h"
#include "a64/assembler-a64-inl.h"
#include "arm64/assembler-arm64.h"
#include "arm64/assembler-arm64-inl.h"
#include "code.h" // must be after assembler_*.h
#include "a64/macro-assembler-a64.h"
#include "a64/macro-assembler-a64-inl.h"
#include "arm64/macro-assembler-arm64.h"
#include "arm64/macro-assembler-arm64-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/constants-arm.h"
#include "assembler.h"

View File

@ -37,8 +37,8 @@
#include "property-details.h"
#include "smart-pointers.h"
#include "unicode-inl.h"
#if V8_TARGET_ARCH_A64
#include "a64/constants-a64.h"
#if V8_TARGET_ARCH_ARM64
#include "arm64/constants-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/constants-arm.h"
#elif V8_TARGET_ARCH_MIPS
@ -1246,8 +1246,8 @@ class MaybeObject BASE_EMBEDDED {
V(kLetBindingReInitialization, "Let binding re-initialization") \
V(kLhsHasBeenClobbered, "lhs has been clobbered") \
V(kLiveBytesCountOverflowChunkSize, "Live Bytes Count overflow chunk size") \
V(kLiveEditFrameDroppingIsNotSupportedOnA64, \
"LiveEdit frame dropping is not supported on a64") \
V(kLiveEditFrameDroppingIsNotSupportedOnARM64, \
"LiveEdit frame dropping is not supported on arm64") \
V(kLiveEditFrameDroppingIsNotSupportedOnArm, \
"LiveEdit frame dropping is not supported on arm") \
V(kLiveEditFrameDroppingIsNotSupportedOnMips, \

View File

@ -276,7 +276,7 @@ void OS::Abort() {
void OS::DebugBreak() {
#if V8_HOST_ARCH_ARM
asm("bkpt 0");
#elif V8_HOST_ARCH_A64
#elif V8_HOST_ARCH_ARM64
asm("brk 0");
#elif V8_HOST_ARCH_MIPS
asm("break");

View File

@ -39,7 +39,8 @@ RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer(
assembler_(assembler) {
unsigned int type = assembler->Implementation();
ASSERT(type < 6);
const char* impl_names[] = {"IA32", "ARM", "A64", "MIPS", "X64", "Bytecode"};
const char* impl_names[] = {"IA32", "ARM", "ARM64",
"MIPS", "X64", "Bytecode"};
PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
}

View File

@ -53,7 +53,7 @@ class RegExpMacroAssembler {
enum IrregexpImplementation {
kIA32Implementation,
kARMImplementation,
kA64Implementation,
kARM64Implementation,
kMIPSImplementation,
kX64Implementation,
kBytecodeImplementation
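
The tracer's ASSERT(type < 6) above guards the name table against this enum at run time. A hedged standalone C++ sketch (mirroring the enum and table shown, not V8's actual code) of enforcing the same invariant at compile time:

#include <cstdio>

enum IrregexpImplementation {
  kIA32Implementation,
  kARMImplementation,
  kARM64Implementation,
  kMIPSImplementation,
  kX64Implementation,
  kBytecodeImplementation,
  kImplementationCount  // Assumed sentinel, added for the check below.
};

static const char* const kImplNames[] = {"IA32", "ARM", "ARM64",
                                         "MIPS", "X64", "Bytecode"};

// Keep the name table in sync with the enum it indexes.
static_assert(sizeof(kImplNames) / sizeof(kImplNames[0]) == kImplementationCount,
              "name table out of sync with enum");

int main() {
  std::printf("RegExpMacroAssembler%s();\n", kImplNames[kARM64Implementation]);
  return 0;
}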

View File

@ -245,10 +245,10 @@ class SimulatorHelper {
Simulator::sp));
state->fp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::r11));
#elif V8_TARGET_ARCH_A64
#elif V8_TARGET_ARCH_ARM64
if (simulator_->sp() == 0 || simulator_->fp() == 0) {
// It is possible that the simulator is interrupted while it is updating
// the sp or fp register. A64 simulator does this in two steps:
// the sp or fp register. ARM64 simulator does this in two steps:
// first setting it to zero and then setting it to the new value.
// Bailout if sp/fp doesn't contain the new value.
return;
@ -357,7 +357,7 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
if (!helper.Init(sampler, isolate)) return;
helper.FillRegisters(&state);
// It is possible that the simulator is interrupted while it is updating
// the sp or fp register. A64 simulator does this in two steps:
// the sp or fp register. ARM64 simulator does this in two steps:
// first setting it to zero and then setting it to the new value.
// Bailout if sp/fp doesn't contain the new value.
if (state.sp == 0 || state.fp == 0) return;
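
The two-step protocol described in these comments is a small publication scheme: the simulator writes zero, then the new value, so an asynchronous sampler that reads zero knows it caught the update mid-flight and must bail out. A standalone C++ sketch of the idea (simplified to one register and expressed with std::atomic, which the V8 code of this era does not use):

#include <atomic>
#include <cstdint>
#include <cstdio>

static std::atomic<uintptr_t> sp{0x1000};

static void UpdateSp(uintptr_t new_value) {
  sp.store(0, std::memory_order_release);          // Step 1: invalidate.
  sp.store(new_value, std::memory_order_release);  // Step 2: publish.
}

static bool SampleSp(uintptr_t* out) {
  uintptr_t v = sp.load(std::memory_order_acquire);
  if (v == 0) return false;  // Mid-update: bail out, matching the check above.
  *out = v;
  return true;
}

int main() {
  UpdateSp(0x2000);
  uintptr_t v;
  if (SampleSp(&v)) std::printf("sp = 0x%lx\n", static_cast<unsigned long>(v));
  return 0;
}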
@ -390,7 +390,7 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
state.fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif // defined(__GLIBC__) && !defined(__UCLIBC__) &&
// (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
#elif V8_HOST_ARCH_A64
#elif V8_HOST_ARCH_ARM64
state.pc = reinterpret_cast<Address>(mcontext.pc);
state.sp = reinterpret_cast<Address>(mcontext.sp);
// FP is an alias for x29.

View File

@ -32,8 +32,8 @@
#include "ia32/simulator-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/simulator-x64.h"
#elif V8_TARGET_ARCH_A64
#include "a64/simulator-a64.h"
#elif V8_TARGET_ARCH_ARM64
#include "arm64/simulator-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/simulator-arm.h"
#elif V8_TARGET_ARCH_MIPS

View File

@ -154,16 +154,16 @@
'test-macro-assembler-arm.cc'
],
}],
['v8_target_arch=="a64" or v8_target_arch=="arm64"', {
['v8_target_arch=="arm64"', {
'sources': [
'test-utils-a64.cc',
'test-assembler-a64.cc',
'test-utils-arm64.cc',
'test-assembler-arm64.cc',
'test-code-stubs.cc',
'test-code-stubs-a64.cc',
'test-disasm-a64.cc',
'test-fuzz-a64.cc',
'test-javascript-a64.cc',
'test-js-a64-variables.cc'
'test-code-stubs-arm64.cc',
'test-disasm-arm64.cc',
'test-fuzz-arm64.cc',
'test-javascript-arm64.cc',
'test-js-arm64-variables.cc'
],
}],
['v8_target_arch=="mipsel"', {

View File

@ -74,7 +74,7 @@
}], # ALWAYS
##############################################################################
['arch == a64', {
['arch == arm64', {
'test-api/Bug618': [PASS],
@ -86,16 +86,16 @@
# BUG(v8:3155).
'test-strings/AsciiArrayJoin': [PASS, ['mode == debug', FAIL]],
}], # 'arch == a64'
}], # 'arch == arm64'
['arch == a64 and simulator_run == True', {
['arch == arm64 and simulator_run == True', {
# Pass but take too long with the simulator.
'test-api/ExternalArrays': [PASS, TIMEOUT],
'test-api/Threading1': [SKIP],
}], # 'arch == a64 and simulator_run == True'
}], # 'arch == arm64 and simulator_run == True'
['arch == a64 and mode == debug and simulator_run == True', {
['arch == arm64 and mode == debug and simulator_run == True', {
# Pass but take too long with the simulator in debug mode.
'test-api/ExternalDoubleArray': [SKIP],
@ -105,7 +105,7 @@
'test-api/Float32Array': [SKIP],
'test-api/Float64Array': [SKIP],
'test-debug/DebugBreakLoop': [SKIP],
}], # 'arch == a64 and mode == debug and simulator_run == True'
}], # 'arch == arm64 and mode == debug and simulator_run == True'
##############################################################################
['asan == True', {

View File

@ -34,12 +34,12 @@
#include "v8.h"
#include "macro-assembler.h"
#include "a64/simulator-a64.h"
#include "a64/decoder-a64-inl.h"
#include "a64/disasm-a64.h"
#include "a64/utils-a64.h"
#include "arm64/simulator-arm64.h"
#include "arm64/decoder-arm64-inl.h"
#include "arm64/disasm-arm64.h"
#include "arm64/utils-arm64.h"
#include "cctest.h"
#include "test-utils-a64.h"
#include "test-utils-arm64.h"
using namespace v8::internal;
@ -70,7 +70,7 @@ using namespace v8::internal;
//
// Once the test has been run all integer and floating point registers as well
// as flags are accessible through a RegisterDump instance, see
// utils-a64.cc for more info on RegisterDump.
// utils-arm64.cc for more info on RegisterDump.
//
// We provide some helper assert to handle common cases:
//
@ -6299,7 +6299,8 @@ TEST(fcvt_ds) {
ASSERT_EQUAL_FP64(FLT_MAX, d11);
ASSERT_EQUAL_FP64(FLT_MIN, d12);
// Check that the NaN payload is preserved according to A64 conversion rules:
// Check that the NaN payload is preserved according to ARM64 conversion
// rules:
// - The sign bit is preserved.
// - The top bit of the mantissa is forced to 1 (making it a quiet NaN).
// - The remaining mantissa bits are copied until they run out.

View File

@ -32,11 +32,11 @@
#include "v8.h"
#include "macro-assembler.h"
#include "a64/assembler-a64.h"
#include "a64/macro-assembler-a64.h"
#include "a64/decoder-a64-inl.h"
#include "a64/disasm-a64.h"
#include "a64/utils-a64.h"
#include "arm64/assembler-arm64.h"
#include "arm64/macro-assembler-arm64.h"
#include "arm64/decoder-arm64-inl.h"
#include "arm64/disasm-arm64.h"
#include "arm64/utils-arm64.h"
using namespace v8::internal;

View File

@ -25,9 +25,9 @@
#include <stdlib.h>
#include "cctest.h"
#include "a64/decoder-a64.h"
#include "a64/decoder-a64-inl.h"
#include "a64/disasm-a64.h"
#include "arm64/decoder-arm64.h"
#include "arm64/decoder-arm64-inl.h"
#include "arm64/disasm-arm64.h"
using namespace v8::internal;

View File

@ -96,9 +96,9 @@ void generate(MacroAssembler* masm, i::Vector<const uint8_t> string) {
StringHelper::GenerateHashGetHash(masm, r0);
__ pop(kRootRegister);
__ mov(pc, Operand(lr));
#elif V8_TARGET_ARCH_A64
// The A64 assembler usually uses jssp (x28) as a stack pointer, but only csp
// is initialized by the calling (C++) code.
#elif V8_TARGET_ARCH_ARM64
// The ARM64 assembler usually uses jssp (x28) as a stack pointer, but only
// csp is initialized by the calling (C++) code.
Register old_stack_pointer = __ StackPointer();
__ SetStackPointer(csp);
__ Push(root, xzr);
@ -158,9 +158,9 @@ void generate(MacroAssembler* masm, uint32_t key) {
__ GetNumberHash(r0, ip);
__ pop(kRootRegister);
__ mov(pc, Operand(lr));
#elif V8_TARGET_ARCH_A64
// The A64 assembler usually uses jssp (x28) as a stack pointer, but only csp
// is initialized by the calling (C++) code.
#elif V8_TARGET_ARCH_ARM64
// The ARM64 assembler usually uses jssp (x28) as a stack pointer, but only
// csp is initialized by the calling (C++) code.
Register old_stack_pointer = __ StackPointer();
__ SetStackPointer(csp);
__ Push(root, xzr);

View File

@ -190,7 +190,7 @@ TEST(HeapObjects) {
CHECK(value->IsNumber());
CHECK_EQ(Smi::kMaxValue, Smi::cast(value)->value());
#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_A64)
#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_ARM64)
// TODO(lrn): We need a NumberFromIntptr function in order to test this.
value = heap->NumberFromInt32(Smi::kMinValue - 1)->ToObjectChecked();
CHECK(value->IsHeapNumber());

View File

@ -49,10 +49,10 @@
#include "arm/macro-assembler-arm.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
#if V8_TARGET_ARCH_A64
#include "a64/assembler-a64.h"
#include "a64/macro-assembler-a64.h"
#include "a64/regexp-macro-assembler-a64.h"
#if V8_TARGET_ARCH_ARM64
#include "arm64/assembler-arm64.h"
#include "arm64/macro-assembler-arm64.h"
#include "arm64/regexp-macro-assembler-arm64.h"
#endif
#if V8_TARGET_ARCH_MIPS
#include "mips/assembler-mips.h"
@ -694,8 +694,8 @@ typedef RegExpMacroAssemblerIA32 ArchRegExpMacroAssembler;
typedef RegExpMacroAssemblerX64 ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_ARM
typedef RegExpMacroAssemblerARM ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_A64
typedef RegExpMacroAssemblerA64 ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_ARM64
typedef RegExpMacroAssemblerARM64 ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_MIPS
typedef RegExpMacroAssemblerMIPS ArchRegExpMacroAssembler;
#endif

View File

@ -28,9 +28,9 @@
#include "v8.h"
#include "macro-assembler.h"
#include "a64/utils-a64.h"
#include "arm64/utils-arm64.h"
#include "cctest.h"
#include "test-utils-a64.h"
#include "test-utils-arm64.h"
using namespace v8::internal;

View File

@ -25,14 +25,14 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_TEST_UTILS_A64_H_
#define V8_A64_TEST_UTILS_A64_H_
#ifndef V8_ARM64_TEST_UTILS_ARM64_H_
#define V8_ARM64_TEST_UTILS_ARM64_H_
#include "v8.h"
#include "macro-assembler.h"
#include "a64/macro-assembler-a64.h"
#include "a64/utils-a64.h"
#include "arm64/macro-assembler-arm64.h"
#include "arm64/utils-arm64.h"
#include "cctest.h"
@ -230,4 +230,4 @@ void ClobberFP(MacroAssembler* masm, RegList reg_list,
// Clobber or ClobberFP functions.
void Clobber(MacroAssembler* masm, CPURegList reg_list);
#endif // V8_A64_TEST_UTILS_A64_H_
#endif // V8_ARM64_TEST_UTILS_ARM64_H_

View File

@ -25,9 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// We change the stack size for the A64 simulator because at one point this test
// enters an infinite recursion which goes through the runtime and we overflow
// the system stack before the simulator stack.
// We change the stack size for the ARM64 simulator because at one point this
// test enters an infinite recursion which goes through the runtime and we
// overflow the system stack before the simulator stack.
// Flags: --harmony-proxies --sim-stack-size=500

View File

@ -138,7 +138,7 @@
}], # 'gc_stress == True'
##############################################################################
['arch == a64', {
['arch == arm64', {
# Requires bigger stack size in the Genesis and if stack size is increased,
# the test requires too much time to run. However, the problem test covers
@ -146,7 +146,8 @@
'regress/regress-1132': [SKIP],
# Pass but take too long to run. Skip.
# Some similar tests (with fewer iterations) may be included in a64-js tests.
# Some similar tests (with fewer iterations) may be included in arm64-js
# tests.
'compiler/regress-arguments': [SKIP],
'compiler/regress-gvn': [SKIP],
'compiler/regress-max-locals-for-osr': [SKIP],
@ -209,9 +210,9 @@
'unicodelctest-no-optimization': [PASS, SLOW],
'unicodelctest': [PASS, SLOW],
'unicode-test': [PASS, SLOW],
}], # 'arch == a64'
}], # 'arch == arm64'
['arch == a64 and mode == debug and simulator_run == True', {
['arch == arm64 and mode == debug and simulator_run == True', {
# Pass but take too long with the simulator in debug mode.
'array-sort': [PASS, TIMEOUT],
@ -221,7 +222,7 @@
'harmony/symbols': [SKIP],
# Issue 3219:
'getters-on-elements': [PASS, ['gc_stress == True', FAIL]],
}], # 'arch == a64 and mode == debug and simulator_run == True'
}], # 'arch == arm64 and mode == debug and simulator_run == True'
##############################################################################
['asan == True', {

View File

@ -828,7 +828,7 @@
}], # ALWAYS
['arch == arm or arch == a64', {
['arch == arm or arch == arm64', {
# BUG(3251229): Times out when running new crankshaft test script.
'ecma_3/RegExp/regress-311414': [SKIP],
@ -845,13 +845,13 @@
# BUG(1040): Allow this test to timeout.
'js1_5/GC/regress-203278-2': [PASS, TIMEOUT, NO_VARIANTS],
}], # 'arch == arm or arch == a64'
}], # 'arch == arm or arch == arm64'
['arch == a64', {
['arch == arm64', {
# BUG(v8:3152): Runs out of stack in debug mode.
'js1_5/extensions/regress-355497': [FAIL_OK, ['mode == debug', SKIP]],
}], # 'arch == a64'
}], # 'arch == arm64'
['arch == mipsel', {
@ -873,7 +873,7 @@
'js1_5/GC/regress-203278-2': [PASS, TIMEOUT, NO_VARIANTS],
}], # 'arch == mipsel'
['arch == a64 and simulator_run == True', {
['arch == arm64 and simulator_run == True', {
'js1_5/GC/regress-203278-2': [SKIP],
@ -892,5 +892,5 @@
'js1_5/extensions/regress-330569': [SKIP],
'js1_5/extensions/regress-351448': [SKIP],
'js1_5/extensions/regress-336410-1': [SKIP],
}], # 'arch == a64 and simulator_run == True'
}], # 'arch == arm64 and simulator_run == True'
]

View File

@ -99,7 +99,7 @@
'S15.1.3.2_A2.5_T1': [PASS, ['mode == debug', SKIP]],
}], # ALWAYS
['arch == arm or arch == mipsel or arch == a64', {
['arch == arm or arch == mipsel or arch == arm64', {
# TODO(mstarzinger): Causes stack overflow on simulators due to eager
# compilation of parenthesized function literals. Needs investigation.
@ -112,5 +112,5 @@
'S15.1.3.2_A2.5_T1': [SKIP],
'S15.1.3.3_A2.3_T1': [SKIP],
'S15.1.3.4_A2.3_T1': [SKIP],
}], # 'arch == arm or arch == mipsel or arch == a64'
}], # 'arch == arm or arch == mipsel or arch == arm64'
]

View File

@ -51,7 +51,7 @@
['simulator', {
'function-apply-aliased': [SKIP],
}], # 'simulator'
['arch == a64 and simulator_run == True', {
['arch == arm64 and simulator_run == True', {
'dfg-int-overflow-in-loop': [SKIP],
}], # 'arch == a64 and simulator_run == True'
}], # 'arch == arm64 and simulator_run == True'
]

View File

@ -30,8 +30,8 @@
# This script reads in CSV formatted instruction data, and draws a stacked
# graph in png format.
defaultfile=a64_inst.csv
defaultout=a64_inst.png
defaultfile=arm64_inst.csv
defaultout=arm64_inst.png
gnuplot=/usr/bin/gnuplot

View File

@ -645,50 +645,50 @@
'../../src/arm/stub-cache-arm.cc',
],
}],
['v8_target_arch=="a64" or v8_target_arch=="arm64"', {
'sources': [ ### gcmole(arch:a64) ###
'../../src/a64/assembler-a64.cc',
'../../src/a64/assembler-a64.h',
'../../src/a64/assembler-a64-inl.h',
'../../src/a64/builtins-a64.cc',
'../../src/a64/codegen-a64.cc',
'../../src/a64/codegen-a64.h',
'../../src/a64/code-stubs-a64.cc',
'../../src/a64/code-stubs-a64.h',
'../../src/a64/constants-a64.h',
'../../src/a64/cpu-a64.cc',
'../../src/a64/cpu-a64.h',
'../../src/a64/debug-a64.cc',
'../../src/a64/decoder-a64.cc',
'../../src/a64/decoder-a64.h',
'../../src/a64/decoder-a64-inl.h',
'../../src/a64/deoptimizer-a64.cc',
'../../src/a64/disasm-a64.cc',
'../../src/a64/disasm-a64.h',
'../../src/a64/frames-a64.cc',
'../../src/a64/frames-a64.h',
'../../src/a64/full-codegen-a64.cc',
'../../src/a64/ic-a64.cc',
'../../src/a64/instructions-a64.cc',
'../../src/a64/instructions-a64.h',
'../../src/a64/instrument-a64.cc',
'../../src/a64/instrument-a64.h',
'../../src/a64/lithium-a64.cc',
'../../src/a64/lithium-a64.h',
'../../src/a64/lithium-codegen-a64.cc',
'../../src/a64/lithium-codegen-a64.h',
'../../src/a64/lithium-gap-resolver-a64.cc',
'../../src/a64/lithium-gap-resolver-a64.h',
'../../src/a64/macro-assembler-a64.cc',
'../../src/a64/macro-assembler-a64.h',
'../../src/a64/macro-assembler-a64-inl.h',
'../../src/a64/regexp-macro-assembler-a64.cc',
'../../src/a64/regexp-macro-assembler-a64.h',
'../../src/a64/simulator-a64.cc',
'../../src/a64/simulator-a64.h',
'../../src/a64/stub-cache-a64.cc',
'../../src/a64/utils-a64.cc',
'../../src/a64/utils-a64.h',
['v8_target_arch=="arm64"', {
'sources': [ ### gcmole(arch:arm64) ###
'../../src/arm64/assembler-arm64.cc',
'../../src/arm64/assembler-arm64.h',
'../../src/arm64/assembler-arm64-inl.h',
'../../src/arm64/builtins-arm64.cc',
'../../src/arm64/codegen-arm64.cc',
'../../src/arm64/codegen-arm64.h',
'../../src/arm64/code-stubs-arm64.cc',
'../../src/arm64/code-stubs-arm64.h',
'../../src/arm64/constants-arm64.h',
'../../src/arm64/cpu-arm64.cc',
'../../src/arm64/cpu-arm64.h',
'../../src/arm64/debug-arm64.cc',
'../../src/arm64/decoder-arm64.cc',
'../../src/arm64/decoder-arm64.h',
'../../src/arm64/decoder-arm64-inl.h',
'../../src/arm64/deoptimizer-arm64.cc',
'../../src/arm64/disasm-arm64.cc',
'../../src/arm64/disasm-arm64.h',
'../../src/arm64/frames-arm64.cc',
'../../src/arm64/frames-arm64.h',
'../../src/arm64/full-codegen-arm64.cc',
'../../src/arm64/ic-arm64.cc',
'../../src/arm64/instructions-arm64.cc',
'../../src/arm64/instructions-arm64.h',
'../../src/arm64/instrument-arm64.cc',
'../../src/arm64/instrument-arm64.h',
'../../src/arm64/lithium-arm64.cc',
'../../src/arm64/lithium-arm64.h',
'../../src/arm64/lithium-codegen-arm64.cc',
'../../src/arm64/lithium-codegen-arm64.h',
'../../src/arm64/lithium-gap-resolver-arm64.cc',
'../../src/arm64/lithium-gap-resolver-arm64.h',
'../../src/arm64/macro-assembler-arm64.cc',
'../../src/arm64/macro-assembler-arm64.h',
'../../src/arm64/macro-assembler-arm64-inl.h',
'../../src/arm64/regexp-macro-assembler-arm64.cc',
'../../src/arm64/regexp-macro-assembler-arm64.h',
'../../src/arm64/simulator-arm64.cc',
'../../src/arm64/simulator-arm64.h',
'../../src/arm64/stub-cache-arm64.cc',
'../../src/arm64/utils-arm64.cc',
'../../src/arm64/utils-arm64.h',
],
}],
['v8_target_arch=="ia32" or v8_target_arch=="mac" or OS=="mac"', {

View File

@ -82,7 +82,6 @@ SUPPORTED_ARCHS = ["android_arm",
"nacl_ia32",
"nacl_x64",
"x64",
"a64",
"arm64"]
# Double the timeout for these:
SLOW_ARCHS = ["android_arm",
@ -91,7 +90,7 @@ SLOW_ARCHS = ["android_arm",
"mipsel",
"nacl_ia32",
"nacl_x64",
"a64"]
"arm64"]
def BuildOptions():
@ -353,8 +352,6 @@ def Main():
for (arch, mode) in options.arch_and_mode:
try:
if arch == "arm64":
arch = "a64"
code = Execute(arch, mode, args, options, suites, workspace)
except KeyboardInterrupt:
return 2
@ -398,7 +395,7 @@ def Execute(arch, mode, args, options, suites, workspace):
# TODO(all): Combine "simulator" and "simulator_run".
simulator_run = not options.dont_skip_simulator_slow_tests and \
arch in ['a64', 'arm', 'mips'] and ARCH_GUESS and arch != ARCH_GUESS
arch in ['arm64', 'arm', 'mips'] and ARCH_GUESS and arch != ARCH_GUESS
# Find available test suites and read test cases from them.
variables = {
"arch": arch,

View File

@ -52,7 +52,7 @@ DEFS = {FAIL_OK: [FAIL, OKAY],
# Support arches, modes to be written as keywords instead of strings.
VARIABLES = {ALWAYS: True}
for var in ["debug", "release", "android_arm", "android_ia32", "arm", "a64",
for var in ["debug", "release", "android_arm", "android_ia32", "arm", "arm64",
"ia32", "mipsel", "x64", "nacl_ia32", "nacl_x64", "macos",
"windows", "linux"]:
VARIABLES[var] = var

Some files were not shown because too many files have changed in this diff.