MIPS64: Add big-endian support for mips64.

TEST=
BUG=
Review URL: https://codereview.chromium.org/1334793004
Cr-Commit-Position: refs/heads/master@{#31011}

parent ddafe2c494
commit 8bd431de92
@@ -244,7 +244,7 @@ endif
 # Architectures and modes to be compiled. Consider these to be internal
 # variables, don't override them (use the targets instead).
-ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64el x87 ppc ppc64
+ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64 mips64el x87 ppc ppc64
 DEFAULT_ARCHES = ia32 x64 arm
 MODES = release debug optdebug
 DEFAULT_MODES = release debug
@@ -341,7 +341,8 @@
      'cflags': ['-march=i586'],
    }],  # v8_target_arch=="x87"
    ['(v8_target_arch=="mips" or v8_target_arch=="mipsel" \
-      or v8_target_arch=="mips64el") and v8_target_arch==target_arch', {
+      or v8_target_arch=="mips64" or v8_target_arch=="mips64el") \
+      and v8_target_arch==target_arch', {
      'target_conditions': [
        ['_toolset=="target"', {
          # Target built with a Mips CXX compiler.
@@ -743,7 +744,7 @@
        }],
      ],
    }],  # v8_target_arch=="mipsel"
-   ['v8_target_arch=="mips64el"', {
+   ['v8_target_arch=="mips64el" or v8_target_arch=="mips64"', {
      'defines': [
        'V8_TARGET_ARCH_MIPS64',
      ],
@@ -753,6 +754,16 @@
            'CAN_USE_FPU_INSTRUCTIONS',
          ],
        }],
+       [ 'v8_host_byteorder=="little"', {
+         'defines': [
+           'V8_TARGET_ARCH_MIPS64_LE',
+         ],
+       }],
+       [ 'v8_host_byteorder=="big"', {
+         'defines': [
+           'V8_TARGET_ARCH_MIPS64_BE',
+         ],
+       }],
        [ 'v8_use_mips_abi_hardfloat=="true"', {
          'defines': [
            '__mips_hard_float=1',
@@ -769,11 +780,17 @@
      'conditions': [
        ['v8_target_arch==target_arch', {
          'cflags': [
-           '-EL',
            '-Wno-error=array-bounds',  # Workaround https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56273
          ],
-         'ldflags': ['-EL'],
          'conditions': [
+           ['v8_target_arch=="mips64el"', {
+             'cflags': ['-EL'],
+             'ldflags': ['-EL'],
+           }],
+           ['v8_target_arch=="mips64"', {
+             'cflags': ['-EB'],
+             'ldflags': ['-EB'],
+           }],
            [ 'v8_use_mips_abi_hardfloat=="true"', {
              'cflags': ['-mhard-float'],
              'ldflags': ['-mhard-float'],
@@ -164,7 +164,11 @@
 #define V8_TARGET_LITTLE_ENDIAN 1
 #endif
 #elif V8_TARGET_ARCH_MIPS64
+#if defined(__MIPSEB__) || defined(V8_TARGET_ARCH_MIPS64_BE)
+#define V8_TARGET_BIG_ENDIAN 1
+#else
 #define V8_TARGET_LITTLE_ENDIAN 1
+#endif
 #elif V8_TARGET_ARCH_X87
 #define V8_TARGET_LITTLE_ENDIAN 1
 #elif V8_TARGET_ARCH_PPC_LE
@@ -361,7 +361,7 @@ void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
   DCHECK(!map_reg.is(scratch));
   __ LoadWeakValue(map_reg, cell, miss);
   if (transition->CanBeDeprecated()) {
-    __ ld(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
+    __ lwu(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
     __ And(at, scratch, Operand(Map::Deprecated::kMask));
     __ Branch(miss, ne, at, Operand(zero_reg));
   }
@@ -551,7 +551,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
   // We have to see if the double version of the hole is present. If so
   // go to the runtime.
   __ Daddu(address, elements,
-           Operand(FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32) -
+           Operand(FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
                    kHeapObjectTag));
   __ SmiScale(at, key, kPointerSizeLog2);
   __ daddu(address, address, at);
@@ -79,6 +79,16 @@ struct Register {
   static const int kSizeInBytes = 8;
   static const int kCpRegister = 23;  // cp (s7) is the 23rd register.

+#if defined(V8_TARGET_LITTLE_ENDIAN)
+  static const int kMantissaOffset = 0;
+  static const int kExponentOffset = 4;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+  static const int kMantissaOffset = 4;
+  static const int kExponentOffset = 0;
+#else
+#error Unknown endianness
+#endif
+
   inline static int NumAllocatableRegisters();

   static int ToAllocationIndex(Register reg) {
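Note (illustrative only, not part of the patch): kMantissaOffset and kExponentOffset are the byte offsets of the two 32-bit halves of a stored double. A minimal host-side C++ check shows why the exponent word moves from offset 4 to offset 0 on a big-endian target — the exponent sits in the numerically high half of an IEEE-754 double, which is the first word in memory on big-endian layouts:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  double d = 1.0;  // bit pattern 0x3FF0000000000000
  uint32_t words[2];
  std::memcpy(words, &d, sizeof(d));
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  const int kExponentWord = 0;  // high (exponent) word comes first in memory
#else
  const int kExponentWord = 1;  // high (exponent) word comes second in memory
#endif
  assert(words[kExponentWord] == 0x3FF00000u);
  assert(words[1 - kExponentWord] == 0u);
  return 0;
}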
@@ -182,8 +182,10 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
   Register input_high = scratch2;
   Register input_low = scratch3;

-  __ lw(input_low, MemOperand(input_reg, double_offset));
-  __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize));
+  __ lw(input_low,
+        MemOperand(input_reg, double_offset + Register::kMantissaOffset));
+  __ lw(input_high,
+        MemOperand(input_reg, double_offset + Register::kExponentOffset));

   Label normal_exponent, restore_sign;
   // Extract the biased exponent in result.
@@ -3309,7 +3311,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
   Label not_string, slow_string;
   __ Branch(&not_string, hs, a1, Operand(FIRST_NONSTRING_TYPE));
   // Check if string has a cached array index.
-  __ ld(a2, FieldMemOperand(a0, String::kHashFieldOffset));
+  __ lwu(a2, FieldMemOperand(a0, String::kHashFieldOffset));
   __ And(at, a2, Operand(String::kContainsCachedArrayIndexMask));
   __ Branch(&slow_string, ne, at, Operand(zero_reg));
   __ IndexFromHash(a2, a0);
@@ -5602,8 +5604,8 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
  //  --
  //  -- sp[0]              : last argument
  //  -- ...
-  //  -- sp[(argc - 1)* 4]  : first argument
-  //  -- sp[argc * 4]       : receiver
+  //  -- sp[(argc - 1)* 8]  : first argument
+  //  -- sp[argc * 8]       : receiver
  //  -----------------------------------

  Register callee = a0;
@@ -5662,10 +5664,12 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
            Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
    __ sd(at, MemOperand(a0, 1 * kPointerSize));
    // FunctionCallbackInfo::length_ = argc
+    // Stored as int field, 32-bit integers within struct on stack always left
+    // justified by n64 ABI.
    __ li(at, Operand(argc.immediate()));
-    __ sd(at, MemOperand(a0, 2 * kPointerSize));
+    __ sw(at, MemOperand(a0, 2 * kPointerSize));
    // FunctionCallbackInfo::is_construct_call_ = 0
-    __ sd(zero_reg, MemOperand(a0, 3 * kPointerSize));
+    __ sw(zero_reg, MemOperand(a0, 2 * kPointerSize + kIntSize));
  } else {
    // FunctionCallbackInfo::values_
    __ dsll(at, argc.reg(), kPointerSizeLog2);
@@ -5673,11 +5677,13 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
    __ Daddu(at, at, Operand((FCA::kArgsLength - 1) * kPointerSize));
    __ sd(at, MemOperand(a0, 1 * kPointerSize));
    // FunctionCallbackInfo::length_ = argc
-    __ sd(argc.reg(), MemOperand(a0, 2 * kPointerSize));
+    // Stored as int field, 32-bit integers within struct on stack always left
+    // justified by n64 ABI.
+    __ sw(argc.reg(), MemOperand(a0, 2 * kPointerSize));
    // FunctionCallbackInfo::is_construct_call_
    __ Daddu(argc.reg(), argc.reg(), Operand(FCA::kArgsLength + 1));
    __ dsll(at, argc.reg(), kPointerSizeLog2);
-    __ sd(at, MemOperand(a0, 3 * kPointerSize));
+    __ sw(at, MemOperand(a0, 2 * kPointerSize + kIntSize));
  }

  ExternalReference thunk_ref =
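Note (illustrative only, not part of the patch; the layout below is a stand-in, not V8's FunctionCallbackInfo): length_ and is_construct_call_ are read back as 32-bit C++ ints, so they have to be written with sw at the int's own offset. A 64-bit sd only happens to work on little-endian, where the low 32 bits of the doubleword land at the int's offset; on big-endian they land 4 bytes later and the int reads as zero:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  uint64_t slot = 0;
  uint64_t wide = 7;               // what a 64-bit "sd argc, slot" would store
  std::memcpy(&slot, &wide, 8);
  int32_t as_int;
  std::memcpy(&as_int, &slot, 4);  // how a C++ int field at that offset is read
  std::printf("%d\n", as_int);     // 7 on little-endian, 0 on big-endian
  return 0;
}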
@@ -45,21 +45,13 @@ UnaryMathFunction CreateExpFunction() {
    Register temp2 = a5;
    Register temp3 = a6;

-    if (!IsMipsSoftFloatABI) {
-      // Input value is in f12 anyway, nothing to do.
-    } else {
-      __ Move(input, a0, a1);
-    }
+    __ MovFromFloatParameter(input);
    __ Push(temp3, temp2, temp1);
    MathExpGenerator::EmitMathExp(
        &masm, input, result, double_scratch1, double_scratch2,
        temp1, temp2, temp3);
    __ Pop(temp3, temp2, temp1);
-    if (!IsMipsSoftFloatABI) {
-      // Result is already in f0, nothing to do.
-    } else {
-      __ Move(v0, v1, result);
-    }
+    __ MovToFloatResult(result);
    __ Ret();
  }
@@ -141,10 +133,17 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
    __ beq(a3, zero_reg, &aligned);  // Already aligned.
    __ subu(a2, a2, a3);  // In delay slot. a2 is the remining bytes count.

-    __ lwr(t8, MemOperand(a1));
-    __ addu(a1, a1, a3);
-    __ swr(t8, MemOperand(a0));
-    __ addu(a0, a0, a3);
+    if (kArchEndian == kLittle) {
+      __ lwr(t8, MemOperand(a1));
+      __ addu(a1, a1, a3);
+      __ swr(t8, MemOperand(a0));
+      __ addu(a0, a0, a3);
+    } else {
+      __ lwl(t8, MemOperand(a1));
+      __ addu(a1, a1, a3);
+      __ swl(t8, MemOperand(a0));
+      __ addu(a0, a0, a3);
+    }

    // Now dst/src are both aligned to (word) aligned addresses. Set a2 to
    // count how many bytes we have to copy after all the 64 byte chunks are
@ -297,12 +296,21 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
|
||||
__ beq(a3, zero_reg, &ua_chk16w);
|
||||
__ subu(a2, a2, a3); // In delay slot.
|
||||
|
||||
__ lwr(v1, MemOperand(a1));
|
||||
__ lwl(v1,
|
||||
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ addu(a1, a1, a3);
|
||||
__ swr(v1, MemOperand(a0));
|
||||
__ addu(a0, a0, a3);
|
||||
if (kArchEndian == kLittle) {
|
||||
__ lwr(v1, MemOperand(a1));
|
||||
__ lwl(v1,
|
||||
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ addu(a1, a1, a3);
|
||||
__ swr(v1, MemOperand(a0));
|
||||
__ addu(a0, a0, a3);
|
||||
} else {
|
||||
__ lwl(v1, MemOperand(a1));
|
||||
__ lwr(v1,
|
||||
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ addu(a1, a1, a3);
|
||||
__ swl(v1, MemOperand(a0));
|
||||
__ addu(a0, a0, a3);
|
||||
}
|
||||
|
||||
// Now the dst (but not the source) is aligned. Set a2 to count how many
|
||||
// bytes we have to copy after all the 64 byte chunks are copied and a3 to
|
||||
@ -330,41 +338,79 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
|
||||
}
|
||||
|
||||
__ bind(&ua_loop16w);
|
||||
__ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
|
||||
__ lwr(a4, MemOperand(a1));
|
||||
__ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
|
||||
__ lwr(a6, MemOperand(a1, 2, loadstore_chunk));
|
||||
if (kArchEndian == kLittle) {
|
||||
__ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
|
||||
__ lwr(a4, MemOperand(a1));
|
||||
__ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
|
||||
__ lwr(a6, MemOperand(a1, 2, loadstore_chunk));
|
||||
|
||||
if (pref_hint_store == kPrefHintPrepareForStore) {
|
||||
__ sltu(v1, t9, a0);
|
||||
__ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
|
||||
if (pref_hint_store == kPrefHintPrepareForStore) {
|
||||
__ sltu(v1, t9, a0);
|
||||
__ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
|
||||
}
|
||||
__ lwr(a7, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
|
||||
|
||||
__ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
|
||||
__ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
|
||||
|
||||
__ bind(&ua_skip_pref);
|
||||
__ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
|
||||
__ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
|
||||
__ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
|
||||
__ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
|
||||
__ lwl(a4,
|
||||
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(a5,
|
||||
MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(a6,
|
||||
MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(a7,
|
||||
MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(t0,
|
||||
MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(t1,
|
||||
MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(t2,
|
||||
MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(t3,
|
||||
MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
} else {
|
||||
__ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
|
||||
__ lwl(a4, MemOperand(a1));
|
||||
__ lwl(a5, MemOperand(a1, 1, loadstore_chunk));
|
||||
__ lwl(a6, MemOperand(a1, 2, loadstore_chunk));
|
||||
|
||||
if (pref_hint_store == kPrefHintPrepareForStore) {
|
||||
__ sltu(v1, t9, a0);
|
||||
__ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
|
||||
}
|
||||
__ lwl(a7, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
|
||||
|
||||
__ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
|
||||
__ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
|
||||
|
||||
__ bind(&ua_skip_pref);
|
||||
__ lwl(t0, MemOperand(a1, 4, loadstore_chunk));
|
||||
__ lwl(t1, MemOperand(a1, 5, loadstore_chunk));
|
||||
__ lwl(t2, MemOperand(a1, 6, loadstore_chunk));
|
||||
__ lwl(t3, MemOperand(a1, 7, loadstore_chunk));
|
||||
__ lwr(a4,
|
||||
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwr(a5,
|
||||
MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwr(a6,
|
||||
MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwr(a7,
|
||||
MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwr(t0,
|
||||
MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwr(t1,
|
||||
MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwr(t2,
|
||||
MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwr(t3,
|
||||
MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
}
|
||||
__ lwr(a7, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
|
||||
|
||||
__ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
|
||||
__ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
|
||||
|
||||
__ bind(&ua_skip_pref);
|
||||
__ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
|
||||
__ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
|
||||
__ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
|
||||
__ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
|
||||
__ lwl(a4,
|
||||
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(a5,
|
||||
MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(a6,
|
||||
MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(a7,
|
||||
MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(t0,
|
||||
MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(t1,
|
||||
MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(t2,
|
||||
MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(t3,
|
||||
MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
|
||||
__ sw(a4, MemOperand(a0));
|
||||
__ sw(a5, MemOperand(a0, 1, loadstore_chunk));
|
||||
@ -374,30 +420,57 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
|
||||
__ sw(t1, MemOperand(a0, 5, loadstore_chunk));
|
||||
__ sw(t2, MemOperand(a0, 6, loadstore_chunk));
|
||||
__ sw(t3, MemOperand(a0, 7, loadstore_chunk));
|
||||
__ lwr(a4, MemOperand(a1, 8, loadstore_chunk));
|
||||
__ lwr(a5, MemOperand(a1, 9, loadstore_chunk));
|
||||
__ lwr(a6, MemOperand(a1, 10, loadstore_chunk));
|
||||
__ lwr(a7, MemOperand(a1, 11, loadstore_chunk));
|
||||
__ lwr(t0, MemOperand(a1, 12, loadstore_chunk));
|
||||
__ lwr(t1, MemOperand(a1, 13, loadstore_chunk));
|
||||
__ lwr(t2, MemOperand(a1, 14, loadstore_chunk));
|
||||
__ lwr(t3, MemOperand(a1, 15, loadstore_chunk));
|
||||
__ lwl(a4,
|
||||
MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(a5,
|
||||
MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(a6,
|
||||
MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(a7,
|
||||
MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(t0,
|
||||
MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(t1,
|
||||
MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(t2,
|
||||
MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(t3,
|
||||
MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
if (kArchEndian == kLittle) {
|
||||
__ lwr(a4, MemOperand(a1, 8, loadstore_chunk));
|
||||
__ lwr(a5, MemOperand(a1, 9, loadstore_chunk));
|
||||
__ lwr(a6, MemOperand(a1, 10, loadstore_chunk));
|
||||
__ lwr(a7, MemOperand(a1, 11, loadstore_chunk));
|
||||
__ lwr(t0, MemOperand(a1, 12, loadstore_chunk));
|
||||
__ lwr(t1, MemOperand(a1, 13, loadstore_chunk));
|
||||
__ lwr(t2, MemOperand(a1, 14, loadstore_chunk));
|
||||
__ lwr(t3, MemOperand(a1, 15, loadstore_chunk));
|
||||
__ lwl(a4,
|
||||
MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(a5,
|
||||
MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(a6,
|
||||
MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(a7,
|
||||
MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(t0,
|
||||
MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(t1,
|
||||
MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(t2,
|
||||
MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(t3,
|
||||
MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
} else {
|
||||
__ lwl(a4, MemOperand(a1, 8, loadstore_chunk));
|
||||
__ lwl(a5, MemOperand(a1, 9, loadstore_chunk));
|
||||
__ lwl(a6, MemOperand(a1, 10, loadstore_chunk));
|
||||
__ lwl(a7, MemOperand(a1, 11, loadstore_chunk));
|
||||
__ lwl(t0, MemOperand(a1, 12, loadstore_chunk));
|
||||
__ lwl(t1, MemOperand(a1, 13, loadstore_chunk));
|
||||
__ lwl(t2, MemOperand(a1, 14, loadstore_chunk));
|
||||
__ lwl(t3, MemOperand(a1, 15, loadstore_chunk));
|
||||
__ lwr(a4,
|
||||
MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwr(a5,
|
||||
MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwr(a6,
|
||||
MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwr(a7,
|
||||
MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwr(t0,
|
||||
MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwr(t1,
|
||||
MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwr(t2,
|
||||
MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwr(t3,
|
||||
MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
}
|
||||
__ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
|
||||
__ sw(a4, MemOperand(a0, 8, loadstore_chunk));
|
||||
__ sw(a5, MemOperand(a0, 9, loadstore_chunk));
|
||||
@ -421,30 +494,57 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
|
||||
|
||||
__ beq(a2, t8, &ua_chk1w);
|
||||
__ nop(); // In delay slot.
|
||||
__ lwr(a4, MemOperand(a1));
|
||||
__ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
|
||||
__ lwr(a6, MemOperand(a1, 2, loadstore_chunk));
|
||||
__ lwr(a7, MemOperand(a1, 3, loadstore_chunk));
|
||||
__ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
|
||||
__ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
|
||||
__ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
|
||||
__ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
|
||||
__ lwl(a4,
|
||||
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(a5,
|
||||
MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(a6,
|
||||
MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(a7,
|
||||
MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(t0,
|
||||
MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(t1,
|
||||
MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(t2,
|
||||
MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(t3,
|
||||
MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
if (kArchEndian == kLittle) {
|
||||
__ lwr(a4, MemOperand(a1));
|
||||
__ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
|
||||
__ lwr(a6, MemOperand(a1, 2, loadstore_chunk));
|
||||
__ lwr(a7, MemOperand(a1, 3, loadstore_chunk));
|
||||
__ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
|
||||
__ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
|
||||
__ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
|
||||
__ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
|
||||
__ lwl(a4,
|
||||
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(a5,
|
||||
MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(a6,
|
||||
MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(a7,
|
||||
MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(t0,
|
||||
MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(t1,
|
||||
MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(t2,
|
||||
MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwl(t3,
|
||||
MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
} else {
|
||||
__ lwl(a4, MemOperand(a1));
|
||||
__ lwl(a5, MemOperand(a1, 1, loadstore_chunk));
|
||||
__ lwl(a6, MemOperand(a1, 2, loadstore_chunk));
|
||||
__ lwl(a7, MemOperand(a1, 3, loadstore_chunk));
|
||||
__ lwl(t0, MemOperand(a1, 4, loadstore_chunk));
|
||||
__ lwl(t1, MemOperand(a1, 5, loadstore_chunk));
|
||||
__ lwl(t2, MemOperand(a1, 6, loadstore_chunk));
|
||||
__ lwl(t3, MemOperand(a1, 7, loadstore_chunk));
|
||||
__ lwr(a4,
|
||||
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwr(a5,
|
||||
MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwr(a6,
|
||||
MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwr(a7,
|
||||
MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwr(t0,
|
||||
MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwr(t1,
|
||||
MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwr(t2,
|
||||
MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
__ lwr(t3,
|
||||
MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
|
||||
}
|
||||
__ addiu(a1, a1, 8 * loadstore_chunk);
|
||||
__ sw(a4, MemOperand(a0));
|
||||
__ sw(a5, MemOperand(a0, 1, loadstore_chunk));
|
||||
@@ -465,9 +565,15 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
    __ addu(a3, a0, a3);

    __ bind(&ua_wordCopy_loop);
-    __ lwr(v1, MemOperand(a1));
-    __ lwl(v1,
-           MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+    if (kArchEndian == kLittle) {
+      __ lwr(v1, MemOperand(a1));
+      __ lwl(v1,
+             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+    } else {
+      __ lwl(v1, MemOperand(a1));
+      __ lwr(v1,
+             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+    }
    __ addiu(a0, a0, loadstore_chunk);
    __ addiu(a1, a1, loadstore_chunk);
    __ bne(a0, a3, &ua_wordCopy_loop);
@@ -696,7 +802,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
  // Convert and copy elements.
  __ bind(&loop);
  __ ld(scratch2, MemOperand(scratch1));
-  __ Daddu(scratch1, scratch1, kIntSize);
+  __ Daddu(scratch1, scratch1, kPointerSize);
  // scratch2: current element
  __ JumpIfNotSmi(scratch2, &convert_hole);
  __ SmiUntag(scratch2);
@@ -717,9 +823,9 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
    __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(scratch2));
  }
  // mantissa
-  __ sw(hole_lower, MemOperand(scratch3));
+  __ sw(hole_lower, MemOperand(scratch3, Register::kMantissaOffset));
  // exponent
-  __ sw(hole_upper, MemOperand(scratch3, kIntSize));
+  __ sw(hole_upper, MemOperand(scratch3, Register::kExponentOffset));
  __ Daddu(scratch3, scratch3, kDoubleSize);

  __ bind(&entry);
@@ -786,7 +892,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
  Register dst_end = length;
  Register heap_number_map = scratch;
  __ Daddu(src_elements, src_elements,
-           Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
+           Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  __ Daddu(dst_elements, array, Operand(FixedArray::kHeaderSize));
  __ SmiScale(dst_end, dst_end, kPointerSizeLog2);
  __ Daddu(dst_end, dst_elements, dst_end);
@@ -824,10 +930,10 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(

  __ bind(&loop);
  Register upper_bits = key;
-  __ lw(upper_bits, MemOperand(src_elements));
+  __ lw(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
  __ Daddu(src_elements, src_elements, kDoubleSize);
  // upper_bits: current element's upper 32 bit
-  // src_elements: address of next element's upper 32 bit
+  // src_elements: address of next element
  __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));

  // Non-hole double, copy value into a heap number.
@@ -837,11 +943,11 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
  __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
                        &gc_required);
  // heap_number: new heap number
-  // Load mantissa of current element, src_elements
-  // point to exponent of next element.
-  __ lw(scratch2, MemOperand(heap_number, -12));
-  __ sw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
-  __ sw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
+  // Load current element, src_elements point to next element.
+
+  __ ld(scratch2, MemOperand(src_elements, -kDoubleSize));
+  __ sd(scratch2, FieldMemOperand(heap_number, HeapNumber::kValueOffset));
+
  __ mov(scratch2, dst_elements);
  __ sd(heap_number, MemOperand(dst_elements));
  __ Daddu(dst_elements, dst_elements, kPointerSize);
@@ -1045,8 +1151,8 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
  __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
  __ dsll(at, temp2, 3);
  __ Daddu(temp3, temp3, Operand(at));
-  __ lwu(temp2, MemOperand(temp3, 0));
-  __ lwu(temp3, MemOperand(temp3, kIntSize));
+  __ lwu(temp2, MemOperand(temp3, Register::kMantissaOffset));
+  __ lwu(temp3, MemOperand(temp3, Register::kExponentOffset));
  // The first word is loaded is the lower number register.
  if (temp2.code() < temp3.code()) {
    __ dsll(at, temp1, 20);
@@ -35,6 +35,16 @@ enum ArchVariants {
 #endif


+enum Endianness { kLittle, kBig };
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+static const Endianness kArchEndian = kLittle;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+static const Endianness kArchEndian = kBig;
+#else
+#error Unknown endianness
+#endif
+
 // TODO(plind): consider deriving ABI from compiler flags or build system.

 // ABI-dependent definitions are made with #define in simulator-mips64.h,
@@ -2981,7 +2981,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
    // Read int value directly from upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
-    offset += kPointerSize / 2;
+    offset = SmiWordOffset(offset);
    representation = Representation::Integer32();
  }
  __ Load(result, FieldMemOperand(object, offset), representation);
@@ -3255,7 +3255,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
    // Read int value directly from upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
-    offset += kPointerSize / 2;
+    offset = SmiWordOffset(offset);
  }

  __ Load(result, MemOperand(store_base, offset), representation);
@@ -4202,7 +4202,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
      __ AssertSmi(scratch2);
    }
    // Store int value directly to upper half of the smi.
-    offset += kPointerSize / 2;
+    offset = SmiWordOffset(offset);
    representation = Representation::Integer32();
  }
  MemOperand operand = FieldMemOperand(destination, offset);
@@ -4468,7 +4468,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
    // Store int value directly to upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
-    offset += kPointerSize / 2;
+    offset = SmiWordOffset(offset);
    representation = Representation::Integer32();
  }

@@ -1209,7 +1209,24 @@ void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) {
  // Assert fail if the offset from start of object IS actually aligned.
  // ONLY use with known misalignment, since there is performance cost.
  DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
-  // TODO(plind): endian dependency.
-  lwu(rd, rs);
-  lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
-  dsll32(scratch, scratch, 0);
+  if (kArchEndian == kLittle) {
+    lwu(rd, rs);
+    lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+    dsll32(scratch, scratch, 0);
+  } else {
+    lw(rd, rs);
+    lwu(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+    dsll32(rd, rd, 0);
+  }
  Daddu(rd, rd, scratch);
}


+// Load consequent 32-bit word pair in 64-bit reg. and put first word in low
+// bits,
+// second word in high bits.
+void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
+                                  Register scratch) {
+  lwu(rd, rs);
+  lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+  dsll32(scratch, scratch, 0);
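Note (illustrative only, not V8 code): Uld reassembles a 64-bit value from two 32-bit loads. The word at the lower address supplies the low half on little-endian and the high half on big-endian, which is exactly the branch added above. The byte order inside each word is already handled by lw/lwu on the real hardware; the sketch only models the word placement:

#include <cstdint>
#include <cstring>

uint64_t LoadWordPair64(const uint8_t* p, bool little_endian) {
  uint32_t w0, w1;  // w0 = word at p, w1 = word at p + 4
  std::memcpy(&w0, p, 4);
  std::memcpy(&w1, p + 4, 4);
  if (little_endian) {
    return static_cast<uint64_t>(w0) | (static_cast<uint64_t>(w1) << 32);
  }
  // Big-endian: the first word in memory is the high half.
  return (static_cast<uint64_t>(w0) << 32) | static_cast<uint64_t>(w1);
}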
@@ -1223,7 +1240,21 @@ void MacroAssembler::Usd(Register rd, const MemOperand& rs, Register scratch) {
  // Assert fail if the offset from start of object IS actually aligned.
  // ONLY use with known misalignment, since there is performance cost.
  DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
-  // TODO(plind): endian dependency.
-  sw(rd, rs);
-  dsrl32(scratch, rd, 0);
-  sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+  if (kArchEndian == kLittle) {
+    sw(rd, rs);
+    dsrl32(scratch, rd, 0);
+    sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+  } else {
+    sw(rd, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+    dsrl32(scratch, rd, 0);
+    sw(scratch, rs);
+  }
}


+// Do 64-bit store as two consequent 32-bit stores to unaligned address.
+void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
+                                   Register scratch) {
+  sw(rd, rs);
+  dsrl32(scratch, rd, 0);
+  sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
@ -3774,21 +3805,39 @@ void MacroAssembler::CopyBytes(Register src,
|
||||
|
||||
// TODO(kalmard) check if this can be optimized to use sw in most cases.
|
||||
// Can't use unaligned access - copy byte by byte.
|
||||
sb(scratch, MemOperand(dst, 0));
|
||||
dsrl(scratch, scratch, 8);
|
||||
sb(scratch, MemOperand(dst, 1));
|
||||
dsrl(scratch, scratch, 8);
|
||||
sb(scratch, MemOperand(dst, 2));
|
||||
dsrl(scratch, scratch, 8);
|
||||
sb(scratch, MemOperand(dst, 3));
|
||||
dsrl(scratch, scratch, 8);
|
||||
sb(scratch, MemOperand(dst, 4));
|
||||
dsrl(scratch, scratch, 8);
|
||||
sb(scratch, MemOperand(dst, 5));
|
||||
dsrl(scratch, scratch, 8);
|
||||
sb(scratch, MemOperand(dst, 6));
|
||||
dsrl(scratch, scratch, 8);
|
||||
sb(scratch, MemOperand(dst, 7));
|
||||
if (kArchEndian == kLittle) {
|
||||
sb(scratch, MemOperand(dst, 0));
|
||||
dsrl(scratch, scratch, 8);
|
||||
sb(scratch, MemOperand(dst, 1));
|
||||
dsrl(scratch, scratch, 8);
|
||||
sb(scratch, MemOperand(dst, 2));
|
||||
dsrl(scratch, scratch, 8);
|
||||
sb(scratch, MemOperand(dst, 3));
|
||||
dsrl(scratch, scratch, 8);
|
||||
sb(scratch, MemOperand(dst, 4));
|
||||
dsrl(scratch, scratch, 8);
|
||||
sb(scratch, MemOperand(dst, 5));
|
||||
dsrl(scratch, scratch, 8);
|
||||
sb(scratch, MemOperand(dst, 6));
|
||||
dsrl(scratch, scratch, 8);
|
||||
sb(scratch, MemOperand(dst, 7));
|
||||
} else {
|
||||
sb(scratch, MemOperand(dst, 7));
|
||||
dsrl(scratch, scratch, 8);
|
||||
sb(scratch, MemOperand(dst, 6));
|
||||
dsrl(scratch, scratch, 8);
|
||||
sb(scratch, MemOperand(dst, 5));
|
||||
dsrl(scratch, scratch, 8);
|
||||
sb(scratch, MemOperand(dst, 4));
|
||||
dsrl(scratch, scratch, 8);
|
||||
sb(scratch, MemOperand(dst, 3));
|
||||
dsrl(scratch, scratch, 8);
|
||||
sb(scratch, MemOperand(dst, 2));
|
||||
dsrl(scratch, scratch, 8);
|
||||
sb(scratch, MemOperand(dst, 1));
|
||||
dsrl(scratch, scratch, 8);
|
||||
sb(scratch, MemOperand(dst, 0));
|
||||
}
|
||||
Daddu(dst, dst, 8);
|
||||
|
||||
Dsubu(length, length, Operand(kPointerSize));
|
||||
@ -3986,7 +4035,11 @@ void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
|
||||
|
||||
void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
|
||||
if (IsMipsSoftFloatABI) {
|
||||
Move(dst, v0, v1);
|
||||
if (kArchEndian == kLittle) {
|
||||
Move(dst, v0, v1);
|
||||
} else {
|
||||
Move(dst, v1, v0);
|
||||
}
|
||||
} else {
|
||||
Move(dst, f0); // Reg f0 is o32 ABI FP return value.
|
||||
}
|
||||
@ -3995,9 +4048,13 @@ void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
|
||||
|
||||
void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
|
||||
if (IsMipsSoftFloatABI) {
|
||||
Move(dst, a0, a1);
|
||||
if (kArchEndian == kLittle) {
|
||||
Move(dst, a0, a1);
|
||||
} else {
|
||||
Move(dst, a1, a0);
|
||||
}
|
||||
} else {
|
||||
Move(dst, f12); // Reg f12 is o32 ABI FP first argument value.
|
||||
Move(dst, f12); // Reg f12 is n64 ABI FP first argument value.
|
||||
}
|
||||
}
|
||||
|
||||
@ -4006,7 +4063,11 @@ void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
|
||||
if (!IsMipsSoftFloatABI) {
|
||||
Move(f12, src);
|
||||
} else {
|
||||
Move(a0, a1, src);
|
||||
if (kArchEndian == kLittle) {
|
||||
Move(a0, a1, src);
|
||||
} else {
|
||||
Move(a1, a0, src);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -4015,7 +4076,11 @@ void MacroAssembler::MovToFloatResult(DoubleRegister src) {
|
||||
if (!IsMipsSoftFloatABI) {
|
||||
Move(f0, src);
|
||||
} else {
|
||||
Move(v0, v1, src);
|
||||
if (kArchEndian == kLittle) {
|
||||
Move(v0, v1, src);
|
||||
} else {
|
||||
Move(v1, v0, src);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -4033,8 +4098,13 @@ void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
|
||||
Move(fparg2, src2);
|
||||
}
|
||||
} else {
|
||||
Move(a0, a1, src1);
|
||||
Move(a2, a3, src2);
|
||||
if (kArchEndian == kLittle) {
|
||||
Move(a0, a1, src1);
|
||||
Move(a2, a3, src2);
|
||||
} else {
|
||||
Move(a1, a0, src1);
|
||||
Move(a3, a2, src2);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5121,7 +5191,7 @@ void MacroAssembler::InitializeNewString(Register string,
  sd(scratch1, FieldMemOperand(string, String::kLengthOffset));
  li(scratch1, Operand(String::kEmptyHashField));
  sd(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
-  sd(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
+  sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
}

@@ -5650,8 +5720,8 @@ void MacroAssembler::HasColor(Register object,
  GetMarkBits(object, bitmap_scratch, mask_scratch);

  Label other_color;
-  // Note that we are using a 4-byte aligned 8-byte load.
-  Uld(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  // Note that we are using two 4-byte aligned loads.
+  LoadWordPair(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  And(t8, t9, Operand(mask_scratch));
  Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
  // Shift left 1 by adding.
@@ -5724,7 +5794,8 @@ void MacroAssembler::EnsureNotWhite(
  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  // Note that we are using a 4-byte aligned 8-byte load.
-  Uld(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  LoadWordPair(load_scratch,
+               MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  And(t8, mask_scratch, load_scratch);
  Branch(&done, ne, t8, Operand(zero_reg));

@@ -5803,14 +5874,14 @@ void MacroAssembler::EnsureNotWhite(
  bind(&is_data_object);
  // Value is a data object, and it is white. Mark it black. Since we know
  // that the object is white we can make it black by flipping one bit.
-  Uld(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  LoadWordPair(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  Or(t8, t8, Operand(mask_scratch));
-  Usd(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  StoreWordPair(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));

  And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
-  Uld(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+  LoadWordPair(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
  Daddu(t8, t8, Operand(length));
-  Usd(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+  StoreWordPair(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));

  bind(&done);
}
@@ -5823,14 +5894,14 @@ void MacroAssembler::LoadInstanceDescriptors(Register map,


void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
-  ld(dst, FieldMemOperand(map, Map::kBitField3Offset));
+  lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}


void MacroAssembler::EnumLength(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
-  ld(dst, FieldMemOperand(map, Map::kBitField3Offset));
+  lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
  And(dst, dst, Operand(Map::EnumLengthBits::kMask));
  SmiTag(dst);
}
@@ -116,6 +116,13 @@ bool AreAliased(Register reg1,
// -----------------------------------------------------------------------------
// Static helper functions.

+#if defined(V8_TARGET_LITTLE_ENDIAN)
+#define SmiWordOffset(offset) (offset + kPointerSize / 2)
+#else
+#define SmiWordOffset(offset) offset
+#endif
+
+
inline MemOperand ContextOperand(Register context, int index) {
  return MemOperand(context, Context::SlotOffset(index));
}
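Note (illustrative only, not part of the patch): on 64-bit V8 a smi is the 32-bit payload shifted into the upper half of the word (kSmiShift == 32), so a 32-bit load of the payload needs offset +4 on little-endian targets and +0 on big-endian ones — which is what SmiWordOffset() selects:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  int64_t smi = static_cast<int64_t>(42) << 32;  // tagged smi, payload 42
  int32_t payload;
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  const int kSmiWordOffset = 0;  // upper half is first in memory
#else
  const int kSmiWordOffset = 4;  // upper half is second in memory
#endif
  std::memcpy(&payload, reinterpret_cast<const char*>(&smi) + kSmiWordOffset, 4);
  assert(payload == 42);
  return 0;
}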
@@ -133,9 +140,9 @@ inline MemOperand FieldMemOperand(Register object, int offset) {


inline MemOperand UntagSmiMemOperand(Register rm, int offset) {
-  // Assumes that Smis are shifted by 32 bits and little endianness.
+  // Assumes that Smis are shifted by 32 bits.
  STATIC_ASSERT(kSmiShift == 32);
-  return MemOperand(rm, offset + (kSmiShift / kBitsPerByte));
+  return MemOperand(rm, SmiWordOffset(offset));
}

@@ -682,6 +689,9 @@ class MacroAssembler: public Assembler {
  void Uld(Register rd, const MemOperand& rs, Register scratch = at);
  void Usd(Register rd, const MemOperand& rs, Register scratch = at);

+  void LoadWordPair(Register rd, const MemOperand& rs, Register scratch = at);
+  void StoreWordPair(Register rd, const MemOperand& rs, Register scratch = at);
+
  // Load int32 in the rd register.
  void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
  inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
@@ -1052,18 +1052,26 @@ void Simulator::set_fpu_register(int fpureg, int64_t value) {


void Simulator::set_fpu_register_word(int fpureg, int32_t value) {
  // Set ONLY lower 32-bits, leaving upper bits untouched.
-  // TODO(plind): big endian issue.
  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-  int32_t *pword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg]);
+  int32_t* pword;
+  if (kArchEndian == kLittle) {
+    pword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg]);
+  } else {
+    pword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg]) + 1;
+  }
  *pword = value;
}


void Simulator::set_fpu_register_hi_word(int fpureg, int32_t value) {
  // Set ONLY upper 32-bits, leaving lower bits untouched.
-  // TODO(plind): big endian issue.
  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-  int32_t *phiword = (reinterpret_cast<int32_t*>(&FPUregisters_[fpureg])) + 1;
+  int32_t* phiword;
+  if (kArchEndian == kLittle) {
+    phiword = (reinterpret_cast<int32_t*>(&FPUregisters_[fpureg])) + 1;
+  } else {
+    phiword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg]);
+  }
  *phiword = value;
}
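Note (illustrative only, not simulator code): the simulator keeps each FPU register as a host int64_t, so the low 32 bits of the register image sit at index 0 of an overlaid int32_t pair only on a little-endian host; on a big-endian host they sit at index 1, which is the adjustment made above:

#include <cassert>
#include <cstdint>
#include <cstring>

int LowWordIndex() {                  // 0 on little-endian hosts, 1 on big-endian
  uint64_t probe = 1;
  uint32_t halves[2];
  std::memcpy(halves, &probe, sizeof(probe));
  return halves[0] == 1u ? 0 : 1;
}

int main() {
  int64_t reg = 0;
  int32_t* pword = reinterpret_cast<int32_t*>(&reg) + LowWordIndex();
  *pword = 0x12345678;                // what set_fpu_register_word() does
  assert((reg & 0xffffffff) == 0x12345678);
  return 0;
}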
@@ -493,9 +493,10 @@ class Simulator {

#ifdef MIPS_ABI_N64
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
-  static_cast<int>( \
-      Simulator::current(Isolate::Current()) \
-          ->Call(entry, 10, p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
+  static_cast<int>(Simulator::current(Isolate::Current()) \
+                       ->Call(entry, 10, p0, p1, p2, p3, p4, \
+                              reinterpret_cast<int64_t*>(p5), p6, p7, NULL, \
+                              p8))
#else  // Must be O32 Abi.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
  static_cast<int>( \
@@ -219,7 +219,7 @@ void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
  Label not_at_start;
  // Did we start the match at the start of the string at all?
-  __ lw(a0, MemOperand(frame_pointer(), kStartIndex));
+  __ ld(a0, MemOperand(frame_pointer(), kStartIndex));
  BranchOrBacktrack(&not_at_start, ne, a0, Operand(zero_reg));

  // If we did, are we still at the start of the input?
@@ -232,7 +232,7 @@ void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {

void RegExpMacroAssemblerMIPS::CheckNotAtStart(Label* on_not_at_start) {
  // Did we start the match at the start of the string at all?
-  __ lw(a0, MemOperand(frame_pointer(), kStartIndex));
+  __ ld(a0, MemOperand(frame_pointer(), kStartIndex));
  BranchOrBacktrack(on_not_at_start, ne, a0, Operand(zero_reg));
  // If we did, are we still at the start of the input?
  __ ld(a1, MemOperand(frame_pointer(), kInputStart));
@@ -779,7 +779,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
    if (global()) {
      // Restart matching if the regular expression is flagged as global.
      __ ld(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
-      __ lw(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
+      __ ld(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
      __ ld(a2, MemOperand(frame_pointer(), kRegisterOutput));
      // Increment success counter.
      __ Daddu(a0, a0, 1);
@@ -1155,8 +1155,8 @@ int64_t RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
                                                       Address re_frame) {
  return NativeRegExpMacroAssembler::CheckStackGuardState(
      frame_entry<Isolate*>(re_frame, kIsolate),
-      frame_entry<int>(re_frame, kStartIndex),
-      frame_entry<int>(re_frame, kDirectCall) == 1, return_address, re_code,
+      static_cast<int>(frame_entry<int64_t>(re_frame, kStartIndex)),
+      frame_entry<int64_t>(re_frame, kDirectCall) == 1, return_address, re_code,
      frame_entry_address<String*>(re_frame, kInputString),
      frame_entry_address<const byte*>(re_frame, kInputStart),
      frame_entry_address<const byte*>(re_frame, kInputEnd));
@@ -276,7 +276,7 @@
}],  # 'arch == mips'

##############################################################################
-['arch == mips64el', {
+['arch == mips64el or arch == mips64', {
  'test-cpu-profiler/CollectDeoptEvents': [PASS, FAIL],

  # BUG(v8:3154).
@@ -287,7 +287,7 @@
  'test-serialize/DeserializeFromSecondSerializationAndRunScript2': [SKIP],
  'test-serialize/DeserializeAndRunScript2': [SKIP],
  'test-serialize/DeserializeFromSecondSerialization': [SKIP],
-}],  # 'arch == mips64el'
+}],  # 'arch == mips64el or arch == mips64'

##############################################################################
['arch == x87', {
@@ -590,11 +590,19 @@ TEST(MIPS6) {
  USE(dummy);

  CHECK_EQ(static_cast<int32_t>(0x11223344), t.r1);
-  CHECK_EQ(static_cast<int32_t>(0x3344), t.r2);
-  CHECK_EQ(static_cast<int32_t>(0xffffbbcc), t.r3);
-  CHECK_EQ(static_cast<int32_t>(0x0000bbcc), t.r4);
-  CHECK_EQ(static_cast<int32_t>(0xffffffcc), t.r5);
-  CHECK_EQ(static_cast<int32_t>(0x3333bbcc), t.r6);
+  if (kArchEndian == kLittle) {
+    CHECK_EQ(static_cast<int32_t>(0x3344), t.r2);
+    CHECK_EQ(static_cast<int32_t>(0xffffbbcc), t.r3);
+    CHECK_EQ(static_cast<int32_t>(0x0000bbcc), t.r4);
+    CHECK_EQ(static_cast<int32_t>(0xffffffcc), t.r5);
+    CHECK_EQ(static_cast<int32_t>(0x3333bbcc), t.r6);
+  } else {
+    CHECK_EQ(static_cast<int32_t>(0x1122), t.r2);
+    CHECK_EQ(static_cast<int32_t>(0xffff99aa), t.r3);
+    CHECK_EQ(static_cast<int32_t>(0x000099aa), t.r4);
+    CHECK_EQ(static_cast<int32_t>(0xffffff99), t.r5);
+    CHECK_EQ(static_cast<int32_t>(0x99aa3333), t.r6);
+  }
}

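Note (illustrative only, not part of the test): the two expectation sets differ because sub-word loads pick up different bytes of the same 32-bit value depending on byte order. For example, a halfword load at offset 0 of a word holding 0x99aabbcc sign-extends to 0xffffbbcc on a little-endian target and 0xffff99aa on a big-endian one, which matches the pair of r3 expectations above:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  uint32_t word = 0x99aabbcc;
  int16_t half;
  std::memcpy(&half, &word, 2);  // halfword load at offset 0, like "lh"
  std::printf("0x%08x\n", static_cast<uint32_t>(static_cast<int32_t>(half)));
  return 0;
}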
@ -1026,25 +1034,47 @@ TEST(MIPS11) {
|
||||
Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
|
||||
USE(dummy);
|
||||
|
||||
CHECK_EQ(static_cast<int32_t>(0x44bbccdd), t.lwl_0);
|
||||
CHECK_EQ(static_cast<int32_t>(0x3344ccdd), t.lwl_1);
|
||||
CHECK_EQ(static_cast<int32_t>(0x223344dd), t.lwl_2);
|
||||
CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwl_3);
|
||||
if (kArchEndian == kLittle) {
|
||||
CHECK_EQ(static_cast<int32_t>(0x44bbccdd), t.lwl_0);
|
||||
CHECK_EQ(static_cast<int32_t>(0x3344ccdd), t.lwl_1);
|
||||
CHECK_EQ(static_cast<int32_t>(0x223344dd), t.lwl_2);
|
||||
CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwl_3);
|
||||
|
||||
CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwr_0);
|
||||
CHECK_EQ(static_cast<int32_t>(0xaa112233), t.lwr_1);
|
||||
CHECK_EQ(static_cast<int32_t>(0xaabb1122), t.lwr_2);
|
||||
CHECK_EQ(static_cast<int32_t>(0xaabbcc11), t.lwr_3);
|
||||
CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwr_0);
|
||||
CHECK_EQ(static_cast<int32_t>(0xaa112233), t.lwr_1);
|
||||
CHECK_EQ(static_cast<int32_t>(0xaabb1122), t.lwr_2);
|
||||
CHECK_EQ(static_cast<int32_t>(0xaabbcc11), t.lwr_3);
|
||||
|
||||
CHECK_EQ(static_cast<int32_t>(0x112233aa), t.swl_0);
|
||||
CHECK_EQ(static_cast<int32_t>(0x1122aabb), t.swl_1);
|
||||
CHECK_EQ(static_cast<int32_t>(0x11aabbcc), t.swl_2);
|
||||
CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swl_3);
|
||||
CHECK_EQ(static_cast<int32_t>(0x112233aa), t.swl_0);
|
||||
CHECK_EQ(static_cast<int32_t>(0x1122aabb), t.swl_1);
|
||||
CHECK_EQ(static_cast<int32_t>(0x11aabbcc), t.swl_2);
|
||||
CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swl_3);
|
||||
|
||||
CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swr_0);
|
||||
CHECK_EQ(static_cast<int32_t>(0xbbccdd44), t.swr_1);
|
||||
CHECK_EQ(static_cast<int32_t>(0xccdd3344), t.swr_2);
|
||||
CHECK_EQ(static_cast<int32_t>(0xdd223344), t.swr_3);
|
||||
CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swr_0);
|
||||
CHECK_EQ(static_cast<int32_t>(0xbbccdd44), t.swr_1);
|
||||
CHECK_EQ(static_cast<int32_t>(0xccdd3344), t.swr_2);
|
||||
CHECK_EQ(static_cast<int32_t>(0xdd223344), t.swr_3);
|
||||
} else {
|
||||
CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwl_0);
|
||||
CHECK_EQ(static_cast<int32_t>(0x223344dd), t.lwl_1);
|
||||
CHECK_EQ(static_cast<int32_t>(0x3344ccdd), t.lwl_2);
|
||||
CHECK_EQ(static_cast<int32_t>(0x44bbccdd), t.lwl_3);
|
||||
|
||||
CHECK_EQ(static_cast<int32_t>(0xaabbcc11), t.lwr_0);
|
||||
CHECK_EQ(static_cast<int32_t>(0xaabb1122), t.lwr_1);
|
||||
CHECK_EQ(static_cast<int32_t>(0xaa112233), t.lwr_2);
|
||||
CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwr_3);
|
||||
|
||||
CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swl_0);
|
||||
CHECK_EQ(static_cast<int32_t>(0x11aabbcc), t.swl_1);
|
||||
CHECK_EQ(static_cast<int32_t>(0x1122aabb), t.swl_2);
|
||||
CHECK_EQ(static_cast<int32_t>(0x112233aa), t.swl_3);
|
||||
|
||||
CHECK_EQ(static_cast<int32_t>(0xdd223344), t.swr_0);
|
||||
CHECK_EQ(static_cast<int32_t>(0xccdd3344), t.swr_1);
|
||||
CHECK_EQ(static_cast<int32_t>(0xbbccdd44), t.swr_2);
|
||||
CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swr_3);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -184,8 +184,8 @@
  'array-constructor': [PASS, TIMEOUT],

  # Very slow on ARM and MIPS, contains no architecture dependent code.
-  'unicode-case-overoptimization': [PASS, NO_VARIANTS, ['arch == arm or arch == arm64 or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips64el or arch == mips', TIMEOUT]],
-  'regress/regress-3976': [PASS, NO_VARIANTS, ['arch == arm or arch == arm64 or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips64el or arch == mips', SKIP]],
+  'unicode-case-overoptimization': [PASS, NO_VARIANTS, ['arch == arm or arch == arm64 or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips64el or arch == mips64 or arch == mips', TIMEOUT]],
+  'regress/regress-3976': [PASS, NO_VARIANTS, ['arch == arm or arch == arm64 or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips64el or arch == mips64 or arch == mips', SKIP]],
  'regress/regress-crbug-482998': [PASS, NO_VARIANTS, ['arch == arm or arch == arm64 or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips64el or arch == mips', SKIP]],

##############################################################################
@@ -543,7 +543,7 @@
}],  # 'arch == mips'

##############################################################################
-['arch == mips64el', {
+['arch == mips64el or arch == mips64', {

  # Slow tests which times out in debug mode.
  'try': [PASS, ['mode == debug', SKIP]],
@@ -591,7 +591,7 @@

  # Currently always deopt on minus zero
  'math-floor-of-div-minus-zero': [SKIP],
-}],  # 'arch == mips64el'
+}],  # 'arch == mips64el or arch == mips64'

##############################################################################
['system == windows', {
@@ -887,7 +887,7 @@
}],  # 'arch == arm64'


-['arch == mipsel or arch == mips64el', {
+['arch == mipsel or arch == mips64el or arch == mips64', {

  # BUG(3251229): Times out when running new crankshaft test script.
  'ecma_3/RegExp/regress-311414': [SKIP],
@@ -904,7 +904,7 @@

  # BUG(1040): Allow this test to timeout.
  'js1_5/GC/regress-203278-2': [PASS, TIMEOUT, NO_VARIANTS],
-}],  # 'arch == mipsel or arch == mips64el'
+}],  # 'arch == mipsel or arch == mips64el or arch == mips64'

['arch == mipsel and simulator_run == True', {
  # Crashes due to C stack overflow.
@@ -746,7 +746,7 @@
}],  # no_i18n == True and mode == debug


-['arch == arm or arch == mipsel or arch == mips or arch == arm64 or arch == mips64el', {
+['arch == arm or arch == mipsel or arch == mips or arch == arm64 or arch == mips64 or arch == mips64el', {

  # TODO(mstarzinger): Causes stack overflow on simulators due to eager
  # compilation of parenthesized function literals. Needs investigation.
|
||||
'../../src/regexp/mips/regexp-macro-assembler-mips.h',
|
||||
],
|
||||
}],
|
||||
['v8_target_arch=="mips64el"', {
|
||||
['v8_target_arch=="mips64" or v8_target_arch=="mips64el"', {
|
||||
'sources': [ ### gcmole(arch:mips64el) ###
|
||||
'../../src/mips64/assembler-mips64.cc',
|
||||
'../../src/mips64/assembler-mips64.h',
|
||||
|
@@ -146,6 +146,7 @@ SUPPORTED_ARCHS = ["android_arm",
                   "x87",
                   "mips",
                   "mipsel",
+                  "mips64",
                   "mips64el",
                   "nacl_ia32",
                   "nacl_x64",
@@ -162,6 +163,7 @@ SLOW_ARCHS = ["android_arm",
              "arm",
              "mips",
              "mipsel",
+             "mips64",
              "mips64el",
              "nacl_ia32",
              "nacl_x64",
@@ -591,7 +593,7 @@ def Execute(arch, mode, args, options, suites, workspace):

  # TODO(all): Combine "simulator" and "simulator_run".
  simulator_run = not options.dont_skip_simulator_slow_tests and \
-      arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64el', \
+      arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', \
               'ppc', 'ppc64'] and \
      ARCH_GUESS and arch != ARCH_GUESS
  # Find available test suites and read test cases from them.
@@ -56,7 +56,7 @@ DEFS = {FAIL_OK: [FAIL, OKAY],
VARIABLES = {ALWAYS: True}
for var in ["debug", "release", "big", "little",
            "android_arm", "android_arm64", "android_ia32", "android_x87",
-           "android_x64", "arm", "arm64", "ia32", "mips", "mipsel",
+           "android_x64", "arm", "arm64", "ia32", "mips", "mipsel", "mips64",
            "mips64el", "x64", "x87", "nacl_ia32", "nacl_x64", "ppc", "ppc64",
            "macos", "windows", "linux", "aix"]:
  VARIABLES[var] = var