MIPS: Add big-endian support for MIPS.

Important notices:
- The snapshot cannot be created for a big-endian target in a cross-compilation
  environment on a little-endian host using the simulator.
- For i18n support to work on a big-endian target, the icudt46b.dat and
  icudt46b_dat.S files must be generated and upstreamed to the ICU repository.
- The mjsunit 'nans' test is endian dependent; it is skipped for the mips target.
- The zlib and Mandreel tests from the Octane 2.0 benchmark are endian dependent
  due to their use of typed arrays.

TEST=
BUG=
R=jkummerow@chromium.org, plind44@gmail.com

Review URL: https://codereview.chromium.org/228943009

Patch from Dusan Milosavljevic <Dusan.Milosavljevic@rt-rk.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@20778 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
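The typed-array endian dependence called out in the notices can be reproduced with a few lines of standalone C++ (an illustration only, not part of this patch): viewing one buffer as both words and bytes — the way Octane's zlib and Mandreel view an ArrayBuffer through different typed arrays — yields different byte sequences on little- and big-endian targets.

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Analogous to aliasing one ArrayBuffer with Uint32Array and Uint8Array.
      uint32_t word = 0x01020304;
      const uint8_t* bytes = reinterpret_cast<const uint8_t*>(&word);
      // Little-endian prints "04 03 02 01"; big-endian prints "01 02 03 04".
      std::printf("%02x %02x %02x %02x\n",
                  bytes[0], bytes[1], bytes[2], bytes[3]);
      return 0;
    }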
This commit is contained in:
parent 2a974583cd
commit 5a016958c6

Makefile (6 changed lines):
@@ -232,7 +232,7 @@ endif
 # Architectures and modes to be compiled. Consider these to be internal
 # variables, don't override them (use the targets instead).
-ARCHES = ia32 x64 arm arm64 mipsel
+ARCHES = ia32 x64 arm arm64 mips mipsel
 DEFAULT_ARCHES = ia32 x64 arm
 MODES = release debug optdebug
 DEFAULT_MODES = release debug
@@ -281,10 +281,6 @@ buildbot:
 	$(MAKE) -C "$(OUTDIR)" BUILDTYPE=$(BUILDTYPE) \
 	        builddir="$(abspath $(OUTDIR))/$(BUILDTYPE)"
 
-mips mips.release mips.debug:
-	@echo "V8 does not support big-endian MIPS builds at the moment," \
-	      "please use little-endian builds (mipsel)."
-
 # Compile targets. MODES and ARCHES are convenience targets.
 .SECONDEXPANSION:
 $(MODES): $(addsuffix .$$@,$(DEFAULT_ARCHES))
@@ -278,6 +278,57 @@
         'V8_TARGET_ARCH_IA32',
       ],
     }],  # v8_target_arch=="ia32"
+    ['v8_target_arch=="mips"', {
+      'defines': [
+        'V8_TARGET_ARCH_MIPS',
+      ],
+      'variables': {
+        'mipscompiler': '<!($(echo <(CXX)) -v 2>&1 | grep -q "^Target: mips" && echo "yes" || echo "no")',
+      },
+      'conditions': [
+        ['mipscompiler=="yes"', {
+          'target_conditions': [
+            ['_toolset=="target"', {
+              'cflags': ['-EB'],
+              'ldflags': ['-EB'],
+              'conditions': [
+                [ 'v8_use_mips_abi_hardfloat=="true"', {
+                  'cflags': ['-mhard-float'],
+                  'ldflags': ['-mhard-float'],
+                }, {
+                  'cflags': ['-msoft-float'],
+                  'ldflags': ['-msoft-float'],
+                }],
+                ['mips_arch_variant=="mips32r2"', {
+                  'cflags': ['-mips32r2', '-Wa,-mips32r2'],
+                }],
+                ['mips_arch_variant=="mips32r1"', {
+                  'cflags': ['-mips32', '-Wa,-mips32'],
+                }],
+              ],
+            }],
+          ],
+        }],
+        [ 'v8_can_use_fpu_instructions=="true"', {
+          'defines': [
+            'CAN_USE_FPU_INSTRUCTIONS',
+          ],
+        }],
+        [ 'v8_use_mips_abi_hardfloat=="true"', {
+          'defines': [
+            '__mips_hard_float=1',
+            'CAN_USE_FPU_INSTRUCTIONS',
+          ],
+        }, {
+          'defines': [
+            '__mips_soft_float=1'
+          ],
+        }],
+        ['mips_arch_variant=="mips32r2"', {
+          'defines': ['_MIPS_ARCH_MIPS32R2',],
+        }],
+      ],
+    }],  # v8_target_arch=="mips"
     ['v8_target_arch=="mipsel"', {
       'defines': [
         'V8_TARGET_ARCH_MIPS',
@@ -380,7 +431,7 @@
     ['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
        or OS=="netbsd" or OS=="mac" or OS=="android" or OS=="qnx") and \
       (v8_target_arch=="arm" or v8_target_arch=="ia32" or \
-       v8_target_arch=="mipsel")', {
+       v8_target_arch=="mips" or v8_target_arch=="mipsel")', {
      # Check whether the host compiler and target compiler support the
      # '-m32' option and set it if so.
      'target_conditions': [
@@ -75,7 +75,11 @@ inline unsigned int FastD2UI(double x) {
   if (x < k2Pow52) {
     x += k2Pow52;
     uint32_t result;
+#ifndef V8_TARGET_BIG_ENDIAN
     Address mantissa_ptr = reinterpret_cast<Address>(&x);
+#else
+    Address mantissa_ptr = reinterpret_cast<Address>(&x) + kIntSize;
+#endif
     // Copy least significant 32 bits of mantissa.
     OS::MemCopy(&result, mantissa_ptr, sizeof(result));
     return negative ? ~result + 1 : result;
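The hunk above relies on the usual 2^52 trick: after the addition, the low 32 bits of the double's mantissa hold the integer value, but the byte offset of that word inside the double depends on endianness. A minimal standalone sketch of the idea (not V8 code; the endianness macros are GCC-style and an assumption of the sketch):

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    int main() {
      double x = 123456.0 + 4503599627370496.0;  // 123456 + 2^52
      const unsigned char* mantissa_ptr =
          reinterpret_cast<const unsigned char*>(&x);
    #if defined(__MIPSEB__) || \
        (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
      mantissa_ptr += sizeof(uint32_t);  // low mantissa word is the second word
    #endif
      uint32_t low;
      std::memcpy(&low, mantissa_ptr, sizeof(low));
      std::printf("%u\n", low);  // prints 123456 on either endianness
      return 0;
    }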
@@ -78,7 +78,7 @@ namespace internal {
 #elif defined(__ARMEL__)
 #define V8_HOST_ARCH_ARM 1
 #define V8_HOST_ARCH_32_BIT 1
-#elif defined(__MIPSEL__)
+#elif defined(__MIPSEB__) || defined(__MIPSEL__)
 #define V8_HOST_ARCH_MIPS 1
 #define V8_HOST_ARCH_32_BIT 1
 #else
@@ -108,7 +108,7 @@ namespace internal {
 #define V8_TARGET_ARCH_ARM64 1
 #elif defined(__ARMEL__)
 #define V8_TARGET_ARCH_ARM 1
-#elif defined(__MIPSEL__)
+#elif defined(__MIPSEB__) || defined(__MIPSEL__)
 #define V8_TARGET_ARCH_MIPS 1
 #else
 #error Target architecture was not detected as supported by v8
@@ -147,7 +147,7 @@ namespace internal {
 #endif
 #endif
 
-// Determine architecture endiannes (we only support little-endian).
+// Determine architecture endianness.
 #if V8_TARGET_ARCH_IA32
 #define V8_TARGET_LITTLE_ENDIAN 1
 #elif V8_TARGET_ARCH_X64
@@ -157,9 +157,13 @@ namespace internal {
 #elif V8_TARGET_ARCH_ARM64
 #define V8_TARGET_LITTLE_ENDIAN 1
 #elif V8_TARGET_ARCH_MIPS
-#define V8_TARGET_LITTLE_ENDIAN 1
+#if defined(__MIPSEB__)
+#define V8_TARGET_BIG_ENDIAN 1
 #else
-#error Unknown target architecture endiannes
+#define V8_TARGET_LITTLE_ENDIAN 1
+#endif
+#else
+#error Unknown target architecture endianness
 #endif
 
 // Determine whether the architecture uses an out-of-line constant pool.
@@ -1081,7 +1081,9 @@ class IndexedReferencesExtractor : public ObjectVisitor {
       Address field = obj->address() + offset;
       ASSERT(!Memory::Object_at(field)->IsFailure());
       ASSERT(Memory::Object_at(field)->IsHeapObject());
-      *field |= kFailureTag;
+      Object* untagged = *reinterpret_cast<Object**>(field);
+      intptr_t tagged = reinterpret_cast<intptr_t>(untagged) | kFailureTag;
+      *reinterpret_cast<Object**>(field) = reinterpret_cast<Object*>(tagged);
     }
 
  private:
@@ -1655,10 +1655,12 @@ void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
 void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
   // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
   // load to two 32-bit loads.
-  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
+  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
+      Register::kMantissaOffset);
   FPURegister nextfpreg;
   nextfpreg.setcode(fd.code() + 1);
-  GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + 4);
+  GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ +
+      Register::kExponentOffset);
 }
@@ -1670,10 +1672,12 @@ void Assembler::swc1(FPURegister fd, const MemOperand& src) {
 void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
   // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
   // store to two 32-bit stores.
-  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
+  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
+      Register::kMantissaOffset);
   FPURegister nextfpreg;
   nextfpreg.setcode(fd.code() + 1);
-  GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + 4);
+  GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ +
+      Register::kExponentOffset);
 }
@@ -77,6 +77,16 @@ struct Register {
   static const int kSizeInBytes = 4;
   static const int kCpRegister = 23;  // cp (s7) is the 23rd register.
 
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+  static const int kMantissaOffset = 0;
+  static const int kExponentOffset = 4;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+  static const int kMantissaOffset = 4;
+  static const int kExponentOffset = 0;
+#else
+#error Unknown endianness
+#endif
+
   inline static int NumAllocatableRegisters();
 
   static int ToAllocationIndex(Register reg) {
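What these offsets encode can be shown with a standalone sketch (not V8 code; the endianness macros are GCC-style and an assumption of the sketch): for the double 1.0, the word carrying the sign and exponent (0x3ff00000) sits at kExponentOffset and the all-zero low mantissa word at kMantissaOffset, on either byte order.

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    int main() {
    #if defined(__MIPSEB__) || \
        (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
      const int kMantissaOffset = 4, kExponentOffset = 0;
    #else
      const int kMantissaOffset = 0, kExponentOffset = 4;
    #endif
      const double one = 1.0;  // IEEE-754 bits: 0x3FF0000000000000
      const char* p = reinterpret_cast<const char*>(&one);
      uint32_t mantissa, exponent;
      std::memcpy(&mantissa, p + kMantissaOffset, sizeof(mantissa));
      std::memcpy(&exponent, p + kExponentOffset, sizeof(exponent));
      // Prints "3ff00000 00000000" regardless of endianness.
      std::printf("%08x %08x\n", exponent, mantissa);
      return 0;
    }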
@@ -470,9 +470,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
 
     if (count_constructions) {
       __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
-      __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
-      __ Ext(a0, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
-             kBitsPerByte);
+      __ lbu(a0, FieldMemOperand(a2, Map::kPreAllocatedPropertyFieldsOffset));
       __ sll(at, a0, kPointerSizeLog2);
       __ addu(a0, t5, at);
       __ sll(at, a3, kPointerSizeLog2);
@@ -525,12 +523,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
     __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
     // The field instance sizes contains both pre-allocated property fields
     // and in-object properties.
-    __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
-    __ Ext(t6, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
-           kBitsPerByte);
+    __ lbu(t6, FieldMemOperand(a2, Map::kPreAllocatedPropertyFieldsOffset));
     __ Addu(a3, a3, Operand(t6));
-    __ Ext(t6, a0, Map::kInObjectPropertiesByte * kBitsPerByte,
-           kBitsPerByte);
+    __ lbu(t6, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
     __ subu(a3, a3, t6);
 
     // Done if no extra properties are to be allocated.
@@ -559,13 +559,14 @@ class ConvertToDoubleStub : public PlatformCodeStub {
 
 
 void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
-#ifndef BIG_ENDIAN_FLOATING_POINT
-  Register exponent = result1_;
-  Register mantissa = result2_;
-#else
-  Register exponent = result2_;
-  Register mantissa = result1_;
-#endif
+  Register exponent, mantissa;
+  if (kArchEndian == kLittle) {
+    exponent = result1_;
+    mantissa = result2_;
+  } else {
+    exponent = result2_;
+    mantissa = result1_;
+  }
   Label not_special;
   // Convert from Smi to integer.
   __ sra(source_, source_, kSmiTagSize);
@@ -671,8 +672,10 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
   Register input_high = scratch2;
   Register input_low = scratch3;
 
-  __ lw(input_low, MemOperand(input_reg, double_offset));
-  __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize));
+  __ lw(input_low,
+        MemOperand(input_reg, double_offset + Register::kMantissaOffset));
+  __ lw(input_high,
+        MemOperand(input_reg, double_offset + Register::kExponentOffset));
 
   Label normal_exponent, restore_sign;
   // Extract the biased exponent in result.
@@ -3532,9 +3535,15 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
   {
     Label loop;
     __ bind(&loop);
-    __ lwr(scratch1, MemOperand(src));
-    __ Addu(src, src, Operand(kReadAlignment));
-    __ lwl(scratch1, MemOperand(src, -1));
+    if (kArchEndian == kBig) {
+      __ lwl(scratch1, MemOperand(src));
+      __ Addu(src, src, Operand(kReadAlignment));
+      __ lwr(scratch1, MemOperand(src, -1));
+    } else {
+      __ lwr(scratch1, MemOperand(src));
+      __ Addu(src, src, Operand(kReadAlignment));
+      __ lwl(scratch1, MemOperand(src, -1));
+    }
     __ sw(scratch1, MemOperand(dest));
     __ Addu(dest, dest, Operand(kReadAlignment));
    __ Subu(scratch2, limit, dest);
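The lwr/lwl swap above reflects that the pair implements an unaligned word load whose result follows native byte order: lwr+lwl on little-endian MIPS and lwl+lwr on big-endian MIPS compose the same bytes, but those bytes mean different word values per endianness. A standalone sketch of the net effect (not V8 code; the helper name is illustrative):

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    // Net effect of the instruction pair: lwr+lwl on little-endian MIPS,
    // lwl+lwr on big-endian MIPS.
    uint32_t UnalignedLoad(const uint8_t* p) {
      uint32_t word;
      std::memcpy(&word, p, sizeof(word));  // byte order follows the host CPU
      return word;
    }

    int main() {
      uint8_t buf[8] = {0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88};
      // Little-endian: 0x55443322; big-endian: 0x22334455.
      std::printf("0x%08x\n", UnalignedLoad(buf + 1));
      return 0;
    }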
@@ -67,21 +67,13 @@ UnaryMathFunction CreateExpFunction() {
   Register temp2 = t1;
   Register temp3 = t2;
 
-  if (!IsMipsSoftFloatABI) {
-    // Input value is in f12 anyway, nothing to do.
-  } else {
-    __ Move(input, a0, a1);
-  }
+  __ MovFromFloatParameter(input);
   __ Push(temp3, temp2, temp1);
   MathExpGenerator::EmitMathExp(
       &masm, input, result, double_scratch1, double_scratch2,
       temp1, temp2, temp3);
   __ Pop(temp3, temp2, temp1);
-  if (!IsMipsSoftFloatABI) {
-    // Result is already in f0, nothing to do.
-  } else {
-    __ Move(v0, v1, result);
-  }
+  __ MovToFloatResult(result);
   __ Ret();
 }
@@ -167,11 +159,17 @@ OS::MemCopyUint8Function CreateMemCopyUint8Function(
       __ beq(a3, zero_reg, &aligned);  // Already aligned.
       __ subu(a2, a2, a3);  // In delay slot. a2 is the remining bytes count.
 
-      __ lwr(t8, MemOperand(a1));
-      __ addu(a1, a1, a3);
-      __ swr(t8, MemOperand(a0));
-      __ addu(a0, a0, a3);
-
+      if (kArchEndian == kLittle) {
+        __ lwr(t8, MemOperand(a1));
+        __ addu(a1, a1, a3);
+        __ swr(t8, MemOperand(a0));
+        __ addu(a0, a0, a3);
+      } else {
+        __ lwl(t8, MemOperand(a1));
+        __ addu(a1, a1, a3);
+        __ swl(t8, MemOperand(a0));
+        __ addu(a0, a0, a3);
+      }
       // Now dst/src are both aligned to (word) aligned addresses. Set a2 to
       // count how many bytes we have to copy after all the 64 byte chunks are
       // copied and a3 to the dst pointer after all the 64 byte chunks have been
@@ -323,12 +321,21 @@ OS::MemCopyUint8Function CreateMemCopyUint8Function(
       __ beq(a3, zero_reg, &ua_chk16w);
      __ subu(a2, a2, a3);  // In delay slot.
 
-      __ lwr(v1, MemOperand(a1));
-      __ lwl(v1,
-          MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
-      __ addu(a1, a1, a3);
-      __ swr(v1, MemOperand(a0));
-      __ addu(a0, a0, a3);
+      if (kArchEndian == kLittle) {
+        __ lwr(v1, MemOperand(a1));
+        __ lwl(v1,
+            MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+        __ addu(a1, a1, a3);
+        __ swr(v1, MemOperand(a0));
+        __ addu(a0, a0, a3);
+      } else {
+        __ lwl(v1, MemOperand(a1));
+        __ lwr(v1,
+            MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+        __ addu(a1, a1, a3);
+        __ swl(v1, MemOperand(a0));
+        __ addu(a0, a0, a3);
+      }
 
       // Now the dst (but not the source) is aligned. Set a2 to count how many
       // bytes we have to copy after all the 64 byte chunks are copied and a3 to
@@ -357,40 +364,77 @@ OS::MemCopyUint8Function CreateMemCopyUint8Function(
 
       __ bind(&ua_loop16w);
       __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
-      __ lwr(t0, MemOperand(a1));
-      __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
-      __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
-
-      if (pref_hint_store == kPrefHintPrepareForStore) {
-        __ sltu(v1, t9, a0);
-        __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
-      }
-      __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));  // Maybe in delay slot.
-
-      __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
-      __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
-
-      __ bind(&ua_skip_pref);
-      __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
-      __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
-      __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
-      __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
-      __ lwl(t0,
-          MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
-      __ lwl(t1,
-          MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
-      __ lwl(t2,
-          MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
-      __ lwl(t3,
-          MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
-      __ lwl(t4,
-          MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
-      __ lwl(t5,
-          MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
-      __ lwl(t6,
-          MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
-      __ lwl(t7,
-          MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+      if (kArchEndian == kLittle) {
+        __ lwr(t0, MemOperand(a1));
+        __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
+        __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
+
+        if (pref_hint_store == kPrefHintPrepareForStore) {
+          __ sltu(v1, t9, a0);
+          __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
+        }
+        __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));  // Maybe in delay slot.
+
+        __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
+        __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
+
+        __ bind(&ua_skip_pref);
+        __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
+        __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
+        __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
+        __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
+        __ lwl(t0,
+            MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwl(t1,
+            MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwl(t2,
+            MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwl(t3,
+            MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwl(t4,
+            MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwl(t5,
+            MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwl(t6,
+            MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwl(t7,
+            MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+      } else {
+        __ lwl(t0, MemOperand(a1));
+        __ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
+        __ lwl(t2, MemOperand(a1, 2, loadstore_chunk));
+
+        if (pref_hint_store == kPrefHintPrepareForStore) {
+          __ sltu(v1, t9, a0);
+          __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
+        }
+        __ lwl(t3, MemOperand(a1, 3, loadstore_chunk));  // Maybe in delay slot.
+
+        __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
+        __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
+
+        __ bind(&ua_skip_pref);
+        __ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
+        __ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
+        __ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
+        __ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
+        __ lwr(t0,
+            MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwr(t1,
+            MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwr(t2,
+            MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwr(t3,
+            MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwr(t4,
+            MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwr(t5,
+            MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwr(t6,
+            MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwr(t7,
+            MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+      }
       __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
       __ sw(t0, MemOperand(a0));
       __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
@@ -400,30 +444,57 @@ OS::MemCopyUint8Function CreateMemCopyUint8Function(
       __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
       __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
       __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
-      __ lwr(t0, MemOperand(a1, 8, loadstore_chunk));
-      __ lwr(t1, MemOperand(a1, 9, loadstore_chunk));
-      __ lwr(t2, MemOperand(a1, 10, loadstore_chunk));
-      __ lwr(t3, MemOperand(a1, 11, loadstore_chunk));
-      __ lwr(t4, MemOperand(a1, 12, loadstore_chunk));
-      __ lwr(t5, MemOperand(a1, 13, loadstore_chunk));
-      __ lwr(t6, MemOperand(a1, 14, loadstore_chunk));
-      __ lwr(t7, MemOperand(a1, 15, loadstore_chunk));
-      __ lwl(t0,
-          MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
-      __ lwl(t1,
-          MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
-      __ lwl(t2,
-          MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
-      __ lwl(t3,
-          MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
-      __ lwl(t4,
-          MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
-      __ lwl(t5,
-          MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
-      __ lwl(t6,
-          MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
-      __ lwl(t7,
-          MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
+      if (kArchEndian == kLittle) {
+        __ lwr(t0, MemOperand(a1, 8, loadstore_chunk));
+        __ lwr(t1, MemOperand(a1, 9, loadstore_chunk));
+        __ lwr(t2, MemOperand(a1, 10, loadstore_chunk));
+        __ lwr(t3, MemOperand(a1, 11, loadstore_chunk));
+        __ lwr(t4, MemOperand(a1, 12, loadstore_chunk));
+        __ lwr(t5, MemOperand(a1, 13, loadstore_chunk));
+        __ lwr(t6, MemOperand(a1, 14, loadstore_chunk));
+        __ lwr(t7, MemOperand(a1, 15, loadstore_chunk));
+        __ lwl(t0,
+            MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwl(t1,
+            MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwl(t2,
+            MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwl(t3,
+            MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwl(t4,
+            MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwl(t5,
+            MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwl(t6,
+            MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwl(t7,
+            MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
+      } else {
+        __ lwl(t0, MemOperand(a1, 8, loadstore_chunk));
+        __ lwl(t1, MemOperand(a1, 9, loadstore_chunk));
+        __ lwl(t2, MemOperand(a1, 10, loadstore_chunk));
+        __ lwl(t3, MemOperand(a1, 11, loadstore_chunk));
+        __ lwl(t4, MemOperand(a1, 12, loadstore_chunk));
+        __ lwl(t5, MemOperand(a1, 13, loadstore_chunk));
+        __ lwl(t6, MemOperand(a1, 14, loadstore_chunk));
+        __ lwl(t7, MemOperand(a1, 15, loadstore_chunk));
+        __ lwr(t0,
+            MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwr(t1,
+            MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwr(t2,
+            MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwr(t3,
+            MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwr(t4,
+            MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwr(t5,
+            MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwr(t6,
+            MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwr(t7,
+            MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
+      }
       __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
       __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
       __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
@@ -447,30 +518,57 @@ OS::MemCopyUint8Function CreateMemCopyUint8Function(
 
       __ beq(a2, t8, &ua_chk1w);
       __ nop();  // In delay slot.
-      __ lwr(t0, MemOperand(a1));
-      __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
-      __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
-      __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));
-      __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
-      __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
-      __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
-      __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
-      __ lwl(t0,
-          MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
-      __ lwl(t1,
-          MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
-      __ lwl(t2,
-          MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
-      __ lwl(t3,
-          MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
-      __ lwl(t4,
-          MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
-      __ lwl(t5,
-          MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
-      __ lwl(t6,
-          MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
-      __ lwl(t7,
-          MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+      if (kArchEndian == kLittle) {
+        __ lwr(t0, MemOperand(a1));
+        __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
+        __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
+        __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));
+        __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
+        __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
+        __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
+        __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
+        __ lwl(t0,
+            MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwl(t1,
+            MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwl(t2,
+            MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwl(t3,
+            MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwl(t4,
+            MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwl(t5,
+            MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwl(t6,
+            MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwl(t7,
+            MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+      } else {
+        __ lwl(t0, MemOperand(a1));
+        __ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
+        __ lwl(t2, MemOperand(a1, 2, loadstore_chunk));
+        __ lwl(t3, MemOperand(a1, 3, loadstore_chunk));
+        __ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
+        __ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
+        __ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
+        __ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
+        __ lwr(t0,
+            MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwr(t1,
+            MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwr(t2,
+            MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwr(t3,
+            MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwr(t4,
+            MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwr(t5,
+            MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwr(t6,
+            MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+        __ lwr(t7,
+            MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+      }
       __ addiu(a1, a1, 8 * loadstore_chunk);
       __ sw(t0, MemOperand(a0));
       __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
@@ -491,9 +589,15 @@ OS::MemCopyUint8Function CreateMemCopyUint8Function(
       __ addu(a3, a0, a3);
 
       __ bind(&ua_wordCopy_loop);
-      __ lwr(v1, MemOperand(a1));
-      __ lwl(v1,
-          MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+      if (kArchEndian == kLittle) {
+        __ lwr(v1, MemOperand(a1));
+        __ lwl(v1,
+            MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+      } else {
+        __ lwl(v1, MemOperand(a1));
+        __ lwr(v1,
+            MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+      }
       __ addiu(a0, a0, loadstore_chunk);
       __ addiu(a1, a1, loadstore_chunk);
      __ bne(a0, a3, &ua_wordCopy_loop);
@@ -722,8 +826,8 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
     __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(t5));
   }
-  __ sw(t0, MemOperand(t3));  // mantissa
-  __ sw(t1, MemOperand(t3, kIntSize));  // exponent
+  __ sw(t0, MemOperand(t3, Register::kMantissaOffset));  // mantissa
+  __ sw(t1, MemOperand(t3, Register::kExponentOffset));  // exponent
   __ Addu(t3, t3, kDoubleSize);
 
   __ bind(&entry);
@@ -773,7 +877,9 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
   __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
 
   // Prepare for conversion loop.
-  __ Addu(t0, t0, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
+  __ Addu(t0, t0, Operand(
+      FixedDoubleArray::kHeaderSize - kHeapObjectTag
+      + Register::kExponentOffset));
   __ Addu(a3, t2, Operand(FixedArray::kHeaderSize));
   __ Addu(t2, t2, Operand(kHeapObjectTag));
   __ sll(t1, t1, 1);
@@ -782,7 +888,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
   __ LoadRoot(t5, Heap::kHeapNumberMapRootIndex);
   // Using offsetted addresses.
   // a3: begin of destination FixedArray element fields, not tagged
-  // t0: begin of source FixedDoubleArray element fields, not tagged, +4
+  // t0: begin of source FixedDoubleArray element fields, not tagged,
+  //     points to the exponent
   // t1: end of destination FixedArray, not tagged
   // t2: destination FixedArray
   // t3: the-hole pointer
@@ -805,7 +912,9 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
   // Non-hole double, copy value into a heap number.
   __ AllocateHeapNumber(a2, a0, t6, t5, &gc_required);
   // a2: new heap number
-  __ lw(a0, MemOperand(t0, -12));
+  // Load mantissa of current element, t0 point to exponent of next element.
+  __ lw(a0, MemOperand(t0, (Register::kMantissaOffset
+      - Register::kExponentOffset - kDoubleSize)));
   __ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset));
   __ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset));
   __ mov(a0, a3);
@@ -1010,8 +1119,8 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
   __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
   __ sll(at, temp2, 3);
   __ Addu(temp3, temp3, Operand(at));
-  __ lw(temp2, MemOperand(temp3, 0));
-  __ lw(temp3, MemOperand(temp3, kPointerSize));
+  __ lw(temp2, MemOperand(temp3, Register::kMantissaOffset));
+  __ lw(temp3, MemOperand(temp3, Register::kExponentOffset));
   // The first word is loaded is the lower number register.
   if (temp2.code() < temp3.code()) {
     __ sll(at, temp1, 20);
@@ -55,6 +55,18 @@ enum ArchVariants {
 static const ArchVariants kArchVariant = kMips32r1;
 #endif
 
+enum Endianness {
+  kLittle,
+  kBig
+};
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+static const Endianness kArchEndian = kLittle;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+static const Endianness kArchEndian = kBig;
+#else
+#error Unknown endianness
+#endif
+
 #if(defined(__mips_hard_float) && __mips_hard_float != 0)
 // Use floating-point coprocessor instructions. This flag is raised when
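A standalone sanity check for the compile-time kArchEndian selection above (illustrative only, not part of this patch; the helper name is ours): a one-word runtime probe must agree with whichever branch the preprocessor picked.

    #include <cstdint>
    #include <cstdio>

    enum Endianness { kLittle, kBig };

    Endianness RuntimeEndianness() {
      const uint32_t probe = 1;
      // Little-endian stores the least significant byte first.
      return *reinterpret_cast<const uint8_t*>(&probe) == 1 ? kLittle : kBig;
    }

    int main() {
      std::printf("%s-endian\n",
                  RuntimeEndianness() == kLittle ? "little" : "big");
      return 0;
    }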
@@ -69,6 +81,15 @@ const bool IsMipsSoftFloatABI = true;
 const bool IsMipsSoftFloatABI = true;
 #endif
 
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+const uint32_t kHoleNanUpper32Offset = 4;
+const uint32_t kHoleNanLower32Offset = 0;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+const uint32_t kHoleNanUpper32Offset = 0;
+const uint32_t kHoleNanLower32Offset = 4;
+#else
+#error Unknown endianness
+#endif
+
 // Defines constants and accessor classes to assemble, disassemble and
 // simulate MIPS32 instructions.
@@ -918,7 +918,7 @@ static void KeyedStoreGenerateGenericHelper(
   // We have to see if the double version of the hole is present. If so
   // go to the runtime.
   __ Addu(address, elements,
-          Operand(FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)
+          Operand(FixedDoubleArray::kHeaderSize + kHoleNanUpper32Offset
                   - kHeapObjectTag));
   __ sll(at, key, kPointerSizeLog2);
   __ addu(address, address, at);
@@ -3219,7 +3219,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
   __ ldc1(result, MemOperand(scratch));
 
   if (instr->hydrogen()->RequiresHoleCheck()) {
-    __ lw(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
+    __ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset));
     DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
   }
 }
@@ -3313,13 +3313,24 @@ void MacroAssembler::CopyBytes(Register src,
 
   // TODO(kalmard) check if this can be optimized to use sw in most cases.
   // Can't use unaligned access - copy byte by byte.
-  sb(scratch, MemOperand(dst, 0));
-  srl(scratch, scratch, 8);
-  sb(scratch, MemOperand(dst, 1));
-  srl(scratch, scratch, 8);
-  sb(scratch, MemOperand(dst, 2));
-  srl(scratch, scratch, 8);
-  sb(scratch, MemOperand(dst, 3));
+  if (kArchEndian == kLittle) {
+    sb(scratch, MemOperand(dst, 0));
+    srl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 1));
+    srl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 2));
+    srl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 3));
+  } else {
+    sb(scratch, MemOperand(dst, 3));
+    srl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 2));
+    srl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 1));
+    srl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 0));
+  }
+
   Addu(dst, dst, 4);
 
   Subu(length, length, Operand(kPointerSize));
@@ -3424,11 +3435,12 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
   bind(&have_double_value);
   sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
   Addu(scratch1, scratch1, elements_reg);
-  sw(mantissa_reg, FieldMemOperand(
-      scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
-  uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
-      sizeof(kHoleNanLower32);
-  sw(exponent_reg, FieldMemOperand(scratch1, offset));
+  sw(mantissa_reg,
+     FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
+         + kHoleNanLower32Offset));
+  sw(exponent_reg,
+     FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
+         + kHoleNanUpper32Offset));
   jmp(&done);
 
   bind(&maybe_nan);
@@ -3526,7 +3538,11 @@ void MacroAssembler::CheckMap(Register obj,
 
 void MacroAssembler::MovFromFloatResult(DoubleRegister dst) {
   if (IsMipsSoftFloatABI) {
-    Move(dst, v0, v1);
+    if (kArchEndian == kLittle) {
+      Move(dst, v0, v1);
+    } else {
+      Move(dst, v1, v0);
+    }
   } else {
     Move(dst, f0);  // Reg f0 is o32 ABI FP return value.
   }
@@ -3535,7 +3551,11 @@ void MacroAssembler::MovFromFloatResult(DoubleRegister dst) {
 
 void MacroAssembler::MovFromFloatParameter(DoubleRegister dst) {
   if (IsMipsSoftFloatABI) {
-    Move(dst, a0, a1);
+    if (kArchEndian == kLittle) {
+      Move(dst, a0, a1);
+    } else {
+      Move(dst, a1, a0);
+    }
   } else {
     Move(dst, f12);  // Reg f12 is o32 ABI FP first argument value.
   }
@@ -3546,7 +3566,11 @@ void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
   if (!IsMipsSoftFloatABI) {
     Move(f12, src);
   } else {
-    Move(a0, a1, src);
+    if (kArchEndian == kLittle) {
+      Move(a0, a1, src);
+    } else {
+      Move(a1, a0, src);
+    }
   }
 }
@@ -3555,7 +3579,11 @@ void MacroAssembler::MovToFloatResult(DoubleRegister src) {
   if (!IsMipsSoftFloatABI) {
     Move(f0, src);
   } else {
-    Move(v0, v1, src);
+    if (kArchEndian == kLittle) {
+      Move(v0, v1, src);
+    } else {
+      Move(v1, v0, src);
+    }
   }
 }
@@ -3572,8 +3600,13 @@ void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
       Move(f14, src2);
     }
   } else {
-    Move(a0, a1, src1);
-    Move(a2, a3, src2);
+    if (kArchEndian == kLittle) {
+      Move(a0, a1, src1);
+      Move(a2, a3, src2);
+    } else {
+      Move(a1, a0, src1);
+      Move(a3, a2, src2);
+    }
   }
 }
@@ -1961,11 +1961,18 @@ class HeapNumber: public HeapObject {
   // Layout description.
   static const int kValueOffset = HeapObject::kHeaderSize;
   // IEEE doubles are two 32 bit words. The first is just mantissa, the second
-  // is a mixture of sign, exponent and mantissa. Our current platforms are all
-  // little endian apart from non-EABI arm which is little endian with big
-  // endian floating point word ordering!
+  // is a mixture of sign, exponent and mantissa. The offsets of two 32 bit
+  // words within double numbers are endian dependent and they are set
+  // accordingly.
+#if defined(V8_TARGET_LITTLE_ENDIAN)
   static const int kMantissaOffset = kValueOffset;
   static const int kExponentOffset = kValueOffset + 4;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+  static const int kMantissaOffset = kValueOffset + 4;
+  static const int kExponentOffset = kValueOffset;
+#else
+#error Unknown byte ordering
+#endif
 
   static const int kSize = kValueOffset + kDoubleSize;
   static const uint32_t kSignMask = 0x80000000u;
@@ -7418,9 +7425,9 @@ class SharedFunctionInfo: public HeapObject {
   // The construction counter for inobject slack tracking is stored in the
   // most significant byte of compiler_hints which is otherwise unused.
   // Its offset depends on the endian-ness of the architecture.
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if defined(V8_TARGET_LITTLE_ENDIAN)
   static const int kConstructionCountOffset = kCompilerHintsOffset + 3;
-#elif __BYTE_ORDER == __BIG_ENDIAN
+#elif defined(V8_TARGET_BIG_ENDIAN)
   static const int kConstructionCountOffset = kCompilerHintsOffset + 0;
 #else
 #error Unknown byte ordering
@@ -7494,12 +7501,12 @@ class SharedFunctionInfo: public HeapObject {
   static const int kNativeBitWithinByte =
       (kNative + kCompilerHintsSmiTagSize) % kBitsPerByte;
 
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if defined(V8_TARGET_LITTLE_ENDIAN)
   static const int kStrictModeByteOffset = kCompilerHintsOffset +
       (kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
   static const int kNativeByteOffset = kCompilerHintsOffset +
       (kNative + kCompilerHintsSmiTagSize) / kBitsPerByte;
-#elif __BYTE_ORDER == __BIG_ENDIAN
+#elif defined(V8_TARGET_BIG_ENDIAN)
   static const int kStrictModeByteOffset = kCompilerHintsOffset +
       (kCompilerHintsSize - 1) -
       ((kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
@@ -9158,8 +9158,15 @@ static inline ObjectPair MakePair(MaybeObject* x, MaybeObject* y) {
 #else
 typedef uint64_t ObjectPair;
 static inline ObjectPair MakePair(MaybeObject* x, MaybeObject* y) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
   return reinterpret_cast<uint32_t>(x) |
       (reinterpret_cast<ObjectPair>(y) << 32);
+#elif defined(V8_TARGET_BIG_ENDIAN)
+  return reinterpret_cast<uint32_t>(y) |
+      (reinterpret_cast<ObjectPair>(x) << 32);
+#else
+#error Unknown endianness
+#endif
 }
 #endif
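The rationale behind the MakePair change, as a standalone sketch (not V8 code): the 64-bit pair is handed back through a register pair, and on a big-endian target the first register holds the high word, so x must be shifted into the high half there for the caller to find it in the same place on both byte orders. Treat the register-pair reading of the o32 convention as our assumption; the runtime flag below stands in for the compile-time #if in the patch.

    #include <cstdint>

    typedef uint64_t ObjectPair;

    // big_endian stands in for the preprocessor choice in the patch.
    ObjectPair MakePair(uint32_t x, uint32_t y, bool big_endian) {
      // Little-endian: x in the low word; big-endian: x in the high word.
      return big_endian ? (static_cast<ObjectPair>(x) << 32) | y
                        : (static_cast<ObjectPair>(y) << 32) | x;
    }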
@@ -186,7 +186,7 @@
 }],  # 'arch == arm'
 
 ##############################################################################
-['arch == mipsel', {
+['arch == mipsel or arch == mips', {
 
   # BUG(2657): Test sometimes times out on MIPS simulator.
   'test-thread-termination/TerminateMultipleV8ThreadsDefaultIsolate': [PASS, TIMEOUT],
@@ -196,7 +196,7 @@
   'test-serialize/DeserializeFromSecondSerializationAndRunScript2': [SKIP],
   'test-serialize/DeserializeAndRunScript2': [SKIP],
   'test-serialize/DeserializeFromSecondSerialization': [SKIP],
-}],  # 'arch == mipsel'
+}],  # 'arch == mipsel or arch == mips'
 
 ##############################################################################
 ['arch == android_arm or arch == android_ia32', {
@@ -533,11 +533,21 @@ TEST(MIPS6) {
   USE(dummy);
 
   CHECK_EQ(0x11223344, t.r1);
+#if __BYTE_ORDER == __LITTLE_ENDIAN
   CHECK_EQ(0x3344, t.r2);
   CHECK_EQ(0xffffbbcc, t.r3);
   CHECK_EQ(0x0000bbcc, t.r4);
   CHECK_EQ(0xffffffcc, t.r5);
   CHECK_EQ(0x3333bbcc, t.r6);
+#elif __BYTE_ORDER == __BIG_ENDIAN
+  CHECK_EQ(0x1122, t.r2);
+  CHECK_EQ(0xffff99aa, t.r3);
+  CHECK_EQ(0x000099aa, t.r4);
+  CHECK_EQ(0xffffff99, t.r5);
+  CHECK_EQ(0x99aa3333, t.r6);
+#else
+#error Unknown endianness
+#endif
 }
|
||||
Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
|
||||
USE(dummy);
|
||||
|
||||
#if __BYTE_ORDER == __LITTLE_ENDIAN
|
||||
CHECK_EQ(0x44bbccdd, t.lwl_0);
|
||||
CHECK_EQ(0x3344ccdd, t.lwl_1);
|
||||
CHECK_EQ(0x223344dd, t.lwl_2);
|
||||
@ -961,6 +972,29 @@ TEST(MIPS11) {
|
||||
CHECK_EQ(0xbbccdd44, t.swr_1);
|
||||
CHECK_EQ(0xccdd3344, t.swr_2);
|
||||
CHECK_EQ(0xdd223344, t.swr_3);
|
||||
#elif __BYTE_ORDER == __BIG_ENDIAN
|
||||
CHECK_EQ(0x11223344, t.lwl_0);
|
||||
CHECK_EQ(0x223344dd, t.lwl_1);
|
||||
CHECK_EQ(0x3344ccdd, t.lwl_2);
|
||||
CHECK_EQ(0x44bbccdd, t.lwl_3);
|
||||
|
||||
CHECK_EQ(0xaabbcc11, t.lwr_0);
|
||||
CHECK_EQ(0xaabb1122, t.lwr_1);
|
||||
CHECK_EQ(0xaa112233, t.lwr_2);
|
||||
CHECK_EQ(0x11223344, t.lwr_3);
|
||||
|
||||
CHECK_EQ(0xaabbccdd, t.swl_0);
|
||||
CHECK_EQ(0x11aabbcc, t.swl_1);
|
||||
CHECK_EQ(0x1122aabb, t.swl_2);
|
||||
CHECK_EQ(0x112233aa, t.swl_3);
|
||||
|
||||
CHECK_EQ(0xdd223344, t.swr_0);
|
||||
CHECK_EQ(0xccdd3344, t.swr_1);
|
||||
CHECK_EQ(0xbbccdd44, t.swr_2);
|
||||
CHECK_EQ(0xaabbccdd, t.swr_3);
|
||||
#else
|
||||
#error Unknown endianness
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
|
@@ -59,7 +59,7 @@ using namespace ::v8::internal;
   do { \
     ASM("mov x16, sp; str x16, %0" : "=g" (sp_addr)); \
   } while (0)
-#elif defined(__MIPSEL__)
+#elif defined(__MIPSEB__) || defined(__MIPSEL__)
 #define GET_STACK_POINTER() \
   static int sp_addr = 0; \
   do { \
@@ -70,7 +70,7 @@
 ##############################################################################
 # These use a built-in that's only present in debug mode. They take
 # too long to run in debug mode on ARM and MIPS.
-'fuzz-natives-part*': [PASS, ['mode == release or arch == arm or arch == android_arm or arch == android_arm64 or arch == mipsel', SKIP]],
+'fuzz-natives-part*': [PASS, ['mode == release or arch == arm or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips', SKIP]],
 
 'big-object-literal': [PASS, ['arch == arm or arch == android_arm or arch == android_arm64', SKIP]],
 
@@ -78,7 +78,7 @@
 'array-constructor': [PASS, TIMEOUT],
 
 # Very slow on ARM and MIPS, contains no architecture dependent code.
-'unicode-case-overoptimization': [PASS, NO_VARIANTS, ['arch == arm or arch == android_arm or arch == android_arm64 or arch == mipsel', TIMEOUT]],
+'unicode-case-overoptimization': [PASS, NO_VARIANTS, ['arch == arm or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips', TIMEOUT]],
 
 ##############################################################################
 # This test expects to reach a certain recursion depth, which may not work
@@ -122,6 +122,11 @@
   # BUG(v8:2989). PASS/FAIL on linux32 because crankshaft is turned off for
   # nosse2. Also for arm novfp3.
   'regress/regress-2989': [FAIL, NO_VARIANTS, ['system == linux and arch == ia32 or arch == arm and simulator == True', PASS]],
 
+  # Skip endain dependent test for mips due to different typed views of the same
+  # array buffer.
+  'nans': [PASS, ['arch == mips', SKIP]],
+
 }],  # ALWAYS
 
 ##############################################################################
@@ -292,7 +297,7 @@
 }],  # 'arch == arm or arch == android_arm'
 
 ##############################################################################
-['arch == mipsel', {
+['arch == mipsel or arch == mips', {
 
   # Slow tests which times out in debug mode.
   'try': [PASS, ['mode == debug', SKIP]],
 
@@ -328,7 +333,7 @@
 
   # Currently always deopt on minus zero
   'math-floor-of-div-minus-zero': [SKIP],
-}],  # 'arch == mipsel'
+}],  # 'arch == mipsel or arch == mips'
 
 ##############################################################################
 # Native Client uses the ARM simulator so will behave similarly to arm
@@ -141,8 +141,8 @@
   'ecma/Date/15.9.5.28-1': [PASS, FAIL],
 
   # 1050186: Arm/MIPS vm is broken; probably unrelated to dates
-  'ecma/Array/15.4.4.5-3': [PASS, ['arch == arm or arch == mipsel', FAIL]],
-  'ecma/Date/15.9.5.22-2': [PASS, ['arch == arm or arch == mipsel', FAIL]],
+  'ecma/Array/15.4.4.5-3': [PASS, ['arch == arm or arch == mipsel or arch == mips', FAIL]],
+  'ecma/Date/15.9.5.22-2': [PASS, ['arch == arm or arch == mipsel or arch == mips', FAIL]],
 
   # Flaky test that fails due to what appears to be a bug in the test.
   # Occurs depending on current time
 
@@ -874,6 +874,25 @@
   'js1_5/GC/regress-203278-2': [PASS, TIMEOUT, NO_VARIANTS],
 }],  # 'arch == mipsel'
 
+['arch == mips', {
+
+  # BUG(3251229): Times out when running new crankshaft test script.
+  'ecma_3/RegExp/regress-311414': [SKIP],
+  'ecma/Date/15.9.5.8': [SKIP],
+  'ecma/Date/15.9.5.10-2': [SKIP],
+  'ecma/Date/15.9.5.11-2': [SKIP],
+  'ecma/Date/15.9.5.12-2': [SKIP],
+  'js1_5/Array/regress-99120-02': [SKIP],
+  'js1_5/extensions/regress-371636': [SKIP],
+  'js1_5/Regress/regress-203278-1': [SKIP],
+  'js1_5/Regress/regress-404755': [SKIP],
+  'js1_5/Regress/regress-451322': [SKIP],
+
+  # BUG(1040): Allow this test to timeout.
+  'js1_5/GC/regress-203278-2': [PASS, TIMEOUT, NO_VARIANTS],
+}],  # 'arch == mips'
+
 ['arch == arm64 and simulator_run == True', {
 
   'js1_5/GC/regress-203278-2': [SKIP],
@@ -99,7 +99,7 @@
   'S15.1.3.2_A2.5_T1': [PASS, ['mode == debug', SKIP]],
 }],  # ALWAYS
 
-['arch == arm or arch == mipsel or arch == arm64', {
+['arch == arm or arch == mipsel or arch == mips or arch == arm64', {
 
   # TODO(mstarzinger): Causes stack overflow on simulators due to eager
   # compilation of parenthesized function literals. Needs investigation.
 
@@ -112,5 +112,5 @@
   'S15.1.3.2_A2.5_T1': [SKIP],
   'S15.1.3.3_A2.3_T1': [SKIP],
   'S15.1.3.4_A2.3_T1': [SKIP],
-}],  # 'arch == arm or arch == mipsel or arch == arm64'
+}],  # 'arch == arm or arch == mipsel or arch == mips or arch == arm64'
 ]
@@ -727,7 +727,7 @@
             '../../src/ia32/stub-cache-ia32.cc',
           ],
         }],
-        ['v8_target_arch=="mipsel"', {
+        ['v8_target_arch=="mips" or v8_target_arch=="mipsel"', {
           'sources': [  ### gcmole(arch:mipsel) ###
             '../../src/mips/assembler-mips.cc',
             '../../src/mips/assembler-mips.h',
@@ -80,6 +80,7 @@ SUPPORTED_ARCHS = ["android_arm",
                    "android_ia32",
                    "arm",
                    "ia32",
+                   "mips",
                    "mipsel",
                    "nacl_ia32",
                    "nacl_x64",
@@ -90,6 +91,7 @@ SLOW_ARCHS = ["android_arm",
               "android_arm64",
               "android_ia32",
               "arm",
+              "mips",
               "mipsel",
               "nacl_ia32",
               "nacl_x64",
@@ -53,8 +53,8 @@ DEFS = {FAIL_OK: [FAIL, OKAY],
 # Support arches, modes to be written as keywords instead of strings.
 VARIABLES = {ALWAYS: True}
 for var in ["debug", "release", "android_arm", "android_arm64", "android_ia32",
-            "arm", "arm64", "ia32", "mipsel", "x64", "nacl_ia32", "nacl_x64",
-            "macos", "windows", "linux"]:
+            "arm", "arm64", "ia32", "mips", "mipsel", "x64", "nacl_ia32",
+            "nacl_x64", "macos", "windows", "linux"]:
   VARIABLES[var] = var