[cleanup] Drop handwritten KeyedStoreIC code
The handwritten-assembly implementations of both the dispatcher and the
generic stub have been replaced by TurboFan-generated stubs.

Review-Url: https://codereview.chromium.org/2523473002
Cr-Commit-Position: refs/heads/master@{#41188}
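For context, all of the stubs deleted below implement the same feedback-vector
dispatch, which the TurboFan-generated replacement reimplements: try a
monomorphic weak-cell hit first, then walk a polymorphic map/handler array,
then fall back to the megamorphic (generic) stub, and finally take the miss
path. A minimal C++ sketch of that control flow follows; every type and name
in it is a hypothetical stand-in, not a V8 API.

#include <utility>
#include <vector>

struct Map;                  // stand-in for a receiver's hidden class
using Handler = void (*)();  // stand-in for a compiled store handler

struct FeedbackSlot {
  Map* monomorphic_map = nullptr;  // weak-cell value when monomorphic
  Handler monomorphic_handler = nullptr;
  std::vector<std::pair<Map*, Handler>> polymorphic;  // map/handler pairs
  bool megamorphic = false;        // megamorphic sentinel was installed
};

// Returns the handler to tail-call, or nullptr for the miss path.
Handler Dispatch(const FeedbackSlot& slot, Map* receiver_map,
                 Handler generic_stub) {
  if (slot.monomorphic_map == receiver_map)   // monomorphic hit
    return slot.monomorphic_handler;
  for (const auto& entry : slot.polymorphic)  // polymorphic walk
    if (entry.first == receiver_map) return entry.second;
  if (slot.megamorphic) return generic_stub;  // fully generic store
  return nullptr;                             // miss: update the feedback
}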
parent 36a17e87d8
commit 5ef05d8e2c

BUILD.gn (18 changed lines)
@@ -1804,9 +1804,7 @@ v8_source_set("v8_base") {
    "src/ia32/simulator-ia32.h",
    "src/ic/ia32/access-compiler-ia32.cc",
    "src/ic/ia32/handler-compiler-ia32.cc",
    "src/ic/ia32/ic-compiler-ia32.cc",
    "src/ic/ia32/ic-ia32.cc",
    "src/ic/ia32/stub-cache-ia32.cc",
    "src/regexp/ia32/regexp-macro-assembler-ia32.cc",
    "src/regexp/ia32/regexp-macro-assembler-ia32.h",
  ]
@@ -1829,9 +1827,7 @@ v8_source_set("v8_base") {
    "src/full-codegen/x64/full-codegen-x64.cc",
    "src/ic/x64/access-compiler-x64.cc",
    "src/ic/x64/handler-compiler-x64.cc",
    "src/ic/x64/ic-compiler-x64.cc",
    "src/ic/x64/ic-x64.cc",
    "src/ic/x64/stub-cache-x64.cc",
    "src/regexp/x64/regexp-macro-assembler-x64.cc",
    "src/regexp/x64/regexp-macro-assembler-x64.h",
    "src/third_party/valgrind/valgrind.h",
@@ -1896,8 +1892,6 @@ v8_source_set("v8_base") {
    "src/ic/arm/access-compiler-arm.cc",
    "src/ic/arm/handler-compiler-arm.cc",
    "src/ic/arm/ic-arm.cc",
    "src/ic/arm/ic-compiler-arm.cc",
    "src/ic/arm/stub-cache-arm.cc",
    "src/regexp/arm/regexp-macro-assembler-arm.cc",
    "src/regexp/arm/regexp-macro-assembler-arm.h",
  ]
@@ -1955,8 +1949,6 @@ v8_source_set("v8_base") {
    "src/ic/arm64/access-compiler-arm64.cc",
    "src/ic/arm64/handler-compiler-arm64.cc",
    "src/ic/arm64/ic-arm64.cc",
    "src/ic/arm64/ic-compiler-arm64.cc",
    "src/ic/arm64/stub-cache-arm64.cc",
    "src/regexp/arm64/regexp-macro-assembler-arm64.cc",
    "src/regexp/arm64/regexp-macro-assembler-arm64.h",
  ]
@@ -1977,9 +1969,7 @@ v8_source_set("v8_base") {
    "src/full-codegen/mips/full-codegen-mips.cc",
    "src/ic/mips/access-compiler-mips.cc",
    "src/ic/mips/handler-compiler-mips.cc",
    "src/ic/mips/ic-compiler-mips.cc",
    "src/ic/mips/ic-mips.cc",
    "src/ic/mips/stub-cache-mips.cc",
    "src/mips/assembler-mips-inl.h",
    "src/mips/assembler-mips.cc",
    "src/mips/assembler-mips.h",
@@ -2019,9 +2009,7 @@ v8_source_set("v8_base") {
    "src/full-codegen/mips64/full-codegen-mips64.cc",
    "src/ic/mips64/access-compiler-mips64.cc",
    "src/ic/mips64/handler-compiler-mips64.cc",
    "src/ic/mips64/ic-compiler-mips64.cc",
    "src/ic/mips64/ic-mips64.cc",
    "src/ic/mips64/stub-cache-mips64.cc",
    "src/mips64/assembler-mips64-inl.h",
    "src/mips64/assembler-mips64.cc",
    "src/mips64/assembler-mips64.h",
@@ -2061,9 +2049,7 @@ v8_source_set("v8_base") {
    "src/full-codegen/ppc/full-codegen-ppc.cc",
    "src/ic/ppc/access-compiler-ppc.cc",
    "src/ic/ppc/handler-compiler-ppc.cc",
    "src/ic/ppc/ic-compiler-ppc.cc",
    "src/ic/ppc/ic-ppc.cc",
    "src/ic/ppc/stub-cache-ppc.cc",
    "src/ppc/assembler-ppc-inl.h",
    "src/ppc/assembler-ppc.cc",
    "src/ppc/assembler-ppc.h",
@@ -2103,9 +2089,7 @@ v8_source_set("v8_base") {
    "src/full-codegen/s390/full-codegen-s390.cc",
    "src/ic/s390/access-compiler-s390.cc",
    "src/ic/s390/handler-compiler-s390.cc",
    "src/ic/s390/ic-compiler-s390.cc",
    "src/ic/s390/ic-s390.cc",
    "src/ic/s390/stub-cache-s390.cc",
    "src/regexp/s390/regexp-macro-assembler-s390.cc",
    "src/regexp/s390/regexp-macro-assembler-s390.h",
    "src/s390/assembler-s390-inl.h",
@@ -2145,9 +2129,7 @@ v8_source_set("v8_base") {
    "src/full-codegen/x87/full-codegen-x87.cc",
    "src/ic/x87/access-compiler-x87.cc",
    "src/ic/x87/handler-compiler-x87.cc",
    "src/ic/x87/ic-compiler-x87.cc",
    "src/ic/x87/ic-x87.cc",
    "src/ic/x87/stub-cache-x87.cc",
    "src/regexp/x87/regexp-macro-assembler-x87.cc",
    "src/regexp/x87/regexp-macro-assembler-x87.h",
    "src/x87/assembler-x87-inl.h",
@@ -3057,238 +3057,6 @@ void CallICTrampolineStub::Generate(MacroAssembler* masm) {
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}


static void HandleArrayCases(MacroAssembler* masm, Register feedback,
                             Register receiver_map, Register scratch1,
                             Register scratch2, bool is_polymorphic,
                             Label* miss) {
  // feedback initially contains the feedback array
  Label next_loop, prepare_next;
  Label start_polymorphic;

  Register cached_map = scratch1;

  __ ldr(cached_map,
         FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
  __ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ cmp(receiver_map, cached_map);
  __ b(ne, &start_polymorphic);
  // found, now call handler.
  Register handler = feedback;
  __ ldr(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
  __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));

  Register length = scratch2;
  __ bind(&start_polymorphic);
  __ ldr(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
  if (!is_polymorphic) {
    // If the IC could be monomorphic we have to make sure we don't go past the
    // end of the feedback array.
    __ cmp(length, Operand(Smi::FromInt(2)));
    __ b(eq, miss);
  }

  Register too_far = length;
  Register pointer_reg = feedback;

  // +-----+------+------+-----+-----+ ... ----+
  // | map | len  | wm0  | h0  | wm1 |      hN |
  // +-----+------+------+-----+-----+ ... ----+
  //                 0      1     2        len-1
  //                              ^              ^
  //                              |              |
  //                         pointer_reg      too_far
  //                         aka feedback     scratch2
  // also need receiver_map
  // use cached_map (scratch1) to look in the weak map values.
  __ add(too_far, feedback, Operand::PointerOffsetFromSmiKey(length));
  __ add(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(pointer_reg, feedback,
         Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));

  __ bind(&next_loop);
  __ ldr(cached_map, MemOperand(pointer_reg));
  __ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ cmp(receiver_map, cached_map);
  __ b(ne, &prepare_next);
  __ ldr(handler, MemOperand(pointer_reg, kPointerSize));
  __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));

  __ bind(&prepare_next);
  __ add(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
  __ cmp(pointer_reg, too_far);
  __ b(lt, &next_loop);

  // We exhausted our array of map handler pairs.
  __ jmp(miss);
}
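The PointerOffsetFromSmiKey arithmetic above works because a FixedArray
length is stored smi-tagged; untagging and scaling it yields the byte offset
just past the last map/handler pair. A sketch of the math, assuming a 32-bit
one-bit smi tag (an assumption, not the V8 definition):

#include <cstdint>

// Assumed 32-bit ARM parameters, for illustration only.
constexpr int kPointerSize = 4;
constexpr int kSmiTagSize = 1;                           // low bit is the tag
constexpr int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length words

// Byte offset for a smi-encoded index: untag, then scale by pointer size.
// The ARM code folds this into a single add with a shifted operand.
inline std::intptr_t PointerOffsetFromSmiKey(std::intptr_t smi_key) {
  return (smi_key >> kSmiTagSize) * kPointerSize;
}

// Loop bound ("too_far"): start of the array data plus length words.
inline std::intptr_t EndOffset(std::intptr_t smi_length) {
  return kFixedArrayHeaderSize + PointerOffsetFromSmiKey(smi_length);
}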
static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
                                  Register receiver_map, Register feedback,
                                  Register vector, Register slot,
                                  Register scratch, Label* compare_map,
                                  Label* load_smi_map, Label* try_array) {
  __ JumpIfSmi(receiver, load_smi_map);
  __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ bind(compare_map);
  Register cached_map = scratch;
  // Move the weak map into the weak_cell register.
  __ ldr(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
  __ cmp(cached_map, receiver_map);
  __ b(ne, try_array);
  Register handler = feedback;
  __ add(handler, vector, Operand::PointerOffsetFromSmiKey(slot));
  __ ldr(handler,
         FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
  __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
}

void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
  KeyedStoreICStub stub(isolate(), state());
  stub.GenerateForTrampoline(masm);
}

void KeyedStoreICStub::Generate(MacroAssembler* masm) {
  GenerateImpl(masm, false);
}

void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, true);
}


static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
                                       Register receiver_map, Register scratch1,
                                       Register scratch2, Label* miss) {
  // feedback initially contains the feedback array
  Label next_loop, prepare_next;
  Label start_polymorphic;
  Label transition_call;

  Register cached_map = scratch1;
  Register too_far = scratch2;
  Register pointer_reg = feedback;
  __ ldr(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));

  // +-----+------+------+-----+-----+-----+ ... ----+
  // | map | len  | wm0  | wt0 | h0  | wm1 |      hN |
  // +-----+------+------+-----+-----+ ----+ ... ----+
  //                 0      1     2              len-1
  //                 ^                            ^
  //                 |                            |
  //             pointer_reg                    too_far
  //             aka feedback                   scratch2
  // also need receiver_map
  // use cached_map (scratch1) to look in the weak map values.
  __ add(too_far, feedback, Operand::PointerOffsetFromSmiKey(too_far));
  __ add(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(pointer_reg, feedback,
         Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));

  __ bind(&next_loop);
  __ ldr(cached_map, MemOperand(pointer_reg));
  __ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ cmp(receiver_map, cached_map);
  __ b(ne, &prepare_next);
  // Is it a transitioning store?
  __ ldr(too_far, MemOperand(pointer_reg, kPointerSize));
  __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
  __ b(ne, &transition_call);
  __ ldr(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
  __ add(pc, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));

  __ bind(&transition_call);
  __ ldr(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
  __ JumpIfSmi(too_far, miss);

  __ ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));

  // Load the map into the correct register.
  DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
  __ mov(feedback, too_far);

  __ add(pc, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));

  __ bind(&prepare_next);
  __ add(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
  __ cmp(pointer_reg, too_far);
  __ b(lt, &next_loop);

  // We exhausted our array of map handler pairs.
  __ jmp(miss);
}
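Unlike the load case above, a store entry in this diagram spans three words:
the cached map, an optional transition map (undefined when the store does not
transition), and the handler, which is why the loop advances pointer_reg by
kPointerSize * 3. A hypothetical C++ rendering of one entry (the real array
holds the maps through weak cells, not raw pointers):

struct Map;                  // stand-in for a hidden class
using Handler = void (*)();  // stand-in for a store handler

struct StoreFeedbackEntry {
  Map* cached_map;      // compared against the receiver's map
  Map* transition_map;  // nullptr models "undefined": no transition needed
  Handler handler;      // tail-called on a hit; sees the transition map
};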
void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // r1
  Register key = StoreWithVectorDescriptor::NameRegister();           // r2
  Register vector = StoreWithVectorDescriptor::VectorRegister();      // r3
  Register slot = StoreWithVectorDescriptor::SlotRegister();          // r4
  DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r0));          // r0
  Register feedback = r5;
  Register receiver_map = r6;
  Register scratch1 = r9;

  __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
  __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));

  // Try to quickly handle the monomorphic case without knowing for sure
  // if we have a weak cell in feedback. We do know it's safe to look
  // at WeakCell::kValueOffset.
  Label try_array, load_smi_map, compare_map;
  Label not_array, miss;
  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
                        scratch1, &compare_map, &load_smi_map, &try_array);

  __ bind(&try_array);
  // Is it a fixed array?
  __ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
  __ b(ne, &not_array);

  // We have a polymorphic element handler.
  Label polymorphic, try_poly_name;
  __ bind(&polymorphic);

  // We are using register r8, which is used for the embedded constant pool
  // when FLAG_enable_embedded_constant_pool is true.
  DCHECK(!FLAG_enable_embedded_constant_pool);
  Register scratch2 = r8;

  HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
                             &miss);

  __ bind(&not_array);
  // Is it generic?
  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
  __ b(ne, &try_poly_name);
  Handle<Code> megamorphic_stub =
      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);

  __ bind(&try_poly_name);
  // We might have a name in feedback, and a fixed array in the next slot.
  __ cmp(key, feedback);
  __ b(ne, &miss);
  // If the name comparison succeeded, we know we have a fixed array with
  // at least one map/handler pair.
  __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
  __ ldr(feedback,
         FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
                   &miss);

  __ bind(&miss);
  KeyedStoreIC::GenerateMiss(masm);

  __ bind(&load_smi_map);
  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
  __ jmp(&compare_map);
}


void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
  if (masm->isolate()->function_entry_hook() != NULL) {
    ProfileEntryHookStub stub(masm->isolate());
@@ -317,337 +317,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch_elements = r4;
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     scratch_elements));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(
        receiver, scratch_elements, allocation_memento_found);
  }

  // Set transitioned map.
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      r9,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Register lr contains the return address.
  Label loop, entry, convert_hole, gc_required, only_change_map, done;
  Register elements = r4;
  Register length = r5;
  Register array = r6;
  Register array_end = array;

  // target_map parameter can be clobbered.
  Register scratch1 = target_map;
  Register scratch2 = r9;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, length, array, scratch2));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ b(eq, &only_change_map);

  __ push(lr);
  __ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // length: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
  // Use lr as a temporary register.
  __ mov(lr, Operand(length, LSL, 2));
  __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
  __ Allocate(lr, array, elements, scratch2, &gc_required, DOUBLE_ALIGNMENT);
  __ sub(array, array, Operand(kHeapObjectTag));
  // array: destination FixedDoubleArray, not tagged as heap object.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // r4: source FixedArray.

  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
  __ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  // Update receiver's map.
  __ str(scratch2, MemOperand(array, HeapObject::kMapOffset));

  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch2,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ add(scratch1, array, Operand(kHeapObjectTag));
  __ str(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver,
                      JSObject::kElementsOffset,
                      scratch1,
                      scratch2,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
  __ add(array_end, scratch2, Operand(length, LSL, 2));

  // Repurpose registers no longer in use.
  Register hole_lower = elements;
  Register hole_upper = length;

  __ mov(hole_lower, Operand(kHoleNanLower32));
  __ mov(hole_upper, Operand(kHoleNanUpper32));
  // scratch1: begin of source FixedArray element fields, not tagged
  // hole_lower: kHoleNanLower32
  // hole_upper: kHoleNanUpper32
  // array_end: end of destination FixedDoubleArray, not tagged
  // scratch2: begin of FixedDoubleArray element fields, not tagged

  __ b(&entry);

  __ bind(&only_change_map);
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch2,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ b(&done);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(lr);
  __ b(fail);

  // Convert and copy elements.
  __ bind(&loop);
  __ ldr(lr, MemOperand(scratch1, 4, PostIndex));
  // lr: current element
  __ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);

  // Normal smi, convert to double and store.
  __ vmov(s0, lr);
  __ vcvt_f64_s32(d0, s0);
  __ vstr(d0, scratch2, 0);
  __ add(scratch2, scratch2, Operand(8));
  __ b(&entry);

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    // Restore a "smi-untagged" heap object.
    __ SmiTag(lr);
    __ orr(lr, lr, Operand(1));
    __ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray);
  }
  __ Strd(hole_lower, hole_upper, MemOperand(scratch2, 8, PostIndex));

  __ bind(&entry);
  __ cmp(scratch2, array_end);
  __ b(lt, &loop);

  __ pop(lr);
  __ bind(&done);
}
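The loop above converts each smi element to an IEEE-754 double and writes a
reserved NaN bit pattern (kHoleNanLower32/kHoleNanUpper32) for holes, so holes
remain distinguishable from real numbers. A hedged C++ sketch of one element's
conversion; the sentinel value and the one-bit smi tag are assumptions, not
the V8 constants:

#include <cstdint>
#include <cstring>

constexpr std::uint64_t kHoleNanBits = 0x7FF7FFFFFFF7FFFFull;  // assumed value

// Smis become doubles; the hole becomes the hole-NaN bit pattern.
inline std::uint64_t ConvertElement(std::int32_t tagged, bool is_hole) {
  if (is_hole) return kHoleNanBits;
  double d = static_cast<double>(tagged >> 1);  // untag (one-bit smi tag)
  std::uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);          // reinterpret as raw bits
  return bits;
}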
void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Register lr contains the return address.
  Label entry, loop, convert_hole, gc_required, only_change_map;
  Register elements = r4;
  Register array = r6;
  Register length = r5;
  Register scratch = r9;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, array, length, scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ b(eq, &only_change_map);

  __ push(lr);
  __ Push(target_map, receiver, key, value);
  __ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // elements: source FixedDoubleArray
  // length: number of elements (smi-tagged)

  // Allocate new FixedArray.
  // Re-use value and target_map registers, as they have been saved on the
  // stack.
  Register array_size = value;
  Register allocate_scratch = target_map;
  __ mov(array_size, Operand(FixedDoubleArray::kHeaderSize));
  __ add(array_size, array_size, Operand(length, LSL, 1));
  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
              NO_ALLOCATION_FLAGS);
  // array: destination FixedArray, tagged as heap object
  // Set destination FixedArray's length and map.
  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
  __ str(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
  __ str(scratch, FieldMemOperand(array, HeapObject::kMapOffset));

  __ sub(array, array, Operand(kHeapObjectTag));

  // Prepare for conversion loop.
  Register src_elements = elements;
  Register dst_elements = target_map;
  Register dst_end = length;
  Register heap_number_map = scratch;
  __ add(src_elements, elements,
         Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
  __ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
  __ add(dst_end, dst_elements, Operand(length, LSL, 1));

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  Label initialization_loop, initialization_loop_entry;
  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
  __ b(&initialization_loop_entry);
  __ bind(&initialization_loop);
  __ str(scratch, MemOperand(dst_elements, kPointerSize, PostIndex));
  __ bind(&initialization_loop_entry);
  __ cmp(dst_elements, dst_end);
  __ b(lt, &initialization_loop);

  __ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
  __ add(array, array, Operand(kHeapObjectTag));
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  // Using offsetted addresses in src_elements to fully take advantage of
  // post-indexing.
  // dst_elements: begin of destination FixedArray element fields, not tagged
  // src_elements: begin of source FixedDoubleArray element fields,
  //               not tagged, +4
  // dst_end: end of destination FixedArray, not tagged
  // array: destination FixedArray
  // heap_number_map: heap number map
  __ b(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(target_map, receiver, key, value);
  __ pop(lr);
  __ b(fail);

  __ bind(&loop);
  Register upper_bits = key;
  __ ldr(upper_bits, MemOperand(src_elements, 8, PostIndex));
  // upper_bits: current element's upper 32 bit
  // src_elements: address of next element's upper 32 bit
  __ cmp(upper_bits, Operand(kHoleNanUpper32));
  __ b(eq, &convert_hole);

  // Non-hole double, copy value into a heap number.
  Register heap_number = receiver;
  Register scratch2 = value;
  __ AllocateHeapNumber(heap_number, scratch2, lr, heap_number_map,
                        &gc_required);
  // heap_number: new heap number
  __ ldr(scratch2, MemOperand(src_elements, 12, NegOffset));
  __ Strd(scratch2, upper_bits,
          FieldMemOperand(heap_number, HeapNumber::kValueOffset));
  __ mov(scratch2, dst_elements);
  __ str(heap_number, MemOperand(dst_elements, 4, PostIndex));
  __ RecordWrite(array,
                 scratch2,
                 heap_number,
                 kLRHasBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ b(&entry);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
  __ str(scratch2, MemOperand(dst_elements, 4, PostIndex));

  __ bind(&entry);
  __ cmp(dst_elements, dst_end);
  __ b(lt, &loop);

  __ Pop(target_map, receiver, key, value);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver,
                      JSObject::kElementsOffset,
                      array,
                      scratch,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(lr);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
@@ -2314,68 +2314,6 @@ void MacroAssembler::CompareRoot(Register obj,
  cmp(obj, ip);
}

void MacroAssembler::CheckFastObjectElements(Register map,
                                             Register scratch,
                                             Label* fail) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
  b(ls, fail);
  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
  b(hi, fail);
}


void MacroAssembler::CheckFastSmiElements(Register map,
                                          Register scratch,
                                          Label* fail) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
  b(hi, fail);
}


void MacroAssembler::StoreNumberToDoubleElements(
    Register value_reg,
    Register key_reg,
    Register elements_reg,
    Register scratch1,
    LowDwVfpRegister double_scratch,
    Label* fail,
    int elements_offset) {
  DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
  Label smi_value, store;

  // Handle smi values specially.
  JumpIfSmi(value_reg, &smi_value);

  // Ensure that the object is a heap number.
  CheckMap(value_reg,
           scratch1,
           isolate()->factory()->heap_number_map(),
           fail,
           DONT_DO_SMI_CHECK);

  vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
  VFPCanonicalizeNaN(double_scratch);
  b(&store);

  bind(&smi_value);
  SmiToDouble(double_scratch, value_reg);

  bind(&store);
  add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg));
  vstr(double_scratch,
       FieldMemOperand(scratch1,
                       FixedDoubleArray::kHeaderSize - elements_offset));
}


void MacroAssembler::CompareMap(Register obj,
                                Register scratch,
                                Handle<Map> map,

@@ -2878,28 +2816,6 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  }
}


void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch,
    Label* no_map_match) {
  DCHECK(IsFastElementsKind(expected_kind));
  DCHECK(IsFastElementsKind(transitioned_kind));

  // Check that the function's map is the same as the expected cached map.
  ldr(scratch, NativeContextMemOperand());
  ldr(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
  cmp(map_in_out, ip);
  b(ne, no_map_match);

  // Use the transitioned cached map.
  ldr(map_in_out,
      ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}


void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
  ldr(dst, NativeContextMemOperand());
  ldr(dst, ContextMemOperand(dst, index));

@@ -2962,15 +2878,6 @@ void MacroAssembler::UntagAndJumpIfSmi(
  b(cc, smi_case);  // Shifter carry is not set for a smi.
}


void MacroAssembler::UntagAndJumpIfNotSmi(
    Register dst, Register src, Label* non_smi_case) {
  STATIC_ASSERT(kSmiTag == 0);
  SmiUntag(dst, src, SetCC);
  b(cs, non_smi_case);  // Shifter carry is set for a non-smi.
}


void MacroAssembler::JumpIfEitherSmi(Register reg1,
                                     Register reg2,
                                     Label* on_either_smi) {

@@ -3861,45 +3768,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
  return no_reg;
}


void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  DCHECK(!scratch1.is(scratch0));
  Register current = scratch0;
  Label loop_again, end;

  // Scratch contains the elements pointer.
  mov(current, object);
  ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
  ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
  CompareRoot(current, Heap::kNullValueRootIndex);
  b(eq, &end);

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));

  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  ldrb(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
  cmp(scratch1, Operand(JS_OBJECT_TYPE));
  b(lo, found);

  ldr(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  cmp(scratch1, Operand(DICTIONARY_ELEMENTS));
  b(eq, found);
  ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
  CompareRoot(current, Heap::kNullValueRootIndex);
  b(ne, &loop_again);

  bind(&end);
}
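Restated in plain terms: the walk follows map->prototype upward, branching to
found on any special (below JS_OBJECT_TYPE) receiver or any map with
dictionary-mode elements, and stopping at null. A hypothetical C++
restatement, with invented field names standing in for the map fields the
assembly reads:

struct ProtoMap {
  ProtoMap* prototype_map;    // null terminates the chain
  bool below_js_object_type;  // proxies and other special receivers
  bool dictionary_elements;   // elements kind == DICTIONARY_ELEMENTS
};

bool DictionaryInPrototypeChain(const ProtoMap* map) {
  for (const ProtoMap* cur = map->prototype_map; cur != nullptr;
       cur = cur->prototype_map) {
    if (cur->below_js_object_type) return true;  // conservatively "found"
    if (cur->dictionary_elements) return true;
  }
  return false;
}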
#ifdef DEBUG
bool AreAliased(Register reg1,
                Register reg2,

@@ -635,17 +635,6 @@ class MacroAssembler: public Assembler {
    LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
  }

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(
      ElementsKind expected_kind,
      ElementsKind transitioned_kind,
      Register map_in_out,
      Register scratch,
      Label* no_map_match);

  void LoadNativeContextSlot(int index, Register dst);

  // Load the initial map from the global function. The registers

@@ -884,29 +873,6 @@ class MacroAssembler: public Assembler {
      Register type_reg,
      InstanceType type);

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements. Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map,
                               Register scratch,
                               Label* fail);

  // Check if a map for a JSObject indicates that the object has fast smi only
  // elements. Jump to the specified label if it does not.
  void CheckFastSmiElements(Register map,
                            Register scratch,
                            Label* fail);

  // Check to see if maybe_number can be stored as a double in
  // FastDoubleElements. If it can, store it at the index specified by key in
  // the FastDoubleElements array elements. Otherwise jump to fail.
  void StoreNumberToDoubleElements(Register value_reg,
                                   Register key_reg,
                                   Register elements_reg,
                                   Register scratch1,
                                   LowDwVfpRegister double_scratch,
                                   Label* fail,
                                   int elements_offset = 0);

  // Compare an object's map with the specified map and its transitioned
  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
  // set with result of map compare. If multiple map compares are required, the

@@ -1287,10 +1253,6 @@ class MacroAssembler: public Assembler {
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Untag the source value into destination and jump if source is not a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);

  // Test if the register contains a smi (Z == 1 (eq) if true).
  inline void SmiTst(Register value) {
    tst(value, Operand(kSmiTagMask));

@@ -1464,20 +1426,6 @@ class MacroAssembler: public Assembler {
      Register scratch_reg,
      Label* no_memento_found);

  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
                                         Register scratch_reg,
                                         Label* memento_found) {
    Label no_memento_found;
    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
                                    &no_memento_found);
    b(eq, memento_found);
    bind(&no_memento_found);
  }

  // Jumps to found label if a prototype map has dictionary elements.
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
                                        Register scratch1, Label* found);

  // Loads the constant pool pointer (pp) register.
  void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
      Register code_target_address);
@@ -2980,234 +2980,6 @@ void CallICTrampolineStub::Generate(MacroAssembler* masm) {
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}


static void HandleArrayCases(MacroAssembler* masm, Register feedback,
                             Register receiver_map, Register scratch1,
                             Register scratch2, bool is_polymorphic,
                             Label* miss) {
  // feedback initially contains the feedback array
  Label next_loop, prepare_next;
  Label load_smi_map, compare_map;
  Label start_polymorphic;

  Register cached_map = scratch1;

  __ Ldr(cached_map,
         FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
  __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ Cmp(receiver_map, cached_map);
  __ B(ne, &start_polymorphic);
  // found, now call handler.
  Register handler = feedback;
  __ Ldr(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
  __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
  __ Jump(feedback);

  Register length = scratch2;
  __ Bind(&start_polymorphic);
  __ Ldr(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
  if (!is_polymorphic) {
    __ Cmp(length, Operand(Smi::FromInt(2)));
    __ B(eq, miss);
  }

  Register too_far = length;
  Register pointer_reg = feedback;

  // +-----+------+------+-----+-----+ ... ----+
  // | map | len  | wm0  | h0  | wm1 |      hN |
  // +-----+------+------+-----+-----+ ... ----+
  //                 0      1     2        len-1
  //                              ^              ^
  //                              |              |
  //                         pointer_reg      too_far
  //                         aka feedback     scratch2
  // also need receiver_map
  // use cached_map (scratch1) to look in the weak map values.
  __ Add(too_far, feedback,
         Operand::UntagSmiAndScale(length, kPointerSizeLog2));
  __ Add(too_far, too_far, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(pointer_reg, feedback,
         FixedArray::OffsetOfElementAt(2) - kHeapObjectTag);

  __ Bind(&next_loop);
  __ Ldr(cached_map, MemOperand(pointer_reg));
  __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ Cmp(receiver_map, cached_map);
  __ B(ne, &prepare_next);
  __ Ldr(handler, MemOperand(pointer_reg, kPointerSize));
  __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
  __ Jump(handler);

  __ Bind(&prepare_next);
  __ Add(pointer_reg, pointer_reg, kPointerSize * 2);
  __ Cmp(pointer_reg, too_far);
  __ B(lt, &next_loop);

  // We exhausted our array of map handler pairs.
  __ jmp(miss);
}


static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
                                  Register receiver_map, Register feedback,
                                  Register vector, Register slot,
                                  Register scratch, Label* compare_map,
                                  Label* load_smi_map, Label* try_array) {
  __ JumpIfSmi(receiver, load_smi_map);
  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ bind(compare_map);
  Register cached_map = scratch;
  // Move the weak map into the weak_cell register.
  __ Ldr(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
  __ Cmp(cached_map, receiver_map);
  __ B(ne, try_array);

  Register handler = feedback;
  __ Add(handler, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
  __ Ldr(handler,
         FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
  __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
  __ Jump(handler);
}

void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
  KeyedStoreICStub stub(isolate(), state());
  stub.GenerateForTrampoline(masm);
}

void KeyedStoreICStub::Generate(MacroAssembler* masm) {
  GenerateImpl(masm, false);
}

void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, true);
}


static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
                                       Register receiver_map, Register scratch1,
                                       Register scratch2, Label* miss) {
  // feedback initially contains the feedback array
  Label next_loop, prepare_next;
  Label start_polymorphic;
  Label transition_call;

  Register cached_map = scratch1;
  Register too_far = scratch2;
  Register pointer_reg = feedback;

  __ Ldr(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));

  // +-----+------+------+-----+-----+-----+ ... ----+
  // | map | len  | wm0  | wt0 | h0  | wm1 |      hN |
  // +-----+------+------+-----+-----+ ----+ ... ----+
  //                 0      1     2              len-1
  //                 ^                            ^
  //                 |                            |
  //             pointer_reg                    too_far
  //             aka feedback                   scratch2
  // also need receiver_map
  // use cached_map (scratch1) to look in the weak map values.
  __ Add(too_far, feedback,
         Operand::UntagSmiAndScale(too_far, kPointerSizeLog2));
  __ Add(too_far, too_far, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(pointer_reg, feedback,
         FixedArray::OffsetOfElementAt(0) - kHeapObjectTag);

  __ Bind(&next_loop);
  __ Ldr(cached_map, MemOperand(pointer_reg));
  __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ Cmp(receiver_map, cached_map);
  __ B(ne, &prepare_next);
  // Is it a transitioning store?
  __ Ldr(too_far, MemOperand(pointer_reg, kPointerSize));
  __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
  __ B(ne, &transition_call);

  __ Ldr(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
  __ Add(pointer_reg, pointer_reg, Code::kHeaderSize - kHeapObjectTag);
  __ Jump(pointer_reg);

  __ Bind(&transition_call);
  __ Ldr(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
  __ JumpIfSmi(too_far, miss);

  __ Ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
  // Load the map into the correct register.
  DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
  __ mov(feedback, too_far);
  __ Add(receiver_map, receiver_map, Code::kHeaderSize - kHeapObjectTag);
  __ Jump(receiver_map);

  __ Bind(&prepare_next);
  __ Add(pointer_reg, pointer_reg, kPointerSize * 3);
  __ Cmp(pointer_reg, too_far);
  __ B(lt, &next_loop);

  // We exhausted our array of map handler pairs.
  __ jmp(miss);
}

void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // x1
  Register key = StoreWithVectorDescriptor::NameRegister();           // x2
  Register vector = StoreWithVectorDescriptor::VectorRegister();      // x3
  Register slot = StoreWithVectorDescriptor::SlotRegister();          // x4
  DCHECK(StoreWithVectorDescriptor::ValueRegister().is(x0));          // x0
  Register feedback = x5;
  Register receiver_map = x6;
  Register scratch1 = x7;

  __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
  __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));

  // Try to quickly handle the monomorphic case without knowing for sure
  // if we have a weak cell in feedback. We do know it's safe to look
  // at WeakCell::kValueOffset.
  Label try_array, load_smi_map, compare_map;
  Label not_array, miss;
  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
                        scratch1, &compare_map, &load_smi_map, &try_array);

  __ Bind(&try_array);
  // Is it a fixed array?
  __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
  __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);

  // We have a polymorphic element handler.
  Label try_poly_name;
  HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, x8, &miss);

  __ Bind(&not_array);
  // Is it generic?
  __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex,
                   &try_poly_name);
  Handle<Code> megamorphic_stub =
      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);

  __ Bind(&try_poly_name);
  // We might have a name in feedback, and a fixed array in the next slot.
  __ Cmp(key, feedback);
  __ B(ne, &miss);
  // If the name comparison succeeded, we know we have a fixed array with
  // at least one map/handler pair.
  __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
  __ Ldr(feedback,
         FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
  HandleArrayCases(masm, feedback, receiver_map, scratch1, x8, false, &miss);

  __ Bind(&miss);
  KeyedStoreIC::GenerateMiss(masm);

  __ Bind(&load_smi_map);
  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
  __ jmp(&compare_map);
}
// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
// a "Push lr" instruction, followed by a call.
static const unsigned int kProfileEntryHookCallSize =

@@ -40,272 +40,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
// -------------------------------------------------------------------------
// Code generators

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  ASM_LOCATION(
      "ElementsTransitionGenerator::GenerateMapChangeElementsTransition");
  DCHECK(!AreAliased(receiver, key, value, target_map));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11,
                                         allocation_memento_found);
  }

  // Set transitioned map.
  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      x10,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  ASM_LOCATION("ElementsTransitionGenerator::GenerateSmiToDouble");
  Label gc_required, only_change_map;
  Register elements = x4;
  Register length = x5;
  Register array_size = x6;
  Register array = x7;

  Register scratch = x6;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, length, array_size, array));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);

  __ Push(lr);
  __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
                                           FixedArray::kLengthOffset));

  // Allocate new FixedDoubleArray.
  __ Lsl(array_size, length, kDoubleSizeLog2);
  __ Add(array_size, array_size, FixedDoubleArray::kHeaderSize);
  __ Allocate(array_size, array, x10, x11, &gc_required, DOUBLE_ALIGNMENT);
  // Register array is non-tagged heap object.

  // Set the destination FixedDoubleArray's length and map.
  Register map_root = array_size;
  __ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
  __ SmiTag(x11, length);
  __ Str(x11, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
  __ Str(map_root, FieldMemOperand(array, HeapObject::kMapOffset));

  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
                      kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ Move(x10, array);
  __ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver, JSObject::kElementsOffset, x10, scratch,
                      kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  Register src_elements = x10;
  Register dst_elements = x11;
  Register dst_end = x12;
  __ Add(src_elements, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(dst_elements, array, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
  __ Add(dst_end, dst_elements, Operand(length, LSL, kDoubleSizeLog2));

  FPRegister nan_d = d1;
  __ Fmov(nan_d, rawbits_to_double(kHoleNanInt64));

  Label entry, done;
  __ B(&entry);

  __ Bind(&only_change_map);
  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ B(&done);

  // Call into runtime if GC is required.
  __ Bind(&gc_required);
  __ Pop(lr);
  __ B(fail);

  // Iterate over the array, copying and converting smis to doubles. If an
  // element is non-smi, write a hole to the destination.
  {
    Label loop;
    __ Bind(&loop);
    __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
    __ SmiUntagToDouble(d0, x13, kSpeculativeUntag);
    __ Tst(x13, kSmiTagMask);
    __ Fcsel(d0, d0, nan_d, eq);
    __ Str(d0, MemOperand(dst_elements, kDoubleSize, PostIndex));

    __ Bind(&entry);
    __ Cmp(dst_elements, dst_end);
    __ B(lt, &loop);
  }

  __ Pop(lr);
  __ Bind(&done);
}
void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  ASM_LOCATION("ElementsTransitionGenerator::GenerateDoubleToObject");
  Register elements = x4;
  Register array_size = x6;
  Register array = x7;
  Register length = x5;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, array_size, array, length));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  Label only_change_map;

  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);

  __ Push(lr);
  // TODO(all): These registers may not need to be pushed. Examine
  // RecordWriteStub and check whether it's needed.
  __ Push(target_map, receiver, key, value);
  __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
                                           FixedArray::kLengthOffset));
  // Allocate new FixedArray.
  Label gc_required;
  __ Mov(array_size, FixedDoubleArray::kHeaderSize);
  __ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2));
  __ Allocate(array_size, array, x10, x11, &gc_required, NO_ALLOCATION_FLAGS);

  // Set destination FixedArray's length and map.
  Register map_root = array_size;
  __ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
  __ SmiTag(x11, length);
  __ Str(x11, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
  __ Str(map_root, FieldMemOperand(array, HeapObject::kMapOffset));

  // Prepare for conversion loop.
  Register src_elements = x10;
  Register dst_elements = x11;
  Register dst_end = x12;
  Register the_hole = x14;
  __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
  __ Add(src_elements, elements,
         FixedDoubleArray::kHeaderSize - kHeapObjectTag);
  __ Add(dst_elements, array, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2));

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  Label initialization_loop, initialization_loop_entry;
  __ B(&initialization_loop_entry);
  __ bind(&initialization_loop);
  __ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));
  __ bind(&initialization_loop_entry);
  __ Cmp(dst_elements, dst_end);
  __ B(lt, &initialization_loop);

  __ Add(dst_elements, array, FixedArray::kHeaderSize - kHeapObjectTag);

  Register heap_num_map = x15;
  __ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex);

  Label entry;
  __ B(&entry);

  // Call into runtime if GC is required.
  __ Bind(&gc_required);
  __ Pop(value, key, receiver, target_map);
  __ Pop(lr);
  __ B(fail);

  {
    Label loop, convert_hole;
    __ Bind(&loop);
    __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
    __ Cmp(x13, kHoleNanInt64);
    __ B(eq, &convert_hole);

    // Non-hole double, copy value into a heap number.
    Register heap_num = length;
    Register scratch = array_size;
    Register scratch2 = elements;
    __ AllocateHeapNumber(heap_num, &gc_required, scratch, scratch2,
                          x13, heap_num_map);
    __ Mov(x13, dst_elements);
    __ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex));
    __ RecordWrite(array, x13, heap_num, kLRHasBeenSaved, kDontSaveFPRegs,
                   EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

    __ B(&entry);

    // Replace the-hole NaN with the-hole pointer.
    __ Bind(&convert_hole);
    __ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));

    __ Bind(&entry);
    __ Cmp(dst_elements, dst_end);
    __ B(lt, &loop);
  }

  __ Pop(value, key, receiver, target_map);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver, JSObject::kElementsOffset, array, x13,
                      kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ Pop(lr);

  __ Bind(&only_change_map);
  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x13,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
  USE(isolate);
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
@@ -3664,59 +3664,6 @@ void MacroAssembler::TestAndSplit(const Register& reg,
  }
}

void MacroAssembler::CheckFastObjectElements(Register map,
                                             Register scratch,
                                             Label* fail) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
  // If cond==ls, set cond=hi, otherwise compare.
  Ccmp(scratch,
       Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi);
  B(hi, fail);
}
|
||||
|
||||
|
||||
// Note: The ARM version of this clobbers elements_reg, but this version does
|
||||
// not. Some uses of this in ARM64 assume that elements_reg will be preserved.
|
||||
void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
|
||||
Register key_reg,
|
||||
Register elements_reg,
|
||||
Register scratch1,
|
||||
FPRegister fpscratch1,
|
||||
Label* fail,
|
||||
int elements_offset) {
|
||||
DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
|
||||
Label store_num;
|
||||
|
||||
// Speculatively convert the smi to a double - all smis can be exactly
|
||||
// represented as a double.
|
||||
SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag);
|
||||
|
||||
// If value_reg is a smi, we're done.
|
||||
JumpIfSmi(value_reg, &store_num);
|
||||
|
||||
// Ensure that the object is a heap number.
|
||||
JumpIfNotHeapNumber(value_reg, fail);
|
||||
|
||||
Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
|
||||
|
||||
// Canonicalize NaNs.
|
||||
CanonicalizeNaN(fpscratch1);
|
||||
|
||||
// Store the result.
|
||||
Bind(&store_num);
|
||||
Add(scratch1, elements_reg,
|
||||
Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2));
|
||||
Str(fpscratch1,
|
||||
FieldMemOperand(scratch1,
|
||||
FixedDoubleArray::kHeaderSize - elements_offset));
|
||||
}
|
||||
|
||||
|
||||
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
|
||||
return has_frame_ || !stub->SometimesSetsUpAFrame();
|
||||
}
|
||||
@@ -4276,39 +4223,6 @@ void MacroAssembler::JumpIfBlack(Register object,
  HasColor(object, scratch0, scratch1, on_black, 1, 1);  // kBlackBitPattern.
}


void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  DCHECK(!AreAliased(object, scratch0, scratch1));
  Register current = scratch0;
  Label loop_again, end;

  // Scratch contains elements pointer.
  Mov(current, object);
  Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
  Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
  CompareAndBranch(current, Heap::kNullValueRootIndex, eq, &end);

  // Loop based on the map going up the prototype chain.
  Bind(&loop_again);
  Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  CompareInstanceType(current, scratch1, JS_OBJECT_TYPE);
  B(lo, found);
  Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
  Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
  CompareAndBranch(current, Heap::kNullValueRootIndex, ne, &loop_again);

  Bind(&end);
}


void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
                                 Register shift_scratch, Register load_scratch,
                                 Register length_scratch,
@@ -4471,30 +4385,6 @@ void MacroAssembler::Abort(BailoutReason reason) {
  TmpList()->set_list(old_tmp_list);
}


void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch1,
    Register scratch2,
    Label* no_map_match) {
  DCHECK(IsFastElementsKind(expected_kind));
  DCHECK(IsFastElementsKind(transitioned_kind));

  // Check that the function's map is the same as the expected cached map.
  Ldr(scratch1, NativeContextMemOperand());
  Ldr(scratch2,
      ContextMemOperand(scratch1, Context::ArrayMapIndex(expected_kind)));
  Cmp(map_in_out, scratch2);
  B(ne, no_map_match);

  // Use the transitioned cached map.
  Ldr(map_in_out,
      ContextMemOperand(scratch1, Context::ArrayMapIndex(transitioned_kind)));
}


void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
  Ldr(dst, NativeContextMemOperand());
  Ldr(dst, ContextMemOperand(dst, index));
@@ -1566,21 +1566,6 @@ class MacroAssembler : public Assembler {
                       Label* if_any_set,
                       Label* fall_through);

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements. Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map, Register scratch, Label* fail);

  // Check to see if number can be stored as a double in FastDoubleElements.
  // If it can, store it at the index specified by key_reg in the array,
  // otherwise jump to fail.
  void StoreNumberToDoubleElements(Register value_reg,
                                   Register key_reg,
                                   Register elements_reg,
                                   Register scratch1,
                                   FPRegister fpscratch1,
                                   Label* fail,
                                   int elements_offset = 0);

  // ---------------------------------------------------------------------------
  // Inline caching support.
@@ -1624,17 +1609,6 @@ class MacroAssembler : public Assembler {
                                       Register scratch2,
                                       Label* no_memento_found);

  void JumpIfJSArrayHasAllocationMemento(Register receiver,
                                         Register scratch1,
                                         Register scratch2,
                                         Label* memento_found) {
    Label no_memento_found;
    TestJSArrayForAllocationMemento(receiver, scratch1, scratch2,
                                    &no_memento_found);
    B(eq, memento_found);
    Bind(&no_memento_found);
  }

  // The stack pointer has to switch between csp and jssp when setting up and
  // destroying the exit frame. Hence preserving/restoring the registers is
  // slightly more complicated than simple push/pop operations.
@@ -1902,18 +1876,6 @@ class MacroAssembler : public Assembler {
  // Print a message to stderr and abort execution.
  void Abort(BailoutReason reason);

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(
      ElementsKind expected_kind,
      ElementsKind transitioned_kind,
      Register map_in_out,
      Register scratch1,
      Register scratch2,
      Label* no_map_match);

  void LoadNativeContextSlot(int index, Register dst);

  // Load the initial map from the global function. The registers function and
@@ -2002,10 +1964,6 @@ class MacroAssembler : public Assembler {
  // sequence is a code age sequence (emitted by EmitCodeAgeSequence).
  static bool IsYoungSequence(Isolate* isolate, byte* sequence);

  // Jumps to found label if a prototype map has dictionary elements.
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
                                        Register scratch1, Label* found);

  // Perform necessary maintenance operations before a push or after a pop.
  //
  // Note that size is specified in bytes.
@@ -139,7 +139,6 @@ namespace internal {
  V(kNotEnoughSpillSlotsForOsr, "Not enough spill slots for OSR")              \
  V(kNotEnoughVirtualRegistersRegalloc,                                        \
    "Not enough virtual registers (regalloc)")                                 \
  V(kObjectFoundInSmiOnlyArray, "Object found in smi-only array")              \
  V(kObjectLiteralWithComplexProperty, "Object literal with complex property") \
  V(kOffsetOutOfRange, "Offset out of range")                                  \
  V(kOperandIsANumber, "Operand is a number")                                  \
@@ -24,14 +24,6 @@ void Builtins::Generate_KeyedLoadIC_Slow(MacroAssembler* masm) {
  KeyedLoadIC::GenerateRuntimeGetProperty(masm);
}

void Builtins::Generate_KeyedStoreIC_Megamorphic(MacroAssembler* masm) {
  KeyedStoreIC::GenerateMegamorphic(masm, SLOPPY);
}

void Builtins::Generate_KeyedStoreIC_Megamorphic_Strict(MacroAssembler* masm) {
  KeyedStoreIC::GenerateMegamorphic(masm, STRICT);
}

void Builtins::Generate_KeyedStoreIC_Megamorphic_TF(
    compiler::CodeAssemblerState* state) {
  KeyedStoreGenericGenerator::Generate(state, SLOPPY);
@@ -183,9 +183,6 @@ namespace internal {
      LoadWithVector)                                                 \
  ASM(KeyedLoadIC_Miss)                                               \
  ASH(KeyedLoadIC_Slow, HANDLER, Code::KEYED_LOAD_IC)                 \
  ASH(KeyedStoreIC_Megamorphic, KEYED_STORE_IC, kNoExtraICState)      \
  ASH(KeyedStoreIC_Megamorphic_Strict, KEYED_STORE_IC,                \
      StoreICState::kStrictModeState)                                 \
  TFS(KeyedStoreIC_Megamorphic_TF, KEYED_STORE_IC, kNoExtraICState,   \
      StoreWithVector)                                                \
  TFS(KeyedStoreIC_Megamorphic_Strict_TF, KEYED_STORE_IC,             \
@@ -101,39 +101,25 @@ Callable CodeFactory::StoreICInOptimizedCode(Isolate* isolate,
// static
Callable CodeFactory::KeyedStoreIC(Isolate* isolate,
                                   LanguageMode language_mode) {
  if (FLAG_tf_store_ic_stub) {
    KeyedStoreICTrampolineTFStub stub(isolate, StoreICState(language_mode));
    return make_callable(stub);
  }
  KeyedStoreICTrampolineStub stub(isolate, StoreICState(language_mode));
  KeyedStoreICTrampolineTFStub stub(isolate, StoreICState(language_mode));
  return make_callable(stub);
}

// static
Callable CodeFactory::KeyedStoreICInOptimizedCode(Isolate* isolate,
                                                  LanguageMode language_mode) {
  if (FLAG_tf_store_ic_stub) {
    KeyedStoreICTFStub stub(isolate, StoreICState(language_mode));
    return make_callable(stub);
  }
  KeyedStoreICStub stub(isolate, StoreICState(language_mode));
  KeyedStoreICTFStub stub(isolate, StoreICState(language_mode));
  return make_callable(stub);
}

// static
Callable CodeFactory::KeyedStoreIC_Megamorphic(Isolate* isolate,
                                               LanguageMode language_mode) {
  if (FLAG_tf_store_ic_stub) {
    return Callable(
        language_mode == STRICT
            ? isolate->builtins()->KeyedStoreIC_Megamorphic_Strict_TF()
            : isolate->builtins()->KeyedStoreIC_Megamorphic_TF(),
        StoreWithVectorDescriptor(isolate));
  }
  return Callable(language_mode == STRICT
                      ? isolate->builtins()->KeyedStoreIC_Megamorphic_Strict()
                      : isolate->builtins()->KeyedStoreIC_Megamorphic(),
                  StoreWithVectorDescriptor(isolate));
  return Callable(
      language_mode == STRICT
          ? isolate->builtins()->KeyedStoreIC_Megamorphic_Strict_TF()
          : isolate->builtins()->KeyedStoreIC_Megamorphic_TF(),
      StoreWithVectorDescriptor(isolate));
}

// static
@@ -44,7 +44,6 @@ class ObjectLiteral;
  V(StoreBufferOverflow)                      \
  V(StoreElement)                             \
  V(SubString)                                \
  V(KeyedStoreIC)                             \
  V(LoadGlobalIC)                             \
  V(FastNewObject)                            \
  V(FastNewRestParameter)                     \
@@ -59,7 +58,6 @@ class ObjectLiteral;
  /* version of the corresponding stub is  */ \
  /* used universally                      */ \
  V(CallICTrampoline)                         \
  V(KeyedStoreICTrampoline)                   \
  /* --- HydrogenCodeStubs --- */             \
  /* These builtins w/ JS linkage are      */ \
  /* just fast-cases of C++ builtins. They */ \
@@ -1999,27 +1997,6 @@ class StoreICTrampolineStub : public TurboFanCodeStub {
  DEFINE_TURBOFAN_CODE_STUB(StoreICTrampoline, TurboFanCodeStub);
};

class KeyedStoreICTrampolineStub : public PlatformCodeStub {
 public:
  KeyedStoreICTrampolineStub(Isolate* isolate, const StoreICState& state)
      : PlatformCodeStub(isolate) {
    minor_key_ = state.GetExtraICState();
  }

  Code::Kind GetCodeKind() const override { return Code::KEYED_STORE_IC; }

  ExtraICState GetExtraICState() const final {
    return static_cast<ExtraICState>(minor_key_);
  }

 protected:
  StoreICState state() const { return StoreICState(GetExtraICState()); }

 private:
  DEFINE_CALL_INTERFACE_DESCRIPTOR(Store);
  DEFINE_PLATFORM_CODE_STUB(KeyedStoreICTrampoline, PlatformCodeStub);
};

class KeyedStoreICTrampolineTFStub : public StoreICTrampolineStub {
 public:
  KeyedStoreICTrampolineTFStub(Isolate* isolate, const StoreICState& state)
@@ -2142,28 +2119,6 @@ class StoreICStub : public TurboFanCodeStub {
  DEFINE_TURBOFAN_CODE_STUB(StoreIC, TurboFanCodeStub);
};

class KeyedStoreICStub : public PlatformCodeStub {
 public:
  KeyedStoreICStub(Isolate* isolate, const StoreICState& state)
      : PlatformCodeStub(isolate) {
    minor_key_ = state.GetExtraICState();
  }

  void GenerateForTrampoline(MacroAssembler* masm);

  Code::Kind GetCodeKind() const final { return Code::KEYED_STORE_IC; }

  ExtraICState GetExtraICState() const final {
    return static_cast<ExtraICState>(minor_key_);
  }

  DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
  DEFINE_PLATFORM_CODE_STUB(KeyedStoreIC, PlatformCodeStub);

 protected:
  void GenerateImpl(MacroAssembler* masm, bool in_frame);
};

class KeyedStoreICTFStub : public StoreICStub {
 public:
  KeyedStoreICTFStub(Isolate* isolate, const StoreICState& state)
@@ -104,43 +104,6 @@ V8_EXPORT_PRIVATE double modulo(double x, double y);
double fast_sqrt(double input, Isolate* isolate);
void lazily_initialize_fast_sqrt(Isolate* isolate);


class ElementsTransitionGenerator : public AllStatic {
 public:
  // If |mode| is set to DONT_TRACK_ALLOCATION_SITE,
  // |allocation_memento_found| may be NULL.
  static void GenerateMapChangeElementsTransition(
      MacroAssembler* masm,
      Register receiver,
      Register key,
      Register value,
      Register target_map,
      AllocationSiteMode mode,
      Label* allocation_memento_found);
  static void GenerateSmiToDouble(
      MacroAssembler* masm,
      Register receiver,
      Register key,
      Register value,
      Register target_map,
      AllocationSiteMode mode,
      Label* fail);
  static void GenerateDoubleToObject(
      MacroAssembler* masm,
      Register receiver,
      Register key,
      Register value,
      Register target_map,
      AllocationSiteMode mode,
      Label* fail);

 private:
  DISALLOW_COPY_AND_ASSIGN(ElementsTransitionGenerator);
};

static const int kNumberDictionaryProbes = 4;


class CodeAgingHelper {
 public:
  explicit CodeAgingHelper(Isolate* isolate);
@@ -3016,329 +3016,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
  __ jmp(ecx);  // Return to IC Miss stub, continuation still on stack.
}

void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
  KeyedStoreICStub stub(isolate(), state());
  stub.GenerateForTrampoline(masm);
}

// value is on the stack already.
static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
                                       Register key, Register vector,
                                       Register slot, Register feedback,
                                       bool is_polymorphic, Label* miss) {
  // feedback initially contains the feedback array
  Label next, next_loop, prepare_next;
  Label load_smi_map, compare_map;
  Label start_polymorphic;
  Label pop_and_miss;

  __ push(receiver);
  // Value, vector and slot are passed on the stack, so no need to save/restore
  // them.

  Register receiver_map = receiver;
  Register cached_map = vector;

  // Receiver might not be a heap object.
  __ JumpIfSmi(receiver, &load_smi_map);
  __ mov(receiver_map, FieldOperand(receiver, 0));
  __ bind(&compare_map);
  __ mov(cached_map, FieldOperand(feedback, FixedArray::OffsetOfElementAt(0)));

  // A named keyed store might have a 2 element array, all other cases can count
  // on an array with at least 2 {map, handler} pairs, so they can go right
  // into polymorphic array handling.
  __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
  __ j(not_equal, &start_polymorphic);

  // found, now call handler.
  Register handler = feedback;
  DCHECK(handler.is(StoreWithVectorDescriptor::ValueRegister()));
  __ mov(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
  __ pop(receiver);
  __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
  __ jmp(handler);

  // Polymorphic, we have to loop from 2 to N
  __ bind(&start_polymorphic);
  __ push(key);
  Register counter = key;
  __ mov(counter, Immediate(Smi::FromInt(2)));

  if (!is_polymorphic) {
    // If is_polymorphic is false, we may only have a two element array.
    // Check against length now in that case.
    __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
    __ j(greater_equal, &pop_and_miss);
  }

  __ bind(&next_loop);
  __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
                                  FixedArray::kHeaderSize));
  __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
  __ j(not_equal, &prepare_next);
  __ mov(handler, FieldOperand(feedback, counter, times_half_pointer_size,
                               FixedArray::kHeaderSize + kPointerSize));
  __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
  __ pop(key);
  __ pop(receiver);
  __ jmp(handler);

  __ bind(&prepare_next);
  __ add(counter, Immediate(Smi::FromInt(2)));
  __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
  __ j(less, &next_loop);

  // We exhausted our array of map handler pairs.
  __ bind(&pop_and_miss);
  __ pop(key);
  __ pop(receiver);
  __ jmp(miss);

  __ bind(&load_smi_map);
  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
  __ jmp(&compare_map);
}


static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver,
                                       Register key, Register vector,
                                       Register slot, Register weak_cell,
                                       Label* miss) {
  // The store ic value is on the stack.
  DCHECK(weak_cell.is(StoreWithVectorDescriptor::ValueRegister()));

  // feedback initially contains the feedback array
  Label compare_smi_map;

  // Move the weak map into the weak_cell register.
  Register ic_map = weak_cell;
  __ mov(ic_map, FieldOperand(weak_cell, WeakCell::kValueOffset));

  // Receiver might not be a heap object.
  __ JumpIfSmi(receiver, &compare_smi_map);
  __ cmp(ic_map, FieldOperand(receiver, 0));
  __ j(not_equal, miss);
  __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
                                 FixedArray::kHeaderSize + kPointerSize));
  __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
  // jump to the handler.
  __ jmp(weak_cell);

  // In microbenchmarks, it made sense to unroll this code so that the call to
  // the handler is duplicated for a HeapObject receiver and a Smi receiver.
  __ bind(&compare_smi_map);
  __ CompareRoot(ic_map, Heap::kHeapNumberMapRootIndex);
  __ j(not_equal, miss);
  __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
                                 FixedArray::kHeaderSize + kPointerSize));
  __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
  // jump to the handler.
  __ jmp(weak_cell);
}

void KeyedStoreICStub::Generate(MacroAssembler* masm) {
  GenerateImpl(masm, false);
}

void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, true);
}


static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
                                            Register receiver, Register key,
                                            Register vector, Register slot,
                                            Register feedback, Label* miss) {
  // feedback initially contains the feedback array
  Label next, next_loop, prepare_next;
  Label load_smi_map, compare_map;
  Label transition_call;
  Label pop_and_miss;

  __ push(receiver);
  // Value, vector and slot are passed on the stack, so no need to save/restore
  // them.

  Register receiver_map = receiver;
  Register cached_map = vector;

  // Receiver might not be a heap object.
  __ JumpIfSmi(receiver, &load_smi_map);
  __ mov(receiver_map, FieldOperand(receiver, 0));
  __ bind(&compare_map);

  // Polymorphic, we have to loop from 0 to N - 1
  __ push(key);
  // Current stack layout:
  // - esp[0]  -- key
  // - esp[4]  -- receiver
  // - esp[8]  -- return address
  // - esp[12] -- vector
  // - esp[16] -- slot
  // - esp[20] -- value
  //
  // Required stack layout for handler call (see StoreWithVectorDescriptor):
  // - esp[0]  -- return address
  // - esp[4]  -- vector
  // - esp[8]  -- slot
  // - esp[12] -- value
  // - receiver, key, handler in registers.
  Register counter = key;
  __ mov(counter, Immediate(Smi::kZero));
  __ bind(&next_loop);
  __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
                                  FixedArray::kHeaderSize));
  __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
  __ j(not_equal, &prepare_next);
  __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
                                  FixedArray::kHeaderSize + kPointerSize));
  __ CompareRoot(cached_map, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, &transition_call);
  __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
                                FixedArray::kHeaderSize + 2 * kPointerSize));
  __ pop(key);
  __ pop(receiver);
  __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
  __ jmp(feedback);

  __ bind(&transition_call);
  // Current stack layout:
  // - esp[0]  -- key
  // - esp[4]  -- receiver
  // - esp[8]  -- return address
  // - esp[12] -- vector
  // - esp[16] -- slot
  // - esp[20] -- value
  //
  // Required stack layout for handler call (see StoreTransitionDescriptor):
  // - esp[0]  -- return address
  // - esp[4]  -- vector
  // - esp[8]  -- slot
  // - esp[12] -- value
  // - receiver, key, map, handler in registers.
  __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
                                FixedArray::kHeaderSize + 2 * kPointerSize));
  __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));

  __ mov(cached_map, FieldOperand(cached_map, WeakCell::kValueOffset));
  // The weak cell may have been cleared.
  __ JumpIfSmi(cached_map, &pop_and_miss);
  DCHECK(!cached_map.is(StoreTransitionDescriptor::MapRegister()));
  __ mov(StoreTransitionDescriptor::MapRegister(), cached_map);

  // Call store transition handler using StoreTransitionDescriptor calling
  // convention.
  __ pop(key);
  __ pop(receiver);
  // Ensure that the transition handler we are going to call has the same
  // number of stack arguments which means that we don't have to adapt them
  // before the call.
  STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
  STATIC_ASSERT(StoreTransitionDescriptor::kStackArgumentsCount == 3);
  STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
                    StoreWithVectorDescriptor::kValue ==
                StoreTransitionDescriptor::kParameterCount -
                    StoreTransitionDescriptor::kValue);
  STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
                    StoreWithVectorDescriptor::kSlot ==
                StoreTransitionDescriptor::kParameterCount -
                    StoreTransitionDescriptor::kSlot);
  STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
                    StoreWithVectorDescriptor::kVector ==
                StoreTransitionDescriptor::kParameterCount -
                    StoreTransitionDescriptor::kVector);
  __ jmp(feedback);

  __ bind(&prepare_next);
  __ add(counter, Immediate(Smi::FromInt(3)));
  __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
  __ j(less, &next_loop);

  // We exhausted our array of map handler pairs.
  __ bind(&pop_and_miss);
  __ pop(key);
  __ pop(receiver);
  __ jmp(miss);

  __ bind(&load_smi_map);
  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
  __ jmp(&compare_map);
}

void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // edx
  Register key = StoreWithVectorDescriptor::NameRegister();           // ecx
  Register value = StoreWithVectorDescriptor::ValueRegister();        // eax
  Register vector = StoreWithVectorDescriptor::VectorRegister();      // ebx
  Register slot = StoreWithVectorDescriptor::SlotRegister();          // edi
  Label miss;

  if (StoreWithVectorDescriptor::kPassLastArgsOnStack) {
    // Current stack layout:
    // - esp[8] -- value
    // - esp[4] -- slot
    // - esp[0] -- return address
    STATIC_ASSERT(StoreDescriptor::kStackArgumentsCount == 2);
    STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
    if (in_frame) {
      __ RecordComment("[ StoreDescriptor -> StoreWithVectorDescriptor");
      // If the vector is not on the stack, then insert the vector beneath
      // return address in order to prepare for calling handler with
      // StoreWithVector calling convention.
      __ push(Operand(esp, 0));
      __ mov(Operand(esp, 4), StoreWithVectorDescriptor::VectorRegister());
      __ RecordComment("]");
    } else {
      __ mov(vector, Operand(esp, 1 * kPointerSize));
    }
    __ mov(slot, Operand(esp, 2 * kPointerSize));
  }

  Register scratch = value;
  __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
                               FixedArray::kHeaderSize));

  // Is it a weak cell?
  Label try_array;
  Label not_array, smi_key, key_okay;
  __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex);
  __ j(not_equal, &try_array);
  HandleMonomorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);

  // Is it a fixed array?
  __ bind(&try_array);
  __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
  __ j(not_equal, &not_array);
  HandlePolymorphicKeyedStoreCase(masm, receiver, key, vector, slot, scratch,
                                  &miss);

  __ bind(&not_array);
  Label try_poly_name;
  __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
  __ j(not_equal, &try_poly_name);

  Handle<Code> megamorphic_stub =
      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
  __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);

  __ bind(&try_poly_name);
  // We might have a name in feedback, and a fixed array in the next slot.
  __ cmp(key, scratch);
  __ j(not_equal, &miss);
  // If the name comparison succeeded, we know we have a fixed array with
  // at least one map/handler pair.
  __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
                               FixedArray::kHeaderSize + kPointerSize));
  HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, false,
                             &miss);

  __ bind(&miss);
  KeyedStoreIC::GenerateMiss(masm);
}


void CallICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(ebx);
  CallICStub stub(isolate(), state());
@@ -485,309 +485,6 @@ MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {

#define __ ACCESS_MASM(masm)


void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch = edi;
  DCHECK(!AreAliased(receiver, key, value, target_map, scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(
        receiver, scratch, allocation_memento_found);
  }

  // Set transitioned map.
  __ mov(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Return address is on the stack.
  DCHECK(receiver.is(edx));
  DCHECK(key.is(ecx));
  DCHECK(value.is(eax));
  DCHECK(target_map.is(ebx));

  Label loop, entry, convert_hole, gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
  __ j(equal, &only_change_map);

  __ push(eax);
  __ push(ebx);
  __ push(esi);

  __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));

  // Allocate new FixedDoubleArray.
  // edx: receiver
  // edi: length of source FixedArray (smi-tagged)
  AllocationFlags flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT);
  __ Allocate(FixedDoubleArray::kHeaderSize, times_8, edi,
              REGISTER_VALUE_IS_SMI, eax, ebx, no_reg, &gc_required, flags);

  // eax: destination FixedDoubleArray
  // edi: number of elements
  // edx: receiver
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         Immediate(masm->isolate()->factory()->fixed_double_array_map()));
  __ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi);
  __ mov(esi, FieldOperand(edx, JSObject::kElementsOffset));
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
  __ mov(ebx, eax);
  __ RecordWriteField(edx,
                      JSObject::kElementsOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));

  // Prepare for conversion loop.
  ExternalReference canonical_the_hole_nan_reference =
      ExternalReference::address_of_the_hole_nan();
  XMMRegister the_hole_nan = xmm1;
  __ movsd(the_hole_nan,
           Operand::StaticVariable(canonical_the_hole_nan_reference));
  __ jmp(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);

  // Restore registers before jumping into runtime.
  __ pop(esi);
  __ pop(ebx);
  __ pop(eax);
  __ jmp(fail);

  // Convert and copy elements
  // esi: source FixedArray
  __ bind(&loop);
  __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize));
  // ebx: current element from source
  // edi: index of current element
  __ JumpIfNotSmi(ebx, &convert_hole);

  // Normal smi, convert it to double and store.
  __ SmiUntag(ebx);
  __ Cvtsi2sd(xmm0, ebx);
  __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
           xmm0);
  __ jmp(&entry);

  // Found hole, store hole_nan_as_double instead.
  __ bind(&convert_hole);

  if (FLAG_debug_code) {
    __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
    __ Assert(equal, kObjectFoundInSmiOnlyArray);
  }

  __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
           the_hole_nan);

  __ bind(&entry);
  __ sub(edi, Immediate(Smi::FromInt(1)));
  __ j(not_sign, &loop);

  // Restore registers.
  __ pop(esi);
  __ pop(ebx);
  __ pop(eax);

  __ bind(&only_change_map);
  // eax: value
  // ebx: target map
  // Set transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Return address is on the stack.
  DCHECK(receiver.is(edx));
  DCHECK(key.is(ecx));
  DCHECK(value.is(eax));
  DCHECK(target_map.is(ebx));

  Label loop, entry, convert_hole, gc_required, only_change_map, success;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
  __ j(equal, &only_change_map);

  __ push(esi);
  __ push(eax);
  __ push(edx);
  __ push(ebx);

  __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));

  // Allocate new FixedArray.
  // ebx: length of source FixedDoubleArray (smi-tagged)
  __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
  __ Allocate(edi, eax, esi, no_reg, &gc_required, NO_ALLOCATION_FLAGS);

  // eax: destination FixedArray
  // ebx: number of elements
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         Immediate(masm->isolate()->factory()->fixed_array_map()));
  __ mov(FieldOperand(eax, FixedArray::kLengthOffset), ebx);
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  Label initialization_loop, initialization_loop_entry;
  __ jmp(&initialization_loop_entry, Label::kNear);
  __ bind(&initialization_loop);
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
         masm->isolate()->factory()->the_hole_value());
  __ bind(&initialization_loop_entry);
  __ sub(ebx, Immediate(Smi::FromInt(1)));
  __ j(not_sign, &initialization_loop);

  __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
  __ jmp(&entry);

  // ebx: target map
  // edx: receiver
  // Set transitioned map.
  __ bind(&only_change_map);
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&success);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(ebx);
  __ pop(edx);
  __ pop(eax);
  __ pop(esi);
  __ jmp(fail);

  // Box doubles into heap numbers.
  // edi: source FixedDoubleArray
  // eax: destination FixedArray
  __ bind(&loop);
  // ebx: index of current element (smi-tagged)
  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
  __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
  __ j(equal, &convert_hole);

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
  // edx: new heap number
  __ movsd(xmm0,
           FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
  __ movsd(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
  __ mov(esi, ebx);
  __ RecordWriteArray(eax,
                      edx,
                      esi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&entry, Label::kNear);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
         masm->isolate()->factory()->the_hole_value());

  __ bind(&entry);
  __ sub(ebx, Immediate(Smi::FromInt(1)));
  __ j(not_sign, &loop);

  __ pop(ebx);
  __ pop(edx);
  // ebx: target map
  // edx: receiver
  // Set transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
  __ RecordWriteField(edx,
                      JSObject::kElementsOffset,
                      eax,
                      edi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Restore registers.
  __ pop(eax);
  __ pop(esi);

  __ bind(&success);
}


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Factory* factory,
                                       Register string,
@@ -810,67 +810,6 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
  cmpb(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
}

void MacroAssembler::CheckFastObjectElements(Register map,
                                             Label* fail,
                                             Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
  j(below_equal, fail, distance);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Immediate(Map::kMaximumBitField2FastHoleyElementValue));
  j(above, fail, distance);
}


void MacroAssembler::CheckFastSmiElements(Register map,
                                          Label* fail,
                                          Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
  j(above, fail, distance);
}


void MacroAssembler::StoreNumberToDoubleElements(
    Register maybe_number,
    Register elements,
    Register key,
    Register scratch1,
    XMMRegister scratch2,
    Label* fail,
    int elements_offset) {
  Label smi_value, done;
  JumpIfSmi(maybe_number, &smi_value, Label::kNear);

  CheckMap(maybe_number,
           isolate()->factory()->heap_number_map(),
           fail,
           DONT_DO_SMI_CHECK);

  // Double value, turn potential sNaN into qNaN.
  Move(scratch2, 1.0);
  mulsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
  jmp(&done, Label::kNear);

  bind(&smi_value);
  // Value is a smi. Convert to a double and store.
  // Preserve original value.
  mov(scratch1, maybe_number);
  SmiUntag(scratch1);
  Cvtsi2sd(scratch2, scratch1);
  bind(&done);
  movsd(FieldOperand(elements, key, times_4,
                     FixedDoubleArray::kHeaderSize - elements_offset),
        scratch2);
}


void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
  cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
}
@@ -2291,28 +2230,6 @@ void MacroAssembler::LoadGlobalProxy(Register dst) {
  mov(dst, ContextOperand(dst, Context::GLOBAL_PROXY_INDEX));
}


void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch,
    Label* no_map_match) {
  DCHECK(IsFastElementsKind(expected_kind));
  DCHECK(IsFastElementsKind(transitioned_kind));

  // Check that the function's map is the same as the expected cached map.
  mov(scratch, NativeContextOperand());
  cmp(map_in_out,
      ContextOperand(scratch, Context::ArrayMapIndex(expected_kind)));
  j(not_equal, no_map_match);

  // Use the transitioned cached map.
  mov(map_in_out,
      ContextOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}


void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the native context from the current context.
  mov(function, NativeContextOperand());
@@ -3162,43 +3079,6 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
  cmp(scratch_reg, Immediate(isolate()->factory()->allocation_memento_map()));
}


void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  DCHECK(!scratch1.is(scratch0));
  Factory* factory = isolate()->factory();
  Register current = scratch0;
  Label loop_again, end;

  // scratch contains elements pointer.
  mov(current, object);
  mov(current, FieldOperand(current, HeapObject::kMapOffset));
  mov(current, FieldOperand(current, Map::kPrototypeOffset));
  cmp(current, Immediate(factory->null_value()));
  j(equal, &end);

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  mov(current, FieldOperand(current, HeapObject::kMapOffset));
  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  CmpInstanceType(current, JS_OBJECT_TYPE);
  j(below, found);
  mov(scratch1, FieldOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  cmp(scratch1, Immediate(DICTIONARY_ELEMENTS));
  j(equal, found);
  mov(current, FieldOperand(current, Map::kPrototypeOffset));
  cmp(current, Immediate(factory->null_value()));
  j(not_equal, &loop_again);

  bind(&end);
}


void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
  DCHECK(!dividend.is(eax));
  DCHECK(!dividend.is(edx));
@@ -260,16 +260,6 @@ class MacroAssembler: public Assembler {
  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst);

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
                                           ElementsKind transitioned_kind,
                                           Register map_in_out,
                                           Register scratch,
                                           Label* no_map_match);

  // Load the global function with the given index.
  void LoadGlobalFunction(int index, Register function);
@@ -391,24 +381,6 @@ class MacroAssembler: public Assembler {
  // Compare instance type for map.
  void CmpInstanceType(Register map, InstanceType type);

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements. Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map, Label* fail,
                               Label::Distance distance = Label::kFar);

  // Check if a map for a JSObject indicates that the object has fast smi only
  // elements. Jump to the specified label if it does not.
  void CheckFastSmiElements(Register map, Label* fail,
                            Label::Distance distance = Label::kFar);

  // Check to see if maybe_number can be stored as a double in
  // FastDoubleElements. If it can, store it at the index specified by key in
  // the FastDoubleElements array elements, otherwise jump to fail.
  void StoreNumberToDoubleElements(Register maybe_number, Register elements,
                                   Register key, Register scratch1,
                                   XMMRegister scratch2, Label* fail,
                                   int offset = 0);

  // Compare an object's map with the specified map.
  void CompareMap(Register obj, Handle<Map> map);
@@ -943,20 +915,6 @@ class MacroAssembler: public Assembler {
                                       Register scratch_reg,
                                       Label* no_memento_found);

  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
                                         Register scratch_reg,
                                         Label* memento_found) {
    Label no_memento_found;
    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
                                    &no_memento_found);
    j(equal, memento_found);
    bind(&no_memento_found);
  }

  // Jumps to found label if a prototype map has dictionary elements.
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
                                        Register scratch1, Label* found);

 private:
  bool generating_stub_;
  bool has_frame_;
@ -219,281 +219,6 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
|
||||
__ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
|
||||
}
|
||||
|
||||
static void KeyedStoreGenerateMegamorphicHelper(
|
||||
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
|
||||
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
|
||||
Register value, Register key, Register receiver, Register receiver_map,
|
||||
Register elements_map, Register elements) {
|
||||
Label transition_smi_elements;
|
||||
Label finish_object_store, non_double_value, transition_double_elements;
|
||||
Label fast_double_without_map_check;
|
||||
|
||||
// Fast case: Do the store, could be either Object or double.
|
||||
__ bind(fast_object);
|
||||
Register scratch = r4;
|
||||
Register address = r5;
|
||||
DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
|
||||
scratch, address));
|
||||
|
||||
if (check_map == kCheckMap) {
|
||||
__ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
|
||||
__ cmp(elements_map,
|
||||
Operand(masm->isolate()->factory()->fixed_array_map()));
|
||||
__ b(ne, fast_double);
|
||||
}
|
||||
|
||||
// HOLECHECK: guards "A[i] = V"
|
||||
// We have to go to the runtime if the current value is the hole because
|
||||
// there may be a callback on the element
|
||||
Label holecheck_passed1;
|
||||
__ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
|
||||
__ ldr(scratch, MemOperand::PointerAddressFromSmiKey(address, key, PreIndex));
|
||||
__ cmp(scratch, Operand(masm->isolate()->factory()->the_hole_value()));
|
||||
__ b(ne, &holecheck_passed1);
|
||||
__ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
|
||||
|
||||
__ bind(&holecheck_passed1);
|
||||
|
||||
// Smi stores don't require further checks.
|
||||
Label non_smi_value;
|
||||
__ JumpIfNotSmi(value, &non_smi_value);
|
||||
|
||||
if (increment_length == kIncrementLength) {
|
||||
// Add 1 to receiver->length.
|
||||
__ add(scratch, key, Operand(Smi::FromInt(1)));
|
||||
__ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
|
||||
}
|
||||
// It's irrelevant whether array is smi-only or not when writing a smi.
|
||||
__ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
|
||||
__ str(value, MemOperand::PointerAddressFromSmiKey(address, key));
|
||||
__ Ret();
|
||||
|
||||
__ bind(&non_smi_value);
|
||||
// Escape to elements kind transition case.
|
||||
__ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
|
||||
|
||||
// Fast elements array, store the value to the elements backing store.
|
||||
__ bind(&finish_object_store);
|
||||
if (increment_length == kIncrementLength) {
|
||||
// Add 1 to receiver->length.
|
||||
__ add(scratch, key, Operand(Smi::FromInt(1)));
|
||||
__ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
|
||||
}
|
||||
__ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
|
||||
__ add(address, address, Operand::PointerOffsetFromSmiKey(key));
|
||||
__ str(value, MemOperand(address));
|
||||
// Update write barrier for the elements array address.
|
||||
__ mov(scratch, value); // Preserve the value which is returned.
|
||||
__ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved,
|
||||
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
|
||||
__ Ret();
|
||||
|
||||
__ bind(fast_double);
|
||||
if (check_map == kCheckMap) {
|
||||
// Check for fast double array case. If this fails, call through to the
|
||||
// runtime.
|
||||
__ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
|
||||
__ b(ne, slow);
|
||||
}
|
||||
|
||||
// HOLECHECK: guards "A[i] double hole?"
|
||||
// We have to see if the double version of the hole is present. If so
|
||||
// go to the runtime.
|
||||
__ add(address, elements,
|
||||
Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)) -
|
||||
kHeapObjectTag));
|
||||
__ ldr(scratch, MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex));
|
||||
__ cmp(scratch, Operand(kHoleNanUpper32));
|
||||
__ b(ne, &fast_double_without_map_check);
|
||||
__ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
|
||||
|
||||
__ bind(&fast_double_without_map_check);
|
||||
__ StoreNumberToDoubleElements(value, key, elements, scratch, d0,
|
||||
&transition_double_elements);
|
||||
if (increment_length == kIncrementLength) {
|
||||
// Add 1 to receiver->length.
|
||||
__ add(scratch, key, Operand(Smi::FromInt(1)));
|
||||
__ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
|
||||
}
|
||||
__ Ret();
|
||||
|
||||
__ bind(&transition_smi_elements);
|
||||
// Transition the array appropriately depending on the value type.
|
||||
__ ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
|
||||
__ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
|
||||
__ b(ne, &non_double_value);
|
||||
|
||||
// Value is a double. Transition FAST_SMI_ELEMENTS ->
|
||||
// FAST_DOUBLE_ELEMENTS and complete the store.
|
||||
__ LoadTransitionedArrayMapConditional(
|
||||
FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
|
||||
AllocationSiteMode mode =
|
||||
AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
|
||||
ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
|
||||
receiver_map, mode, slow);
|
||||
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
|
||||
__ jmp(&fast_double_without_map_check);
|
||||
|
||||
__ bind(&non_double_value);
|
||||
// Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
|
||||
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
|
||||
receiver_map, scratch, slow);
|
||||
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
|
||||
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
|
||||
masm, receiver, key, value, receiver_map, mode, slow);
|
||||
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
|
||||
__ jmp(&finish_object_store);
|
||||
|
||||
__ bind(&transition_double_elements);
|
||||
// Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
|
||||
// HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
|
||||
// transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
|
||||
__ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
|
||||
receiver_map, scratch, slow);
|
||||
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
|
||||
ElementsTransitionGenerator::GenerateDoubleToObject(
|
||||
masm, receiver, key, value, receiver_map, mode, slow);
|
||||
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
|
||||
__ jmp(&finish_object_store);
|
||||
}


void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                       LanguageMode language_mode) {
  // ---------- S t a t e --------------
  //  -- r0     : value
  //  -- r1     : key
  //  -- r2     : receiver
  //  -- lr     : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array, maybe_name_key, miss;

  // Register usage.
  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(receiver.is(r1));
  DCHECK(key.is(r2));
  DCHECK(value.is(r0));
  Register receiver_map = r3;
  Register elements_map = r6;
  Register elements = r9;  // Elements array of the receiver.
  // r4 and r5 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &maybe_name_key);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks.
  // The generic stub does not perform map checks.
  __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
  __ b(ne, &slow);
  // Check if the object is a JS array or not.
  __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ cmp(r4, Operand(JS_ARRAY_TYPE));
  __ b(eq, &array);
  // Check that the object is some kind of JS object EXCEPT JS Value type. In
  // the case that the object is a value-wrapper object, we enter the runtime
  // system to make sure that indexing into string objects works as intended.
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  __ cmp(r4, Operand(JS_OBJECT_TYPE));
  __ b(lo, &slow);

  // Object case: Check key against length in the elements array.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(lo, &fast_object);

  // Slow case, handle jump to runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // r0: value.
  // r1: key.
  // r2: receiver.
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
  // Never returns to here.

  __ bind(&maybe_name_key);
  __ ldr(r4, FieldMemOperand(key, HeapObject::kMapOffset));
  __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
  __ JumpIfNotUniqueNameInstanceType(r4, &slow);

  // We use register r8, because otherwise probing the megamorphic stub cache
  // would require pushing temporaries on the stack.
  // TODO(mvstanton): quit using register r8 when
  // FLAG_enable_embedded_constant_pool is turned on.
  DCHECK(!FLAG_enable_embedded_constant_pool);
  Register temporary2 = r8;
  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Register vector = StoreWithVectorDescriptor::VectorRegister();
  Register slot = StoreWithVectorDescriptor::SlotRegister();

  DCHECK(!AreAliased(vector, slot, r5, temporary2, r6, r9));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ mov(slot, Operand(Smi::FromInt(slot_index)));

  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, r5,
                                                     temporary2, r6, r9);
  // Cache miss.
  __ b(&miss);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // Condition code from comparing key and array length is still available.
  __ b(ne, &slow);  // Only support writing to array[array.length].
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(hs, &slow);
  __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
  __ b(ne, &check_if_double_array);
  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ cmp(elements_map,
         Operand(masm->isolate()->factory()->fixed_double_array_map()));
  __ b(ne, &slow);
  __ jmp(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.
  __ bind(&array);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(hs, &extra);

  KeyedStoreGenerateMegamorphicHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
                                      &fast_double_grow, &slow, kDontCheckMap,
                                      kIncrementLength, value, key, receiver,
                                      receiver_map, elements_map, elements);

  __ bind(&miss);
  GenerateMiss(masm);
}
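Flattened into plain C++, the dispatch order generated above looks roughly like the following sketch; all names are local to the sketch, smis are assumed untagged, and "capacity" stands in for the backing-store length, so this is a simplified model rather than V8 code:

#include <cstdint>

// Sketch of the dispatch order in KeyedStoreIC::GenerateMegamorphic above;
// illustrative only, not the V8 API.
enum class Path { kFastStore, kGrowStore, kStubCacheProbe, kRuntimeSlow };

Path Dispatch(bool key_is_smi, bool key_is_unique_name,
              bool receiver_needs_access_check, bool receiver_is_js_object,
              uint32_t key, uint32_t length, uint32_t capacity) {
  if (!key_is_smi) {
    // Non-smi keys stay on the fast path only if they are unique names,
    // in which case the megamorphic stub cache is probed.
    return key_is_unique_name ? Path::kStubCacheProbe : Path::kRuntimeSlow;
  }
  if (receiver_needs_access_check || !receiver_is_js_object)
    return Path::kRuntimeSlow;
  if (key < length) return Path::kFastStore;  // in-bounds store
  if (key == length && key < capacity)
    return Path::kGrowStore;  // append exactly one element
  return Path::kRuntimeSlow;  // would create a hole or needs reallocation
}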


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

@ -1,33 +0,0 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM

#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)


void PropertyICCompiler::GenerateRuntimeSetProperty(
    MacroAssembler* masm, LanguageMode language_mode) {
  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
          StoreDescriptor::ValueRegister());

  __ mov(r0, Operand(Smi::FromInt(language_mode)));
  __ Push(r0);

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kSetProperty);
}


#undef __
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM
@ -1,157 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/interface-descriptors.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
                       StubCache::Table table, Register receiver, Register name,
                       // The offset is scaled by 4, based on
                       // kCacheIndexShift, which is two bits.
                       Register offset, Register scratch, Register scratch2,
                       Register offset_scratch) {
  ExternalReference key_offset(stub_cache->key_reference(table));
  ExternalReference value_offset(stub_cache->value_reference(table));
  ExternalReference map_offset(stub_cache->map_reference(table));

  uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
  uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
  uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());

  // Check the relative positions of the address fields.
  DCHECK(value_off_addr > key_off_addr);
  DCHECK((value_off_addr - key_off_addr) % 4 == 0);
  DCHECK((value_off_addr - key_off_addr) < (256 * 4));
  DCHECK(map_off_addr > key_off_addr);
  DCHECK((map_off_addr - key_off_addr) % 4 == 0);
  DCHECK((map_off_addr - key_off_addr) < (256 * 4));

  Label miss;
  Register base_addr = scratch;
  scratch = no_reg;

  // Multiply by 3 because there are 3 fields per entry (name, code, map).
  __ add(offset_scratch, offset, Operand(offset, LSL, 1));

  // Calculate the base address of the entry.
  __ add(base_addr, offset_scratch, Operand(key_offset));

  // Check that the key in the entry matches the name.
  __ ldr(ip, MemOperand(base_addr, 0));
  __ cmp(name, ip);
  __ b(ne, &miss);

  // Check the map matches.
  __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
  __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ cmp(ip, scratch2);
  __ b(ne, &miss);

  // Get the code entry from the cache.
  Register code = scratch2;
  scratch2 = no_reg;
  __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));

#ifdef DEBUG
  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
    __ jmp(&miss);
  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
    __ jmp(&miss);
  }
#endif

  // Jump to the first instruction in the code stub.
  __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag));

  // Miss: fall through.
  __ bind(&miss);
}

void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
                              Register name, Register scratch, Register extra,
                              Register extra2, Register extra3) {
  Label miss;

  // Make sure that the code is valid. The multiplying code relies on the
  // entry size being 12.
  DCHECK(sizeof(Entry) == 12);

  // Make sure that there are no register conflicts.
  DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));

  // Check that the scratch, extra and extra2 registers are valid.
  DCHECK(!scratch.is(no_reg));
  DCHECK(!extra.is(no_reg));
  DCHECK(!extra2.is(no_reg));
  DCHECK(!extra3.is(no_reg));

#ifdef DEBUG
  // If vector-based ics are in use, ensure that scratch, extra, extra2 and
  // extra3 don't conflict with the vector and slot registers, which need
  // to be preserved for a handler call or miss.
  if (IC::ICUseVector(ic_kind_)) {
    Register vector, slot;
    if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
      vector = StoreWithVectorDescriptor::VectorRegister();
      slot = StoreWithVectorDescriptor::SlotRegister();
    } else {
      DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
      vector = LoadWithVectorDescriptor::VectorRegister();
      slot = LoadWithVectorDescriptor::SlotRegister();
    }
    DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
  }
#endif

  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
                      extra3);

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &miss);

  // Get the map of the receiver and compute the hash.
  __ ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
  __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ add(scratch, scratch, Operand(ip));
  __ eor(scratch, scratch, Operand(kPrimaryMagic));
  __ mov(ip, Operand(kPrimaryTableSize - 1));
  __ and_(scratch, scratch, Operand(ip, LSL, kCacheIndexShift));

  // Probe the primary table.
  ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
             extra3);

  // Primary miss: Compute the hash for the secondary probe.
  __ sub(scratch, scratch, Operand(name));
  __ add(scratch, scratch, Operand(kSecondaryMagic));
  __ mov(ip, Operand(kSecondaryTableSize - 1));
  __ and_(scratch, scratch, Operand(ip, LSL, kCacheIndexShift));

  // Probe the secondary table.
  ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
             extra3);

  // Cache miss: Fall through and let the caller handle the miss by
  // entering the runtime system.
  __ bind(&miss);
  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
                      extra3);
}
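The primary/secondary probe hashing emitted above corresponds to this standalone sketch; the magic and size constants are placeholders with the same roles as kPrimaryMagic, kSecondaryMagic, kPrimaryTableSize, kSecondaryTableSize and kCacheIndexShift, not V8's actual values:

#include <cstdint>

constexpr uint32_t kPrimaryMagicSketch = 0x3d532433;    // placeholder
constexpr uint32_t kSecondaryMagicSketch = 0xb16ca6e5;  // placeholder
constexpr uint32_t kPrimaryTableSizeSketch = 2048;      // placeholder
constexpr uint32_t kSecondaryTableSizeSketch = 512;     // placeholder
constexpr int kCacheIndexShiftSketch = 2;               // matches the comment above

// Primary probe: mix the name's hash with the receiver map's address bits
// and mask down to a table offset (pre-scaled by kCacheIndexShift, exactly
// as the and_/LSL sequence above does).
uint32_t PrimaryOffset(uint32_t name_hash, uint32_t map_bits) {
  uint32_t h = (name_hash + map_bits) ^ kPrimaryMagicSketch;
  return h & ((kPrimaryTableSizeSketch - 1) << kCacheIndexShiftSketch);
}

// Secondary probe: derived from the primary offset and the name, so two
// entries that collide in the primary table usually land in different
// secondary slots.
uint32_t SecondaryOffset(uint32_t primary, uint32_t name_bits) {
  uint32_t h = primary - name_bits + kSecondaryMagicSketch;
  return h & ((kSecondaryTableSizeSketch - 1) << kCacheIndexShiftSketch);
}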


#undef __
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM
@ -197,266 +197,6 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}

static void KeyedStoreGenerateMegamorphicHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
                     x10, x11));

  Label transition_smi_elements;
  Label transition_double_elements;
  Label fast_double_without_map_check;
  Label non_double_value;
  Label finish_store;

  __ Bind(fast_object);
  if (check_map == kCheckMap) {
    __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ Cmp(elements_map,
           Operand(masm->isolate()->factory()->fixed_array_map()));
    __ B(ne, fast_double);
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed;
  __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Ldr(x11, MemOperand(x10));
  __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
  __ bind(&holecheck_passed);

  // Smi stores don't require further checks.
  __ JumpIfSmi(value, &finish_store);

  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);

  __ Bind(&finish_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Add(x10, key, Smi::FromInt(1));
    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }

  Register address = x11;
  __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Str(value, MemOperand(address));

  Label dont_record_write;
  __ JumpIfSmi(value, &dont_record_write);

  // Update the write barrier for the elements array address.
  __ Mov(x10, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, x10, kLRHasNotBeenSaved, kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

  __ Bind(&dont_record_write);
  __ Ret();


  __ Bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for the fast double array case. If this fails, call through to
    // the runtime.
    __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so, go
  // to the runtime.
  __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Ldr(x11, MemOperand(x10));
  __ CompareAndBranch(x11, kHoleNanInt64, ne, &fast_double_without_map_check);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);

  __ Bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, x10, d0,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Add(x10, key, Smi::FromInt(1));
    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();


  __ Bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
  __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, x10, x11, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&fast_double_without_map_check);

  __ Bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, x10, x11, slow);

  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);

  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&finish_store);

  __ Bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, x10, x11, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&finish_store);
}
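The JumpIfSmi/RecordWrite pairing in the smi and object store paths above reflects the generational write-barrier discipline: writing a heap pointer into an elements array must record the slot, while writing a smi never needs to. A hedged sketch of that rule (names are illustrative, not V8 API):

#include <cstdint>

struct Slot { void** address; };

// Smis carry tag bit 0 == 0, so they are immediates rather than heap
// objects; this mirrors the JumpIfSmi(value, &dont_record_write) exit.
inline bool IsSmiEncoded(intptr_t value) { return (value & 1) == 0; }

template <typename RememberSlotFn>
void StoreWithBarrier(Slot slot, intptr_t value, RememberSlotFn remember) {
  *slot.address = reinterpret_cast<void*>(value);
  // Only heap pointers need a remembered-set entry; smi stores skip the
  // barrier entirely, which is why the code above checks the value twice.
  if (!IsSmiEncoded(value)) remember(slot);
}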


void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                       LanguageMode language_mode) {
  ASM_LOCATION("KeyedStoreIC::GenerateMegamorphic");
  Label slow;
  Label array;
  Label fast_object;
  Label extra;
  Label fast_object_grow;
  Label fast_double_grow;
  Label fast_double;
  Label maybe_name_key;
  Label miss;

  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(receiver.is(x1));
  DCHECK(key.is(x2));
  DCHECK(value.is(x0));

  Register receiver_map = x3;
  Register elements = x4;
  Register elements_map = x5;

  __ JumpIfNotSmi(key, &maybe_name_key);
  __ JumpIfSmi(receiver, &slow);
  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));

  // Check that the receiver does not require access checks.
  // The generic stub does not perform map checks.
  __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ TestAndBranchIfAnySet(x10, (1 << Map::kIsAccessCheckNeeded), &slow);

  // Check if the object is a JS array or not.
  Register instance_type = x10;
  __ CompareInstanceType(receiver_map, instance_type, JS_ARRAY_TYPE);
  __ B(eq, &array);
  // Check that the object is some kind of JS object EXCEPT JS Value type. In
  // the case that the object is a value-wrapper object, we enter the runtime
  // system to make sure that indexing into string objects works as intended.
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  __ Cmp(instance_type, JS_OBJECT_TYPE);
  __ B(lo, &slow);

  // Object case: Check key against length in the elements array.
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(hi, &fast_object);


  __ Bind(&slow);
  // Slow case, handle jump to runtime.
  // Live values:
  //  x0: value
  //  x1: key
  //  x2: receiver
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
  // Never returns to here.

  __ bind(&maybe_name_key);
  __ Ldr(x10, FieldMemOperand(key, HeapObject::kMapOffset));
  __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
  __ JumpIfNotUniqueNameInstanceType(x10, &slow);

  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Register vector = StoreWithVectorDescriptor::VectorRegister();
  Register slot = StoreWithVectorDescriptor::SlotRegister();
  DCHECK(!AreAliased(vector, slot, x5, x6, x7, x8));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ Mov(slot, Operand(Smi::FromInt(slot_index)));

  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, x5,
                                                     x6, x7, x8);
  // Cache miss.
  __ B(&miss);

  __ Bind(&extra);
  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].

  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(ls, &slow);

  __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
  __ B(eq, &fast_object_grow);
  __ Cmp(elements_map,
         Operand(masm->isolate()->factory()->fixed_double_array_map()));
  __ B(eq, &fast_double_grow);
  __ B(&slow);


  __ Bind(&array);
  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.

  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(eq, &extra);  // We can handle the case where we are appending 1 element.
  __ B(lo, &slow);

  KeyedStoreGenerateMegamorphicHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
                                      &fast_double_grow, &slow, kDontCheckMap,
                                      kIncrementLength, value, key, receiver,
                                      receiver_map, elements_map, elements);

  __ bind(&miss);
  GenerateMiss(masm);
}
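The Ldrsw/UntagSmi pairs above work because 64-bit V8 boxes a small integer v as v << 32; a minimal sketch of that tagging scheme and the resulting bounds check (kSmiShift matching the arm64 configuration is an assumption of the sketch):

#include <cstdint>

// On arm64 V8, smis occupy the upper 32 bits of a tagged word, so
// untagging is an arithmetic shift right by 32 (what Operand::UntagSmi
// and Ldrsw on the untagged half express above).
constexpr int kSmiShiftSketch = 32;

inline int64_t TagSmi(int32_t value) {
  return static_cast<int64_t>(value) << kSmiShiftSketch;
}
inline int64_t UntagSmi(int64_t tagged) { return tagged >> kSmiShiftSketch; }

// The bounds check in GenerateMegamorphic compares untagged quantities:
// the length (already loaded untagged) against the untagged key.
inline bool InBounds(int64_t tagged_key, int32_t length) {
  return UntagSmi(tagged_key) < length;
}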


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

@ -1,33 +0,0 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM64

#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

void PropertyICCompiler::GenerateRuntimeSetProperty(
    MacroAssembler* masm, LanguageMode language_mode) {
  ASM_LOCATION("PropertyICCompiler::GenerateRuntimeSetProperty");

  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
          StoreDescriptor::ValueRegister());

  __ Mov(x10, Smi::FromInt(language_mode));
  __ Push(x10);

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kSetProperty);
}

#undef __
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM64
@ -1,156 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM64

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/interface-descriptors.h"

namespace v8 {
namespace internal {


#define __ ACCESS_MASM(masm)


// Probe the primary or secondary table.
// If the entry is found in the cache, the generated code jumps to the first
// instruction of the stub in the cache.
// If there is a miss, the code falls through.
//
// 'receiver', 'name' and 'offset' registers are preserved on miss.
static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
                       StubCache::Table table, Register receiver, Register name,
                       // The offset is scaled by 4, based on
                       // kCacheIndexShift, which is two bits.
                       Register offset, Register scratch, Register scratch2,
                       Register scratch3) {
  // Some code below relies on the fact that the Entry struct contains
  // 3 pointers (name, code, map).
  STATIC_ASSERT(sizeof(StubCache::Entry) == (3 * kPointerSize));

  ExternalReference key_offset(stub_cache->key_reference(table));
  ExternalReference value_offset(stub_cache->value_reference(table));
  ExternalReference map_offset(stub_cache->map_reference(table));

  uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
  uintptr_t value_off_addr =
      reinterpret_cast<uintptr_t>(value_offset.address());
  uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());

  Label miss;

  DCHECK(!AreAliased(name, offset, scratch, scratch2, scratch3));

  // Multiply by 3 because there are 3 fields per entry.
  __ Add(scratch3, offset, Operand(offset, LSL, 1));

  // Calculate the base address of the entry.
  __ Mov(scratch, key_offset);
  __ Add(
      scratch, scratch,
      Operand(scratch3, LSL, kPointerSizeLog2 - StubCache::kCacheIndexShift));

  // Check that the key in the entry matches the name.
  __ Ldr(scratch2, MemOperand(scratch));
  __ Cmp(name, scratch2);
  __ B(ne, &miss);

  // Check the map matches.
  __ Ldr(scratch2, MemOperand(scratch, map_off_addr - key_off_addr));
  __ Ldr(scratch3, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Cmp(scratch2, scratch3);
  __ B(ne, &miss);

  // Get the code entry from the cache.
  __ Ldr(scratch, MemOperand(scratch, value_off_addr - key_off_addr));

#ifdef DEBUG
  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
    __ B(&miss);
  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
    __ B(&miss);
  }
#endif

  // Jump to the first instruction in the code stub.
  __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
  __ Br(scratch);

  // Miss: fall through.
  __ Bind(&miss);
}
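Both ProbeTable implementations rely on the entry layout asserted above: three pointers per entry, addressed by tripling the scaled index. A small sketch with illustrative stand-in types rather than V8's:

#include <cstdint>

struct EntrySketch {
  void* key;    // the property name
  void* value;  // the handler code object
  void* map;    // the receiver map
};
static_assert(sizeof(EntrySketch) == 3 * sizeof(void*),
              "the probe code assumes exactly three pointer-sized fields");

// "Add(scratch3, offset, Operand(offset, LSL, 1))" above is offset * 3;
// the later shift scales the tripled index to bytes. scaled_index already
// carries a factor of 1 << cache_index_shift from the hash masking, so
// the shift only makes up the difference to full pointer scaling.
inline uintptr_t EntryByteOffset(uintptr_t scaled_index,
                                 int pointer_size_log2,
                                 int cache_index_shift) {
  uintptr_t tripled = scaled_index + (scaled_index << 1);  // * 3
  return tripled << (pointer_size_log2 - cache_index_shift);
}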

void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
                              Register name, Register scratch, Register extra,
                              Register extra2, Register extra3) {
  Label miss;

  // Make sure that there are no register conflicts.
  DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));

  // Make sure that the extra and extra2 registers are valid.
  DCHECK(!extra.is(no_reg));
  DCHECK(!extra2.is(no_reg));
  DCHECK(!extra3.is(no_reg));

#ifdef DEBUG
  // If vector-based ics are in use, ensure that scratch, extra, extra2 and
  // extra3 don't conflict with the vector and slot registers, which need
  // to be preserved for a handler call or miss.
  if (IC::ICUseVector(ic_kind_)) {
    Register vector, slot;
    if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
      vector = StoreWithVectorDescriptor::VectorRegister();
      slot = StoreWithVectorDescriptor::SlotRegister();
    } else {
      DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
      vector = LoadWithVectorDescriptor::VectorRegister();
      slot = LoadWithVectorDescriptor::SlotRegister();
    }
    DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
  }
#endif

  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
                      extra3);

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &miss);

  // Compute the hash for the primary table.
  __ Ldr(scratch.W(), FieldMemOperand(name, Name::kHashFieldOffset));
  __ Ldr(extra, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Add(scratch, scratch, extra);
  __ Eor(scratch, scratch, kPrimaryMagic);
  __ And(scratch, scratch,
         Operand((kPrimaryTableSize - 1) << kCacheIndexShift));

  // Probe the primary table.
  ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
             extra3);

  // Primary miss: Compute the hash for the secondary table.
  __ Sub(scratch, scratch, Operand(name));
  __ Add(scratch, scratch, Operand(kSecondaryMagic));
  __ And(scratch, scratch,
         Operand((kSecondaryTableSize - 1) << kCacheIndexShift));

  // Probe the secondary table.
  ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
             extra3);

  // Cache miss: Fall through and let the caller handle the miss by
  // entering the runtime system.
  __ Bind(&miss);
  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
                      extra3);
}
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM64
@ -1,45 +0,0 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_IA32

#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)


void PropertyICCompiler::GenerateRuntimeSetProperty(
    MacroAssembler* masm, LanguageMode language_mode) {
  typedef StoreWithVectorDescriptor Descriptor;
  STATIC_ASSERT(Descriptor::kStackArgumentsCount == 3);
  // ----------- S t a t e -------------
  //  -- esp[12] : value
  //  -- esp[8]  : slot
  //  -- esp[4]  : vector
  //  -- esp[0]  : return address
  // -----------------------------------
  __ LoadParameterFromStack<Descriptor>(Descriptor::ValueRegister(),
                                        Descriptor::kValue);

  __ mov(Operand(esp, 12), Descriptor::ReceiverRegister());
  __ mov(Operand(esp, 8), Descriptor::NameRegister());
  __ mov(Operand(esp, 4), Descriptor::ValueRegister());
  __ pop(ebx);
  __ push(Immediate(Smi::FromInt(language_mode)));
  __ push(ebx);  // return address

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kSetProperty);
}


#undef __
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_IA32
@ -120,251 +120,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
  __ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
}

static void KeyedStoreGenerateMegamorphicHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register key = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  DCHECK(receiver.is(edx));
  DCHECK(key.is(ecx));
  DCHECK(value.is(eax));
  // key is a smi.
  // ebx: FixedArray receiver->elements
  // edi: receiver map
  // Fast case: Do the store; it could be either an Object or a double.
  __ bind(fast_object);
  if (check_map == kCheckMap) {
    __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
    __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
    __ j(not_equal, fast_double);
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed1;
  __ cmp(FixedArrayElementOperand(ebx, key),
         masm->isolate()->factory()->the_hole_value());
  __ j(not_equal, &holecheck_passed1);
  __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow);
  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(FieldOperand(receiver, JSArray::kLengthOffset),
           Immediate(Smi::FromInt(1)));
  }
  // It's irrelevant whether the array is smi-only or not when writing a smi.
  __ mov(FixedArrayElementOperand(ebx, key), value);
  __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
  __ CheckFastObjectElements(edi, &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(FieldOperand(receiver, JSArray::kLengthOffset),
           Immediate(Smi::FromInt(1)));
  }
  __ mov(FixedArrayElementOperand(ebx, key), value);
  // Update the write barrier for the elements array address.
  __ mov(edx, value);  // Preserve the value which is returned.
  __ RecordWriteArray(ebx, edx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for the fast double array case. If this fails, call through to
    // the runtime.
    __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
    __ j(not_equal, slow);
    // If the value is a number, store it as a double in the
    // FastDoubleElements array.
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so, go
  // to the runtime.
  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
  __ cmp(FieldOperand(ebx, key, times_4, offset), Immediate(kHoleNanUpper32));
  __ j(not_equal, &fast_double_without_map_check);
  __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow);
  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, ebx, key, edi, xmm0,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(FieldOperand(receiver, JSArray::kLengthOffset),
           Immediate(Smi::FromInt(1)));
  }
  __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);

  __ bind(&transition_smi_elements);
  __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));

  // Transition the array appropriately depending on the value type.
  __ CheckMap(value, masm->isolate()->factory()->heap_number_map(),
              &non_double_value, DONT_DO_SMI_CHECK);

  // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
  // and complete the store.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                         FAST_DOUBLE_ELEMENTS, ebx, edi, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   ebx, mode, slow);
  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, ebx,
                                         edi, slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, ebx, mode, slow);
  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         ebx, edi, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(masm, receiver, key,
                                                      value, ebx, mode, slow);
  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
}
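The hole check above compares only the upper 32 bits of the stored double against kHoleNanUpper32, because V8 reserves one specific NaN bit pattern to mark holes in FAST_DOUBLE_ELEMENTS backing stores and ordinary arithmetic never produces it. A hedged sketch (the constant below is a placeholder standing in for V8's value):

#include <cstdint>
#include <cstring>

constexpr uint32_t kHoleNanUpper32Sketch = 0xFFF7FFFF;  // placeholder

inline bool IsHoleDouble(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  // Comparing only the upper half suffices, which is why the ia32 code
  // above can test a single 32-bit word at offset kHeaderSize +
  // sizeof(kHoleNanLower32).
  return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32Sketch;
}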


void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                       LanguageMode language_mode) {
  typedef StoreWithVectorDescriptor Descriptor;
  // Return address is on the stack.
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array, maybe_name_key, miss;
  Register receiver = Descriptor::ReceiverRegister();
  Register key = Descriptor::NameRegister();
  DCHECK(receiver.is(edx));
  DCHECK(key.is(ecx));

  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map from the receiver.
  __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks.
  // The generic stub does not perform map checks.
  __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
            Immediate(1 << Map::kIsAccessCheckNeeded));
  __ j(not_zero, &slow);

  __ LoadParameterFromStack<Descriptor>(Descriptor::ValueRegister(),
                                        Descriptor::kValue);

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &maybe_name_key);
  __ CmpInstanceType(edi, JS_ARRAY_TYPE);
  __ j(equal, &array);
  // Check that the object is some kind of JS object EXCEPT JS Value type. In
  // the case that the object is a value-wrapper object, we enter the runtime
  // system to make sure that indexing into string objects works as intended.
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  __ CmpInstanceType(edi, JS_OBJECT_TYPE);
  __ j(below, &slow);

  // Object case: Check key against length in the elements array.
  // Key is a smi.
  // edi: receiver map
  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset));
  __ j(below, &fast_object);

  // Slow case: call runtime.
  __ bind(&slow);
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
  // Never returns to here.

  __ bind(&maybe_name_key);
  __ mov(ebx, FieldOperand(key, HeapObject::kMapOffset));
  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
  __ JumpIfNotUniqueNameInstanceType(ebx, &slow);

  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, edi,
                                                     no_reg);

  // Cache miss.
  __ jmp(&miss);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // receiver is a JSArray.
  // key is a smi.
  // ebx: receiver->elements, a FixedArray
  // edi: receiver map
  // flags: compare (key, receiver.length())
  // Do not leave holes in the array:
  __ j(not_equal, &slow);
  __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset));
  __ j(above_equal, &slow);
  __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
  __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
  __ j(not_equal, &check_if_double_array);
  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
  __ j(not_equal, &slow);
  __ jmp(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.
  __ bind(&array);
  // receiver is a JSArray.
  // key is a smi.
  // edi: receiver map
  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array and fall through to the
  // common store code.
  __ cmp(key, FieldOperand(receiver, JSArray::kLengthOffset));  // Compare smis.
  __ j(above_equal, &extra);

  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object, &fast_double, &slow,
                                      kCheckMap, kDontIncrementLength);
  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
                                      &fast_double_grow, &slow, kDontCheckMap,
                                      kIncrementLength);

  __ bind(&miss);
  GenerateMiss(masm);
}

void LoadIC::GenerateNormal(MacroAssembler* masm) {
  Register dictionary = eax;
  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));

@ -1,185 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_IA32

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/interface-descriptors.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
                       StubCache::Table table, Register name, Register receiver,
                       // The offset is scaled by 4, based on
                       // kCacheIndexShift, which is two bits.
                       Register offset, Register extra) {
  ExternalReference key_offset(stub_cache->key_reference(table));
  ExternalReference value_offset(stub_cache->value_reference(table));
  ExternalReference map_offset(stub_cache->map_reference(table));

  Label miss;
  Code::Kind ic_kind = stub_cache->ic_kind();
  bool is_vector_store =
      IC::ICUseVector(ic_kind) &&
      (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);

  // Multiply by 3 because there are 3 fields per entry (name, code, map).
  __ lea(offset, Operand(offset, offset, times_2, 0));

  if (extra.is_valid()) {
    // Get the code entry from the cache.
    __ mov(extra, Operand::StaticArray(offset, times_1, value_offset));

    // Check that the key in the entry matches the name.
    __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
    __ j(not_equal, &miss);

    // Check the map matches.
    __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
    __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
    __ j(not_equal, &miss);

#ifdef DEBUG
    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
      __ jmp(&miss);
    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
      __ jmp(&miss);
    }
#endif

    if (is_vector_store) {
      // The value, vector and slot were passed to the IC on the stack and
      // they are still there. So we can just jump to the handler.
      DCHECK(extra.is(StoreWithVectorDescriptor::SlotRegister()));
      __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
      __ jmp(extra);
    } else {
      // The vector and slot were pushed onto the stack before starting the
      // probe, and need to be dropped before calling the handler.
      __ pop(LoadWithVectorDescriptor::VectorRegister());
      __ pop(LoadDescriptor::SlotRegister());
      __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
      __ jmp(extra);
    }

    __ bind(&miss);
  } else {
    DCHECK(ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);

    // Save the offset on the stack.
    __ push(offset);

    // Check that the key in the entry matches the name.
    __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
    __ j(not_equal, &miss);

    // Check the map matches.
    __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
    __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
    __ j(not_equal, &miss);

    // Restore the offset register.
    __ mov(offset, Operand(esp, 0));

    // Get the code entry from the cache.
    __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));

#ifdef DEBUG
    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
      __ jmp(&miss);
    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
      __ jmp(&miss);
    }
#endif

    // Restore the offset and re-load the code entry from the cache.
    __ pop(offset);
    __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));

    // Jump to the first instruction in the code stub.
    if (is_vector_store) {
      DCHECK(offset.is(StoreWithVectorDescriptor::SlotRegister()));
    }
    __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
    __ jmp(offset);

    // Pop at miss.
    __ bind(&miss);
    __ pop(offset);
  }
}

void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
                              Register name, Register scratch, Register extra,
                              Register extra2, Register extra3) {
  Label miss;

  // Assert that the code is valid. The multiplying code relies on the entry
  // size being 12.
  DCHECK(sizeof(Entry) == 12);

  // Assert that there are no register conflicts.
  DCHECK(!scratch.is(receiver));
  DCHECK(!scratch.is(name));
  DCHECK(!extra.is(receiver));
  DCHECK(!extra.is(name));
  DCHECK(!extra.is(scratch));

  // Assert that the scratch and extra registers are valid, and that extra2/3
  // are unused.
  DCHECK(!scratch.is(no_reg));
  DCHECK(extra2.is(no_reg));
  DCHECK(extra3.is(no_reg));

  Register offset = scratch;
  scratch = no_reg;

  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &miss);

  // Get the map of the receiver and compute the hash.
  __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
  __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
  __ xor_(offset, kPrimaryMagic);
  // We mask out the last two bits because they are not part of the hash and
  // they are always 01 for maps. Also in the two 'and' instructions below.
  __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
  // ProbeTable expects the offset to be pointer scaled, which it is, because
  // the heap object tag size is 2 and the pointer size log 2 is also 2.
  DCHECK(kCacheIndexShift == kPointerSizeLog2);

  // Probe the primary table.
  ProbeTable(this, masm, kPrimary, name, receiver, offset, extra);

  // Primary miss: Compute the hash for the secondary probe.
  __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
  __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
  __ xor_(offset, kPrimaryMagic);
  __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
  __ sub(offset, name);
  __ add(offset, Immediate(kSecondaryMagic));
  __ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);

  // Probe the secondary table.
  ProbeTable(this, masm, kSecondary, name, receiver, offset, extra);

  // Cache miss: Fall through and let the caller handle the miss by
  // entering the runtime system.
  __ bind(&miss);
  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
}


#undef __
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_IA32
@ -20,13 +20,6 @@ class PropertyICCompiler : public PropertyAccessCompiler {
      MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
      CodeHandleList* handlers, KeyedAccessStoreMode store_mode);

  // Helpers
  // TODO(verwaest): Move all uses of these helpers to the PropertyICCompiler
  // and make the helpers private.
  static void GenerateRuntimeSetProperty(MacroAssembler* masm,
                                         LanguageMode language_mode);


 private:
  explicit PropertyICCompiler(Isolate* isolate)
      : PropertyAccessCompiler(isolate, Code::KEYED_STORE_IC,

@ -832,15 +832,6 @@ void IC::PatchCache(Handle<Name> name, Handle<Object> handler) {
  }
}

Handle<Code> KeyedStoreIC::ChooseMegamorphicStub(Isolate* isolate,
                                                 ExtraICState extra_state) {
  DCHECK(!FLAG_tf_store_ic_stub);
  LanguageMode mode = StoreICState::GetLanguageMode(extra_state);
  return is_strict(mode)
             ? isolate->builtins()->KeyedStoreIC_Megamorphic_Strict()
             : isolate->builtins()->KeyedStoreIC_Megamorphic();
}

Handle<Object> LoadIC::SimpleFieldLoad(FieldIndex index) {
  if (FLAG_tf_load_ic_stub) {
    TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldDH);

@ -451,9 +451,6 @@ class KeyedStoreIC : public StoreIC {
  static void GenerateMegamorphic(MacroAssembler* masm,
                                  LanguageMode language_mode);

  static Handle<Code> ChooseMegamorphicStub(Isolate* isolate,
                                            ExtraICState extra_state);

  static void Clear(Isolate* isolate, Code* host, KeyedStoreICNexus* nexus);

 protected:
@ -1,33 +0,0 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_MIPS

#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)


void PropertyICCompiler::GenerateRuntimeSetProperty(
    MacroAssembler* masm, LanguageMode language_mode) {
  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
          StoreDescriptor::ValueRegister());

  __ li(a0, Operand(Smi::FromInt(language_mode)));
  __ Push(a0);

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kSetProperty);
}


#undef __
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS
@ -200,274 +200,6 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
|
||||
__ TailCallRuntime(Runtime::kKeyedGetProperty);
|
||||
}
|
||||
|
||||
static void KeyedStoreGenerateMegamorphicHelper(
|
||||
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
|
||||
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
|
||||
Register value, Register key, Register receiver, Register receiver_map,
|
||||
Register elements_map, Register elements) {
|
||||
Label transition_smi_elements;
|
||||
Label finish_object_store, non_double_value, transition_double_elements;
|
||||
Label fast_double_without_map_check;
|
||||
|
||||
// Fast case: Do the store, could be either Object or double.
|
||||
__ bind(fast_object);
|
||||
Register scratch = t0;
|
||||
Register scratch2 = t4;
|
||||
Register scratch3 = t5;
|
||||
Register address = t1;
|
||||
  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
                     scratch, scratch2, scratch3, address));

  if (check_map == kCheckMap) {
    __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ Branch(fast_double, ne, elements_map,
              Operand(masm->isolate()->factory()->fixed_array_map()));
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed1;
  __ Addu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
  __ lw(scratch, MemOperand(address));
  __ Branch(&holecheck_passed1, ne, scratch,
            Operand(masm->isolate()->factory()->the_hole_value()));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);

  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Addu(scratch, key, Operand(Smi::FromInt(1)));
    __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  // It's irrelevant whether array is smi-only or not when writing a smi.
  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
  __ sw(value, MemOperand(address));
  __ Ret(USE_DELAY_SLOT);
  __ Move(v0, value);  // Ensure the stub returns correct value.

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Addu(scratch, key, Operand(Smi::FromInt(1)));
    __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
  __ sw(value, MemOperand(address));
  // Update write barrier for the elements array address.
  __ mov(scratch, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, scratch, kRAHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret(USE_DELAY_SLOT);
  __ Move(v0, value);  // Ensure the stub returns correct value.

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
    __ Branch(slow, ne, elements_map, Operand(at));
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so
  // go to the runtime.
  __ Addu(address, elements, Operand(FixedDoubleArray::kHeaderSize +
                                     kHoleNanUpper32Offset - kHeapObjectTag));
  __ Lsa(address, address, key, kPointerSizeLog2);
  __ lw(scratch, MemOperand(address));
  __ Branch(&fast_double_without_map_check, ne, scratch,
            Operand(kHoleNanUpper32));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, scratch, scratch2,
                                 scratch3, &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Addu(scratch, key, Operand(Smi::FromInt(1)));
    __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret(USE_DELAY_SLOT);
  __ Move(v0, value);  // Ensure the stub returns correct value.

  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  __ Branch(&non_double_value, ne, scratch, Operand(at));

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
}
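// [Sketch] Taken as a whole, the helper above hand-codes the store fast
// path for fast-elements receivers.  A condensed model of the decisions it
// makes (illustrative names, not from the V8 sources):
enum class Outcome { kStoreSmi, kStoreObjectWithBarrier, kStoreDouble,
                     kTransition, kRuntime };

Outcome ClassifyFastStore(bool element_is_hole, bool prototype_has_dictionary,
                          bool backing_store_is_double, bool value_is_smi,
                          bool elements_are_fast_objects) {
  // Hole check: an existing hole may be shadowed by an accessor somewhere
  // on the prototype chain, so bail out to the runtime in that case.
  if (element_is_hole && prototype_has_dictionary) return Outcome::kRuntime;
  if (backing_store_is_double) return Outcome::kStoreDouble;
  if (value_is_smi) return Outcome::kStoreSmi;  // no write barrier needed
  if (elements_are_fast_objects) return Outcome::kStoreObjectWithBarrier;
  return Outcome::kTransition;  // smi-only/double array gets a new map
}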


void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                       LanguageMode language_mode) {
  // ---------- S t a t e --------------
  // -- a0 : value
  // -- a1 : key
  // -- a2 : receiver
  // -- ra : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array, maybe_name_key, miss;

  // Register usage.
  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(value.is(a0));
  Register receiver_map = a3;
  Register elements_map = t2;
  Register elements = t3;  // Elements array of the receiver.
  // t0 and t1 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &maybe_name_key);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks.
  // The generic stub does not perform map checks.
  __ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded));
  __ Branch(&slow, ne, t0, Operand(zero_reg));
  // Check if the object is a JS array or not.
  __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE));
  // Check that the object is some kind of JS object EXCEPT JS Value type. In
  // the case that the object is a value-wrapper object, we enter the runtime
  // system to make sure that indexing into string objects works as intended.
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  __ Branch(&slow, lo, t0, Operand(JS_OBJECT_TYPE));

  // Object case: Check key against length in the elements array.
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&fast_object, lo, key, Operand(t0));

  // Slow case, handle jump to runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // a0: value.
  // a1: key.
  // a2: receiver.
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
  // Never returns to here.

  __ bind(&maybe_name_key);
  __ lw(t0, FieldMemOperand(key, HeapObject::kMapOffset));
  __ lb(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
  __ JumpIfNotUniqueNameInstanceType(t0, &slow);

  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Register vector = StoreWithVectorDescriptor::VectorRegister();
  Register slot = StoreWithVectorDescriptor::SlotRegister();
  DCHECK(!AreAliased(vector, slot, t1, t2, t4, t5));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ li(slot, Operand(Smi::FromInt(slot_index)));

  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, t1,
                                                     t2, t4, t5);
  // Cache miss.
  __ Branch(&miss);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // Condition code from comparing key and array length is still available.
  // Only support writing to array[array.length].
  __ Branch(&slow, ne, key, Operand(t0));
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&slow, hs, key, Operand(t0));
  __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ Branch(&check_if_double_array, ne, elements_map,
            Heap::kFixedArrayMapRootIndex);

  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
  __ jmp(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.
  __ bind(&array);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Branch(&extra, hs, key, Operand(t0));

  KeyedStoreGenerateMegamorphicHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
                                      &fast_double_grow, &slow, kDontCheckMap,
                                      kIncrementLength, value, key, receiver,
                                      receiver_map, elements_map, elements);

  __ bind(&miss);
  GenerateMiss(masm);
}
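// [Sketch] The dispatcher above reduces to a small decision tree; roughly
// (illustrative names, not from the V8 sources):
enum class StorePath { kFastStore, kGrow, kStubCacheProbe, kSlowRuntime };

StorePath ClassifyKeyedStore(bool key_is_smi, bool key_is_unique_name,
                             bool receiver_needs_access_check,
                             bool receiver_is_js_array, bool key_in_bounds,
                             bool key_is_array_length) {
  if (!key_is_smi)
    return key_is_unique_name ? StorePath::kStubCacheProbe
                              : StorePath::kSlowRuntime;
  if (receiver_needs_access_check) return StorePath::kSlowRuntime;
  if (key_in_bounds) return StorePath::kFastStore;  // object or double path
  if (receiver_is_js_array && key_is_array_length) return StorePath::kGrow;
  return StorePath::kSlowRuntime;
}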


static void StoreIC_PushArgs(MacroAssembler* masm) {
  __ Push(StoreWithVectorDescriptor::ValueRegister(),
          StoreWithVectorDescriptor::SlotRegister(),
@ -1,157 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_MIPS

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/interface-descriptors.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
                       StubCache::Table table, Register receiver, Register name,
                       // The offset is scaled by 4, based on
                       // kCacheIndexShift, which is two bits
                       Register offset, Register scratch, Register scratch2,
                       Register offset_scratch) {
  ExternalReference key_offset(stub_cache->key_reference(table));
  ExternalReference value_offset(stub_cache->value_reference(table));
  ExternalReference map_offset(stub_cache->map_reference(table));

  uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
  uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
  uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());

  // Check the relative positions of the address fields.
  DCHECK(value_off_addr > key_off_addr);
  DCHECK((value_off_addr - key_off_addr) % 4 == 0);
  DCHECK((value_off_addr - key_off_addr) < (256 * 4));
  DCHECK(map_off_addr > key_off_addr);
  DCHECK((map_off_addr - key_off_addr) % 4 == 0);
  DCHECK((map_off_addr - key_off_addr) < (256 * 4));

  Label miss;
  Register base_addr = scratch;
  scratch = no_reg;

  // Multiply by 3 because there are 3 fields per entry (name, code, map).
  __ Lsa(offset_scratch, offset, offset, 1);

  // Calculate the base address of the entry.
  __ li(base_addr, Operand(key_offset));
  __ Addu(base_addr, base_addr, offset_scratch);

  // Check that the key in the entry matches the name.
  __ lw(at, MemOperand(base_addr, 0));
  __ Branch(&miss, ne, name, Operand(at));

  // Check the map matches.
  __ lw(at, MemOperand(base_addr, map_off_addr - key_off_addr));
  __ lw(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Branch(&miss, ne, at, Operand(scratch2));

  // Get the code entry from the cache.
  Register code = scratch2;
  scratch2 = no_reg;
  __ lw(code, MemOperand(base_addr, value_off_addr - key_off_addr));

#ifdef DEBUG
  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
    __ jmp(&miss);
  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
    __ jmp(&miss);
  }
#endif

  // Jump to the first instruction in the code stub.
  __ Addu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(at);

  // Miss: fall through.
  __ bind(&miss);
}

void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
                              Register name, Register scratch, Register extra,
                              Register extra2, Register extra3) {
  Label miss;

  // Make sure that code is valid. The multiplying code relies on the
  // entry size being 12.
  DCHECK(sizeof(Entry) == 12);

  // Make sure that there are no register conflicts.
  DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));

  // Check register validity.
  DCHECK(!scratch.is(no_reg));
  DCHECK(!extra.is(no_reg));
  DCHECK(!extra2.is(no_reg));
  DCHECK(!extra3.is(no_reg));

#ifdef DEBUG
  // If vector-based ics are in use, ensure that scratch, extra, extra2 and
  // extra3 don't conflict with the vector and slot registers, which need
  // to be preserved for a handler call or miss.
  if (IC::ICUseVector(ic_kind_)) {
    Register vector, slot;
    if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
      vector = StoreWithVectorDescriptor::VectorRegister();
      slot = StoreWithVectorDescriptor::SlotRegister();
    } else {
      DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
      vector = LoadWithVectorDescriptor::VectorRegister();
      slot = LoadWithVectorDescriptor::SlotRegister();
    }
    DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
  }
#endif

  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
                      extra3);

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &miss);

  // Get the map of the receiver and compute the hash.
  __ lw(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
  __ lw(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Addu(scratch, scratch, at);
  __ Xor(scratch, scratch, Operand(kPrimaryMagic));
  __ And(scratch, scratch,
         Operand((kPrimaryTableSize - 1) << kCacheIndexShift));

  // Probe the primary table.
  ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
             extra3);

  // Primary miss: Compute hash for secondary probe.
  __ Subu(scratch, scratch, name);
  __ Addu(scratch, scratch, Operand(kSecondaryMagic));
  __ And(scratch, scratch,
         Operand((kSecondaryTableSize - 1) << kCacheIndexShift));

  // Probe the secondary table.
  ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
             extra3);

  // Cache miss: Fall-through and let caller handle the miss by
  // entering the runtime system.
  __ bind(&miss);
  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
                      extra3);
}

#undef __
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS
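The probe sequence in GenerateProbe derives two table indices from the name's hash field and the receiver's map. A minimal sketch of the same arithmetic in plain C++ (parameter names are illustrative; the magic constants and table sizes mirror the StubCache fields used above):

#include <cstdint>

uint32_t PrimaryOffset(uint32_t name_hash_field, uint32_t map_word,
                       uint32_t primary_magic, uint32_t table_size,
                       uint32_t cache_index_shift) {
  // (hash + map) ^ magic, masked to the table size; the low
  // kCacheIndexShift bits are kept for the later entry scaling.
  return ((name_hash_field + map_word) ^ primary_magic) &
         ((table_size - 1) << cache_index_shift);
}

uint32_t SecondaryOffset(uint32_t primary_offset, uint32_t name_word,
                         uint32_t secondary_magic, uint32_t table_size,
                         uint32_t cache_index_shift) {
  // primary - name + magic, masked the same way.
  return (primary_offset - name_word + secondary_magic) &
         ((table_size - 1) << cache_index_shift);
}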
@ -1,33 +0,0 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_MIPS64

#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)


void PropertyICCompiler::GenerateRuntimeSetProperty(
    MacroAssembler* masm, LanguageMode language_mode) {
  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
          StoreDescriptor::ValueRegister());

  __ li(a0, Operand(Smi::FromInt(language_mode)));
  __ Push(a0);

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kSetProperty);
}


#undef __
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS64
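Each of the deleted GenerateRuntimeSetProperty ports establishes the same argument layout before tail-calling Runtime::kSetProperty; a sketch of that layout in plain C++ (types and names illustrative, not from the V8 sources):

#include <cstdint>

struct SetPropertyArgs {
  void* receiver;
  void* name;
  void* value;
  intptr_t language_mode_smi;  // smi-tagged LanguageMode, pushed last
};

SetPropertyArgs PrepareSetPropertyArgs(void* receiver, void* name, void* value,
                                       int language_mode, int smi_shift) {
  // smi_shift models the platform's smi tagging (see the li/mov of
  // Smi::FromInt(language_mode) in the ports above).
  return {receiver, name, value,
          static_cast<intptr_t>(language_mode) << smi_shift};
}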
@ -198,279 +198,6 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  __ TailCallRuntime(Runtime::kKeyedGetProperty);
}

static void KeyedStoreGenerateMegamorphicHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch = a4;
  Register scratch2 = t0;
  Register address = a5;
  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
                     scratch, scratch2, address));

  if (check_map == kCheckMap) {
    __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ Branch(fast_double, ne, elements_map,
              Operand(masm->isolate()->factory()->fixed_array_map()));
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed1;
  __ Daddu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ SmiScale(at, key, kPointerSizeLog2);
  __ daddu(address, address, at);
  __ ld(scratch, MemOperand(address));

  __ Branch(&holecheck_passed1, ne, scratch,
            Operand(masm->isolate()->factory()->the_hole_value()));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);

  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
    __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  // It's irrelevant whether array is smi-only or not when writing a smi.
  __ Daddu(address, elements,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiScale(scratch, key, kPointerSizeLog2);
  __ Daddu(address, address, scratch);
  __ sd(value, MemOperand(address));
  __ Ret(USE_DELAY_SLOT);
  __ Move(v0, value);  // Ensure the stub returns correct value.

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
    __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Daddu(address, elements,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiScale(scratch, key, kPointerSizeLog2);
  __ Daddu(address, address, scratch);
  __ sd(value, MemOperand(address));
  // Update write barrier for the elements array address.
  __ mov(scratch, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, scratch, kRAHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret(USE_DELAY_SLOT);
  __ Move(v0, value);  // Ensure the stub returns correct value.

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
    __ Branch(slow, ne, elements_map, Operand(at));
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so
  // go to the runtime.
  __ Daddu(address, elements,
           Operand(FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
                   kHeapObjectTag));
  __ SmiScale(at, key, kPointerSizeLog2);
  __ daddu(address, address, at);
  __ lw(scratch, MemOperand(address));
  __ Branch(&fast_double_without_map_check, ne, scratch,
            Operand(static_cast<int32_t>(kHoleNanUpper32)));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, scratch, scratch2,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
    __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret(USE_DELAY_SLOT);
  __ Move(v0, value);  // Ensure the stub returns correct value.

  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ ld(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  __ Branch(&non_double_value, ne, scratch, Operand(at));

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
}
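// [Sketch] A note on the addressing above: the 32-bit port scales the
// still-tagged key with Lsa, while this 64-bit port first untags with
// SmiScale.  A smi carries its payload shifted left by 1 on 32-bit targets
// but in the upper 32 bits of the word on 64-bit targets.  Ignoring the
// heap-object tag, the arithmetic both helpers perform is roughly
// (illustrative, not from the V8 sources):

// 32-bit: tagged key = index << 1, pointer size 4, FixedArray header 8.
unsigned long ElementAddress32(unsigned long elements, unsigned tagged_key) {
  return elements + 8 +
         (static_cast<unsigned long>(tagged_key) << (2 /*ptr log2*/ - 1));
}

// 64-bit: tagged key = index << 32, pointer size 8, FixedArray header 16.
unsigned long long ElementAddress64(unsigned long long elements,
                                    unsigned long long tagged_key) {
  unsigned long long index = tagged_key >> 32;  // SmiScale untags first
  return elements + 16 + (index << 3 /*ptr log2*/);
}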


void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                       LanguageMode language_mode) {
  // ---------- S t a t e --------------
  // -- a0 : value
  // -- a1 : key
  // -- a2 : receiver
  // -- ra : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array, maybe_name_key, miss;

  // Register usage.
  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(value.is(a0));
  Register receiver_map = a3;
  Register elements_map = a6;
  Register elements = a7;  // Elements array of the receiver.
  // a4 and a5 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &maybe_name_key);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ ld(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks.
  // The generic stub does not perform map checks.
  __ lbu(a4, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ And(a4, a4, Operand(1 << Map::kIsAccessCheckNeeded));
  __ Branch(&slow, ne, a4, Operand(zero_reg));
  // Check if the object is a JS array or not.
  __ lbu(a4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ Branch(&array, eq, a4, Operand(JS_ARRAY_TYPE));
  // Check that the object is some kind of JSObject.
  __ Branch(&slow, lt, a4, Operand(FIRST_JS_OBJECT_TYPE));

  // Object case: Check key against length in the elements array.
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&fast_object, lo, key, Operand(a4));

  // Slow case, handle jump to runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // a0: value.
  // a1: key.
  // a2: receiver.
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
  // Never returns to here.

  __ bind(&maybe_name_key);
  __ ld(a4, FieldMemOperand(key, HeapObject::kMapOffset));
  __ lb(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
  __ JumpIfNotUniqueNameInstanceType(a4, &slow);

  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Register vector = StoreWithVectorDescriptor::VectorRegister();
  Register slot = StoreWithVectorDescriptor::SlotRegister();

  DCHECK(!AreAliased(vector, slot, a5, a6, a7, t0));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ li(slot, Operand(Smi::FromInt(slot_index)));

  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, a5,
                                                     a6, a7, t0);
  // Cache miss.
  __ Branch(&miss);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // Condition code from comparing key and array length is still available.
  // Only support writing to array[array.length].
  __ Branch(&slow, ne, key, Operand(a4));
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&slow, hs, key, Operand(a4));
  __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ Branch(&check_if_double_array, ne, elements_map,
            Heap::kFixedArrayMapRootIndex);

  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
  __ jmp(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.
  __ bind(&array);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ ld(a4, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Branch(&extra, hs, key, Operand(a4));

  KeyedStoreGenerateMegamorphicHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
                                      &fast_double_grow, &slow, kDontCheckMap,
                                      kIncrementLength, value, key, receiver,
                                      receiver_map, elements_map, elements);

  __ bind(&miss);
  GenerateMiss(masm);
}


static void StoreIC_PushArgs(MacroAssembler* masm) {
  __ Push(StoreWithVectorDescriptor::ValueRegister(),
          StoreWithVectorDescriptor::SlotRegister(),
@ -1,161 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_MIPS64

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/interface-descriptors.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
                       StubCache::Table table, Register receiver, Register name,
                       // The offset is scaled by 4, based on
                       // kCacheIndexShift, which is two bits
                       Register offset, Register scratch, Register scratch2,
                       Register offset_scratch) {
  ExternalReference key_offset(stub_cache->key_reference(table));
  ExternalReference value_offset(stub_cache->value_reference(table));
  ExternalReference map_offset(stub_cache->map_reference(table));

  uint64_t key_off_addr = reinterpret_cast<uint64_t>(key_offset.address());
  uint64_t value_off_addr = reinterpret_cast<uint64_t>(value_offset.address());
  uint64_t map_off_addr = reinterpret_cast<uint64_t>(map_offset.address());

  // Check the relative positions of the address fields.
  DCHECK(value_off_addr > key_off_addr);
  DCHECK((value_off_addr - key_off_addr) % 4 == 0);
  DCHECK((value_off_addr - key_off_addr) < (256 * 4));
  DCHECK(map_off_addr > key_off_addr);
  DCHECK((map_off_addr - key_off_addr) % 4 == 0);
  DCHECK((map_off_addr - key_off_addr) < (256 * 4));

  Label miss;
  Register base_addr = scratch;
  scratch = no_reg;

  // Multiply by 3 because there are 3 fields per entry (name, code, map).
  __ Dlsa(offset_scratch, offset, offset, 1);

  // Calculate the base address of the entry.
  __ li(base_addr, Operand(key_offset));
  __ Dlsa(base_addr, base_addr, offset_scratch,
          kPointerSizeLog2 - StubCache::kCacheIndexShift);

  // Check that the key in the entry matches the name.
  __ ld(at, MemOperand(base_addr, 0));
  __ Branch(&miss, ne, name, Operand(at));

  // Check the map matches.
  __ ld(at, MemOperand(base_addr,
                       static_cast<int32_t>(map_off_addr - key_off_addr)));
  __ ld(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Branch(&miss, ne, at, Operand(scratch2));

  // Get the code entry from the cache.
  Register code = scratch2;
  scratch2 = no_reg;
  __ ld(code, MemOperand(base_addr,
                         static_cast<int32_t>(value_off_addr - key_off_addr)));

#ifdef DEBUG
  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
    __ jmp(&miss);
  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
    __ jmp(&miss);
  }
#endif

  // Jump to the first instruction in the code stub.
  __ Daddu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(at);

  // Miss: fall through.
  __ bind(&miss);
}

void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
                              Register name, Register scratch, Register extra,
                              Register extra2, Register extra3) {
  Label miss;

  // Make sure that code is valid. The multiplying code relies on the
  // entry size being 12.
  // DCHECK(sizeof(Entry) == 12);
  // DCHECK(sizeof(Entry) == 3 * kPointerSize);

  // Make sure that there are no register conflicts.
  DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));

  // Check register validity.
  DCHECK(!scratch.is(no_reg));
  DCHECK(!extra.is(no_reg));
  DCHECK(!extra2.is(no_reg));
  DCHECK(!extra3.is(no_reg));

#ifdef DEBUG
  // If vector-based ics are in use, ensure that scratch, extra, extra2 and
  // extra3 don't conflict with the vector and slot registers, which need
  // to be preserved for a handler call or miss.
  if (IC::ICUseVector(ic_kind_)) {
    Register vector, slot;
    if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
      vector = StoreWithVectorDescriptor::VectorRegister();
      slot = StoreWithVectorDescriptor::SlotRegister();
    } else {
      DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
      vector = LoadWithVectorDescriptor::VectorRegister();
      slot = LoadWithVectorDescriptor::SlotRegister();
    }
    DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
  }
#endif

  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
                      extra3);

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &miss);

  // Get the map of the receiver and compute the hash.
  __ lwu(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
  __ ld(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Addu(scratch, scratch, at);
  __ Xor(scratch, scratch, Operand(kPrimaryMagic));
  __ And(scratch, scratch,
         Operand((kPrimaryTableSize - 1) << kCacheIndexShift));

  // Probe the primary table.
  ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
             extra3);

  // Primary miss: Compute hash for secondary probe.
  __ Subu(scratch, scratch, name);
  __ Addu(scratch, scratch, kSecondaryMagic);
  __ And(scratch, scratch,
         Operand((kSecondaryTableSize - 1) << kCacheIndexShift));

  // Probe the secondary table.
  ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
             extra3);

  // Cache miss: Fall-through and let caller handle the miss by
  // entering the runtime system.
  __ bind(&miss);
  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
                      extra3);
}

#undef __
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS64
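The multiply-by-3 in every ProbeTable port comes from the table layout: each entry holds three pointer-sized fields (name, code, map), so sizeof(Entry) is 12 on 32-bit targets and 24 on 64-bit targets, as the DCHECKs in these files record. A sketch of the address computation the Lsa/Dlsa pairs perform (illustrative, not from the V8 sources):

#include <cstdint>

uintptr_t EntryAddress(uintptr_t key_table_base, uint32_t scaled_offset,
                       unsigned pointer_size_log2,
                       unsigned cache_index_shift) {
  // offset * 3 via shift-and-add, as Lsa/Dlsa do above.
  uint32_t times3 = scaled_offset + (scaled_offset << 1);
  // On 64-bit targets the offset must additionally be rescaled from
  // 4-byte cache-index units to 8-byte pointers; on 32-bit targets the
  // shift below is zero because the two log2 values coincide.
  return key_table_base +
         (static_cast<uintptr_t>(times3)
          << (pointer_size_log2 - cache_index_shift));
}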
@ -1,31 +0,0 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_PPC

#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)


void PropertyICCompiler::GenerateRuntimeSetProperty(
    MacroAssembler* masm, LanguageMode language_mode) {
  __ mov(r0, Operand(Smi::FromInt(language_mode)));
  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
          StoreDescriptor::ValueRegister(), r0);

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kSetProperty);
}


#undef __
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_PPC
@ -223,274 +223,6 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}

static void KeyedStoreGenerateMegamorphicHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch = r7;
  Register address = r8;
  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
                     scratch, address));

  if (check_map == kCheckMap) {
    __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ mov(scratch, Operand(masm->isolate()->factory()->fixed_array_map()));
    __ cmp(elements_map, scratch);
    __ bne(fast_double);
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed1;
  __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiToPtrArrayOffset(scratch, key);
  __ LoadPX(scratch, MemOperand(address, scratch));
  __ Cmpi(scratch, Operand(masm->isolate()->factory()->the_hole_value()), r0);
  __ bne(&holecheck_passed1);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);

  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0);
  }
  // It's irrelevant whether array is smi-only or not when writing a smi.
  __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiToPtrArrayOffset(scratch, key);
  __ StorePX(value, MemOperand(address, scratch));
  __ Ret();

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0);
  }
  __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiToPtrArrayOffset(scratch, key);
  __ StorePUX(value, MemOperand(address, scratch));
  // Update write barrier for the elements array address.
  __ mr(scratch, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret();

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
    __ bne(slow);
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so
  // go to the runtime.
  __ addi(address, elements,
          Operand((FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
                   kHeapObjectTag)));
  __ SmiToDoubleArrayOffset(scratch, key);
  __ lwzx(scratch, MemOperand(address, scratch));
  __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
  __ bne(&fast_double_without_map_check);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, scratch, d0,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0);
  }
  __ Ret();

  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
  __ bne(&non_double_value);

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ b(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ b(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ b(&finish_object_store);
}
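// [Sketch] The double-hole check above loads only the upper (exponent)
// word of the element and compares it with kHoleNanUpper32: the hole is
// represented in double arrays as a NaN bit pattern that canonicalized
// heap numbers never use, so the upper 32 bits alone identify it.  The
// same test in plain C++ (kHoleUpper stands in for V8's constant;
// illustrative, not from the V8 sources):
bool IsDoubleHole(unsigned long long element_bits, unsigned kHoleUpper) {
  return static_cast<unsigned>(element_bits >> 32) == kHoleUpper;
}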


void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                       LanguageMode language_mode) {
  // ---------- S t a t e --------------
  // -- r3 : value
  // -- r4 : receiver
  // -- r5 : key
  // -- lr : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array, maybe_name_key, miss;

  // Register usage.
  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(receiver.is(r4));
  DCHECK(key.is(r5));
  DCHECK(value.is(r3));
  Register receiver_map = r6;
  Register elements_map = r9;
  Register elements = r10;  // Elements array of the receiver.
  // r7 and r8 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &maybe_name_key);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks.
  // The generic stub does not perform map checks.
  __ lbz(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ andi(r0, ip, Operand(1 << Map::kIsAccessCheckNeeded));
  __ bne(&slow, cr0);
  // Check if the object is a JS array or not.
  __ lbz(r7, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ cmpi(r7, Operand(JS_ARRAY_TYPE));
  __ beq(&array);
  // Check that the object is some kind of JSObject.
  __ cmpi(r7, Operand(FIRST_JS_OBJECT_TYPE));
  __ blt(&slow);

  // Object case: Check key against length in the elements array.
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ LoadP(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmpl(key, ip);
  __ blt(&fast_object);

  // Slow case, handle jump to runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // r3: value.
  // r4: receiver.
  // r5: key.
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
  // Never returns to here.

  __ bind(&maybe_name_key);
  __ LoadP(r7, FieldMemOperand(key, HeapObject::kMapOffset));
  __ lbz(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset));
  __ JumpIfNotUniqueNameInstanceType(r7, &slow);

  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Register vector = StoreWithVectorDescriptor::VectorRegister();
  Register slot = StoreWithVectorDescriptor::SlotRegister();
  DCHECK(!AreAliased(vector, slot, r8, r9, r10, r11));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));

  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, r8,
                                                     r9, r10, r11);
  // Cache miss.
  __ b(&miss);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // Condition code from comparing key and array length is still available.
  __ bne(&slow);  // Only support writing to array[array.length].
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ LoadP(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmpl(key, ip);
  __ bge(&slow);
  __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ mov(ip, Operand(masm->isolate()->factory()->fixed_array_map()));
  __ cmp(elements_map, ip);  // PPC - I think I can re-use ip here
  __ bne(&check_if_double_array);
  __ b(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ mov(ip, Operand(masm->isolate()->factory()->fixed_double_array_map()));
  __ cmp(elements_map, ip);  // PPC - another ip re-use
  __ bne(&slow);
  __ b(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.
  __ bind(&array);
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ LoadP(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ cmpl(key, ip);
  __ bge(&extra);

  KeyedStoreGenerateMegamorphicHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
                                      &fast_double_grow, &slow, kDontCheckMap,
                                      kIncrementLength, value, key, receiver,
                                      receiver_map, elements_map, elements);
  __ bind(&miss);
  GenerateMiss(masm);
}

void StoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);
@ -1,176 +0,0 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_PPC

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/interface-descriptors.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
                       StubCache::Table table, Register receiver, Register name,
                       // The offset is scaled by 4, based on
                       // kCacheIndexShift, which is two bits
                       Register offset, Register scratch, Register scratch2,
                       Register offset_scratch) {
  ExternalReference key_offset(stub_cache->key_reference(table));
  ExternalReference value_offset(stub_cache->value_reference(table));
  ExternalReference map_offset(stub_cache->map_reference(table));

  uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
  uintptr_t value_off_addr =
      reinterpret_cast<uintptr_t>(value_offset.address());
  uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());

  // Check the relative positions of the address fields.
  DCHECK(value_off_addr > key_off_addr);
  DCHECK((value_off_addr - key_off_addr) % 4 == 0);
  DCHECK((value_off_addr - key_off_addr) < (256 * 4));
  DCHECK(map_off_addr > key_off_addr);
  DCHECK((map_off_addr - key_off_addr) % 4 == 0);
  DCHECK((map_off_addr - key_off_addr) < (256 * 4));

  Label miss;
  Register base_addr = scratch;
  scratch = no_reg;

  // Multiply by 3 because there are 3 fields per entry (name, code, map).
  __ ShiftLeftImm(offset_scratch, offset, Operand(1));
  __ add(offset_scratch, offset, offset_scratch);

  // Calculate the base address of the entry.
  __ mov(base_addr, Operand(key_offset));
#if V8_TARGET_ARCH_PPC64
  DCHECK(kPointerSizeLog2 > StubCache::kCacheIndexShift);
  __ ShiftLeftImm(offset_scratch, offset_scratch,
                  Operand(kPointerSizeLog2 - StubCache::kCacheIndexShift));
#else
  DCHECK(kPointerSizeLog2 == StubCache::kCacheIndexShift);
#endif
  __ add(base_addr, base_addr, offset_scratch);

  // Check that the key in the entry matches the name.
  __ LoadP(ip, MemOperand(base_addr, 0));
  __ cmp(name, ip);
  __ bne(&miss);

  // Check the map matches.
  __ LoadP(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
  __ LoadP(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ cmp(ip, scratch2);
  __ bne(&miss);

  // Get the code entry from the cache.
  Register code = scratch2;
  scratch2 = no_reg;
  __ LoadP(code, MemOperand(base_addr, value_off_addr - key_off_addr));

#ifdef DEBUG
  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
    __ b(&miss);
  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
    __ b(&miss);
  }
#endif

  // Jump to the first instruction in the code stub.
  __ addi(r0, code, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ mtctr(r0);
  __ bctr();

  // Miss: fall through.
  __ bind(&miss);
}

void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
                              Register name, Register scratch, Register extra,
                              Register extra2, Register extra3) {
  Label miss;

#if V8_TARGET_ARCH_PPC64
  // Make sure that code is valid. The multiplying code relies on the
  // entry size being 24.
  DCHECK(sizeof(Entry) == 24);
#else
  // Make sure that code is valid. The multiplying code relies on the
  // entry size being 12.
  DCHECK(sizeof(Entry) == 12);
#endif

  // Make sure that there are no register conflicts.
  DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));

  // Check scratch, extra and extra2 registers are valid.
  DCHECK(!scratch.is(no_reg));
  DCHECK(!extra.is(no_reg));
  DCHECK(!extra2.is(no_reg));
  DCHECK(!extra3.is(no_reg));

#ifdef DEBUG
  // If vector-based ics are in use, ensure that scratch, extra, extra2 and
  // extra3 don't conflict with the vector and slot registers, which need
  // to be preserved for a handler call or miss.
  if (IC::ICUseVector(ic_kind_)) {
    Register vector, slot;
    if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
      vector = StoreWithVectorDescriptor::VectorRegister();
      slot = StoreWithVectorDescriptor::SlotRegister();
    } else {
      DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
      vector = LoadWithVectorDescriptor::VectorRegister();
      slot = LoadWithVectorDescriptor::SlotRegister();
    }
    DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
  }
#endif

  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
                      extra3);

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &miss);

  // Get the map of the receiver and compute the hash.
  __ lwz(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
  __ LoadP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ add(scratch, scratch, ip);
  __ Xor(scratch, scratch, Operand(kPrimaryMagic));
  // The mask omits the last two bits because they are not part of the hash.
  __ andi(scratch, scratch,
          Operand((kPrimaryTableSize - 1) << kCacheIndexShift));

  // Probe the primary table.
  ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
             extra3);

  // Primary miss: Compute hash for secondary probe.
  __ sub(scratch, scratch, name);
  __ Add(scratch, scratch, kSecondaryMagic, r0);
  __ andi(scratch, scratch,
          Operand((kSecondaryTableSize - 1) << kCacheIndexShift));

  // Probe the secondary table.
  ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
             extra3);

  // Cache miss: Fall-through and let caller handle the miss by
  // entering the runtime system.
  __ bind(&miss);
  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
                      extra3);
}

#undef __
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_PPC
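GenerateProbe therefore implements a two-level lookup: try the primary table, rehash, try the secondary table, and fall through to the runtime on a double miss. A sketch of the lookup with plain arrays (types and names illustrative, not from the V8 sources):

#include <cstddef>
#include <cstdint>

struct Entry { uintptr_t key, value, map; };

uintptr_t Probe(const Entry* primary, const Entry* secondary, size_t size,
                uintptr_t name, uintptr_t map,
                uint32_t primary_index, uint32_t secondary_index) {
  const Entry& p = primary[primary_index % size];
  if (p.key == name && p.map == map) return p.value;  // primary hit
  const Entry& s = secondary[secondary_index % size];
  if (s.key == name && s.map == map) return s.value;  // secondary hit
  return 0;  // miss: caller enters the runtime system
}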
@ -1,29 +0,0 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_S390

#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

void PropertyICCompiler::GenerateRuntimeSetProperty(
    MacroAssembler* masm, LanguageMode language_mode) {
  __ mov(r0, Operand(Smi::FromInt(language_mode)));
  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
          StoreDescriptor::ValueRegister(), r0);

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kSetProperty);
}

#undef __
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_S390
|
@@ -216,272 +216,6 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
   __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
 }

static void KeyedStoreGenerateMegamorphicHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch = r6;
  Register address = r7;
  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
                     scratch, address));

  if (check_map == kCheckMap) {
    __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ CmpP(elements_map,
            Operand(masm->isolate()->factory()->fixed_array_map()));
    __ bne(fast_double);
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed1;
  // TODO(joransiu): Fold AddP into memref of LoadP.
  __ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiToPtrArrayOffset(scratch, key);
  __ LoadP(scratch, MemOperand(address, scratch));
  __ CmpP(scratch, Operand(masm->isolate()->factory()->the_hole_value()));
  __ bne(&holecheck_passed1, Label::kNear);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);

  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  // It's irrelevant whether array is smi-only or not when writing a smi.
  __ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiToPtrArrayOffset(scratch, key);
  __ StoreP(value, MemOperand(address, scratch));
  __ Ret();

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiToPtrArrayOffset(scratch, key);
  __ StoreP(value, MemOperand(address, scratch));
  __ la(address, MemOperand(address, scratch));
  // Update write barrier for the elements array address.
  __ LoadRR(scratch, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret();

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
    __ bne(slow);
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so
  // go to the runtime.
  // TODO(joransiu): Fold AddP Operand into LoadlW.
  __ AddP(address, elements,
          Operand((FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
                   kHeapObjectTag)));
  __ SmiToDoubleArrayOffset(scratch, key);
  __ LoadlW(scratch, MemOperand(address, scratch));
  __ CmpP(scratch, Operand(kHoleNanUpper32));
  __ bne(&fast_double_without_map_check, Label::kNear);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, scratch, d0,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();

  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
  __ bne(&non_double_value);

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ b(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ b(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ b(&finish_object_store);
}

void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                       LanguageMode language_mode) {
  // ---------- S t a t e --------------
  //  -- r2     : value
  //  -- r3     : receiver
  //  -- r4     : key
  //  -- lr     : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array, maybe_name_key, miss;

  // Register usage.
  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(receiver.is(r3));
  DCHECK(key.is(r4));
  DCHECK(value.is(r2));
  Register receiver_map = r5;
  Register elements_map = r8;
  Register elements = r9;  // Elements array of the receiver.
  // r6 and r7 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &maybe_name_key);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks.
  // The generic stub does not perform map checks.
  __ LoadlB(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ AndP(r0, ip, Operand(1 << Map::kIsAccessCheckNeeded));
  __ bne(&slow, Label::kNear);
  // Check if the object is a JS array or not.
  __ LoadlB(r6, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ CmpP(r6, Operand(JS_ARRAY_TYPE));
  __ beq(&array);
  // Check that the object is some kind of JSObject.
  __ CmpP(r6, Operand(FIRST_JS_OBJECT_TYPE));
  __ blt(&slow, Label::kNear);

  // Object case: Check key against length in the elements array.
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ CmpLogicalP(key, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ blt(&fast_object);

  // Slow case, handle jump to runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // r2: value.
  // r3: receiver.
  // r4: key.
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
  // Never returns to here.

  __ bind(&maybe_name_key);
  __ LoadP(r6, FieldMemOperand(key, HeapObject::kMapOffset));
  __ LoadlB(r6, FieldMemOperand(r6, Map::kInstanceTypeOffset));
  __ JumpIfNotUniqueNameInstanceType(r6, &slow);

  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Register vector = StoreWithVectorDescriptor::VectorRegister();
  Register slot = StoreWithVectorDescriptor::SlotRegister();
  DCHECK(!AreAliased(vector, slot, r7, r8, r9, ip));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));

  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, r7,
                                                     r8, r9, ip);
  // Cache miss.
  __ b(&miss);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // Condition code from comparing key and array length is still available.
  __ bne(&slow);  // Only support writing to array[array.length].
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ CmpLogicalP(key, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ bge(&slow);
  __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ CmpP(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
  __ bne(&check_if_double_array, Label::kNear);
  __ b(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ CmpP(elements_map,
          Operand(masm->isolate()->factory()->fixed_double_array_map()));
  __ bne(&slow);
  __ b(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.
  __ bind(&array);
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ CmpLogicalP(key, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ bge(&extra);

  KeyedStoreGenerateMegamorphicHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
                                      &fast_double_grow, &slow, kDontCheckMap,
                                      kIncrementLength, value, key, receiver,
                                      receiver_map, elements_map, elements);
  __ bind(&miss);
  GenerateMiss(masm);
}

void StoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

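The three transition blocks in the helper above encode a small lattice of elements kinds: a non-smi store into FAST_SMI_ELEMENTS goes to FAST_DOUBLE_ELEMENTS for heap numbers and to FAST_ELEMENTS otherwise, and a non-number store into FAST_DOUBLE_ELEMENTS goes to FAST_ELEMENTS. A minimal C++ sketch of just that decision logic, with illustrative names (V8's real ElementsKind enum has many more states):

enum class Kind { kFastSmi, kFastDouble, kFastObject };

// Returns the elements kind the backing store must transition to so that
// the value can be stored. Illustrative only, not V8 code.
Kind TransitionFor(Kind current, bool value_is_smi, bool value_is_heap_number) {
  if (value_is_smi) return current;  // a smi fits in every fast kind
  switch (current) {
    case Kind::kFastSmi:
      // transition_smi_elements: double values pick the double store path.
      return value_is_heap_number ? Kind::kFastDouble : Kind::kFastObject;
    case Kind::kFastDouble:
      // transition_double_elements: only non-numbers force a transition.
      return value_is_heap_number ? Kind::kFastDouble : Kind::kFastObject;
    case Kind::kFastObject:
      return Kind::kFastObject;  // already the most general fast kind
  }
  return current;  // unreachable; keeps compilers quiet
}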
@@ -1,173 +0,0 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_S390

#include "src/ic/stub-cache.h"
#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/interface-descriptors.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
                       StubCache::Table table, Register receiver, Register name,
                       // The offset is scaled by 4, based on
                       // kCacheIndexShift, which is two bits.
                       Register offset, Register scratch, Register scratch2,
                       Register offset_scratch) {
  ExternalReference key_offset(stub_cache->key_reference(table));
  ExternalReference value_offset(stub_cache->value_reference(table));
  ExternalReference map_offset(stub_cache->map_reference(table));

  uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
  uintptr_t value_off_addr =
      reinterpret_cast<uintptr_t>(value_offset.address());
  uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());

  // Check the relative positions of the address fields.
  DCHECK(value_off_addr > key_off_addr);
  DCHECK((value_off_addr - key_off_addr) % 4 == 0);
  DCHECK((value_off_addr - key_off_addr) < (256 * 4));
  DCHECK(map_off_addr > key_off_addr);
  DCHECK((map_off_addr - key_off_addr) % 4 == 0);
  DCHECK((map_off_addr - key_off_addr) < (256 * 4));

  Label miss;
  Register base_addr = scratch;
  scratch = no_reg;

  // Multiply by 3 because there are 3 fields per entry (name, code, map).
  __ ShiftLeftP(offset_scratch, offset, Operand(1));
  __ AddP(offset_scratch, offset, offset_scratch);

  // Calculate the base address of the entry.
  __ mov(base_addr, Operand(key_offset));
#if V8_TARGET_ARCH_S390X
  DCHECK(kPointerSizeLog2 > StubCache::kCacheIndexShift);
  __ ShiftLeftP(offset_scratch, offset_scratch,
                Operand(kPointerSizeLog2 - StubCache::kCacheIndexShift));
#else
  DCHECK(kPointerSizeLog2 == StubCache::kCacheIndexShift);
#endif
  __ AddP(base_addr, base_addr, offset_scratch);

  // Check that the key in the entry matches the name.
  __ CmpP(name, MemOperand(base_addr, 0));
  __ bne(&miss, Label::kNear);

  // Check that the map matches.
  __ LoadP(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
  __ CmpP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ bne(&miss, Label::kNear);

  // Get the code entry from the cache.
  Register code = scratch2;
  scratch2 = no_reg;
  __ LoadP(code, MemOperand(base_addr, value_off_addr - key_off_addr));

#ifdef DEBUG
  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
    __ b(&miss, Label::kNear);
  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
    __ b(&miss, Label::kNear);
  }
#endif

  // Jump to the first instruction in the code stub.
  // TODO(joransiu): Combine into indirect branch.
  __ la(code, MemOperand(code, Code::kHeaderSize - kHeapObjectTag));
  __ b(code);

  // Miss: fall through.
  __ bind(&miss);
}

void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
                              Register name, Register scratch, Register extra,
                              Register extra2, Register extra3) {
  Label miss;

#if V8_TARGET_ARCH_S390X
  // Make sure that the code is valid. The multiplying code relies on the
  // entry size being 24.
  DCHECK(sizeof(Entry) == 24);
#else
  // Make sure that the code is valid. The multiplying code relies on the
  // entry size being 12.
  DCHECK(sizeof(Entry) == 12);
#endif

  // Make sure that there are no register conflicts.
  DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));

  // Check that the scratch, extra, extra2 and extra3 registers are valid.
  DCHECK(!scratch.is(no_reg));
  DCHECK(!extra.is(no_reg));
  DCHECK(!extra2.is(no_reg));
  DCHECK(!extra3.is(no_reg));

#ifdef DEBUG
  // If vector-based ics are in use, ensure that scratch, extra, extra2 and
  // extra3 don't conflict with the vector and slot registers, which need
  // to be preserved for a handler call or miss.
  if (IC::ICUseVector(ic_kind_)) {
    Register vector, slot;
    if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
      vector = StoreWithVectorDescriptor::VectorRegister();
      slot = StoreWithVectorDescriptor::SlotRegister();
    } else {
      DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
      vector = LoadWithVectorDescriptor::VectorRegister();
      slot = LoadWithVectorDescriptor::SlotRegister();
    }
    DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
  }
#endif

  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
                      extra3);

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &miss);

  // Get the map of the receiver and compute the hash.
  __ LoadlW(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
  __ LoadP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ AddP(scratch, scratch, ip);
  __ XorP(scratch, scratch, Operand(kPrimaryMagic));
  // The mask omits the last two bits because they are not part of the hash.
  __ AndP(scratch, scratch,
          Operand((kPrimaryTableSize - 1) << kCacheIndexShift));

  // Probe the primary table.
  ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
             extra3);

  // Primary miss: Compute hash for secondary probe.
  __ SubP(scratch, scratch, name);
  __ AddP(scratch, scratch, Operand(kSecondaryMagic));
  __ AndP(scratch, scratch,
          Operand((kSecondaryTableSize - 1) << kCacheIndexShift));

  // Probe the secondary table.
  ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
             extra3);

  // Cache miss: Fall-through and let caller handle the miss by
  // entering the runtime system.
  __ bind(&miss);
  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
                      extra3);
}

#undef __
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_S390
@@ -48,13 +48,6 @@ class StubCache {
   // Collect all maps that match the name.
   void CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
                            Handle<Context> native_context, Zone* zone);
  // Generate code for probing the stub cache table.
  // Arguments extra, extra2 and extra3 may be used to pass additional scratch
  // registers. Set to no_reg if not needed.
  // If leave_frame is true, then exit a frame before the tail call.
  void GenerateProbe(MacroAssembler* masm, Register receiver, Register name,
                     Register scratch, Register extra, Register extra2 = no_reg,
                     Register extra3 = no_reg);

   enum Table { kPrimary, kSecondary };

@@ -1,39 +0,0 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_X64

#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)


void PropertyICCompiler::GenerateRuntimeSetProperty(
    MacroAssembler* masm, LanguageMode language_mode) {
  // Return address is on the stack.
  DCHECK(!rbx.is(StoreDescriptor::ReceiverRegister()) &&
         !rbx.is(StoreDescriptor::NameRegister()) &&
         !rbx.is(StoreDescriptor::ValueRegister()));

  __ PopReturnAddressTo(rbx);
  __ Push(StoreDescriptor::ReceiverRegister());
  __ Push(StoreDescriptor::NameRegister());
  __ Push(StoreDescriptor::ValueRegister());
  __ Push(Smi::FromInt(language_mode));
  __ PushReturnAddressFrom(rbx);

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kSetProperty);
}


#undef __
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_X64
@@ -121,255 +121,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
   __ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
 }

static void KeyedStoreGenerateMegamorphicHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register key = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  DCHECK(receiver.is(rdx));
  DCHECK(key.is(rcx));
  DCHECK(value.is(rax));
  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  // rbx: receiver's elements array (a FixedArray)
  // receiver is a JSArray.
  // r9: map of receiver
  if (check_map == kCheckMap) {
    __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
    __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
    __ j(not_equal, fast_double);
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed1;
  __ movp(kScratchRegister,
          FieldOperand(rbx, key, times_pointer_size, FixedArray::kHeaderSize));
  __ CompareRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
  __ j(not_equal, &holecheck_passed1);
  __ JumpIfDictionaryInPrototypeChain(receiver, rdi, kScratchRegister, slow);

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ leal(rdi, Operand(key, 1));
    __ Integer32ToSmiField(FieldOperand(receiver, JSArray::kLengthOffset), rdi);
  }
  // It's irrelevant whether array is smi-only or not when writing a smi.
  __ movp(FieldOperand(rbx, key, times_pointer_size, FixedArray::kHeaderSize),
          value);
  __ ret(0);

  __ bind(&non_smi_value);
  // Writing a non-smi, check whether array allows non-smi elements.
  // r9: receiver's map
  __ CheckFastObjectElements(r9, &transition_smi_elements);

  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ leal(rdi, Operand(key, 1));
    __ Integer32ToSmiField(FieldOperand(receiver, JSArray::kLengthOffset), rdi);
  }
  __ movp(FieldOperand(rbx, key, times_pointer_size, FixedArray::kHeaderSize),
          value);
  __ movp(rdx, value);  // Preserve the value which is returned.
  __ RecordWriteArray(rbx, rdx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ ret(0);

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    // rdi: elements array's map
    __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
    __ j(not_equal, slow);
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so
  // go to the runtime.
  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
  __ cmpl(FieldOperand(rbx, key, times_8, offset), Immediate(kHoleNanUpper32));
  __ j(not_equal, &fast_double_without_map_check);
  __ JumpIfDictionaryInPrototypeChain(receiver, rdi, kScratchRegister, slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, rbx, key, kScratchDoubleReg,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ leal(rdi, Operand(key, 1));
    __ Integer32ToSmiField(FieldOperand(receiver, JSArray::kLengthOffset), rdi);
  }
  __ ret(0);

  __ bind(&transition_smi_elements);
  __ movp(rbx, FieldOperand(receiver, HeapObject::kMapOffset));

  // Transition the array appropriately depending on the value type.
  __ movp(r9, FieldOperand(value, HeapObject::kMapOffset));
  __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
  __ j(not_equal, &non_double_value);

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                         FAST_DOUBLE_ELEMENTS, rbx, rdi, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   rbx, mode, slow);
  __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, rbx,
                                         rdi, slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, rbx, mode, slow);
  __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ movp(rbx, FieldOperand(receiver, HeapObject::kMapOffset));
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         rbx, rdi, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(masm, receiver, key,
                                                      value, rbx, mode, slow);
  __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
}


void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                       LanguageMode language_mode) {
  // Return address is on the stack.
  Label slow, slow_with_tagged_index, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array, maybe_name_key, miss;
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register key = StoreDescriptor::NameRegister();
  DCHECK(receiver.is(rdx));
  DCHECK(key.is(rcx));

  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow_with_tagged_index);
  // Get the map from the receiver.
  __ movp(r9, FieldOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks.
  // The generic stub does not perform map checks.
  __ testb(FieldOperand(r9, Map::kBitFieldOffset),
           Immediate(1 << Map::kIsAccessCheckNeeded));
  __ j(not_zero, &slow_with_tagged_index);
  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &maybe_name_key);
  __ SmiToInteger32(key, key);

  __ CmpInstanceType(r9, JS_ARRAY_TYPE);
  __ j(equal, &array);
  // Check that the object is some kind of JS object EXCEPT JS Value type. In
  // the case that the object is a value-wrapper object, we enter the runtime
  // system to make sure that indexing into string objects works as intended.
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  __ CmpInstanceType(r9, JS_OBJECT_TYPE);
  __ j(below, &slow);

  // Object case: Check key against length in the elements array.
  __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds.
  __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), key);
  // rbx: FixedArray
  __ j(above, &fast_object);

  // Slow case: call runtime.
  __ bind(&slow);
  __ Integer32ToSmi(key, key);
  __ bind(&slow_with_tagged_index);
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
  // Never returns to here.

  __ bind(&maybe_name_key);
  __ movp(r9, FieldOperand(key, HeapObject::kMapOffset));
  __ movzxbp(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
  __ JumpIfNotUniqueNameInstanceType(r9, &slow_with_tagged_index);

  Register vector = StoreWithVectorDescriptor::VectorRegister();
  Register slot = StoreWithVectorDescriptor::SlotRegister();
  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
  __ Move(vector, dummy_vector);
  __ Move(slot, Smi::FromInt(slot_index));

  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, r9,
                                                     no_reg);
  // Cache miss.
  __ jmp(&miss);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // receiver is a JSArray.
  // rbx: receiver's elements array (a FixedArray)
  // flags: smicompare (receiver.length(), rbx)
  __ j(not_equal, &slow);  // do not leave holes in the array
  __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), key);
  __ j(below_equal, &slow);
  // Increment index to get new length.
  __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
  __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
  __ j(not_equal, &check_if_double_array);
  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  // rdi: elements array's map
  __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
  __ j(not_equal, &slow);
  __ jmp(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.
  __ bind(&array);
  // receiver is a JSArray.
  __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array, compute the
  // address to store into and fall through to fast case.
  __ SmiCompareInteger32(FieldOperand(receiver, JSArray::kLengthOffset), key);
  __ j(below_equal, &extra);

  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object, &fast_double, &slow,
                                      kCheckMap, kDontIncrementLength);
  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
                                      &fast_double_grow, &slow, kDontCheckMap,
                                      kIncrementLength);

  __ bind(&miss);
  GenerateMiss(masm);
}

void LoadIC::GenerateNormal(MacroAssembler* masm) {
  Register dictionary = rax;
  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));

@@ -1,153 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_X64

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/interface-descriptors.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
                       StubCache::Table table, Register receiver, Register name,
                       // The offset is scaled by 4, based on
                       // kCacheIndexShift, which is two bits.
                       Register offset) {
  // We need to scale up the pointer by 2 when the offset is scaled by less
  // than the pointer size.
  DCHECK(kPointerSize == kInt64Size
             ? kPointerSizeLog2 == StubCache::kCacheIndexShift + 1
             : kPointerSizeLog2 == StubCache::kCacheIndexShift);
  ScaleFactor scale_factor = kPointerSize == kInt64Size ? times_2 : times_1;

  DCHECK_EQ(3u * kPointerSize, sizeof(StubCache::Entry));
  // The offset register holds the entry offset times four (due to masking
  // and shifting optimizations).
  ExternalReference key_offset(stub_cache->key_reference(table));
  ExternalReference value_offset(stub_cache->value_reference(table));
  Label miss;

  // Multiply by 3 because there are 3 fields per entry (name, code, map).
  __ leap(offset, Operand(offset, offset, times_2, 0));

  __ LoadAddress(kScratchRegister, key_offset);

  // Check that the key in the entry matches the name.
  __ cmpp(name, Operand(kScratchRegister, offset, scale_factor, 0));
  __ j(not_equal, &miss);

  // Get the map entry from the cache.
  // Use key_offset + kPointerSize * 2, rather than loading map_offset.
  DCHECK(stub_cache->map_reference(table).address() -
             stub_cache->key_reference(table).address() ==
         kPointerSize * 2);
  __ movp(kScratchRegister,
          Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
  __ cmpp(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
  __ j(not_equal, &miss);

  // Get the code entry from the cache.
  __ LoadAddress(kScratchRegister, value_offset);
  __ movp(kScratchRegister, Operand(kScratchRegister, offset, scale_factor, 0));

#ifdef DEBUG
  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
    __ jmp(&miss);
  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
    __ jmp(&miss);
  }
#endif

  // Jump to the first instruction in the code stub.
  __ addp(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
  __ jmp(kScratchRegister);

  __ bind(&miss);
}

void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
                              Register name, Register scratch, Register extra,
                              Register extra2, Register extra3) {
  Label miss;
  USE(extra);   // The register extra is not used on the X64 platform.
  USE(extra2);  // The register extra2 is not used on the X64 platform.
  USE(extra3);  // The register extra3 is not used on the X64 platform.
  // Make sure that the code is valid. The multiplying code relies on the
  // entry size being 3 * kPointerSize.
  DCHECK(sizeof(Entry) == 3 * kPointerSize);

  // Make sure that there are no register conflicts.
  DCHECK(!scratch.is(receiver));
  DCHECK(!scratch.is(name));

  // Check that the scratch register is valid; extra2 and extra3 are unused.
  DCHECK(!scratch.is(no_reg));
  DCHECK(extra2.is(no_reg));
  DCHECK(extra3.is(no_reg));

#ifdef DEBUG
  // If vector-based ics are in use, ensure that scratch doesn't conflict with
  // the vector and slot registers, which need to be preserved for a handler
  // call or miss.
  if (IC::ICUseVector(ic_kind_)) {
    if (ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC) {
      Register vector = LoadWithVectorDescriptor::VectorRegister();
      Register slot = LoadDescriptor::SlotRegister();
      DCHECK(!AreAliased(vector, slot, scratch));
    } else {
      DCHECK(ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC);
      Register vector = StoreWithVectorDescriptor::VectorRegister();
      Register slot = StoreWithVectorDescriptor::SlotRegister();
      DCHECK(!AreAliased(vector, slot, scratch));
    }
  }
#endif

  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &miss);

  // Get the map of the receiver and compute the hash.
  __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
  // Use only the low 32 bits of the map pointer.
  __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
  __ xorp(scratch, Immediate(kPrimaryMagic));
  // We mask out the last two bits because they are not part of the hash and
  // they are always 01 for maps. Also in the two 'and' instructions below.
  __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));

  // Probe the primary table.
  ProbeTable(this, masm, kPrimary, receiver, name, scratch);

  // Primary miss: Compute hash for secondary probe.
  __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
  __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
  __ xorp(scratch, Immediate(kPrimaryMagic));
  __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
  __ subl(scratch, name);
  __ addl(scratch, Immediate(kSecondaryMagic));
  __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kCacheIndexShift));

  // Probe the secondary table.
  ProbeTable(this, masm, kSecondary, receiver, name, scratch);

  // Cache miss: Fall-through and let caller handle the miss by
  // entering the runtime system.
  __ bind(&miss);
  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
}


#undef __
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_X64
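The address arithmetic in the x64 ProbeTable is easy to misread: the masked hash arrives pre-scaled by 4 (kCacheIndexShift), the leap multiplies it by 3, and the times_2 scale factor doubles it again, which lands exactly on 24-byte entries. A standalone C++ sketch of that arithmetic, assuming 8-byte pointers and the name/code/map entry layout asserted above:

#include <cassert>
#include <cstdint>

constexpr int kCacheIndexShift = 2;           // hash mask keeps index << 2
constexpr int kPointerSize = 8;               // x64
constexpr int kEntrySize = 3 * kPointerSize;  // { name, code, map }

uintptr_t EntryByteOffset(uint32_t masked_hash) {
  uintptr_t off = masked_hash;
  off = off + off * 2;  // leap(offset, Operand(offset, offset, times_2, 0))
  off = off * 2;        // scale_factor == times_2 when kPointerSize == 8
  return off;
}

int main() {
  for (uint32_t index = 0; index < 16; ++index) {
    // The probe reaches entry 'index' at byte offset index * sizeof(Entry).
    assert(EntryByteOffset(index << kCacheIndexShift) == index * kEntrySize);
  }
}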
@@ -1,45 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_X87

#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)


void PropertyICCompiler::GenerateRuntimeSetProperty(
    MacroAssembler* masm, LanguageMode language_mode) {
  typedef StoreWithVectorDescriptor Descriptor;
  STATIC_ASSERT(Descriptor::kStackArgumentsCount == 3);
  // ----------- S t a t e -------------
  //  -- esp[12] : value
  //  -- esp[8]  : slot
  //  -- esp[4]  : vector
  //  -- esp[0]  : return address
  // -----------------------------------
  __ LoadParameterFromStack<Descriptor>(Descriptor::ValueRegister(),
                                        Descriptor::kValue);

  __ mov(Operand(esp, 12), Descriptor::ReceiverRegister());
  __ mov(Operand(esp, 8), Descriptor::NameRegister());
  __ mov(Operand(esp, 4), Descriptor::ValueRegister());
  __ pop(ebx);
  __ push(Immediate(Smi::FromInt(language_mode)));
  __ push(ebx);  // return address

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kSetProperty);
}


#undef __
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_X87
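Because the x87 store descriptor passes value, slot and vector on the stack, the stub above rewrites those slots in place before the tail call. A sketch that models the rewrite with one string per 4-byte stack slot (illustrative only, not V8 code; index 0 is esp[0]):

#include <cassert>
#include <string>
#include <vector>

std::vector<std::string> RewriteStoreStack() {
  std::vector<std::string> s = {"ret", "vector", "slot", "value"};
  s[3] = "receiver";                     // __ mov(Operand(esp, 12), receiver)
  s[2] = "name";                         // __ mov(Operand(esp, 8), name)
  s[1] = "value";                        // __ mov(Operand(esp, 4), value)
  std::string ret = s[0];                // __ pop(ebx)
  s.erase(s.begin());
  s.insert(s.begin(), "language_mode");  // __ push(Immediate(Smi::FromInt(...)))
  s.insert(s.begin(), ret);              // __ push(ebx)
  return s;  // the runtime sees receiver deepest, language_mode on top
}

int main() {
  assert(RewriteStoreStack() ==
         (std::vector<std::string>{"ret", "language_mode", "value", "name",
                                   "receiver"}));
}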
@@ -120,251 +120,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
   __ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
 }

static void KeyedStoreGenerateMegamorphicHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register key = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  DCHECK(receiver.is(edx));
  DCHECK(key.is(ecx));
  DCHECK(value.is(eax));
  // key is a smi.
  // ebx: FixedArray receiver->elements
  // edi: receiver map
  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  if (check_map == kCheckMap) {
    __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
    __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
    __ j(not_equal, fast_double);
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed1;
  __ cmp(FixedArrayElementOperand(ebx, key),
         masm->isolate()->factory()->the_hole_value());
  __ j(not_equal, &holecheck_passed1);
  __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow);
  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(FieldOperand(receiver, JSArray::kLengthOffset),
           Immediate(Smi::FromInt(1)));
  }
  // It's irrelevant whether array is smi-only or not when writing a smi.
  __ mov(FixedArrayElementOperand(ebx, key), value);
  __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
  __ CheckFastObjectElements(edi, &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(FieldOperand(receiver, JSArray::kLengthOffset),
           Immediate(Smi::FromInt(1)));
  }
  __ mov(FixedArrayElementOperand(ebx, key), value);
  // Update write barrier for the elements array address.
  __ mov(edx, value);  // Preserve the value which is returned.
  __ RecordWriteArray(ebx, edx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
    __ j(not_equal, slow);
    // If the value is a number, store it as a double in the FastDoubleElements
    // array.
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so
  // go to the runtime.
  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
  __ cmp(FieldOperand(ebx, key, times_4, offset), Immediate(kHoleNanUpper32));
  __ j(not_equal, &fast_double_without_map_check);
  __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow);
  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, ebx, key, edi,
                                 &transition_double_elements, false);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(FieldOperand(receiver, JSArray::kLengthOffset),
           Immediate(Smi::FromInt(1)));
  }
  __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);

  __ bind(&transition_smi_elements);
  __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));

  // Transition the array appropriately depending on the value type.
  __ CheckMap(value, masm->isolate()->factory()->heap_number_map(),
              &non_double_value, DONT_DO_SMI_CHECK);

  // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
  // and complete the store.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                         FAST_DOUBLE_ELEMENTS, ebx, edi, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   ebx, mode, slow);
  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, ebx,
                                         edi, slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, ebx, mode, slow);
  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         ebx, edi, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(masm, receiver, key,
                                                      value, ebx, mode, slow);
  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
}


void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                       LanguageMode language_mode) {
  typedef StoreWithVectorDescriptor Descriptor;
  // Return address is on the stack.
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array, maybe_name_key, miss;
  Register receiver = Descriptor::ReceiverRegister();
  Register key = Descriptor::NameRegister();
  DCHECK(receiver.is(edx));
  DCHECK(key.is(ecx));

  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map from the receiver.
  __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks.
  // The generic stub does not perform map checks.
  __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
            Immediate(1 << Map::kIsAccessCheckNeeded));
  __ j(not_zero, &slow);

  __ LoadParameterFromStack<Descriptor>(Descriptor::ValueRegister(),
                                        Descriptor::kValue);

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &maybe_name_key);
  __ CmpInstanceType(edi, JS_ARRAY_TYPE);
  __ j(equal, &array);
  // Check that the object is some kind of JS object EXCEPT JS Value type. In
  // the case that the object is a value-wrapper object, we enter the runtime
  // system to make sure that indexing into string objects works as intended.
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  __ CmpInstanceType(edi, JS_OBJECT_TYPE);
  __ j(below, &slow);

  // Object case: Check key against length in the elements array.
  // Key is a smi.
  // edi: receiver map
  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset));
  __ j(below, &fast_object);

  // Slow case: call runtime.
  __ bind(&slow);
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
  // Never returns to here.

  __ bind(&maybe_name_key);
  __ mov(ebx, FieldOperand(key, HeapObject::kMapOffset));
  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
  __ JumpIfNotUniqueNameInstanceType(ebx, &slow);

  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, edi,
                                                     no_reg);

  // Cache miss.
  __ jmp(&miss);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // receiver is a JSArray.
  // key is a smi.
  // ebx: receiver->elements, a FixedArray
  // edi: receiver map
  // flags: compare (key, receiver.length())
  // do not leave holes in the array:
  __ j(not_equal, &slow);
  __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset));
  __ j(above_equal, &slow);
  __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
  __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
  __ j(not_equal, &check_if_double_array);
  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
  __ j(not_equal, &slow);
  __ jmp(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.
  __ bind(&array);
  // receiver is a JSArray.
  // key is a smi.
  // edi: receiver map
  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array and fall through to the
  // common store code.
  __ cmp(key, FieldOperand(receiver, JSArray::kLengthOffset));  // Compare smis.
  __ j(above_equal, &extra);

  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object, &fast_double, &slow,
                                      kCheckMap, kDontIncrementLength);
  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
                                      &fast_double_grow, &slow, kDontCheckMap,
                                      kIncrementLength);

  __ bind(&miss);
  GenerateMiss(masm);
}

void LoadIC::GenerateNormal(MacroAssembler* masm) {
  Register dictionary = eax;
  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));

@@ -1,185 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_X87

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/interface-descriptors.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
                       StubCache::Table table, Register name, Register receiver,
                       // The offset is scaled by 4, based on
                       // kCacheIndexShift, which is two bits
                       Register offset, Register extra) {
  ExternalReference key_offset(stub_cache->key_reference(table));
  ExternalReference value_offset(stub_cache->value_reference(table));
  ExternalReference map_offset(stub_cache->map_reference(table));

  Label miss;
  Code::Kind ic_kind = stub_cache->ic_kind();
  bool is_vector_store =
      IC::ICUseVector(ic_kind) &&
      (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);

  // Multiply by 3 because there are 3 fields per entry (name, code, map).
  __ lea(offset, Operand(offset, offset, times_2, 0));

  if (extra.is_valid()) {
    // Get the code entry from the cache.
    __ mov(extra, Operand::StaticArray(offset, times_1, value_offset));

    // Check that the key in the entry matches the name.
    __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
    __ j(not_equal, &miss);

    // Check the map matches.
    __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
    __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
    __ j(not_equal, &miss);

#ifdef DEBUG
    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
      __ jmp(&miss);
    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
      __ jmp(&miss);
    }
#endif

    if (is_vector_store) {
      // The value, vector and slot were passed to the IC on the stack and
      // they are still there. So we can just jump to the handler.
      DCHECK(extra.is(StoreWithVectorDescriptor::SlotRegister()));
      __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
      __ jmp(extra);
    } else {
      // The vector and slot were pushed onto the stack before starting the
      // probe, and need to be dropped before calling the handler.
      __ pop(LoadWithVectorDescriptor::VectorRegister());
      __ pop(LoadDescriptor::SlotRegister());
      __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
      __ jmp(extra);
    }

    __ bind(&miss);
  } else {
    DCHECK(ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);

    // Save the offset on the stack.
    __ push(offset);

    // Check that the key in the entry matches the name.
    __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
    __ j(not_equal, &miss);

    // Check the map matches.
    __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
    __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
    __ j(not_equal, &miss);

    // Restore offset register.
    __ mov(offset, Operand(esp, 0));

    // Get the code entry from the cache.
    __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));

#ifdef DEBUG
    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
      __ jmp(&miss);
    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
      __ jmp(&miss);
    }
#endif

    // Restore offset and re-load code entry from cache.
    __ pop(offset);
    __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));

    // Jump to the first instruction in the code stub.
    if (is_vector_store) {
      DCHECK(offset.is(StoreWithVectorDescriptor::SlotRegister()));
    }
    __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
    __ jmp(offset);

    // Pop at miss.
    __ bind(&miss);
    __ pop(offset);
  }
}

void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
                              Register name, Register scratch, Register extra,
                              Register extra2, Register extra3) {
  Label miss;

  // Assert that code is valid. The multiplying code relies on the entry size
  // being 12.
  DCHECK(sizeof(Entry) == 12);

  // Assert that there are no register conflicts.
  DCHECK(!scratch.is(receiver));
  DCHECK(!scratch.is(name));
  DCHECK(!extra.is(receiver));
  DCHECK(!extra.is(name));
  DCHECK(!extra.is(scratch));

  // Assert scratch and extra registers are valid, and extra2/3 are unused.
  DCHECK(!scratch.is(no_reg));
  DCHECK(extra2.is(no_reg));
  DCHECK(extra3.is(no_reg));

  Register offset = scratch;
  scratch = no_reg;

  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &miss);

  // Get the map of the receiver and compute the hash.
  __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
  __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
  __ xor_(offset, kPrimaryMagic);
  // We mask out the last two bits because they are not part of the hash and
  // they are always 01 for maps. Also in the two 'and' instructions below.
  __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
  // ProbeTable expects the offset to be pointer scaled, which it is, because
  // the heap object tag size is 2 and the pointer size log 2 is also 2.
  DCHECK(kCacheIndexShift == kPointerSizeLog2);

  // Probe the primary table.
  ProbeTable(this, masm, kPrimary, name, receiver, offset, extra);

  // Primary miss: Compute hash for secondary probe.
  __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
  __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
  __ xor_(offset, kPrimaryMagic);
  __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
  __ sub(offset, name);
  __ add(offset, Immediate(kSecondaryMagic));
  __ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);

  // Probe the secondary table.
  ProbeTable(this, masm, kSecondary, name, receiver, offset, extra);

  // Cache miss: Fall-through and let caller handle the miss by
  // entering the runtime system.
  __ bind(&miss);
  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
}

#undef __
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_X87
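
For readers following along, the probe hash that GenerateProbe emits above can be modelled in a few lines of standalone C++. This is a sketch only: the magic constants, table sizes, and function names below are illustrative stand-ins, not V8's actual definitions (those live in src/ic/stub-cache.h).

#include <cstdint>
#include <cstdio>

// Stand-in constants; treat the concrete values as assumptions.
const uint32_t kPrimaryMagic = 0x3d532433;
const uint32_t kSecondaryMagic = 0xb16ca6e5;
const int kCacheIndexShift = 2;  // == kPointerSizeLog2 on ia32/x87
const int kPrimaryTableSize = 2048;
const int kSecondaryTableSize = 512;

// Mirrors the primary hash above: (name_hash + map) ^ magic, masked to a
// table index that is already scaled by the entry stride.
uint32_t PrimaryOffset(uint32_t name_hash, uint32_t map) {
  uint32_t offset = (name_hash + map) ^ kPrimaryMagic;
  return offset & ((kPrimaryTableSize - 1) << kCacheIndexShift);
}

// Mirrors the secondary hash: re-derive the primary index, subtract the
// name, add a second magic value, mask with the secondary table size.
uint32_t SecondaryOffset(uint32_t name_hash, uint32_t map, uint32_t name) {
  uint32_t offset = PrimaryOffset(name_hash, map);
  offset = offset - name + kSecondaryMagic;
  return offset & ((kSecondaryTableSize - 1) << kCacheIndexShift);
}

int main() {
  // Example probe for an arbitrary (name, map) pair.
  printf("primary: %u\n", PrimaryOffset(0x1234u, 0xdeadbeefu));
  printf("secondary: %u\n", SecondaryOffset(0x1234u, 0xdeadbeefu, 0x5678u));
}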
@@ -3266,233 +3266,6 @@ void CallICTrampolineStub::Generate(MacroAssembler* masm) {
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}


static void HandleArrayCases(MacroAssembler* masm, Register feedback,
                             Register receiver_map, Register scratch1,
                             Register scratch2, bool is_polymorphic,
                             Label* miss) {
  // feedback initially contains the feedback array
  Label next_loop, prepare_next;
  Label start_polymorphic;

  Register cached_map = scratch1;

  __ lw(cached_map,
        FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
  __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ Branch(&start_polymorphic, ne, receiver_map, Operand(cached_map));
  // found, now call handler.
  Register handler = feedback;
  __ lw(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
  __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(t9);


  Register length = scratch2;
  __ bind(&start_polymorphic);
  __ lw(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
  if (!is_polymorphic) {
    // If the IC could be monomorphic we have to make sure we don't go past the
    // end of the feedback array.
    __ Branch(miss, eq, length, Operand(Smi::FromInt(2)));
  }

  Register too_far = length;
  Register pointer_reg = feedback;

  // +-----+------+------+-----+-----+ ... ----+
  // | map | len  | wm0  | h0  | wm1 |      hN |
  // +-----+------+------+-----+-----+ ... ----+
  //                 0      1                len-1
  //                 ^                          ^
  //                 |                          |
  //             pointer_reg                 too_far
  //             aka feedback                scratch2
  // also need receiver_map
  // use cached_map (scratch1) to look in the weak map values.
  __ Lsa(too_far, feedback, length, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Addu(pointer_reg, feedback,
          Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));

  __ bind(&next_loop);
  __ lw(cached_map, MemOperand(pointer_reg));
  __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
  __ lw(handler, MemOperand(pointer_reg, kPointerSize));
  __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(t9);

  __ bind(&prepare_next);
  __ Addu(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
  __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));

  // We exhausted our array of map handler pairs.
  __ jmp(miss);
}


static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
                                  Register receiver_map, Register feedback,
                                  Register vector, Register slot,
                                  Register scratch, Label* compare_map,
                                  Label* load_smi_map, Label* try_array) {
  __ JumpIfSmi(receiver, load_smi_map);
  __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ bind(compare_map);
  Register cached_map = scratch;
  // Move the weak map into the weak_cell register.
  __ lw(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
  __ Branch(try_array, ne, cached_map, Operand(receiver_map));
  Register handler = feedback;

  __ Lsa(handler, vector, slot, kPointerSizeLog2 - kSmiTagSize);
  __ lw(handler,
        FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
  __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(t9);
}


void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
  KeyedStoreICStub stub(isolate(), state());
  stub.GenerateForTrampoline(masm);
}

void KeyedStoreICStub::Generate(MacroAssembler* masm) {
  GenerateImpl(masm, false);
}

void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, true);
}


static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
                                       Register receiver_map, Register scratch1,
                                       Register scratch2, Label* miss) {
  // feedback initially contains the feedback array
  Label next_loop, prepare_next;
  Label start_polymorphic;
  Label transition_call;

  Register cached_map = scratch1;
  Register too_far = scratch2;
  Register pointer_reg = feedback;
  __ lw(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));

  // +-----+------+------+-----+-----+-----+ ... ----+
  // | map | len  | wm0  | wt0 | h0  | wm1 |      hN |
  // +-----+------+------+-----+-----+-----+ ... ----+
  //                 0      1     2                len-1
  //                 ^                                ^
  //                 |                                |
  //             pointer_reg                       too_far
  //             aka feedback                      scratch2
  // also need receiver_map
  // use cached_map (scratch1) to look in the weak map values.
  __ Lsa(too_far, feedback, too_far, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Addu(pointer_reg, feedback,
          Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));

  __ bind(&next_loop);
  __ lw(cached_map, MemOperand(pointer_reg));
  __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
  // Is it a transitioning store?
  __ lw(too_far, MemOperand(pointer_reg, kPointerSize));
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  __ Branch(&transition_call, ne, too_far, Operand(at));
  __ lw(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
  __ Addu(t9, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(t9);

  __ bind(&transition_call);
  __ lw(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
  __ JumpIfSmi(too_far, miss);

  __ lw(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));

  // Load the map into the correct register.
  DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
  __ mov(feedback, too_far);

  __ Addu(t9, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(t9);

  __ bind(&prepare_next);
  __ Addu(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
  __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));

  // We exhausted our array of map handler pairs.
  __ jmp(miss);
}

void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // a1
  Register key = StoreWithVectorDescriptor::NameRegister();           // a2
  Register vector = StoreWithVectorDescriptor::VectorRegister();      // a3
  Register slot = StoreWithVectorDescriptor::SlotRegister();          // t0
  DCHECK(StoreWithVectorDescriptor::ValueRegister().is(a0));          // a0
  Register feedback = t1;
  Register receiver_map = t2;
  Register scratch1 = t5;

  __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
  __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));

  // Try to quickly handle the monomorphic case without knowing for sure
  // if we have a weak cell in feedback. We do know it's safe to look
  // at WeakCell::kValueOffset.
  Label try_array, load_smi_map, compare_map;
  Label not_array, miss;
  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
                        scratch1, &compare_map, &load_smi_map, &try_array);

  __ bind(&try_array);
  // Is it a fixed array?
  __ lw(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
  __ Branch(&not_array, ne, scratch1, Operand(at));

  // We have a polymorphic element handler.
  Label polymorphic, try_poly_name;
  __ bind(&polymorphic);

  Register scratch2 = t4;

  HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
                             &miss);

  __ bind(&not_array);
  // Is it generic?
  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
  __ Branch(&try_poly_name, ne, feedback, Operand(at));
  Handle<Code> megamorphic_stub =
      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);

  __ bind(&try_poly_name);
  // We might have a name in feedback, and a fixed array in the next slot.
  __ Branch(&miss, ne, key, Operand(feedback));
  // If the name comparison succeeded, we know we have a fixed array with
  // at least one map/handler pair.
  __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
  __ lw(feedback,
        FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
                   &miss);

  __ bind(&miss);
  KeyedStoreIC::GenerateMiss(masm);

  __ bind(&load_smi_map);
  __ Branch(USE_DELAY_SLOT, &compare_map);
  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);  // In delay slot.
}


void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
  if (masm->isolate()->function_entry_hook() != NULL) {
    ProfileEntryHookStub stub(masm->isolate());
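
The handlers above walk a feedback array of (weak map, handler) groups, with store ICs adding an optional transition map per entry. A hedged C++ model of that walk, using hypothetical types standing in for V8's real ones:

#include <cstddef>

struct Map;
struct Code;

// One (map, transition, handler) group as laid out in the store feedback
// array that HandlePolymorphicStoreCase walks above. A null transition
// models the "undefined" sentinel marking a non-transitioning store.
struct StoreFeedbackEntry {
  Map* weak_map;    // weak cell payload; null once the cell is cleared
  Map* transition;  // null for a plain store
  Code* handler;
};

struct StoreLookupResult {
  Map* transition;  // null unless this is a transitioning store
  Code* handler;    // null means "miss"
};

// Linear scan mirroring the next_loop / transition_call / prepare_next
// labels: find the receiver's map, return its transition and handler.
StoreLookupResult FindStoreHandler(const StoreFeedbackEntry* entries,
                                   size_t count, const Map* receiver_map) {
  for (size_t i = 0; i < count; i++) {
    if (entries[i].weak_map == receiver_map) {
      return {entries[i].transition, entries[i].handler};
    }
  }
  return {nullptr, nullptr};  // fall through to the miss label
}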
@@ -605,351 +605,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch_elements = t0;
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     scratch_elements));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(
        receiver, scratch_elements, allocation_memento_found);
  }

  // Set transitioned map.
  __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      t5,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Register ra contains the return address.
  Label loop, entry, convert_hole, gc_required, only_change_map, done;
  Register elements = t0;
  Register length = t1;
  Register array = t2;
  Register array_end = array;

  // target_map parameter can be clobbered.
  Register scratch1 = target_map;
  Register scratch2 = t5;
  Register scratch3 = t3;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, length, array, scratch2));

  Register scratch = t6;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(&only_change_map, eq, at, Operand(elements));

  __ push(ra);
  __ lw(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // elements: source FixedArray
  // length: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
  __ sll(scratch, length, 2);
  __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
  __ Allocate(scratch, array, t3, scratch2, &gc_required, DOUBLE_ALIGNMENT);
  // array: destination FixedDoubleArray, tagged as heap object

  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
  __ sw(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
  // Update receiver's map.
  __ sw(scratch2, FieldMemOperand(array, HeapObject::kMapOffset));

  __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch2,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ Addu(scratch1, array, Operand(kHeapObjectTag - kHeapObjectTag));
  __ sw(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver,
                      JSObject::kElementsOffset,
                      scratch1,
                      scratch2,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);


  // Prepare for conversion loop.
  __ Addu(scratch1, elements,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Addu(scratch3, array,
          Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  __ Lsa(array_end, scratch3, length, 2);

  // Repurpose registers no longer in use.
  Register hole_lower = elements;
  Register hole_upper = length;
  __ li(hole_lower, Operand(kHoleNanLower32));
  __ li(hole_upper, Operand(kHoleNanUpper32));

  // scratch1: begin of source FixedArray element fields, not tagged
  // hole_lower: kHoleNanLower32
  // hole_upper: kHoleNanUpper32
  // array_end: end of destination FixedDoubleArray, not tagged
  // scratch3: begin of FixedDoubleArray element fields, not tagged

  __ Branch(&entry);

  __ bind(&only_change_map);
  __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch2,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ Branch(&done);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ lw(ra, MemOperand(sp, 0));
  __ Branch(USE_DELAY_SLOT, fail);
  __ addiu(sp, sp, kPointerSize);  // In delay slot.

  // Convert and copy elements.
  __ bind(&loop);
  __ lw(scratch2, MemOperand(scratch1));
  __ Addu(scratch1, scratch1, kIntSize);
  // scratch2: current element
  __ UntagAndJumpIfNotSmi(scratch2, scratch2, &convert_hole);

  // Normal smi, convert to double and store.
  __ mtc1(scratch2, f0);
  __ cvt_d_w(f0, f0);
  __ sdc1(f0, MemOperand(scratch3));
  __ Branch(USE_DELAY_SLOT, &entry);
  __ addiu(scratch3, scratch3, kDoubleSize);  // In delay slot.

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    // Restore a "smi-untagged" heap object.
    __ SmiTag(scratch2);
    __ Or(scratch2, scratch2, Operand(1));
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(scratch2));
  }
  // mantissa
  __ sw(hole_lower, MemOperand(scratch3, Register::kMantissaOffset));
  // exponent
  __ sw(hole_upper, MemOperand(scratch3, Register::kExponentOffset));
  __ addiu(scratch3, scratch3, kDoubleSize);

  __ bind(&entry);
  __ Branch(&loop, lt, scratch3, Operand(array_end));

  __ bind(&done);
  __ pop(ra);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Register ra contains the return address.
  Label entry, loop, convert_hole, gc_required, only_change_map;
  Register elements = t0;
  Register array = t2;
  Register length = t1;
  Register scratch = t5;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, array, length, scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(&only_change_map, eq, at, Operand(elements));

  __ MultiPush(
      value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());

  __ lw(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // elements: source FixedArray
  // length: number of elements (smi-tagged)

  // Allocate new FixedArray.
  // Re-use value and target_map registers, as they have been saved on the
  // stack.
  Register array_size = value;
  Register allocate_scratch = target_map;
  __ sll(array_size, length, 1);
  __ Addu(array_size, array_size, FixedDoubleArray::kHeaderSize);
  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
              NO_ALLOCATION_FLAGS);
  // array: destination FixedArray, not tagged as heap object
  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
  __ sw(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
  __ sw(scratch, FieldMemOperand(array, HeapObject::kMapOffset));

  // Prepare for conversion loop.
  Register src_elements = elements;
  Register dst_elements = target_map;
  Register dst_end = length;
  Register heap_number_map = scratch;
  __ Addu(src_elements, src_elements, Operand(
      FixedDoubleArray::kHeaderSize - kHeapObjectTag
      + Register::kExponentOffset));
  __ Addu(dst_elements, array,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Lsa(dst_end, dst_elements, dst_end, 1);

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  Label initialization_loop, initialization_loop_entry;
  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
  __ Branch(&initialization_loop_entry);
  __ bind(&initialization_loop);
  __ sw(scratch, MemOperand(dst_elements));
  __ Addu(dst_elements, dst_elements, Operand(kPointerSize));
  __ bind(&initialization_loop_entry);
  __ Branch(&initialization_loop, lt, dst_elements, Operand(dst_end));

  __ Addu(dst_elements, array,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  // Using offsetted addresses.
  // dst_elements: begin of destination FixedArray element fields, not tagged
  // src_elements: begin of source FixedDoubleArray element fields, not tagged,
  //               points to the exponent
  // dst_end: end of destination FixedArray, not tagged
  // array: destination FixedArray
  // heap_number_map: heap number map
  __ Branch(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ MultiPop(
      value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());

  __ Branch(fail);

  __ bind(&loop);
  Register upper_bits = key;
  __ lw(upper_bits, MemOperand(src_elements));
  __ Addu(src_elements, src_elements, kDoubleSize);
  // upper_bits: current element's upper 32 bit
  // src_elements: address of next element's upper 32 bit
  __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));

  // Non-hole double, copy value into a heap number.
  Register heap_number = receiver;
  Register scratch2 = value;
  Register scratch3 = t6;
  __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
                        &gc_required);
  // heap_number: new heap number
  // Load mantissa of current element, src_elements
  // point to exponent of next element.
  __ lw(scratch2, MemOperand(src_elements, (Register::kMantissaOffset
      - Register::kExponentOffset - kDoubleSize)));
  __ sw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
  __ sw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
  __ mov(scratch2, dst_elements);
  __ sw(heap_number, MemOperand(dst_elements));
  __ Addu(dst_elements, dst_elements, kIntSize);
  __ RecordWrite(array,
                 scratch2,
                 heap_number,
                 kRAHasBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ Branch(&entry);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
  __ sw(scratch2, MemOperand(dst_elements));
  __ Addu(dst_elements, dst_elements, kIntSize);

  __ bind(&entry);
  __ Branch(&loop, lt, dst_elements, Operand(dst_end));

  __ MultiPop(receiver.bit() | target_map.bit() | value.bit() | key.bit());
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ sw(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver,
                      JSObject::kElementsOffset,
                      array,
                      scratch,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(ra);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
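
Both transition generators above encode array holes as a dedicated NaN bit pattern, so a FixedDoubleArray can represent missing elements in-band. A small standalone sketch of that encoding; the concrete constants below mirror kHoleNanUpper32/kHoleNanLower32 but should be treated as assumptions rather than quoted values:

#include <cstdint>
#include <cstring>

// Stand-in values for the hole NaN halves defined in src/globals.h.
const uint32_t kHoleNanUpper32 = 0xFFF7FFFF;
const uint32_t kHoleNanLower32 = 0xFFF7FFFF;

// Build the hole NaN from its two 32-bit halves, as the conversion loop
// above does with two sw instructions (mantissa word, then exponent word).
double HoleNan() {
  uint64_t bits = (uint64_t{kHoleNanUpper32} << 32) | kHoleNanLower32;
  double result;
  std::memcpy(&result, &bits, sizeof(result));
  return result;
}

// Convert one smi-or-hole element, mirroring the loop/convert_hole split.
double ConvertElement(bool is_hole, int32_t smi_value) {
  return is_hole ? HoleNan() : static_cast<double>(smi_value);
}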
@@ -4518,77 +4518,6 @@ void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
  Branch(&loop, ult, current_address, Operand(end_address));
}

void MacroAssembler::CheckFastObjectElements(Register map,
                                             Register scratch,
                                             Label* fail) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  Branch(fail, ls, scratch,
         Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
  Branch(fail, hi, scratch,
         Operand(Map::kMaximumBitField2FastHoleyElementValue));
}


void MacroAssembler::CheckFastSmiElements(Register map,
                                          Register scratch,
                                          Label* fail) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  Branch(fail, hi, scratch,
         Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
}


void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
                                                 Register key_reg,
                                                 Register elements_reg,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Register scratch3,
                                                 Label* fail,
                                                 int elements_offset) {
  DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1, scratch2,
                     scratch3));
  Label smi_value, done;

  // Handle smi values specially.
  JumpIfSmi(value_reg, &smi_value);

  // Ensure that the object is a heap number.
  CheckMap(value_reg,
           scratch1,
           Heap::kHeapNumberMapRootIndex,
           fail,
           DONT_DO_SMI_CHECK);

  // Double value, turn potential sNaN into qNaN.
  DoubleRegister double_result = f0;
  DoubleRegister double_scratch = f2;

  ldc1(double_result, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
  Branch(USE_DELAY_SLOT, &done);  // Canonicalization is one instruction.
  FPUCanonicalizeNaN(double_result, double_result);

  bind(&smi_value);
  Register untagged_value = scratch2;
  SmiUntag(untagged_value, value_reg);
  mtc1(untagged_value, double_scratch);
  cvt_d_w(double_result, double_scratch);

  bind(&done);
  Addu(scratch1, elements_reg,
       Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
               elements_offset));
  Lsa(scratch1, scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
  // scratch1 is now the effective address of the double element.
  sdc1(double_result, MemOperand(scratch1, 0));
}
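
StoreNumberToDoubleElements turns a potential signaling NaN into a quiet NaN before the store, so every NaN written into a FixedDoubleArray shares one canonical bit pattern. A minimal scalar sketch of that canonicalization step:

#include <cmath>
#include <limits>

// Any NaN (signaling or quiet, whatever the payload) is replaced by one
// canonical quiet NaN before it is stored, mirroring FPUCanonicalizeNaN.
double CanonicalizeNaN(double value) {
  return std::isnan(value) ? std::numeric_limits<double>::quiet_NaN() : value;
}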

void MacroAssembler::CompareMapAndBranch(Register obj,
                                         Register scratch,
                                         Handle<Map> map,
@@ -5603,27 +5532,6 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  }
}


void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch,
    Label* no_map_match) {
  DCHECK(IsFastElementsKind(expected_kind));
  DCHECK(IsFastElementsKind(transitioned_kind));

  // Check that the function's map is the same as the expected cached map.
  lw(scratch, NativeContextMemOperand());
  lw(at, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
  Branch(no_map_match, ne, map_in_out, Operand(at));

  // Use the transitioned cached map.
  lw(map_in_out,
     ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}
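
LoadTransitionedArrayMapConditional consults the native context's per-ElementsKind cache of Array maps. A conceptual C++ model of the same lookup, with hypothetical types standing in for V8's real ones:

struct Map;
enum ElementsKind { FAST_SMI_ELEMENTS, FAST_ELEMENTS, kKindCount };

struct NativeContext {
  Map* array_maps[kKindCount];  // stand-in for the Context::ArrayMapIndex slots
};

// Returns the transitioned map, or null to model the no_map_match branch.
Map* LoadTransitionedArrayMap(const NativeContext& cx, Map* map_in,
                              ElementsKind expected,
                              ElementsKind transitioned) {
  if (cx.array_maps[expected] != map_in) return nullptr;
  return cx.array_maps[transitioned];
}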

void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
  lw(dst, NativeContextMemOperand());
  lw(dst, ContextMemOperand(dst, index));
@@ -5955,14 +5863,6 @@ void MacroAssembler::UntagAndJumpIfSmi(Register dst,
  SmiUntag(dst, src);
}


void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
                                          Register src,
                                          Label* non_smi_case) {
  JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
  SmiUntag(dst, src);
}

void MacroAssembler::JumpIfSmi(Register value,
                               Label* smi_label,
                               Register scratch,
@@ -6622,40 +6522,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
  return no_reg;
}


void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  DCHECK(!scratch1.is(scratch0));
  Factory* factory = isolate()->factory();
  Register current = scratch0;
  Label loop_again, end;

  // Scratch contains the elements pointer.
  Move(current, object);
  lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
  lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
  Branch(&end, eq, current, Operand(factory->null_value()));

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
  lbu(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
  Branch(found, lo, scratch1, Operand(JS_OBJECT_TYPE));
  lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
  lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
  Branch(&loop_again, ne, current, Operand(factory->null_value()));

  bind(&end);
}
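
JumpIfDictionaryInPrototypeChain walks map-to-prototype links from the receiver and bails out to found when it sees a special receiver type or dictionary-mode elements. A hedged scalar model with hypothetical types:

// Simplified stand-in for the fields the assembly above reads from a Map.
struct ProtoMap {
  ProtoMap* prototype_map;     // null models the null prototype at chain end
  bool is_special_receiver;    // instance type below JS_OBJECT_TYPE
  bool has_dictionary_elements;
};

// Walk the chain; returning true corresponds to branching to "found".
bool DictionaryInPrototypeChain(const ProtoMap* receiver_map) {
  for (const ProtoMap* m = receiver_map->prototype_map; m != nullptr;
       m = m->prototype_map) {
    if (m->is_special_receiver || m->has_dictionary_elements) return true;
  }
  return false;
}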

bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
                Register reg5, Register reg6, Register reg7, Register reg8,
                Register reg9, Register reg10) {
@@ -1037,17 +1037,6 @@ class MacroAssembler: public Assembler {
    LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
  }

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(
      ElementsKind expected_kind,
      ElementsKind transitioned_kind,
      Register map_in_out,
      Register scratch,
      Label* no_map_match);

  void LoadNativeContextSlot(int index, Register dst);

  // Load the initial map from the global function. The registers
@@ -1158,30 +1147,6 @@
                    FieldMemOperand(object_map, Map::kInstanceTypeOffset));
  }

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements. Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map,
                               Register scratch,
                               Label* fail);

  // Check if a map for a JSObject indicates that the object has fast smi only
  // elements. Jump to the specified label if it does not.
  void CheckFastSmiElements(Register map,
                            Register scratch,
                            Label* fail);

  // Check to see if maybe_number can be stored as a double in
  // FastDoubleElements. If it can, store it at the index specified by key in
  // the FastDoubleElements array elements. Otherwise jump to fail.
  void StoreNumberToDoubleElements(Register value_reg,
                                   Register key_reg,
                                   Register elements_reg,
                                   Register scratch1,
                                   Register scratch2,
                                   Register scratch3,
                                   Label* fail,
                                   int elements_offset = 0);

  // Compare an object's map with the specified map and its transitioned
  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
  // "branch_to" if the result of the comparison is "cond". If multiple map
@@ -1557,10 +1522,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Untag the source value into destination and jump if source is not a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);

  // Jump if the register contains a smi.
  void JumpIfSmi(Register value,
                 Label* smi_label,
@@ -1731,20 +1692,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
                                       Register scratch_reg,
                                       Label* no_memento_found);

  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
                                         Register scratch_reg,
                                         Label* memento_found) {
    Label no_memento_found;
    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
                                    &no_memento_found);
    Branch(memento_found);
    bind(&no_memento_found);
  }

  // Jumps to found label if a prototype map has dictionary elements.
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
                                        Register scratch1, Label* found);

  bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }

 private:
@@ -3269,233 +3269,6 @@ void CallICTrampolineStub::Generate(MacroAssembler* masm) {
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}


static void HandleArrayCases(MacroAssembler* masm, Register feedback,
                             Register receiver_map, Register scratch1,
                             Register scratch2, bool is_polymorphic,
                             Label* miss) {
  // feedback initially contains the feedback array
  Label next_loop, prepare_next;
  Label start_polymorphic;

  Register cached_map = scratch1;

  __ ld(cached_map,
        FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
  __ ld(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ Branch(&start_polymorphic, ne, receiver_map, Operand(cached_map));
  // found, now call handler.
  Register handler = feedback;
  __ ld(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
  __ Daddu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(t9);

  Register length = scratch2;
  __ bind(&start_polymorphic);
  __ ld(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
  if (!is_polymorphic) {
    // If the IC could be monomorphic we have to make sure we don't go past the
    // end of the feedback array.
    __ Branch(miss, eq, length, Operand(Smi::FromInt(2)));
  }

  Register too_far = length;
  Register pointer_reg = feedback;

  // +-----+------+------+-----+-----+ ... ----+
  // | map | len  | wm0  | h0  | wm1 |      hN |
  // +-----+------+------+-----+-----+ ... ----+
  //                 0      1                len-1
  //                 ^                          ^
  //                 |                          |
  //             pointer_reg                 too_far
  //             aka feedback                scratch2
  // also need receiver_map
  // use cached_map (scratch1) to look in the weak map values.
  __ SmiScale(too_far, length, kPointerSizeLog2);
  __ Daddu(too_far, feedback, Operand(too_far));
  __ Daddu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Daddu(pointer_reg, feedback,
           Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));

  __ bind(&next_loop);
  __ ld(cached_map, MemOperand(pointer_reg));
  __ ld(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
  __ ld(handler, MemOperand(pointer_reg, kPointerSize));
  __ Daddu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(t9);

  __ bind(&prepare_next);
  __ Daddu(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
  __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));

  // We exhausted our array of map handler pairs.
  __ Branch(miss);
}


static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
                                  Register receiver_map, Register feedback,
                                  Register vector, Register slot,
                                  Register scratch, Label* compare_map,
                                  Label* load_smi_map, Label* try_array) {
  __ JumpIfSmi(receiver, load_smi_map);
  __ ld(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ bind(compare_map);
  Register cached_map = scratch;
  // Move the weak map into the weak_cell register.
  __ ld(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
  __ Branch(try_array, ne, cached_map, Operand(receiver_map));
  Register handler = feedback;
  __ SmiScale(handler, slot, kPointerSizeLog2);
  __ Daddu(handler, vector, Operand(handler));
  __ ld(handler,
        FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
  __ Daddu(t9, handler, Code::kHeaderSize - kHeapObjectTag);
  __ Jump(t9);
}

void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
  KeyedStoreICStub stub(isolate(), state());
  stub.GenerateForTrampoline(masm);
}

void KeyedStoreICStub::Generate(MacroAssembler* masm) {
  GenerateImpl(masm, false);
}

void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, true);
}


static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
                                       Register receiver_map, Register scratch1,
                                       Register scratch2, Label* miss) {
  // feedback initially contains the feedback array
  Label next_loop, prepare_next;
  Label start_polymorphic;
  Label transition_call;

  Register cached_map = scratch1;
  Register too_far = scratch2;
  Register pointer_reg = feedback;

  __ ld(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));

  // +-----+------+------+-----+-----+-----+ ... ----+
  // | map | len  | wm0  | wt0 | h0  | wm1 |      hN |
  // +-----+------+------+-----+-----+-----+ ... ----+
  //                 0      1     2                len-1
  //                 ^                                ^
  //                 |                                |
  //             pointer_reg                       too_far
  //             aka feedback                      scratch2
  // also need receiver_map
  // use cached_map (scratch1) to look in the weak map values.
  __ SmiScale(too_far, too_far, kPointerSizeLog2);
  __ Daddu(too_far, feedback, Operand(too_far));
  __ Daddu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Daddu(pointer_reg, feedback,
           Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));

  __ bind(&next_loop);
  __ ld(cached_map, MemOperand(pointer_reg));
  __ ld(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
  // Is it a transitioning store?
  __ ld(too_far, MemOperand(pointer_reg, kPointerSize));
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  __ Branch(&transition_call, ne, too_far, Operand(at));

  __ ld(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
  __ Daddu(t9, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(t9);

  __ bind(&transition_call);
  __ ld(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
  __ JumpIfSmi(too_far, miss);

  __ ld(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
  // Load the map into the correct register.
  DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
  __ Move(feedback, too_far);
  __ Daddu(t9, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(t9);

  __ bind(&prepare_next);
  __ Daddu(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
  __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));

  // We exhausted our array of map handler pairs.
  __ Branch(miss);
}

void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // a1
  Register key = StoreWithVectorDescriptor::NameRegister();           // a2
  Register vector = StoreWithVectorDescriptor::VectorRegister();      // a3
  Register slot = StoreWithVectorDescriptor::SlotRegister();          // a4
  DCHECK(StoreWithVectorDescriptor::ValueRegister().is(a0));          // a0
  Register feedback = a5;
  Register receiver_map = a6;
  Register scratch1 = a7;

  __ SmiScale(scratch1, slot, kPointerSizeLog2);
  __ Daddu(feedback, vector, Operand(scratch1));
  __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));

  // Try to quickly handle the monomorphic case without knowing for sure
  // if we have a weak cell in feedback. We do know it's safe to look
  // at WeakCell::kValueOffset.
  Label try_array, load_smi_map, compare_map;
  Label not_array, miss;
  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
                        scratch1, &compare_map, &load_smi_map, &try_array);

  __ bind(&try_array);
  // Is it a fixed array?
  __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
  __ Branch(&not_array, ne, scratch1, Heap::kFixedArrayMapRootIndex);

  // We have a polymorphic element handler.
  Label try_poly_name;

  Register scratch2 = t0;

  HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
                             &miss);

  __ bind(&not_array);
  // Is it generic?
  __ Branch(&try_poly_name, ne, feedback, Heap::kmegamorphic_symbolRootIndex);
  Handle<Code> megamorphic_stub =
      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);

  __ bind(&try_poly_name);
  // We might have a name in feedback, and a fixed array in the next slot.
  __ Branch(&miss, ne, key, Operand(feedback));
  // If the name comparison succeeded, we know we have a fixed array with
  // at least one map/handler pair.
  __ SmiScale(scratch1, slot, kPointerSizeLog2);
  __ Daddu(feedback, vector, Operand(scratch1));
  __ ld(feedback,
        FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
                   &miss);

  __ bind(&miss);
  KeyedStoreIC::GenerateMiss(masm);

  __ bind(&load_smi_map);
  __ Branch(USE_DELAY_SLOT, &compare_map);
  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);  // In delay slot.
}


void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
  if (masm->isolate()->function_entry_hook() != NULL) {
    ProfileEntryHookStub stub(masm->isolate());
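
The mips64 variants above use SmiScale where the 32-bit code used Lsa because, on 64-bit V8 as assumed here, a smi keeps its payload in the upper 32 bits of the word. A sketch of that index arithmetic; the kSmiShift value is an assumption for illustration, not quoted from the source:

#include <cstdint>

// Assumed 64-bit smi layout: payload in the upper half, tag shift of 32.
const int kSmiShift = 32;

// Equivalent of SmiScale: untag (>> kSmiShift), then scale (<< scale_log2).
int64_t SmiScale(int64_t smi, int scale_log2) {
  return (smi >> kSmiShift) << scale_log2;
}

int64_t SmiTag(int64_t value) { return value << kSmiShift; }

// Example: byte offset of element 3 in an array of 8-byte pointers:
// SmiScale(SmiTag(3), 3) == 24.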
@ -607,348 +607,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
|
||||
|
||||
#define __ ACCESS_MASM(masm)
|
||||
|
||||
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
|
||||
MacroAssembler* masm,
|
||||
Register receiver,
|
||||
Register key,
|
||||
Register value,
|
||||
Register target_map,
|
||||
AllocationSiteMode mode,
|
||||
Label* allocation_memento_found) {
|
||||
Register scratch_elements = a4;
|
||||
DCHECK(!AreAliased(receiver, key, value, target_map,
|
||||
scratch_elements));
|
||||
|
||||
if (mode == TRACK_ALLOCATION_SITE) {
|
||||
__ JumpIfJSArrayHasAllocationMemento(
|
||||
receiver, scratch_elements, allocation_memento_found);
|
||||
}
|
||||
|
||||
// Set transitioned map.
|
||||
__ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
|
||||
__ RecordWriteField(receiver,
|
||||
HeapObject::kMapOffset,
|
||||
target_map,
|
||||
t1,
|
||||
kRAHasNotBeenSaved,
|
||||
kDontSaveFPRegs,
|
||||
EMIT_REMEMBERED_SET,
|
||||
OMIT_SMI_CHECK);
|
||||
}
|
||||
|
||||
|
||||
void ElementsTransitionGenerator::GenerateSmiToDouble(
|
||||
MacroAssembler* masm,
|
||||
Register receiver,
|
||||
Register key,
|
||||
Register value,
|
||||
Register target_map,
|
||||
AllocationSiteMode mode,
|
||||
Label* fail) {
|
||||
// Register ra contains the return address.
|
||||
Label loop, entry, convert_hole, gc_required, only_change_map, done;
|
||||
Register elements = a4;
|
||||
Register length = a5;
|
||||
Register array = a6;
|
||||
Register array_end = array;
|
||||
|
||||
// target_map parameter can be clobbered.
|
||||
Register scratch1 = target_map;
|
||||
Register scratch2 = t1;
|
||||
Register scratch3 = a7;
|
||||
|
||||
// Verify input registers don't conflict with locals.
|
||||
DCHECK(!AreAliased(receiver, key, value, target_map,
|
||||
elements, length, array, scratch2));
|
||||
|
||||
Register scratch = t2;
|
||||
if (mode == TRACK_ALLOCATION_SITE) {
|
||||
__ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
|
||||
}
|
||||
|
||||
// Check for empty arrays, which only require a map transition and no changes
|
||||
// to the backing store.
|
||||
__ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
|
||||
__ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
|
||||
__ Branch(&only_change_map, eq, at, Operand(elements));
|
||||
|
||||
__ push(ra);
|
||||
__ ld(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
|
||||
// elements: source FixedArray
|
||||
// length: number of elements (smi-tagged)
|
||||
|
||||
// Allocate new FixedDoubleArray.
|
||||
__ SmiScale(scratch, length, kDoubleSizeLog2);
|
||||
__ Daddu(scratch, scratch, FixedDoubleArray::kHeaderSize);
|
||||
__ Allocate(scratch, array, t3, scratch2, &gc_required, DOUBLE_ALIGNMENT);
|
||||
__ Dsubu(array, array, kHeapObjectTag);
|
||||
// array: destination FixedDoubleArray, not tagged as heap object
|
||||
|
||||
// Set destination FixedDoubleArray's length and map.
|
||||
__ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
|
||||
__ sd(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
|
||||
// Update receiver's map.
|
||||
__ sd(scratch2, MemOperand(array, HeapObject::kMapOffset));
|
||||
|
||||
__ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
|
||||
__ RecordWriteField(receiver,
|
||||
HeapObject::kMapOffset,
|
||||
target_map,
|
||||
scratch2,
|
||||
kRAHasBeenSaved,
|
||||
kDontSaveFPRegs,
|
||||
OMIT_REMEMBERED_SET,
|
||||
OMIT_SMI_CHECK);
|
||||
// Replace receiver's backing store with newly created FixedDoubleArray.
|
||||
__ Daddu(scratch1, array, Operand(kHeapObjectTag));
|
||||
__ sd(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
|
||||
__ RecordWriteField(receiver,
|
||||
JSObject::kElementsOffset,
|
||||
scratch1,
|
||||
scratch2,
|
||||
kRAHasBeenSaved,
|
||||
kDontSaveFPRegs,
|
||||
EMIT_REMEMBERED_SET,
|
||||
OMIT_SMI_CHECK);
|
||||
|
||||
|
||||
// Prepare for conversion loop.
|
||||
__ Daddu(scratch1, elements,
|
||||
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
|
||||
__ Daddu(scratch3, array, Operand(FixedDoubleArray::kHeaderSize));
|
||||
__ SmiScale(array_end, length, kDoubleSizeLog2);
|
||||
__ Daddu(array_end, array_end, scratch3);
|
||||
|
||||
// Repurpose registers no longer in use.
|
||||
Register hole_lower = elements;
|
||||
Register hole_upper = length;
|
||||
__ li(hole_lower, Operand(kHoleNanLower32));
|
||||
__ li(hole_upper, Operand(kHoleNanUpper32));
|
||||
|
||||
// scratch1: begin of source FixedArray element fields, not tagged
|
||||
// hole_lower: kHoleNanLower32
|
||||
// hole_upper: kHoleNanUpper32
|
||||
// array_end: end of destination FixedDoubleArray, not tagged
|
||||
// scratch3: begin of FixedDoubleArray element fields, not tagged
|
||||
|
||||
__ Branch(&entry);
|
||||
|
||||
__ bind(&only_change_map);
|
||||
__ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
|
||||
__ RecordWriteField(receiver,
|
||||
HeapObject::kMapOffset,
|
||||
target_map,
|
||||
scratch2,
|
||||
kRAHasBeenSaved,
|
||||
kDontSaveFPRegs,
|
||||
OMIT_REMEMBERED_SET,
|
||||
OMIT_SMI_CHECK);
|
||||
__ Branch(&done);
|
||||
|
||||
// Call into runtime if GC is required.
|
||||
__ bind(&gc_required);
|
||||
__ ld(ra, MemOperand(sp, 0));
|
||||
__ Branch(USE_DELAY_SLOT, fail);
|
||||
__ daddiu(sp, sp, kPointerSize); // In delay slot.
|
||||
|
||||
// Convert and copy elements.
|
||||
__ bind(&loop);
|
||||
__ ld(scratch2, MemOperand(scratch1));
|
||||
__ Daddu(scratch1, scratch1, kPointerSize);
|
||||
// scratch2: current element
|
||||
__ JumpIfNotSmi(scratch2, &convert_hole);
|
||||
__ SmiUntag(scratch2);
|
||||
|
||||
// Normal smi, convert to double and store.
|
||||
__ mtc1(scratch2, f0);
|
||||
__ cvt_d_w(f0, f0);
|
||||
__ sdc1(f0, MemOperand(scratch3));
|
||||
__ Branch(USE_DELAY_SLOT, &entry);
|
||||
__ daddiu(scratch3, scratch3, kDoubleSize); // In delay slot.
|
||||
|
||||
// Hole found, store the-hole NaN.
|
||||
__ bind(&convert_hole);
|
||||
if (FLAG_debug_code) {
|
||||
// Restore a "smi-untagged" heap object.
|
||||
__ Or(scratch2, scratch2, Operand(1));
|
||||
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
|
||||
__ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(scratch2));
|
||||
}
|
||||
// mantissa
|
||||
__ sw(hole_lower, MemOperand(scratch3, Register::kMantissaOffset));
|
||||
// exponent
|
||||
__ sw(hole_upper, MemOperand(scratch3, Register::kExponentOffset));
|
||||
__ Daddu(scratch3, scratch3, kDoubleSize);
|
||||
|
||||
__ bind(&entry);
|
||||
__ Branch(&loop, lt, scratch3, Operand(array_end));
|
||||
|
||||
__ bind(&done);
|
||||
__ pop(ra);
|
||||
}
|
||||
|
||||
|
||||
void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Register ra contains the return address.
  Label entry, loop, convert_hole, gc_required, only_change_map;
  Register elements = a4;
  Register array = a6;
  Register length = a5;
  Register scratch = t1;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, array, length, scratch));
  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(&only_change_map, eq, at, Operand(elements));

  __ MultiPush(
      value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());

  __ ld(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // elements: source FixedArray
  // length: number of elements (smi-tagged)

  // Allocate new FixedArray.
  // Re-use value and target_map registers, as they have been saved on the
  // stack.
  Register array_size = value;
  Register allocate_scratch = target_map;
  __ SmiScale(array_size, length, kPointerSizeLog2);
  __ Daddu(array_size, array_size, FixedDoubleArray::kHeaderSize);
  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
              NO_ALLOCATION_FLAGS);
  __ Dsubu(array, array, kHeapObjectTag);
  // array: destination FixedArray, not tagged as heap object
  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
  __ sd(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ sd(scratch, MemOperand(array, HeapObject::kMapOffset));

  // Prepare for conversion loop.
  Register src_elements = elements;
  Register dst_elements = target_map;
  Register dst_end = length;
  Register heap_number_map = scratch;
  __ Daddu(src_elements, src_elements,
           Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  __ Daddu(dst_elements, array, Operand(FixedArray::kHeaderSize));
  __ SmiScale(dst_end, dst_end, kPointerSizeLog2);
  __ Daddu(dst_end, dst_elements, dst_end);

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  Label initialization_loop, initialization_loop_entry;
  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
  __ Branch(&initialization_loop_entry);
  __ bind(&initialization_loop);
  __ sd(scratch, MemOperand(dst_elements));
  __ Daddu(dst_elements, dst_elements, Operand(kPointerSize));
  __ bind(&initialization_loop_entry);
  __ Branch(&initialization_loop, lt, dst_elements, Operand(dst_end));

  __ Daddu(dst_elements, array, Operand(FixedArray::kHeaderSize));
  __ Daddu(array, array, Operand(kHeapObjectTag));
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  // Using offsetted addresses.
  // dst_elements: begin of destination FixedArray element fields, not tagged
  // src_elements: begin of source FixedDoubleArray element fields, not tagged,
  //               points to the exponent
  // dst_end: end of destination FixedArray, not tagged
  // array: destination FixedArray
  // heap_number_map: heap number map
  __ Branch(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ MultiPop(
      value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());

  __ Branch(fail);

  __ bind(&loop);
  Register upper_bits = key;
  __ lw(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
  __ Daddu(src_elements, src_elements, kDoubleSize);
  // upper_bits: current element's upper 32 bit
  // src_elements: address of next element
  __ Branch(&convert_hole, eq, upper_bits, Operand(kHoleNanUpper32));

  // Non-hole double, copy value into a heap number.
  Register heap_number = receiver;
  Register scratch2 = value;
  Register scratch3 = t2;
  __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
                        &gc_required);
  // heap_number: new heap number
  // Load current element, src_elements point to next element.

  __ ld(scratch2, MemOperand(src_elements, -kDoubleSize));
  __ sd(scratch2, FieldMemOperand(heap_number, HeapNumber::kValueOffset));

  __ mov(scratch2, dst_elements);
  __ sd(heap_number, MemOperand(dst_elements));
  __ Daddu(dst_elements, dst_elements, kPointerSize);
  __ RecordWrite(array,
                 scratch2,
                 heap_number,
                 kRAHasBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ Branch(&entry);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
  __ sd(scratch2, MemOperand(dst_elements));
  __ Daddu(dst_elements, dst_elements, kPointerSize);

  __ bind(&entry);
  __ Branch(&loop, lt, dst_elements, Operand(dst_end));

  __ MultiPop(receiver.bit() | target_map.bit() | value.bit() | key.bit());
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ sd(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver,
                      JSObject::kElementsOffset,
                      array,
                      scratch,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(ra);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}

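// Editor's note: the conversion loop above tells holes from real doubles by
// loading only the upper 32 bits of each element. A FixedDoubleArray marks
// absent entries with one reserved quiet-NaN bit pattern (the "hole NaN"),
// and stores into such arrays canonicalize NaNs first, so no ordinary value
// can alias it. Illustrative sketch, assuming only V8's kHoleNanUpper32
// constant (LooksLikeHole is a hypothetical name):
//
//   static bool LooksLikeHole(uint64_t raw_double_bits) {
//     // Mirrors the lw(upper_bits, ..., Register::kExponentOffset) above.
//     return static_cast<uint32_t>(raw_double_bits >> 32) == kHoleNanUpper32;
//   }
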
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
@ -4657,76 +4657,6 @@ void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
  Branch(&loop, ult, current_address, Operand(end_address));
}

void MacroAssembler::CheckFastObjectElements(Register map,
                                             Register scratch,
                                             Label* fail) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  Branch(fail, ls, scratch,
         Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
  Branch(fail, hi, scratch,
         Operand(Map::kMaximumBitField2FastHoleyElementValue));
}
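// Editor's note: the pair of branches above is a range check on the elements
// kind encoded in Map's bit field 2; the STATIC_ASSERTs pin the enum order it
// relies on. A hedged C++ equivalent (HasFastObjectElements is a hypothetical
// name; the two Map constants are the real ones used above):
//
//   static bool HasFastObjectElements(uint8_t bit_field2) {
//     return bit_field2 > Map::kMaximumBitField2FastHoleySmiElementValue &&
//            bit_field2 <= Map::kMaximumBitField2FastHoleyElementValue;
//   }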

void MacroAssembler::CheckFastSmiElements(Register map,
                                          Register scratch,
                                          Label* fail) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  Branch(fail, hi, scratch,
         Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
}

void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
                                                 Register key_reg,
                                                 Register elements_reg,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* fail,
                                                 int elements_offset) {
  DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1, scratch2));
  Label smi_value, done;

  // Handle smi values specially.
  JumpIfSmi(value_reg, &smi_value);

  // Ensure that the object is a heap number.
  CheckMap(value_reg,
           scratch1,
           Heap::kHeapNumberMapRootIndex,
           fail,
           DONT_DO_SMI_CHECK);

  // Double value, turn potential sNaN into qNaN.
  DoubleRegister double_result = f0;
  DoubleRegister double_scratch = f2;

  ldc1(double_result, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
  Branch(USE_DELAY_SLOT, &done);  // Canonicalization is one instruction.
  FPUCanonicalizeNaN(double_result, double_result);

  bind(&smi_value);
  // Untag and transfer.
  dsrl32(scratch1, value_reg, 0);
  mtc1(scratch1, double_scratch);
  cvt_d_w(double_result, double_scratch);

  bind(&done);
  Daddu(scratch1, elements_reg,
        Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
                elements_offset));
  dsra(scratch2, key_reg, 32 - kDoubleSizeLog2);
  Daddu(scratch1, scratch1, scratch2);
  // scratch1 is now effective address of the double element.
  sdc1(double_result, MemOperand(scratch1, 0));
}
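// Editor's note: two details above are easy to miss. First, the branch to
// done is taken with USE_DELAY_SLOT, so FPUCanonicalizeNaN executes in the
// branch delay slot. Second, the dsra by 32 - kDoubleSizeLog2 untags and
// scales the smi key in one instruction, since a MIPS64 smi keeps its 32-bit
// payload in the upper word. Sketch of that arithmetic (SmiKeyToByteOffset is
// a hypothetical name):
//
//   int64_t SmiKeyToByteOffset(int64_t smi_key) {
//     // smi_key == index << 32 and kDoubleSizeLog2 == 3, so this yields
//     // index * sizeof(double).
//     return smi_key >> (32 - kDoubleSizeLog2);
//   }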

void MacroAssembler::SubNanPreservePayloadAndSign_s(FPURegister fd,
                                                    FPURegister fs,
                                                    FPURegister ft) {
@ -5958,27 +5888,6 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  }
}


void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch,
    Label* no_map_match) {
  DCHECK(IsFastElementsKind(expected_kind));
  DCHECK(IsFastElementsKind(transitioned_kind));

  // Check that the function's map is the same as the expected cached map.
  ld(scratch, NativeContextMemOperand());
  ld(at, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
  Branch(no_map_match, ne, map_in_out, Operand(at));

  // Use the transitioned cached map.
  ld(map_in_out,
     ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}
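// Editor's note: a rough C++ rendering of the conditional map swap above
// (TransitionedArrayMap is a hypothetical helper; the accessors are V8's of
// this era):
//
//   Map* TransitionedArrayMap(Context* native_context, Map* current_map,
//                             ElementsKind expected,
//                             ElementsKind transitioned) {
//     Object* cached = native_context->get(Context::ArrayMapIndex(expected));
//     if (current_map != cached) return nullptr;  // i.e. jump to no_map_match
//     return Map::cast(
//         native_context->get(Context::ArrayMapIndex(transitioned)));
//   }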


void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
  ld(dst, NativeContextMemOperand());
  ld(dst, ContextMemOperand(dst, index));
@ -6367,15 +6276,6 @@ void MacroAssembler::UntagAndJumpIfSmi(Register dst,
  SmiUntag(dst, src);
}


void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
                                          Register src,
                                          Label* non_smi_case) {
  // DCHECK(!dst.is(src));
  JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
  SmiUntag(dst, src);
}

void MacroAssembler::JumpIfSmi(Register value,
                               Label* smi_label,
                               Register scratch,
@ -7042,40 +6942,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
  return no_reg;
}

void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  DCHECK(!scratch1.is(scratch0));
  Factory* factory = isolate()->factory();
  Register current = scratch0;
  Label loop_again, end;

  // Scratch contained elements pointer.
  Move(current, object);
  ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
  ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
  Branch(&end, eq, current, Operand(factory->null_value()));

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
  lbu(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
  Branch(found, lo, scratch1, Operand(JS_OBJECT_TYPE));
  lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
  ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
  Branch(&loop_again, ne, current, Operand(factory->null_value()));

  bind(&end);
}
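// Editor's note: the same walk in illustrative C++ (hypothetical helper name;
// note the conservative treatment of anything below JS_OBJECT_TYPE):
//
//   bool PrototypeChainHasDictionaryElements(Isolate* isolate, JSObject* o) {
//     Object* current = o->map()->prototype();
//     while (current != isolate->heap()->null_value()) {
//       Map* map = HeapObject::cast(current)->map();
//       if (map->instance_type() < JS_OBJECT_TYPE) return true;  // "found"
//       if (map->elements_kind() == DICTIONARY_ELEMENTS) return true;
//       current = map->prototype();
//     }
//     return false;
//   }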


bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
                Register reg5, Register reg6, Register reg7, Register reg8,
                Register reg9, Register reg10) {
@ -1095,17 +1095,6 @@ class MacroAssembler: public Assembler {
    LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
  }

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(
      ElementsKind expected_kind,
      ElementsKind transitioned_kind,
      Register map_in_out,
      Register scratch,
      Label* no_map_match);

  void LoadNativeContextSlot(int index, Register dst);

  // Load the initial map from the global function. The registers
@ -1217,29 +1206,6 @@ class MacroAssembler: public Assembler {
             FieldMemOperand(object_map, Map::kInstanceTypeOffset));
  }

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements. Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map,
                               Register scratch,
                               Label* fail);

  // Check if a map for a JSObject indicates that the object has fast smi only
  // elements. Jump to the specified label if it does not.
  void CheckFastSmiElements(Register map,
                            Register scratch,
                            Label* fail);

  // Check to see if maybe_number can be stored as a double in
  // FastDoubleElements. If it can, store it at the index specified by key in
  // the FastDoubleElements array elements. Otherwise jump to fail.
  void StoreNumberToDoubleElements(Register value_reg,
                                   Register key_reg,
                                   Register elements_reg,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* fail,
                                   int elements_offset = 0);

  // Compare an object's map with the specified map and its transitioned
  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
  // "branch_to" if the result of the comparison is "cond". If multiple map
@ -1706,10 +1672,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Untag the source value into destination and jump if source is not a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);

  // Jump if the register contains a smi.
  void JumpIfSmi(Register value,
                 Label* smi_label,
                 Register scratch,
@ -1871,20 +1833,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
                                       Register scratch_reg,
                                       Label* no_memento_found);

  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
                                         Register scratch_reg,
                                         Label* memento_found) {
    Label no_memento_found;
    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
                                    &no_memento_found);
    Branch(memento_found);
    bind(&no_memento_found);
  }

  // Jumps to found label if a prototype map has dictionary elements.
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
                                        Register scratch1, Label* found);

  bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }

 private:
@ -3202,246 +3202,6 @@ void CallICTrampolineStub::Generate(MacroAssembler* masm) {
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}


static void HandleArrayCases(MacroAssembler* masm, Register feedback,
                             Register receiver_map, Register scratch1,
                             Register scratch2, bool is_polymorphic,
                             Label* miss) {
  // feedback initially contains the feedback array
  Label next_loop, prepare_next;
  Label start_polymorphic;

  Register cached_map = scratch1;

  __ LoadP(cached_map,
           FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
  __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ cmp(receiver_map, cached_map);
  __ bne(&start_polymorphic);
  // found, now call handler.
  Register handler = feedback;
  __ LoadP(handler,
           FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
  __ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(ip);


  Register length = scratch2;
  __ bind(&start_polymorphic);
  __ LoadP(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
  if (!is_polymorphic) {
    // If the IC could be monomorphic we have to make sure we don't go past the
    // end of the feedback array.
    __ CmpSmiLiteral(length, Smi::FromInt(2), r0);
    __ beq(miss);
  }

  Register too_far = length;
  Register pointer_reg = feedback;

  // +-----+------+------+-----+-----+ ... ----+
  // | map | len  | wm0  | h0  | wm1 |   hN    |
  // +-----+------+------+-----+-----+ ... ----+
  //                 0      1              len-1
  //                  ^                       ^
  //                  |                       |
  //             pointer_reg               too_far
  //             aka feedback              scratch2
  // also need receiver_map
  // use cached_map (scratch1) to look in the weak map values.
  __ SmiToPtrArrayOffset(r0, length);
  __ add(too_far, feedback, r0);
  __ addi(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ addi(pointer_reg, feedback,
          Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));

  __ bind(&next_loop);
  __ LoadP(cached_map, MemOperand(pointer_reg));
  __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ cmp(receiver_map, cached_map);
  __ bne(&prepare_next);
  __ LoadP(handler, MemOperand(pointer_reg, kPointerSize));
  __ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(ip);

  __ bind(&prepare_next);
  __ addi(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
  __ cmp(pointer_reg, too_far);
  __ blt(&next_loop);

  // We exhausted our array of map handler pairs.
  __ b(miss);
}
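// Editor's note: the scan above in illustrative C++. Slots come in
// (WeakCell-of-map, handler) pairs, which is why pointer_reg advances by
// kPointerSize * 2 per iteration (LookupHandler is a hypothetical name):
//
//   static Code* LookupHandler(FixedArray* feedback, Map* receiver_map) {
//     for (int i = 0; i < feedback->length(); i += 2) {
//       WeakCell* cell = WeakCell::cast(feedback->get(i));
//       if (cell->value() == receiver_map) {
//         return Code::cast(feedback->get(i + 1));
//       }
//     }
//     return nullptr;  // caller falls through to the miss path
//   }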


static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
                                  Register receiver_map, Register feedback,
                                  Register vector, Register slot,
                                  Register scratch, Label* compare_map,
                                  Label* load_smi_map, Label* try_array) {
  __ JumpIfSmi(receiver, load_smi_map);
  __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ bind(compare_map);
  Register cached_map = scratch;
  // Move the weak map into the weak_cell register.
  __ LoadP(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
  __ cmp(cached_map, receiver_map);
  __ bne(try_array);
  Register handler = feedback;
  __ SmiToPtrArrayOffset(r0, slot);
  __ add(handler, vector, r0);
  __ LoadP(handler,
           FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
  __ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(ip);
}

void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
  KeyedStoreICStub stub(isolate(), state());
  stub.GenerateForTrampoline(masm);
}

void KeyedStoreICStub::Generate(MacroAssembler* masm) {
  GenerateImpl(masm, false);
}

void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, true);
}

static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
                                       Register receiver_map, Register scratch1,
                                       Register scratch2, Label* miss) {
  // feedback initially contains the feedback array
  Label next_loop, prepare_next;
  Label start_polymorphic;
  Label transition_call;

  Register cached_map = scratch1;
  Register too_far = scratch2;
  Register pointer_reg = feedback;
  __ LoadP(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));

  // +-----+------+------+-----+-----+-----+ ... ----+
  // | map | len  | wm0  | wt0 | h0  | wm1 |   hN    |
  // +-----+------+------+-----+-----+-----+ ... ----+
  //                 0      1     2              len-1
  //                 ^                              ^
  //                 |                              |
  //            pointer_reg                      too_far
  //            aka feedback                     scratch2
  // also need receiver_map
  // use cached_map (scratch1) to look in the weak map values.
  __ SmiToPtrArrayOffset(r0, too_far);
  __ add(too_far, feedback, r0);
  __ addi(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ addi(pointer_reg, feedback,
          Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));

  __ bind(&next_loop);
  __ LoadP(cached_map, MemOperand(pointer_reg));
  __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ cmp(receiver_map, cached_map);
  __ bne(&prepare_next);
  // Is it a transitioning store?
  __ LoadP(too_far, MemOperand(pointer_reg, kPointerSize));
  __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
  __ bne(&transition_call);
  __ LoadP(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
  __ addi(ip, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(ip);

  __ bind(&transition_call);
  __ LoadP(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
  __ JumpIfSmi(too_far, miss);

  __ LoadP(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));

  // Load the map into the correct register.
  DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
  __ mr(feedback, too_far);

  __ addi(ip, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(ip);

  __ bind(&prepare_next);
  __ addi(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
  __ cmpl(pointer_reg, too_far);
  __ blt(&next_loop);

  // We exhausted our array of map handler pairs.
  __ b(miss);
}
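// Editor's note: the store variant scans (weak map, weak transition map or
// undefined, handler) triplets, hence the kPointerSize * 3 stride. A hedged
// sketch omitting the cleared-weak-cell check (LookupStoreHandler and
// StoreLookupResult are hypothetical names):
//
//   struct StoreLookupResult {
//     Map* transition;  // nullptr for a non-transitioning store
//     Code* handler;
//   };
//
//   static bool LookupStoreHandler(FixedArray* feedback, Map* receiver_map,
//                                  Object* undefined_value,
//                                  StoreLookupResult* result) {
//     for (int i = 0; i + 2 < feedback->length(); i += 3) {
//       if (WeakCell::cast(feedback->get(i))->value() != receiver_map)
//         continue;
//       Object* transition = feedback->get(i + 1);
//       result->transition =
//           transition == undefined_value
//               ? nullptr
//               : Map::cast(WeakCell::cast(transition)->value());
//       result->handler = Code::cast(feedback->get(i + 2));
//       return true;
//     }
//     return false;  // miss
//   }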

void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // r4
  Register key = StoreWithVectorDescriptor::NameRegister();           // r5
  Register vector = StoreWithVectorDescriptor::VectorRegister();      // r6
  Register slot = StoreWithVectorDescriptor::SlotRegister();          // r7
  DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r3));          // r3
  Register feedback = r8;
  Register receiver_map = r9;
  Register scratch1 = r10;

  __ SmiToPtrArrayOffset(r0, slot);
  __ add(feedback, vector, r0);
  __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));

  // Try to quickly handle the monomorphic case without knowing for sure
  // if we have a weak cell in feedback. We do know it's safe to look
  // at WeakCell::kValueOffset.
  Label try_array, load_smi_map, compare_map;
  Label not_array, miss;
  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
                        scratch1, &compare_map, &load_smi_map, &try_array);

  __ bind(&try_array);
  // Is it a fixed array?
  __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
  __ bne(&not_array);

  // We have a polymorphic element handler.
  Label polymorphic, try_poly_name;
  __ bind(&polymorphic);

  Register scratch2 = r11;

  HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
                             &miss);

  __ bind(&not_array);
  // Is it generic?
  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
  __ bne(&try_poly_name);
  Handle<Code> megamorphic_stub =
      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);

  __ bind(&try_poly_name);
  // We might have a name in feedback, and a fixed array in the next slot.
  __ cmp(key, feedback);
  __ bne(&miss);
  // If the name comparison succeeded, we know we have a fixed array with
  // at least one map/handler pair.
  __ SmiToPtrArrayOffset(r0, slot);
  __ add(feedback, vector, r0);
  __ LoadP(feedback,
           FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
                   &miss);

  __ bind(&miss);
  KeyedStoreIC::GenerateMiss(masm);

  __ bind(&load_smi_map);
  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
  __ b(&compare_map);
}
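// Editor's note: the dispatch above is driven entirely by the dynamic type of
// the feedback slot; smi receivers are given the heap number map at
// load_smi_map so they can share map-based feedback. Illustrative summary of
// the decision tree:
//
//   slot n holds WeakCell(map)      -> monomorphic; handler lives in slot n+1
//   slot n holds FixedArray         -> polymorphic over receiver maps
//   slot n holds megamorphic symbol -> jump to the generic megamorphic stub
//   slot n holds a Name == key      -> keyed feedback; array in slot n+1
//   anything else                   -> miss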


void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
  if (masm->isolate()->function_entry_hook() != NULL) {
    PredictableCodeSizeScope predictable(masm,
@ -73,304 +73,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch_elements = r7;
  DCHECK(!AreAliased(receiver, key, value, target_map, scratch_elements));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements, r11,
                                         allocation_memento_found);
  }

  // Set transitioned map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r11,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode, Label* fail) {
  // lr contains the return address
  Label loop, entry, convert_hole, only_change_map, done;
  Register elements = r7;
  Register length = r8;
  Register array = r9;
  Register array_end = array;

  // target_map parameter can be clobbered.
  Register scratch1 = target_map;
  Register scratch2 = r10;
  Register scratch3 = r11;
  Register scratch4 = r14;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
                     scratch2));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ beq(&only_change_map);

  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // length: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
  __ SmiToDoubleArrayOffset(scratch3, length);
  __ addi(scratch3, scratch3, Operand(FixedDoubleArray::kHeaderSize));
  __ Allocate(scratch3, array, scratch4, scratch2, fail, DOUBLE_ALIGNMENT);
  __ subi(array, array, Operand(kHeapObjectTag));
  // array: destination FixedDoubleArray, not tagged as heap object.
  // elements: source FixedArray.

  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
  __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  // Update receiver's map.
  __ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset));

  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ addi(scratch1, array, Operand(kHeapObjectTag));
  __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
  __ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  __ addi(scratch1, elements,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ addi(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
  __ SmiToDoubleArrayOffset(array_end, length);
  __ add(array_end, scratch2, array_end);
  // Repurpose registers no longer in use.
#if V8_TARGET_ARCH_PPC64
  Register hole_int64 = elements;
  __ mov(hole_int64, Operand(kHoleNanInt64));
#else
  Register hole_lower = elements;
  Register hole_upper = length;
  __ mov(hole_lower, Operand(kHoleNanLower32));
  __ mov(hole_upper, Operand(kHoleNanUpper32));
#endif
  // scratch1: begin of source FixedArray element fields, not tagged
  // hole_lower: kHoleNanLower32 OR hole_int64
  // hole_upper: kHoleNanUpper32
  // array_end: end of destination FixedDoubleArray, not tagged
  // scratch2: begin of FixedDoubleArray element fields, not tagged

  __ b(&entry);

  __ bind(&only_change_map);
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ b(&done);

  // Convert and copy elements.
  __ bind(&loop);
  __ LoadP(scratch3, MemOperand(scratch1));
  __ addi(scratch1, scratch1, Operand(kPointerSize));
  // scratch3: current element
  __ UntagAndJumpIfNotSmi(scratch3, scratch3, &convert_hole);

  // Normal smi, convert to double and store.
  __ ConvertIntToDouble(scratch3, d0);
  __ stfd(d0, MemOperand(scratch2, 0));
  __ addi(scratch2, scratch2, Operand(8));
  __ b(&entry);

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    __ LoadP(scratch3, MemOperand(scratch1, -kPointerSize));
    __ CompareRoot(scratch3, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray);
  }
#if V8_TARGET_ARCH_PPC64
  __ std(hole_int64, MemOperand(scratch2, 0));
#else
  __ stw(hole_upper, MemOperand(scratch2, Register::kExponentOffset));
  __ stw(hole_lower, MemOperand(scratch2, Register::kMantissaOffset));
#endif
  __ addi(scratch2, scratch2, Operand(8));

  __ bind(&entry);
  __ cmp(scratch2, array_end);
  __ blt(&loop);

  __ bind(&done);
}
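// Editor's note: high-level pseudocode for the conversion loop above (IsSmi,
// UntagSmi, and the array views are hypothetical names; the FLAG_debug_code
// assert in the hole path checks exactly the invariant the else-branch
// relies on):
//
//   for (int i = 0; i < length; i++) {
//     if (IsSmi(src[i])) {
//       dst[i] = static_cast<double>(UntagSmi(src[i]));
//     } else {
//       dst[i] = kHoleNanBitPattern;  // src[i] must be the hole
//     }
//   }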


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode, Label* fail) {
  // Register lr contains the return address.
  Label loop, convert_hole, gc_required, only_change_map;
  Register elements = r7;
  Register array = r9;
  Register length = r8;
  Register scratch = r10;
  Register scratch3 = r11;
  Register hole_value = r14;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
                     scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ beq(&only_change_map);

  __ Push(target_map, receiver, key, value);
  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // elements: source FixedDoubleArray
  // length: number of elements (smi-tagged)

  // Allocate new FixedArray.
  // Re-use value and target_map registers, as they have been saved on the
  // stack.
  Register array_size = value;
  Register allocate_scratch = target_map;
  __ li(array_size, Operand(FixedDoubleArray::kHeaderSize));
  __ SmiToPtrArrayOffset(r0, length);
  __ add(array_size, array_size, r0);
  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
              NO_ALLOCATION_FLAGS);
  // array: destination FixedArray, tagged as heap object
  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
  __ StoreP(length,
            FieldMemOperand(array, FixedDoubleArray::kLengthOffset), r0);
  __ StoreP(scratch, FieldMemOperand(array, HeapObject::kMapOffset), r0);

  // Prepare for conversion loop.
  Register src_elements = elements;
  Register dst_elements = target_map;
  Register dst_end = length;
  Register heap_number_map = scratch;
  __ addi(src_elements, elements,
          Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  __ SmiToPtrArrayOffset(length, length);
  __ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex);

  Label initialization_loop, loop_done;
  __ ShiftRightImm(r0, length, Operand(kPointerSizeLog2), SetRC);
  __ beq(&loop_done, cr0);

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  __ mtctr(r0);
  __ addi(dst_elements, array,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
  __ bind(&initialization_loop);
  __ StorePU(hole_value, MemOperand(dst_elements, kPointerSize));
  __ bdnz(&initialization_loop);

  __ addi(dst_elements, array,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(dst_end, dst_elements, length);
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  // Using offsetted addresses in src_elements to fully take advantage of
  // post-indexing.
  // dst_elements: begin of destination FixedArray element fields, not tagged
  // src_elements: begin of source FixedDoubleArray element fields,
  //               not tagged, +4
  // dst_end: end of destination FixedArray, not tagged
  // array: destination FixedArray
  // hole_value: the-hole pointer
  // heap_number_map: heap number map
  __ b(&loop);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(target_map, receiver, key, value);
  __ b(fail);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ StoreP(hole_value, MemOperand(dst_elements));
  __ addi(dst_elements, dst_elements, Operand(kPointerSize));
  __ cmpl(dst_elements, dst_end);
  __ bge(&loop_done);

  __ bind(&loop);
  Register upper_bits = key;
  __ lwz(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
  __ addi(src_elements, src_elements, Operand(kDoubleSize));
  // upper_bits: current element's upper 32 bit
  // src_elements: address of next element's upper 32 bit
  __ Cmpi(upper_bits, Operand(kHoleNanUpper32), r0);
  __ beq(&convert_hole);

  // Non-hole double, copy value into a heap number.
  Register heap_number = receiver;
  Register scratch2 = value;
  __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
                        &gc_required);
  // heap_number: new heap number
#if V8_TARGET_ARCH_PPC64
  __ ld(scratch2, MemOperand(src_elements, -kDoubleSize));
  // subtract tag for std
  __ addi(upper_bits, heap_number, Operand(-kHeapObjectTag));
  __ std(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
#else
  __ lwz(scratch2,
         MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize));
  __ lwz(upper_bits,
         MemOperand(src_elements, Register::kExponentOffset - kDoubleSize));
  __ stw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
  __ stw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
#endif
  __ mr(scratch2, dst_elements);
  __ StoreP(heap_number, MemOperand(dst_elements));
  __ addi(dst_elements, dst_elements, Operand(kPointerSize));
  __ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ cmpl(dst_elements, dst_end);
  __ blt(&loop);
  __ bind(&loop_done);

  __ Pop(target_map, receiver, key, value);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
  __ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}
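// Editor's note: the hole prefill above leans on PPC-specific addressing:
// mtctr loads the iteration count into the count register, StorePU stores
// with pre-increment ("store with update"), and bdnz decrements CTR and
// branches, so the loop needs no explicit index register. Roughly:
//
//   // ctr = length / kPointerSize; p = array_base - kPointerSize;
//   // do { *(p += kPointerSize) = the_hole; } while (--ctr != 0);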


// assume ip can be used as a scratch register below
void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
                                       Register index, Register result,
@ -2070,60 +2070,6 @@ void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
  cmp(obj, r0);
}

void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
                                             Label* fail) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
  ble(fail);
  cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
  bgt(fail);
}


void MacroAssembler::CheckFastSmiElements(Register map, Register scratch,
                                          Label* fail) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
  bgt(fail);
}


void MacroAssembler::StoreNumberToDoubleElements(
    Register value_reg, Register key_reg, Register elements_reg,
    Register scratch1, DoubleRegister double_scratch, Label* fail,
    int elements_offset) {
  DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
  Label smi_value, store;

  // Handle smi values specially.
  JumpIfSmi(value_reg, &smi_value);

  // Ensure that the object is a heap number.
  CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), fail,
           DONT_DO_SMI_CHECK);

  lfd(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
  // Double value, turn potential sNaN into qNaN.
  CanonicalizeNaN(double_scratch);
  b(&store);

  bind(&smi_value);
  SmiToDouble(double_scratch, value_reg);

  bind(&store);
  SmiToDoubleArrayOffset(scratch1, key_reg);
  add(scratch1, elements_reg, scratch1);
  stfd(double_scratch, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize -
                                                     elements_offset));
}
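// Editor's note: canonicalizing before the store matters because the hole is
// itself a NaN bit pattern; a signaling NaN that happened to match it would
// otherwise read back as a hole later. Sketch of the invariant (the constant
// name kCanonicalQuietNaNBits is hypothetical):
//
//   uint64_t StoredBits(double d) {
//     // Every NaN collapses to one fixed quiet NaN, so the stored bits can
//     // never collide with the hole NaN.
//     return std::isnan(d) ? kCanonicalQuietNaNBits
//                          : bit_cast<uint64_t>(d);
//   }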


void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
                                            Register right,
                                            Register overflow_dst,
@ -2737,25 +2683,6 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  }
}


void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind, ElementsKind transitioned_kind,
    Register map_in_out, Register scratch, Label* no_map_match) {
  DCHECK(IsFastElementsKind(expected_kind));
  DCHECK(IsFastElementsKind(transitioned_kind));

  // Check that the function's map is the same as the expected cached map.
  LoadP(scratch, NativeContextMemOperand());
  LoadP(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
  cmp(map_in_out, ip);
  bne(no_map_match);

  // Use the transitioned cached map.
  LoadP(map_in_out,
        ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}


void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
  LoadP(dst, NativeContextMemOperand());
  LoadP(dst, ContextMemOperand(dst, index));
@ -2840,16 +2767,6 @@ void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
  beq(smi_case, cr0);
}


void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src,
                                          Label* non_smi_case) {
  STATIC_ASSERT(kSmiTag == 0);
  TestBitRange(src, kSmiTagSize - 1, 0, r0);
  SmiUntag(dst, src);
  bne(non_smi_case, cr0);
}
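// Editor's note: this works because of smi tagging: a smi has its low
// kSmiTagSize bits equal to kSmiTag (0), so TestBitRange over bits
// [kSmiTagSize - 1, 0] decides smi-ness while SmiUntag shifts the payload
// down in parallel; the bne only fires afterwards. In 32-bit terms:
//
//   bool is_smi = (value & ((1 << kSmiTagSize) - 1)) == kSmiTag;
//   int untagged = value >> kSmiTagSize;  // safe to compute either way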


void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
                                     Label* on_either_smi) {
  STATIC_ASSERT(kSmiTag == 0);
@ -4492,44 +4409,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
  return no_reg;
}

void MacroAssembler::JumpIfDictionaryInPrototypeChain(Register object,
                                                      Register scratch0,
                                                      Register scratch1,
                                                      Label* found) {
  DCHECK(!scratch1.is(scratch0));
  Register current = scratch0;
  Label loop_again, end;

  // scratch contained elements pointer.
  mr(current, object);
  LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
  LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
  CompareRoot(current, Heap::kNullValueRootIndex);
  beq(&end);

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));

  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  lbz(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
  cmpi(scratch1, Operand(JS_OBJECT_TYPE));
  blt(found);

  lbz(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  cmpi(scratch1, Operand(DICTIONARY_ELEMENTS));
  beq(found);
  LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
  CompareRoot(current, Heap::kNullValueRootIndex);
  bne(&loop_again);

  bind(&end);
}


#ifdef DEBUG
bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
                Register reg5, Register reg6, Register reg7, Register reg8,
@ -470,16 +470,6 @@ class MacroAssembler : public Assembler {
    LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
  }

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
                                           ElementsKind transitioned_kind,
                                           Register map_in_out,
                                           Register scratch,
                                           Label* no_map_match);

  void LoadNativeContextSlot(int index, Register dst);

  // Load the initial map from the global function. The registers
@ -803,22 +793,6 @@ class MacroAssembler : public Assembler {
  // sets the flags and leaves the object type in the type_reg register.
  void CompareInstanceType(Register map, Register type_reg, InstanceType type);

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements. Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map, Register scratch, Label* fail);

  // Check if a map for a JSObject indicates that the object has fast smi only
  // elements. Jump to the specified label if it does not.
  void CheckFastSmiElements(Register map, Register scratch, Label* fail);

  // Check to see if maybe_number can be stored as a double in
  // FastDoubleElements. If it can, store it at the index specified by key in
  // the FastDoubleElements array elements. Otherwise jump to fail.
  void StoreNumberToDoubleElements(Register value_reg, Register key_reg,
                                   Register elements_reg, Register scratch1,
                                   DoubleRegister double_scratch, Label* fail,
                                   int elements_offset = 0);

  // Compare an object's map with the specified map and its transitioned
  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
  // set with result of map compare. If multiple map compares are required, the
@ -1309,10 +1283,6 @@ class MacroAssembler : public Assembler {
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Untag the source value into destination and jump if source is not a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);

  inline void TestIfSmi(Register value, Register scratch) {
    TestBitRange(value, kSmiTagSize - 1, 0, scratch);
  }
@ -1528,21 +1498,6 @@ class MacroAssembler : public Assembler {
                                       Register scratch2_reg,
                                       Label* no_memento_found);

  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
                                         Register scratch_reg,
                                         Register scratch2_reg,
                                         Label* memento_found) {
    Label no_memento_found;
    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, scratch2_reg,
                                    &no_memento_found);
    beq(memento_found);
    bind(&no_memento_found);
  }

  // Jumps to found label if a prototype map has dictionary elements.
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
                                        Register scratch1, Label* found);

  // Loads the constant pool pointer (kConstantPoolRegister).
  void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
      Register code_target_address);
@ -3145,240 +3145,6 @@ void CallICTrampolineStub::Generate(MacroAssembler* masm) {
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}

static void HandleArrayCases(MacroAssembler* masm, Register feedback,
                             Register receiver_map, Register scratch1,
                             Register scratch2, bool is_polymorphic,
                             Label* miss) {
  // feedback initially contains the feedback array
  Label next_loop, prepare_next;
  Label start_polymorphic;

  Register cached_map = scratch1;

  __ LoadP(cached_map,
           FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
  __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ CmpP(receiver_map, cached_map);
  __ bne(&start_polymorphic, Label::kNear);
  // found, now call handler.
  Register handler = feedback;
  __ LoadP(handler,
           FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
  __ AddP(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(ip);

  Register length = scratch2;
  __ bind(&start_polymorphic);
  __ LoadP(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
  if (!is_polymorphic) {
    // If the IC could be monomorphic we have to make sure we don't go past the
    // end of the feedback array.
    __ CmpSmiLiteral(length, Smi::FromInt(2), r0);
    __ beq(miss);
  }

  Register too_far = length;
  Register pointer_reg = feedback;

  // +-----+------+------+-----+-----+ ... ----+
  // | map | len  | wm0  | h0  | wm1 |   hN    |
  // +-----+------+------+-----+-----+ ... ----+
  //                 0      1              len-1
  //                  ^                       ^
  //                  |                       |
  //             pointer_reg               too_far
  //             aka feedback              scratch2
  // also need receiver_map
  // use cached_map (scratch1) to look in the weak map values.
  __ SmiToPtrArrayOffset(r0, length);
  __ AddP(too_far, feedback, r0);
  __ AddP(too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ AddP(pointer_reg, feedback,
          Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));

  __ bind(&next_loop);
  __ LoadP(cached_map, MemOperand(pointer_reg));
  __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ CmpP(receiver_map, cached_map);
  __ bne(&prepare_next, Label::kNear);
  __ LoadP(handler, MemOperand(pointer_reg, kPointerSize));
  __ AddP(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(ip);

  __ bind(&prepare_next);
  __ AddP(pointer_reg, Operand(kPointerSize * 2));
  __ CmpP(pointer_reg, too_far);
  __ blt(&next_loop, Label::kNear);

  // We exhausted our array of map handler pairs.
  __ b(miss);
}

static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
                                  Register receiver_map, Register feedback,
                                  Register vector, Register slot,
                                  Register scratch, Label* compare_map,
                                  Label* load_smi_map, Label* try_array) {
  __ JumpIfSmi(receiver, load_smi_map);
  __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ bind(compare_map);
  Register cached_map = scratch;
  // Move the weak map into the weak_cell register.
  __ LoadP(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
  __ CmpP(cached_map, receiver_map);
  __ bne(try_array);
  Register handler = feedback;
  __ SmiToPtrArrayOffset(r1, slot);
  __ LoadP(handler,
           FieldMemOperand(r1, vector, FixedArray::kHeaderSize + kPointerSize));
  __ AddP(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(ip);
}

void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
  KeyedStoreICStub stub(isolate(), state());
  stub.GenerateForTrampoline(masm);
}

void KeyedStoreICStub::Generate(MacroAssembler* masm) {
  GenerateImpl(masm, false);
}

void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, true);
}

static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
                                       Register receiver_map, Register scratch1,
                                       Register scratch2, Label* miss) {
  // feedback initially contains the feedback array
  Label next_loop, prepare_next;
  Label start_polymorphic;
  Label transition_call;

  Register cached_map = scratch1;
  Register too_far = scratch2;
  Register pointer_reg = feedback;
  __ LoadP(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));

  // +-----+------+------+-----+-----+-----+ ... ----+
  // | map | len  | wm0  | wt0 | h0  | wm1 |   hN    |
  // +-----+------+------+-----+-----+-----+ ... ----+
  //                 0      1     2              len-1
  //                 ^                              ^
  //                 |                              |
  //            pointer_reg                      too_far
  //            aka feedback                     scratch2
  // also need receiver_map
  // use cached_map (scratch1) to look in the weak map values.
  __ SmiToPtrArrayOffset(r0, too_far);
  __ AddP(too_far, feedback, r0);
  __ AddP(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ AddP(pointer_reg, feedback,
          Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));

  __ bind(&next_loop);
  __ LoadP(cached_map, MemOperand(pointer_reg));
  __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ CmpP(receiver_map, cached_map);
  __ bne(&prepare_next);
  // Is it a transitioning store?
  __ LoadP(too_far, MemOperand(pointer_reg, kPointerSize));
  __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
  __ bne(&transition_call);
  __ LoadP(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
  __ AddP(ip, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(ip);

  __ bind(&transition_call);
  __ LoadP(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
  __ JumpIfSmi(too_far, miss);

  __ LoadP(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));

  // Load the map into the correct register.
  DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
  __ LoadRR(feedback, too_far);

  __ AddP(ip, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(ip);

  __ bind(&prepare_next);
  __ AddP(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
  __ CmpLogicalP(pointer_reg, too_far);
  __ blt(&next_loop);

  // We exhausted our array of map handler pairs.
  __ b(miss);
}

void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // r3
  Register key = StoreWithVectorDescriptor::NameRegister();           // r4
  Register vector = StoreWithVectorDescriptor::VectorRegister();      // r5
  Register slot = StoreWithVectorDescriptor::SlotRegister();          // r6
  DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r2));          // r2
  Register feedback = r7;
  Register receiver_map = r8;
  Register scratch1 = r9;

  __ SmiToPtrArrayOffset(r0, slot);
  __ AddP(feedback, vector, r0);
  __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));

  // Try to quickly handle the monomorphic case without knowing for sure
  // if we have a weak cell in feedback. We do know it's safe to look
  // at WeakCell::kValueOffset.
  Label try_array, load_smi_map, compare_map;
  Label not_array, miss;
  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
                        scratch1, &compare_map, &load_smi_map, &try_array);

  __ bind(&try_array);
  // Is it a fixed array?
  __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
  __ bne(&not_array);

  // We have a polymorphic element handler.
  Label polymorphic, try_poly_name;
  __ bind(&polymorphic);

  Register scratch2 = ip;

  HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
                             &miss);

  __ bind(&not_array);
  // Is it generic?
  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
  __ bne(&try_poly_name);
  Handle<Code> megamorphic_stub =
      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);

  __ bind(&try_poly_name);
  // We might have a name in feedback, and a fixed array in the next slot.
  __ CmpP(key, feedback);
  __ bne(&miss);
  // If the name comparison succeeded, we know we have a fixed array with
  // at least one map/handler pair.
  __ SmiToPtrArrayOffset(r0, slot);
  __ AddP(feedback, vector, r0);
  __ LoadP(feedback,
           FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
                   &miss);

  __ bind(&miss);
  KeyedStoreIC::GenerateMiss(masm);

  __ bind(&load_smi_map);
  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
  __ b(&compare_map);
}

void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
  if (masm->isolate()->function_entry_hook() != NULL) {
    PredictableCodeSizeScope predictable(masm,
@ -66,306 +66,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch_elements = r6;
  DCHECK(!AreAliased(receiver, key, value, target_map, scratch_elements));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements, r1,
                                         allocation_memento_found);
  }

  // Set transitioned map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r1,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}

void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode, Label* fail) {
  // lr contains the return address
  Label loop, entry, convert_hole, gc_required, only_change_map, done;
  Register elements = r6;
  Register length = r7;
  Register array = r8;
  Register array_end = array;

  // target_map parameter can be clobbered.
  Register scratch1 = target_map;
  Register scratch2 = r1;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
                     scratch2));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch2, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ beq(&only_change_map, Label::kNear);

  // Preserve lr and use r14 as a temporary register.
  __ push(r14);

  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // length: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
  __ SmiToDoubleArrayOffset(r14, length);
  __ AddP(r14, Operand(FixedDoubleArray::kHeaderSize));
  __ Allocate(r14, array, r9, scratch2, &gc_required, DOUBLE_ALIGNMENT);
  __ SubP(array, array, Operand(kHeapObjectTag));
  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
  __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  // Update receiver's map.
  __ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset));

  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
                      kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ AddP(scratch1, array, Operand(kHeapObjectTag));
  __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
                      kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  __ AddP(target_map, elements,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ AddP(r9, array, Operand(FixedDoubleArray::kHeaderSize));
  __ SmiToDoubleArrayOffset(array, length);
  __ AddP(array_end, r9, array);
  // Repurpose registers no longer in use.
#if V8_TARGET_ARCH_S390X
  Register hole_int64 = elements;
#else
  Register hole_lower = elements;
  Register hole_upper = length;
#endif
  // scratch1: begin of source FixedArray element fields, not tagged
  // hole_lower: kHoleNanLower32 OR hole_int64
  // hole_upper: kHoleNanUpper32
  // array_end: end of destination FixedDoubleArray, not tagged
  // scratch2: begin of FixedDoubleArray element fields, not tagged

  __ b(&entry, Label::kNear);

  __ bind(&only_change_map);
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ b(&done, Label::kNear);

  // Call into runtime if GC is required.
|
||||
__ bind(&gc_required);
|
||||
__ pop(r14);
|
||||
__ b(fail);
|
||||
|
||||
// Convert and copy elements.
|
||||
__ bind(&loop);
|
||||
__ LoadP(r14, MemOperand(scratch1));
|
||||
__ la(scratch1, MemOperand(scratch1, kPointerSize));
|
||||
// r1: current element
|
||||
__ UntagAndJumpIfNotSmi(r14, r14, &convert_hole);
|
||||
|
||||
// Normal smi, convert to double and store.
|
||||
__ ConvertIntToDouble(r14, d0);
|
||||
__ StoreDouble(d0, MemOperand(r9, 0));
|
||||
__ la(r9, MemOperand(r9, 8));
|
||||
|
||||
__ b(&entry, Label::kNear);
|
||||
|
||||
// Hole found, store the-hole NaN.
|
||||
__ bind(&convert_hole);
|
||||
if (FLAG_debug_code) {
|
||||
// Restore a "smi-untagged" heap object.
|
||||
__ LoadP(r1, MemOperand(r5, -kPointerSize));
|
||||
__ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
|
||||
__ Assert(eq, kObjectFoundInSmiOnlyArray);
|
||||
}
|
||||
#if V8_TARGET_ARCH_S390X
|
||||
__ stg(hole_int64, MemOperand(r9, 0));
|
||||
#else
|
||||
__ StoreW(hole_upper, MemOperand(r9, Register::kExponentOffset));
|
||||
__ StoreW(hole_lower, MemOperand(r9, Register::kMantissaOffset));
|
||||
#endif
|
||||
__ AddP(r9, Operand(8));
|
||||
|
||||
__ bind(&entry);
|
||||
__ CmpP(r9, array_end);
|
||||
__ blt(&loop);
|
||||
|
||||
__ pop(r14);
|
||||
__ bind(&done);
|
||||
}
|
||||
|
||||
void ElementsTransitionGenerator::GenerateDoubleToObject(
|
||||
MacroAssembler* masm, Register receiver, Register key, Register value,
|
||||
Register target_map, AllocationSiteMode mode, Label* fail) {
|
||||
// Register lr contains the return address.
|
||||
Label loop, convert_hole, gc_required, only_change_map;
|
||||
Register elements = r6;
|
||||
Register array = r8;
|
||||
Register length = r7;
|
||||
Register scratch = r1;
|
||||
Register scratch3 = r9;
|
||||
Register hole_value = r9;
|
||||
|
||||
// Verify input registers don't conflict with locals.
|
||||
DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
|
||||
scratch));
|
||||
|
||||
if (mode == TRACK_ALLOCATION_SITE) {
|
||||
__ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail);
|
||||
}
|
||||
|
||||
// Check for empty arrays, which only require a map transition and no changes
|
||||
// to the backing store.
|
||||
__ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
|
||||
__ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
|
||||
__ beq(&only_change_map);
|
||||
|
||||
__ Push(target_map, receiver, key, value);
|
||||
__ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
|
||||
// elements: source FixedDoubleArray
|
||||
// length: number of elements (smi-tagged)
|
||||
|
||||
// Allocate new FixedArray.
|
||||
// Re-use value and target_map registers, as they have been saved on the
|
||||
// stack.
|
||||
Register array_size = value;
|
||||
Register allocate_scratch = target_map;
|
||||
__ LoadImmP(array_size, Operand(FixedDoubleArray::kHeaderSize));
|
||||
__ SmiToPtrArrayOffset(r0, length);
|
||||
__ AddP(array_size, r0);
|
||||
__ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
|
||||
NO_ALLOCATION_FLAGS);
|
||||
// array: destination FixedArray, tagged as heap object
|
||||
// Set destination FixedDoubleArray's length and map.
|
||||
__ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
|
||||
__ StoreP(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset),
|
||||
r0);
|
||||
__ StoreP(scratch, FieldMemOperand(array, HeapObject::kMapOffset), r0);
|
||||
|
||||
// Prepare for conversion loop.
|
||||
Register src_elements = elements;
|
||||
Register dst_elements = target_map;
|
||||
Register dst_end = length;
|
||||
Register heap_number_map = scratch;
|
||||
__ AddP(src_elements,
|
||||
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
|
||||
__ SmiToPtrArrayOffset(length, length);
|
||||
__ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex);
|
||||
|
||||
Label initialization_loop, loop_done;
|
||||
__ ShiftRightP(scratch, length, Operand(kPointerSizeLog2));
|
||||
__ beq(&loop_done, Label::kNear);
|
||||
|
||||
// Allocating heap numbers in the loop below can fail and cause a jump to
|
||||
// gc_required. We can't leave a partly initialized FixedArray behind,
|
||||
// so pessimistically fill it with holes now.
|
||||
__ AddP(dst_elements, array,
|
||||
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
|
||||
__ bind(&initialization_loop);
|
||||
__ StoreP(hole_value, MemOperand(dst_elements, kPointerSize));
|
||||
__ lay(dst_elements, MemOperand(dst_elements, kPointerSize));
|
||||
__ BranchOnCount(scratch, &initialization_loop);
|
||||
|
||||
__ AddP(dst_elements, array,
|
||||
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
|
||||
__ AddP(dst_end, dst_elements, length);
|
||||
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
|
||||
// Using offsetted addresses in src_elements to fully take advantage of
|
||||
// post-indexing.
|
||||
// dst_elements: begin of destination FixedArray element fields, not tagged
|
||||
// src_elements: begin of source FixedDoubleArray element fields,
|
||||
// not tagged, +4
|
||||
// dst_end: end of destination FixedArray, not tagged
|
||||
// array: destination FixedArray
|
||||
// hole_value: the-hole pointer
|
||||
// heap_number_map: heap number map
|
||||
__ b(&loop, Label::kNear);
|
||||
|
||||
// Call into runtime if GC is required.
|
||||
__ bind(&gc_required);
|
||||
__ Pop(target_map, receiver, key, value);
|
||||
__ b(fail);
|
||||
|
||||
// Replace the-hole NaN with the-hole pointer.
|
||||
__ bind(&convert_hole);
|
||||
__ StoreP(hole_value, MemOperand(dst_elements));
|
||||
__ AddP(dst_elements, Operand(kPointerSize));
|
||||
__ CmpLogicalP(dst_elements, dst_end);
|
||||
__ bge(&loop_done);
|
||||
|
||||
__ bind(&loop);
|
||||
Register upper_bits = key;
|
||||
__ LoadlW(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
|
||||
__ AddP(src_elements, Operand(kDoubleSize));
|
||||
// upper_bits: current element's upper 32 bit
|
||||
// src_elements: address of next element's upper 32 bit
|
||||
__ Cmp32(upper_bits, Operand(kHoleNanUpper32));
|
||||
__ beq(&convert_hole, Label::kNear);
|
||||
|
||||
// Non-hole double, copy value into a heap number.
|
||||
Register heap_number = receiver;
|
||||
Register scratch2 = value;
|
||||
__ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
|
||||
&gc_required);
|
||||
// heap_number: new heap number
|
||||
#if V8_TARGET_ARCH_S390X
|
||||
__ lg(scratch2, MemOperand(src_elements, -kDoubleSize));
|
||||
// subtract tag for std
|
||||
__ AddP(upper_bits, heap_number, Operand(-kHeapObjectTag));
|
||||
__ stg(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
|
||||
#else
|
||||
__ LoadlW(scratch2,
|
||||
MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize));
|
||||
__ LoadlW(upper_bits,
|
||||
MemOperand(src_elements, Register::kExponentOffset - kDoubleSize));
|
||||
__ StoreW(scratch2,
|
||||
FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
|
||||
__ StoreW(upper_bits,
|
||||
FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
|
||||
#endif
|
||||
__ LoadRR(scratch2, dst_elements);
|
||||
__ StoreP(heap_number, MemOperand(dst_elements));
|
||||
__ AddP(dst_elements, Operand(kPointerSize));
|
||||
__ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
|
||||
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
|
||||
__ CmpLogicalP(dst_elements, dst_end);
|
||||
__ blt(&loop);
|
||||
__ bind(&loop_done);
|
||||
|
||||
__ Pop(target_map, receiver, key, value);
|
||||
// Replace receiver's backing store with newly created and filled FixedArray.
|
||||
__ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
|
||||
__ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch,
|
||||
kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
|
||||
OMIT_SMI_CHECK);
|
||||
|
||||
__ bind(&only_change_map);
|
||||
// Update receiver's map.
|
||||
__ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
|
||||
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
|
||||
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
|
||||
OMIT_SMI_CHECK);
|
||||
}
|
||||
|
||||
// assume ip can be used as a scratch register below
|
||||
void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
|
||||
Register index, Register result,
|
||||
|
@ -1956,62 +1956,10 @@ void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
CmpP(obj, MemOperand(kRootRegister, index << kPointerSizeLog2));
}

void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
ble(fail);
CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
Operand(Map::kMaximumBitField2FastHoleyElementValue));
bgt(fail);
}

void MacroAssembler::CheckFastSmiElements(Register map, Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
bgt(fail);
}

void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
SmiUntag(ip, smi);
ConvertIntToDouble(ip, value);
}
void MacroAssembler::StoreNumberToDoubleElements(
Register value_reg, Register key_reg, Register elements_reg,
Register scratch1, DoubleRegister double_scratch, Label* fail,
int elements_offset) {
DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
Label smi_value, store;

// Handle smi values specially.
JumpIfSmi(value_reg, &smi_value);

// Ensure that the object is a heap number
CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), fail,
DONT_DO_SMI_CHECK);

LoadDouble(double_scratch,
FieldMemOperand(value_reg, HeapNumber::kValueOffset));
// Force a canonical NaN.
CanonicalizeNaN(double_scratch);
b(&store);

bind(&smi_value);
SmiToDouble(double_scratch, value_reg);

bind(&store);
SmiToDoubleArrayOffset(scratch1, key_reg);
StoreDouble(double_scratch,
FieldMemOperand(elements_reg, scratch1,
FixedDoubleArray::kHeaderSize - elements_offset));
}

void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map,
Label* early_success) {
@ -2491,23 +2439,6 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
}

void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind, ElementsKind transitioned_kind,
Register map_in_out, Register scratch, Label* no_map_match) {
DCHECK(IsFastElementsKind(expected_kind));
DCHECK(IsFastElementsKind(transitioned_kind));

// Check that the function's map is the same as the expected cached map.
LoadP(scratch, NativeContextMemOperand());
LoadP(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
CmpP(map_in_out, ip);
bne(no_map_match);

// Use the transitioned cached map.
LoadP(map_in_out,
ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}

void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
LoadP(dst, NativeContextMemOperand());
LoadP(dst, ContextMemOperand(dst, index));
@ -2592,25 +2523,6 @@ void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
beq(smi_case);
}

void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src,
Label* non_smi_case) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);

// We can more optimally use TestIfSmi if dst != src
// otherwise, the UnTag operation will kill the CC and we cannot
// test the Tag bit.
if (src.code() != dst.code()) {
SmiUntag(dst, src);
TestIfSmi(src);
} else {
TestBit(src, 0, r0);
SmiUntag(dst, src);
LoadAndTestRR(r0, r0);
}
bne(non_smi_case);
}

void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
Label* on_either_smi) {
STATIC_ASSERT(kSmiTag == 0);
@ -3413,42 +3325,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
return no_reg;
}

void MacroAssembler::JumpIfDictionaryInPrototypeChain(Register object,
Register scratch0,
Register scratch1,
Label* found) {
DCHECK(!scratch1.is(scratch0));
Register current = scratch0;
Label loop_again, end;

// scratch contained elements pointer.
LoadRR(current, object);
LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
CompareRoot(current, Heap::kNullValueRootIndex);
beq(&end);

// Loop based on the map going up the prototype chain.
bind(&loop_again);
LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));

STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
LoadlB(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
CmpP(scratch1, Operand(JS_OBJECT_TYPE));
blt(found);

LoadlB(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
DecodeField<Map::ElementsKindBits>(scratch1);
CmpP(scratch1, Operand(DICTIONARY_ELEMENTS));
beq(found);
LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
CompareRoot(current, Heap::kNullValueRootIndex);
bne(&loop_again);

bind(&end);
}

void MacroAssembler::mov(Register dst, const Operand& src) {
if (src.rmode_ != kRelocInfo_NONEPTR) {
// some form of relocation needed
@ -784,16 +784,6 @@ class MacroAssembler : public Assembler {
LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
}

// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
// map_in_out is the cached Array map in the native context of
// expected_kind.
void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match);

void LoadNativeContextSlot(int index, Register dst);

// Load the initial map from the global function. The registers
@ -1071,22 +1061,6 @@ class MacroAssembler : public Assembler {
// sets the flags and leaves the object type in the type_reg register.
void CompareInstanceType(Register map, Register type_reg, InstanceType type);

// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map, Register scratch, Label* fail);

// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
void CheckFastSmiElements(Register map, Register scratch, Label* fail);

// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
// the FastDoubleElements array elements. Otherwise jump to fail.
void StoreNumberToDoubleElements(Register value_reg, Register key_reg,
Register elements_reg, Register scratch1,
DoubleRegister double_scratch, Label* fail,
int elements_offset = 0);

// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
// set with result of map compare. If multiple map compares are required, the
@ -1576,10 +1550,6 @@ class MacroAssembler : public Assembler {
// Source and destination can be the same register.
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

// Untag the source value into destination and jump if source is not a smi.
// Source and destination can be the same register.
void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);

inline void TestIfSmi(Register value) { tmll(value, Operand(1)); }

inline void TestIfPositiveSmi(Register value, Register scratch) {
@ -1772,21 +1742,6 @@ class MacroAssembler : public Assembler {
Register scratch2_reg,
Label* no_memento_found);

void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
Register scratch_reg,
Register scratch2_reg,
Label* memento_found) {
Label no_memento_found;
TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, scratch2_reg,
&no_memento_found);
beq(memento_found);
bind(&no_memento_found);
}

// Jumps to found label if a prototype map has dictionary elements.
void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
Register scratch1, Label* found);

private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
18
src/v8.gyp
@ -1360,8 +1360,6 @@
'ic/arm/access-compiler-arm.cc',
'ic/arm/handler-compiler-arm.cc',
'ic/arm/ic-arm.cc',
'ic/arm/ic-compiler-arm.cc',
'ic/arm/stub-cache-arm.cc',
'regexp/arm/regexp-macro-assembler-arm.cc',
'regexp/arm/regexp-macro-assembler-arm.h',
],
@ -1420,8 +1418,6 @@
'ic/arm64/access-compiler-arm64.cc',
'ic/arm64/handler-compiler-arm64.cc',
'ic/arm64/ic-arm64.cc',
'ic/arm64/ic-compiler-arm64.cc',
'ic/arm64/stub-cache-arm64.cc',
'regexp/arm64/regexp-macro-assembler-arm64.cc',
'regexp/arm64/regexp-macro-assembler-arm64.h',
],
@ -1461,8 +1457,6 @@
'ic/ia32/access-compiler-ia32.cc',
'ic/ia32/handler-compiler-ia32.cc',
'ic/ia32/ic-ia32.cc',
'ic/ia32/ic-compiler-ia32.cc',
'ic/ia32/stub-cache-ia32.cc',
'regexp/ia32/regexp-macro-assembler-ia32.cc',
'regexp/ia32/regexp-macro-assembler-ia32.h',
],
@ -1502,8 +1496,6 @@
'ic/x87/access-compiler-x87.cc',
'ic/x87/handler-compiler-x87.cc',
'ic/x87/ic-x87.cc',
'ic/x87/ic-compiler-x87.cc',
'ic/x87/stub-cache-x87.cc',
'regexp/x87/regexp-macro-assembler-x87.cc',
'regexp/x87/regexp-macro-assembler-x87.h',
],
@ -1545,8 +1537,6 @@
'ic/mips/access-compiler-mips.cc',
'ic/mips/handler-compiler-mips.cc',
'ic/mips/ic-mips.cc',
'ic/mips/ic-compiler-mips.cc',
'ic/mips/stub-cache-mips.cc',
'regexp/mips/regexp-macro-assembler-mips.cc',
'regexp/mips/regexp-macro-assembler-mips.h',
],
@ -1588,8 +1578,6 @@
'ic/mips64/access-compiler-mips64.cc',
'ic/mips64/handler-compiler-mips64.cc',
'ic/mips64/ic-mips64.cc',
'ic/mips64/ic-compiler-mips64.cc',
'ic/mips64/stub-cache-mips64.cc',
'regexp/mips64/regexp-macro-assembler-mips64.cc',
'regexp/mips64/regexp-macro-assembler-mips64.h',
],
@ -1633,8 +1621,6 @@
'ic/x64/access-compiler-x64.cc',
'ic/x64/handler-compiler-x64.cc',
'ic/x64/ic-x64.cc',
'ic/x64/ic-compiler-x64.cc',
'ic/x64/stub-cache-x64.cc',
'regexp/x64/regexp-macro-assembler-x64.cc',
'regexp/x64/regexp-macro-assembler-x64.h',
'third_party/valgrind/valgrind.h',
@ -1658,8 +1644,6 @@
'ic/ppc/access-compiler-ppc.cc',
'ic/ppc/handler-compiler-ppc.cc',
'ic/ppc/ic-ppc.cc',
'ic/ppc/ic-compiler-ppc.cc',
'ic/ppc/stub-cache-ppc.cc',
'ppc/assembler-ppc-inl.h',
'ppc/assembler-ppc.cc',
'ppc/assembler-ppc.h',
@ -1700,9 +1684,7 @@
'full-codegen/s390/full-codegen-s390.cc',
'ic/s390/access-compiler-s390.cc',
'ic/s390/handler-compiler-s390.cc',
'ic/s390/ic-compiler-s390.cc',
'ic/s390/ic-s390.cc',
'ic/s390/stub-cache-s390.cc',
'regexp/s390/regexp-macro-assembler-s390.cc',
'regexp/s390/regexp-macro-assembler-s390.h',
's390/assembler-s390.cc',
@ -2949,203 +2949,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ jmp(rcx); // Return to IC Miss stub, continuation still on stack.
}

static void HandleArrayCases(MacroAssembler* masm, Register feedback,
Register receiver_map, Register scratch1,
Register scratch2, Register scratch3,
bool is_polymorphic, Label* miss) {
// feedback initially contains the feedback array
Label next_loop, prepare_next;
Label start_polymorphic;

Register counter = scratch1;
Register length = scratch2;
Register cached_map = scratch3;

__ movp(cached_map, FieldOperand(feedback, FixedArray::OffsetOfElementAt(0)));
__ cmpp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
__ j(not_equal, &start_polymorphic);

// found, now call handler.
Register handler = feedback;
__ movp(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
__ leap(handler, FieldOperand(handler, Code::kHeaderSize));
__ jmp(handler);

// Polymorphic, we have to loop from 2 to N
__ bind(&start_polymorphic);
__ SmiToInteger32(length, FieldOperand(feedback, FixedArray::kLengthOffset));
if (!is_polymorphic) {
// If the IC could be monomorphic we have to make sure we don't go past the
// end of the feedback array.
__ cmpl(length, Immediate(2));
__ j(equal, miss);
}
__ movl(counter, Immediate(2));

__ bind(&next_loop);
__ movp(cached_map, FieldOperand(feedback, counter, times_pointer_size,
FixedArray::kHeaderSize));
__ cmpp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
__ j(not_equal, &prepare_next);
__ movp(handler, FieldOperand(feedback, counter, times_pointer_size,
FixedArray::kHeaderSize + kPointerSize));
__ leap(handler, FieldOperand(handler, Code::kHeaderSize));
__ jmp(handler);

__ bind(&prepare_next);
__ addl(counter, Immediate(2));
__ cmpl(counter, length);
__ j(less, &next_loop);

// We exhausted our array of map handler pairs.
__ jmp(miss);
}


static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
Register receiver_map, Register feedback,
Register vector, Register integer_slot,
Label* compare_map, Label* load_smi_map,
Label* try_array) {
__ JumpIfSmi(receiver, load_smi_map);
__ movp(receiver_map, FieldOperand(receiver, 0));

__ bind(compare_map);
__ cmpp(receiver_map, FieldOperand(feedback, WeakCell::kValueOffset));
__ j(not_equal, try_array);
Register handler = feedback;
__ movp(handler, FieldOperand(vector, integer_slot, times_pointer_size,
FixedArray::kHeaderSize + kPointerSize));
__ leap(handler, FieldOperand(handler, Code::kHeaderSize));
__ jmp(handler);
}

void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
KeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}

void KeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}

void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}


static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
Register receiver_map,
Register feedback, Register scratch,
Register scratch1,
Register scratch2, Label* miss) {
// feedback initially contains the feedback array
Label next, next_loop, prepare_next;
Label transition_call;

Register cached_map = scratch;
Register counter = scratch1;
Register length = scratch2;

// Polymorphic, we have to loop from 0 to N - 1
__ movp(counter, Immediate(0));
__ movp(length, FieldOperand(feedback, FixedArray::kLengthOffset));
__ SmiToInteger32(length, length);

__ bind(&next_loop);
__ movp(cached_map, FieldOperand(feedback, counter, times_pointer_size,
FixedArray::kHeaderSize));
__ cmpp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
__ j(not_equal, &prepare_next);
__ movp(cached_map, FieldOperand(feedback, counter, times_pointer_size,
FixedArray::kHeaderSize + kPointerSize));
__ CompareRoot(cached_map, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &transition_call);
__ movp(feedback, FieldOperand(feedback, counter, times_pointer_size,
FixedArray::kHeaderSize + 2 * kPointerSize));
__ leap(feedback, FieldOperand(feedback, Code::kHeaderSize));
__ jmp(feedback);

__ bind(&transition_call);
DCHECK(receiver_map.is(StoreTransitionDescriptor::MapRegister()));
__ movp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
// The weak cell may have been cleared.
__ JumpIfSmi(receiver_map, miss);
// Get the handler in value.
__ movp(feedback, FieldOperand(feedback, counter, times_pointer_size,
FixedArray::kHeaderSize + 2 * kPointerSize));
__ leap(feedback, FieldOperand(feedback, Code::kHeaderSize));
__ jmp(feedback);

__ bind(&prepare_next);
__ addl(counter, Immediate(3));
__ cmpl(counter, length);
__ j(less, &next_loop);

// We exhausted our array of map handler pairs.
__ jmp(miss);
}

void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // rdx
Register key = StoreWithVectorDescriptor::NameRegister(); // rcx
Register vector = StoreWithVectorDescriptor::VectorRegister(); // rbx
Register slot = StoreWithVectorDescriptor::SlotRegister(); // rdi
DCHECK(StoreWithVectorDescriptor::ValueRegister().is(rax)); // rax
Register feedback = r8;
Register integer_slot = r9;
Register receiver_map = r11;
DCHECK(!AreAliased(feedback, integer_slot, vector, slot, receiver_map));

__ SmiToInteger32(integer_slot, slot);
__ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
FixedArray::kHeaderSize));

// Try to quickly handle the monomorphic case without knowing for sure
// if we have a weak cell in feedback. We do know it's safe to look
// at WeakCell::kValueOffset.
Label try_array, load_smi_map, compare_map;
Label not_array, miss;
HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector,
integer_slot, &compare_map, &load_smi_map, &try_array);

// Is it a fixed array?
__ bind(&try_array);
__ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &not_array);
HandlePolymorphicKeyedStoreCase(masm, receiver_map, feedback, integer_slot,
r15, r14, &miss);

__ bind(&not_array);
Label try_poly_name;
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ j(not_equal, &try_poly_name);

Handle<Code> megamorphic_stub =
KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);

__ bind(&try_poly_name);
// We might have a name in feedback, and a fixed array in the next slot.
__ cmpp(key, feedback);
__ j(not_equal, &miss);
// If the name comparison succeeded, we know we have a fixed array with
// at least one map/handler pair.
__ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
FixedArray::kHeaderSize + kPointerSize));
HandleArrayCases(masm, feedback, receiver_map, integer_slot, r14, r15, false,
&miss);

__ bind(&miss);
KeyedStoreIC::GenerateMiss(masm);

__ bind(&load_smi_map);
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
__ jmp(&compare_map);
}


void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(rbx);
CallICStub stub(isolate(), state());
@ -62,309 +62,6 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* allocation_memento_found) {
// Return address is on the stack.
Register scratch = rdi;
DCHECK(!AreAliased(receiver, key, value, target_map, scratch));

if (mode == TRACK_ALLOCATION_SITE) {
DCHECK(allocation_memento_found != NULL);
__ JumpIfJSArrayHasAllocationMemento(
receiver, scratch, allocation_memento_found);
}

// Set transitioned map.
__ movp(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
__ RecordWriteField(receiver,
HeapObject::kMapOffset,
target_map,
scratch,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* fail) {
// Return address is on the stack.
DCHECK(receiver.is(rdx));
DCHECK(key.is(rcx));
DCHECK(value.is(rax));
DCHECK(target_map.is(rbx));

// The fail label is reached if allocating a new backing store fails.
Label allocated, new_backing_store, only_change_map, done;

if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
}

// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
__ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
__ j(equal, &only_change_map);

__ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
if (kPointerSize == kDoubleSize) {
// Check backing store for COW-ness. For COW arrays we have to
// allocate a new backing store.
__ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
Heap::kFixedCOWArrayMapRootIndex);
__ j(equal, &new_backing_store);
} else {
// For x32 port we have to allocate a new backing store as SMI size is
// not equal to the double size.
DCHECK(kDoubleSize == 2 * kPointerSize);
__ jmp(&new_backing_store);
}

// Check if the backing store is in new-space. If not, we need to allocate
// a new one since the old one is in pointer-space.
// If in new space, we can reuse the old backing store because it is
// the same size.
__ JumpIfNotInNewSpace(r8, rdi, &new_backing_store);

__ movp(r14, r8); // Destination array equals source array.

// r8 : source FixedArray
// r9 : elements array length
// r14: destination FixedDoubleArray
// Set backing store's map
__ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
__ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);

__ bind(&allocated);
// Set transitioned map.
__ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
__ RecordWriteField(rdx,
HeapObject::kMapOffset,
rbx,
rdi,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);

// Convert smis to doubles and holes to hole NaNs. The Array's length
// remains unchanged.
STATIC_ASSERT(FixedDoubleArray::kLengthOffset == FixedArray::kLengthOffset);
STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);

Label loop, entry, convert_hole;
__ movq(r15, bit_cast<int64_t, uint64_t>(kHoleNanInt64));
// r15: the-hole NaN
__ jmp(&entry);

// Allocate new backing store.
__ bind(&new_backing_store);
__ leap(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
__ Allocate(rdi, r14, r11, r15, fail, NO_ALLOCATION_FLAGS);
// Set backing store's map
__ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
__ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);
// Set receiver's backing store.
__ movp(FieldOperand(rdx, JSObject::kElementsOffset), r14);
__ movp(r11, r14);
__ RecordWriteField(rdx,
JSObject::kElementsOffset,
r11,
r15,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Set backing store's length.
__ Integer32ToSmi(r11, r9);
__ movp(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
__ jmp(&allocated);

__ bind(&only_change_map);
// Set transitioned map.
__ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
__ RecordWriteField(rdx,
HeapObject::kMapOffset,
rbx,
rdi,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ jmp(&done);

// Conversion loop.
__ bind(&loop);
__ movp(rbx,
FieldOperand(r8, r9, times_pointer_size, FixedArray::kHeaderSize));
// r9 : current element's index
// rbx: current element (smi-tagged)
__ JumpIfNotSmi(rbx, &convert_hole);
__ SmiToInteger32(rbx, rbx);
__ Cvtlsi2sd(kScratchDoubleReg, rbx);
__ Movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
kScratchDoubleReg);
__ jmp(&entry);
__ bind(&convert_hole);

if (FLAG_debug_code) {
__ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
__ Assert(equal, kObjectFoundInSmiOnlyArray);
}

__ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
__ bind(&entry);
__ decp(r9);
__ j(not_sign, &loop);

__ bind(&done);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* fail) {
// Return address is on the stack.
DCHECK(receiver.is(rdx));
DCHECK(key.is(rcx));
DCHECK(value.is(rax));
DCHECK(target_map.is(rbx));

Label loop, entry, convert_hole, gc_required, only_change_map;

if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
}

// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
__ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
__ j(equal, &only_change_map);

__ Push(rsi);
__ Push(rax);

__ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
__ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
// r8 : source FixedDoubleArray
// r9 : number of elements
__ leap(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
__ Allocate(rdi, r11, r14, r15, &gc_required, NO_ALLOCATION_FLAGS);
// r11: destination FixedArray
__ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
__ movp(FieldOperand(r11, HeapObject::kMapOffset), rdi);
__ Integer32ToSmi(r14, r9);
__ movp(FieldOperand(r11, FixedArray::kLengthOffset), r14);

// Prepare for conversion loop.
__ movq(rsi, bit_cast<int64_t, uint64_t>(kHoleNanInt64));
__ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
// rsi: the-hole NaN
// rdi: pointer to the-hole

// Allocating heap numbers in the loop below can fail and cause a jump to
// gc_required. We can't leave a partly initialized FixedArray behind,
// so pessimistically fill it with holes now.
Label initialization_loop, initialization_loop_entry;
__ jmp(&initialization_loop_entry, Label::kNear);
__ bind(&initialization_loop);
__ movp(FieldOperand(r11, r9, times_pointer_size, FixedArray::kHeaderSize),
rdi);
__ bind(&initialization_loop_entry);
__ decp(r9);
__ j(not_sign, &initialization_loop);

__ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
__ jmp(&entry);

// Call into runtime if GC is required.
__ bind(&gc_required);
__ Pop(rax);
__ Pop(rsi);
__ jmp(fail);

// Box doubles into heap numbers.
__ bind(&loop);
__ movq(r14, FieldOperand(r8,
r9,
times_8,
FixedDoubleArray::kHeaderSize));
// r9 : current element's index
// r14: current element
__ cmpq(r14, rsi);
__ j(equal, &convert_hole);

// Non-hole double, copy value into a heap number.
__ AllocateHeapNumber(rax, r15, &gc_required);
// rax: new heap number
__ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
__ movp(FieldOperand(r11,
r9,
times_pointer_size,
FixedArray::kHeaderSize),
rax);
__ movp(r15, r9);
__ RecordWriteArray(r11,
rax,
r15,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ jmp(&entry, Label::kNear);

// Replace the-hole NaN with the-hole pointer.
__ bind(&convert_hole);
__ movp(FieldOperand(r11,
r9,
times_pointer_size,
FixedArray::kHeaderSize),
rdi);

__ bind(&entry);
__ decp(r9);
__ j(not_sign, &loop);

// Replace receiver's backing store with newly created and filled FixedArray.
__ movp(FieldOperand(rdx, JSObject::kElementsOffset), r11);
__ RecordWriteField(rdx,
JSObject::kElementsOffset,
r11,
r15,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ Pop(rax);
__ Pop(rsi);

__ bind(&only_change_map);
// Set transitioned map.
__ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
__ RecordWriteField(rdx,
HeapObject::kMapOffset,
rbx,
rdi,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register string,
Register index,
@ -3663,66 +3663,6 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
Immediate(static_cast<int8_t>(type)));
}

void MacroAssembler::CheckFastObjectElements(Register map,
Label* fail,
Label::Distance distance) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
cmpb(FieldOperand(map, Map::kBitField2Offset),
Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
j(below_equal, fail, distance);
cmpb(FieldOperand(map, Map::kBitField2Offset),
Immediate(Map::kMaximumBitField2FastHoleyElementValue));
j(above, fail, distance);
}


void MacroAssembler::CheckFastSmiElements(Register map,
Label* fail,
Label::Distance distance) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
cmpb(FieldOperand(map, Map::kBitField2Offset),
Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
j(above, fail, distance);
}


void MacroAssembler::StoreNumberToDoubleElements(
Register maybe_number,
Register elements,
Register index,
XMMRegister xmm_scratch,
Label* fail,
int elements_offset) {
Label smi_value, done;

JumpIfSmi(maybe_number, &smi_value, Label::kNear);

CheckMap(maybe_number,
isolate()->factory()->heap_number_map(),
fail,
DONT_DO_SMI_CHECK);

// Double value, turn potential sNaN into qNaN.
Move(xmm_scratch, 1.0);
mulsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
jmp(&done, Label::kNear);

bind(&smi_value);
// Value is a smi. Convert to a double and store.
// Preserve original value.
SmiToInteger32(kScratchRegister, maybe_number);
Cvtlsi2sd(xmm_scratch, kScratchRegister);
bind(&done);
Movsd(FieldOperand(elements, index, times_8,
FixedDoubleArray::kHeaderSize - elements_offset),
xmm_scratch);
}


void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
}
@ -5137,28 +5077,6 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
}


void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match) {
DCHECK(IsFastElementsKind(expected_kind));
DCHECK(IsFastElementsKind(transitioned_kind));

// Check that the function's map is the same as the expected cached map.
movp(scratch, NativeContextOperand());
cmpp(map_in_out,
ContextOperand(scratch, Context::ArrayMapIndex(expected_kind)));
j(not_equal, no_map_match);

// Use the transitioned cached map.
movp(map_in_out,
ContextOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}


#ifdef _WIN64
static const int kRegisterPassedArguments = 4;
#else
@ -5501,42 +5419,6 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
Heap::kAllocationMementoMapRootIndex);
}


void MacroAssembler::JumpIfDictionaryInPrototypeChain(
Register object,
Register scratch0,
Register scratch1,
Label* found) {
DCHECK(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
DCHECK(!scratch1.is(scratch0));
Register current = scratch0;
Label loop_again, end;

movp(current, object);
movp(current, FieldOperand(current, HeapObject::kMapOffset));
movp(current, FieldOperand(current, Map::kPrototypeOffset));
CompareRoot(current, Heap::kNullValueRootIndex);
j(equal, &end);

// Loop based on the map going up the prototype chain.
bind(&loop_again);
movp(current, FieldOperand(current, HeapObject::kMapOffset));
STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
CmpInstanceType(current, JS_OBJECT_TYPE);
j(below, found);
movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
DecodeField<Map::ElementsKindBits>(scratch1);
cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
j(equal, found);
movp(current, FieldOperand(current, Map::kPrototypeOffset));
CompareRoot(current, Heap::kNullValueRootIndex);
j(not_equal, &loop_again);

bind(&end);
}


void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
DCHECK(!dividend.is(rax));
DCHECK(!dividend.is(rdx));
@ -1112,29 +1112,6 @@ class MacroAssembler: public Assembler {
// Always use unsigned comparisons: above and below, not less and greater.
void CmpInstanceType(Register map, InstanceType type);

// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map,
Label* fail,
Label::Distance distance = Label::kFar);

// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
void CheckFastSmiElements(Register map,
Label* fail,
Label::Distance distance = Label::kFar);

// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by index in
// the FastDoubleElements array elements, otherwise jump to fail. Note that
// index must not be smi-tagged.
void StoreNumberToDoubleElements(Register maybe_number,
Register elements,
Register index,
XMMRegister xmm_scratch,
Label* fail,
int elements_offset = 0);

// Compare an object's map with the specified map.
void CompareMap(Register obj, Handle<Map> map);

@ -1420,17 +1397,6 @@ class MacroAssembler: public Assembler {
LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
}

// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
// map_in_out is the cached Array map in the native context of
// expected_kind.
void LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match);

// Load the native context slot with the current index.
void LoadNativeContextSlot(int index, Register dst);

@ -1593,20 +1559,6 @@ class MacroAssembler: public Assembler {
Register scratch_reg,
Label* no_memento_found);

void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
Register scratch_reg,
Label* memento_found) {
Label no_memento_found;
TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
&no_memento_found);
j(equal, memento_found);
bind(&no_memento_found);
}

// Jumps to found label if a prototype map has dictionary elements.
void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
Register scratch1, Label* found);

private:
// Order general registers are pushed by Pushad.
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14, r15.
@ -2834,328 +2834,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
|
||||
__ jmp(ecx); // Return to IC Miss stub, continuation still on stack.
|
||||
}
|
||||
|
||||
void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
|
||||
__ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
|
||||
KeyedStoreICStub stub(isolate(), state());
|
||||
stub.GenerateForTrampoline(masm);
|
||||
}
|
||||
|
||||
// value is on the stack already.
|
||||
static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
|
||||
Register key, Register vector,
|
||||
Register slot, Register feedback,
|
||||
bool is_polymorphic, Label* miss) {
|
||||
// feedback initially contains the feedback array
|
||||
Label next, next_loop, prepare_next;
|
||||
Label load_smi_map, compare_map;
|
||||
Label start_polymorphic;
|
||||
Label pop_and_miss;
|
||||
|
||||
__ push(receiver);
|
||||
// Value, vector and slot are passed on the stack, so no need to save/restore
|
||||
// them.
|
||||
|
||||
Register receiver_map = receiver;
|
||||
Register cached_map = vector;
|
||||
|
||||
// Receiver might not be a heap object.
|
||||
__ JumpIfSmi(receiver, &load_smi_map);
|
||||
__ mov(receiver_map, FieldOperand(receiver, 0));
|
||||
__ bind(&compare_map);
|
||||
__ mov(cached_map, FieldOperand(feedback, FixedArray::OffsetOfElementAt(0)));
|
||||
|
||||
// A named keyed store might have a 2 element array, all other cases can count
|
||||
// on an array with at least 2 {map, handler} pairs, so they can go right
|
||||
// into polymorphic array handling.
|
||||
__ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
|
||||
__ j(not_equal, &start_polymorphic);
|
||||
|
||||
// found, now call handler.
|
||||
Register handler = feedback;
|
||||
DCHECK(handler.is(StoreWithVectorDescriptor::ValueRegister()));
|
||||
__ mov(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
|
||||
__ pop(receiver);
|
||||
__ lea(handler, FieldOperand(handler, Code::kHeaderSize));
|
||||
__ jmp(handler);
|
||||
|
||||
// Polymorphic, we have to loop from 2 to N
|
||||
__ bind(&start_polymorphic);
|
||||
__ push(key);
|
||||
Register counter = key;
|
||||
__ mov(counter, Immediate(Smi::FromInt(2)));
|
||||
|
||||
if (!is_polymorphic) {
|
||||
// If is_polymorphic is false, we may only have a two element array.
|
||||
// Check against length now in that case.
|
||||
__ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
|
||||
__ j(greater_equal, &pop_and_miss);
|
||||
}
|
||||
|
||||
__ bind(&next_loop);
|
||||
__ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
|
||||
FixedArray::kHeaderSize));
|
||||
__ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
|
||||
__ j(not_equal, &prepare_next);
|
||||
__ mov(handler, FieldOperand(feedback, counter, times_half_pointer_size,
|
||||
FixedArray::kHeaderSize + kPointerSize));
|
||||
__ lea(handler, FieldOperand(handler, Code::kHeaderSize));
|
||||
__ pop(key);
|
||||
__ pop(receiver);
|
||||
__ jmp(handler);
|
||||
|
||||
__ bind(&prepare_next);
|
||||
__ add(counter, Immediate(Smi::FromInt(2)));
|
||||
__ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
|
||||
__ j(less, &next_loop);
|
||||
|
||||
// We exhausted our array of map handler pairs.
|
||||
__ bind(&pop_and_miss);
|
||||
__ pop(key);
|
||||
__ pop(receiver);
|
||||
__ jmp(miss);
|
||||
|
||||
__ bind(&load_smi_map);
|
||||
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
|
||||
__ jmp(&compare_map);
|
||||
}
|
||||
|
||||
|
||||
static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver,
                                       Register key, Register vector,
                                       Register slot, Register weak_cell,
                                       Label* miss) {
  // The store ic value is on the stack.
  DCHECK(weak_cell.is(StoreWithVectorDescriptor::ValueRegister()));

  // weak_cell initially contains the feedback slot's weak cell.
  Label compare_smi_map;

  // Load the map out of the weak cell into the (aliased) ic_map register.
  Register ic_map = weak_cell;
  __ mov(ic_map, FieldOperand(weak_cell, WeakCell::kValueOffset));

  // Receiver might not be a heap object.
  __ JumpIfSmi(receiver, &compare_smi_map);
  __ cmp(ic_map, FieldOperand(receiver, 0));
  __ j(not_equal, miss);
  __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
                                 FixedArray::kHeaderSize + kPointerSize));
  __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
  // Jump to the handler.
  __ jmp(weak_cell);

  // In microbenchmarks, it made sense to unroll this code so that the call to
  // the handler is duplicated for a HeapObject receiver and a Smi receiver.
  __ bind(&compare_smi_map);
  __ CompareRoot(ic_map, Heap::kHeapNumberMapRootIndex);
  __ j(not_equal, miss);
  __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
                                 FixedArray::kHeaderSize + kPointerSize));
  __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
  // Jump to the handler.
  __ jmp(weak_cell);
}

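The one subtlety in the monomorphic path is that a smi receiver has no map
word, so it is matched against the heap-number map root instead. A sketch of
that check under the same one-bit smi-tag assumption (helper names are mine):

#include <cstdint>

// Returns true when the monomorphic feedback (the map held by the weak cell)
// matches the receiver; smi receivers are dispatched via the HeapNumber map.
bool MonomorphicMapMatches(uintptr_t receiver, const void* receiver_map_word,
                           const void* cell_map, const void* heap_number_map) {
  const bool is_smi = (receiver & 1) == 0;  // low tag bit clear => smi
  const void* map = is_smi ? heap_number_map : receiver_map_word;
  return map == cell_map;
}
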
void KeyedStoreICStub::Generate(MacroAssembler* masm) {
  GenerateImpl(masm, false);
}

void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, true);
}

static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
                                            Register receiver, Register key,
                                            Register vector, Register slot,
                                            Register feedback, Label* miss) {
  // feedback initially contains the feedback array
  Label next, next_loop, prepare_next;
  Label load_smi_map, compare_map;
  Label transition_call;
  Label pop_and_miss;

  __ push(receiver);
  // Value, vector and slot are passed on the stack, so no need to save/restore
  // them.

  Register receiver_map = receiver;
  Register cached_map = vector;

  // Receiver might not be a heap object.
  __ JumpIfSmi(receiver, &load_smi_map);
  __ mov(receiver_map, FieldOperand(receiver, 0));
  __ bind(&compare_map);

  // Polymorphic; we have to loop from 0 to N - 1.
  __ push(key);
  // Current stack layout:
  // - esp[0]  -- key
  // - esp[4]  -- receiver
  // - esp[8]  -- return address
  // - esp[12] -- vector
  // - esp[16] -- slot
  // - esp[20] -- value
  //
  // Required stack layout for handler call (see StoreWithVectorDescriptor):
  // - esp[0]  -- return address
  // - esp[4]  -- vector
  // - esp[8]  -- slot
  // - esp[12] -- value
  // - receiver, key, handler in registers.
  Register counter = key;
  __ mov(counter, Immediate(Smi::kZero));
  __ bind(&next_loop);
  __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
                                  FixedArray::kHeaderSize));
  __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
  __ j(not_equal, &prepare_next);
  __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
                                  FixedArray::kHeaderSize + kPointerSize));
  __ CompareRoot(cached_map, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, &transition_call);
  __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
                                FixedArray::kHeaderSize + 2 * kPointerSize));
  __ pop(key);
  __ pop(receiver);
  __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
  __ jmp(feedback);

  __ bind(&transition_call);
  // Current stack layout:
  // - esp[0]  -- key
  // - esp[4]  -- receiver
  // - esp[8]  -- return address
  // - esp[12] -- vector
  // - esp[16] -- slot
  // - esp[20] -- value
  //
  // Required stack layout for handler call (see StoreTransitionDescriptor):
  // - esp[0]  -- return address
  // - esp[4]  -- vector
  // - esp[8]  -- slot
  // - esp[12] -- value
  // - receiver, key, map, handler in registers.
  __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
                                FixedArray::kHeaderSize + 2 * kPointerSize));
  __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));

  __ mov(cached_map, FieldOperand(cached_map, WeakCell::kValueOffset));
  // The weak cell may have been cleared.
  __ JumpIfSmi(cached_map, &pop_and_miss);
  DCHECK(!cached_map.is(StoreTransitionDescriptor::MapRegister()));
  __ mov(StoreTransitionDescriptor::MapRegister(), cached_map);

  // Call the store transition handler using the StoreTransitionDescriptor
  // calling convention.
  __ pop(key);
  __ pop(receiver);
  // Ensure that the transition handler we are going to call expects the same
  // number of stack arguments, which means that we don't have to adapt them
  // before the call.
  STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
  STATIC_ASSERT(StoreTransitionDescriptor::kStackArgumentsCount == 3);
  STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
                    StoreWithVectorDescriptor::kValue ==
                StoreTransitionDescriptor::kParameterCount -
                    StoreTransitionDescriptor::kValue);
  STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
                    StoreWithVectorDescriptor::kSlot ==
                StoreTransitionDescriptor::kParameterCount -
                    StoreTransitionDescriptor::kSlot);
  STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
                    StoreWithVectorDescriptor::kVector ==
                StoreTransitionDescriptor::kParameterCount -
                    StoreTransitionDescriptor::kVector);
  __ jmp(feedback);

  __ bind(&prepare_next);
  __ add(counter, Immediate(Smi::FromInt(3)));
  __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
  __ j(less, &next_loop);

  // We exhausted our array of map handler pairs.
  __ bind(&pop_and_miss);
  __ pop(key);
  __ pop(receiver);
  __ jmp(miss);

  __ bind(&load_smi_map);
  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
  __ jmp(&compare_map);
}

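The keyed variant advances its counter by Smi::FromInt(3) because each
feedback entry is a triple: a map weak cell, a transition-map weak cell (or
undefined), and a handler. A plain C++ model of that stride-3 scan (the struct
and function names are assumptions, not V8's):

#include <vector>

struct KeyedStoreEntry {
  const void* map;             // from the map weak cell
  const void* transition_map;  // nullptr models the undefined sentinel
  const void* handler;
};

// Returns the matching entry or nullptr for a miss. A non-null transition_map
// corresponds to the transition_call path above, where the handler is invoked
// with the new map in StoreTransitionDescriptor's map register.
const KeyedStoreEntry* FindKeyedStoreEntry(
    const std::vector<KeyedStoreEntry>& feedback, const void* receiver_map) {
  for (const KeyedStoreEntry& entry : feedback) {  // stride-3 walk
    if (entry.map == receiver_map) return &entry;
  }
  return nullptr;
}
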
void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // edx
  Register key = StoreWithVectorDescriptor::NameRegister();           // ecx
  Register value = StoreWithVectorDescriptor::ValueRegister();        // eax
  Register vector = StoreWithVectorDescriptor::VectorRegister();      // ebx
  Register slot = StoreWithVectorDescriptor::SlotRegister();          // edi
  Label miss;

  if (StoreWithVectorDescriptor::kPassLastArgsOnStack) {
    // Current stack layout:
    // - esp[8] -- value
    // - esp[4] -- slot
    // - esp[0] -- return address
    STATIC_ASSERT(StoreDescriptor::kStackArgumentsCount == 2);
    STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
    if (in_frame) {
      __ RecordComment("[ StoreDescriptor -> StoreWithVectorDescriptor");
      // If the vector is not on the stack, then insert the vector beneath
      // the return address in order to prepare for calling the handler with
      // the StoreWithVector calling convention.
      __ push(Operand(esp, 0));
      __ mov(Operand(esp, 4), StoreWithVectorDescriptor::VectorRegister());
      __ RecordComment("]");
    } else {
      __ mov(vector, Operand(esp, 1 * kPointerSize));
    }
    __ mov(slot, Operand(esp, 2 * kPointerSize));
  }

  Register scratch = value;
  __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
                               FixedArray::kHeaderSize));

  // Is it a weak cell?
  Label try_array;
  Label not_array, smi_key, key_okay;
  __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex);
  __ j(not_equal, &try_array);
  HandleMonomorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);

  // Is it a fixed array?
  __ bind(&try_array);
  __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
  __ j(not_equal, &not_array);
  HandlePolymorphicKeyedStoreCase(masm, receiver, key, vector, slot, scratch,
                                  &miss);

  __ bind(&not_array);
  Label try_poly_name;
  __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
  __ j(not_equal, &try_poly_name);

  Handle<Code> megamorphic_stub =
      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
  __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);

  __ bind(&try_poly_name);
  // We might have a name in feedback, and a fixed array in the next slot.
  __ cmp(key, scratch);
  __ j(not_equal, &miss);
  // If the name comparison succeeded, we know we have a fixed array with
  // at least one map/handler pair.
  __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
                               FixedArray::kHeaderSize + kPointerSize));
  HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, false,
                             &miss);

  __ bind(&miss);
  KeyedStoreIC::GenerateMiss(masm);
}

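GenerateImpl dispatches on what the feedback slot holds, in a fixed order: a
WeakCell means monomorphic, a FixedArray means polymorphic, the megamorphic
symbol routes to the stub cache, and a Name equal to the key means a named
keyed store whose map/handler array lives in the next slot. A compact model of
that decision tree (the enum is illustrative; the stub really compares map
words and root constants):

enum class FeedbackKind { kWeakCell, kFixedArray, kMegamorphicSymbol, kName };

const char* DispatchKeyedStore(FeedbackKind slot, bool key_matches_name) {
  switch (slot) {
    case FeedbackKind::kWeakCell:          return "monomorphic handler";
    case FeedbackKind::kFixedArray:        return "polymorphic handler";
    case FeedbackKind::kMegamorphicSymbol: return "megamorphic stub cache";
    case FeedbackKind::kName:
      // The next slot holds a map/handler array for this property name.
      return key_matches_name ? "named polymorphic handler" : "miss";
  }
  return "miss";  // unreachable; keeps compilers happy
}
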
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(ebx);
  CallICStub stub(isolate(), state());
@@ -212,274 +212,6 @@ MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {

#define __ ACCESS_MASM(masm)


void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch = edi;
  DCHECK(!AreAliased(receiver, key, value, target_map, scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(
        receiver, scratch, allocation_memento_found);
  }

  // Set transitioned map.
  __ mov(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Return address is on the stack.
  DCHECK(receiver.is(edx));
  DCHECK(key.is(ecx));
  DCHECK(value.is(eax));
  DCHECK(target_map.is(ebx));

  Label loop, entry, convert_hole, gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
  __ j(equal, &only_change_map);

  __ push(eax);
  __ push(ebx);
  __ push(esi);

  __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));

  // Allocate new FixedDoubleArray.
  // edx: receiver
  // edi: length of source FixedArray (smi-tagged)
  AllocationFlags flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT);
  __ Allocate(FixedDoubleArray::kHeaderSize, times_8, edi,
              REGISTER_VALUE_IS_SMI, eax, ebx, no_reg, &gc_required, flags);

  // eax: destination FixedDoubleArray
  // edi: number of elements
  // edx: receiver
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         Immediate(masm->isolate()->factory()->fixed_double_array_map()));
  __ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi);
  __ mov(esi, FieldOperand(edx, JSObject::kElementsOffset));
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
  __ mov(ebx, eax);
  __ RecordWriteField(edx, JSObject::kElementsOffset, ebx, edi, kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

  __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));

  // Prepare for conversion loop.
  ExternalReference canonical_the_hole_nan_reference =
      ExternalReference::address_of_the_hole_nan();
  __ jmp(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);

  // Restore registers before jumping into runtime.
  __ pop(esi);
  __ pop(ebx);
  __ pop(eax);
  __ jmp(fail);

  // Convert and copy elements.
  // esi: source FixedArray
  __ bind(&loop);
  __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize));
  // ebx: current element from source
  // edi: index of current element
  __ JumpIfNotSmi(ebx, &convert_hole);

  // Normal smi, convert it to double and store.
  __ SmiUntag(ebx);
  __ push(ebx);
  __ fild_s(Operand(esp, 0));
  __ pop(ebx);
  __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
  __ jmp(&entry);

  // Found hole, store hole_nan_as_double instead.
  __ bind(&convert_hole);

  if (FLAG_debug_code) {
    __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
    __ Assert(equal, kObjectFoundInSmiOnlyArray);
  }

  __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
  __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));

  __ bind(&entry);
  __ sub(edi, Immediate(Smi::FromInt(1)));
  __ j(not_sign, &loop);

  // Restore registers.
  __ pop(esi);
  __ pop(ebx);
  __ pop(eax);

  __ bind(&only_change_map);
  // eax: value
  // ebx: target map
  // Set transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi, kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
}

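The transition above widens each smi to a double in the new backing store and
writes a canonical "hole NaN" for holes, so the array keeps its gaps. A
standalone sketch of the same conversion (the hole sentinel and NaN payload
below are stand-ins, not V8's constants):

#include <cstdint>
#include <cstring>
#include <vector>

namespace {

const int32_t kHoleSentinel = INT32_MIN;              // stand-in for the hole
const uint64_t kHoleNanBits = 0x7FF7FFFFFFFFFFFFull;  // assumed payload

std::vector<double> SmiArrayToDoubleArray(const std::vector<int32_t>& src) {
  std::vector<double> dst(src.size());
  for (size_t i = 0; i < src.size(); ++i) {
    if (src[i] == kHoleSentinel) {
      // Holes become a NaN with a fixed bit pattern, like the fld_d/fstp_d
      // of canonical_the_hole_nan_reference above.
      std::memcpy(&dst[i], &kHoleNanBits, sizeof(double));
    } else {
      dst[i] = static_cast<double>(src[i]);  // plain widening, like fild/fstp
    }
  }
  return dst;
}

}  // namespace
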
void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Return address is on the stack.
  DCHECK(receiver.is(edx));
  DCHECK(key.is(ecx));
  DCHECK(value.is(eax));
  DCHECK(target_map.is(ebx));

  Label loop, entry, convert_hole, gc_required, only_change_map, success;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
  __ j(equal, &only_change_map);

  __ push(esi);
  __ push(eax);
  __ push(edx);
  __ push(ebx);

  __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));

  // Allocate new FixedArray.
  // ebx: length of source FixedDoubleArray (smi-tagged)
  __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
  __ Allocate(edi, eax, esi, no_reg, &gc_required, NO_ALLOCATION_FLAGS);

  // eax: destination FixedArray
  // ebx: number of elements
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         Immediate(masm->isolate()->factory()->fixed_array_map()));
  __ mov(FieldOperand(eax, FixedArray::kLengthOffset), ebx);
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  Label initialization_loop, initialization_loop_entry;
  __ jmp(&initialization_loop_entry, Label::kNear);
  __ bind(&initialization_loop);
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
         masm->isolate()->factory()->the_hole_value());
  __ bind(&initialization_loop_entry);
  __ sub(ebx, Immediate(Smi::FromInt(1)));
  __ j(not_sign, &initialization_loop);

  __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
  __ jmp(&entry);

  // ebx: target map
  // edx: receiver
  // Set transitioned map.
  __ bind(&only_change_map);
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi, kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ jmp(&success);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(ebx);
  __ pop(edx);
  __ pop(eax);
  __ pop(esi);
  __ jmp(fail);

  // Box doubles into heap numbers.
  // edi: source FixedDoubleArray
  // eax: destination FixedArray
  __ bind(&loop);
  // ebx: index of current element (smi-tagged)
  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
  __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
  __ j(equal, &convert_hole);

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
  // edx: new heap number
  __ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
  __ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
  __ mov(esi, FieldOperand(edi, ebx, times_4, offset));
  __ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi);
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
  __ mov(esi, ebx);
  __ RecordWriteArray(eax, edx, esi, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&entry, Label::kNear);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
         masm->isolate()->factory()->the_hole_value());

  __ bind(&entry);
  __ sub(ebx, Immediate(Smi::FromInt(1)));
  __ j(not_sign, &loop);

  __ pop(ebx);
  __ pop(edx);
  // ebx: target map
  // edx: receiver
  // Set transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi, kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
  __ RecordWriteField(edx, JSObject::kElementsOffset, eax, edi, kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

  // Restore registers.
  __ pop(eax);
  __ pop(esi);

  __ bind(&success);
}

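Going the other way, the loop above recognizes a hole by comparing only the
upper 32 bits of the double against kHoleNanUpper32; the pattern is chosen so
that no ordinary floating-point computation produces it. A sketch of that
check (the constant is assumed, not copied from V8):

#include <cstdint>
#include <cstring>

const uint32_t kAssumedHoleNanUpper32 = 0x7FF7FFFF;  // illustrative value

bool IsHoleNan(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return static_cast<uint32_t>(bits >> 32) == kAssumedHoleNanUpper32;
}
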
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Factory* factory,
                                       Register string,
@@ -701,65 +701,6 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
  cmpb(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
}

void MacroAssembler::CheckFastObjectElements(Register map,
                                             Label* fail,
                                             Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
  j(below_equal, fail, distance);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Immediate(Map::kMaximumBitField2FastHoleyElementValue));
  j(above, fail, distance);
}


void MacroAssembler::CheckFastSmiElements(Register map,
                                          Label* fail,
                                          Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
  j(above, fail, distance);
}

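Both checks lean on the STATIC_ASSERTs: the four fast ElementsKind values are
contiguous, so "fast smi-only" and "fast object" elements each reduce to a
single unsigned range test on the encoded kind. A minimal model with the same
ordering assumption (the bit-field decoding of Map::kBitField2Offset is
elided):

#include <cstdint>

enum AssumedElementsKind : uint8_t {
  FAST_SMI_ELEMENTS = 0,
  FAST_HOLEY_SMI_ELEMENTS = 1,
  FAST_ELEMENTS = 2,
  FAST_HOLEY_ELEMENTS = 3,
};

bool IsFastSmiElementsKind(uint8_t kind) {
  return kind <= FAST_HOLEY_SMI_ELEMENTS;  // one comparison, like j(above)
}

bool IsFastObjectElementsKind(uint8_t kind) {
  // Two comparisons bracket the range, like j(below_equal) + j(above).
  return kind > FAST_HOLEY_SMI_ELEMENTS && kind <= FAST_HOLEY_ELEMENTS;
}
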
void MacroAssembler::StoreNumberToDoubleElements(
    Register maybe_number,
    Register elements,
    Register key,
    Register scratch,
    Label* fail,
    int elements_offset) {
  Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value;
  JumpIfSmi(maybe_number, &smi_value, Label::kNear);

  CheckMap(maybe_number,
           isolate()->factory()->heap_number_map(),
           fail,
           DONT_DO_SMI_CHECK);

  fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
  jmp(&done, Label::kNear);

  bind(&smi_value);
  // Value is a smi. Convert to a double and store.
  // Preserve original value.
  mov(scratch, maybe_number);
  SmiUntag(scratch);
  push(scratch);
  fild_s(Operand(esp, 0));
  pop(scratch);
  bind(&done);
  fstp_d(FieldOperand(elements, key, times_4,
                      FixedDoubleArray::kHeaderSize - elements_offset));
}


void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
  cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
}
@@ -2230,28 +2171,6 @@ void MacroAssembler::LoadGlobalProxy(Register dst) {
  mov(dst, ContextOperand(dst, Context::GLOBAL_PROXY_INDEX));
}


void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch,
    Label* no_map_match) {
  DCHECK(IsFastElementsKind(expected_kind));
  DCHECK(IsFastElementsKind(transitioned_kind));

  // Check that map_in_out is the cached Array map for expected_kind in the
  // native context.
  mov(scratch, NativeContextOperand());
  cmp(map_in_out,
      ContextOperand(scratch, Context::ArrayMapIndex(expected_kind)));
  j(not_equal, no_map_match);

  // Use the transitioned cached map.
  mov(map_in_out,
      ContextOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}


void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the native context from the current context.
  mov(function, NativeContextOperand());
@@ -3009,43 +2928,6 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
  cmp(scratch_reg, Immediate(isolate()->factory()->allocation_memento_map()));
}


void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  DCHECK(!scratch1.is(scratch0));
  Factory* factory = isolate()->factory();
  Register current = scratch0;
  Label loop_again, end;

  // Load the object's prototype (via its map); bail out if the chain is
  // already at null.
  mov(current, object);
  mov(current, FieldOperand(current, HeapObject::kMapOffset));
  mov(current, FieldOperand(current, Map::kPrototypeOffset));
  cmp(current, Immediate(factory->null_value()));
  j(equal, &end);

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  mov(current, FieldOperand(current, HeapObject::kMapOffset));
  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  CmpInstanceType(current, JS_OBJECT_TYPE);
  j(below, found);
  mov(scratch1, FieldOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  cmp(scratch1, Immediate(DICTIONARY_ELEMENTS));
  j(equal, found);
  mov(current, FieldOperand(current, Map::kPrototypeOffset));
  cmp(current, Immediate(factory->null_value()));
  j(not_equal, &loop_again);

  bind(&end);
}

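In C++ terms the walk above follows map->prototype links and reports "found"
for any prototype that is not an ordinary JSObject or whose elements kind is
DICTIONARY_ELEMENTS. A standalone model (the types are mine, not V8's object
layout):

struct Map;
struct HeapObject { const Map* map; };
struct Map {
  const HeapObject* prototype;   // nullptr models the null prototype
  bool is_js_object;             // instance type >= JS_OBJECT_TYPE
  bool has_dictionary_elements;  // decoded ElementsKindBits == DICTIONARY
};

bool DictionaryInPrototypeChain(const HeapObject* object) {
  const HeapObject* current = object->map->prototype;
  while (current != nullptr) {
    const Map* map = current->map;
    // Proxies and other special receivers sort below JS_OBJECT_TYPE; the
    // assembly conservatively jumps to found for them as well.
    if (!map->is_js_object || map->has_dictionary_elements) return true;
    current = map->prototype;
  }
  return false;
}
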
void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
  DCHECK(!dividend.is(eax));
  DCHECK(!dividend.is(edx));
@@ -263,16 +263,6 @@ class MacroAssembler: public Assembler {
  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst);

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
                                           ElementsKind transitioned_kind,
                                           Register map_in_out,
                                           Register scratch,
                                           Label* no_map_match);

  // Load the global function with the given index.
  void LoadGlobalFunction(int index, Register function);
@@ -381,23 +371,6 @@ class MacroAssembler: public Assembler {
  // Compare instance type for map.
  void CmpInstanceType(Register map, InstanceType type);

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements. Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map, Label* fail,
                               Label::Distance distance = Label::kFar);

  // Check if a map for a JSObject indicates that the object has fast smi only
  // elements. Jump to the specified label if it does not.
  void CheckFastSmiElements(Register map, Label* fail,
                            Label::Distance distance = Label::kFar);

  // Check to see if maybe_number can be stored as a double in
  // FastDoubleElements. If it can, store it at the index specified by key in
  // the FastDoubleElements array elements, otherwise jump to fail.
  void StoreNumberToDoubleElements(Register maybe_number, Register elements,
                                   Register key, Register scratch, Label* fail,
                                   int offset = 0);

  // Compare an object's map with the specified map.
  void CompareMap(Register obj, Handle<Map> map);
@@ -922,20 +895,6 @@ class MacroAssembler: public Assembler {
                                       Register scratch_reg,
                                       Label* no_memento_found);

  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
                                         Register scratch_reg,
                                         Label* memento_found) {
    Label no_memento_found;
    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
                                    &no_memento_found);
    j(equal, memento_found);
    bind(&no_memento_found);
  }

  // Jumps to found label if a prototype map has dictionary elements.
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
                                        Register scratch1, Label* found);

 private:
  bool generating_stub_;
  bool has_frame_;
@@ -21791,10 +21791,10 @@ void TestStubCache(bool primary) {
  // Enforce recompilation of IC stubs that access megamorphic stub cache
  // to respect enabled native code counters and stub cache test flags.
  i::CodeStub::Major code_stub_keys[] = {
      i::CodeStub::LoadIC,        i::CodeStub::LoadICTrampoline,
      i::CodeStub::KeyedLoadICTF, i::CodeStub::KeyedLoadICTrampolineTF,
      i::CodeStub::StoreIC,       i::CodeStub::StoreICTrampoline,
      i::CodeStub::KeyedStoreIC,  i::CodeStub::KeyedStoreICTrampoline,
      i::CodeStub::LoadIC,        i::CodeStub::LoadICTrampoline,
      i::CodeStub::KeyedLoadICTF, i::CodeStub::KeyedLoadICTrampolineTF,
      i::CodeStub::StoreIC,       i::CodeStub::StoreICTrampoline,
      i::CodeStub::KeyedStoreICTF, i::CodeStub::KeyedStoreICTrampolineTF,
  };
  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
  i::Heap* heap = i_isolate->heap();
@@ -22718,10 +22718,10 @@ TEST(AccessCheckInIC) {
  // Enforce recompilation of IC stubs that access megamorphic stub cache
  // to respect enabled native code counters and stub cache test flags.
  i::CodeStub::Major code_stub_keys[] = {
      i::CodeStub::LoadIC,        i::CodeStub::LoadICTrampoline,
      i::CodeStub::KeyedLoadICTF, i::CodeStub::KeyedLoadICTrampolineTF,
      i::CodeStub::StoreIC,       i::CodeStub::StoreICTrampoline,
      i::CodeStub::KeyedStoreIC,  i::CodeStub::KeyedStoreICTrampoline,
      i::CodeStub::LoadIC,        i::CodeStub::LoadICTrampoline,
      i::CodeStub::KeyedLoadICTF, i::CodeStub::KeyedLoadICTrampolineTF,
      i::CodeStub::StoreIC,       i::CodeStub::StoreICTrampoline,
      i::CodeStub::KeyedStoreICTF, i::CodeStub::KeyedStoreICTrampolineTF,
  };
  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
  i::Heap* heap = i_isolate->heap();