Reshuffle registers in JSConstructStub to avoid trashing the constructor and new.target on the fast path (so we don't need to push/pop them).
This CL also fixes the register usage in MacroAssembler::Allocate() that was broken by 2fc2cb99 (r32144).
BUG=chromium:560239
LOG=Y
Review URL: https://codereview.chromium.org/1468073004
Cr-Commit-Position: refs/heads/master@{#32219}
Commit: 0ef5ad5ab9 (parent: e777784fb4)
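For orientation before the per-architecture hunks: the pattern this CL converges on (sketched here from the ARM hunks below, not verbatim) is to keep the constructor in r1 and new.target in r3 live across the fast path, and to spill them only around the two runtime calls that can clobber them.

// Condensed, illustrative view of the ARM slow paths after this CL;
// r1 = constructor, r3 = new target, r2 = initial map.
// Slack-tracking finalization: save r1/r3/r2, pass the map as the argument.
__ Push(r1, r3, r2);
__ push(r2);
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
__ Pop(r1, r3, r2);
// Fallback allocation: push the pair twice; the second pair is consumed as
// the runtime call's arguments.
__ Push(r1, r3);
__ Push(r1, r3);  // constructor function, new target
__ CallRuntime(Runtime::kNewObject, 2);
__ mov(r4, r0);   // r4 = allocated JSObject
__ Pop(r1, r3);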
@@ -369,9 +369,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ push(r0);

if (create_implicit_receiver) {
__ push(r1);
__ push(r3);

// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
Label rt_call, allocated;

@@ -399,6 +396,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// initial map's instance type would be JS_FUNCTION_TYPE.
// r1: constructor function
// r2: initial map
// r3: new target
__ CompareInstanceType(r2, r5, JS_FUNCTION_TYPE);
__ b(eq, &rt_call);

@@ -407,22 +405,24 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
MemOperand bit_field3 = FieldMemOperand(r2, Map::kBitField3Offset);
// Check if slack tracking is enabled.
__ ldr(r4, bit_field3);
__ DecodeField<Map::Counter>(r3, r4);
__ cmp(r3, Operand(Map::kSlackTrackingCounterEnd));
__ DecodeField<Map::Counter>(r0, r4);
__ cmp(r0, Operand(Map::kSlackTrackingCounterEnd));
__ b(lt, &allocate);
// Decrease generous allocation count.
__ sub(r4, r4, Operand(1 << Map::Counter::kShift));
__ str(r4, bit_field3);
__ cmp(r3, Operand(Map::kSlackTrackingCounterEnd));
__ cmp(r0, Operand(Map::kSlackTrackingCounterEnd));
__ b(ne, &allocate);

__ Push(r1, r2);
// Push the constructor, new_target and map to the stack, and
// the map again as an argument to the runtime call.
__ Push(r1, r3, r2);

__ push(r2); // r2 = intial map
__ push(r2);
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);

__ pop(r2);
__ pop(r1);
__ Pop(r1, r3, r2);
__ mov(r0, Operand(Map::kSlackTrackingCounterEnd - 1));

__ bind(&allocate);
}

@@ -430,17 +430,20 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Now allocate the JSObject on the heap.
// r1: constructor function
// r2: initial map
Label rt_call_reload_new_target;
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
// r3: new target
// r0: slack tracking counter (non-API function case)
__ ldrb(r9, FieldMemOperand(r2, Map::kInstanceSizeOffset));

__ Allocate(r3, r4, r3, r6, &rt_call_reload_new_target, SIZE_IN_WORDS);
__ Allocate(r9, r4, r9, r6, &rt_call, SIZE_IN_WORDS);

// Allocated the JSObject, now initialize the fields. Map is set to
// initial map and properties and elements are set to empty fixed array.
// r1: constructor function
// r2: initial map
// r3: new target
// r4: JSObject (not tagged)
// r3: start of next object
// r9: start of next object
// r0: slack tracking counter (non-API function case)
__ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
__ mov(r5, r4);
STATIC_ASSERT(0 * kPointerSize == JSObject::kMapOffset);

@@ -449,56 +452,49 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ str(r6, MemOperand(r5, kPointerSize, PostIndex));
STATIC_ASSERT(2 * kPointerSize == JSObject::kElementsOffset);
__ str(r6, MemOperand(r5, kPointerSize, PostIndex));
STATIC_ASSERT(3 * kPointerSize == JSObject::kHeaderSize);

// Fill all the in-object properties with the appropriate filler.
// r1: constructor function
// r2: initial map
// r4: JSObject (not tagged)
// r3: start of next object
// r5: First in-object property of JSObject (not tagged)
DCHECK_EQ(3 * kPointerSize, JSObject::kHeaderSize);
__ LoadRoot(r6, Heap::kUndefinedValueRootIndex);

if (!is_api_function) {
Label no_inobject_slack_tracking;

// Check if slack tracking is enabled.
__ ldr(ip, FieldMemOperand(r2, Map::kBitField3Offset));
__ DecodeField<Map::Counter>(ip);
__ cmp(ip, Operand(Map::kSlackTrackingCounterEnd));
__ cmp(r0, Operand(Map::kSlackTrackingCounterEnd));
__ b(lt, &no_inobject_slack_tracking);

// Allocate object with a slack.
__ ldr(r2, FieldMemOperand(r2, Map::kInstanceAttributesOffset));
__ Ubfx(r2, r2, Map::kUnusedPropertyFieldsByte * kBitsPerByte,
kBitsPerByte);
__ sub(r0, r3, Operand(r2, LSL, kPointerSizeLog2));
__ sub(r0, r9, Operand(r2, LSL, kPointerSizeLog2));
// r0: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
__ cmp(r5, r0);
__ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
}
__ InitializeFieldsWithFiller(r5, r0, r6);
// To allow for truncation.

// To allow truncation fill the remaining fields with one pointer
// filler map.
__ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
// Fill the remaining fields with one pointer filler map.

__ bind(&no_inobject_slack_tracking);
}

__ InitializeFieldsWithFiller(r5, r3, r6);
__ InitializeFieldsWithFiller(r5, r9, r6);

// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on.
__ add(r4, r4, Operand(kHeapObjectTag));

// Continue with JSObject being successfully allocated
// r1: constructor function
// r3: new target
// r4: JSObject
__ jmp(&allocated);

// Reload the new target and fall-through.
__ bind(&rt_call_reload_new_target);
__ ldr(r3, MemOperand(sp, 0 * kPointerSize));
}

// Allocate the new receiver object using the runtime call.

@@ -506,19 +502,20 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: new target
__ bind(&rt_call);

__ push(r1); // constructor function
__ push(r3); // new target
// Push the constructor and new_target twice, second pair as arguments
// to the runtime call.
__ Push(r1, r3);
__ Push(r1, r3); // constructor function, new target
__ CallRuntime(Runtime::kNewObject, 2);
__ mov(r4, r0);
__ Pop(r1, r3);

// Receiver for constructor call allocated.
// r1: constructor function
// r3: new target
// r4: JSObject
__ bind(&allocated);

// Restore the parameters.
__ pop(r3);
__ pop(r1);

// Retrieve smi-tagged arguments count from the stack.
__ ldr(r0, MemOperand(sp));
}

@@ -1590,7 +1590,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ add(r9, r9, Operand(Heap::kSloppyArgumentsObjectSize));

// Do the allocation of all three objects in one go.
__ Allocate(r9, r0, r4, r9, &runtime, TAG_OBJECT);
__ Allocate(r9, r0, r9, r4, &runtime, TAG_OBJECT);

// r0 = address of new object(s) (tagged)
// r2 = argument count (smi-tagged)

@@ -1661,11 +1661,7 @@ void MacroAssembler::Allocate(int object_size,
return;
}

DCHECK(!result.is(scratch1));
DCHECK(!result.is(scratch2));
DCHECK(!scratch1.is(scratch2));
DCHECK(!scratch1.is(ip));
DCHECK(!scratch2.is(ip));
DCHECK(!AreAliased(result, scratch1, scratch2, ip));

// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {

@@ -1682,48 +1678,46 @@ void MacroAssembler::Allocate(int object_size,
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);

intptr_t top =
reinterpret_cast<intptr_t>(allocation_top.address());
intptr_t limit =
reinterpret_cast<intptr_t>(allocation_limit.address());
intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
DCHECK(result.code() < ip.code());

// Set up allocation top address register.
Register topaddr = scratch1;
mov(topaddr, Operand(allocation_top));

Register top_address = scratch1;
// This code stores a temporary value in ip. This is OK, as the code below
// does not need ip for implicit literal generation.
Register alloc_limit = ip;
Register result_end = scratch2;
mov(top_address, Operand(allocation_top));

if ((flags & RESULT_CONTAINS_TOP) == 0) {
// Load allocation top into result and allocation limit into ip.
ldm(ia, topaddr, result.bit() | ip.bit());
// Load allocation top into result and allocation limit into alloc_limit.
ldm(ia, top_address, result.bit() | alloc_limit.bit());
} else {
if (emit_debug_code()) {
// Assert that result actually contains top on entry. ip is used
// immediately below so this use of ip does not cause difference with
// respect to register content between debug and release mode.
ldr(ip, MemOperand(topaddr));
cmp(result, ip);
// Assert that result actually contains top on entry.
ldr(alloc_limit, MemOperand(top_address));
cmp(result, alloc_limit);
Check(eq, kUnexpectedAllocationTop);
}
// Load allocation limit into ip. Result already contains allocation top.
ldr(ip, MemOperand(topaddr, limit - top));
// Load allocation limit. Result already contains allocation top.
ldr(alloc_limit, MemOperand(top_address, limit - top));
}

if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
Label aligned;
b(eq, &aligned);
if ((flags & PRETENURE) != 0) {
cmp(result, Operand(ip));
cmp(result, Operand(alloc_limit));
b(hs, gc_required);
}
mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
bind(&aligned);
}

@@ -1743,15 +1737,15 @@ void MacroAssembler::Allocate(int object_size,
shift += 8;
Operand bits_operand(bits);
DCHECK(bits_operand.instructions_required(this) == 1);
add(scratch2, source, bits_operand, SetCC, cond);
source = scratch2;
add(result_end, source, bits_operand, SetCC, cond);
source = result_end;
cond = cc;
}
}
b(cs, gc_required);
cmp(scratch2, Operand(ip));
cmp(result_end, Operand(alloc_limit));
b(hi, gc_required);
str(scratch2, MemOperand(topaddr));
str(result_end, MemOperand(top_address));

// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {

@@ -1774,15 +1768,11 @@ void MacroAssembler::Allocate(Register object_size, Register result,
return;
}

// Assert that the register arguments are different and that none of
// them are ip. ip is used explicitly in the code generated below.
DCHECK(!result.is(scratch));
DCHECK(!result.is(result_end));
DCHECK(!scratch.is(result_end));
DCHECK(!object_size.is(ip));
DCHECK(!result.is(ip));
DCHECK(!scratch.is(ip));
DCHECK(!result_end.is(ip));
// |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
// is not specified. Other registers must not overlap.
DCHECK(!AreAliased(object_size, result, scratch, ip));
DCHECK(!AreAliased(result_end, result, scratch, ip));
DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));

// Check relative positions of allocation top and limit addresses.
// The values must be adjacent in memory to allow the use of LDM.

@@ -1792,33 +1782,30 @@ void MacroAssembler::Allocate(Register object_size, Register result,
AllocationUtils::GetAllocationTopReference(isolate(), flags);
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
intptr_t top =
reinterpret_cast<intptr_t>(allocation_top.address());
intptr_t limit =
reinterpret_cast<intptr_t>(allocation_limit.address());
intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
DCHECK(result.code() < ip.code());

// Set up allocation top address.
Register topaddr = scratch;
mov(topaddr, Operand(allocation_top));

// Set up allocation top address and allocation limit registers.
Register top_address = scratch;
// This code stores a temporary value in ip. This is OK, as the code below
// does not need ip for implicit literal generation.
Register alloc_limit = ip;
mov(top_address, Operand(allocation_top));

if ((flags & RESULT_CONTAINS_TOP) == 0) {
// Load allocation top into result and allocation limit into ip.
ldm(ia, topaddr, result.bit() | ip.bit());
// Load allocation top into result and allocation limit into alloc_limit.
ldm(ia, top_address, result.bit() | alloc_limit.bit());
} else {
if (emit_debug_code()) {
// Assert that result actually contains top on entry. ip is used
// immediately below so this use of ip does not cause difference with
// respect to register content between debug and release mode.
ldr(ip, MemOperand(topaddr));
cmp(result, ip);
// Assert that result actually contains top on entry.
ldr(alloc_limit, MemOperand(top_address));
cmp(result, alloc_limit);
Check(eq, kUnexpectedAllocationTop);
}
// Load allocation limit into ip. Result already contains allocation top.
ldr(ip, MemOperand(topaddr, limit - top));
// Load allocation limit. Result already contains allocation top.
ldr(alloc_limit, MemOperand(top_address, limit - top));
}

if ((flags & DOUBLE_ALIGNMENT) != 0) {

@@ -1829,7 +1816,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
Label aligned;
b(eq, &aligned);
if ((flags & PRETENURE) != 0) {
cmp(result, Operand(ip));
cmp(result, Operand(alloc_limit));
b(hs, gc_required);
}
mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));

@@ -1846,7 +1833,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
add(result_end, result, Operand(object_size), SetCC);
}
b(cs, gc_required);
cmp(result_end, Operand(ip));
cmp(result_end, Operand(alloc_limit));
b(hi, gc_required);

// Update allocation top. result temporarily holds the new top.

@@ -1854,7 +1841,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
tst(result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace);
}
str(result_end, MemOperand(topaddr));
str(result_end, MemOperand(top_address));

// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {

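The MacroAssembler::Allocate() hunks above are largely a renaming that makes the register roles explicit instead of reusing bare scratch registers and ip. As a summary, taken from the declarations in the first ARM overload above (the Register-sized overload maps scratch rather than scratch1, and result_end is a parameter there):

// Register roles in the ARM MacroAssembler::Allocate(int object_size, ...) overload:
Register top_address = scratch1;  // address of the allocation top word
Register result_end  = scratch2;  // computed end of the new object (the new top)
Register alloc_limit = ip;        // allocation limit; ip is free here because
                                  // no implicit literal generation needs it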
@@ -366,11 +366,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ AssertUndefinedOrAllocationSite(allocation_site, x10);
__ SmiTag(argc);
if (create_implicit_receiver) {
__ Push(allocation_site, argc, constructor, new_target);
} else {
__ Push(allocation_site, argc);
}
__ Push(allocation_site, argc);

if (create_implicit_receiver) {
// sp[0]: new.target

@@ -423,23 +419,21 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Cmp(constructon_count, Operand(Map::kSlackTrackingCounterEnd));
__ B(ne, &allocate);

// Push the constructor and map to the stack, and the map again
// as argument to the runtime call.
__ Push(constructor, init_map, init_map);
// Push the constructor, new_target and map to the stack, and
// the map again as an argument to the runtime call.
__ Push(constructor, new_target, init_map, init_map);
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
__ Pop(init_map, constructor);
__ Pop(init_map, new_target, constructor);
__ Mov(constructon_count, Operand(Map::kSlackTrackingCounterEnd - 1));
__ Bind(&allocate);
}

// Now allocate the JSObject on the heap.
Label rt_call_reload_new_target;
Register obj_size = x3;
Register obj_size = x10;
Register new_obj = x4;
Register next_obj = x10;
Register next_obj = obj_size; // May overlap.
__ Ldrb(obj_size, FieldMemOperand(init_map, Map::kInstanceSizeOffset));
__ Allocate(obj_size, new_obj, next_obj, x11,
&rt_call_reload_new_target, SIZE_IN_WORDS);
__ Allocate(obj_size, new_obj, next_obj, x11, &rt_call, SIZE_IN_WORDS);

// Allocated the JSObject, now initialize the fields. Map is set to
// initial map and properties and elements are set to empty fixed array.

@@ -454,6 +448,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
STATIC_ASSERT(2 * kPointerSize == JSObject::kElementsOffset);
__ Stp(empty, empty,
MemOperand(write_address, 2 * kPointerSize, PostIndex));
STATIC_ASSERT(3 * kPointerSize == JSObject::kHeaderSize);

// Fill all of the in-object properties with the appropriate filler.
Register filler = x7;

@@ -503,28 +498,26 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,

// Continue with JSObject being successfully allocated.
__ B(&allocated);

// Reload the new target and fall-through.
__ Bind(&rt_call_reload_new_target);
__ Peek(x3, 0 * kXRegSize);
}

// Allocate the new receiver object using the runtime call.
// x1: constructor function
// x3: new target
__ Bind(&rt_call);
__ Push(constructor, new_target); // arguments 1-2

// Push the constructor and new_target twice, second pair as arguments
// to the runtime call.
__ Push(constructor, new_target, constructor, new_target);
__ CallRuntime(Runtime::kNewObject, 2);
__ Mov(x4, x0);
__ Pop(new_target, constructor);

// Receiver for constructor call allocated.
// x1: constructor function
// x3: new target
// x4: JSObject
__ Bind(&allocated);

// Restore the parameters.
__ Pop(new_target);
__ Pop(constructor);

// Reload the number of arguments from the stack.
// Set it up in x0 for the function call below.
// jssp[0]: number of arguments (smi-tagged)

@@ -3081,23 +3081,24 @@ void MacroAssembler::Allocate(int object_size,
intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
DCHECK((limit - top) == kPointerSize);

// Set up allocation top address and object size registers.
// Set up allocation top address and allocation limit registers.
Register top_address = scratch1;
Register allocation_limit = scratch2;
Register alloc_limit = scratch2;
Register result_end = scratch3;
Mov(top_address, Operand(heap_allocation_top));

if ((flags & RESULT_CONTAINS_TOP) == 0) {
// Load allocation top into result and the allocation limit.
Ldp(result, allocation_limit, MemOperand(top_address));
// Load allocation top into result and allocation limit into alloc_limit.
Ldp(result, alloc_limit, MemOperand(top_address));
} else {
if (emit_debug_code()) {
// Assert that result actually contains top on entry.
Ldr(scratch3, MemOperand(top_address));
Cmp(result, scratch3);
Ldr(alloc_limit, MemOperand(top_address));
Cmp(result, alloc_limit);
Check(eq, kUnexpectedAllocationTop);
}
// Load the allocation limit. 'result' already contains the allocation top.
Ldr(allocation_limit, MemOperand(top_address, limit - top));
// Load allocation limit. Result already contains allocation top.
Ldr(alloc_limit, MemOperand(top_address, limit - top));
}

// We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have

@@ -3105,10 +3106,10 @@ void MacroAssembler::Allocate(int object_size,
STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);

// Calculate new top and bail out if new space is exhausted.
Adds(scratch3, result, object_size);
Ccmp(scratch3, allocation_limit, CFlag, cc);
Adds(result_end, result, object_size);
Ccmp(result_end, alloc_limit, CFlag, cc);
B(hi, gc_required);
Str(scratch3, MemOperand(top_address));
Str(result_end, MemOperand(top_address));

// Tag the object if requested.
if ((flags & TAG_OBJECT) != 0) {

@@ -3135,7 +3136,9 @@ void MacroAssembler::Allocate(Register object_size, Register result,
UseScratchRegisterScope temps(this);
Register scratch2 = temps.AcquireX();

DCHECK(!AreAliased(object_size, result, scratch, scratch2, result_end));
// |object_size| and |result_end| may overlap, other registers must not.
DCHECK(!AreAliased(object_size, result, scratch, scratch2));
DCHECK(!AreAliased(result_end, result, scratch, scratch2));
DCHECK(object_size.Is64Bits() && result.Is64Bits() && scratch.Is64Bits() &&
result_end.Is64Bits());

@@ -3149,23 +3152,23 @@ void MacroAssembler::Allocate(Register object_size, Register result,
intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
DCHECK((limit - top) == kPointerSize);

// Set up allocation top address and object size registers.
// Set up allocation top address and allocation limit registers.
Register top_address = scratch;
Register allocation_limit = scratch2;
Register alloc_limit = scratch2;
Mov(top_address, heap_allocation_top);

if ((flags & RESULT_CONTAINS_TOP) == 0) {
// Load allocation top into result and the allocation limit.
Ldp(result, allocation_limit, MemOperand(top_address));
// Load allocation top into result and allocation limit into alloc_limit.
Ldp(result, alloc_limit, MemOperand(top_address));
} else {
if (emit_debug_code()) {
// Assert that result actually contains top on entry.
Ldr(result_end, MemOperand(top_address));
Cmp(result, result_end);
Ldr(alloc_limit, MemOperand(top_address));
Cmp(result, alloc_limit);
Check(eq, kUnexpectedAllocationTop);
}
// Load the allocation limit. 'result' already contains the allocation top.
Ldr(allocation_limit, MemOperand(top_address, limit - top));
// Load allocation limit. Result already contains allocation top.
Ldr(alloc_limit, MemOperand(top_address, limit - top));
}

// We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have

@@ -3184,7 +3187,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
Check(eq, kUnalignedAllocationInNewSpace);
}

Ccmp(result_end, allocation_limit, CFlag, cc);
Ccmp(result_end, alloc_limit, CFlag, cc);
B(hi, gc_required);
Str(result_end, MemOperand(top_address));

@@ -258,8 +258,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
kUnexpectedNumberOfPreAllocatedPropertyFields);
}
__ InitializeFieldsWithFiller(ecx, esi, edx);

// To allow truncation fill the remaining fields with one pointer
// filler map.
__ mov(edx, factory->one_pointer_filler_map());
// Fill the remaining fields with one pointer filler map.

__ bind(&no_inobject_slack_tracking);
}

@ -376,11 +376,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
|
||||
// Preserve the incoming parameters on the stack.
|
||||
__ AssertUndefinedOrAllocationSite(a2, t0);
|
||||
__ SmiTag(a0);
|
||||
if (create_implicit_receiver) {
|
||||
__ Push(a2, a0, a1, a3);
|
||||
} else {
|
||||
__ Push(a2, a0);
|
||||
}
|
||||
__ Push(a2, a0);
|
||||
|
||||
if (create_implicit_receiver) {
|
||||
// Try to allocate the object without transitioning into C code. If any of
|
||||
@ -425,10 +421,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
|
||||
Operand(Map::kSlackTrackingCounterEnd));
|
||||
__ sw(t0, bit_field3); // In delay slot.
|
||||
|
||||
__ Push(a1, a2, a2); // a2 = Initial map.
|
||||
// Push the constructor, new_target and map to the stack, and
|
||||
// the map again as an argument to the runtime call.
|
||||
__ Push(a1, a3, a2, a2);
|
||||
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
|
||||
|
||||
__ Pop(a1, a2);
|
||||
__ Pop(a1, a3, a2);
|
||||
__ li(t2, Operand(Map::kSlackTrackingCounterEnd - 1));
|
||||
|
||||
__ bind(&allocate);
|
||||
@ -437,18 +435,20 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
|
||||
// Now allocate the JSObject on the heap.
|
||||
// a1: constructor function
|
||||
// a2: initial map
|
||||
Label rt_call_reload_new_target;
|
||||
__ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
|
||||
// a3: new target
|
||||
// t2: slack tracking counter (non-API function case)
|
||||
__ lbu(t3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
|
||||
|
||||
__ Allocate(a3, t4, t3, t6, &rt_call_reload_new_target, SIZE_IN_WORDS);
|
||||
__ Allocate(t3, t4, t3, t6, &rt_call, SIZE_IN_WORDS);
|
||||
|
||||
// Allocated the JSObject, now initialize the fields. Map is set to
|
||||
// initial map and properties and elements are set to empty fixed array.
|
||||
// a1: constructor function
|
||||
// a2: initial map
|
||||
// a3: object size
|
||||
// a3: new target
|
||||
// t4: JSObject (not tagged)
|
||||
// t3: start of next object
|
||||
// t2: slack tracking counter (non-API function case)
|
||||
__ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
|
||||
__ mov(t5, t4);
|
||||
STATIC_ASSERT(0 * kPointerSize == JSObject::kMapOffset);
|
||||
@ -457,16 +457,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
|
||||
__ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
|
||||
STATIC_ASSERT(2 * kPointerSize == JSObject::kElementsOffset);
|
||||
__ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
|
||||
STATIC_ASSERT(3 * kPointerSize == JSObject::kHeaderSize);
|
||||
__ Addu(t5, t5, Operand(3 * kPointerSize));
|
||||
|
||||
// Fill all the in-object properties with appropriate filler.
|
||||
// a1: constructor function
|
||||
// a2: initial map
|
||||
// a3: object size (in words)
|
||||
// t4: JSObject (not tagged)
|
||||
// t5: First in-object property of JSObject (not tagged)
|
||||
// t2: slack tracking counter (non-API function case)
|
||||
DCHECK_EQ(3 * kPointerSize, JSObject::kHeaderSize);
|
||||
|
||||
// Use t7 to hold undefined, which is used in several places below.
|
||||
__ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
|
||||
@ -488,9 +483,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
|
||||
Operand(a0));
|
||||
}
|
||||
__ InitializeFieldsWithFiller(t5, a0, t7);
|
||||
// To allow for truncation.
|
||||
|
||||
// To allow truncation fill the remaining fields with one pointer
|
||||
// filler map.
|
||||
__ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
|
||||
// Fill the remaining fields with one pointer filler map.
|
||||
|
||||
__ bind(&no_inobject_slack_tracking);
|
||||
}
|
||||
@ -502,12 +498,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
|
||||
__ Addu(t4, t4, Operand(kHeapObjectTag));
|
||||
|
||||
// Continue with JSObject being successfully allocated.
|
||||
// a1: constructor function
|
||||
// a3: new target
|
||||
// t4: JSObject
|
||||
__ jmp(&allocated);
|
||||
|
||||
// Reload the new target and fall-through.
|
||||
__ bind(&rt_call_reload_new_target);
|
||||
__ lw(a3, MemOperand(sp, 0 * kPointerSize));
|
||||
}
|
||||
|
||||
// Allocate the new receiver object using the runtime call.
|
||||
@ -515,18 +509,19 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
|
||||
// a3: new target
|
||||
__ bind(&rt_call);
|
||||
|
||||
__ Push(a1, a3); // constructor function, new target
|
||||
// Push the constructor and new_target twice, second pair as arguments
|
||||
// to the runtime call.
|
||||
__ Push(a1, a3, a1, a3); // constructor function, new target
|
||||
__ CallRuntime(Runtime::kNewObject, 2);
|
||||
__ mov(t4, v0);
|
||||
__ Pop(a1, a3);
|
||||
|
||||
// Receiver for constructor call allocated.
|
||||
// a1: constructor function
|
||||
// a3: new target
|
||||
// t4: JSObject
|
||||
__ bind(&allocated);
|
||||
|
||||
// Restore the parameters.
|
||||
__ Pop(a3); // new.target
|
||||
__ Pop(a1);
|
||||
|
||||
// Retrieve smi-tagged arguments count from the stack.
|
||||
__ lw(a0, MemOperand(sp));
|
||||
}
|
||||
|
@ -1688,7 +1688,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
|
||||
__ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize));
|
||||
|
||||
// Do the allocation of all three objects in one go.
|
||||
__ Allocate(t5, v0, t0, t5, &runtime, TAG_OBJECT);
|
||||
__ Allocate(t5, v0, t5, t0, &runtime, TAG_OBJECT);
|
||||
|
||||
// v0 = address of new object(s) (tagged)
|
||||
// a2 = argument count (smi-tagged)
|
||||
|
@ -3270,12 +3270,7 @@ void MacroAssembler::Allocate(int object_size,
|
||||
return;
|
||||
}
|
||||
|
||||
DCHECK(!result.is(scratch1));
|
||||
DCHECK(!result.is(scratch2));
|
||||
DCHECK(!scratch1.is(scratch2));
|
||||
DCHECK(!scratch1.is(t9));
|
||||
DCHECK(!scratch2.is(t9));
|
||||
DCHECK(!result.is(t9));
|
||||
DCHECK(!AreAliased(result, scratch1, scratch2, t9));
|
||||
|
||||
// Make object size into bytes.
|
||||
if ((flags & SIZE_IN_WORDS) != 0) {
|
||||
@ -3291,114 +3286,29 @@ void MacroAssembler::Allocate(int object_size,
|
||||
ExternalReference allocation_limit =
|
||||
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
|
||||
|
||||
intptr_t top =
|
||||
reinterpret_cast<intptr_t>(allocation_top.address());
|
||||
intptr_t limit =
|
||||
reinterpret_cast<intptr_t>(allocation_limit.address());
|
||||
intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
|
||||
intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
|
||||
DCHECK((limit - top) == kPointerSize);
|
||||
|
||||
// Set up allocation top address and object size registers.
|
||||
Register topaddr = scratch1;
|
||||
li(topaddr, Operand(allocation_top));
|
||||
|
||||
// Set up allocation top address and allocation limit registers.
|
||||
Register top_address = scratch1;
|
||||
// This code stores a temporary value in t9.
|
||||
Register alloc_limit = t9;
|
||||
Register result_end = scratch2;
|
||||
li(top_address, Operand(allocation_top));
|
||||
|
||||
if ((flags & RESULT_CONTAINS_TOP) == 0) {
|
||||
// Load allocation top into result and allocation limit into t9.
|
||||
lw(result, MemOperand(topaddr));
|
||||
lw(t9, MemOperand(topaddr, kPointerSize));
|
||||
// Load allocation top into result and allocation limit into alloc_limit.
|
||||
lw(result, MemOperand(top_address));
|
||||
lw(alloc_limit, MemOperand(top_address, kPointerSize));
|
||||
} else {
|
||||
if (emit_debug_code()) {
|
||||
// Assert that result actually contains top on entry. t9 is used
|
||||
// immediately below so this use of t9 does not cause difference with
|
||||
// respect to register content between debug and release mode.
|
||||
lw(t9, MemOperand(topaddr));
|
||||
Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
|
||||
// Assert that result actually contains top on entry.
|
||||
lw(alloc_limit, MemOperand(top_address));
|
||||
Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
|
||||
}
|
||||
// Load allocation limit into t9. Result already contains allocation top.
|
||||
lw(t9, MemOperand(topaddr, limit - top));
|
||||
}
|
||||
|
||||
if ((flags & DOUBLE_ALIGNMENT) != 0) {
|
||||
// Align the next allocation. Storing the filler map without checking top is
|
||||
// safe in new-space because the limit of the heap is aligned there.
|
||||
DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
|
||||
And(scratch2, result, Operand(kDoubleAlignmentMask));
|
||||
Label aligned;
|
||||
Branch(&aligned, eq, scratch2, Operand(zero_reg));
|
||||
if ((flags & PRETENURE) != 0) {
|
||||
Branch(gc_required, Ugreater_equal, result, Operand(t9));
|
||||
}
|
||||
li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
|
||||
sw(scratch2, MemOperand(result));
|
||||
Addu(result, result, Operand(kDoubleSize / 2));
|
||||
bind(&aligned);
|
||||
}
|
||||
|
||||
// Calculate new top and bail out if new space is exhausted. Use result
|
||||
// to calculate the new top.
|
||||
Addu(scratch2, result, Operand(object_size));
|
||||
Branch(gc_required, Ugreater, scratch2, Operand(t9));
|
||||
sw(scratch2, MemOperand(topaddr));
|
||||
|
||||
// Tag object if requested.
|
||||
if ((flags & TAG_OBJECT) != 0) {
|
||||
Addu(result, result, Operand(kHeapObjectTag));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Allocate(Register object_size, Register result,
|
||||
Register result_end, Register scratch1,
|
||||
Label* gc_required, AllocationFlags flags) {
|
||||
if (!FLAG_inline_new) {
|
||||
if (emit_debug_code()) {
|
||||
// Trash the registers to simulate an allocation failure.
|
||||
li(result, 0x7091);
|
||||
li(scratch1, 0x7191);
|
||||
li(result_end, 0x7291);
|
||||
}
|
||||
jmp(gc_required);
|
||||
return;
|
||||
}
|
||||
|
||||
DCHECK(!result.is(scratch1));
|
||||
DCHECK(!result.is(result_end));
|
||||
DCHECK(!scratch1.is(result_end));
|
||||
DCHECK(!object_size.is(t9));
|
||||
DCHECK(!scratch1.is(t9) && !result_end.is(t9) && !result.is(t9));
|
||||
|
||||
// Check relative positions of allocation top and limit addresses.
|
||||
// ARM adds additional checks to make sure the ldm instruction can be
|
||||
// used. On MIPS we don't have ldm so we don't need additional checks either.
|
||||
ExternalReference allocation_top =
|
||||
AllocationUtils::GetAllocationTopReference(isolate(), flags);
|
||||
ExternalReference allocation_limit =
|
||||
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
|
||||
intptr_t top =
|
||||
reinterpret_cast<intptr_t>(allocation_top.address());
|
||||
intptr_t limit =
|
||||
reinterpret_cast<intptr_t>(allocation_limit.address());
|
||||
DCHECK((limit - top) == kPointerSize);
|
||||
|
||||
// Set up allocation top address and object size registers.
|
||||
Register topaddr = scratch1;
|
||||
li(topaddr, Operand(allocation_top));
|
||||
|
||||
// This code stores a temporary value in t9.
|
||||
if ((flags & RESULT_CONTAINS_TOP) == 0) {
|
||||
// Load allocation top into result and allocation limit into t9.
|
||||
lw(result, MemOperand(topaddr));
|
||||
lw(t9, MemOperand(topaddr, kPointerSize));
|
||||
} else {
|
||||
if (emit_debug_code()) {
|
||||
// Assert that result actually contains top on entry. t9 is used
|
||||
// immediately below so this use of t9 does not cause difference with
|
||||
// respect to register content between debug and release mode.
|
||||
lw(t9, MemOperand(topaddr));
|
||||
Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
|
||||
}
|
||||
// Load allocation limit into t9. Result already contains allocation top.
|
||||
lw(t9, MemOperand(topaddr, limit - top));
|
||||
// Load allocation limit. Result already contains allocation top.
|
||||
lw(alloc_limit, MemOperand(top_address, limit - top));
|
||||
}
|
||||
|
||||
if ((flags & DOUBLE_ALIGNMENT) != 0) {
|
||||
@ -3409,7 +3319,87 @@ void MacroAssembler::Allocate(Register object_size, Register result,
|
||||
Label aligned;
|
||||
Branch(&aligned, eq, result_end, Operand(zero_reg));
|
||||
if ((flags & PRETENURE) != 0) {
|
||||
Branch(gc_required, Ugreater_equal, result, Operand(t9));
|
||||
Branch(gc_required, Ugreater_equal, result, Operand(alloc_limit));
|
||||
}
|
||||
li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
|
||||
sw(result_end, MemOperand(result));
|
||||
Addu(result, result, Operand(kDoubleSize / 2));
|
||||
bind(&aligned);
|
||||
}
|
||||
|
||||
// Calculate new top and bail out if new space is exhausted. Use result
|
||||
// to calculate the new top.
|
||||
Addu(result_end, result, Operand(object_size));
|
||||
Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
|
||||
sw(result_end, MemOperand(top_address));
|
||||
|
||||
// Tag object if requested.
|
||||
if ((flags & TAG_OBJECT) != 0) {
|
||||
Addu(result, result, Operand(kHeapObjectTag));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Allocate(Register object_size, Register result,
|
||||
Register result_end, Register scratch,
|
||||
Label* gc_required, AllocationFlags flags) {
|
||||
if (!FLAG_inline_new) {
|
||||
if (emit_debug_code()) {
|
||||
// Trash the registers to simulate an allocation failure.
|
||||
li(result, 0x7091);
|
||||
li(scratch, 0x7191);
|
||||
li(result_end, 0x7291);
|
||||
}
|
||||
jmp(gc_required);
|
||||
return;
|
||||
}
|
||||
|
||||
// |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
|
||||
// is not specified. Other registers must not overlap.
|
||||
DCHECK(!AreAliased(object_size, result, scratch, t9));
|
||||
DCHECK(!AreAliased(result_end, result, scratch, t9));
|
||||
DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
|
||||
|
||||
// Check relative positions of allocation top and limit addresses.
|
||||
// ARM adds additional checks to make sure the ldm instruction can be
|
||||
// used. On MIPS we don't have ldm so we don't need additional checks either.
|
||||
ExternalReference allocation_top =
|
||||
AllocationUtils::GetAllocationTopReference(isolate(), flags);
|
||||
ExternalReference allocation_limit =
|
||||
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
|
||||
intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
|
||||
intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
|
||||
DCHECK((limit - top) == kPointerSize);
|
||||
|
||||
// Set up allocation top address and allocation limit registers.
|
||||
Register top_address = scratch;
|
||||
// This code stores a temporary value in t9.
|
||||
Register alloc_limit = t9;
|
||||
li(top_address, Operand(allocation_top));
|
||||
|
||||
if ((flags & RESULT_CONTAINS_TOP) == 0) {
|
||||
// Load allocation top into result and allocation limit into alloc_limit.
|
||||
lw(result, MemOperand(top_address));
|
||||
lw(alloc_limit, MemOperand(top_address, kPointerSize));
|
||||
} else {
|
||||
if (emit_debug_code()) {
|
||||
// Assert that result actually contains top on entry.
|
||||
lw(alloc_limit, MemOperand(top_address));
|
||||
Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
|
||||
}
|
||||
// Load allocation limit. Result already contains allocation top.
|
||||
lw(alloc_limit, MemOperand(top_address, limit - top));
|
||||
}
|
||||
|
||||
if ((flags & DOUBLE_ALIGNMENT) != 0) {
|
||||
// Align the next allocation. Storing the filler map without checking top is
|
||||
// safe in new-space because the limit of the heap is aligned there.
|
||||
DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
|
||||
And(result_end, result, Operand(kDoubleAlignmentMask));
|
||||
Label aligned;
|
||||
Branch(&aligned, eq, result_end, Operand(zero_reg));
|
||||
if ((flags & PRETENURE) != 0) {
|
||||
Branch(gc_required, Ugreater_equal, result, Operand(alloc_limit));
|
||||
}
|
||||
li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
|
||||
sw(result_end, MemOperand(result));
|
||||
@ -3426,14 +3416,14 @@ void MacroAssembler::Allocate(Register object_size, Register result,
|
||||
} else {
|
||||
Addu(result_end, result, Operand(object_size));
|
||||
}
|
||||
Branch(gc_required, Ugreater, result_end, Operand(t9));
|
||||
Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
|
||||
|
||||
// Update allocation top. result temporarily holds the new top.
|
||||
if (emit_debug_code()) {
|
||||
And(t9, result_end, Operand(kObjectAlignmentMask));
|
||||
Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
|
||||
And(alloc_limit, result_end, Operand(kObjectAlignmentMask));
|
||||
Check(eq, kUnalignedAllocationInNewSpace, alloc_limit, Operand(zero_reg));
|
||||
}
|
||||
sw(result_end, MemOperand(topaddr));
|
||||
sw(result_end, MemOperand(top_address));
|
||||
|
||||
// Tag object if requested.
|
||||
if ((flags & TAG_OBJECT) != 0) {
|
||||
|
@ -373,11 +373,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
|
||||
// Preserve the incoming parameters on the stack.
|
||||
__ AssertUndefinedOrAllocationSite(a2, t0);
|
||||
__ SmiTag(a0);
|
||||
if (create_implicit_receiver) {
|
||||
__ Push(a2, a0, a1, a3);
|
||||
} else {
|
||||
__ Push(a2, a0);
|
||||
}
|
||||
__ Push(a2, a0);
|
||||
|
||||
if (create_implicit_receiver) {
|
||||
// Try to allocate the object without transitioning into C code. If any of
|
||||
@ -424,10 +420,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
|
||||
Operand(Map::kSlackTrackingCounterEnd));
|
||||
__ sw(a4, bit_field3); // In delay slot.
|
||||
|
||||
__ Push(a1, a2, a2); // a2 = Initial map.
|
||||
// Push the constructor, new_target and map to the stack, and
|
||||
// the map again as an argument to the runtime call.
|
||||
__ Push(a1, a3, a2, a2);
|
||||
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
|
||||
|
||||
__ Pop(a1, a2);
|
||||
__ Pop(a1, a3, a2);
|
||||
__ li(a6, Operand(Map::kSlackTrackingCounterEnd - 1));
|
||||
|
||||
__ bind(&allocate);
|
||||
@ -436,10 +434,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
|
||||
// Now allocate the JSObject on the heap.
|
||||
// a1: constructor function
|
||||
// a2: initial map
|
||||
Label rt_call_reload_new_target;
|
||||
__ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
|
||||
|
||||
__ Allocate(a3, t0, a4, t2, &rt_call_reload_new_target, SIZE_IN_WORDS);
|
||||
// a6: slack tracking counter (non-API function case)
|
||||
__ lbu(a4, FieldMemOperand(a2, Map::kInstanceSizeOffset));
|
||||
__ Allocate(a4, t0, a4, t2, &rt_call, SIZE_IN_WORDS);
|
||||
|
||||
// Allocated the JSObject, now initialize the fields. Map is set to
|
||||
// initial map and properties and elements are set to empty fixed array.
|
||||
@ -448,6 +445,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
|
||||
// a3: object size
|
||||
// t0: JSObject (not tagged)
|
||||
// a4: start of next object
|
||||
// a6: slack tracking counter (non-API function case)
|
||||
__ LoadRoot(t2, Heap::kEmptyFixedArrayRootIndex);
|
||||
__ mov(t1, t0);
|
||||
STATIC_ASSERT(0 * kPointerSize == JSObject::kMapOffset);
|
||||
@ -456,17 +454,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
|
||||
__ sd(t2, MemOperand(t1, JSObject::kPropertiesOffset));
|
||||
STATIC_ASSERT(2 * kPointerSize == JSObject::kElementsOffset);
|
||||
__ sd(t2, MemOperand(t1, JSObject::kElementsOffset));
|
||||
STATIC_ASSERT(3 * kPointerSize == JSObject::kHeaderSize);
|
||||
__ Daddu(t1, t1, Operand(3 * kPointerSize));
|
||||
|
||||
// Fill all the in-object properties with appropriate filler.
|
||||
// a1: constructor function
|
||||
// a2: initial map
|
||||
// a3: object size (in words)
|
||||
// t0: JSObject (not tagged)
|
||||
// a4: start of next object
|
||||
// t1: First in-object property of JSObject (not tagged)
|
||||
// a6: slack tracking counter (non-API function case)
|
||||
DCHECK_EQ(3 * kPointerSize, JSObject::kHeaderSize);
|
||||
|
||||
// Use t3 to hold undefined, which is used in several places below.
|
||||
__ LoadRoot(t3, Heap::kUndefinedValueRootIndex);
|
||||
@ -489,9 +481,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
|
||||
Operand(a0));
|
||||
}
|
||||
__ InitializeFieldsWithFiller(t1, a0, t3);
|
||||
// To allow for truncation.
|
||||
|
||||
// To allow truncation fill the remaining fields with one pointer
|
||||
// filler map.
|
||||
__ LoadRoot(t3, Heap::kOnePointerFillerMapRootIndex);
|
||||
// Fill the remaining fields with one pointer filler map.
|
||||
|
||||
__ bind(&no_inobject_slack_tracking);
|
||||
}
|
||||
@ -503,12 +496,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
|
||||
__ Daddu(t0, t0, Operand(kHeapObjectTag));
|
||||
|
||||
// Continue with JSObject being successfully allocated.
|
||||
// a1: constructor function
|
||||
// a3: new target
|
||||
// a4: JSObject
|
||||
__ jmp(&allocated);
|
||||
|
||||
// Reload the new target and fall-through.
|
||||
__ bind(&rt_call_reload_new_target);
|
||||
__ ld(a3, MemOperand(sp, 0 * kPointerSize));
|
||||
}
|
||||
|
||||
// Allocate the new receiver object using the runtime call.
|
||||
@ -516,18 +507,19 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
|
||||
// a3: new target
|
||||
__ bind(&rt_call);
|
||||
|
||||
__ Push(a1, a3); // constructor function, new target
|
||||
// Push the constructor and new_target twice, second pair as arguments
|
||||
// to the runtime call.
|
||||
__ Push(a1, a3, a1, a3); // constructor function, new target
|
||||
__ CallRuntime(Runtime::kNewObject, 2);
|
||||
__ mov(t0, v0);
|
||||
__ Pop(a1, a3);
|
||||
|
||||
// Receiver for constructor call allocated.
|
||||
// a1: constructor function
|
||||
// a3: new target
|
||||
// t0: JSObject
|
||||
__ bind(&allocated);
|
||||
|
||||
// Restore the parameters.
|
||||
__ Pop(a3); // new.target
|
||||
__ Pop(a1);
|
||||
|
||||
__ ld(a0, MemOperand(sp));
|
||||
}
|
||||
__ SmiUntag(a0);
|
||||
|
@ -1690,7 +1690,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
|
||||
__ Daddu(t1, t1, Operand(Heap::kSloppyArgumentsObjectSize));
|
||||
|
||||
// Do the allocation of all three objects in one go.
|
||||
__ Allocate(t1, v0, a4, t1, &runtime, TAG_OBJECT);
|
||||
__ Allocate(t1, v0, t1, a4, &runtime, TAG_OBJECT);
|
||||
|
||||
// v0 = address of new object(s) (tagged)
|
||||
// a2 = argument count (smi-tagged)
|
||||
|
@ -3504,12 +3504,7 @@ void MacroAssembler::Allocate(int object_size,
|
||||
return;
|
||||
}
|
||||
|
||||
DCHECK(!result.is(scratch1));
|
||||
DCHECK(!result.is(scratch2));
|
||||
DCHECK(!scratch1.is(scratch2));
|
||||
DCHECK(!scratch1.is(t9));
|
||||
DCHECK(!scratch2.is(t9));
|
||||
DCHECK(!result.is(t9));
|
||||
DCHECK(!AreAliased(result, scratch1, scratch2, t9));
|
||||
|
||||
// Make object size into bytes.
|
||||
if ((flags & SIZE_IN_WORDS) != 0) {
|
||||
@ -3525,34 +3520,35 @@ void MacroAssembler::Allocate(int object_size,
|
||||
ExternalReference allocation_limit =
|
||||
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
|
||||
|
||||
intptr_t top =
|
||||
reinterpret_cast<intptr_t>(allocation_top.address());
|
||||
intptr_t limit =
|
||||
reinterpret_cast<intptr_t>(allocation_limit.address());
|
||||
intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
|
||||
intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
|
||||
DCHECK((limit - top) == kPointerSize);
|
||||
|
||||
// Set up allocation top address and object size registers.
|
||||
Register topaddr = scratch1;
|
||||
li(topaddr, Operand(allocation_top));
|
||||
|
||||
// Set up allocation top address and allocation limit registers.
|
||||
Register top_address = scratch1;
|
||||
// This code stores a temporary value in t9.
|
||||
Register alloc_limit = t9;
|
||||
Register result_end = scratch2;
|
||||
li(top_address, Operand(allocation_top));
|
||||
|
||||
if ((flags & RESULT_CONTAINS_TOP) == 0) {
|
||||
// Load allocation top into result and allocation limit into t9.
|
||||
ld(result, MemOperand(topaddr));
|
||||
ld(t9, MemOperand(topaddr, kPointerSize));
|
||||
// Load allocation top into result and allocation limit into alloc_limit.
|
||||
ld(result, MemOperand(top_address));
|
||||
ld(alloc_limit, MemOperand(top_address, kPointerSize));
|
||||
} else {
|
||||
if (emit_debug_code()) {
|
||||
// Assert that result actually contains top on entry. t9 is used
|
||||
// immediately below so this use of t9 does not cause difference with
|
||||
// respect to register content between debug and release mode.
|
||||
ld(t9, MemOperand(topaddr));
|
||||
Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
|
||||
// Assert that result actually contains top on entry.
|
||||
ld(alloc_limit, MemOperand(top_address));
|
||||
Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
|
||||
}
|
||||
// Load allocation limit into t9. Result already contains allocation top.
|
||||
ld(t9, MemOperand(topaddr, static_cast<int32_t>(limit - top)));
|
||||
// Load allocation limit. Result already contains allocation top.
|
||||
ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
|
||||
}
|
||||
|
||||
DCHECK(kPointerSize == kDoubleSize);
|
||||
// We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
|
||||
// the same alignment on ARM64.
|
||||
STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
|
||||
|
||||
if (emit_debug_code()) {
|
||||
And(at, result, Operand(kDoubleAlignmentMask));
|
||||
Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
|
||||
@ -3560,9 +3556,9 @@ void MacroAssembler::Allocate(int object_size,
|
||||
|
||||
// Calculate new top and bail out if new space is exhausted. Use result
|
||||
// to calculate the new top.
|
||||
Daddu(scratch2, result, Operand(object_size));
|
||||
Branch(gc_required, Ugreater, scratch2, Operand(t9));
|
||||
sd(scratch2, MemOperand(topaddr));
|
||||
Daddu(result_end, result, Operand(object_size));
|
||||
Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
|
||||
sd(result_end, MemOperand(top_address));
|
||||
|
||||
// Tag object if requested.
|
||||
if ((flags & TAG_OBJECT) != 0) {
|
||||
@ -3585,11 +3581,9 @@ void MacroAssembler::Allocate(Register object_size, Register result,
|
||||
return;
|
||||
}
|
||||
|
||||
DCHECK(!result.is(scratch));
|
||||
DCHECK(!result.is(result_end));
|
||||
DCHECK(!scratch.is(result_end));
|
||||
DCHECK(!object_size.is(t9));
|
||||
DCHECK(!scratch.is(t9) && !result_end.is(t9) && !result.is(t9));
|
||||
// |object_size| and |result_end| may overlap, other registers must not.
|
||||
DCHECK(!AreAliased(object_size, result, scratch, t9));
|
||||
DCHECK(!AreAliased(result_end, result, scratch, t9));
|
||||
|
||||
// Check relative positions of allocation top and limit addresses.
|
||||
// ARM adds additional checks to make sure the ldm instruction can be
|
||||
@ -3598,34 +3592,34 @@ void MacroAssembler::Allocate(Register object_size, Register result,
|
||||
AllocationUtils::GetAllocationTopReference(isolate(), flags);
|
||||
ExternalReference allocation_limit =
|
||||
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
|
||||
intptr_t top =
|
||||
reinterpret_cast<intptr_t>(allocation_top.address());
|
||||
intptr_t limit =
|
||||
reinterpret_cast<intptr_t>(allocation_limit.address());
|
||||
intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
|
||||
intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
|
||||
DCHECK((limit - top) == kPointerSize);
|
||||
|
||||
// Set up allocation top address and object size registers.
|
||||
Register topaddr = scratch;
|
||||
li(topaddr, Operand(allocation_top));
|
||||
|
||||
Register top_address = scratch;
|
||||
// This code stores a temporary value in t9.
|
||||
Register alloc_limit = t9;
|
||||
li(top_address, Operand(allocation_top));
|
||||
|
||||
if ((flags & RESULT_CONTAINS_TOP) == 0) {
|
||||
// Load allocation top into result and allocation limit into t9.
|
||||
ld(result, MemOperand(topaddr));
|
||||
ld(t9, MemOperand(topaddr, kPointerSize));
|
||||
// Load allocation top into result and allocation limit into alloc_limit.
|
||||
ld(result, MemOperand(top_address));
|
||||
ld(alloc_limit, MemOperand(top_address, kPointerSize));
|
||||
} else {
|
||||
if (emit_debug_code()) {
|
||||
// Assert that result actually contains top on entry. t9 is used
|
||||
// immediately below so this use of t9 does not cause difference with
|
||||
// respect to register content between debug and release mode.
|
||||
ld(t9, MemOperand(topaddr));
|
||||
Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
|
||||
// Assert that result actually contains top on entry.
|
||||
ld(alloc_limit, MemOperand(top_address));
|
||||
Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
|
||||
}
|
||||
// Load allocation limit into t9. Result already contains allocation top.
|
||||
ld(t9, MemOperand(topaddr, static_cast<int32_t>(limit - top)));
|
||||
// Load allocation limit. Result already contains allocation top.
|
||||
ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
|
||||
}
|
||||
|
||||
DCHECK(kPointerSize == kDoubleSize);
|
||||
// We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
|
||||
// the same alignment on ARM64.
|
||||
STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
|
||||
|
||||
if (emit_debug_code()) {
|
||||
And(at, result, Operand(kDoubleAlignmentMask));
|
||||
Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
|
||||
@ -3640,14 +3634,14 @@ void MacroAssembler::Allocate(Register object_size, Register result,
|
||||
} else {
|
||||
Daddu(result_end, result, Operand(object_size));
|
||||
}
|
||||
Branch(gc_required, Ugreater, result_end, Operand(t9));
|
||||
Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
|
||||
|
||||
// Update allocation top. result temporarily holds the new top.
|
||||
if (emit_debug_code()) {
|
||||
And(t9, result_end, Operand(kObjectAlignmentMask));
|
||||
Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
|
||||
And(at, result_end, Operand(kObjectAlignmentMask));
|
||||
Check(eq, kUnalignedAllocationInNewSpace, at, Operand(zero_reg));
|
||||
}
|
||||
sd(result_end, MemOperand(topaddr));
|
||||
sd(result_end, MemOperand(top_address));
|
||||
|
||||
// Tag object if requested.
|
||||
if ((flags & TAG_OBJECT) != 0) {
|
||||
|
@@ -146,9 +146,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Push(rcx);

if (create_implicit_receiver) {
__ Push(rdi);
__ Push(rdx);

// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
Label rt_call, allocated;

@@ -190,6 +187,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ cmpl(rsi, Immediate(Map::kSlackTrackingCounterEnd));
__ j(not_equal, &allocate);

// Push the constructor, new_target and map to the stack, and
// the map again as an argument to the runtime call.
__ Push(rax);
__ Push(rdx);
__ Push(rdi);

@@ -206,25 +205,28 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}

// Now allocate the JSObject on the heap.
__ movzxbp(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
__ shlp(rdi, Immediate(kPointerSizeLog2));
// rdi: size of new object
__ Allocate(rdi, rbx, rdi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
__ movzxbp(r9, FieldOperand(rax, Map::kInstanceSizeOffset));
__ shlp(r9, Immediate(kPointerSizeLog2));
// r9: size of new object
__ Allocate(r9, rbx, r9, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields.
// rdi: constructor
// rdx: new target
// rax: initial map
// rbx: JSObject (not HeapObject tagged - the actual address).
// rdi: start of next object
// r9: start of next object
__ movp(Operand(rbx, JSObject::kMapOffset), rax);
__ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
__ movp(Operand(rbx, JSObject::kPropertiesOffset), rcx);
__ movp(Operand(rbx, JSObject::kElementsOffset), rcx);
// Set extra fields in the newly allocated object.
// rax: initial map
// rdx: new target
// rbx: JSObject
// rdi: start of next object
// r9: start of next object
// rsi: slack tracking counter (non-API function case)
__ leap(rcx, Operand(rbx, JSObject::kHeaderSize));
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
__ LoadRoot(r11, Heap::kUndefinedValueRootIndex);
if (!is_api_function) {
Label no_inobject_slack_tracking;

@@ -235,21 +237,23 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocate object with a slack.
__ movzxbp(rsi, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
__ negp(rsi);
__ leap(rsi, Operand(rdi, rsi, times_pointer_size, 0));
__ leap(rsi, Operand(r9, rsi, times_pointer_size, 0));
// rsi: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
__ cmpp(rcx, rsi);
__ Assert(less_equal,
kUnexpectedNumberOfPreAllocatedPropertyFields);
}
__ InitializeFieldsWithFiller(rcx, rsi, rdx);
__ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
// Fill the remaining fields with one pointer filler map.
__ InitializeFieldsWithFiller(rcx, rsi, r11);

// To allow truncation fill the remaining fields with one pointer
// filler map.
__ LoadRoot(r11, Heap::kOnePointerFillerMapRootIndex);

__ bind(&no_inobject_slack_tracking);
}

__ InitializeFieldsWithFiller(rcx, rdi, rdx);
__ InitializeFieldsWithFiller(rcx, r9, r11);

// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on.

@@ -257,32 +261,37 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ orp(rbx, Immediate(kHeapObjectTag));

// Continue with JSObject being successfully allocated
// rdi: constructor
// rdx: new target
// rbx: JSObject (tagged)
__ jmp(&allocated);
}

// Allocate the new receiver object using the runtime call.
// rdi: constructor
// rdx: new target
__ bind(&rt_call);
int offset = kPointerSize;

// Must restore rsi (context) and rdi (constructor) before calling
// runtime.
// Must restore rsi (context) before calling runtime.
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ movp(rdi, Operand(rsp, offset));

// Push the constructor and new_target twice, second pair as arguments
// to the runtime call.
__ Push(rdi);
__ Push(rdx);
__ Push(rdi); // constructor function
__ Push(rdx); // new target
__ CallRuntime(Runtime::kNewObject, 2);
__ movp(rbx, rax); // store result in rbx

// New object allocated.
// rbx: newly allocated object
__ bind(&allocated);

// Restore the parameters.
__ Pop(rdx);
__ Pop(rdi);

// Receiver for constructor call allocated.
// rdi: constructor
// rdx: new target
// rbx: newly allocated object
__ bind(&allocated);

// Retrieve smi-tagged arguments count from the stack.
__ movp(rax, Operand(rsp, 0));
__ SmiToInteger32(rax, rax);