[crankshaft] Fragmentation-free allocation folding.

The new allocation folding implementation avoids fragmentation between folded allocations. As a consequence, our heap will always be iterable, i.e. we do not have to perform a garbage collection before iterating the heap.

BUG=chromium:580959
LOG=n

Review-Url: https://codereview.chromium.org/1899813003
Cr-Commit-Position: refs/heads/master@{#36133}
hpayer 2016-05-10 04:28:53 -07:00 committed by Commit bot
parent 2fe1ee4e04
commit 61f5fbbb19
41 changed files with 1267 additions and 352 deletions
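
At a high level, allocation folding lets a dominating allocation perform a single limit check for the combined size of several allocations; each dominated ("folded") allocation afterwards only bumps the allocation top pointer. Because the dominator reserves exactly the total size and the folded allocations are laid out back to back, no filler objects are needed between them, which is what keeps the heap iterable. The following standalone sketch is illustrative only; BumpSpace, ReserveFolded and FastAllocate here are made-up names, not the V8 API:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative sketch (not V8 code): a bump-pointer space where a folding
// dominator checks the limit once for the combined size and every folded
// allocation afterwards only bumps the top pointer.
class BumpSpace {
 public:
  explicit BumpSpace(size_t capacity) : buffer_(capacity), top_(0) {}

  // Dominator check: verify that the combined size of all folded allocations
  // fits below the limit, but do not advance the top pointer yet.
  bool ReserveFolded(size_t combined_size) {
    return top_ + combined_size <= buffer_.size();  // otherwise a GC is needed
  }

  // Folded allocation: no limit check; ReserveFolded() already proved it fits.
  uint8_t* FastAllocate(size_t size) {
    uint8_t* result = &buffer_[top_];
    top_ += size;
    return result;
  }

 private:
  std::vector<uint8_t> buffer_;
  size_t top_;
};

int main() {
  BumpSpace space(1024);
  // Two objects of 32 and 64 bytes folded into a single 96-byte reservation.
  assert(space.ReserveFolded(32 + 64));
  uint8_t* a = space.FastAllocate(32);
  uint8_t* b = space.FastAllocate(64);
  assert(b == a + 32);  // contiguous, so no filler object between the two
  return 0;
}

In the patch itself the same split appears as MacroAssembler::Allocate called with ALLOCATION_FOLDING_DOMINATOR, which performs the limit check but skips the top-pointer store, paired with the new MacroAssembler::FastAllocate, which bumps the top pointer without a check.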


@ -1985,6 +1985,7 @@ void MacroAssembler::Allocate(int object_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@ -2077,9 +2078,14 @@ void MacroAssembler::Allocate(int object_size,
cond = cc;
}
}
cmp(result_end, Operand(alloc_limit));
b(hi, gc_required);
str(result_end, MemOperand(top_address));
if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
// The top pointer is not updated for allocation folding dominators.
str(result_end, MemOperand(top_address));
}
// Tag object.
add(result, result, Operand(kHeapObjectTag));
@ -2089,6 +2095,7 @@ void MacroAssembler::Allocate(int object_size,
void MacroAssembler::Allocate(Register object_size, Register result,
Register result_end, Register scratch,
Label* gc_required, AllocationFlags flags) {
DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@ -2164,7 +2171,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
} else {
add(result_end, result, Operand(object_size), SetCC);
}
b(cs, gc_required);
cmp(result_end, Operand(alloc_limit));
b(hi, gc_required);
@ -2173,12 +2180,122 @@ void MacroAssembler::Allocate(Register object_size, Register result,
tst(result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace);
}
str(result_end, MemOperand(top_address));
if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
// The top pointer is not updated for allocation folding dominators.
str(result_end, MemOperand(top_address));
}
// Tag object.
add(result, result, Operand(kHeapObjectTag));
}
void MacroAssembler::FastAllocate(Register object_size, Register result,
Register result_end, Register scratch,
AllocationFlags flags) {
// |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
// is not specified. Other registers must not overlap.
DCHECK(!AreAliased(object_size, result, scratch, ip));
DCHECK(!AreAliased(result_end, result, scratch, ip));
DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
ExternalReference allocation_top =
AllocationUtils::GetAllocationTopReference(isolate(), flags);
Register top_address = scratch;
mov(top_address, Operand(allocation_top));
ldr(result, MemOperand(top_address));
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
Label aligned;
b(eq, &aligned);
mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
bind(&aligned);
}
// Calculate new top using result. Object size may be in words so a shift is
// required to get the number of bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
add(result_end, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
} else {
add(result_end, result, Operand(object_size), SetCC);
}
// Update allocation top. result temporarily holds the new top.
if (emit_debug_code()) {
tst(result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace);
}
// The top pointer is not updated for allocation folding dominators.
str(result_end, MemOperand(top_address));
add(result, result, Operand(kHeapObjectTag));
}
void MacroAssembler::FastAllocate(int object_size, Register result,
Register scratch1, Register scratch2,
AllocationFlags flags) {
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
DCHECK(!AreAliased(result, scratch1, scratch2, ip));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
object_size *= kPointerSize;
}
DCHECK_EQ(0, object_size & kObjectAlignmentMask);
ExternalReference allocation_top =
AllocationUtils::GetAllocationTopReference(isolate(), flags);
// Set up allocation top address register.
Register top_address = scratch1;
Register result_end = scratch2;
mov(top_address, Operand(allocation_top));
ldr(result, MemOperand(top_address));
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
Label aligned;
b(eq, &aligned);
mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
bind(&aligned);
}
// Calculate new top using result. Object size may be in words so a shift is
// required to get the number of bytes. We must preserve the ip register at
// this point, so we cannot just use add().
DCHECK(object_size > 0);
Register source = result;
Condition cond = al;
int shift = 0;
while (object_size != 0) {
if (((object_size >> shift) & 0x03) == 0) {
shift += 2;
} else {
int bits = object_size & (0xff << shift);
object_size -= bits;
shift += 8;
Operand bits_operand(bits);
DCHECK(bits_operand.instructions_required(this) == 1);
add(result_end, source, bits_operand, LeaveCC, cond);
source = result_end;
cond = cc;
}
}
// The top pointer is not updated for allocation folding dominators.
str(result_end, MemOperand(top_address));
add(result, result, Operand(kHeapObjectTag));
}
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,

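The constant-size MacroAssembler::FastAllocate above must preserve the ip register, so instead of a single add() with an arbitrary 32-bit immediate it emits a chain of add() instructions whose immediates each fit ARM's immediate encoding (an 8-bit value shifted by an even amount), as the DCHECK on instructions_required(this) asserts. A standalone, illustrative sketch of that decomposition; the function name is made up:

#include <cassert>
#include <cstdint>
#include <vector>

// Illustrative sketch (not V8 code): split |value| into addends that each fit
// ARM's immediate encoding, mirroring the chunking loop in the constant-size
// FastAllocate above (one add() is emitted per chunk).
std::vector<uint32_t> SplitIntoArmImmediates(uint32_t value) {
  std::vector<uint32_t> chunks;
  int shift = 0;
  while (value != 0) {
    if (((value >> shift) & 0x3) == 0) {
      shift += 2;  // skip zero bit pairs so the rotation stays even
    } else {
      uint32_t bits = value & (0xffu << shift);
      value -= bits;
      shift += 8;
      chunks.push_back(bits);
    }
  }
  return chunks;
}

int main() {
  uint32_t size = 0x12345;
  uint32_t sum = 0;
  for (uint32_t chunk : SplitIntoArmImmediates(size)) sum += chunk;
  assert(sum == size);  // chunks are 0x45, 0x2300 and 0x10000
  return 0;
}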

@ -783,6 +783,15 @@ class MacroAssembler: public Assembler {
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
// FastAllocate is right now only used for folded allocations. It just
// increments the top pointer without checking against limit. This can only
// be done if it was proved earlier that the allocation will succeed.
void FastAllocate(int object_size, Register result, Register scratch1,
Register scratch2, AllocationFlags flags);
void FastAllocate(Register object_size, Register result, Register result_end,
Register scratch, AllocationFlags flags);
void AllocateTwoByteString(Register result,
Register length,
Register scratch1,

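The DOUBLE_ALIGNMENT paths in the ARM FastAllocate implementations above keep the space walkable while aligning: when the current top is only pointer aligned, they store the one-pointer filler map at top and advance by kDoubleSize / 2, so the skipped word is itself a valid filler object. (On ARM64 below this is unnecessary because pointer and double alignment coincide.) A standalone, illustrative sketch of the same idea with made-up names:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative sketch (not V8 code): double-align a bump pointer while keeping
// the region iterable by marking the skipped word as a one-word filler.
constexpr uint32_t kFillerMarker = 0xF111E4;  // stands in for the filler map
constexpr size_t kWordSize = 4;
constexpr size_t kDoubleAlignment = 8;

size_t AlignForDouble(std::vector<uint32_t>* words, size_t top_index) {
  // Each element of |words| models one pointer-sized word of the space.
  if ((top_index * kWordSize) % kDoubleAlignment != 0) {
    (*words)[top_index] = kFillerMarker;  // the gap stays a walkable object
    ++top_index;
  }
  return top_index;
}

int main() {
  std::vector<uint32_t> words(16, 0);
  size_t top = 3;                     // 12 bytes: pointer aligned only
  top = AlignForDouble(&words, top);
  assert(top == 4 && words[3] == kFillerMarker);
  top = AlignForDouble(&words, top);  // already double aligned: no filler
  assert(top == 4);
  return 0;
}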

@ -3030,6 +3030,7 @@ void MacroAssembler::Allocate(int object_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@ -3092,7 +3093,11 @@ void MacroAssembler::Allocate(int object_size,
Adds(result_end, result, object_size);
Ccmp(result_end, alloc_limit, NoFlag, cc);
B(hi, gc_required);
Str(result_end, MemOperand(top_address));
if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
// The top pointer is not updated for allocation folding dominators.
Str(result_end, MemOperand(top_address));
}
// Tag the object.
ObjectTag(result, result);
@ -3168,14 +3173,88 @@ void MacroAssembler::Allocate(Register object_size, Register result,
Check(eq, kUnalignedAllocationInNewSpace);
}
Ccmp(result_end, alloc_limit, CFlag, cc);
Ccmp(result_end, alloc_limit, NoFlag, cc);
B(hi, gc_required);
Str(result_end, MemOperand(top_address));
if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
// The top pointer is not updated for allocation folding dominators.
Str(result_end, MemOperand(top_address));
}
// Tag the object.
ObjectTag(result, result);
}
void MacroAssembler::FastAllocate(int object_size, Register result,
Register scratch1, Register scratch2,
AllocationFlags flags) {
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
DCHECK(!AreAliased(result, scratch1, scratch2));
DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
object_size *= kPointerSize;
}
DCHECK(0 == (object_size & kObjectAlignmentMask));
ExternalReference heap_allocation_top =
AllocationUtils::GetAllocationTopReference(isolate(), flags);
// Set up allocation top address and allocation limit registers.
Register top_address = scratch1;
Register result_end = scratch2;
Mov(top_address, Operand(heap_allocation_top));
Ldr(result, MemOperand(top_address));
// We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
// the same alignment on ARM64.
STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
// Calculate new top and write it back.
Adds(result_end, result, object_size);
Str(result_end, MemOperand(top_address));
ObjectTag(result, result);
}
void MacroAssembler::FastAllocate(Register object_size, Register result,
Register result_end, Register scratch,
AllocationFlags flags) {
// |object_size| and |result_end| may overlap, other registers must not.
DCHECK(!AreAliased(object_size, result, scratch));
DCHECK(!AreAliased(result_end, result, scratch));
DCHECK(object_size.Is64Bits() && result.Is64Bits() && scratch.Is64Bits() &&
result_end.Is64Bits());
ExternalReference heap_allocation_top =
AllocationUtils::GetAllocationTopReference(isolate(), flags);
// Set up allocation top address and allocation limit registers.
Register top_address = scratch;
Mov(top_address, heap_allocation_top);
Ldr(result, MemOperand(top_address));
// We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
// the same alignment on ARM64.
STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
// Calculate new top and write it back.
if ((flags & SIZE_IN_WORDS) != 0) {
Adds(result_end, result, Operand(object_size, LSL, kPointerSizeLog2));
} else {
Adds(result_end, result, object_size);
}
Str(result_end, MemOperand(top_address));
if (emit_debug_code()) {
Tst(result_end, kObjectAlignmentMask);
Check(eq, kUnalignedAllocationInNewSpace);
}
ObjectTag(result, result);
}
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,


@ -1312,7 +1312,6 @@ class MacroAssembler : public Assembler {
//
// If the new space is exhausted control continues at the gc_required label.
// In this case, the result and scratch registers may still be clobbered.
// If flags includes TAG_OBJECT, the result is tagged as as a heap object.
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
@ -1323,6 +1322,15 @@ class MacroAssembler : public Assembler {
Label* gc_required,
AllocationFlags flags);
// FastAllocate is right now only used for folded allocations. It just
// increments the top pointer without checking against limit. This can only
// be done if it was proved earlier that the allocation will succeed.
void FastAllocate(Register object_size, Register result, Register result_end,
Register scratch, AllocationFlags flags);
void FastAllocate(int object_size, Register result, Register scratch1,
Register scratch2, AllocationFlags flags);
void AllocateTwoByteString(Register result,
Register length,
Register scratch1,


@ -443,7 +443,7 @@ HValue* CodeStubGraphBuilder<FastCloneRegExpStub>::BuildCodeStub() {
JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
HValue* result =
Add<HAllocate>(Add<HConstant>(result_size), HType::JSObject(),
NOT_TENURED, JS_REGEXP_TYPE);
NOT_TENURED, JS_REGEXP_TYPE, graph()->GetConstant0());
Add<HStoreNamedField>(
result, HObjectAccess::ForMap(),
Add<HLoadNamedField>(boilerplate, nullptr, HObjectAccess::ForMap()));
@ -556,8 +556,9 @@ HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() {
info()->MarkMustNotHaveEagerFrame();
HValue* size = Add<HConstant>(AllocationSite::kSize);
HInstruction* object = Add<HAllocate>(size, HType::JSObject(), TENURED,
JS_OBJECT_TYPE);
HInstruction* object =
Add<HAllocate>(size, HType::JSObject(), TENURED, JS_OBJECT_TYPE,
graph()->GetConstant0());
// Store the map
Handle<Map> allocation_site_map = isolate()->factory()->allocation_site_map();
@ -635,7 +636,8 @@ HValue* CodeStubGraphBuilder<CreateWeakCellStub>::BuildCodeStub() {
HValue* size = Add<HConstant>(WeakCell::kSize);
HInstruction* object =
Add<HAllocate>(size, HType::JSObject(), TENURED, JS_OBJECT_TYPE);
Add<HAllocate>(size, HType::JSObject(), TENURED, JS_OBJECT_TYPE,
graph()->GetConstant0());
Handle<Map> weak_cell_map = isolate()->factory()->weak_cell_map();
AddStoreMapConstant(object, weak_cell_map);
@ -1167,7 +1169,7 @@ void CodeStubGraphBuilderBase::BuildStoreNamedField(
// TODO(hpayer): Allocation site pretenuring support.
HInstruction* heap_number =
Add<HAllocate>(heap_number_size, HType::HeapObject(), NOT_TENURED,
MUTABLE_HEAP_NUMBER_TYPE);
MUTABLE_HEAP_NUMBER_TYPE, graph()->GetConstant0());
AddStoreMapConstant(heap_number,
isolate()->factory()->mutable_heap_number_map());
Add<HStoreNamedField>(heap_number, HObjectAccess::ForHeapNumberValue(),
@ -1379,7 +1381,6 @@ HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
? JSArrayBuilder::FILL_WITH_HOLE
: JSArrayBuilder::DONT_FILL_WITH_HOLE;
HValue* new_object = array_builder->AllocateArray(checked_length,
max_alloc_length,
checked_length,
fill_mode);
HValue* elements = array_builder->GetElementsLocation();
@ -1896,7 +1897,8 @@ HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
// Create a new closure from the given function info in new space
HValue* size = Add<HConstant>(JSFunction::kSize);
HInstruction* js_function =
Add<HAllocate>(size, HType::JSObject(), NOT_TENURED, JS_FUNCTION_TYPE);
Add<HAllocate>(size, HType::JSObject(), NOT_TENURED, JS_FUNCTION_TYPE,
graph()->GetConstant0());
int map_index = Context::FunctionMapIndex(casted_stub()->language_mode(),
casted_stub()->kind());
@ -1949,7 +1951,8 @@ HValue* CodeStubGraphBuilder<FastNewContextStub>::BuildCodeStub() {
// Allocate the context in new space.
HAllocate* function_context = Add<HAllocate>(
Add<HConstant>(length * kPointerSize + FixedArray::kHeaderSize),
HType::HeapObject(), NOT_TENURED, FIXED_ARRAY_TYPE);
HType::HeapObject(), NOT_TENURED, FIXED_ARRAY_TYPE,
graph()->GetConstant0());
// Set up the object header.
AddStoreMapConstant(function_context,


@ -2351,13 +2351,18 @@ LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* context = UseAny(instr->context());
LOperand* size = UseRegisterOrConstant(instr->size());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2);
return AssignPointerMap(DefineAsRegister(result));
if (instr->IsAllocationFolded()) {
LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2);
return DefineAsRegister(result);
} else {
info()->MarkAsDeferredCalling();
LOperand* context = UseAny(instr->context());
LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
return AssignPointerMap(DefineAsRegister(result));
}
}


@ -67,6 +67,7 @@ class LCodeGen;
V(Drop) \
V(Dummy) \
V(DummyUse) \
V(FastAllocate) \
V(FlooringDivByConstI) \
V(FlooringDivByPowerOf2I) \
V(FlooringDivI) \
@ -151,7 +152,6 @@ class LCodeGen;
V(UnknownOSRValue) \
V(WrapReceiver)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
Opcode opcode() const final { return LInstruction::k##type; } \
void CompileToNative(LCodeGen* generator) final; \
@ -2391,6 +2391,21 @@ class LAllocate final : public LTemplateInstruction<1, 2, 2> {
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
class LFastAllocate final : public LTemplateInstruction<1, 1, 2> {
public:
LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
inputs_[0] = size;
temps_[0] = temp1;
temps_[1] = temp2;
}
LOperand* size() { return inputs_[0]; }
LOperand* temp1() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:


@ -5109,6 +5109,14 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
if (instr->hydrogen()->IsAllocationFoldingDominator()) {
flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
}
if (instr->hydrogen()->IsAllocationFolded()) {
flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDED);
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
CHECK(size <= Page::kMaxRegularHeapObjectSize);
@ -5176,6 +5184,50 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
CallRuntimeFromDeferred(
Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(r0, result);
if (instr->hydrogen()->IsAllocationFoldingDominator()) {
AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
if (instr->hydrogen()->IsOldSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
// If the allocation folding dominator allocate triggered a GC, allocation
// happened in the runtime. We have to reset the top pointer to virtually
// undo the allocation.
ExternalReference allocation_top =
AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
Register top_address = scratch0();
__ sub(r0, r0, Operand(kHeapObjectTag));
__ mov(top_address, Operand(allocation_top));
__ str(r0, MemOperand(top_address));
__ add(r0, r0, Operand(kHeapObjectTag));
}
}
void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
DCHECK(instr->hydrogen()->IsAllocationFolded());
Register result = ToRegister(instr->result());
Register scratch1 = ToRegister(instr->temp1());
Register scratch2 = ToRegister(instr->temp2());
AllocationFlags flags = NO_ALLOCATION_FLAGS;
if (instr->hydrogen()->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
if (instr->hydrogen()->IsOldSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
if (!instr->hydrogen()->IsAllocationFoldingDominator()) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
CHECK(size <= Page::kMaxRegularHeapObjectSize);
__ FastAllocate(size, result, scratch1, scratch2, flags);
} else {
Register size = ToRegister(instr->size());
__ FastAllocate(size, result, scratch1, scratch2, flags);
}
}
}

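The new tail of DoDeferredAllocate above handles a folding dominator whose allocation had to go through the runtime because a GC was triggered: the runtime call has presumably advanced the top pointer past the whole reserved block, while the folded FastAllocate instructions that follow still expect to bump top themselves. Storing r0 minus kHeapObjectTag back to the allocation-top reference resets top to the start of that block, which is what the comment calls virtually undoing the allocation. A standalone, illustrative sketch of that bookkeeping; the names are made up:

#include <cassert>
#include <cstddef>
#include <cstdint>

// Illustrative sketch (not V8 code): after a runtime allocation of the whole
// folded block, reset top to the block's start so the subsequent bump
// allocations (the folded FastAllocates) hand out the same addresses as the
// inline path would have.
struct Space {
  uintptr_t top;
};

uintptr_t RuntimeAllocate(Space* space, size_t combined_size) {
  uintptr_t base = space->top;
  space->top += combined_size;  // the runtime advanced top past the block
  return base;
}

int main() {
  Space space{0x1000};
  uintptr_t base = RuntimeAllocate(&space, 96);  // dominator's combined size
  space.top = base;  // "virtually undo" the runtime allocation
  // The folded allocations now bump top through the already-reserved block.
  uintptr_t first = space.top;
  space.top += 32;
  uintptr_t second = space.top;
  space.top += 64;
  assert(first == base && second == base + 32 && space.top == base + 96);
  return 0;
}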

@ -841,14 +841,20 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* context = UseAny(instr->context());
LOperand* size = UseRegisterOrConstant(instr->size());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LOperand* temp3 = instr->MustPrefillWithFiller() ? TempRegister() : NULL;
LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2, temp3);
return AssignPointerMap(DefineAsRegister(result));
if (instr->IsAllocationFolded()) {
LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2);
return DefineAsRegister(result);
} else {
info()->MarkAsDeferredCalling();
LOperand* context = UseAny(instr->context());
LOperand* temp3 = instr->MustPrefillWithFiller() ? TempRegister() : NULL;
LAllocate* result =
new (zone()) LAllocate(context, size, temp1, temp2, temp3);
return AssignPointerMap(DefineAsRegister(result));
}
}


@ -70,6 +70,7 @@ class LCodeGen;
V(Drop) \
V(Dummy) \
V(DummyUse) \
V(FastAllocate) \
V(FlooringDivByConstI) \
V(FlooringDivByPowerOf2I) \
V(FlooringDivI) \
@ -163,7 +164,6 @@ class LCodeGen;
V(UnknownOSRValue) \
V(WrapReceiver)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
Opcode opcode() const final { return LInstruction::k##type; } \
void CompileToNative(LCodeGen* generator) final; \
@ -626,6 +626,21 @@ class LAllocate final : public LTemplateInstruction<1, 2, 3> {
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
class LFastAllocate final : public LTemplateInstruction<1, 1, 2> {
public:
LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
inputs_[0] = size;
temps_[0] = temp1;
temps_[1] = temp2;
}
LOperand* size() { return inputs_[0]; }
LOperand* temp1() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
public:


@ -1426,6 +1426,14 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
if (instr->hydrogen()->IsAllocationFoldingDominator()) {
flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
}
if (instr->hydrogen()->IsAllocationFolded()) {
flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDED);
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
CHECK(size <= Page::kMaxRegularHeapObjectSize);
@ -1487,6 +1495,50 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
CallRuntimeFromDeferred(
Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
if (instr->hydrogen()->IsAllocationFoldingDominator()) {
AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
if (instr->hydrogen()->IsOldSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
// If the allocation folding dominator allocate triggered a GC, allocation
// happened in the runtime. We have to reset the top pointer to virtually
// undo the allocation.
ExternalReference allocation_top =
AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
Register top_address = x10;
__ Sub(x0, x0, Operand(kHeapObjectTag));
__ Mov(top_address, Operand(allocation_top));
__ Str(x0, MemOperand(top_address));
__ Add(x0, x0, Operand(kHeapObjectTag));
}
}
void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
DCHECK(instr->hydrogen()->IsAllocationFolded());
Register result = ToRegister(instr->result());
Register scratch1 = ToRegister(instr->temp1());
Register scratch2 = ToRegister(instr->temp2());
AllocationFlags flags = NO_ALLOCATION_FLAGS;
if (instr->hydrogen()->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
if (instr->hydrogen()->IsOldSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
if (!instr->hydrogen()->IsAllocationFoldingDominator()) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
CHECK(size <= Page::kMaxRegularHeapObjectSize);
__ FastAllocate(size, result, scratch1, scratch2, flags);
} else {
Register size = ToRegister(instr->size());
__ FastAllocate(size, result, scratch1, scratch2, flags);
}
}
}


@ -3126,6 +3126,7 @@ Representation HUnaryMathOperation::RepresentationFromInputs() {
bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) {
DCHECK(side_effect == kNewSpacePromotion);
DCHECK(!IsAllocationFolded());
Zone* zone = block()->zone();
Isolate* isolate = block()->isolate();
if (!FLAG_use_allocation_folding) return false;
@ -3153,7 +3154,8 @@ bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
HValue* current_size = size();
// TODO(hpayer): Add support for non-constant allocation in dominator.
if (!dominator_size->IsInteger32Constant()) {
if (!current_size->IsInteger32Constant() ||
!dominator_size->IsInteger32Constant()) {
if (FLAG_trace_allocation_folding) {
PrintF("#%d (%s) cannot fold into #%d (%s), "
"dynamic allocation size in dominator\n",
@ -3171,32 +3173,6 @@ bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
return false;
}
if (!has_size_upper_bound()) {
if (FLAG_trace_allocation_folding) {
PrintF("#%d (%s) cannot fold into #%d (%s), "
"can't estimate total allocation size\n",
id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
}
return false;
}
if (!current_size->IsInteger32Constant()) {
// If it's not constant then it is a size_in_bytes calculation graph
// like this: (const_header_size + const_element_size * size).
DCHECK(current_size->IsInstruction());
HInstruction* current_instr = HInstruction::cast(current_size);
if (!current_instr->Dominates(dominator_allocate)) {
if (FLAG_trace_allocation_folding) {
PrintF("#%d (%s) cannot fold into #%d (%s), dynamic size "
"value does not dominate target allocation\n",
id(), Mnemonic(), dominator_allocate->id(),
dominator_allocate->Mnemonic());
}
return false;
}
}
DCHECK(
(IsNewSpaceAllocation() && dominator_allocate->IsNewSpaceAllocation()) ||
(IsOldSpaceAllocation() && dominator_allocate->IsOldSpaceAllocation()));
@ -3213,7 +3189,7 @@ bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
}
}
int32_t current_size_max_value = size_upper_bound()->GetInteger32Constant();
int32_t current_size_max_value = size()->GetInteger32Constant();
int32_t new_dominator_size = dominator_size_constant + current_size_max_value;
// Since we clear the first word after folded memory, we cannot use the
@ -3227,27 +3203,9 @@ bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
return false;
}
HInstruction* new_dominator_size_value;
if (current_size->IsInteger32Constant()) {
new_dominator_size_value = HConstant::CreateAndInsertBefore(
isolate, zone, context(), new_dominator_size, Representation::None(),
dominator_allocate);
} else {
HValue* new_dominator_size_constant = HConstant::CreateAndInsertBefore(
isolate, zone, context(), dominator_size_constant,
Representation::Integer32(), dominator_allocate);
// Add old and new size together and insert.
current_size->ChangeRepresentation(Representation::Integer32());
new_dominator_size_value = HAdd::New(
isolate, zone, context(), new_dominator_size_constant, current_size);
new_dominator_size_value->ClearFlag(HValue::kCanOverflow);
new_dominator_size_value->ChangeRepresentation(Representation::Integer32());
new_dominator_size_value->InsertBefore(dominator_allocate);
}
HInstruction* new_dominator_size_value = HConstant::CreateAndInsertBefore(
isolate, zone, context(), new_dominator_size, Representation::None(),
dominator_allocate);
dominator_allocate->UpdateSize(new_dominator_size_value);
@ -3257,103 +3215,45 @@ bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
}
}
bool keep_heap_iterable = FLAG_log_gc || FLAG_heap_stats;
#ifdef VERIFY_HEAP
keep_heap_iterable = keep_heap_iterable || FLAG_verify_heap;
#endif
if (keep_heap_iterable) {
dominator_allocate->MakePrefillWithFiller();
} else {
// TODO(hpayer): This is a short-term hack to make allocation mementos
// work again in new space.
dominator_allocate->ClearNextMapWord(original_object_size);
if (IsAllocationFoldingDominator()) {
DeleteAndReplaceWith(dominator_allocate);
if (FLAG_trace_allocation_folding) {
PrintF(
"#%d (%s) folded dominator into #%d (%s), new dominator size: %d\n",
id(), Mnemonic(), dominator_allocate->id(),
dominator_allocate->Mnemonic(), new_dominator_size);
}
return true;
}
dominator_allocate->UpdateClearNextMapWord(MustClearNextMapWord());
if (!dominator_allocate->IsAllocationFoldingDominator()) {
HAllocate* first_alloc =
HAllocate::New(isolate, zone, dominator_allocate->context(),
dominator_size, dominator_allocate->type(),
IsNewSpaceAllocation() ? NOT_TENURED : TENURED,
JS_OBJECT_TYPE, block()->graph()->GetConstant0());
first_alloc->InsertAfter(dominator_allocate);
dominator_allocate->ReplaceAllUsesWith(first_alloc);
dominator_allocate->MakeAllocationFoldingDominator();
first_alloc->MakeFoldedAllocation(dominator_allocate);
if (FLAG_trace_allocation_folding) {
PrintF("#%d (%s) inserted for dominator #%d (%s)\n", first_alloc->id(),
first_alloc->Mnemonic(), dominator_allocate->id(),
dominator_allocate->Mnemonic());
}
}
// After that replace the dominated allocate instruction.
HInstruction* inner_offset = HConstant::CreateAndInsertBefore(
isolate, zone, context(), dominator_size_constant, Representation::None(),
this);
MakeFoldedAllocation(dominator_allocate);
HInstruction* dominated_allocate_instr = HInnerAllocatedObject::New(
isolate, zone, context(), dominator_allocate, inner_offset, type());
dominated_allocate_instr->InsertBefore(this);
DeleteAndReplaceWith(dominated_allocate_instr);
if (FLAG_trace_allocation_folding) {
PrintF("#%d (%s) folded into #%d (%s)\n",
id(), Mnemonic(), dominator_allocate->id(),
dominator_allocate->Mnemonic());
PrintF("#%d (%s) folded into #%d (%s), new dominator size: %d\n", id(),
Mnemonic(), dominator_allocate->id(), dominator_allocate->Mnemonic(),
new_dominator_size);
}
return true;
}
void HAllocate::UpdateFreeSpaceFiller(int32_t free_space_size) {
DCHECK(filler_free_space_size_ != NULL);
Zone* zone = block()->zone();
// We must explicitly force Smi representation here because on x64 we
// would otherwise automatically choose int32, but the actual store
// requires a Smi-tagged value.
HConstant* new_free_space_size = HConstant::CreateAndInsertBefore(
block()->isolate(), zone, context(),
filler_free_space_size_->value()->GetInteger32Constant() +
free_space_size,
Representation::Smi(), filler_free_space_size_);
filler_free_space_size_->UpdateValue(new_free_space_size);
}
void HAllocate::CreateFreeSpaceFiller(int32_t free_space_size) {
DCHECK(filler_free_space_size_ == NULL);
Isolate* isolate = block()->isolate();
Zone* zone = block()->zone();
HInstruction* free_space_instr =
HInnerAllocatedObject::New(isolate, zone, context(), dominating_allocate_,
dominating_allocate_->size(), type());
free_space_instr->InsertBefore(this);
HConstant* filler_map = HConstant::CreateAndInsertAfter(
zone, Unique<Map>::CreateImmovable(isolate->factory()->free_space_map()),
true, free_space_instr);
HInstruction* store_map =
HStoreNamedField::New(isolate, zone, context(), free_space_instr,
HObjectAccess::ForMap(), filler_map);
store_map->SetFlag(HValue::kHasNoObservableSideEffects);
store_map->InsertAfter(filler_map);
// We must explicitly force Smi representation here because on x64 we
// would otherwise automatically choose int32, but the actual store
// requires a Smi-tagged value.
HConstant* filler_size =
HConstant::CreateAndInsertAfter(isolate, zone, context(), free_space_size,
Representation::Smi(), store_map);
// Must force Smi representation for x64 (see comment above).
HObjectAccess access = HObjectAccess::ForMapAndOffset(
isolate->factory()->free_space_map(), FreeSpace::kSizeOffset,
Representation::Smi());
HStoreNamedField* store_size = HStoreNamedField::New(
isolate, zone, context(), free_space_instr, access, filler_size);
store_size->SetFlag(HValue::kHasNoObservableSideEffects);
store_size->InsertAfter(filler_size);
filler_free_space_size_ = store_size;
}
void HAllocate::ClearNextMapWord(int offset) {
if (MustClearNextMapWord()) {
Zone* zone = block()->zone();
HObjectAccess access =
HObjectAccess::ForObservableJSObjectOffset(offset);
HStoreNamedField* clear_next_map =
HStoreNamedField::New(block()->isolate(), zone, context(), this, access,
block()->graph()->GetConstant0());
clear_next_map->ClearAllSideEffects();
clear_next_map->InsertAfter(this);
}
}
std::ostream& HAllocate::PrintDataTo(std::ostream& os) const { // NOLINT
os << NameOf(size()) << " (";
if (IsNewSpaceAllocation()) os << "N";

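On the Hydrogen side, HAllocate::HandleSideEffectDominator now folds only when both the dominator's size and the dominated allocation's size are integer constants: the dominator's size is updated to their sum (the fold is rejected if that sum would exceed the regular heap object size limit), the dominator is marked as an allocation folding dominator, and the dominated allocation becomes a folded allocation that records its dominator as an extra operand. A compact, illustrative sketch of that decision; kMaxRegularObjectSize is an assumed stand-in for Page::kMaxRegularHeapObjectSize:

#include <cassert>
#include <cstdint>

// Illustrative sketch (not V8 code) of the folding decision: fold only when
// both sizes are compile-time constants and the combined size still fits a
// regular heap object.
constexpr int32_t kMaxRegularObjectSize = 512 * 1024;  // assumed limit

bool TryFold(bool dominator_size_is_constant, bool size_is_constant,
             int32_t dominator_size, int32_t size,
             int32_t* new_dominator_size) {
  if (!dominator_size_is_constant || !size_is_constant) return false;
  int64_t combined = static_cast<int64_t>(dominator_size) + size;
  if (combined > kMaxRegularObjectSize) return false;
  *new_dominator_size = static_cast<int32_t>(combined);
  return true;
}

int main() {
  int32_t new_size = 0;
  assert(TryFold(true, true, 24, 16, &new_size) && new_size == 40);
  assert(!TryFold(true, false, 24, 16, &new_size));  // dynamic size: no fold
  return 0;
}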

@ -4939,8 +4939,7 @@ class HLoadGlobalGeneric final : public HTemplateInstruction<2> {
FeedbackVectorSlot slot_;
};
class HAllocate final : public HTemplateInstruction<2> {
class HAllocate final : public HTemplateInstruction<3> {
public:
static bool CompatibleInstanceTypes(InstanceType type1,
InstanceType type2) {
@ -4951,9 +4950,10 @@ class HAllocate final : public HTemplateInstruction<2> {
static HAllocate* New(
Isolate* isolate, Zone* zone, HValue* context, HValue* size, HType type,
PretenureFlag pretenure_flag, InstanceType instance_type,
HValue* dominator,
Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null()) {
return new(zone) HAllocate(context, size, type, pretenure_flag,
instance_type, allocation_site);
return new (zone) HAllocate(context, size, type, pretenure_flag,
instance_type, dominator, allocation_site);
}
// Maximum instance size for which allocations will be inlined.
@ -4961,13 +4961,7 @@ class HAllocate final : public HTemplateInstruction<2> {
HValue* context() const { return OperandAt(0); }
HValue* size() const { return OperandAt(1); }
bool has_size_upper_bound() { return size_upper_bound_ != NULL; }
HConstant* size_upper_bound() { return size_upper_bound_; }
void set_size_upper_bound(HConstant* value) {
DCHECK(size_upper_bound_ == NULL);
size_upper_bound_ = value;
}
HValue* allocation_folding_dominator() const { return OperandAt(2); }
Representation RequiredInputRepresentation(int index) override {
if (index == 0) {
@ -5005,14 +4999,28 @@ class HAllocate final : public HTemplateInstruction<2> {
flags_ = static_cast<HAllocate::Flags>(flags_ | PREFILL_WITH_FILLER);
}
bool MustClearNextMapWord() const {
return (flags_ & CLEAR_NEXT_MAP_WORD) != 0;
}
void MakeDoubleAligned() {
flags_ = static_cast<HAllocate::Flags>(flags_ | ALLOCATE_DOUBLE_ALIGNED);
}
void MakeAllocationFoldingDominator() {
flags_ =
static_cast<HAllocate::Flags>(flags_ | ALLOCATION_FOLDING_DOMINATOR);
}
bool IsAllocationFoldingDominator() {
return (flags_ & ALLOCATION_FOLDING_DOMINATOR) != 0;
}
void MakeFoldedAllocation(HAllocate* dominator) {
flags_ = static_cast<HAllocate::Flags>(flags_ | ALLOCATION_FOLDED);
ClearFlag(kTrackSideEffectDominators);
ClearChangesFlag(kNewSpacePromotion);
SetOperandAt(2, dominator);
}
bool IsAllocationFolded() { return (flags_ & ALLOCATION_FOLDED) != 0; }
bool HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) override;
@ -5026,23 +5034,19 @@ class HAllocate final : public HTemplateInstruction<2> {
ALLOCATE_IN_OLD_SPACE = 1 << 2,
ALLOCATE_DOUBLE_ALIGNED = 1 << 3,
PREFILL_WITH_FILLER = 1 << 4,
CLEAR_NEXT_MAP_WORD = 1 << 5
ALLOCATION_FOLDING_DOMINATOR = 1 << 5,
ALLOCATION_FOLDED = 1 << 6
};
HAllocate(HValue* context,
HValue* size,
HType type,
PretenureFlag pretenure_flag,
InstanceType instance_type,
Handle<AllocationSite> allocation_site =
Handle<AllocationSite>::null())
: HTemplateInstruction<2>(type),
flags_(ComputeFlags(pretenure_flag, instance_type)),
dominating_allocate_(NULL),
filler_free_space_size_(NULL),
size_upper_bound_(NULL) {
HAllocate(
HValue* context, HValue* size, HType type, PretenureFlag pretenure_flag,
InstanceType instance_type, HValue* dominator,
Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null())
: HTemplateInstruction<3>(type),
flags_(ComputeFlags(pretenure_flag, instance_type)) {
SetOperandAt(0, context);
UpdateSize(size);
SetOperandAt(2, dominator);
set_representation(Representation::Tagged());
SetFlag(kTrackSideEffectDominators);
SetChangesFlag(kNewSpacePromotion);
@ -5072,46 +5076,20 @@ class HAllocate final : public HTemplateInstruction<2> {
if (!FLAG_use_gvn || !FLAG_use_allocation_folding) {
flags = static_cast<Flags>(flags | PREFILL_WITH_FILLER);
}
if (pretenure_flag == NOT_TENURED &&
AllocationSite::CanTrack(instance_type)) {
flags = static_cast<Flags>(flags | CLEAR_NEXT_MAP_WORD);
}
return flags;
}
void UpdateClearNextMapWord(bool clear_next_map_word) {
flags_ = static_cast<Flags>(clear_next_map_word
? flags_ | CLEAR_NEXT_MAP_WORD
: flags_ & ~CLEAR_NEXT_MAP_WORD);
}
void UpdateSize(HValue* size) {
SetOperandAt(1, size);
if (size->IsInteger32Constant()) {
size_upper_bound_ = HConstant::cast(size);
} else {
size_upper_bound_ = NULL;
}
}
HAllocate* GetFoldableDominator(HAllocate* dominator);
void UpdateFreeSpaceFiller(int32_t filler_size);
void CreateFreeSpaceFiller(int32_t filler_size);
bool IsFoldable(HAllocate* allocate) {
return (IsNewSpaceAllocation() && allocate->IsNewSpaceAllocation()) ||
(IsOldSpaceAllocation() && allocate->IsOldSpaceAllocation());
}
void ClearNextMapWord(int offset);
Flags flags_;
Handle<Map> known_initial_map_;
HAllocate* dominating_allocate_;
HStoreNamedField* filler_free_space_size_;
HConstant* size_upper_bound_;
};
@ -5183,9 +5161,20 @@ inline bool StoringValueNeedsWriteBarrier(HValue* value) {
inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
HValue* value,
HValue* dominator) {
// There may be multiple inner allocates dominated by one allocate.
while (object->IsInnerAllocatedObject()) {
object = HInnerAllocatedObject::cast(object)->base_object();
}
if (object->IsAllocate()) {
HAllocate* allocate = HAllocate::cast(object);
if (allocate->IsAllocationFolded()) {
HValue* dominator = allocate->allocation_folding_dominator();
DCHECK(HAllocate::cast(dominator)->IsAllocationFoldingDominator());
object = dominator;
}
}
if (object->IsConstant() &&
HConstant::cast(object)->HasExternalReferenceValue()) {
// Stores to external references require no write barriers

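ReceiverObjectNeedsWriteBarrier above now walks from an inner allocated object to its base allocation and, if that allocation is a folded one, on to its allocation folding dominator, so stores into a folded allocation are attributed to the dominating allocation when deciding whether the new-space write barrier can be omitted. A small, illustrative sketch of that resolution step; the struct is made up:

#include <cassert>

// Illustrative sketch (not V8 code): resolve an object through inner-object
// bases and folded allocations to the allocation folding dominator, mirroring
// the walk in ReceiverObjectNeedsWriteBarrier above.
struct Allocation {
  bool is_inner_object;
  Allocation* base;       // set when is_inner_object
  bool is_folded;
  Allocation* dominator;  // set when is_folded
};

const Allocation* ResolveForWriteBarrier(const Allocation* object) {
  while (object->is_inner_object) object = object->base;
  if (object->is_folded) object = object->dominator;
  return object;
}

int main() {
  Allocation dominator{false, nullptr, false, nullptr};
  Allocation folded{false, nullptr, true, &dominator};
  Allocation inner{true, &folded, false, nullptr};
  assert(ResolveForWriteBarrier(&inner) == &dominator);
  return 0;
}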

@ -2040,7 +2040,7 @@ HValue* HGraphBuilder::BuildCreateIterResultObject(HValue* value,
// Allocate the JSIteratorResult object.
HValue* result =
Add<HAllocate>(Add<HConstant>(JSIteratorResult::kSize), HType::JSObject(),
NOT_TENURED, JS_OBJECT_TYPE);
NOT_TENURED, JS_OBJECT_TYPE, graph()->GetConstant0());
// Initialize the JSIteratorResult object.
HValue* native_context = BuildGetNativeContext();
@ -2077,9 +2077,9 @@ HValue* HGraphBuilder::BuildRegExpConstructResult(HValue* length,
HValue* size = BuildCalculateElementsSize(elements_kind, length);
// Allocate the JSRegExpResult and the FixedArray in one step.
HValue* result = Add<HAllocate>(
Add<HConstant>(JSRegExpResult::kSize), HType::JSArray(),
NOT_TENURED, JS_ARRAY_TYPE);
HValue* result =
Add<HAllocate>(Add<HConstant>(JSRegExpResult::kSize), HType::JSArray(),
NOT_TENURED, JS_ARRAY_TYPE, graph()->GetConstant0());
// Initialize the JSRegExpResult header.
HValue* native_context = Add<HLoadNamedField>(
@ -2113,12 +2113,6 @@ HValue* HGraphBuilder::BuildRegExpConstructResult(HValue* length,
HAllocate* elements = BuildAllocateElements(elements_kind, size);
BuildInitializeElementsHeader(elements, elements_kind, length);
if (!elements->has_size_upper_bound()) {
HConstant* size_in_bytes_upper_bound = EstablishElementsAllocationSize(
elements_kind, max_length->Integer32Value());
elements->set_size_upper_bound(size_in_bytes_upper_bound);
}
Add<HStoreNamedField>(
result, HObjectAccess::ForJSArrayOffset(JSArray::kElementsOffset),
elements);
@ -2394,8 +2388,8 @@ HAllocate* HGraphBuilder::BuildAllocate(
// Perform the actual allocation.
HAllocate* object = Add<HAllocate>(
size, type, allocation_mode.GetPretenureMode(),
instance_type, allocation_mode.feedback_site());
size, type, allocation_mode.GetPretenureMode(), instance_type,
graph()->GetConstant0(), allocation_mode.feedback_site());
// Setup the allocation memento.
if (allocation_mode.CreateAllocationMementos()) {
@ -2890,7 +2884,6 @@ HValue* HGraphBuilder::BuildAllocateArrayFromLength(
return array_builder->AllocateEmptyArray();
} else {
return array_builder->AllocateArray(length_argument,
array_length,
length_argument);
}
}
@ -2923,7 +2916,7 @@ HValue* HGraphBuilder::BuildAllocateArrayFromLength(
// Figure out total size
HValue* length = Pop();
HValue* capacity = Pop();
return array_builder->AllocateArray(capacity, max_alloc_length, length);
return array_builder->AllocateArray(capacity, length);
}
@ -2955,8 +2948,8 @@ HAllocate* HGraphBuilder::AllocateJSArrayObject(AllocationSiteMode mode) {
base_size += AllocationMemento::kSize;
}
HConstant* size_in_bytes = Add<HConstant>(base_size);
return Add<HAllocate>(
size_in_bytes, HType::JSArray(), NOT_TENURED, JS_OBJECT_TYPE);
return Add<HAllocate>(size_in_bytes, HType::JSArray(), NOT_TENURED,
JS_OBJECT_TYPE, graph()->GetConstant0());
}
@ -2978,7 +2971,7 @@ HAllocate* HGraphBuilder::BuildAllocateElements(ElementsKind kind,
: FIXED_ARRAY_TYPE;
return Add<HAllocate>(size_in_bytes, HType::HeapObject(), NOT_TENURED,
instance_type);
instance_type, graph()->GetConstant0());
}
@ -3366,14 +3359,6 @@ HValue* HGraphBuilder::BuildCloneShallowArrayNonEmpty(HValue* boilerplate,
HAllocate* elements = BuildAllocateElements(kind, elements_size);
// This function implicitly relies on the fact that the
// FastCloneShallowArrayStub is called only for literals shorter than
// JSArray::kInitialMaxFastElementArray.
// Can't add HBoundsCheck here because otherwise the stub will eager a frame.
HConstant* size_upper_bound = EstablishElementsAllocationSize(
kind, JSArray::kInitialMaxFastElementArray);
elements->set_size_upper_bound(size_upper_bound);
Add<HStoreNamedField>(result, HObjectAccess::ForElementsPointer(), elements);
// The allocation for the cloned array above causes register pressure on
@ -3613,40 +3598,10 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitInternalMapCode() {
HAllocate* HGraphBuilder::JSArrayBuilder::AllocateEmptyArray() {
HConstant* capacity = builder()->Add<HConstant>(initial_capacity());
return AllocateArray(capacity,
capacity,
builder()->graph()->GetConstant0());
}
HAllocate* HGraphBuilder::JSArrayBuilder::AllocateArray(
HValue* capacity,
HConstant* capacity_upper_bound,
HValue* length_field,
FillMode fill_mode) {
return AllocateArray(capacity,
capacity_upper_bound->GetInteger32Constant(),
length_field,
fill_mode);
}
HAllocate* HGraphBuilder::JSArrayBuilder::AllocateArray(
HValue* capacity,
int capacity_upper_bound,
HValue* length_field,
FillMode fill_mode) {
HConstant* elememts_size_upper_bound = capacity->IsInteger32Constant()
? HConstant::cast(capacity)
: builder()->EstablishElementsAllocationSize(kind_, capacity_upper_bound);
HAllocate* array = AllocateArray(capacity, length_field, fill_mode);
if (!elements_location_->has_size_upper_bound()) {
elements_location_->set_size_upper_bound(elememts_size_upper_bound);
}
return array;
}
HAllocate* HGraphBuilder::JSArrayBuilder::AllocateArray(
HValue* capacity,
HValue* length_field,
@ -6369,10 +6324,9 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
HInstruction* heap_number_size = Add<HConstant>(HeapNumber::kSize);
// TODO(hpayer): Allocation site pretenuring support.
HInstruction* heap_number = Add<HAllocate>(heap_number_size,
HType::HeapObject(),
NOT_TENURED,
MUTABLE_HEAP_NUMBER_TYPE);
HInstruction* heap_number =
Add<HAllocate>(heap_number_size, HType::HeapObject(), NOT_TENURED,
MUTABLE_HEAP_NUMBER_TYPE, graph()->GetConstant0());
AddStoreMapConstant(
heap_number, isolate()->factory()->mutable_heap_number_map());
Add<HStoreNamedField>(heap_number, HObjectAccess::ForHeapNumberValue(),
@ -10397,7 +10351,8 @@ HValue* HOptimizedGraphBuilder::BuildAllocateExternalElements(
length = AddUncasted<HForceRepresentation>(length, Representation::Smi());
HValue* elements = Add<HAllocate>(
Add<HConstant>(FixedTypedArrayBase::kHeaderSize), HType::HeapObject(),
NOT_TENURED, external_array_map->instance_type());
NOT_TENURED, external_array_map->instance_type(),
graph()->GetConstant0());
AddStoreMapConstant(elements, external_array_map);
Add<HStoreNamedField>(elements,
@ -10453,9 +10408,9 @@ HValue* HOptimizedGraphBuilder::BuildAllocateFixedTypedArray(
length = AddUncasted<HForceRepresentation>(length, Representation::Smi());
Handle<Map> fixed_typed_array_map(
isolate()->heap()->MapForFixedTypedArray(array_type));
HAllocate* elements =
Add<HAllocate>(total_size, HType::HeapObject(), NOT_TENURED,
fixed_typed_array_map->instance_type());
HAllocate* elements = Add<HAllocate>(
total_size, HType::HeapObject(), NOT_TENURED,
fixed_typed_array_map->instance_type(), graph()->GetConstant0());
#ifndef V8_HOST_ARCH_64_BIT
if (array_type == kExternalFloat64Array) {
@ -12032,8 +11987,9 @@ HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
}
top_info()->dependencies()->AssumeTransitionStable(current_site);
HInstruction* object = Add<HAllocate>(
object_size_constant, type, pretenure_flag, instance_type, top_site);
HInstruction* object =
Add<HAllocate>(object_size_constant, type, pretenure_flag, instance_type,
graph()->GetConstant0(), top_site);
// If allocation folding reaches Page::kMaxRegularHeapObjectSize the
// elements array may not get folded into the object. Hence, we set the
@ -12074,7 +12030,8 @@ HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
InstanceType instance_type = boilerplate_object->HasFastDoubleElements()
? FIXED_DOUBLE_ARRAY_TYPE : FIXED_ARRAY_TYPE;
object_elements = Add<HAllocate>(object_elements_size, HType::HeapObject(),
pretenure_flag, instance_type, top_site);
pretenure_flag, instance_type,
graph()->GetConstant0(), top_site);
BuildEmitElements(boilerplate_object, elements, object_elements,
site_context);
Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
@ -12175,9 +12132,9 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
if (representation.IsDouble()) {
// Allocate a HeapNumber box and store the value into it.
HValue* heap_number_constant = Add<HConstant>(HeapNumber::kSize);
HInstruction* double_box =
Add<HAllocate>(heap_number_constant, HType::HeapObject(),
pretenure_flag, MUTABLE_HEAP_NUMBER_TYPE);
HInstruction* double_box = Add<HAllocate>(
heap_number_constant, HType::HeapObject(), pretenure_flag,
MUTABLE_HEAP_NUMBER_TYPE, graph()->GetConstant0());
AddStoreMapConstant(double_box,
isolate()->factory()->mutable_heap_number_map());
// Unwrap the mutable heap number from the boilerplate.
@ -12991,7 +12948,7 @@ HValue* HOptimizedGraphBuilder::BuildAllocateOrderedHashTable() {
// Allocate the table and add the proper map.
HValue* table =
Add<HAllocate>(Add<HConstant>(kSizeInBytes), HType::HeapObject(),
NOT_TENURED, FIXED_ARRAY_TYPE);
NOT_TENURED, FIXED_ARRAY_TYPE, graph()->GetConstant0());
AddStoreMapConstant(table, isolate()->factory()->ordered_hash_table_map());
// Initialize the FixedArray...


@ -1790,18 +1790,6 @@ class HGraphBuilder {
HAllocate* AllocateArray(HValue* capacity,
HValue* length_field,
FillMode fill_mode = FILL_WITH_HOLE);
// Use these allocators when capacity could be unknown at compile time
// but its limit is known. For constant |capacity| the value of
// |capacity_upper_bound| is ignored and the actual |capacity|
// value is used as an upper bound.
HAllocate* AllocateArray(HValue* capacity,
int capacity_upper_bound,
HValue* length_field,
FillMode fill_mode = FILL_WITH_HOLE);
HAllocate* AllocateArray(HValue* capacity,
HConstant* capacity_upper_bound,
HValue* length_field,
FillMode fill_mode = FILL_WITH_HOLE);
HValue* GetElementsLocation() { return elements_location_; }
HValue* EmitMapCode();


@ -4870,6 +4870,14 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
if (instr->hydrogen()->IsAllocationFoldingDominator()) {
flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
}
if (instr->hydrogen()->IsAllocationFolded()) {
flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDED);
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
CHECK(size <= Page::kMaxRegularHeapObjectSize);
@ -4899,6 +4907,30 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
}
}
void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
DCHECK(instr->hydrogen()->IsAllocationFolded());
Register result = ToRegister(instr->result());
Register temp = ToRegister(instr->temp());
AllocationFlags flags = NO_ALLOCATION_FLAGS;
if (instr->hydrogen()->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
if (instr->hydrogen()->IsOldSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
if (!instr->hydrogen()->IsAllocationFoldingDominator()) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
CHECK(size <= Page::kMaxRegularHeapObjectSize);
__ FastAllocate(size, result, temp, flags);
} else {
Register size = ToRegister(instr->size());
__ FastAllocate(size, result, temp, flags);
}
}
}
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
Register result = ToRegister(instr->result());
@ -4938,6 +4970,22 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
CallRuntimeFromDeferred(
Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, eax);
if (instr->hydrogen()->IsAllocationFoldingDominator()) {
AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
if (instr->hydrogen()->IsOldSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
// If the allocation folding dominator allocate triggered a GC, allocation
// happened in the runtime. We have to reset the top pointer to virtually
// undo the allocation.
ExternalReference allocation_top =
AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
__ sub(eax, Immediate(kHeapObjectTag));
__ mov(Operand::StaticVariable(allocation_top), eax);
__ add(eax, Immediate(kHeapObjectTag));
}
}


@ -2406,14 +2406,19 @@ LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* context = UseAny(instr->context());
LOperand* size = instr->size()->IsConstant()
? UseConstant(instr->size())
: UseTempRegister(instr->size());
LOperand* temp = TempRegister();
LAllocate* result = new(zone()) LAllocate(context, size, temp);
return AssignPointerMap(DefineAsRegister(result));
LOperand* size = instr->size()->IsConstant() ? UseConstant(instr->size())
: UseRegister(instr->size());
if (instr->IsAllocationFolded()) {
LOperand* temp = TempRegister();
LFastAllocate* result = new (zone()) LFastAllocate(size, temp);
return DefineAsRegister(result);
} else {
info()->MarkAsDeferredCalling();
LOperand* context = UseAny(instr->context());
LOperand* temp = TempRegister();
LAllocate* result = new (zone()) LAllocate(context, size, temp);
return AssignPointerMap(DefineAsRegister(result));
}
}


@ -71,6 +71,7 @@ class LCodeGen;
V(Drop) \
V(Dummy) \
V(DummyUse) \
V(FastAllocate) \
V(FlooringDivByConstI) \
V(FlooringDivByPowerOf2I) \
V(FlooringDivI) \
@ -2401,6 +2402,19 @@ class LAllocate final : public LTemplateInstruction<1, 2, 1> {
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
class LFastAllocate final : public LTemplateInstruction<1, 1, 1> {
public:
LFastAllocate(LOperand* size, LOperand* temp) {
inputs_[0] = size;
temps_[0] = temp;
}
LOperand* size() const { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:


@ -5076,6 +5076,15 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
if (instr->hydrogen()->IsAllocationFoldingDominator()) {
flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
}
if (instr->hydrogen()->IsAllocationFolded()) {
flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDED);
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
CHECK(size <= Page::kMaxRegularHeapObjectSize);
@ -5144,6 +5153,50 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
CallRuntimeFromDeferred(
Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(v0, result);
if (instr->hydrogen()->IsAllocationFoldingDominator()) {
AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
if (instr->hydrogen()->IsOldSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
// If the allocation folding dominator allocate triggered a GC, allocation
// happened in the runtime. We have to reset the top pointer to virtually
// undo the allocation.
ExternalReference allocation_top =
AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
Register top_address = scratch0();
__ Subu(v0, v0, Operand(kHeapObjectTag));
__ li(top_address, Operand(allocation_top));
__ sw(v0, MemOperand(top_address));
__ Addu(v0, v0, Operand(kHeapObjectTag));
}
}
void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
DCHECK(instr->hydrogen()->IsAllocationFolded());
Register result = ToRegister(instr->result());
Register scratch1 = ToRegister(instr->temp1());
Register scratch2 = ToRegister(instr->temp2());
AllocationFlags flags = NO_ALLOCATION_FLAGS;
if (instr->hydrogen()->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
if (instr->hydrogen()->IsOldSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
if (!instr->hydrogen()->IsAllocationFoldingDominator()) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
CHECK(size <= Page::kMaxRegularHeapObjectSize);
__ FastAllocate(size, result, scratch1, scratch2, flags);
} else {
Register size = ToRegister(instr->size());
__ FastAllocate(size, result, scratch1, scratch2, flags);
}
}
}


@ -2298,13 +2298,18 @@ LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* context = UseAny(instr->context());
LOperand* size = UseRegisterOrConstant(instr->size());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2);
return AssignPointerMap(DefineAsRegister(result));
if (instr->IsAllocationFolded()) {
LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2);
return DefineAsRegister(result);
} else {
info()->MarkAsDeferredCalling();
LOperand* context = UseAny(instr->context());
LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
return AssignPointerMap(DefineAsRegister(result));
}
}


@ -67,6 +67,7 @@ class LCodeGen;
V(Drop) \
V(Dummy) \
V(DummyUse) \
V(FastAllocate) \
V(FlooringDivByConstI) \
V(FlooringDivByPowerOf2I) \
V(FlooringDivI) \
@ -2350,6 +2351,21 @@ class LAllocate final : public LTemplateInstruction<1, 2, 2> {
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
class LFastAllocate final : public LTemplateInstruction<1, 1, 2> {
public:
LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
inputs_[0] = size;
temps_[0] = temp1;
temps_[1] = temp2;
}
LOperand* size() { return inputs_[0]; }
LOperand* temp1() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:


@ -5280,6 +5280,15 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
if (instr->hydrogen()->IsAllocationFoldingDominator()) {
flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
}
if (instr->hydrogen()->IsAllocationFolded()) {
flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDED);
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
CHECK(size <= Page::kMaxRegularHeapObjectSize);
@ -5350,6 +5359,50 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
CallRuntimeFromDeferred(
Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(v0, result);
if (instr->hydrogen()->IsAllocationFoldingDominator()) {
AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
if (instr->hydrogen()->IsOldSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
// If the allocation folding dominator allocate triggered a GC, allocation
// happened in the runtime. We have to reset the top pointer to virtually
// undo the allocation.
ExternalReference allocation_top =
AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
Register top_address = scratch0();
__ Dsubu(v0, v0, Operand(kHeapObjectTag));
__ li(top_address, Operand(allocation_top));
__ sd(v0, MemOperand(top_address));
__ Daddu(v0, v0, Operand(kHeapObjectTag));
}
}
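
A toy model of the reset above (plain C++, not V8 code; addresses and sizes are invented). The runtime call hands back a tagged pointer to a reservation covering the whole folded group; rewinding the allocation top to its untagged start lets the folded FastAllocate calls that follow carve their objects out of that reservation exactly as they would have on the inline path.

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t kHeapObjectTag = 1;          // matches the tag used above
  const intptr_t reservation_start = 0x1000;  // invented address
  const intptr_t tagged_result = reservation_start + kHeapObjectTag;

  // Deferred path: rewind top to the untagged start of the runtime result.
  intptr_t top = tagged_result - kHeapObjectTag;
  assert(top == reservation_start);

  // Folded FastAllocate calls then bump top object by object.
  const intptr_t tagged_first = top + kHeapObjectTag;
  top += 16;  // invented size of the first folded object
  const intptr_t tagged_second = top + kHeapObjectTag;
  top += 24;  // invented size of the second folded object

  assert(top == reservation_start + 40);  // the group fits the reservation
  (void)tagged_first;
  (void)tagged_second;
  return 0;
}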
void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
DCHECK(instr->hydrogen()->IsAllocationFolded());
Register result = ToRegister(instr->result());
Register scratch1 = ToRegister(instr->temp1());
Register scratch2 = ToRegister(instr->temp2());
AllocationFlags flags = NO_ALLOCATION_FLAGS;
if (instr->hydrogen()->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
if (instr->hydrogen()->IsOldSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
if (!instr->hydrogen()->IsAllocationFoldingDominator()) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
CHECK(size <= Page::kMaxRegularHeapObjectSize);
__ FastAllocate(size, result, scratch1, scratch2, flags);
} else {
Register size = ToRegister(instr->size());
__ FastAllocate(size, result, scratch1, scratch2, flags);
}
}
}

View File

@ -69,6 +69,7 @@ class LCodeGen;
V(Drop) \
V(Dummy) \
V(DummyUse) \
V(FastAllocate) \
V(FlooringDivByConstI) \
V(FlooringDivByPowerOf2I) \
V(FlooringDivI) \
@ -2396,6 +2397,21 @@ class LAllocate final : public LTemplateInstruction<1, 2, 2> {
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
class LFastAllocate final : public LTemplateInstruction<1, 1, 2> {
public:
LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
inputs_[0] = size;
temps_[0] = temp1;
temps_[1] = temp2;
}
LOperand* size() { return inputs_[0]; }
LOperand* temp1() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:

View File

@ -5170,6 +5170,14 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
if (instr->hydrogen()->IsAllocationFoldingDominator()) {
flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
}
if (instr->hydrogen()->IsAllocationFolded()) {
flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDED);
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
CHECK(size <= Page::kMaxRegularHeapObjectSize);
@ -5199,6 +5207,30 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
}
}
void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
DCHECK(instr->hydrogen()->IsAllocationFolded());
Register result = ToRegister(instr->result());
Register temp = ToRegister(instr->temp());
AllocationFlags flags = NO_ALLOCATION_FLAGS;
if (instr->hydrogen()->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
if (instr->hydrogen()->IsOldSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
if (!instr->hydrogen()->IsAllocationFoldingDominator()) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
CHECK(size <= Page::kMaxRegularHeapObjectSize);
__ FastAllocate(size, result, temp, flags);
} else {
Register size = ToRegister(instr->size());
__ FastAllocate(size, result, temp, flags);
}
}
}
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
Register result = ToRegister(instr->result());
@ -5231,6 +5263,22 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
CallRuntimeFromDeferred(
Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, rax);
if (instr->hydrogen()->IsAllocationFoldingDominator()) {
AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
if (instr->hydrogen()->IsOldSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
allocation_flags = static_cast<AllocationFlags>(allocation_flags | PRETENURE);
}
// If the allocation folding dominator allocation triggered a GC, the
// allocation happened in the runtime. We have to reset the top pointer to
// virtually undo the allocation.
ExternalReference allocation_top =
AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
__ subp(rax, Immediate(kHeapObjectTag));
__ Store(allocation_top, rax);
__ addp(rax, Immediate(kHeapObjectTag));
}
}

View File

@ -2408,14 +2408,19 @@ LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* context = UseAny(instr->context());
LOperand* size = instr->size()->IsConstant()
? UseConstant(instr->size())
: UseTempRegister(instr->size());
LOperand* temp = TempRegister();
LAllocate* result = new(zone()) LAllocate(context, size, temp);
return AssignPointerMap(DefineAsRegister(result));
LOperand* size = instr->size()->IsConstant() ? UseConstant(instr->size())
: UseRegister(instr->size());
if (instr->IsAllocationFolded()) {
LOperand* temp = TempRegister();
LFastAllocate* result = new (zone()) LFastAllocate(size, temp);
return DefineAsRegister(result);
} else {
info()->MarkAsDeferredCalling();
LOperand* context = UseAny(instr->context());
LOperand* temp = TempRegister();
LAllocate* result = new (zone()) LAllocate(context, size, temp);
return AssignPointerMap(DefineAsRegister(result));
}
}

View File

@ -67,6 +67,7 @@ class LCodeGen;
V(Drop) \
V(DummyUse) \
V(Dummy) \
V(FastAllocate) \
V(FlooringDivByConstI) \
V(FlooringDivByPowerOf2I) \
V(FlooringDivI) \
@ -2385,6 +2386,19 @@ class LAllocate final : public LTemplateInstruction<1, 2, 1> {
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
class LFastAllocate final : public LTemplateInstruction<1, 1, 1> {
public:
LFastAllocate(LOperand* size, LOperand* temp) {
inputs_[0] = size;
temps_[0] = temp;
}
LOperand* size() const { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:

View File

@ -5419,6 +5419,14 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
if (instr->hydrogen()->IsAllocationFoldingDominator()) {
flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
}
if (instr->hydrogen()->IsAllocationFolded()) {
flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDED);
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
CHECK(size <= Page::kMaxRegularHeapObjectSize);
@ -5487,6 +5495,47 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
CallRuntimeFromDeferred(
Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, eax);
if (instr->hydrogen()->IsAllocationFoldingDominator()) {
AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
if (instr->hydrogen()->IsOldSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
allocation_flags = static_cast<AllocationFlags>(allocation_flags | PRETENURE);
}
// If the allocation folding dominator allocation triggered a GC, the
// allocation happened in the runtime. We have to reset the top pointer to
// virtually undo the allocation.
ExternalReference allocation_top =
AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
__ sub(eax, Immediate(kHeapObjectTag));
__ mov(Operand::StaticVariable(allocation_top), eax);
__ add(eax, Immediate(kHeapObjectTag));
}
}
void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
DCHECK(instr->hydrogen()->IsAllocationFolded());
Register result = ToRegister(instr->result());
Register temp = ToRegister(instr->temp());
AllocationFlags flags = NO_ALLOCATION_FLAGS;
if (instr->hydrogen()->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
if (instr->hydrogen()->IsOldSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
if (!instr->hydrogen()->IsAllocationFoldingDominator()) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
CHECK(size <= Page::kMaxRegularHeapObjectSize);
__ FastAllocate(size, result, temp, flags);
} else {
Register size = ToRegister(instr->size());
__ FastAllocate(size, result, temp, flags);
}
}
}

View File

@ -2399,14 +2399,19 @@ LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* context = UseAny(instr->context());
LOperand* size = instr->size()->IsConstant()
? UseConstant(instr->size())
: UseTempRegister(instr->size());
LOperand* temp = TempRegister();
LAllocate* result = new(zone()) LAllocate(context, size, temp);
return AssignPointerMap(DefineAsRegister(result));
LOperand* size = instr->size()->IsConstant() ? UseConstant(instr->size())
: UseRegister(instr->size());
if (instr->IsAllocationFolded()) {
LOperand* temp = TempRegister();
LFastAllocate* result = new (zone()) LFastAllocate(size, temp);
return DefineAsRegister(result);
} else {
info()->MarkAsDeferredCalling();
LOperand* context = UseAny(instr->context());
LOperand* temp = TempRegister();
LAllocate* result = new (zone()) LAllocate(context, size, temp);
return AssignPointerMap(DefineAsRegister(result));
}
}

View File

@ -72,6 +72,7 @@ class LCodeGen;
V(Drop) \
V(Dummy) \
V(DummyUse) \
V(FastAllocate) \
V(FlooringDivByConstI) \
V(FlooringDivByPowerOf2I) \
V(FlooringDivI) \
@ -153,7 +154,6 @@ class LCodeGen;
V(UnknownOSRValue) \
V(WrapReceiver)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
Opcode opcode() const final { return LInstruction::k##type; } \
void CompileToNative(LCodeGen* generator) final; \
@ -2398,6 +2398,19 @@ class LAllocate final : public LTemplateInstruction<1, 2, 1> {
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
class LFastAllocate final : public LTemplateInstruction<1, 1, 1> {
public:
LFastAllocate(LOperand* size, LOperand* temp) {
inputs_[0] = size;
temps_[0] = temp;
}
LOperand* size() const { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:

View File

@ -2486,7 +2486,8 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
new_node_size - size_in_bytes - linear_size);
owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
new_node->address() + size_in_bytes + linear_size);
} else if (bytes_left >= 0) {
} else {
DCHECK(bytes_left >= 0);
// Normally we give the rest of the node to the allocator as its new
// linear allocation area.
owner_->SetTopAndLimit(new_node->address() + size_in_bytes,

View File

@ -1527,6 +1527,7 @@ void MacroAssembler::Allocate(int object_size,
AllocationFlags flags) {
DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@ -1568,6 +1569,7 @@ void MacroAssembler::Allocate(int object_size,
// Calculate new top and bail out if space is exhausted.
Register top_reg = result_end.is_valid() ? result_end : result;
if (!top_reg.is(result)) {
mov(top_reg, result);
}
@ -1575,8 +1577,10 @@ void MacroAssembler::Allocate(int object_size,
cmp(top_reg, Operand::StaticVariable(allocation_limit));
j(above, gc_required);
// Update allocation top.
UpdateAllocationTopHelper(top_reg, scratch, flags);
if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
// The top pointer is not updated for allocation folding dominators.
UpdateAllocationTopHelper(top_reg, scratch, flags);
}
if (top_reg.is(result)) {
sub(result, Immediate(object_size - kHeapObjectTag));
@ -1598,6 +1602,8 @@ void MacroAssembler::Allocate(int header_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK((flags & SIZE_IN_WORDS) == 0);
DCHECK((flags & ALLOCATION_FOLDING_DOMINATOR) == 0);
DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@ -1649,9 +1655,9 @@ void MacroAssembler::Allocate(int header_size,
} else {
DCHECK(element_count_type == REGISTER_VALUE_IS_INT32);
}
lea(result_end, Operand(element_count, element_size, header_size));
add(result_end, result);
j(carry, gc_required);
cmp(result_end, Operand::StaticVariable(allocation_limit));
j(above, gc_required);
@ -1659,7 +1665,6 @@ void MacroAssembler::Allocate(int header_size,
DCHECK(kHeapObjectTag == 1);
inc(result);
// Update allocation top.
UpdateAllocationTopHelper(result_end, scratch, flags);
}
@ -1671,6 +1676,7 @@ void MacroAssembler::Allocate(Register object_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@ -1714,7 +1720,6 @@ void MacroAssembler::Allocate(Register object_size,
mov(result_end, object_size);
}
add(result_end, result);
j(carry, gc_required);
cmp(result_end, Operand::StaticVariable(allocation_limit));
j(above, gc_required);
@ -1722,8 +1727,58 @@ void MacroAssembler::Allocate(Register object_size,
DCHECK(kHeapObjectTag == 1);
inc(result);
// Update allocation top.
UpdateAllocationTopHelper(result_end, scratch, flags);
if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
// The top pointer is not updated for allocation folding dominators.
UpdateAllocationTopHelper(result_end, scratch, flags);
}
}
void MacroAssembler::FastAllocate(int object_size, Register result,
Register result_end, AllocationFlags flags) {
DCHECK(!result.is(result_end));
// Load address of new object into result.
LoadAllocationTopHelper(result, no_reg, flags);
if ((flags & DOUBLE_ALIGNMENT) != 0) {
DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
Label aligned;
test(result, Immediate(kDoubleAlignmentMask));
j(zero, &aligned, Label::kNear);
mov(Operand(result, 0),
Immediate(isolate()->factory()->one_pointer_filler_map()));
add(result, Immediate(kDoubleSize / 2));
bind(&aligned);
}
lea(result_end, Operand(result, object_size));
UpdateAllocationTopHelper(result_end, no_reg, flags);
DCHECK(kHeapObjectTag == 1);
inc(result);
}
void MacroAssembler::FastAllocate(Register object_size, Register result,
Register result_end, AllocationFlags flags) {
DCHECK(!result.is(result_end));
// Load address of new object into result.
LoadAllocationTopHelper(result, no_reg, flags);
if ((flags & DOUBLE_ALIGNMENT) != 0) {
DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
Label aligned;
test(result, Immediate(kDoubleAlignmentMask));
j(zero, &aligned, Label::kNear);
mov(Operand(result, 0),
Immediate(isolate()->factory()->one_pointer_filler_map()));
add(result, Immediate(kDoubleSize / 2));
bind(&aligned);
}
lea(result_end, Operand(result, object_size, times_1, 0));
UpdateAllocationTopHelper(result_end, no_reg, flags);
DCHECK(kHeapObjectTag == 1);
inc(result);
}
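
For the DOUBLE_ALIGNMENT branch in the two FastAllocate bodies above, a short worked example of the arithmetic (toy C++, not V8 code; the constants mirror ia32's 4-byte pointers and 8-byte doubles): when the loaded top is only pointer-aligned, a one-pointer filler map is written at that word and the allocation start advances by kDoubleSize / 2, so the skipped word stays a valid heap object and the new object starts double-aligned.

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t kDoubleSize = 8;
  const intptr_t kDoubleAlignmentMask = kDoubleSize - 1;

  intptr_t top = 0x2004;  // pointer-aligned but not double-aligned (invented)
  if ((top & kDoubleAlignmentMask) != 0) {
    // The code above writes one_pointer_filler_map at 'top' here, turning the
    // skipped word into a valid one-word filler object.
    top += kDoubleSize / 2;
  }
  assert((top & kDoubleAlignmentMask) == 0);  // object starts double-aligned
  return 0;
}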

View File

@ -640,6 +640,14 @@ class MacroAssembler: public Assembler {
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
// FastAllocate is right now only used for folded allocations. It just
// increments the top pointer without checking against the limit. This can only
// be done if it was proved earlier that the allocation will succeed.
void FastAllocate(int object_size, Register result, Register result_end,
AllocationFlags flags);
void FastAllocate(Register object_size, Register result, Register result_end,
AllocationFlags flags);
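
A rough sketch of how the two entry points compose, assuming the overloads declared above (and that the constant-size Allocate overload mirrors the Register one); the sizes, register choices, and wrapper name below are invented, and the sizes fed to the individual allocations are decided by the Hydrogen allocation-folding pass, which is not part of this hunk. The dominator performs the single limit check for the whole group without moving the top pointer; the FastAllocate calls that follow then bump the top through the reserved range one object at a time.

// Illustrative only (inside v8::internal, with the macro-assembler headers
// included); kFirstSize, kSecondSize, the registers, and this helper are not
// taken from the CL.
void EmitFoldedAllocationGroup(MacroAssembler* masm, Label* gc_required) {
  const int kFirstSize = 16;
  const int kSecondSize = 24;
  const int kGroupSize = kFirstSize + kSecondSize;

  // One limit check covers the whole group; ALLOCATION_FOLDING_DOMINATOR
  // keeps the top pointer untouched, so nothing is committed yet.
  masm->Allocate(kGroupSize, eax, ebx, ecx, gc_required,
                 ALLOCATION_FOLDING_DOMINATOR);

  // Each folded object only bumps the top pointer; no limit check is needed
  // because the dominator already proved that the group fits.
  masm->FastAllocate(kFirstSize, edx, ebx, NO_ALLOCATION_FLAGS);
  masm->FastAllocate(kSecondSize, edi, ebx, NO_ALLOCATION_FLAGS);
}

Because a folded allocation cannot fail, none of the FastAllocate calls needs a gc_required label or a pointer map, which is why the Lithium lowering hunks above emit LFastAllocate without AssignPointerMap.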
// Allocate a heap number in new space with undefined value. The
// register scratch2 can be passed as no_reg; the others must be
// valid registers. Returns tagged pointer in result register, or

View File

@ -29,6 +29,10 @@ enum AllocationFlags {
DOUBLE_ALIGNMENT = 1 << 2,
// Directly allocate in old space
PRETENURE = 1 << 3,
// Allocation folding dominator
ALLOCATION_FOLDING_DOMINATOR = 1 << 4,
// Folded allocation
ALLOCATION_FOLDED = 1 << 5
};
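
A small restatement of how the code-generator hunks in this CL combine the new bits; the helper and its boolean parameters are hypothetical stand-ins for HAllocate::IsAllocationFoldingDominator() and HAllocate::IsAllocationFolded().

// Sketch only; assumes the AllocationFlags enum declared above.
AllocationFlags ComputeAllocationFlags(bool is_folding_dominator,
                                       bool is_folded) {
  AllocationFlags flags = NO_ALLOCATION_FLAGS;
  if (is_folding_dominator) {
    flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
  }
  if (is_folded) {
    flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDED);
  }
  return flags;
}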
#if V8_TARGET_ARCH_IA32

View File

@ -4085,6 +4085,7 @@ void MacroAssembler::Allocate(int object_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@ -4157,7 +4158,11 @@ void MacroAssembler::Allocate(int object_size,
// to calculate the new top.
Addu(result_end, result, Operand(object_size));
Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
sw(result_end, MemOperand(top_address));
if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
// The top pointer is not updated for allocation folding dominators.
sw(result_end, MemOperand(top_address));
}
// Tag object.
Addu(result, result, Operand(kHeapObjectTag));
@ -4167,6 +4172,7 @@ void MacroAssembler::Allocate(int object_size,
void MacroAssembler::Allocate(Register object_size, Register result,
Register result_end, Register scratch,
Label* gc_required, AllocationFlags flags) {
DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@ -4239,6 +4245,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
} else {
Addu(result_end, result, Operand(object_size));
}
Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
// Update allocation top. result temporarily holds the new top.
@ -4246,12 +4253,104 @@ void MacroAssembler::Allocate(Register object_size, Register result,
And(alloc_limit, result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace, alloc_limit, Operand(zero_reg));
}
sw(result_end, MemOperand(top_address));
if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
// The top pointer is not updated for allocation folding dominators.
sw(result_end, MemOperand(top_address));
}
// Tag object.
Addu(result, result, Operand(kHeapObjectTag));
}
void MacroAssembler::FastAllocate(int object_size, Register result,
Register scratch1, Register scratch2,
AllocationFlags flags) {
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
DCHECK(!AreAliased(result, scratch1, scratch2, t9, at));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
object_size *= kPointerSize;
}
DCHECK_EQ(0, object_size & kObjectAlignmentMask);
ExternalReference allocation_top =
AllocationUtils::GetAllocationTopReference(isolate(), flags);
// Set up allocation top address and allocation limit registers.
Register top_address = scratch1;
// This code stores a temporary value in t9.
Register result_end = scratch2;
li(top_address, Operand(allocation_top));
lw(result, MemOperand(top_address));
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
And(result_end, result, Operand(kDoubleAlignmentMask));
Label aligned;
Branch(&aligned, eq, result_end, Operand(zero_reg));
li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
sw(result_end, MemOperand(result));
Addu(result, result, Operand(kDoubleSize / 2));
bind(&aligned);
}
Addu(result_end, result, Operand(object_size));
// Update allocation top.
sw(result_end, MemOperand(top_address));
Addu(result, result, Operand(kHeapObjectTag));
}
void MacroAssembler::FastAllocate(Register object_size, Register result,
Register result_end, Register scratch,
AllocationFlags flags) {
// |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
// is not specified. Other registers must not overlap.
DCHECK(!AreAliased(object_size, result, scratch, t9, at));
DCHECK(!AreAliased(result_end, result, scratch, t9, at));
DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
ExternalReference allocation_top =
AllocationUtils::GetAllocationTopReference(isolate(), flags);
// Set up allocation top address and allocation limit registers.
Register top_address = scratch;
// This code stores a temporary value in t9.
li(top_address, Operand(allocation_top));
lw(result, MemOperand(top_address));
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
And(result_end, result, Operand(kDoubleAlignmentMask));
Label aligned;
Branch(&aligned, eq, result_end, Operand(zero_reg));
li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
sw(result_end, MemOperand(result));
Addu(result, result, Operand(kDoubleSize / 2));
bind(&aligned);
}
// Calculate new top using result. Object size may be in words so a shift is
// required to get the number of bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
Lsa(result_end, result, object_size, kPointerSizeLog2);
} else {
Addu(result_end, result, Operand(object_size));
}
// Update allocation top.
sw(result_end, MemOperand(top_address));
Addu(result, result, Operand(kHeapObjectTag));
}
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,

View File

@ -555,6 +555,15 @@ class MacroAssembler: public Assembler {
void Allocate(Register object_size, Register result, Register result_new,
Register scratch, Label* gc_required, AllocationFlags flags);
// FastAllocate is right now only used for folded allocations. It just
// increments the top pointer without checking against the limit. This can only
// be done if it was proved earlier that the allocation will succeed.
void FastAllocate(int object_size, Register result, Register scratch1,
Register scratch2, AllocationFlags flags);
void FastAllocate(Register object_size, Register result, Register result_new,
Register scratch, AllocationFlags flags);
void AllocateTwoByteString(Register result,
Register length,
Register scratch1,

View File

@ -4330,7 +4330,11 @@ void MacroAssembler::Allocate(int object_size,
// to calculate the new top.
Daddu(result_end, result, Operand(object_size));
Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
sd(result_end, MemOperand(top_address));
if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
// The top pointer is not updated for allocation folding dominators.
sd(result_end, MemOperand(top_address));
}
// Tag object.
Daddu(result, result, Operand(kHeapObjectTag));
@ -4403,6 +4407,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
} else {
Daddu(result_end, result, Operand(object_size));
}
Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
// Update allocation top. result temporarily holds the new top.
@ -4410,12 +4415,91 @@ void MacroAssembler::Allocate(Register object_size, Register result,
And(at, result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace, at, Operand(zero_reg));
}
sd(result_end, MemOperand(top_address));
if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
// The top pointer is not updated for allocation folding dominators.
sd(result_end, MemOperand(top_address));
}
// Tag object.
Daddu(result, result, Operand(kHeapObjectTag));
}
void MacroAssembler::FastAllocate(int object_size, Register result,
Register scratch1, Register scratch2,
AllocationFlags flags) {
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
DCHECK(!AreAliased(result, scratch1, scratch2, at));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
object_size *= kPointerSize;
}
DCHECK(0 == (object_size & kObjectAlignmentMask));
ExternalReference allocation_top =
AllocationUtils::GetAllocationTopReference(isolate(), flags);
Register top_address = scratch1;
Register result_end = scratch2;
li(top_address, Operand(allocation_top));
ld(result, MemOperand(top_address));
// We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
// the same alignment on MIPS64.
STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
if (emit_debug_code()) {
And(at, result, Operand(kDoubleAlignmentMask));
Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
}
// Calculate new top and write it back.
Daddu(result_end, result, Operand(object_size));
sd(result_end, MemOperand(top_address));
Daddu(result, result, Operand(kHeapObjectTag));
}
void MacroAssembler::FastAllocate(Register object_size, Register result,
Register result_end, Register scratch,
AllocationFlags flags) {
// |object_size| and |result_end| may overlap, other registers must not.
DCHECK(!AreAliased(object_size, result, scratch, at));
DCHECK(!AreAliased(result_end, result, scratch, at));
ExternalReference allocation_top =
AllocationUtils::GetAllocationTopReference(isolate(), flags);
// Set up allocation top address and object size registers.
Register top_address = scratch;
li(top_address, Operand(allocation_top));
ld(result, MemOperand(top_address));
// We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
// the same alignment on MIPS64.
STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
if (emit_debug_code()) {
And(at, result, Operand(kDoubleAlignmentMask));
Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
}
// Calculate new top and write it back
if ((flags & SIZE_IN_WORDS) != 0) {
Dlsa(result_end, result, object_size, kPointerSizeLog2);
} else {
Daddu(result_end, result, Operand(object_size));
}
// Update allocation top. result_end holds the new top.
if (emit_debug_code()) {
And(at, result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace, at, Operand(zero_reg));
}
sd(result_end, MemOperand(top_address));
Daddu(result, result, Operand(kHeapObjectTag));
}
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,

View File

@ -587,6 +587,15 @@ class MacroAssembler: public Assembler {
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
// FastAllocate is right now only used for folded allocations. It just
// increments the top pointer without checking against the limit. This can only
// be done if it was proved earlier that the allocation will succeed.
void FastAllocate(int object_size, Register result, Register scratch1,
Register scratch2, AllocationFlags flags);
void FastAllocate(Register object_size, Register result, Register result_new,
Register scratch, AllocationFlags flags);
void AllocateTwoByteString(Register result,
Register length,
Register scratch1,

View File

@ -4830,7 +4830,7 @@ void MacroAssembler::MakeSureDoubleAlignedHelper(Register result,
Label aligned;
testl(result, Immediate(kDoubleAlignmentMask));
j(zero, &aligned, Label::kNear);
if ((flags & PRETENURE) != 0) {
if (((flags & ALLOCATION_FOLDED) == 0) && ((flags & PRETENURE) != 0)) {
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
cmpp(result, ExternalOperand(allocation_limit));
@ -4873,6 +4873,7 @@ void MacroAssembler::Allocate(int object_size,
AllocationFlags flags) {
DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@ -4910,8 +4911,10 @@ void MacroAssembler::Allocate(int object_size,
cmpp(top_reg, limit_operand);
j(above, gc_required);
// Update allocation top.
UpdateAllocationTopHelper(top_reg, scratch, flags);
if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
// The top pointer is not updated for allocation folding dominators.
UpdateAllocationTopHelper(top_reg, scratch, flags);
}
if (top_reg.is(result)) {
subp(result, Immediate(object_size - kHeapObjectTag));
@ -4932,6 +4935,8 @@ void MacroAssembler::Allocate(int header_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK((flags & SIZE_IN_WORDS) == 0);
DCHECK((flags & ALLOCATION_FOLDING_DOMINATOR) == 0);
DCHECK((flags & ALLOCATION_FOLDED) == 0);
leap(result_end, Operand(element_count, element_size, header_size));
Allocate(result_end, result, result_end, scratch, gc_required, flags);
}
@ -4944,6 +4949,7 @@ void MacroAssembler::Allocate(Register object_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK((flags & SIZE_IN_WORDS) == 0);
DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@ -4966,25 +4972,58 @@ void MacroAssembler::Allocate(Register object_size,
MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
}
// Calculate new top and bail out if new space is exhausted.
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
if (!object_size.is(result_end)) {
movp(result_end, object_size);
}
addp(result_end, result);
j(carry, gc_required);
Operand limit_operand = ExternalOperand(allocation_limit);
cmpp(result_end, limit_operand);
j(above, gc_required);
// Update allocation top.
UpdateAllocationTopHelper(result_end, scratch, flags);
if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
// The top pointer is not updated for allocation folding dominators.
UpdateAllocationTopHelper(result_end, scratch, flags);
}
// Tag the result.
addp(result, Immediate(kHeapObjectTag));
}
void MacroAssembler::FastAllocate(int object_size, Register result,
Register result_end, AllocationFlags flags) {
DCHECK(!result.is(result_end));
// Load address of new object into result.
LoadAllocationTopHelper(result, no_reg, flags);
if ((flags & DOUBLE_ALIGNMENT) != 0) {
MakeSureDoubleAlignedHelper(result, no_reg, NULL, flags);
}
leap(result_end, Operand(result, object_size));
UpdateAllocationTopHelper(result_end, no_reg, flags);
addp(result, Immediate(kHeapObjectTag));
}
void MacroAssembler::FastAllocate(Register object_size, Register result,
Register result_end, AllocationFlags flags) {
DCHECK(!result.is(result_end));
// Load address of new object into result.
LoadAllocationTopHelper(result, no_reg, flags);
if ((flags & DOUBLE_ALIGNMENT) != 0) {
MakeSureDoubleAlignedHelper(result, no_reg, NULL, flags);
}
leap(result_end, Operand(result, object_size, times_1, 0));
UpdateAllocationTopHelper(result_end, no_reg, flags);
addp(result, Immediate(kHeapObjectTag));
}
void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch,

View File

@ -1307,6 +1307,15 @@ class MacroAssembler: public Assembler {
Label* gc_required,
AllocationFlags flags);
// FastAllocate is right now only used for folded allocations. It just
// increments the top pointer without checking against the limit. This can only
// be done if it was proved earlier that the allocation will succeed.
void FastAllocate(int object_size, Register result, Register result_end,
AllocationFlags flags);
void FastAllocate(Register object_size, Register result, Register result_end,
AllocationFlags flags);
// Allocate a heap number in new space with undefined value. Returns
// tagged pointer in result register, or jumps to gc_required if new
// space is full.

View File

@ -1469,6 +1469,7 @@ void MacroAssembler::Allocate(int object_size,
AllocationFlags flags) {
DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@ -1510,6 +1511,7 @@ void MacroAssembler::Allocate(int object_size,
// Calculate new top and bail out if space is exhausted.
Register top_reg = result_end.is_valid() ? result_end : result;
if (!top_reg.is(result)) {
mov(top_reg, result);
}
@ -1517,8 +1519,10 @@ void MacroAssembler::Allocate(int object_size,
cmp(top_reg, Operand::StaticVariable(allocation_limit));
j(above, gc_required);
// Update allocation top.
UpdateAllocationTopHelper(top_reg, scratch, flags);
if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
// The top pointer is not updated for allocation folding dominators.
UpdateAllocationTopHelper(top_reg, scratch, flags);
}
if (top_reg.is(result)) {
sub(result, Immediate(object_size - kHeapObjectTag));
@ -1540,6 +1544,8 @@ void MacroAssembler::Allocate(int header_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK((flags & SIZE_IN_WORDS) == 0);
DCHECK((flags & ALLOCATION_FOLDING_DOMINATOR) == 0);
DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@ -1605,7 +1611,6 @@ void MacroAssembler::Allocate(int header_size,
UpdateAllocationTopHelper(result_end, scratch, flags);
}
void MacroAssembler::Allocate(Register object_size,
Register result,
Register result_end,
@ -1613,6 +1618,7 @@ void MacroAssembler::Allocate(Register object_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@ -1656,7 +1662,6 @@ void MacroAssembler::Allocate(Register object_size,
mov(result_end, object_size);
}
add(result_end, result);
j(carry, gc_required);
cmp(result_end, Operand::StaticVariable(allocation_limit));
j(above, gc_required);
@ -1664,10 +1669,59 @@ void MacroAssembler::Allocate(Register object_size,
DCHECK(kHeapObjectTag == 1);
inc(result);
// Update allocation top.
UpdateAllocationTopHelper(result_end, scratch, flags);
if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
// The top pointer is not updated for allocation folding dominators.
UpdateAllocationTopHelper(result_end, scratch, flags);
}
}
void MacroAssembler::FastAllocate(int object_size, Register result,
Register result_end, AllocationFlags flags) {
DCHECK(!result.is(result_end));
// Load address of new object into result.
LoadAllocationTopHelper(result, no_reg, flags);
if ((flags & DOUBLE_ALIGNMENT) != 0) {
DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
Label aligned;
test(result, Immediate(kDoubleAlignmentMask));
j(zero, &aligned, Label::kNear);
mov(Operand(result, 0),
Immediate(isolate()->factory()->one_pointer_filler_map()));
add(result, Immediate(kDoubleSize / 2));
bind(&aligned);
}
lea(result_end, Operand(result, object_size));
UpdateAllocationTopHelper(result_end, no_reg, flags);
DCHECK(kHeapObjectTag == 1);
inc(result);
}
void MacroAssembler::FastAllocate(Register object_size, Register result,
Register result_end, AllocationFlags flags) {
DCHECK(!result.is(result_end));
// Load address of new object into result.
LoadAllocationTopHelper(result, no_reg, flags);
if ((flags & DOUBLE_ALIGNMENT) != 0) {
DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
Label aligned;
test(result, Immediate(kDoubleAlignmentMask));
j(zero, &aligned, Label::kNear);
mov(Operand(result, 0),
Immediate(isolate()->factory()->one_pointer_filler_map()));
add(result, Immediate(kDoubleSize / 2));
bind(&aligned);
}
lea(result_end, Operand(result, object_size, times_1, 0));
UpdateAllocationTopHelper(result_end, no_reg, flags);
DCHECK(kHeapObjectTag == 1);
inc(result);
}
void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch1,

View File

@ -629,6 +629,11 @@ class MacroAssembler: public Assembler {
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
void FastAllocate(int object_size, Register result, Register result_end,
AllocationFlags flags);
void FastAllocate(Register object_size, Register result, Register result_end,
AllocationFlags flags);
// Allocate a heap number in new space with undefined value. The
// register scratch2 can be passed as no_reg; the others must be
// valid registers. Returns tagged pointer in result register, or