S390: [crankshaft] Fragmentation-free allocation folding.

Port 61f5fbbb19
Port 6e15433db4

Original commit message:

      The new allocation folding implementation avoids fragmentation between folded
      allocations. As a consequence, our heap will always be iterable, i.e., we do not
      have to perform a garbage collection before iterating the heap.

R=hpayer@chromium.org, joransiu@ca.ibm.com, bjaideep@ca.ibm.com, michael_dawson@ca.ibm.com, mbrandy@us.ibm.com
BUG=

Review-Url: https://codereview.chromium.org/1973883003
Cr-Commit-Position: refs/heads/master@{#36243}
jyan 2016-05-13 06:44:12 -07:00 committed by Commit bot
parent cc340be945
commit 41dbaefb4a
5 changed files with 197 additions and 7 deletions
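
For orientation, the sketch below is a deliberately loose, self-contained C++ model of the scheme this port implements (hypothetical names, not V8 code or layout): the folding dominator checks the allocation limit once for the combined size of the whole folded group without moving the top pointer, and each folded allocation then claims its object with a plain bump of top, so the objects sit back to back with no filler gaps and the heap remains iterable.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Toy bump-pointer space standing in for a V8 new/old space (hypothetical).
    struct Space {
      uintptr_t top;
      uintptr_t limit;
    };

    // Dominator: check the limit once for the combined size of the folded group,
    // but do not move 'top'. Returns the group's base, or 0 where the real code
    // would fall back to the deferred runtime path.
    uintptr_t AllocateDominator(Space* s, size_t group_size) {
      if (s->top + group_size > s->limit) return 0;
      return s->top;  // top is left untouched for the folded allocations
    }

    // Folded allocation: no limit check needed, the dominator already proved the
    // whole group fits; just bump 'top' past this object.
    uintptr_t FastAllocate(Space* s, size_t object_size) {
      uintptr_t result = s->top;
      s->top += object_size;
      return result;
    }

    int main() {
      Space space{0x1000, 0x2000};
      // One dominator reserving 48 bytes for three folded 16-byte objects.
      assert(AllocateDominator(&space, 48) == 0x1000);
      uintptr_t a = FastAllocate(&space, 16);
      uintptr_t b = FastAllocate(&space, 16);
      uintptr_t c = FastAllocate(&space, 16);
      // Back-to-back objects, no gaps: a heap iterator can walk them directly.
      assert(a == 0x1000 && b == 0x1010 && c == 0x1020 && space.top == 0x1030);
      return 0;
    }

The LFastAllocate/MacroAssembler::FastAllocate additions in the diff below are the real s390 counterparts of the bump step; the dominator side is the ALLOCATION_FOLDING_DOMINATOR handling in Allocate.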


@@ -5257,6 +5257,12 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
if (instr->hydrogen()->IsAllocationFoldingDominator()) {
flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
}
DCHECK(!instr->hydrogen()->IsAllocationFolded());
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
CHECK(size <= Page::kMaxRegularHeapObjectSize);
@@ -5335,6 +5341,49 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
instr->context());
__ StoreToSafepointRegisterSlot(r2, result);
if (instr->hydrogen()->IsAllocationFoldingDominator()) {
AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
if (instr->hydrogen()->IsOldSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
allocation_flags = static_cast<AllocationFlags>(allocation_flags | PRETENURE);
}
// If the allocation folding dominator allocation triggered a GC, allocation
// happened in the runtime. We have to reset the top pointer to virtually
// undo the allocation.
ExternalReference allocation_top =
AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
Register top_address = scratch0();
__ SubP(r2, r2, Operand(kHeapObjectTag));
__ mov(top_address, Operand(allocation_top));
__ StoreP(r2, MemOperand(top_address));
__ AddP(r2, r2, Operand(kHeapObjectTag));
}
}
void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
DCHECK(instr->hydrogen()->IsAllocationFolded());
DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
Register result = ToRegister(instr->result());
Register scratch1 = ToRegister(instr->temp1());
Register scratch2 = ToRegister(instr->temp2());
AllocationFlags flags = ALLOCATION_FOLDED;
if (instr->hydrogen()->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
if (instr->hydrogen()->IsOldSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
CHECK(size <= Page::kMaxRegularHeapObjectSize);
__ FastAllocate(size, result, scratch1, scratch2, flags);
} else {
Register size = ToRegister(instr->size());
__ FastAllocate(size, result, scratch1, scratch2, flags);
}
}
void LCodeGen::DoTypeof(LTypeof* instr) {
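
The deferred block added to DoDeferredAllocate above is the subtle part of the dominator side: if the inline allocation fails and Runtime::kAllocateInTargetSpace allocates the block instead, the generated code rewinds the top pointer to the untagged start of that block, so the folded FastAllocates that follow still carve their objects out of it. A rough, self-contained sketch of that bookkeeping (hypothetical names, not V8 API):

    #include <cassert>
    #include <cstdint>

    // Toy model of the deferred (runtime) path for a folding dominator.
    constexpr uintptr_t kHeapObjectTag = 1;  // V8 tags heap object pointers with 1

    struct Space { uintptr_t top; uintptr_t limit; };

    // Stand-in for the runtime allocation: returns a tagged pointer to a fresh
    // block of 'size' bytes and bumps top past it.
    uintptr_t RuntimeAllocate(Space* s, uintptr_t size) {
      uintptr_t block = s->top;
      s->top += size;
      return block + kHeapObjectTag;
    }

    int main() {
      Space space{0x1000, 0x2000};
      // The dominator reserved 48 bytes but the inline path failed, so the
      // runtime allocated the whole block...
      uintptr_t result = RuntimeAllocate(&space, 48);
      // ...and the deferred code "virtually undoes" it, exactly like the
      // SubP/StoreP/AddP sequence above: untag, store as the new top, retag.
      space.top = result - kHeapObjectTag;
      assert(space.top == 0x1000);
      // The folded allocations can now bump top through [0x1000, 0x1030).
      return 0;
    }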


@@ -2124,13 +2124,18 @@ LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
}
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* context = UseAny(instr->context());
LOperand* size = UseRegisterOrConstant(instr->size());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
return AssignPointerMap(DefineAsRegister(result));
if (instr->IsAllocationFolded()) {
LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2);
return DefineAsRegister(result);
} else {
info()->MarkAsDeferredCalling();
LOperand* context = UseAny(instr->context());
LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
return AssignPointerMap(DefineAsRegister(result));
}
}
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {


@@ -67,6 +67,7 @@ class LCodeGen;
V(Drop) \
V(Dummy) \
V(DummyUse) \
V(FastAllocate) \
V(FlooringDivByConstI) \
V(FlooringDivByPowerOf2I) \
V(FlooringDivI) \
@@ -2165,6 +2166,22 @@ class LAllocate final : public LTemplateInstruction<1, 2, 2> {
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
class LFastAllocate final : public LTemplateInstruction<1, 1, 2> {
public:
LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
inputs_[0] = size;
temps_[0] = temp1;
temps_[1] = temp2;
}
LOperand* size() { return inputs_[0]; }
LOperand* temp1() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:
LTypeof(LOperand* context, LOperand* value) {


@@ -1706,6 +1706,7 @@ void MacroAssembler::Allocate(int object_size, Register result,
Register scratch1, Register scratch2,
Label* gc_required, AllocationFlags flags) {
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1767,7 +1768,7 @@ void MacroAssembler::Allocate(int object_size, Register result,
STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
AndP(result_end, result, Operand(kDoubleAlignmentMask));
Label aligned;
beq(&aligned);
beq(&aligned, Label::kNear);
if ((flags & PRETENURE) != 0) {
CmpLogicalP(result, alloc_limit);
bge(gc_required);
@@ -1792,7 +1793,11 @@ void MacroAssembler::Allocate(int object_size, Register result,
blt(gc_required);
AddP(result_end, result, result_end);
}
StoreP(result_end, MemOperand(top_address));
if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
// The top pointer is not updated for allocation folding dominators.
StoreP(result_end, MemOperand(top_address));
}
// Tag object.
AddP(result, result, Operand(kHeapObjectTag));
@@ -1801,6 +1806,7 @@ void MacroAssembler::Allocate(int object_size, Register result,
void MacroAssembler::Allocate(Register object_size, Register result,
Register result_end, Register scratch,
Label* gc_required, AllocationFlags flags) {
DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1858,7 +1864,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
AndP(result_end, result, Operand(kDoubleAlignmentMask));
Label aligned;
beq(&aligned);
beq(&aligned, Label::kNear);
if ((flags & PRETENURE) != 0) {
CmpLogicalP(result, alloc_limit);
bge(gc_required);
@@ -1890,6 +1896,110 @@ void MacroAssembler::Allocate(Register object_size, Register result,
AndP(r0, result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace, cr0);
}
if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
// The top pointer is not updated for allocation folding dominators.
StoreP(result_end, MemOperand(top_address));
}
// Tag object.
AddP(result, result, Operand(kHeapObjectTag));
}
void MacroAssembler::FastAllocate(Register object_size, Register result,
Register result_end, Register scratch,
AllocationFlags flags) {
// |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
// is not specified. Other registers must not overlap.
DCHECK(!AreAliased(object_size, result, scratch, ip));
DCHECK(!AreAliased(result_end, result, scratch, ip));
DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
ExternalReference allocation_top =
AllocationUtils::GetAllocationTopReference(isolate(), flags);
Register top_address = scratch;
mov(top_address, Operand(allocation_top));
LoadP(result, MemOperand(top_address));
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
#if V8_TARGET_ARCH_S390X
STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
#else
DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
AndP(result_end, result, Operand(kDoubleAlignmentMask));
Label aligned;
beq(&aligned, Label::kNear);
mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
StoreW(result_end, MemOperand(result));
AddP(result, result, Operand(kDoubleSize / 2));
bind(&aligned);
#endif
}
// Calculate new top using result. Object size may be in words so a shift is
// required to get the number of bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
ShiftLeftP(result_end, object_size, Operand(kPointerSizeLog2));
AddP(result_end, result, result_end);
} else {
AddP(result_end, result, object_size);
}
// Update allocation top. result temporarily holds the new top.
if (emit_debug_code()) {
AndP(r0, result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace, cr0);
}
StoreP(result_end, MemOperand(top_address));
// Tag object.
AddP(result, result, Operand(kHeapObjectTag));
}
void MacroAssembler::FastAllocate(int object_size, Register result,
Register scratch1, Register scratch2,
AllocationFlags flags) {
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
DCHECK(!AreAliased(result, scratch1, scratch2, ip));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
object_size *= kPointerSize;
}
DCHECK_EQ(0, object_size & kObjectAlignmentMask);
ExternalReference allocation_top =
AllocationUtils::GetAllocationTopReference(isolate(), flags);
// Set up allocation top address register.
Register top_address = scratch1;
Register result_end = scratch2;
mov(top_address, Operand(allocation_top));
LoadP(result, MemOperand(top_address));
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
#if V8_TARGET_ARCH_S390X
STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
#else
DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
AndP(result_end, result, Operand(kDoubleAlignmentMask));
Label aligned;
beq(&aligned, Label::kNear);
mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
StoreW(result_end, MemOperand(result));
AddP(result, result, Operand(kDoubleSize / 2));
bind(&aligned);
#endif
}
// Calculate new top using result.
AddP(result_end, result, Operand(object_size));
// The top pointer is not updated for allocation folding dominators.
StoreP(result_end, MemOperand(top_address));
// Tag object.
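
One more detail worth calling out from the DOUBLE_ALIGNMENT blocks above: when kPointerAlignment is half of kDoubleAlignment (the non-S390X build), the code does not simply skip a word to reach an 8-byte boundary; it stores the one-pointer filler map into the gap so a heap iterator still sees a valid object there. A self-contained sketch of that idea (hypothetical constants and layout, not V8's):

    #include <cassert>
    #include <cstdint>

    // Toy model of the 32-bit double-alignment path: instead of leaving an
    // untracked 4-byte hole before an 8-byte-aligned object, a one-word filler
    // "object" is written into the gap so the heap stays iterable.
    constexpr uint32_t kFillerMarker = 0xFEEDFACE;  // stands in for the filler map
    constexpr uintptr_t kWordSize = 4;
    constexpr uintptr_t kDoubleAlignment = 8;

    // Aligns the word-indexed top; if a word is skipped, plug the gap with a
    // walkable filler instead of leaving garbage behind.
    uintptr_t AlignForDouble(uint32_t* heap, uintptr_t top_index) {
      if ((top_index * kWordSize) % kDoubleAlignment != 0) {
        heap[top_index] = kFillerMarker;  // filler object fills the hole
        top_index += 1;                   // allocation starts one word later
      }
      return top_index;
    }

    int main() {
      uint32_t heap[8] = {0};
      // Top at word 1 (byte offset 4): not double-aligned, so a filler goes in.
      uintptr_t top = AlignForDouble(heap, 1);
      assert(top == 2 && heap[1] == kFillerMarker);
      // Top at word 2 (byte offset 8): already aligned, nothing is written.
      assert(AlignForDouble(heap, 2) == 2 && heap[2] == 0);
      return 0;
    }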


@@ -965,6 +965,15 @@ class MacroAssembler : public Assembler {
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
// FastAllocate is currently only used for folded allocations. It just
// increments the top pointer without checking against the limit. This can
// only be done if it was proved earlier that the allocation will succeed.
void FastAllocate(int object_size, Register result, Register scratch1,
Register scratch2, AllocationFlags flags);
void FastAllocate(Register object_size, Register result, Register result_end,
Register scratch, AllocationFlags flags);
void AllocateTwoByteString(Register result, Register length,
Register scratch1, Register scratch2,
Register scratch3, Label* gc_required);