Revert of Clean up aligned allocation code in preparation for SIMD alignments. (patchset #14 id:300001 of https://codereview.chromium.org/1150593003/)

Reason for revert:
Breaks the mjsunit, webkit, mozilla, and benchmarks test suites.

TBR=hpayer@chromium.org

Original issue's description:
> Clean up aligned allocation code in preparation for SIMD alignments.
>
> Moves alignment fill calculations into two static Heap methods.
> Adds a Heap method to handle the complex case where filler is potentially needed before and after a heap object.
> Makes DoubleAlignForDeserialization explicitly fill after an already
> aligned object.
>
> LOG=N
> BUG=v8:4124
>
> Committed: https://crrev.com/fcfb080eb9a637f0ae066bed4c45095e60df8a84
> Cr-Commit-Position: refs/heads/master@{#28687}

TBR=hpayer@chromium.org,bmeurer@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=v8:4124

Review URL: https://codereview.chromium.org/1159123002

Cr-Commit-Position: refs/heads/master@{#28688}
Author: bbudge, 2015-05-28 11:17:44 -07:00 (committed by Commit bot)
Parent: fcfb080eb9
Commit: 3ee926e2a6
6 changed files with 80 additions and 245 deletions
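
The reverted CL replaced the restored one-word over-allocation scheme (see the diffs below) with explicit fill calculations. A minimal standalone sketch of that fill rule, using illustrative constants for a 32-bit build rather than the real V8 headers:

#include <assert.h>
#include <stdint.h>

// Illustrative stand-ins for a 32-bit build, where doubles want 8-byte
// alignment but the allocator only guarantees pointer (4-byte) alignment.
const int kPointerSize = 4;
const int kDoubleSize = 8;
const uintptr_t kDoubleAlignmentMask = 8 - 1;

enum Alignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };

// Mirrors the intent of the reverted Heap::GetFillToAlign: how many bytes of
// filler must precede an object placed at `address` to satisfy `alignment`.
int FillToAlign(uintptr_t address, Alignment alignment) {
  if (alignment == kDoubleAligned && (address & kDoubleAlignmentMask) != 0)
    return kPointerSize;
  if (alignment == kDoubleUnaligned && (address & kDoubleAlignmentMask) == 0)
    return kDoubleSize - kPointerSize;
  return 0;  // kWordAligned, or the address already satisfies the request.
}

int main() {
  assert(FillToAlign(0x1000, kDoubleAligned) == 0);    // already aligned
  assert(FillToAlign(0x1004, kDoubleAligned) == 4);    // one word of filler
  assert(FillToAlign(0x1000, kDoubleUnaligned) == 4);  // mirror image
  assert(FillToAlign(0x1004, kDoubleUnaligned) == 0);
  return 0;
}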

@@ -1986,54 +1986,31 @@ STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) !=
#endif
int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
switch (alignment) {
case kWordAligned:
return 0;
case kDoubleAligned:
case kDoubleUnaligned:
return kDoubleSize - kPointerSize;
default:
UNREACHABLE();
HeapObject* Heap::EnsureAligned(HeapObject* object, int size,
AllocationAlignment alignment) {
if (alignment == kDoubleAligned &&
(OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
CreateFillerObjectAt(object->address(), kPointerSize);
return HeapObject::FromAddress(object->address() + kPointerSize);
} else if (alignment == kDoubleUnaligned &&
(OffsetFrom(object->address()) & kDoubleAlignmentMask) == 0) {
CreateFillerObjectAt(object->address(), kPointerSize);
return HeapObject::FromAddress(object->address() + kPointerSize);
} else {
CreateFillerObjectAt(object->address() + size - kPointerSize, kPointerSize);
return object;
}
return 0;
}
int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
intptr_t offset = OffsetFrom(address);
if (alignment == kDoubleAligned && (offset & kDoubleAlignmentMask) != 0)
return kPointerSize;
if (alignment == kDoubleUnaligned && (offset & kDoubleAlignmentMask) == 0)
return kDoubleSize - kPointerSize; // No fill if double is always aligned.
return 0;
}
HeapObject* Heap::PrecedeWithFiller(HeapObject* object, int filler_size) {
CreateFillerObjectAt(object->address(), filler_size);
return HeapObject::FromAddress(object->address() + filler_size);
}
HeapObject* Heap::AlignWithFiller(HeapObject* object, int object_size,
int allocation_size,
AllocationAlignment alignment) {
int filler_size = allocation_size - object_size;
DCHECK(filler_size > 0);
int pre_filler = GetFillToAlign(object->address(), alignment);
if (pre_filler) {
object = PrecedeWithFiller(object, pre_filler);
filler_size -= pre_filler;
}
if (filler_size)
CreateFillerObjectAt(object->address() + object_size, filler_size);
return object;
HeapObject* Heap::PrecedeWithFiller(HeapObject* object) {
CreateFillerObjectAt(object->address(), kPointerSize);
return HeapObject::FromAddress(object->address() + kPointerSize);
}
HeapObject* Heap::DoubleAlignForDeserialization(HeapObject* object, int size) {
return AlignWithFiller(object, size, size + kPointerSize, kDoubleAligned);
return EnsureAligned(object, size, kDoubleAligned);
}
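
For contrast, a simplified model of the restored Heap::EnsureAligned contract above: the caller has over-allocated by exactly one pointer word, and that word becomes filler either in front of a misaligned object or behind an already aligned one. All names here are illustrative stand-ins, not the real Heap interface:

#include <assert.h>
#include <stdint.h>

const int kPointerSize = 4;
const uintptr_t kDoubleAlignmentMask = 8 - 1;

// Stand-in for Heap::CreateFillerObjectAt: mark [address, address + size) as
// a free-space object so the heap stays iterable.
void WriteFiller(uintptr_t address, int size) { (void)address; (void)size; }

// `size` already includes the one extra word the caller reserved.
uintptr_t EnsureDoubleAligned(uintptr_t object, int size) {
  if ((object & kDoubleAlignmentMask) != 0) {
    WriteFiller(object, kPointerSize);                      // filler first
    return object + kPointerSize;                           // object after it
  }
  WriteFiller(object + size - kPointerSize, kPointerSize);  // filler last
  return object;
}

int main() {
  assert(EnsureDoubleAligned(0x1004, 16) == 0x1008);  // shifted onto alignment
  assert(EnsureDoubleAligned(0x1000, 16) == 0x1000);  // already aligned
  return 0;
}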

@@ -716,23 +716,13 @@ class Heap {
MUST_USE_RESULT AllocationResult
CopyJSObject(JSObject* source, AllocationSite* site = NULL);
// Calculates the maximum amount of filler that could be required by the
// given alignment.
static int GetMaximumFillToAlign(AllocationAlignment alignment);
// Calculates the actual amount of filler required for a given address at the
// given alignment.
static int GetFillToAlign(Address address, AllocationAlignment alignment);
// This method assumes overallocation of one word. It will store a filler
// before the object if the given object is not double aligned, otherwise
// it will place the filler after the object.
MUST_USE_RESULT HeapObject* EnsureAligned(HeapObject* object, int size,
AllocationAlignment alignment);
// Creates a filler object and returns a heap object immediately after it.
MUST_USE_RESULT HeapObject* PrecedeWithFiller(HeapObject* object,
int filler_size);
// Creates a filler object if needed for alignment and returns a heap object
// immediately after it. If any space is left after the returned object,
// another filler object is created so the over allocated memory is iterable.
MUST_USE_RESULT HeapObject* AlignWithFiller(HeapObject* object,
int object_size,
int allocation_size,
AllocationAlignment alignment);
MUST_USE_RESULT HeapObject* PrecedeWithFiller(HeapObject* object);
// Clear the Instanceof cache (used when a prototype changes).
inline void ClearInstanceofCache();
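
The comments above describe the reverted calling convention; a sketch of how a caller is meant to combine the two helpers, over-allocating by the worst-case fill and then trimming with filler before and/or after the object (names and constants are illustrative, not the real Heap API):

#include <assert.h>
#include <stdint.h>

const int kPointerSize = 4;
const int kDoubleSize = 8;
const uintptr_t kDoubleAlignmentMask = 8 - 1;

int MaximumFillToAlign() { return kDoubleSize - kPointerSize; }

int FillToAlign(uintptr_t address) {
  return (address & kDoubleAlignmentMask) != 0 ? kPointerSize : 0;
}

// Returns the double-aligned object start inside the over-allocated block
// [block, block + allocation_size); a real heap would also write filler
// objects over the unused prefix and/or suffix.
uintptr_t AlignWithFiller(uintptr_t block, int object_size, int allocation_size) {
  int pre_filler = FillToAlign(block);
  int post_filler = allocation_size - object_size - pre_filler;
  (void)post_filler;
  return block + pre_filler;
}

int main() {
  int object_size = 12;
  int allocation_size = object_size + MaximumFillToAlign();  // worst case
  assert(AlignWithFiller(0x1004, object_size, allocation_size) == 0x1008);
  assert(AlignWithFiller(0x1000, object_size, allocation_size) == 0x1000);
  return 0;
}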

@@ -250,21 +250,28 @@ HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
}
HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
HeapObject* PagedSpace::AllocateLinearlyAligned(int size_in_bytes,
AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(current_top, alignment);
int alignment_size = 0;
Address new_top = current_top + filler_size + *size_in_bytes;
if (alignment == kDoubleAligned &&
(OffsetFrom(current_top) & kDoubleAlignmentMask) != 0) {
alignment_size = kPointerSize;
size_in_bytes += alignment_size;
} else if (alignment == kDoubleUnaligned &&
(OffsetFrom(current_top) & kDoubleAlignmentMask) == 0) {
alignment_size = kPointerSize;
size_in_bytes += alignment_size;
}
Address new_top = current_top + size_in_bytes;
if (new_top > allocation_info_.limit()) return NULL;
allocation_info_.set_top(new_top);
if (filler_size > 0) {
*size_in_bytes += filler_size;
return heap()->PrecedeWithFiller(HeapObject::FromAddress(current_top),
filler_size);
if (alignment_size > 0) {
return heap()->EnsureAligned(HeapObject::FromAddress(current_top),
size_in_bytes, alignment);
}
return HeapObject::FromAddress(current_top);
}
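
Both versions of AllocateLinearlyAligned above follow the same bump-pointer shape; a toy sketch of that shape, with illustrative names rather than the real PagedSpace interface:

#include <assert.h>
#include <stdint.h>

const int kPointerSize = 4;
const uintptr_t kDoubleAlignmentMask = 8 - 1;

struct LinearArea {
  uintptr_t top;
  uintptr_t limit;
};

// Returns the object start, or 0 if the linear area cannot hold the filler
// plus the object; the caller then falls back to the slow path.
uintptr_t AllocateDoubleAlignedLinearly(LinearArea* area, int size_in_bytes) {
  int filler = (area->top & kDoubleAlignmentMask) != 0 ? kPointerSize : 0;
  uintptr_t new_top = area->top + filler + size_in_bytes;
  if (new_top > area->limit) return 0;
  uintptr_t object = area->top + filler;  // any filler word precedes the object
  area->top = new_top;                    // bump the top exactly once
  return object;
}

int main() {
  LinearArea area = {0x1004, 0x2000};
  assert(AllocateDoubleAlignedLinearly(&area, 12) == 0x1008);
  assert(area.top == 0x1014);
  return 0;
}
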
@@ -296,26 +303,21 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment) {
DCHECK(identity() == OLD_SPACE);
int allocation_size = size_in_bytes;
HeapObject* object = AllocateLinearlyAligned(&allocation_size, alignment);
HeapObject* object = AllocateLinearlyAligned(size_in_bytes, alignment);
int aligned_size_in_bytes = size_in_bytes + kPointerSize;
if (object == NULL) {
// We don't know exactly how much filler we need to align until space is
// allocated, so assume the worst case.
int filler_size = Heap::GetMaximumFillToAlign(alignment);
allocation_size += filler_size;
object = free_list_.Allocate(allocation_size);
object = free_list_.Allocate(aligned_size_in_bytes);
if (object == NULL) {
object = SlowAllocateRaw(allocation_size);
object = SlowAllocateRaw(aligned_size_in_bytes);
}
if (object != NULL && filler_size != 0) {
object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
alignment);
if (object != NULL) {
object = heap()->EnsureAligned(object, aligned_size_in_bytes, alignment);
}
}
if (object != NULL) {
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), allocation_size);
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
return object;
}
@@ -342,8 +344,19 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment) {
Address old_top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(old_top, alignment);
int aligned_size_in_bytes = size_in_bytes + filler_size;
int alignment_size = 0;
int aligned_size_in_bytes = 0;
// If double alignment is required and top pointer is not aligned, we allocate
// additional memory to take care of the alignment.
if (alignment == kDoubleAligned &&
(OffsetFrom(old_top) & kDoubleAlignmentMask) != 0) {
alignment_size += kPointerSize;
} else if (alignment == kDoubleUnaligned &&
(OffsetFrom(old_top) & kDoubleAlignmentMask) == 0) {
alignment_size += kPointerSize;
}
aligned_size_in_bytes = size_in_bytes + alignment_size;
if (allocation_info_.limit() - old_top < aligned_size_in_bytes) {
return SlowAllocateRaw(size_in_bytes, alignment);
@@ -353,13 +366,16 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
allocation_info_.set_top(allocation_info_.top() + aligned_size_in_bytes);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
if (filler_size > 0) {
obj = heap()->PrecedeWithFiller(obj, filler_size);
if (alignment_size > 0) {
obj = heap()->PrecedeWithFiller(obj);
}
// The slow path above ultimately goes through AllocateRaw, so this suffices.
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);
DCHECK((kDoubleAligned && (OffsetFrom(obj) & kDoubleAlignmentMask) == 0) ||
(kDoubleUnaligned && (OffsetFrom(obj) & kDoubleAlignmentMask) != 0));
return obj;
}
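
A small predicate capturing the post-condition the DCHECK above is aiming for, written against the requested alignment; constants are illustrative for a 32-bit build:

#include <assert.h>
#include <stdint.h>

const uintptr_t kPointerSize = 4;
const uintptr_t kDoubleAlignmentMask = 8 - 1;

enum Alignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };

bool SatisfiesAlignment(uintptr_t address, Alignment alignment) {
  if (alignment == kDoubleAligned)
    return (address & kDoubleAlignmentMask) == 0;
  if (alignment == kDoubleUnaligned)
    return (address & kDoubleAlignmentMask) == kPointerSize;
  return true;  // kWordAligned: the bump pointer is always word aligned
}

int main() {
  assert(SatisfiesAlignment(0x1008, kDoubleAligned));
  assert(SatisfiesAlignment(0x100c, kDoubleUnaligned));
  assert(!SatisfiesAlignment(0x100c, kDoubleAligned));
  assert(SatisfiesAlignment(0x1234, kWordAligned));
  return 0;
}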

@@ -1455,28 +1455,33 @@ AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes,
Address old_top = allocation_info_.top();
Address high = to_space_.page_high();
if (allocation_info_.limit() < high) {
int alignment_size = Heap::GetFillToAlign(old_top, alignment);
int aligned_size_in_bytes = size_in_bytes + alignment_size;
// Either the limit has been lowered because linear allocation was disabled
// or because incremental marking wants to get a chance to do a step. Set
// the new limit accordingly.
Address new_top = old_top + aligned_size_in_bytes;
int aligned_size = size_in_bytes;
aligned_size += (alignment != kWordAligned) ? kPointerSize : 0;
Address new_top = old_top + aligned_size;
int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
heap()->incremental_marking()->Step(bytes_allocated,
IncrementalMarking::GC_VIA_STACK_GUARD);
UpdateInlineAllocationLimit(aligned_size_in_bytes);
UpdateInlineAllocationLimit(aligned_size);
top_on_previous_step_ = new_top;
if (alignment == kWordAligned) return AllocateRawUnaligned(size_in_bytes);
return AllocateRawAligned(size_in_bytes, alignment);
if (alignment == kDoubleAligned)
return AllocateRawAligned(size_in_bytes, kDoubleAligned);
else if (alignment == kDoubleUnaligned)
return AllocateRawAligned(size_in_bytes, kDoubleUnaligned);
return AllocateRawUnaligned(size_in_bytes);
} else if (AddFreshPage()) {
// Switched to new page. Try allocating again.
int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
heap()->incremental_marking()->Step(bytes_allocated,
IncrementalMarking::GC_VIA_STACK_GUARD);
top_on_previous_step_ = to_space_.page_low();
if (alignment == kWordAligned) return AllocateRawUnaligned(size_in_bytes);
return AllocateRawAligned(size_in_bytes, alignment);
if (alignment == kDoubleAligned)
return AllocateRawAligned(size_in_bytes, kDoubleAligned);
else if (alignment == kDoubleUnaligned)
return AllocateRawAligned(size_in_bytes, kDoubleUnaligned);
return AllocateRawUnaligned(size_in_bytes);
} else {
return AllocationResult::Retry();
}

@@ -1931,10 +1931,9 @@ class PagedSpace : public Space {
// address denoted by top in allocation_info_.
inline HeapObject* AllocateLinearly(int size_in_bytes);
// Generic fast case allocation function that tries aligned linear allocation
// at the address denoted by top in allocation_info_. Writes the aligned
// allocation size, which includes the filler size, to size_in_bytes.
inline HeapObject* AllocateLinearlyAligned(int* size_in_bytes,
// Generic fast case allocation function that tries double aligned linear
// allocation at the address denoted by top in allocation_info_.
inline HeapObject* AllocateLinearlyAligned(int size_in_bytes,
AllocationAlignment alignment);
// If sweeping is still in progress try to sweep unswept pages. If that is

@@ -1784,158 +1784,6 @@ TEST(TestSizeOfObjects) {
}
TEST(TestAlignmentCalculations) {
// Maximum fill amounts should be consistent.
int maximum_double_misalignment = kDoubleSize - kPointerSize;
int max_word_fill = Heap::GetMaximumFillToAlign(kWordAligned);
CHECK_EQ(0, max_word_fill);
int max_double_fill = Heap::GetMaximumFillToAlign(kDoubleAligned);
CHECK_EQ(maximum_double_misalignment, max_double_fill);
int max_double_unaligned_fill = Heap::GetMaximumFillToAlign(kDoubleUnaligned);
CHECK_EQ(maximum_double_misalignment, max_double_unaligned_fill);
Address base = reinterpret_cast<Address>(NULL);
int fill = 0;
// Word alignment never requires fill.
fill = Heap::GetFillToAlign(base, kWordAligned);
CHECK_EQ(0, fill);
fill = Heap::GetFillToAlign(base + kPointerSize, kWordAligned);
CHECK_EQ(0, fill);
// No fill is required when address is double aligned.
fill = Heap::GetFillToAlign(base, kDoubleAligned);
CHECK_EQ(0, fill);
// Fill is required if address is not double aligned.
fill = Heap::GetFillToAlign(base + kPointerSize, kDoubleAligned);
CHECK_EQ(maximum_double_misalignment, fill);
// kDoubleUnaligned has the opposite fill amounts.
fill = Heap::GetFillToAlign(base, kDoubleUnaligned);
CHECK_EQ(maximum_double_misalignment, fill);
fill = Heap::GetFillToAlign(base + kPointerSize, kDoubleUnaligned);
CHECK_EQ(0, fill);
}
static HeapObject* NewSpaceAllocateAligned(int size,
AllocationAlignment alignment) {
Heap* heap = CcTest::heap();
AllocationResult allocation =
heap->new_space()->AllocateRawAligned(size, alignment);
HeapObject* obj = NULL;
allocation.To(&obj);
heap->CreateFillerObjectAt(obj->address(), size);
return obj;
}
TEST(TestAlignedAllocation) {
// Double misalignment is 4 on 32-bit platforms, 0 on 64-bit ones.
const intptr_t double_misalignment = kDoubleSize - kPointerSize;
if (double_misalignment) {
// Allocate a pointer sized object that must be double aligned.
Address* top_addr = CcTest::heap()->new_space()->allocation_top_address();
Address start = *top_addr;
HeapObject* obj1 = NewSpaceAllocateAligned(kPointerSize, kDoubleAligned);
CHECK(IsAddressAligned(obj1->address(), kDoubleAlignment));
// Allocate a second pointer sized object. These two allocations should
// cause exactly one filler object to be created.
HeapObject* obj2 = NewSpaceAllocateAligned(kPointerSize, kDoubleAligned);
CHECK(IsAddressAligned(obj2->address(), kDoubleAlignment));
CHECK_EQ(2 * kPointerSize + double_misalignment, *top_addr - start);
// There should be 3 filler objects now (the two HeapObjects we created and
// the filler.)
CHECK(HeapObject::FromAddress(start)->IsFiller() &&
HeapObject::FromAddress(start + kPointerSize)->IsFiller() &&
HeapObject::FromAddress(start + 2 * kPointerSize)->IsFiller());
// Similarly for kDoubleUnaligned.
start = *top_addr;
obj1 = NewSpaceAllocateAligned(kPointerSize, kDoubleUnaligned);
CHECK(IsAddressAligned(obj1->address(), kDoubleAlignment, kPointerSize));
obj2 = NewSpaceAllocateAligned(kPointerSize, kDoubleUnaligned);
CHECK(IsAddressAligned(obj2->address(), kDoubleAlignment, kPointerSize));
CHECK_EQ(2 * kPointerSize + double_misalignment, *top_addr - start);
CHECK(HeapObject::FromAddress(start)->IsFiller() &&
HeapObject::FromAddress(start + kPointerSize)->IsFiller() &&
HeapObject::FromAddress(start + 2 * kPointerSize)->IsFiller());
}
}
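
A worked layout for the two back-to-back kDoubleAligned allocations checked above, assuming (purely for illustration) that the new-space top starts out 8-byte aligned on a 32-bit build:

//   start + 0  : obj1   (top already aligned, so no pre-filler)
//   start + 4  : filler (one word, so obj2 can land on an 8-byte boundary)
//   start + 8  : obj2
//   start + 12 : new top
//
// Top therefore advances by 2 * kPointerSize + double_misalignment, and all
// three slots read back as fillers because NewSpaceAllocateAligned overwrites
// each allocated object with a filler of its own size.
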
// Force allocation to happen from the free list, at a desired misalignment.
static Address SetUpFreeListAllocation(int misalignment) {
Heap* heap = CcTest::heap();
OldSpace* old_space = heap->old_space();
Address top = old_space->top();
// First, allocate enough filler to get the linear area into the desired
// misalignment.
const intptr_t maximum_misalignment = 2 * kPointerSize;
const intptr_t maximum_misalignment_mask = maximum_misalignment - 1;
intptr_t top_alignment = OffsetFrom(top) & maximum_misalignment_mask;
int filler_size = misalignment - static_cast<int>(top_alignment);
if (filler_size < 0) filler_size += maximum_misalignment;
if (filler_size) {
// Create the filler object.
AllocationResult allocation = old_space->AllocateRawUnaligned(filler_size);
HeapObject* obj = NULL;
allocation.To(&obj);
heap->CreateFillerObjectAt(obj->address(), filler_size);
}
top = old_space->top();
old_space->EmptyAllocationInfo();
return top;
}
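
A standalone check of the filler-size arithmetic SetUpFreeListAllocation uses above to steer top onto the requested misalignment; the names and constants are illustrative stand-ins, not the real OldSpace interface:

#include <assert.h>
#include <stdint.h>

const int kPointerSize = 4;

// How much filler must be allocated so that the next object starts
// `desired_misalignment` bytes past an 8-byte boundary (32-bit constants).
int FillerToReachMisalignment(uintptr_t top, int desired_misalignment) {
  const int maximum_misalignment = 2 * kPointerSize;
  const uintptr_t mask = maximum_misalignment - 1;
  int filler = desired_misalignment - static_cast<int>(top & mask);
  if (filler < 0) filler += maximum_misalignment;
  return filler;
}

int main() {
  assert(FillerToReachMisalignment(0x1000, 0) == 0);
  assert(FillerToReachMisalignment(0x1000, kPointerSize) == 4);
  assert(FillerToReachMisalignment(0x1004, 0) == 4);
  assert(FillerToReachMisalignment(0x1004, kPointerSize) == 0);
  return 0;
}
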
static HeapObject* OldSpaceAllocateAligned(int size,
AllocationAlignment alignment) {
Heap* heap = CcTest::heap();
AllocationResult allocation =
heap->old_space()->AllocateRawAligned(size, alignment);
HeapObject* obj = NULL;
allocation.To(&obj);
heap->CreateFillerObjectAt(obj->address(), size);
return obj;
}
// Test the case where allocation must be done from the free list, so filler
// may precede or follow the object.
TEST(TestAlignedOverAllocation) {
// Double misalignment is 4 on 32-bit platforms, 0 on 64-bit ones.
const intptr_t double_misalignment = kDoubleSize - kPointerSize;
if (double_misalignment) {
Address start = SetUpFreeListAllocation(0);
HeapObject* obj1 = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned);
// The object should be aligned, and a filler object should be created.
CHECK(IsAddressAligned(obj1->address(), kDoubleAlignment));
CHECK(HeapObject::FromAddress(start)->IsFiller() &&
HeapObject::FromAddress(start + kPointerSize)->IsFiller());
// Try the opposite alignment case.
start = SetUpFreeListAllocation(kPointerSize);
HeapObject* obj2 = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned);
CHECK(IsAddressAligned(obj2->address(), kDoubleAlignment));
CHECK(HeapObject::FromAddress(start)->IsFiller() &&
HeapObject::FromAddress(start + kPointerSize)->IsFiller());
// Similarly for kDoubleUnaligned.
start = SetUpFreeListAllocation(0);
obj1 = OldSpaceAllocateAligned(kPointerSize, kDoubleUnaligned);
// The object should be aligned, and a filler object should be created.
CHECK(IsAddressAligned(obj1->address(), kDoubleAlignment, kPointerSize));
CHECK(HeapObject::FromAddress(start)->IsFiller() &&
HeapObject::FromAddress(start + kPointerSize)->IsFiller());
// Try the opposite alignment case.
start = SetUpFreeListAllocation(kPointerSize);
obj2 = OldSpaceAllocateAligned(kPointerSize, kDoubleUnaligned);
CHECK(IsAddressAligned(obj2->address(), kDoubleAlignment, kPointerSize));
CHECK(HeapObject::FromAddress(start)->IsFiller() &&
HeapObject::FromAddress(start + kPointerSize)->IsFiller());
}
}
TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
CcTest::InitializeVM();
HeapIterator iterator(CcTest::heap());