[heap] Fix read-only space metrics for aligned allocations

Fix TryAllocateLinearlyAligned returning without updating the allocation
stats when a preceding filler is required. Also make AllocateRaw take an
int instead of size_t, in line with other Spaces.

Bug: v8:8875, chromium:1097389
Change-Id: If0932caa94dce1cd45b41f44fa225a2007772ea1
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2264354
Auto-Submit: Dan Elphick <delphick@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#68516}
Commit: 3e3403ea0d (parent: 98a9c44be9)
Author: Dan Elphick, 2020-06-24 14:47:32 +01:00, committed by Commit Bot
3 changed files with 70 additions and 17 deletions


@@ -387,6 +387,12 @@ HeapObject ReadOnlySpace::TryAllocateLinearlyAligned(
Address new_top = current_top + filler_size + size_in_bytes;
if (new_top > limit_) return HeapObject();
+ // Allocation always occurs in the last chunk for RO_SPACE.
+ BasicMemoryChunk* chunk = pages_.back();
+ int allocated_size = filler_size + size_in_bytes;
+ accounting_stats_.IncreaseAllocatedBytes(allocated_size, chunk);
+ chunk->IncreaseAllocatedBytes(allocated_size);
top_ = new_top;
if (filler_size > 0) {
return Heap::PrecedeWithFiller(ReadOnlyRoots(heap()),
@@ -394,12 +400,6 @@ HeapObject ReadOnlySpace::TryAllocateLinearlyAligned(
filler_size);
}
- // Allocation always occurs in the last chunk for RO_SPACE.
- BasicMemoryChunk* chunk = pages_.back();
- int allocated_size = filler_size + size_in_bytes;
- accounting_stats_.IncreaseAllocatedBytes(allocated_size, chunk);
- chunk->IncreaseAllocatedBytes(allocated_size);
return HeapObject::FromAddress(current_top);
}
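For readability, here is roughly what the fixed TryAllocateLinearlyAligned looks like once the two hunks above are applied. This is a sketch assembled from the diff, not the verbatim V8 source; the first two statements and the middle argument of PrecedeWithFiller are not shown in the hunks and are assumed.

  HeapObject ReadOnlySpace::TryAllocateLinearlyAligned(
      int size_in_bytes, AllocationAlignment alignment) {
    Address current_top = top_;
    int filler_size = Heap::GetFillToAlign(current_top, alignment);

    Address new_top = current_top + filler_size + size_in_bytes;
    if (new_top > limit_) return HeapObject();

    // Update the accounting stats *before* the filler early-return below, so
    // an aligned allocation that needs a preceding filler is counted too.
    // Allocation always occurs in the last chunk for RO_SPACE.
    BasicMemoryChunk* chunk = pages_.back();
    int allocated_size = filler_size + size_in_bytes;
    accounting_stats_.IncreaseAllocatedBytes(allocated_size, chunk);
    chunk->IncreaseAllocatedBytes(allocated_size);

    top_ = new_top;
    if (filler_size > 0) {
      // The middle argument is assumed; the hunk elides it.
      return Heap::PrecedeWithFiller(ReadOnlyRoots(heap()),
                                     HeapObject::FromAddress(current_top),
                                     filler_size);
    }
    return HeapObject::FromAddress(current_top);
  }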
@@ -443,15 +443,14 @@ AllocationResult ReadOnlySpace::AllocateRawUnaligned(int size_in_bytes) {
return object;
}
- AllocationResult ReadOnlySpace::AllocateRaw(size_t size_in_bytes,
+ AllocationResult ReadOnlySpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
AllocationResult result = alignment != kWordAligned
? AllocateRawAligned(size_in_bytes, alignment)
: AllocateRawUnaligned(size_in_bytes);
#else
- AllocationResult result =
- AllocateRawUnaligned(static_cast<int>(size_in_bytes));
+ AllocationResult result = AllocateRawUnaligned(size_in_bytes);
#endif
HeapObject heap_obj;
if (!result.IsRetry() && result.To(&heap_obj)) {
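Putting this hunk together, AllocateRaw after the change reads roughly as follows. Again a sketch: the tail of the function (after the IsRetry check) is not shown in the hunk and is only assumed here.

  AllocationResult ReadOnlySpace::AllocateRaw(int size_in_bytes,
                                              AllocationAlignment alignment) {
  #ifdef V8_HOST_ARCH_32_BIT
    // Only 32-bit hosts need an explicit double-aligned allocation path; on
    // 64-bit hosts allocations are already sufficiently aligned.
    AllocationResult result = alignment != kWordAligned
                                  ? AllocateRawAligned(size_in_bytes, alignment)
                                  : AllocateRawUnaligned(size_in_bytes);
  #else
    // Taking an int means the static_cast that used to sit here is gone.
    AllocationResult result = AllocateRawUnaligned(size_in_bytes);
  #endif
    HeapObject heap_obj;
    if (!result.IsRetry() && result.To(&heap_obj)) {
      // Post-processing of a successful allocation (not shown in the hunk).
    }
    return result;
  }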


@@ -89,7 +89,7 @@ class ReadOnlySpace : public BaseSpace {
bool Contains(Object o) = delete;
V8_EXPORT_PRIVATE
- AllocationResult AllocateRaw(size_t size_in_bytes,
+ AllocationResult AllocateRaw(int size_in_bytes,
AllocationAlignment alignment);
V8_EXPORT_PRIVATE void ClearStringPaddingIfNeeded();


@@ -817,15 +817,68 @@ TEST(ReadOnlySpaceMetrics_OnePage) {
// Allocated objects size.
CHECK_EQ(faked_space.Size(), 16);
- // Capacity will be one OS page minus the page header.
- CHECK_EQ(faked_space.Capacity(),
- allocator->GetCommitPageSize() -
- MemoryChunkLayout::ObjectStartOffsetInDataPage());
// Amount of OS allocated memory.
CHECK_EQ(faked_space.CommittedMemory(), allocator->GetCommitPageSize());
CHECK_EQ(faked_space.CommittedPhysicalMemory(),
allocator->GetCommitPageSize());
+ // Capacity will be one OS page minus the page header.
+ CHECK_EQ(faked_space.Capacity(),
+ allocator->GetCommitPageSize() -
+ MemoryChunkLayout::ObjectStartOffsetInDataPage());
}
+ TEST(ReadOnlySpaceMetrics_AlignedAllocations) {
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ // Create a read-only space and allocate some memory, shrink the pages and
+ // check the allocated object size is as expected.
+ ReadOnlySpace faked_space(heap);
+ // Initially no memory.
+ CHECK_EQ(faked_space.Size(), 0);
+ CHECK_EQ(faked_space.Capacity(), 0);
+ CHECK_EQ(faked_space.CommittedMemory(), 0);
+ CHECK_EQ(faked_space.CommittedPhysicalMemory(), 0);
+ MemoryAllocator* allocator = heap->memory_allocator();
+ // Allocate an object just under an OS page in size.
+ int object_size =
+ static_cast<int>(allocator->GetCommitPageSize() - kApiTaggedSize);
+ // TODO(v8:8875): Pointer compression does not enable aligned memory
+ // allocation yet.
+ #ifdef V8_COMPRESS_POINTERS
+ int alignment = kInt32Size;
+ #else
+ int alignment = kDoubleSize;
+ #endif
+ HeapObject object =
+ faked_space.AllocateRaw(object_size, kDoubleAligned).ToObjectChecked();
+ CHECK_EQ(object.address() % alignment, 0);
+ object =
+ faked_space.AllocateRaw(object_size, kDoubleAligned).ToObjectChecked();
+ CHECK_EQ(object.address() % alignment, 0);
+ faked_space.ShrinkPages();
+ faked_space.Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
+ // Allocated objects size may contain 4 bytes of padding on 32-bit or
+ // with pointer compression.
+ CHECK_EQ(faked_space.Size(), object_size + RoundUp(object_size, alignment));
+ // Amount of OS allocated memory will be 3 OS pages.
+ CHECK_EQ(faked_space.CommittedMemory(), 3 * allocator->GetCommitPageSize());
+ CHECK_EQ(faked_space.CommittedPhysicalMemory(),
+ 3 * allocator->GetCommitPageSize());
+ // Capacity will be 3 OS pages minus the page header.
+ CHECK_EQ(faked_space.Capacity(),
+ 3 * allocator->GetCommitPageSize() -
+ MemoryChunkLayout::ObjectStartOffsetInDataPage());
+ }
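To make the Size() expectation in this new test concrete, here is one worked case (illustrative numbers only, assuming a 4 KiB commit page on a 32-bit build without pointer compression, so kApiTaggedSize == 4 and alignment == kDoubleSize == 8):

  object_size       = 4096 - 4 = 4092
  first allocation  = 4092 bytes (the object start offset is already double-aligned)
  second allocation = 4 bytes of filler + 4092 bytes = RoundUp(4092, 8) = 4096
  expected Size()   = 4092 + 4096 = 8188
                    = object_size + RoundUp(object_size, alignment)

On a 64-bit build without pointer compression, object_size is 4088 and already a multiple of 8, so no filler is needed and Size() is simply 2 * object_size.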
TEST(ReadOnlySpaceMetrics_TwoPages) {
@@ -846,8 +899,9 @@ TEST(ReadOnlySpaceMetrics_TwoPages) {
MemoryAllocator* allocator = heap->memory_allocator();
// Allocate an object that's too big to have more than one on a page.
- size_t object_size =
- MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE) / 2 + 16;
+ int object_size = static_cast<int>(
+ MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE) / 2 + 16);
CHECK_GT(object_size * 2,
MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE));
faked_space.AllocateRaw(object_size, kWordAligned);