diff --git a/src/heap/read-only-spaces.cc b/src/heap/read-only-spaces.cc
index a88753edf9..2552d09667 100644
--- a/src/heap/read-only-spaces.cc
+++ b/src/heap/read-only-spaces.cc
@@ -387,6 +387,12 @@ HeapObject ReadOnlySpace::TryAllocateLinearlyAligned(
   Address new_top = current_top + filler_size + size_in_bytes;
   if (new_top > limit_) return HeapObject();
 
+  // Allocation always occurs in the last chunk for RO_SPACE.
+  BasicMemoryChunk* chunk = pages_.back();
+  int allocated_size = filler_size + size_in_bytes;
+  accounting_stats_.IncreaseAllocatedBytes(allocated_size, chunk);
+  chunk->IncreaseAllocatedBytes(allocated_size);
+
   top_ = new_top;
   if (filler_size > 0) {
     return Heap::PrecedeWithFiller(ReadOnlyRoots(heap()),
@@ -394,12 +400,6 @@ HeapObject ReadOnlySpace::TryAllocateLinearlyAligned(
                                    filler_size);
   }
 
-  // Allocation always occurs in the last chunk for RO_SPACE.
-  BasicMemoryChunk* chunk = pages_.back();
-  int allocated_size = filler_size + size_in_bytes;
-  accounting_stats_.IncreaseAllocatedBytes(allocated_size, chunk);
-  chunk->IncreaseAllocatedBytes(allocated_size);
-
   return HeapObject::FromAddress(current_top);
 }
 
@@ -443,15 +443,14 @@ AllocationResult ReadOnlySpace::AllocateRawUnaligned(int size_in_bytes) {
   return object;
 }
 
-AllocationResult ReadOnlySpace::AllocateRaw(size_t size_in_bytes,
+AllocationResult ReadOnlySpace::AllocateRaw(int size_in_bytes,
                                             AllocationAlignment alignment) {
 #ifdef V8_HOST_ARCH_32_BIT
   AllocationResult result = alignment != kWordAligned
                                 ? AllocateRawAligned(size_in_bytes, alignment)
                                 : AllocateRawUnaligned(size_in_bytes);
 #else
-  AllocationResult result =
-      AllocateRawUnaligned(static_cast<int>(size_in_bytes));
+  AllocationResult result = AllocateRawUnaligned(size_in_bytes);
 #endif
   HeapObject heap_obj;
   if (!result.IsRetry() && result.To(&heap_obj)) {
diff --git a/src/heap/read-only-spaces.h b/src/heap/read-only-spaces.h
index ae2e685944..3a510081f3 100644
--- a/src/heap/read-only-spaces.h
+++ b/src/heap/read-only-spaces.h
@@ -89,7 +89,7 @@ class ReadOnlySpace : public BaseSpace {
   bool Contains(Object o) = delete;
 
   V8_EXPORT_PRIVATE
-  AllocationResult AllocateRaw(size_t size_in_bytes,
+  AllocationResult AllocateRaw(int size_in_bytes,
                                AllocationAlignment alignment);
 
   V8_EXPORT_PRIVATE void ClearStringPaddingIfNeeded();
diff --git a/test/cctest/heap/test-spaces.cc b/test/cctest/heap/test-spaces.cc
index 50af682c8c..7e23cdf3dd 100644
--- a/test/cctest/heap/test-spaces.cc
+++ b/test/cctest/heap/test-spaces.cc
@@ -817,15 +817,68 @@ TEST(ReadOnlySpaceMetrics_OnePage) {
   // Allocated objects size.
   CHECK_EQ(faked_space.Size(), 16);
 
-  // Capacity will be one OS page minus the page header.
-  CHECK_EQ(faked_space.Capacity(),
-           allocator->GetCommitPageSize() -
-               MemoryChunkLayout::ObjectStartOffsetInDataPage());
-
   // Amount of OS allocated memory.
   CHECK_EQ(faked_space.CommittedMemory(), allocator->GetCommitPageSize());
   CHECK_EQ(faked_space.CommittedPhysicalMemory(),
            allocator->GetCommitPageSize());
+
+  // Capacity will be one OS page minus the page header.
+  CHECK_EQ(faked_space.Capacity(),
+           allocator->GetCommitPageSize() -
+               MemoryChunkLayout::ObjectStartOffsetInDataPage());
+}
+
+TEST(ReadOnlySpaceMetrics_AlignedAllocations) {
+  Isolate* isolate = CcTest::i_isolate();
+  Heap* heap = isolate->heap();
+
+  // Create a read-only space and allocate some memory, shrink the pages and
+  // check the allocated object size is as expected.
+
+  ReadOnlySpace faked_space(heap);
+
+  // Initially no memory.
+  CHECK_EQ(faked_space.Size(), 0);
+  CHECK_EQ(faked_space.Capacity(), 0);
+  CHECK_EQ(faked_space.CommittedMemory(), 0);
+  CHECK_EQ(faked_space.CommittedPhysicalMemory(), 0);
+
+  MemoryAllocator* allocator = heap->memory_allocator();
+  // Allocate an object just under an OS page in size.
+  int object_size =
+      static_cast<int>(allocator->GetCommitPageSize() - kApiTaggedSize);
+
+// TODO(v8:8875): Pointer compression does not enable aligned memory allocation
+// yet.
+#ifdef V8_COMPRESS_POINTERS
+  int alignment = kInt32Size;
+#else
+  int alignment = kDoubleSize;
+#endif
+
+  HeapObject object =
+      faked_space.AllocateRaw(object_size, kDoubleAligned).ToObjectChecked();
+  CHECK_EQ(object.address() % alignment, 0);
+  object =
+      faked_space.AllocateRaw(object_size, kDoubleAligned).ToObjectChecked();
+  CHECK_EQ(object.address() % alignment, 0);
+
+  faked_space.ShrinkPages();
+  faked_space.Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
+
+  // Allocated objects size may contain 4 bytes of padding on 32-bit or
+  // with pointer compression.
+  CHECK_EQ(faked_space.Size(), object_size + RoundUp(object_size, alignment));
+
+  // Amount of OS allocated memory will be 3 OS pages.
+  CHECK_EQ(faked_space.CommittedMemory(), 3 * allocator->GetCommitPageSize());
+  CHECK_EQ(faked_space.CommittedPhysicalMemory(),
+           3 * allocator->GetCommitPageSize());
+
+  // Capacity will be 3 OS pages minus the page header.
+  CHECK_EQ(faked_space.Capacity(),
+           3 * allocator->GetCommitPageSize() -
+               MemoryChunkLayout::ObjectStartOffsetInDataPage());
 }
 
 TEST(ReadOnlySpaceMetrics_TwoPages) {
@@ -846,8 +899,9 @@ TEST(ReadOnlySpaceMetrics_TwoPages) {
 
   MemoryAllocator* allocator = heap->memory_allocator();
   // Allocate an object that's too big to have more than one on a page.
-  size_t object_size =
-      MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE) / 2 + 16;
+
+  int object_size = static_cast<int>(
+      MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE) / 2 + 16);
   CHECK_GT(object_size * 2,
            MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE));
   faked_space.AllocateRaw(object_size, kWordAligned);