[heap] More explicit fast path for new space allocation
Introduce an explicit fast path for allocation from the LAB; the slow path refills the LAB and allocates again.

Other changes:
1) Move the slow-path methods out of the header file.
2) AllocateRaw(Aligned|Unaligned) are now private methods; all NewSpace allocations now go through AllocateRaw.

Bug: v8:10315
Change-Id: Iee2bd7b74aa49be8b20d89fefeb2e087575d532c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2319987
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69061}
parent f32972f870
commit 9d4dcce70c
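The dispatch described in the commit message — an inline bump-pointer fast path over the current LAB, with a slow path that refills the LAB and retries — can be sketched in isolation. The following is a minimal illustration, not V8 code: the names ToyNewSpace, Lab, RefillLab and kLabSize are invented for the example; in the diff below the corresponding roles are played by NewSpace::AllocateRaw, AllocateFastAligned/AllocateFastUnaligned and AllocateRawSlow.

// Minimal sketch of the fast/slow allocation split, assuming a toy
// bump-pointer allocator. All names here are illustrative, not V8 API.
#include <cstddef>
#include <cstdint>
#include <new>

struct Lab {
  uintptr_t top = 0;    // next free address in the linear allocation buffer
  uintptr_t limit = 0;  // end of the buffer
};

class ToyNewSpace {
 public:
  // Single public entry point: try the inline fast path, then fall back.
  void* AllocateRaw(size_t size) {
    void* result = AllocateFast(size);
    return result != nullptr ? result : AllocateRawSlow(size);
  }

 private:
  // Fast path: pure bump-pointer allocation within the current LAB.
  void* AllocateFast(size_t size) {
    if (lab_.limit - lab_.top < size) return nullptr;  // signal "retry"
    void* obj = reinterpret_cast<void*>(lab_.top);
    lab_.top += size;
    return obj;
  }

  // Slow path: refill the LAB and allocate again. A real collector would
  // take new pages from the space or trigger a GC here.
  void* AllocateRawSlow(size_t size) {
    if (!RefillLab(size)) return nullptr;  // genuine allocation failure
    return AllocateFast(size);
  }

  // Grab a fresh chunk to serve as the new LAB (old chunks are simply
  // leaked in this toy version).
  bool RefillLab(size_t size) {
    size_t chunk = size > kLabSize ? size : kLabSize;
    void* memory = ::operator new(chunk, std::nothrow);
    if (memory == nullptr) return false;
    lab_.top = reinterpret_cast<uintptr_t>(memory);
    lab_.limit = lab_.top + chunk;
    return true;
  }

  static constexpr size_t kLabSize = 64 * 1024;
  Lab lab_;
};

Keeping the fast path this small is what allows it to stay V8_INLINE in the header, while the slow path is moved out of line into the .cc file, as the hunks below show.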
src/heap/heap.cc
@@ -1934,7 +1934,8 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
             AllocateRaw(size, type, AllocationOrigin::kRuntime, align);
 #else
         if (space == NEW_SPACE) {
-          allocation = new_space()->AllocateRawUnaligned(size);
+          allocation = new_space()->AllocateRaw(
+              size, AllocationAlignment::kWordAligned);
         } else if (space == RO_SPACE) {
           allocation = read_only_space()->AllocateRaw(
               size, AllocationAlignment::kWordAligned);
src/heap/new-spaces-inl.h
@@ -5,6 +5,8 @@
 #ifndef V8_HEAP_NEW_SPACES_INL_H_
 #define V8_HEAP_NEW_SPACES_INL_H_

+#include "src/common/globals.h"
+#include "src/heap/heap.h"
 #include "src/heap/new-spaces.h"
 #include "src/heap/spaces-inl.h"
 #include "src/objects/tagged-impl.h"
@@ -82,7 +84,45 @@ HeapObject SemiSpaceObjectIterator::Next() {
 // -----------------------------------------------------------------------------
 // NewSpace

-AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
+AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
+                                       AllocationAlignment alignment,
+                                       AllocationOrigin origin) {
+  AllocationResult result;
+
+  if (alignment != kWordAligned) {
+    result = AllocateFastAligned(size_in_bytes, alignment, origin);
+  } else {
+    result = AllocateFastUnaligned(size_in_bytes, origin);
+  }
+
+  if (!result.IsRetry()) {
+    return result;
+  } else {
+    return AllocateRawSlow(size_in_bytes, alignment, origin);
+  }
+}
+
+AllocationResult NewSpace::AllocateFastUnaligned(int size_in_bytes,
+                                                 AllocationOrigin origin) {
+  Address top = allocation_info_.top();
+  if (allocation_info_.limit() < top + size_in_bytes) {
+    return AllocationResult::Retry();
+  }
+
+  HeapObject obj = HeapObject::FromAddress(top);
+  allocation_info_.set_top(top + size_in_bytes);
+  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+
+  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
+
+  if (FLAG_trace_allocations_origins) {
+    UpdateAllocationOrigins(origin);
+  }
+
+  return obj;
+}
+
+AllocationResult NewSpace::AllocateFastAligned(int size_in_bytes,
                                                AllocationAlignment alignment,
                                                AllocationOrigin origin) {
   Address top = allocation_info_.top();
@@ -91,16 +131,9 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,

   if (allocation_info_.limit() - top <
       static_cast<uintptr_t>(aligned_size_in_bytes)) {
-    // See if we can create room.
-    if (!EnsureAllocation(size_in_bytes, alignment)) {
-      return AllocationResult::Retry();
-    }
-
-    top = allocation_info_.top();
-    filler_size = Heap::GetFillToAlign(top, alignment);
-    aligned_size_in_bytes = size_in_bytes + filler_size;
+    return AllocationResult::Retry();
   }

   HeapObject obj = HeapObject::FromAddress(top);
   allocation_info_.set_top(top + aligned_size_in_bytes);
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
@@ -118,55 +151,6 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
   return obj;
 }

-AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
-                                                AllocationOrigin origin) {
-  Address top = allocation_info_.top();
-  if (allocation_info_.limit() < top + size_in_bytes) {
-    // See if we can create room.
-    if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
-      return AllocationResult::Retry();
-    }
-
-    top = allocation_info_.top();
-  }
-
-  HeapObject obj = HeapObject::FromAddress(top);
-  allocation_info_.set_top(top + size_in_bytes);
-  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-
-  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
-
-  if (FLAG_trace_allocations_origins) {
-    UpdateAllocationOrigins(origin);
-  }
-
-  return obj;
-}
-
-AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
-                                       AllocationAlignment alignment,
-                                       AllocationOrigin origin) {
-  if (top() < top_on_previous_step_) {
-    // Generated code decreased the top() pointer to do folded allocations
-    DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
-              Page::FromAllocationAreaAddress(top_on_previous_step_));
-    top_on_previous_step_ = top();
-  }
-#ifdef V8_HOST_ARCH_32_BIT
-  return alignment != kWordAligned
-             ? AllocateRawAligned(size_in_bytes, alignment, origin)
-             : AllocateRawUnaligned(size_in_bytes, origin);
-#else
-#ifdef V8_COMPRESS_POINTERS
-  // TODO(ishell, v8:8875): Consider using aligned allocations once the
-  // allocation alignment inconsistency is fixed. For now we keep using
-  // unaligned access since both x64 and arm64 architectures (where pointer
-  // compression is supported) allow unaligned access to doubles and full words.
-#endif  // V8_COMPRESS_POINTERS
-  return AllocateRawUnaligned(size_in_bytes, origin);
-#endif
-}
-
 V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
     int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
   base::MutexGuard guard(&mutex_);
src/heap/new-spaces.cc
@@ -563,6 +563,91 @@ std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator(Heap* heap) {
   return std::unique_ptr<ObjectIterator>(new SemiSpaceObjectIterator(this));
 }

+AllocationResult NewSpace::AllocateRawSlow(int size_in_bytes,
+                                           AllocationAlignment alignment,
+                                           AllocationOrigin origin) {
+  if (top() < top_on_previous_step_) {
+    // Generated code decreased the top() pointer to do folded allocations
+    DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
+              Page::FromAllocationAreaAddress(top_on_previous_step_));
+    top_on_previous_step_ = top();
+  }
+#ifdef V8_HOST_ARCH_32_BIT
+  return alignment != kWordAligned
+             ? AllocateRawAligned(size_in_bytes, alignment, origin)
+             : AllocateRawUnaligned(size_in_bytes, origin);
+#else
+#ifdef V8_COMPRESS_POINTERS
+  // TODO(ishell, v8:8875): Consider using aligned allocations once the
+  // allocation alignment inconsistency is fixed. For now we keep using
+  // unaligned access since both x64 and arm64 architectures (where pointer
+  // compression is supported) allow unaligned access to doubles and full words.
+#endif  // V8_COMPRESS_POINTERS
+  return AllocateRawUnaligned(size_in_bytes, origin);
+#endif
+}
+
+AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
+                                                AllocationOrigin origin) {
+  Address top = allocation_info_.top();
+  if (allocation_info_.limit() < top + size_in_bytes) {
+    // See if we can create room.
+    if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
+      return AllocationResult::Retry();
+    }
+
+    top = allocation_info_.top();
+  }
+
+  HeapObject obj = HeapObject::FromAddress(top);
+  allocation_info_.set_top(top + size_in_bytes);
+  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+
+  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
+
+  if (FLAG_trace_allocations_origins) {
+    UpdateAllocationOrigins(origin);
+  }
+
+  return obj;
+}
+
+AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
+                                              AllocationAlignment alignment,
+                                              AllocationOrigin origin) {
+  Address top = allocation_info_.top();
+  int filler_size = Heap::GetFillToAlign(top, alignment);
+  int aligned_size_in_bytes = size_in_bytes + filler_size;
+
+  if (allocation_info_.limit() - top <
+      static_cast<uintptr_t>(aligned_size_in_bytes)) {
+    // See if we can create room.
+    if (!EnsureAllocation(size_in_bytes, alignment)) {
+      return AllocationResult::Retry();
+    }
+
+    top = allocation_info_.top();
+    filler_size = Heap::GetFillToAlign(top, alignment);
+    aligned_size_in_bytes = size_in_bytes + filler_size;
+  }
+
+  HeapObject obj = HeapObject::FromAddress(top);
+  allocation_info_.set_top(top + aligned_size_in_bytes);
+  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+
+  if (filler_size > 0) {
+    obj = Heap::PrecedeWithFiller(ReadOnlyRoots(heap()), obj, filler_size);
+  }
+
+  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
+
+  if (FLAG_trace_allocations_origins) {
+    UpdateAllocationOrigins(origin);
+  }
+
+  return obj;
+}
+
 #ifdef VERIFY_HEAP
 // We do not use the SemiSpaceObjectIterator because verification doesn't assume
 // that it works (it depends on the invariants we are checking).
src/heap/new-spaces.h
@@ -391,13 +391,6 @@ class V8_EXPORT_PRIVATE NewSpace
   // Set the age mark in the active semispace.
   void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }

-  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
-  AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
-                     AllocationOrigin origin = AllocationOrigin::kRuntime);
-
-  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult AllocateRawUnaligned(
-      int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
-
   V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
   AllocateRaw(int size_in_bytes, AllocationAlignment alignment,
               AllocationOrigin origin = AllocationOrigin::kRuntime);
@@ -482,6 +475,25 @@ class V8_EXPORT_PRIVATE NewSpace
   SemiSpace from_space_;
   VirtualMemory reservation_;

+  // Internal allocation methods.
+  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
+  AllocateFastAligned(int size_in_bytes, AllocationAlignment alignment,
+                      AllocationOrigin origin);
+
+  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
+  AllocateFastUnaligned(int size_in_bytes, AllocationOrigin origin);
+
+  V8_WARN_UNUSED_RESULT AllocationResult
+  AllocateRawSlow(int size_in_bytes, AllocationAlignment alignment,
+                  AllocationOrigin origin);
+
+  V8_WARN_UNUSED_RESULT AllocationResult
+  AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
+                     AllocationOrigin origin = AllocationOrigin::kRuntime);
+
+  V8_WARN_UNUSED_RESULT AllocationResult AllocateRawUnaligned(
+      int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
+
   bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
   bool SupportsInlineAllocation() override { return true; }

test/cctest/heap/test-heap.cc
@@ -33,6 +33,7 @@
 #include "src/codegen/assembler-inl.h"
 #include "src/codegen/compilation-cache.h"
 #include "src/codegen/macro-assembler-inl.h"
+#include "src/common/globals.h"
 #include "src/debug/debug.h"
 #include "src/deoptimizer/deoptimizer.h"
 #include "src/execution/execution.h"
@@ -1712,8 +1713,7 @@ TEST(TestAlignmentCalculations) {
 static HeapObject NewSpaceAllocateAligned(int size,
                                           AllocationAlignment alignment) {
   Heap* heap = CcTest::heap();
-  AllocationResult allocation =
-      heap->new_space()->AllocateRawAligned(size, alignment);
+  AllocationResult allocation = heap->new_space()->AllocateRaw(size, alignment);
   HeapObject obj;
   allocation.To(&obj);
   heap->CreateFillerObjectAt(obj.address(), size, ClearRecordedSlots::kNo);
@@ -3662,9 +3662,9 @@ TEST(Regress169928) {
   // We need filler the size of AllocationMemento object, plus an extra
   // fill pointer value.
   HeapObject obj;
-  AllocationResult allocation =
-      CcTest::heap()->new_space()->AllocateRawUnaligned(
-          AllocationMemento::kSize + kTaggedSize);
+  AllocationResult allocation = CcTest::heap()->new_space()->AllocateRaw(
+      AllocationMemento::kSize + kTaggedSize,
+      AllocationAlignment::kWordAligned);
   CHECK(allocation.To(&obj));
   Address addr_obj = obj.address();
   CcTest::heap()->CreateFillerObjectAt(addr_obj,
test/cctest/heap/test-spaces.cc
@@ -274,8 +274,9 @@ TEST(NewSpace) {
   CHECK(new_space.MaximumCapacity());

   while (new_space.Available() >= kMaxRegularHeapObjectSize) {
-    CHECK(new_space.Contains(
-        new_space.AllocateRawUnaligned(kMaxRegularHeapObjectSize)
-            .ToObjectChecked()));
+    CHECK(new_space.Contains(new_space
+                                 .AllocateRaw(kMaxRegularHeapObjectSize,
+                                              AllocationAlignment::kWordAligned)
+                                 .ToObjectChecked()));
   }
