[heap] Introduce fast path in PagedSpace::AllocateRaw (Reland)

Introduce an explicit fast path for allocation in PagedSpace. The slow
path is moved into AllocateRawSlow, which refills the linear allocation
buffer (LAB) and retries the allocation.
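
For readers unfamiliar with the pattern: the change follows the usual bump-pointer layout, where a small, inlineable fast path only bumps the top pointer of the current linear allocation buffer, and an out-of-line slow path refills the buffer and retries. The standalone sketch below illustrates that split only; BumpAllocator, kLabSize and the malloc-based refill are hypothetical stand-ins, not V8 APIs.

// Minimal sketch of the fast/slow allocation split. All names are
// illustrative; this is not V8 code.
#include <cstdint>
#include <cstdio>
#include <cstdlib>

class BumpAllocator {
 public:
  // Fast path: bump the top pointer inside the current linear allocation
  // buffer (LAB). Small enough to be inlined at allocation sites.
  void* AllocateRaw(std::size_t size) {
    std::uintptr_t new_top = top_ + size;
    if (new_top > limit_) return AllocateRawSlow(size);  // LAB exhausted
    void* result = reinterpret_cast<void*>(top_);
    top_ = new_top;
    return result;
  }

 private:
  // Slow path, kept out of line: refill the LAB (here with a fresh malloc'ed
  // chunk; the old chunk is simply abandoned in this sketch) and retry.
  void* AllocateRawSlow(std::size_t size) {
    std::size_t lab_size = size > kLabSize ? size : kLabSize;
    void* lab = std::malloc(lab_size);
    if (lab == nullptr) return nullptr;  // refill failed
    top_ = reinterpret_cast<std::uintptr_t>(lab);
    limit_ = top_ + lab_size;
    return AllocateRaw(size);  // guaranteed to hit the fast path now
  }

  static constexpr std::size_t kLabSize = 64 * 1024;
  std::uintptr_t top_ = 0;
  std::uintptr_t limit_ = 0;
};

int main() {
  BumpAllocator allocator;
  void* a = allocator.AllocateRaw(32);  // first call refills via the slow path
  void* b = allocator.AllocateRaw(64);  // stays on the fast path
  std::printf("%p %p\n", a, b);
  return 0;
}

Splitting the function this way matters mainly for code size and inlining: the fast path stays a handful of instructions, while the refill-and-retry logic (and, in the actual CL, the allocation-step bookkeeping) lives in a separate, rarely taken function.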

Reland of https://crrev.com/c/2277808, which was reverted because it broke
the MSAN build in https://crrev.com/c/2275969.

Bug: v8:10315
Change-Id: I7a3d32525fa12ea672c62f6297c92aaafc3d8157
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2280081
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#68667}
Authored by Dominik Inführ on 2020-07-02 19:37:54 +02:00; committed by Commit Bot
parent b62c7d8167
commit 5ab06e7b95
4 changed files with 89 additions and 58 deletions

@@ -6,6 +6,7 @@
#include "src/codegen/code-comments.h"
#include "src/codegen/reloc-info.h"
#include "src/heap/heap-inl.h"
#include "src/heap/large-spaces.h"
#include "src/heap/paged-spaces-inl.h" // For PagedSpaceObjectIterator.
#include "src/objects/objects-inl.h"

@@ -5,6 +5,7 @@
#ifndef V8_HEAP_PAGED_SPACES_INL_H_
#define V8_HEAP_PAGED_SPACES_INL_H_
#include "src/common/globals.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/paged-spaces.h"
#include "src/objects/code-inl.h"
@@ -96,31 +97,35 @@ bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes,
return SlowRefillLinearAllocationArea(size_in_bytes, origin);
}
HeapObject PagedSpace::AllocateLinearly(int size_in_bytes) {
AllocationResult PagedSpace::AllocateLinearly(int size_in_bytes) {
Address current_top = allocation_info_.top();
Address new_top = current_top + size_in_bytes;
if (new_top > allocation_info_.limit())
return AllocationResult::Retry(identity());
DCHECK_LE(new_top, allocation_info_.limit());
allocation_info_.set_top(new_top);
return HeapObject::FromAddress(current_top);
return AllocationResult(HeapObject::FromAddress(current_top));
}
HeapObject PagedSpace::TryAllocateLinearlyAligned(
AllocationResult PagedSpace::TryAllocateLinearlyAligned(
int* size_in_bytes, AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(current_top, alignment);
Address new_top = current_top + filler_size + *size_in_bytes;
if (new_top > allocation_info_.limit()) return HeapObject();
if (new_top > allocation_info_.limit())
return AllocationResult::Retry(identity());
allocation_info_.set_top(new_top);
if (filler_size > 0) {
*size_in_bytes += filler_size;
return Heap::PrecedeWithFiller(ReadOnlyRoots(heap()),
HeapObject::FromAddress(current_top),
filler_size);
HeapObject object = Heap::PrecedeWithFiller(
ReadOnlyRoots(heap()), HeapObject::FromAddress(current_top),
filler_size);
return AllocationResult(object);
}
return HeapObject::FromAddress(current_top);
return AllocationResult(HeapObject::FromAddress(current_top));
}
AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
@@ -128,15 +133,16 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
if (!EnsureLinearAllocationArea(size_in_bytes, origin)) {
return AllocationResult::Retry(identity());
}
HeapObject object = AllocateLinearly(size_in_bytes);
DCHECK(!object.is_null());
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
AllocationResult result = AllocateLinearly(size_in_bytes);
DCHECK(!result.IsRetry());
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(result.ToObjectChecked().address(),
size_in_bytes);
if (FLAG_trace_allocations_origins) {
UpdateAllocationOrigins(origin);
}
return object;
return result;
}
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
@@ -144,62 +150,44 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationOrigin origin) {
DCHECK_EQ(identity(), OLD_SPACE);
int allocation_size = size_in_bytes;
HeapObject object = TryAllocateLinearlyAligned(&allocation_size, alignment);
if (object.is_null()) {
// We don't know exactly how much filler we need to align until space is
// allocated, so assume the worst case.
int filler_size = Heap::GetMaximumFillToAlign(alignment);
allocation_size += filler_size;
if (!EnsureLinearAllocationArea(allocation_size, origin)) {
return AllocationResult::Retry(identity());
}
allocation_size = size_in_bytes;
object = TryAllocateLinearlyAligned(&allocation_size, alignment);
DCHECK(!object.is_null());
// We don't know exactly how much filler we need to align until space is
// allocated, so assume the worst case.
int filler_size = Heap::GetMaximumFillToAlign(alignment);
allocation_size += filler_size;
if (!EnsureLinearAllocationArea(allocation_size, origin)) {
return AllocationResult::Retry(identity());
}
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
allocation_size = size_in_bytes;
AllocationResult result =
TryAllocateLinearlyAligned(&allocation_size, alignment);
DCHECK(!result.IsRetry());
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(result.ToObjectChecked().address(),
size_in_bytes);
if (FLAG_trace_allocations_origins) {
UpdateAllocationOrigins(origin);
}
return object;
return result;
}
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
if (top_on_previous_step_ && top() < top_on_previous_step_ &&
SupportsInlineAllocation()) {
// Generated code decreased the top() pointer to do folded allocations.
// The top_on_previous_step_ can be one byte beyond the current page.
DCHECK_NE(top(), kNullAddress);
DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
Page::FromAllocationAreaAddress(top_on_previous_step_ - 1));
top_on_previous_step_ = top();
}
size_t bytes_since_last =
top_on_previous_step_ ? top() - top_on_previous_step_ : 0;
AllocationResult result;
DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
#ifdef V8_HOST_ARCH_32_BIT
AllocationResult result =
alignment != kWordAligned
? AllocateRawAligned(size_in_bytes, alignment, origin)
: AllocateRawUnaligned(size_in_bytes, origin);
#else
AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
#endif
HeapObject heap_obj;
if (!result.IsRetry() && result.To(&heap_obj) && !is_local_space()) {
AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
heap_obj.address(), size_in_bytes);
StartNextInlineAllocationStep();
DCHECK_IMPLIES(
heap()->incremental_marking()->black_allocation(),
heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
if (alignment != kWordAligned) {
int allocation_size = size_in_bytes;
result = TryAllocateLinearlyAligned(&allocation_size, alignment);
} else {
result = AllocateLinearly(size_in_bytes);
}
if (!result.IsRetry()) {
return result;
} else {
return AllocateRawSlow(size_in_bytes, alignment, origin);
}
return result;
}
} // namespace internal

@@ -1004,6 +1004,43 @@ bool PagedSpace::SweepAndRetryAllocation(int required_freed_bytes,
return false;
}
AllocationResult PagedSpace::AllocateRawSlow(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
if (top_on_previous_step_ && top() < top_on_previous_step_ &&
SupportsInlineAllocation()) {
// Generated code decreased the top() pointer to do folded allocations.
// The top_on_previous_step_ can be one byte beyond the current page.
DCHECK_NE(top(), kNullAddress);
DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
Page::FromAllocationAreaAddress(top_on_previous_step_ - 1));
top_on_previous_step_ = top();
}
size_t bytes_since_last =
top_on_previous_step_ ? top() - top_on_previous_step_ : 0;
DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
#ifdef V8_HOST_ARCH_32_BIT
AllocationResult result =
alignment != kWordAligned
? AllocateRawAligned(size_in_bytes, alignment, origin)
: AllocateRawUnaligned(size_in_bytes, origin);
#else
AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
#endif
HeapObject heap_obj;
if (!result.IsRetry() && result.To(&heap_obj) && !is_local_space()) {
AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
heap_obj.address(), size_in_bytes);
StartNextInlineAllocationStep();
DCHECK_IMPLIES(
heap()->incremental_marking()->black_allocation(),
heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
}
return result;
}
// -----------------------------------------------------------------------------
// MapSpace implementation

@@ -327,6 +327,11 @@ class V8_EXPORT_PRIVATE PagedSpace
return identity() == OLD_SPACE && !is_local_space();
}
// Slow path of allocation function
V8_WARN_UNUSED_RESULT AllocationResult
AllocateRawSlow(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin);
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
@@ -352,13 +357,13 @@ class V8_EXPORT_PRIVATE PagedSpace
AllocationOrigin origin);
// Allocates an object from the linear allocation area. Assumes that the
// linear allocation area is large enough to fit the object.
inline HeapObject AllocateLinearly(int size_in_bytes);
inline AllocationResult AllocateLinearly(int size_in_bytes);
// Tries to allocate an aligned object from the linear allocation area.
// Returns a retry result if the linear allocation area does not fit the
// object. Otherwise, returns the object and writes the allocation size
// (object size + alignment filler size) to size_in_bytes.
inline HeapObject TryAllocateLinearlyAligned(int* size_in_bytes,
AllocationAlignment alignment);
inline AllocationResult TryAllocateLinearlyAligned(
int* size_in_bytes, AllocationAlignment alignment);
V8_WARN_UNUSED_RESULT bool RefillLinearAllocationAreaFromFreeList(
size_t size_in_bytes, AllocationOrigin origin);