[heap] Switch to LinearAllocationArea in ConcurrentAllocator

Switch to LinearAllocationArea instead of LocalAllocationBuffer for
tracking the LAB in ConcurrentAllocator.

Eventually this should make it easier to replace the spaces' LABs with
the ConcurrentAllocator class, since those use LinearAllocationArea
as well.

Bug: v8:13375
Change-Id: I4574a30d54fa74b054c5bab2e8a2ab398112c028
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3942256
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#83641}
Authored by Dominik Inführ on 2022-10-12 10:38:01 +02:00; committed by V8 LUCI CQ
parent c090428634
commit 555d150280
3 changed files with 82 additions and 25 deletions
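
The diff below replaces the LocalAllocationBuffer member with a plain top/limit pair. As orientation, here is a minimal, standalone sketch of the bump-pointer interface the new code relies on. The class is a hand-written stand-in rather than V8's LinearAllocationArea; only the top/limit/CanIncrementTop/IncrementTop names mirror what the diff uses.

#include <cassert>
#include <cstddef>
#include <cstdint>

using Address = uintptr_t;
constexpr Address kNullAddress = 0;

// Stand-in for a linear allocation area: a [top, limit) range that is
// consumed front-to-back by bumping the top pointer.
class LinearAllocationAreaSketch {
 public:
  LinearAllocationAreaSketch() = default;
  LinearAllocationAreaSketch(Address top, Address limit)
      : top_(top), limit_(limit) {}

  Address top() const { return top_; }
  Address limit() const { return limit_; }

  // True while there is still room for size_in_bytes more bytes.
  bool CanIncrementTop(std::size_t size_in_bytes) const {
    return top_ + size_in_bytes <= limit_;
  }

  // Bump-pointer allocation: returns the old top and advances it.
  Address IncrementTop(std::size_t size_in_bytes) {
    Address old_top = top_;
    top_ += size_in_bytes;
    assert(top_ <= limit_);
    return old_top;
  }

 private:
  Address top_ = kNullAddress;
  Address limit_ = kNullAddress;
};

int main() {
  // A 32 KB LAB starting at an arbitrary fake address.
  LinearAllocationAreaSketch lab(0x10000, 0x10000 + 32 * 1024);
  if (lab.CanIncrementTop(64)) {
    Address object_address = lab.IncrementTop(64);
    assert(object_address == 0x10000);
  }
  return 0;
}

With this shape, allocation reduces to a bounds check plus a pointer bump, which is what the new AllocateInLabFast* paths below do against lab_.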

src/heap/concurrent-allocator-inl.h

@@ -13,7 +13,6 @@
 #include "src/heap/incremental-marking.h"
 #include "src/heap/local-heap.h"
-#include "src/heap/spaces-inl.h"
 #include "src/heap/spaces.h"
 #include "src/objects/heap-object.h"
 
 namespace v8 {
@@ -35,15 +34,47 @@ AllocationResult ConcurrentAllocator::AllocateRaw(int size_in_bytes,
   AllocationResult result;
   if (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned) {
-    result = lab_.AllocateRawAligned(size_in_bytes, alignment);
+    result = AllocateInLabFastAligned(size_in_bytes, alignment);
   } else {
-    result = lab_.AllocateRawUnaligned(size_in_bytes);
+    result = AllocateInLabFastUnaligned(size_in_bytes);
   }
   return result.IsFailure()
              ? AllocateInLabSlow(size_in_bytes, alignment, origin)
              : result;
 }
 
+AllocationResult ConcurrentAllocator::AllocateInLabFastUnaligned(
+    int size_in_bytes) {
+  size_in_bytes = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
+
+  if (!lab_.CanIncrementTop(size_in_bytes)) {
+    return AllocationResult::Failure();
+  }
+
+  HeapObject object = HeapObject::FromAddress(lab_.IncrementTop(size_in_bytes));
+  return AllocationResult::FromObject(object);
+}
+
+AllocationResult ConcurrentAllocator::AllocateInLabFastAligned(
+    int size_in_bytes, AllocationAlignment alignment) {
+  size_in_bytes = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
+  Address current_top = lab_.top();
+  int filler_size = Heap::GetFillToAlign(current_top, alignment);
+  int aligned_size = filler_size + size_in_bytes;
+
+  if (!lab_.CanIncrementTop(aligned_size)) {
+    return AllocationResult::Failure();
+  }
+
+  HeapObject object = HeapObject::FromAddress(lab_.IncrementTop(aligned_size));
+
+  if (filler_size > 0) {
+    object = owning_heap()->PrecedeWithFiller(object, filler_size);
+  }
+
+  return AllocationResult::FromObject(object);
+}
+
 }  // namespace internal
 }  // namespace v8
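
The aligned fast path above reserves filler_size + size_in_bytes in a single bump and then places the object payload behind the filler. The standalone sketch below walks through that arithmetic; FillToAlign is a hypothetical helper loosely modeled on Heap::GetFillToAlign and assumes a power-of-two alignment.

#include <cassert>
#include <cstdint>

using Address = uintptr_t;

// Bytes needed to round `top` up to the next `alignment` boundary.
int FillToAlign(Address top, int alignment) {
  int misalignment = static_cast<int>(top & (alignment - 1));
  return misalignment == 0 ? 0 : alignment - misalignment;
}

int main() {
  Address top = 0x1004;          // current LAB top, only 4-byte aligned
  const int alignment = 8;       // e.g. a double-aligned allocation
  const int size_in_bytes = 16;

  int filler_size = FillToAlign(top, alignment);
  int aligned_size = filler_size + size_in_bytes;  // reserved in one bump

  Address allocation_start = top;                           // old top
  Address object_address = allocation_start + filler_size;  // after filler

  assert(filler_size == 4);
  assert(aligned_size == 20);
  assert(object_address % alignment == 0);
  return 0;
}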

src/heap/concurrent-allocator.cc

@@ -9,6 +9,7 @@
 #include "src/handles/persistent-handles.h"
 #include "src/heap/concurrent-allocator-inl.h"
 #include "src/heap/heap.h"
+#include "src/heap/linear-allocation-area.h"
 #include "src/heap/local-heap-inl.h"
 #include "src/heap/local-heap.h"
 #include "src/heap/marking.h"
@@ -77,11 +78,15 @@ void StressConcurrentAllocatorTask::Schedule(Isolate* isolate) {
       kDelayInSeconds);
 }
 
+ConcurrentAllocator::ConcurrentAllocator(LocalHeap* local_heap,
+                                         PagedSpace* space)
+    : local_heap_(local_heap), space_(space), owning_heap_(space_->heap()) {}
+
 void ConcurrentAllocator::FreeLinearAllocationArea() {
   // The code page of the linear allocation area needs to be unprotected
   // because we are going to write a filler into that memory area below.
   base::Optional<CodePageMemoryModificationScope> optional_scope;
-  if (lab_.IsValid() && space_->identity() == CODE_SPACE) {
+  if (IsLabValid() && space_->identity() == CODE_SPACE) {
     optional_scope.emplace(MemoryChunk::FromAddress(lab_.top()));
   }
   if (lab_.top() != lab_.limit() &&
@@ -89,17 +94,19 @@ void ConcurrentAllocator::FreeLinearAllocationArea() {
     Page::FromAddress(lab_.top())
         ->DestroyBlackAreaBackground(lab_.top(), lab_.limit());
   }
-  lab_.CloseAndMakeIterable();
+  MakeLabIterable();
+  ResetLab();
 }
 
 void ConcurrentAllocator::MakeLinearAllocationAreaIterable() {
   // The code page of the linear allocation area needs to be unprotected
   // because we are going to write a filler into that memory area below.
   base::Optional<CodePageMemoryModificationScope> optional_scope;
-  if (lab_.IsValid() && space_->identity() == CODE_SPACE) {
+  if (IsLabValid() && space_->identity() == CODE_SPACE) {
     optional_scope.emplace(MemoryChunk::FromAddress(lab_.top()));
   }
-  lab_.MakeIterable();
+  MakeLabIterable();
 }
 
 void ConcurrentAllocator::MarkLinearAllocationAreaBlack() {
@@ -137,7 +144,7 @@ AllocationResult ConcurrentAllocator::AllocateInLabSlow(
     return AllocationResult::Failure();
   }
   AllocationResult allocation =
-      lab_.AllocateRawAligned(size_in_bytes, alignment);
+      AllocateInLabFastAligned(size_in_bytes, alignment);
   DCHECK(!allocation.IsFailure());
   return allocation;
 }
@@ -151,10 +158,10 @@ bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
   FreeLinearAllocationArea();
 
-  HeapObject object = HeapObject::FromAddress(result->first);
-  lab_ = LocalAllocationBuffer::FromResult(
-      owning_heap(), AllocationResult::FromObject(object), result->second);
-  DCHECK(lab_.IsValid());
+  Address lab_start = result->first;
+  Address lab_end = lab_start + result->second;
+  lab_ = LinearAllocationArea(lab_start, lab_end);
+  DCHECK(IsLabValid());
 
   if (IsBlackAllocationEnabled()) {
     Address top = lab_.top();
@@ -179,12 +186,12 @@ AllocationResult ConcurrentAllocator::AllocateOutsideLab(
   DCHECK_GE(result->second, aligned_size_in_bytes);
 
-  HeapObject object =
-      (requested_filler_size)
-          ? owning_heap()->AlignWithFiller(
-                HeapObject::FromAddress(result->first), size_in_bytes,
-                static_cast<int>(result->second), alignment)
-          : HeapObject::FromAddress(result->first);
+  HeapObject object = HeapObject::FromAddress(result->first);
+  if (requested_filler_size > 0) {
+    object = owning_heap()->AlignWithFiller(
+        object, size_in_bytes, static_cast<int>(result->second), alignment);
+  }
+
   if (IsBlackAllocationEnabled()) {
     owning_heap()->incremental_marking()->MarkBlackBackground(object,
                                                               size_in_bytes);
@@ -196,7 +203,12 @@ bool ConcurrentAllocator::IsBlackAllocationEnabled() const {
   return owning_heap()->incremental_marking()->black_allocation();
 }
 
-Heap* ConcurrentAllocator::owning_heap() const { return space_->heap(); }
+void ConcurrentAllocator::MakeLabIterable() {
+  if (IsLabValid()) {
+    owning_heap()->CreateFillerObjectAtBackground(
+        lab_.top(), static_cast<int>(lab_.limit() - lab_.top()));
+  }
+}
 
 }  // namespace internal
 }  // namespace v8
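
The changes in this file amount to a simpler LAB lifecycle: EnsureLab turns the (start, size) pair returned by the space into LinearAllocationArea(start, start + size), and FreeLinearAllocationArea fills the unused [top, limit) tail via MakeLabIterable before ResetLab clears the area. The standalone sketch below mimics that lifecycle; Lab, RefillFromSpace, and CreateFillerObjectAt are simplified stand-ins, not V8 APIs.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <optional>
#include <utility>

using Address = uintptr_t;
constexpr Address kNullAddress = 0;

struct Lab {
  Address top = kNullAddress;
  Address limit = kNullAddress;
  bool IsValid() const { return top != kNullAddress; }
};

// Pretend background refill; in V8 this comes from the paged space and can fail.
std::optional<std::pair<Address, std::size_t>> RefillFromSpace() {
  return std::make_pair(Address{0x20000}, std::size_t{4 * 1024});
}

// Stand-in for writing a filler object so the page stays iterable.
void CreateFillerObjectAt(Address start, int size) {
  std::printf("filler at %#zx, %d bytes\n",
              static_cast<std::size_t>(start), size);
}

int main() {
  Lab lab;

  // EnsureLab-style refill: (start, size) becomes [start, start + size).
  if (auto result = RefillFromSpace()) {
    lab = Lab{result->first, result->first + result->second};
  }

  // Allocate a little, leaving a tail between top and limit.
  lab.top += 256;

  // FreeLinearAllocationArea-style teardown: make the tail iterable, then reset.
  if (lab.IsValid() && lab.top != lab.limit) {
    CreateFillerObjectAt(lab.top, static_cast<int>(lab.limit - lab.top));
  }
  lab = Lab{};
  return 0;
}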

src/heap/concurrent-allocator.h

@@ -7,6 +7,7 @@
 #include "src/common/globals.h"
 #include "src/heap/heap.h"
+#include "src/heap/linear-allocation-area.h"
 #include "src/heap/spaces.h"
 #include "src/tasks/cancelable-task.h"
@@ -37,10 +38,7 @@ class ConcurrentAllocator {
   static constexpr int kMaxLabSize = 32 * KB;
   static constexpr int kMaxLabObjectSize = 2 * KB;
 
-  ConcurrentAllocator(LocalHeap* local_heap, PagedSpace* space)
-      : local_heap_(local_heap),
-        space_(space),
-        lab_(LocalAllocationBuffer::InvalidBuffer()) {}
+  ConcurrentAllocator(LocalHeap* local_heap, PagedSpace* space);
 
   inline AllocationResult AllocateRaw(int object_size,
                                       AllocationAlignment alignment,
@@ -59,6 +57,12 @@
       "size <= kMaxLabObjectSize will fit into a newly allocated LAB of size "
       "kLabSize after computing the alignment requirements.");
 
+  V8_EXPORT_PRIVATE V8_INLINE AllocationResult
+  AllocateInLabFastUnaligned(int size_in_bytes);
+
+  V8_EXPORT_PRIVATE V8_INLINE AllocationResult
+  AllocateInLabFastAligned(int size_in_bytes, AllocationAlignment alignment);
+
   V8_EXPORT_PRIVATE AllocationResult
   AllocateInLabSlow(int size_in_bytes, AllocationAlignment alignment,
                     AllocationOrigin origin);
@@ -70,13 +74,23 @@
   bool IsBlackAllocationEnabled() const;
 
+  // Checks whether the LAB is currently in use.
+  V8_INLINE bool IsLabValid() { return lab_.top() != kNullAddress; }
+
+  // Resets the LAB.
+  void ResetLab() { lab_ = LinearAllocationArea(kNullAddress, kNullAddress); }
+
+  // Installs a filler object between the LABs top and limit pointers.
+  void MakeLabIterable();
+
   // Returns the Heap of space_. This might differ from the LocalHeap's Heap for
   // shared spaces.
-  Heap* owning_heap() const;
+  Heap* owning_heap() const { return owning_heap_; }
 
   LocalHeap* const local_heap_;
   PagedSpace* const space_;
-  LocalAllocationBuffer lab_;
+  Heap* const owning_heap_;
+  LinearAllocationArea lab_;
 };
 
 }  // namespace internal