heap: Place LABs in IsolateData
Until now, LABs were accessed from generated code via external references, e.g., see NewSpaceAllocationTopAddress() and NewSpaceAllocationLimitAddress(). This patch places them in the IsolateData, so that they can be accessed using Isolate-constant offsets. It affects the hot path of all TF-generated code.

Bug: v8:12428
Change-Id: I7bfd54bea4febead404829d8e0b058b6cf53a374
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3303800
Commit-Queue: Nikolaos Papaspyrou <nikolaos@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78176}
Parent: 48dea8d83b — Commit: 427a67993c
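The idea behind the change, stated outside the diff: a field that lives inside IsolateData sits at a compile-time-constant offset from the isolate root, which V8's generated code already keeps in a register, so reading it is a single base-plus-offset load with no absolute address embedded in the code stream. Below is a minimal C++ sketch of that access pattern; the struct and names are illustrative assumptions, not V8's actual layout or API:

#include <cstddef>
#include <cstdint>

// Illustrative stand-in for IsolateData: allocation top/limit words embedded
// directly in the structure that the root register points to.
struct IsolateDataSketch {
  uintptr_t new_space_top;
  uintptr_t new_space_limit;
};

// One load at base + constant offset; `isolate_root` plays the role of V8's
// root register. Before this patch, generated code instead went through an
// external reference, i.e., the absolute address of the top/limit word.
inline uintptr_t LoadNewSpaceTop(const unsigned char* isolate_root) {
  return *reinterpret_cast<const uintptr_t*>(
      isolate_root + offsetof(IsolateDataSketch, new_space_top));
}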
src/execution/DEPS (new file)
@@ -0,0 +1,5 @@
+specific_include_rules = {
+  "isolate-data\.h": [
+    "+src/heap/linear-allocation-area.h",
+  ],
+}
@@ -10,6 +10,7 @@
 #include "src/codegen/external-reference-table.h"
 #include "src/execution/stack-guard.h"
 #include "src/execution/thread-local-top.h"
+#include "src/heap/linear-allocation-area.h"
 #include "src/roots/roots.h"
 #include "src/security/external-pointer-table.h"
 #include "src/utils/utils.h"
@@ -48,6 +49,9 @@ class Isolate;
     builtin_entry_table)                                                    \
   V(kBuiltinTableOffset, Builtins::kBuiltinCount* kSystemPointerSize,       \
     builtin_table)                                                          \
+  /* Linear allocation areas for the heap's new and old space */            \
+  V(kNewAllocationInfo, LinearAllocationArea::kSize, new_allocation_info)   \
+  V(kOldAllocationInfo, LinearAllocationArea::kSize, old_allocation_info)   \
   ISOLATE_DATA_FIELDS_EXTERNAL_CODE_SPACE(V)                                \
   ISOLATE_DATA_FIELDS_HEAP_SANDBOX(V)                                       \
   V(kStackIsIterableOffset, kUInt8Size, stack_is_iterable)
@@ -229,6 +233,9 @@ class IsolateData final {
   // The entries in this array are tagged pointers to Code objects.
   Address builtin_table_[Builtins::kBuiltinCount] = {};

+  LinearAllocationArea new_allocation_info_;
+  LinearAllocationArea old_allocation_info_;
+
 #ifdef V8_EXTERNAL_CODE_SPACE
   Address builtin_code_data_container_table_[Builtins::kBuiltinCount] = {};
 #endif
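The V(...) rows added above feed V8's X-macro pattern: each field is listed once with its size, and a single macro expansion turns the list into packed, compile-time offset constants. A self-contained sketch of that offset-generating trick follows; the field list and enumerator names are hypothetical, for illustration only:

#include <cstdint>

constexpr int kSystemPointerSize = sizeof(void*);

// Hypothetical field list; each entry is V(Name, SizeInBytes).
#define DATA_FIELDS(V)                          \
  V(NewAllocationInfo, 3 * kSystemPointerSize)  \
  V(OldAllocationInfo, 3 * kSystemPointerSize)  \
  V(StackIsIterable, 1)

// Each field expands to two enumerators: its start offset, and a marker on
// its last byte, so the next field's offset continues right after it.
enum FieldOffsets : int {
#define FIELD(Name, Size) \
  k##Name##Offset, k##Name##End = k##Name##Offset + (Size)-1,
  DATA_FIELDS(FIELD)
#undef FIELD
  kTotalSize  // one past the last byte of the last field
};

static_assert(kNewAllocationInfoOffset == 0, "fields are packed from offset 0");
static_assert(kOldAllocationInfoOffset == 3 * kSystemPointerSize,
              "each field starts where the previous one ends");

Because every offset comes out as a compile-time constant, a code generator can fold it straight into an addressing mode instead of first loading an external address.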
@@ -3784,7 +3784,8 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
   DCHECK(!heap_.HasBeenSetUp());
   heap_.SetUp(main_thread_local_heap());
   ReadOnlyHeap::SetUp(this, read_only_snapshot_data, can_rehash);
-  heap_.SetUpSpaces();
+  heap_.SetUpSpaces(&isolate_data_.new_allocation_info_,
+                    &isolate_data_.old_allocation_info_);

   if (OwnsStringTable()) {
     string_table_ = std::make_shared<StringTable>(this);
@@ -5688,16 +5688,17 @@ class StressConcurrentAllocationObserver : public AllocationObserver {
   Heap* heap_;
 };

-void Heap::SetUpSpaces() {
+void Heap::SetUpSpaces(LinearAllocationArea* new_allocation_info,
+                       LinearAllocationArea* old_allocation_info) {
   // Ensure SetUpFromReadOnlySpace has been ran.
   DCHECK_NOT_NULL(read_only_space_);
   const bool has_young_gen = !FLAG_single_generation && !IsShared();
   if (has_young_gen) {
-    space_[NEW_SPACE] = new_space_ =
-        new NewSpace(this, memory_allocator_->data_page_allocator(),
-                     initial_semispace_size_, max_semi_space_size_);
+    space_[NEW_SPACE] = new_space_ = new NewSpace(
+        this, memory_allocator_->data_page_allocator(), initial_semispace_size_,
+        max_semi_space_size_, new_allocation_info);
   }
-  space_[OLD_SPACE] = old_space_ = new OldSpace(this);
+  space_[OLD_SPACE] = old_space_ = new OldSpace(this, old_allocation_info);
   space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
   space_[MAP_SPACE] = map_space_ = new MapSpace(this);
   space_[LO_SPACE] = lo_space_ = new OldLargeObjectSpace(this);
@@ -86,6 +86,7 @@ class HeapObjectsFilter;
 class HeapStats;
 class Isolate;
 class JSFinalizationRegistry;
+class LinearAllocationArea;
 class LocalEmbedderHeapTracer;
 class LocalHeap;
 class MarkingBarrier;
@@ -842,7 +843,8 @@ class Heap {
   void ReplaceReadOnlySpace(SharedReadOnlySpace* shared_ro_space);

   // Sets up the heap memory without creating any objects.
-  void SetUpSpaces();
+  void SetUpSpaces(LinearAllocationArea* new_allocation_info,
+                   LinearAllocationArea* old_allocation_info);

   // Prepares the heap, setting up for deserialization.
   void InitializeMainThreadLocalHeap(LocalHeap* main_thread_local_heap);
@@ -5,6 +5,8 @@
 #ifndef V8_HEAP_LINEAR_ALLOCATION_AREA_H_
 #define V8_HEAP_LINEAR_ALLOCATION_AREA_H_

+// This header file is included outside of src/heap/.
+// Avoid including src/heap/ internals.
 #include "include/v8-internal.h"
 #include "src/common/checks.h"

@@ -100,6 +102,8 @@ class LinearAllocationArea final {
 #endif  // DEBUG
   }

+  static constexpr int kSize = 3 * kSystemPointerSize;
+
  private:
   // The start of the LAB. Initially coincides with `top_`. As top is moved
   // ahead, the area [start_, top_[ denotes a range of new objects. This range
@@ -111,6 +115,10 @@ class LinearAllocationArea final {
   Address limit_ = kNullAddress;
 };

+static_assert(sizeof(LinearAllocationArea) == LinearAllocationArea::kSize,
+              "LinearAllocationArea's size must be small because it "
+              "is included in IsolateData.");
+
 }  // namespace internal
 }  // namespace v8

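Most of the hunks that follow only switch allocation_info_. to allocation_info_->, because each space now holds a pointer to a LinearAllocationArea owned by IsolateData rather than owning the area itself. To make the kSize constant and the static_assert above concrete, here is a minimal bump-pointer sketch using the method names visible in this diff; the method bodies are plausible assumptions, not V8's exact implementation:

#include <cstddef>
#include <cstdint>

using Address = uintptr_t;
constexpr Address kNullAddress = 0;

// Three-word LAB: [start_, top_) holds freshly allocated objects;
// [top_, limit_) is what is still available for bump allocation.
class LinearAllocationArea final {
 public:
  void Reset(Address top, Address limit) {
    start_ = top;
    top_ = top;
    limit_ = limit;
  }
  // Does the area have room for size_in_bytes more bytes?
  bool CanIncrementTop(size_t size_in_bytes) const {
    return top_ + size_in_bytes <= limit_;
  }
  // Bump allocation: return the old top and advance it.
  Address IncrementTop(size_t size_in_bytes) {
    Address old_top = top_;
    top_ += size_in_bytes;
    return old_top;
  }
  Address start() const { return start_; }
  Address top() const { return top_; }
  Address limit() const { return limit_; }

  static constexpr int kSize = 3 * sizeof(Address);

 private:
  Address start_ = kNullAddress;
  Address top_ = kNullAddress;
  Address limit_ = kNullAddress;
};

// The invariant the patch asserts: exactly three words and no padding, so the
// object can be embedded in IsolateData at a fixed, predictable offset.
static_assert(sizeof(LinearAllocationArea) == LinearAllocationArea::kSize,
              "LinearAllocationArea must stay exactly three words");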
@@ -111,11 +111,11 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,

 AllocationResult NewSpace::AllocateFastUnaligned(int size_in_bytes,
                                                  AllocationOrigin origin) {
-  if (!allocation_info_.CanIncrementTop(size_in_bytes)) {
+  if (!allocation_info_->CanIncrementTop(size_in_bytes)) {
     return AllocationResult::Retry(NEW_SPACE);
   }
   HeapObject obj =
-      HeapObject::FromAddress(allocation_info_.IncrementTop(size_in_bytes));
+      HeapObject::FromAddress(allocation_info_->IncrementTop(size_in_bytes));
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
@@ -130,15 +130,15 @@ AllocationResult NewSpace::AllocateFastUnaligned(int size_in_bytes,
 AllocationResult NewSpace::AllocateFastAligned(
     int size_in_bytes, int* result_aligned_size_in_bytes,
     AllocationAlignment alignment, AllocationOrigin origin) {
-  Address top = allocation_info_.top();
+  Address top = allocation_info_->top();
   int filler_size = Heap::GetFillToAlign(top, alignment);
   int aligned_size_in_bytes = size_in_bytes + filler_size;

-  if (!allocation_info_.CanIncrementTop(aligned_size_in_bytes)) {
+  if (!allocation_info_->CanIncrementTop(aligned_size_in_bytes)) {
     return AllocationResult::Retry(NEW_SPACE);
   }
   HeapObject obj = HeapObject::FromAddress(
-      allocation_info_.IncrementTop(aligned_size_in_bytes));
+      allocation_info_->IncrementTop(aligned_size_in_bytes));
   if (result_aligned_size_in_bytes)
     *result_aligned_size_in_bytes = aligned_size_in_bytes;
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
@@ -387,7 +387,7 @@ void SemiSpaceObjectIterator::Initialize(Address start, Address end) {

 size_t NewSpace::CommittedPhysicalMemory() {
   if (!base::OS::HasLazyCommits()) return CommittedMemory();
-  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
   size_t size = to_space_.CommittedPhysicalMemory();
   if (from_space_.IsCommitted()) {
     size += from_space_.CommittedPhysicalMemory();
@@ -400,8 +400,9 @@ size_t NewSpace::CommittedPhysicalMemory() {

 NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
                    size_t initial_semispace_capacity,
-                   size_t max_semispace_capacity)
-    : SpaceWithLinearArea(heap, NEW_SPACE, new NoFreeList()),
+                   size_t max_semispace_capacity,
+                   LinearAllocationArea* allocation_info)
+    : SpaceWithLinearArea(heap, NEW_SPACE, new NoFreeList(), allocation_info),
       to_space_(heap, kToSpace),
       from_space_(heap, kFromSpace) {
   DCHECK(initial_semispace_capacity <= max_semispace_capacity);
@@ -416,7 +417,7 @@ NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
 }

 void NewSpace::TearDown() {
-  allocation_info_.Reset(kNullAddress, kNullAddress);
+  allocation_info_->Reset(kNullAddress, kNullAddress);

   to_space_.TearDown();
   from_space_.TearDown();
@@ -468,8 +469,8 @@ void NewSpace::UpdateLinearAllocationArea(Address known_top) {
   AdvanceAllocationObservers();

   Address new_top = known_top == 0 ? to_space_.page_low() : known_top;
-  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
-  allocation_info_.Reset(new_top, to_space_.page_high());
+  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
+  allocation_info_->Reset(new_top, to_space_.page_high());
   // The order of the following two stores is important.
   // See the corresponding loads in ConcurrentMarking::Run.
   {
@@ -499,7 +500,7 @@ void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
   Address new_limit = ComputeLimit(top(), to_space_.page_high(), min_size);
   DCHECK_LE(top(), new_limit);
   DCHECK_LE(new_limit, to_space_.page_high());
-  allocation_info_.SetLimit(new_limit);
+  allocation_info_->SetLimit(new_limit);
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

 #if DEBUG
@@ -508,7 +509,7 @@ void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
 }

 bool NewSpace::AddFreshPage() {
-  Address top = allocation_info_.top();
+  Address top = allocation_info_->top();
   DCHECK(!OldSpace::IsAtPageStart(top));

   // Clear remainder of current page.
@@ -566,7 +567,7 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
                                 AllocationAlignment alignment) {
   AdvanceAllocationObservers();

-  Address old_top = allocation_info_.top();
+  Address old_top = allocation_info_->top();
   Address high = to_space_.page_high();
   int filler_size = Heap::GetFillToAlign(old_top, alignment);
   int aligned_size_in_bytes = size_in_bytes + filler_size;
@@ -584,7 +585,7 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
     return false;
   }

-  old_top = allocation_info_.top();
+  old_top = allocation_info_->top();
   high = to_space_.page_high();
   filler_size = Heap::GetFillToAlign(old_top, alignment);
   aligned_size_in_bytes = size_in_bytes + filler_size;
@@ -595,8 +596,8 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
 }

 void NewSpace::MaybeFreeUnusedLab(LinearAllocationArea info) {
-  if (allocation_info_.MergeIfAdjacent(info)) {
-    original_top_.store(allocation_info_.top(), std::memory_order_release);
+  if (allocation_info_->MergeIfAdjacent(info)) {
+    original_top_.store(allocation_info_->top(), std::memory_order_release);
   }

 #if DEBUG
@@ -623,7 +624,7 @@ AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
     return AllocationResult::Retry(NEW_SPACE);
   }

-  DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
+  DCHECK_EQ(allocation_info_->start(), allocation_info_->top());

   AllocationResult result = AllocateFastUnaligned(size_in_bytes, origin);
   DCHECK(!result.IsRetry());
@@ -642,7 +643,7 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
     return AllocationResult::Retry(NEW_SPACE);
   }

-  DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
+  DCHECK_EQ(allocation_info_->start(), allocation_info_->top());

   int aligned_size_in_bytes;

@@ -673,16 +674,16 @@ void NewSpace::FreeLinearAllocationArea() {

 void NewSpace::VerifyTop() {
   // Ensure validity of LAB: start <= top <= limit
-  DCHECK_LE(allocation_info_.start(), allocation_info_.top());
-  DCHECK_LE(allocation_info_.top(), allocation_info_.limit());
+  DCHECK_LE(allocation_info_->start(), allocation_info_->top());
+  DCHECK_LE(allocation_info_->top(), allocation_info_->limit());

   // Ensure that original_top_ always >= LAB start. The delta between start_
   // and top_ is still to be processed by allocation observers.
-  DCHECK_GE(original_top_, allocation_info_.start());
+  DCHECK_GE(original_top_, allocation_info_->start());

   // Ensure that limit() is <= original_limit_, original_limit_ always needs
   // to be end of curent to space page.
-  DCHECK_LE(allocation_info_.limit(), original_limit_);
+  DCHECK_LE(allocation_info_->limit(), original_limit_);
   DCHECK_EQ(original_limit_, to_space_.page_high());
 }

@@ -233,7 +233,8 @@ class V8_EXPORT_PRIVATE NewSpace
   using const_iterator = ConstPageIterator;

   NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
-           size_t initial_semispace_capacity, size_t max_semispace_capacity);
+           size_t initial_semispace_capacity, size_t max_semispace_capacity,
+           LinearAllocationArea* allocation_info);

   ~NewSpace() override { TearDown(); }

@@ -527,9 +528,9 @@ class V8_EXPORT_PRIVATE NewSpace
 // For contiguous spaces, top should be in the space (or at the end) and limit
 // should be the end of the space.
 #define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
-  SLOW_DCHECK((space).page_low() <= (info).top() &&   \
-              (info).top() <= (space).page_high() &&  \
-              (info).limit() <= (space).page_high())
+  SLOW_DCHECK((space).page_low() <= (info)->top() &&  \
+              (info)->top() <= (space).page_high() && \
+              (info)->limit() <= (space).page_high())

 }  // namespace internal
 }  // namespace v8

@@ -79,38 +79,39 @@ size_t PagedSpace::RelinkFreeListCategories(Page* page) {
 }

 bool PagedSpace::TryFreeLast(Address object_address, int object_size) {
-  if (allocation_info_.top() != kNullAddress) {
-    return allocation_info_.DecrementTopIfAdjacent(object_address, object_size);
+  if (allocation_info_->top() != kNullAddress) {
+    return allocation_info_->DecrementTopIfAdjacent(object_address,
+                                                    object_size);
   }
   return false;
 }

 bool PagedSpace::EnsureLabMain(int size_in_bytes, AllocationOrigin origin) {
-  if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit()) {
+  if (allocation_info_->top() + size_in_bytes <= allocation_info_->limit()) {
     return true;
   }
   return RefillLabMain(size_in_bytes, origin);
 }

 AllocationResult PagedSpace::AllocateFastUnaligned(int size_in_bytes) {
-  if (!allocation_info_.CanIncrementTop(size_in_bytes)) {
+  if (!allocation_info_->CanIncrementTop(size_in_bytes)) {
     return AllocationResult::Retry(identity());
   }
   return AllocationResult(
-      HeapObject::FromAddress(allocation_info_.IncrementTop(size_in_bytes)));
+      HeapObject::FromAddress(allocation_info_->IncrementTop(size_in_bytes)));
 }

 AllocationResult PagedSpace::AllocateFastAligned(
     int size_in_bytes, int* aligned_size_in_bytes,
     AllocationAlignment alignment) {
-  Address current_top = allocation_info_.top();
+  Address current_top = allocation_info_->top();
   int filler_size = Heap::GetFillToAlign(current_top, alignment);
   int aligned_size = filler_size + size_in_bytes;
-  if (!allocation_info_.CanIncrementTop(aligned_size)) {
+  if (!allocation_info_->CanIncrementTop(aligned_size)) {
     return AllocationResult::Retry(identity());
   }
   HeapObject obj =
-      HeapObject::FromAddress(allocation_info_.IncrementTop(aligned_size));
+      HeapObject::FromAddress(allocation_info_->IncrementTop(aligned_size));
   if (aligned_size_in_bytes) *aligned_size_in_bytes = aligned_size;
   if (filler_size > 0) {
     obj = heap()->PrecedeWithFiller(obj, filler_size);
@@ -90,8 +90,9 @@ Page* PagedSpace::InitializePage(MemoryChunk* chunk) {

 PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                        Executability executable, FreeList* free_list,
+                       LinearAllocationArea* allocation_info_,
                        CompactionSpaceKind compaction_space_kind)
-    : SpaceWithLinearArea(heap, space, free_list),
+    : SpaceWithLinearArea(heap, space, free_list, allocation_info_),
       executable_(executable),
       compaction_space_kind_(compaction_space_kind) {
   area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
@@ -211,7 +212,7 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {

 size_t PagedSpace::CommittedPhysicalMemory() {
   if (!base::OS::HasLazyCommits()) return CommittedMemory();
-  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
   base::MutexGuard guard(mutex());
   size_t size = 0;
   for (Page* page : *this) {
@@ -282,8 +283,8 @@ void PagedSpace::RemovePage(Page* page) {
 void PagedSpace::SetTopAndLimit(Address top, Address limit) {
   DCHECK(top == limit ||
          Page::FromAddress(top) == Page::FromAddress(limit - 1));
-  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
-  allocation_info_.Reset(top, limit);
+  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
+  allocation_info_->Reset(top, limit);

   base::Optional<base::SharedMutexGuard<base::kExclusive>> optional_guard;
   if (!is_compaction_space())
@@ -308,7 +309,7 @@ void PagedSpace::ResetFreeList() {

 void PagedSpace::ShrinkImmortalImmovablePages() {
   DCHECK(!heap()->deserialization_complete());
-  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
   FreeLinearAllocationArea();
   ResetFreeList();
   for (Page* page : *this) {
@@ -482,7 +483,7 @@ void PagedSpace::ReleasePage(Page* page) {

   free_list_->EvictFreeListItems(page);

-  if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
+  if (Page::FromAllocationAreaAddress(allocation_info_->top()) == page) {
     SetTopAndLimit(kNullAddress, kNullAddress);
   }

@@ -558,7 +559,7 @@ bool PagedSpace::TryAllocationFromFreeListMain(size_t size_in_bytes,
   Page* page = Page::FromHeapObject(new_node);
   IncreaseAllocatedBytes(new_node_size, page);

-  DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
+  DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
   Address start = new_node.address();
   Address end = new_node.address() + new_node_size;
   Address limit = ComputeLimit(start, end, size_in_bytes);
@@ -709,7 +710,7 @@ void PagedSpace::Print() {}
 #ifdef VERIFY_HEAP
 void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
   bool allocation_pointer_found_in_space =
-      (allocation_info_.top() == allocation_info_.limit());
+      (allocation_info_->top() == allocation_info_->limit());
   size_t external_space_bytes[kNumTypes];
   size_t external_page_bytes[kNumTypes];

@@ -725,7 +726,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
       external_page_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
     }

-    if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
+    if (page == Page::FromAllocationAreaAddress(allocation_info_->top())) {
       allocation_pointer_found_in_space = true;
     }
     CHECK(page->SweepingDone());
@@ -860,7 +861,7 @@ void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {

 void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
   // Ensure there are no unaccounted allocations.
-  DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
+  DCHECK_EQ(allocation_info_->start(), allocation_info_->top());

   Address new_limit = ComputeLimit(top(), limit(), min_size);
   DCHECK_LE(top(), new_limit);
@@ -86,7 +86,7 @@ class V8_EXPORT_PRIVATE PagedSpace
   // Creates a space with an id.
   PagedSpace(
       Heap* heap, AllocationSpace id, Executability executable,
-      FreeList* free_list,
+      FreeList* free_list, LinearAllocationArea* allocation_info_,
       CompactionSpaceKind compaction_space_kind = CompactionSpaceKind::kNone);

   ~PagedSpace() override { TearDown(); }
@@ -457,12 +457,15 @@ class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
   CompactionSpace(Heap* heap, AllocationSpace id, Executability executable,
                   CompactionSpaceKind compaction_space_kind)
       : PagedSpace(heap, id, executable, FreeList::CreateFreeList(),
-                   compaction_space_kind) {
+                   &allocation_info_, compaction_space_kind) {
     DCHECK(is_compaction_space());
   }

   const std::vector<Page*>& GetNewPages() { return new_pages_; }

+ private:
+  LinearAllocationArea allocation_info_;
+
  protected:
   V8_WARN_UNUSED_RESULT bool RefillLabMain(int size_in_bytes,
                                            AllocationOrigin origin) override;
@@ -509,9 +512,9 @@ class OldSpace : public PagedSpace {
  public:
   // Creates an old space object. The constructor does not allocate pages
   // from OS.
-  explicit OldSpace(Heap* heap)
-      : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
-                   FreeList::CreateFreeList()) {}
+  explicit OldSpace(Heap* heap, LinearAllocationArea* allocation_info)
+      : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE, FreeList::CreateFreeList(),
+                   allocation_info) {}

   static bool IsAtPageStart(Address addr) {
     return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
@@ -533,7 +536,11 @@ class CodeSpace : public PagedSpace {
   // Creates an old space object. The constructor does not allocate pages
   // from OS.
   explicit CodeSpace(Heap* heap)
-      : PagedSpace(heap, CODE_SPACE, EXECUTABLE, FreeList::CreateFreeList()) {}
+      : PagedSpace(heap, CODE_SPACE, EXECUTABLE, FreeList::CreateFreeList(),
+                   &paged_allocation_info_) {}
+
+ private:
+  LinearAllocationArea paged_allocation_info_;
 };

 // -----------------------------------------------------------------------------
@@ -543,8 +550,8 @@ class MapSpace : public PagedSpace {
  public:
   // Creates a map space object.
   explicit MapSpace(Heap* heap)
-      : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE,
-                   FreeList::CreateFreeList()) {}
+      : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE, FreeList::CreateFreeList(),
+                   &paged_allocation_info_) {}

   int RoundSizeDownToObjectAlignment(int size) override {
     if (base::bits::IsPowerOfTwo(Map::kSize)) {
@@ -559,6 +566,9 @@ class MapSpace : public PagedSpace {
 #ifdef VERIFY_HEAP
   void VerifyObject(HeapObject obj) override;
 #endif
+
+ private:
+  LinearAllocationArea paged_allocation_info_;
 };

 // Iterates over the chunks (pages and large object pages) that can contain
@@ -270,7 +270,7 @@ Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
     return start + min_size;
   } else if (SupportsAllocationObserver() && allocation_counter_.IsActive()) {
     // Ensure there are no unaccounted allocations.
-    DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
+    DCHECK_EQ(allocation_info_->start(), allocation_info_->top());

     // Generated code may allocate inline from the linear allocation area for.
     // To make sure we can observe these allocations, we use a lower limit.
@@ -374,16 +374,16 @@ void SpaceWithLinearArea::ResumeAllocationObservers() {
 }

 void SpaceWithLinearArea::AdvanceAllocationObservers() {
-  if (allocation_info_.top() &&
-      allocation_info_.start() != allocation_info_.top()) {
-    allocation_counter_.AdvanceAllocationObservers(allocation_info_.top() -
-                                                   allocation_info_.start());
+  if (allocation_info_->top() &&
+      allocation_info_->start() != allocation_info_->top()) {
+    allocation_counter_.AdvanceAllocationObservers(allocation_info_->top() -
+                                                   allocation_info_->start());
     MarkLabStartInitialized();
   }
 }

 void SpaceWithLinearArea::MarkLabStartInitialized() {
-  allocation_info_.ResetStart();
+  allocation_info_->ResetStart();
   if (identity() == NEW_SPACE) {
     heap()->new_space()->MoveOriginalTopForward();

@@ -413,12 +413,12 @@ void SpaceWithLinearArea::InvokeAllocationObservers(

   if (allocation_size >= allocation_counter_.NextBytes()) {
     // Only the first object in a LAB should reach the next step.
-    DCHECK_EQ(soon_object,
-              allocation_info_.start() + aligned_size_in_bytes - size_in_bytes);
+    DCHECK_EQ(soon_object, allocation_info_->start() + aligned_size_in_bytes -
+                               size_in_bytes);

     // Right now the LAB only contains that one object.
-    DCHECK_EQ(allocation_info_.top() + allocation_size - aligned_size_in_bytes,
-              allocation_info_.limit());
+    DCHECK_EQ(allocation_info_->top() + allocation_size - aligned_size_in_bytes,
+              allocation_info_->limit());

     // Ensure that there is a valid object
     if (identity() == CODE_SPACE) {
@@ -432,7 +432,7 @@ void SpaceWithLinearArea::InvokeAllocationObservers(
 #if DEBUG
     // Ensure that allocation_info_ isn't modified during one of the
     // AllocationObserver::Step methods.
-    LinearAllocationArea saved_allocation_info = allocation_info_;
+    LinearAllocationArea saved_allocation_info = *allocation_info_;
 #endif

     // Run AllocationObserver::Step through the AllocationCounter.
@@ -440,13 +440,13 @@ void SpaceWithLinearArea::InvokeAllocationObservers(
                                               allocation_size);

     // Ensure that start/top/limit didn't change.
-    DCHECK_EQ(saved_allocation_info.start(), allocation_info_.start());
-    DCHECK_EQ(saved_allocation_info.top(), allocation_info_.top());
-    DCHECK_EQ(saved_allocation_info.limit(), allocation_info_.limit());
+    DCHECK_EQ(saved_allocation_info.start(), allocation_info_->start());
+    DCHECK_EQ(saved_allocation_info.top(), allocation_info_->top());
+    DCHECK_EQ(saved_allocation_info.limit(), allocation_info_->limit());
   }

   DCHECK_IMPLIES(allocation_counter_.IsActive(),
-                 (allocation_info_.limit() - allocation_info_.start()) <
+                 (allocation_info_->limit() - allocation_info_->start()) <
                      allocation_counter_.NextBytes());
 }
@@ -433,23 +433,24 @@ class LocalAllocationBuffer {

 class SpaceWithLinearArea : public Space {
  public:
-  SpaceWithLinearArea(Heap* heap, AllocationSpace id, FreeList* free_list)
-      : Space(heap, id, free_list) {
-    allocation_info_.Reset(kNullAddress, kNullAddress);
-  }
+  SpaceWithLinearArea(Heap* heap, AllocationSpace id, FreeList* free_list,
+                      LinearAllocationArea* allocation_info)
+      : Space(heap, id, free_list), allocation_info_(allocation_info) {}

   virtual bool SupportsAllocationObserver() = 0;

   // Returns the allocation pointer in this space.
-  Address top() { return allocation_info_.top(); }
-  Address limit() { return allocation_info_.limit(); }
+  Address top() const { return allocation_info_->top(); }
+  Address limit() const { return allocation_info_->limit(); }

   // The allocation top address.
-  Address* allocation_top_address() { return allocation_info_.top_address(); }
+  Address* allocation_top_address() const {
+    return allocation_info_->top_address();
+  }

   // The allocation limit address.
-  Address* allocation_limit_address() {
-    return allocation_info_.limit_address();
+  Address* allocation_limit_address() const {
+    return allocation_info_->limit_address();
   }

   // Methods needed for allocation observers.
@@ -483,7 +484,7 @@ class SpaceWithLinearArea : public Space {

  protected:
   // TODO(ofrobots): make these private after refactoring is complete.
-  LinearAllocationArea allocation_info_;
+  LinearAllocationArea* const allocation_info_;

   size_t allocations_origins_[static_cast<int>(
       AllocationOrigin::kNumberOfAllocationOrigins)] = {0};
@@ -194,9 +194,10 @@ TEST(MemoryAllocator) {

   TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved());
   MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
+  LinearAllocationArea allocation_info;

   int total_pages = 0;
-  OldSpace faked_space(heap);
+  OldSpace faked_space(heap, &allocation_info);
   CHECK(!faked_space.first_page());
   CHECK(!faked_space.last_page());
   Page* first_page = memory_allocator->AllocatePage(
@@ -275,10 +276,11 @@ TEST(NewSpace) {
   Heap* heap = isolate->heap();
   TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved());
   MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
+  LinearAllocationArea allocation_info;

   NewSpace new_space(heap, memory_allocator->data_page_allocator(),
                      CcTest::heap()->InitialSemiSpaceSize(),
-                     CcTest::heap()->InitialSemiSpaceSize());
+                     CcTest::heap()->InitialSemiSpaceSize(), &allocation_info);
   CHECK(new_space.MaximumCapacity());

   while (new_space.Available() >= kMaxRegularHeapObjectSize) {
@@ -296,8 +298,9 @@ TEST(OldSpace) {
   Isolate* isolate = CcTest::i_isolate();
   Heap* heap = isolate->heap();
   TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved());
+  LinearAllocationArea allocation_info;

-  OldSpace* s = new OldSpace(heap);
+  OldSpace* s = new OldSpace(heap, &allocation_info);
   CHECK_NOT_NULL(s);

   while (s->Available() > 0) {
@@ -802,7 +805,8 @@ TEST(NoMemoryForNewPage) {
   FailingPageAllocator failing_allocator;
   TestMemoryAllocatorScope test_allocator_scope(isolate, 0, &failing_allocator);
   MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
-  OldSpace faked_space(heap);
+  LinearAllocationArea allocation_info;
+  OldSpace faked_space(heap, &allocation_info);
   Page* page = memory_allocator->AllocatePage(
       faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
       NOT_EXECUTABLE);