[heap] Refactor and clean-up runtime allocation.

Bug: chromium:796896
Change-Id: I7f46f82d079502b8ec04c5e3be5f803ec9e62ffa
Reviewed-on: https://chromium-review.googlesource.com/854797
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50424}
Authored by Hannes Payer on 2018-01-09 09:56:07 +01:00; committed by Commit Bot
parent 8fbc6a05c1
commit 4bf8043148
11 changed files with 154 additions and 149 deletions
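
For orientation, the sketch below is not part of the commit; it is a minimal, self-contained model of the allocation-path split this change introduces: a LinearAllocationArea is a bump-pointer window [top, limit), and when it runs dry the owning space, not the free list, refills it from a free-list node, mirroring the new PagedSpace::RefillLinearAllocationAreaFromFreeList() and the new FreeSpace-returning FreeList::Allocate(size_in_bytes, &node_size) visible in the diff. ToyFreeList, ToySpace, FreeBlock, and the byte-array backing store are simplified stand-ins, not V8 types; only the top/limit/Reset shape of LinearAllocationArea is taken from the diff.

// Minimal, self-contained model (C++11); not V8 code.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

using Address = uintptr_t;

// Bump-pointer window [top, limit), mirroring LinearAllocationArea's shape.
class LinearAllocationArea {
 public:
  LinearAllocationArea() : top_(0), limit_(0) {}
  void Reset(Address top, Address limit) { top_ = top; limit_ = limit; }
  // Returns 0 when the area cannot satisfy the request.
  Address Allocate(size_t size_in_bytes) {
    if (limit_ - top_ < size_in_bytes) return 0;
    Address result = top_;
    top_ += size_in_bytes;
    return result;
  }
 private:
  Address top_;
  Address limit_;
};

// Hypothetical free list: hands back a node of at least the requested size
// together with the node's actual size, like the new FreeList::Allocate shape.
struct FreeBlock { Address start; size_t size; };
class ToyFreeList {
 public:
  void Free(Address start, size_t size) { blocks_.push_back({start, size}); }
  FreeBlock Allocate(size_t size_in_bytes) {
    for (size_t i = 0; i < blocks_.size(); ++i) {
      if (blocks_[i].size >= size_in_bytes) {
        FreeBlock node = blocks_[i];
        blocks_.erase(blocks_.begin() + i);
        return node;  // caller decides how to use the (possibly larger) node
      }
    }
    return {0, 0};  // request cannot be handled by the free list
  }
 private:
  std::vector<FreeBlock> blocks_;
};

// The space owns the refill step and installs the new linear allocation area,
// loosely mirroring PagedSpace::RefillLinearAllocationAreaFromFreeList().
class ToySpace {
 public:
  explicit ToySpace(ToyFreeList* free_list) : free_list_(free_list) {}
  Address AllocateRaw(size_t size_in_bytes) {
    if (Address result = lab_.Allocate(size_in_bytes)) return result;
    if (!RefillLinearAllocationAreaFromFreeList(size_in_bytes)) return 0;
    return lab_.Allocate(size_in_bytes);
  }
 private:
  bool RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
    FreeBlock node = free_list_->Allocate(size_in_bytes);
    if (node.start == 0) return false;
    // A real implementation may return part of the node to the free list;
    // here the whole node becomes the new linear allocation area.
    lab_.Reset(node.start, node.start + node.size);
    return true;
  }
  ToyFreeList* free_list_;
  LinearAllocationArea lab_;
};

int main() {
  static uint8_t backing[4096];
  ToyFreeList free_list;
  free_list.Free(reinterpret_cast<Address>(backing), sizeof(backing));
  ToySpace space(&free_list);
  Address a = space.AllocateRaw(64);
  Address b = space.AllocateRaw(128);
  assert(a != 0 && b == a + 64);  // both served from one bump-pointer window
  return 0;
}

The point of the split, visible in the spaces.cc and spaces.h hunks below, is that FreeList::Allocate now only finds and returns a node, while installing the linear allocation area (and freeing any unused tail) stays with the space.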

View File

@@ -1845,7 +1845,7 @@ void Heap::EvacuateYoungGeneration() {
 if (!new_space()->Rebalance()) {
 FatalProcessOutOfMemory("NewSpace::Rebalance");
 }
-new_space()->ResetAllocationInfo();
+new_space()->ResetLinearAllocationArea();
 new_space()->set_age_mark(new_space()->top());
 // Fix up special trackers.
@@ -1956,7 +1956,7 @@ void Heap::Scavenge() {
 // Flip the semispaces. After flipping, to space is empty, from space has
 // live objects.
 new_space_->Flip();
-new_space_->ResetAllocationInfo();
+new_space_->ResetLinearAllocationArea();
 ItemParallelJob job(isolate()->cancelable_task_manager(),
 &parallel_scavenge_semaphore_);
@@ -5497,7 +5497,7 @@ void Heap::DisableInlineAllocation() {
 CodeSpaceMemoryModificationScope modification_scope(this);
 for (PagedSpace* space = spaces.next(); space != nullptr;
 space = spaces.next()) {
-space->EmptyAllocationInfo();
+space->FreeLinearAllocationArea();
 }
 }

View File

@@ -435,9 +435,9 @@ void IncrementalMarking::StartBlackAllocation() {
 DCHECK(!black_allocation_);
 DCHECK(IsMarking());
 black_allocation_ = true;
-heap()->old_space()->MarkAllocationInfoBlack();
-heap()->map_space()->MarkAllocationInfoBlack();
-heap()->code_space()->MarkAllocationInfoBlack();
+heap()->old_space()->MarkLinearAllocationAreaBlack();
+heap()->map_space()->MarkLinearAllocationAreaBlack();
+heap()->code_space()->MarkLinearAllocationAreaBlack();
 if (FLAG_trace_incremental_marking) {
 heap()->isolate()->PrintWithTimestamp(
 "[IncrementalMarking] Black allocation started\n");
@@ -447,9 +447,9 @@ void IncrementalMarking::StartBlackAllocation() {
 void IncrementalMarking::PauseBlackAllocation() {
 DCHECK(FLAG_black_allocation);
 DCHECK(IsMarking());
-heap()->old_space()->UnmarkAllocationInfo();
-heap()->map_space()->UnmarkAllocationInfo();
-heap()->code_space()->UnmarkAllocationInfo();
+heap()->old_space()->UnmarkLinearAllocationArea();
+heap()->map_space()->UnmarkLinearAllocationArea();
+heap()->code_space()->UnmarkLinearAllocationArea();
 if (FLAG_trace_incremental_marking) {
 heap()->isolate()->PrintWithTimestamp(
 "[IncrementalMarking] Black allocation paused\n");

View File

@@ -33,7 +33,7 @@ class LocalAllocator {
 compaction_spaces_.Get(CODE_SPACE));
 // Give back remaining LAB space if this LocalAllocator's new space LAB
 // sits right next to new space allocation top.
-const AllocationInfo info = new_space_lab_.Close();
+const LinearAllocationArea info = new_space_lab_.Close();
 const Address top = new_space_->top();
 if (info.limit() != nullptr && info.limit() == top) {
 DCHECK_NOT_NULL(info.top());

View File

@@ -2286,7 +2286,7 @@ void MinorMarkCompactCollector::EvacuatePrologue() {
 new_space_evacuation_pages_.push_back(p);
 }
 new_space->Flip();
-new_space->ResetAllocationInfo();
+new_space->ResetLinearAllocationArea();
 }
 void MinorMarkCompactCollector::EvacuateEpilogue() {
@@ -2932,7 +2932,7 @@ void MarkCompactCollector::EvacuatePrologue() {
 new_space_evacuation_pages_.push_back(p);
 }
 new_space->Flip();
-new_space->ResetAllocationInfo();
+new_space->ResetLinearAllocationArea();
 // Old space.
 DCHECK(old_space_evacuation_pages_.empty());

View File

@@ -301,8 +301,7 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
 bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes) {
 if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit())
 return true;
-if (free_list_.Allocate(size_in_bytes)) return true;
-return SlowAllocateRaw(size_in_bytes);
+return SlowRefillLinearAllocationArea(size_in_bytes);
 }
 HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
@@ -490,7 +489,7 @@ size_t LargeObjectSpace::Available() {
 LocalAllocationBuffer LocalAllocationBuffer::InvalidBuffer() {
-return LocalAllocationBuffer(nullptr, AllocationInfo(nullptr, nullptr));
+return LocalAllocationBuffer(nullptr, LinearAllocationArea(nullptr, nullptr));
 }
@@ -503,7 +502,7 @@ LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
 USE(ok);
 DCHECK(ok);
 Address top = HeapObject::cast(obj)->address();
-return LocalAllocationBuffer(heap, AllocationInfo(top, top + size));
+return LocalAllocationBuffer(heap, LinearAllocationArea(top, top + size));
 }

View File

@@ -1432,7 +1432,7 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
 // area_size_
 // anchor_
-other->EmptyAllocationInfo();
+other->FreeLinearAllocationArea();
 // The linear allocation area of {other} should be destroyed now.
 DCHECK_NULL(other->top());
@@ -1547,7 +1547,7 @@ void PagedSpace::ResetFreeList() {
 void PagedSpace::ShrinkImmortalImmovablePages() {
 DCHECK(!heap()->deserialization_complete());
 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
-EmptyAllocationInfo();
+FreeLinearAllocationArea();
 ResetFreeList();
 for (Page* page : *this) {
 DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
@@ -1592,7 +1592,7 @@ void PagedSpace::ResetFreeListStatistics() {
 }
 }
-void PagedSpace::SetAllocationInfo(Address top, Address limit) {
+void PagedSpace::SetLinearAllocationArea(Address top, Address limit) {
 SetTopAndLimit(top, limit);
 if (top != nullptr && top != limit &&
 heap()->incremental_marking()->black_allocation()) {
@@ -1645,7 +1645,7 @@ Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
 }
 }
-void PagedSpace::MarkAllocationInfoBlack() {
+void PagedSpace::MarkLinearAllocationAreaBlack() {
 DCHECK(heap()->incremental_marking()->black_allocation());
 Address current_top = top();
 Address current_limit = limit();
@@ -1655,7 +1655,7 @@ void PagedSpace::MarkAllocationInfoBlack() {
 }
 }
-void PagedSpace::UnmarkAllocationInfo() {
+void PagedSpace::UnmarkLinearAllocationArea() {
 Address current_top = top();
 Address current_limit = limit();
 if (current_top != nullptr && current_top != current_limit) {
@@ -1664,8 +1664,7 @@ void PagedSpace::UnmarkAllocationInfo() {
 }
 }
-// Empty space allocation info, returning unused area to free list.
-void PagedSpace::EmptyAllocationInfo() {
+void PagedSpace::FreeLinearAllocationArea() {
 // Mark the old linear allocation area with a free space map so it can be
 // skipped when scanning the heap.
 Address current_top = top();
@@ -1738,6 +1737,62 @@ std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator() {
 return std::unique_ptr<ObjectIterator>(new HeapObjectIterator(this));
 }
+bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
+DCHECK(IsAligned(size_in_bytes, kPointerSize));
+DCHECK_LE(top(), limit());
+#ifdef DEBUG
+if (top() != limit()) {
+DCHECK_EQ(Page::FromAddress(top()), Page::FromAddress(limit() - 1));
+}
+#endif
+// Don't free list allocate if there is linear space available.
+DCHECK_LT(static_cast<size_t>(limit() - top()), size_in_bytes);
+// Mark the old linear allocation area with a free space map so it can be
+// skipped when scanning the heap. This also puts it back in the free list
+// if it is big enough.
+FreeLinearAllocationArea();
+if (!is_local()) {
+heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
+Heap::kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection);
+}
+size_t new_node_size = 0;
+FreeSpace* new_node = free_list_.Allocate(size_in_bytes, &new_node_size);
+if (new_node == nullptr) return false;
+DCHECK_GE(new_node_size, size_in_bytes);
+#ifdef DEBUG
+for (size_t i = 0; i < size_in_bytes / kPointerSize; i++) {
+reinterpret_cast<Object**>(new_node->address())[i] =
+Smi::FromInt(kCodeZapValue);
+}
+#endif
+// The old-space-step might have finished sweeping and restarted marking.
+// Verify that it did not turn the page of the new node into an evacuation
+// candidate.
+DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
+// Memory in the linear allocation area is counted as allocated. We may free
+// a little of this again immediately - see below.
+IncreaseAllocatedBytes(new_node_size, Page::FromAddress(new_node->address()));
+Address start = new_node->address();
+Address end = new_node->address() + new_node_size;
+Address limit = ComputeLimit(start, end, size_in_bytes);
+DCHECK_LE(limit, end);
+DCHECK_LE(size_in_bytes, limit - start);
+if (limit != end) {
+Free(limit, end - limit);
+}
+SetLinearAllocationArea(start, limit);
+return true;
+}
 #ifdef DEBUG
 void PagedSpace::Print() {}
 #endif
@@ -1871,7 +1926,7 @@ bool NewSpace::SetUp(size_t initial_semispace_capacity,
 return false;
 }
 DCHECK(!from_space_.is_committed()); // No need to use memory yet.
-ResetAllocationInfo();
+ResetLinearAllocationArea();
 return true;
 }
@@ -1977,22 +2032,21 @@ bool SemiSpace::EnsureCurrentCapacity() {
 return true;
 }
-AllocationInfo LocalAllocationBuffer::Close() {
+LinearAllocationArea LocalAllocationBuffer::Close() {
 if (IsValid()) {
 heap_->CreateFillerObjectAt(
 allocation_info_.top(),
 static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
 ClearRecordedSlots::kNo);
-const AllocationInfo old_info = allocation_info_;
-allocation_info_ = AllocationInfo(nullptr, nullptr);
+const LinearAllocationArea old_info = allocation_info_;
+allocation_info_ = LinearAllocationArea(nullptr, nullptr);
 return old_info;
 }
-return AllocationInfo(nullptr, nullptr);
+return LinearAllocationArea(nullptr, nullptr);
 }
-LocalAllocationBuffer::LocalAllocationBuffer(Heap* heap,
-AllocationInfo allocation_info)
+LocalAllocationBuffer::LocalAllocationBuffer(
+Heap* heap, LinearAllocationArea allocation_info)
 : heap_(heap), allocation_info_(allocation_info) {
 if (IsValid()) {
 heap_->CreateFillerObjectAt(
@@ -2023,8 +2077,7 @@ LocalAllocationBuffer& LocalAllocationBuffer::operator=(
 return *this;
 }
-void NewSpace::UpdateAllocationInfo() {
+void NewSpace::UpdateLinearAllocationArea() {
 Address old_top = top();
 Address new_top = to_space_.page_low();
@@ -2040,10 +2093,9 @@ void NewSpace::UpdateAllocationInfo() {
 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
-void NewSpace::ResetAllocationInfo() {
+void NewSpace::ResetLinearAllocationArea() {
 to_space_.Reset();
-UpdateAllocationInfo();
+UpdateLinearAllocationArea();
 // Clear all mark-bits in the to-space.
 IncrementalMarking::NonAtomicMarkingState* marking_state =
 heap()->incremental_marking()->non_atomic_marking_state();
@@ -2078,7 +2130,7 @@ bool NewSpace::AddFreshPage() {
 Address limit = Page::FromAllocationAreaAddress(top)->area_end();
 int remaining_in_page = static_cast<int>(limit - top);
 heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
-UpdateAllocationInfo();
+UpdateLinearAllocationArea();
 return true;
 }
@@ -2722,9 +2774,9 @@ FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
 return node;
 }
-FreeSpace* FreeList::FindNodeFor(size_t size_in_bytes, size_t* node_size) {
+FreeSpace* FreeList::Allocate(size_t size_in_bytes, size_t* node_size) {
 DCHECK_GE(kMaxBlockSize, size_in_bytes);
 FreeSpace* node = nullptr;
 // First try the allocation fast path: try to allocate the minimum element
 // size of a free list category. This operation is constant time.
 FreeListCategoryType type =
@@ -2754,66 +2806,6 @@ FreeSpace* FreeList::FindNodeFor(size_t size_in_bytes, size_t* node_size) {
 return node;
 }
-bool FreeList::Allocate(size_t size_in_bytes) {
-DCHECK_GE(kMaxBlockSize, size_in_bytes);
-DCHECK(IsAligned(size_in_bytes, kPointerSize));
-DCHECK_LE(owner_->top(), owner_->limit());
-#ifdef DEBUG
-if (owner_->top() != owner_->limit()) {
-DCHECK_EQ(Page::FromAddress(owner_->top()),
-Page::FromAddress(owner_->limit() - 1));
-}
-#endif
-// Don't free list allocate if there is linear space available.
-DCHECK_LT(static_cast<size_t>(owner_->limit() - owner_->top()),
-size_in_bytes);
-// Mark the old linear allocation area with a free space map so it can be
-// skipped when scanning the heap. This also puts it back in the free list
-// if it is big enough.
-owner_->EmptyAllocationInfo();
-if (!owner_->is_local()) {
-owner_->heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
-Heap::kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection);
-}
-size_t new_node_size = 0;
-FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
-if (new_node == nullptr) return false;
-DCHECK_GE(new_node_size, size_in_bytes);
-#ifdef DEBUG
-for (size_t i = 0; i < size_in_bytes / kPointerSize; i++) {
-reinterpret_cast<Object**>(new_node->address())[i] =
-Smi::FromInt(kCodeZapValue);
-}
-#endif
-// The old-space-step might have finished sweeping and restarted marking.
-// Verify that it did not turn the page of the new node into an evacuation
-// candidate.
-DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
-// Memory in the linear allocation area is counted as allocated. We may free
-// a little of this again immediately - see below.
-owner_->IncreaseAllocatedBytes(new_node_size,
-Page::FromAddress(new_node->address()));
-Address start = new_node->address();
-Address end = new_node->address() + new_node_size;
-Address limit = owner_->ComputeLimit(start, end, size_in_bytes);
-DCHECK_LE(limit, end);
-DCHECK_LE(size_in_bytes, limit - start);
-if (limit != end) {
-owner_->Free(limit, end - limit);
-}
-owner_->SetAllocationInfo(start, limit);
-return true;
-}
 size_t FreeList::EvictFreeListItems(Page* page) {
 size_t sum = 0;
 page->ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
@@ -2943,7 +2935,7 @@ size_t FreeList::SumFreeLists() {
 void PagedSpace::PrepareForMarkCompact() {
 // We don't have a linear allocation area while sweeping. It will be restored
 // on the first allocation after the sweep.
-EmptyAllocationInfo();
+FreeLinearAllocationArea();
 // Clear the free list before a full GC---it will be rebuilt afterward.
 free_list_.Reset();
@@ -2991,7 +2983,7 @@ bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
 // After waiting for the sweeper threads, there may be new free-list
 // entries.
-return free_list_.Allocate(size_in_bytes);
+return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
 }
 return false;
 }
@@ -3001,27 +2993,29 @@ bool CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
 if (FLAG_concurrent_sweeping && collector->sweeping_in_progress()) {
 collector->sweeper()->ParallelSweepSpace(identity(), 0);
 RefillFreeList();
-return free_list_.Allocate(size_in_bytes);
+return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
 }
 return false;
 }
-bool PagedSpace::SlowAllocateRaw(int size_in_bytes) {
+bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
 VMState<GC> state(heap()->isolate());
 RuntimeCallTimerScope runtime_timer(
 heap()->isolate(), RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
-return RawSlowAllocateRaw(size_in_bytes);
+return RawSlowRefillLinearAllocationArea(size_in_bytes);
 }
-bool CompactionSpace::SlowAllocateRaw(int size_in_bytes) {
-return RawSlowAllocateRaw(size_in_bytes);
+bool CompactionSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
+return RawSlowRefillLinearAllocationArea(size_in_bytes);
 }
-bool PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
+bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
 // Allocation in this space has failed.
 DCHECK_GE(size_in_bytes, 0);
 const int kMaxPagesToSweep = 1;
+if (RefillLinearAllocationAreaFromFreeList(size_in_bytes)) return true;
 MarkCompactCollector* collector = heap()->mark_compact_collector();
 // Sweeping is still in progress.
 if (collector->sweeping_in_progress()) {
@@ -3035,14 +3029,18 @@ bool PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
 RefillFreeList();
 // Retry the free list allocation.
-if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
+if (RefillLinearAllocationAreaFromFreeList(
+static_cast<size_t>(size_in_bytes)))
+return true;
 // If sweeping is still in progress try to sweep pages.
 int max_freed = collector->sweeper()->ParallelSweepSpace(
 identity(), size_in_bytes, kMaxPagesToSweep);
 RefillFreeList();
 if (max_freed >= size_in_bytes) {
-if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
+if (RefillLinearAllocationAreaFromFreeList(
+static_cast<size_t>(size_in_bytes)))
+return true;
 }
 } else if (is_local()) {
 // Sweeping not in progress and we are on a {CompactionSpace}. This can
@@ -3051,14 +3049,17 @@ bool PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
 Page* page = main_space->RemovePageSafe(size_in_bytes);
 if (page != nullptr) {
 AddPage(page);
-if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
+if (RefillLinearAllocationAreaFromFreeList(
+static_cast<size_t>(size_in_bytes)))
+return true;
 }
 }
 if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
 DCHECK((CountTotalPages() > 1) ||
 (static_cast<size_t>(size_in_bytes) <= free_list_.Available()));
-return free_list_.Allocate(static_cast<size_t>(size_in_bytes));
+return RefillLinearAllocationAreaFromFreeList(
+static_cast<size_t>(size_in_bytes));
 }
 // If sweeper threads are active, wait for them at that point and steal

View File

@@ -34,12 +34,12 @@ class HeapTester;
 class TestCodeRangeScope;
 } // namespace heap
-class AllocationInfo;
 class AllocationObserver;
 class CompactionSpace;
 class CompactionSpaceCollection;
 class FreeList;
 class Isolate;
+class LinearAllocationArea;
 class LocalArrayBufferTracker;
 class MemoryAllocator;
 class MemoryChunk;
@@ -1562,10 +1562,10 @@ class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
 // An abstraction of allocation and relocation pointers in a page-structured
 // space.
-class AllocationInfo {
+class LinearAllocationArea {
 public:
-AllocationInfo() : top_(nullptr), limit_(nullptr) {}
-AllocationInfo(Address top, Address limit) : top_(top), limit_(limit) {}
+LinearAllocationArea() : top_(nullptr), limit_(nullptr) {}
+LinearAllocationArea(Address top, Address limit) : top_(top), limit_(limit) {}
 void Reset(Address top, Address limit) {
 set_top(top);
@@ -1763,10 +1763,11 @@ class V8_EXPORT_PRIVATE FreeList {
 // and the size should be a non-zero multiple of the word size.
 size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
-// Finds a node of size at least size_in_bytes and sets up a linear allocation
-// area using this node. Returns false if there is no such node and the caller
-// has to retry allocation after collecting garbage.
-MUST_USE_RESULT bool Allocate(size_t size_in_bytes);
+// Allocates a free space node frome the free list of at least size_in_bytes
+// bytes. Returns the actual node size in node_size which can be bigger than
+// size_in_bytes. This method returns null if the allocation request cannot be
+// handled by the free list.
+MUST_USE_RESULT FreeSpace* Allocate(size_t size_in_bytes, size_t* node_size);
 // Clear the free list.
 void Reset();
@@ -1865,8 +1866,6 @@ class V8_EXPORT_PRIVATE FreeList {
 static const size_t kMediumAllocationMax = kSmallListMax;
 static const size_t kLargeAllocationMax = kMediumListMax;
-FreeSpace* FindNodeFor(size_t size_in_bytes, size_t* node_size);
 // Walks all available categories for a given |type| and tries to retrieve
 // a node. Returns nullptr if the category is empty.
 FreeSpace* FindNodeIn(FreeListCategoryType type, size_t* node_size);
@@ -1953,13 +1952,13 @@ class LocalAllocationBuffer {
 inline bool TryFreeLast(HeapObject* object, int object_size);
 // Close a LAB, effectively invalidating it. Returns the unused area.
-AllocationInfo Close();
+LinearAllocationArea Close();
 private:
-LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info);
+LocalAllocationBuffer(Heap* heap, LinearAllocationArea allocation_info);
 Heap* heap_;
-AllocationInfo allocation_info_;
+LinearAllocationArea allocation_info_;
 };
 class SpaceWithLinearArea : public Space {
@@ -2014,7 +2013,7 @@ class SpaceWithLinearArea : public Space {
 V8_EXPORT_PRIVATE void StartNextInlineAllocationStep() override;
 // TODO(ofrobots): make these private after refactoring is complete.
-AllocationInfo allocation_info_;
+LinearAllocationArea allocation_info_;
 Address top_on_previous_step_;
 };
@@ -2131,11 +2130,11 @@ class V8_EXPORT_PRIVATE PagedSpace
 void ResetFreeList();
-// Empty space allocation info, returning unused area to free list.
-void EmptyAllocationInfo();
+// Empty space linear allocation area, returning unused area to free list.
+void FreeLinearAllocationArea();
-void MarkAllocationInfoBlack();
-void UnmarkAllocationInfo();
+void MarkLinearAllocationAreaBlack();
+void UnmarkLinearAllocationArea();
 void DecreaseAllocatedBytes(size_t bytes, Page* page) {
 accounting_stats_.DecreaseAllocatedBytes(bytes, page);
@@ -2229,10 +2228,10 @@ class V8_EXPORT_PRIVATE PagedSpace
 std::unique_ptr<ObjectIterator> GetObjectIterator() override;
-void SetAllocationInfo(Address top, Address limit);
+void SetLinearAllocationArea(Address top, Address limit);
 private:
-// Set space allocation info.
+// Set space linear allocation area.
 void SetTopAndLimit(Address top, Address limit) {
 DCHECK(top == limit ||
 Page::FromAddress(top) == Page::FromAddress(limit - 1));
@@ -2274,6 +2273,10 @@ class V8_EXPORT_PRIVATE PagedSpace
 // (object size + alignment filler size) to the size_in_bytes.
 inline HeapObject* TryAllocateLinearlyAligned(int* size_in_bytes,
 AllocationAlignment alignment);
+MUST_USE_RESULT bool RefillLinearAllocationAreaFromFreeList(
+size_t size_in_bytes);
 // If sweeping is still in progress try to sweep unswept pages. If that is
 // not successful, wait for the sweeper threads and retry free-list
 // allocation. Returns false if there is not enough space and the caller
@@ -2283,11 +2286,12 @@ class V8_EXPORT_PRIVATE PagedSpace
 // Slow path of AllocateRaw. This function is space-dependent. Returns false
 // if there is not enough space and the caller has to retry after
 // collecting garbage.
-MUST_USE_RESULT virtual bool SlowAllocateRaw(int size_in_bytes);
+MUST_USE_RESULT virtual bool SlowRefillLinearAllocationArea(
+int size_in_bytes);
 // Implementation of SlowAllocateRaw. Returns false if there is not enough
 // space and the caller has to retry after collecting garbage.
-MUST_USE_RESULT bool RawSlowAllocateRaw(int size_in_bytes);
+MUST_USE_RESULT bool RawSlowRefillLinearAllocationArea(int size_in_bytes);
 size_t area_size_;
@@ -2671,7 +2675,7 @@ class NewSpace : public SpaceWithLinearArea {
 int size_in_bytes, AllocationAlignment alignment);
 // Reset the allocation pointer to the beginning of the active semispace.
-void ResetAllocationInfo();
+void ResetLinearAllocationArea();
 // When inline allocation stepping is active, either because of incremental
 // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
@@ -2738,12 +2742,12 @@ class NewSpace : public SpaceWithLinearArea {
 SemiSpace& to_space() { return to_space_; }
 private:
-// Update allocation info to match the current to-space page.
-void UpdateAllocationInfo();
+// Update linear allocation area to match the current to-space page.
+void UpdateLinearAllocationArea();
 base::Mutex mutex_;
-// The top and the limit at the time of setting the allocation info.
+// The top and the limit at the time of setting the linear allocation area.
 // These values can be accessed by background tasks.
 base::AtomicValue<Address> original_top_;
 base::AtomicValue<Address> original_limit_;
@@ -2785,7 +2789,8 @@ class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
 MUST_USE_RESULT bool SweepAndRetryAllocation(int size_in_bytes) override;
-MUST_USE_RESULT bool SlowAllocateRaw(int size_in_bytes) override;
+MUST_USE_RESULT bool SlowRefillLinearAllocationArea(
+int size_in_bytes) override;
 };

View File

@@ -20,7 +20,7 @@ void SealCurrentObjects(Heap* heap) {
 heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
 GarbageCollectionReason::kTesting);
 heap->mark_compact_collector()->EnsureSweepingCompleted();
-heap->old_space()->EmptyAllocationInfo();
+heap->old_space()->FreeLinearAllocationArea();
 for (Page* page : *heap->old_space()) {
 page->MarkNeverAllocateForTesting();
 }
@@ -68,7 +68,7 @@ std::vector<Handle<FixedArray>> CreatePadding(Heap* heap, int padding_size,
 int length;
 int free_memory = padding_size;
 if (tenure == i::TENURED) {
-heap->old_space()->EmptyAllocationInfo();
+heap->old_space()->FreeLinearAllocationArea();
 int overall_free_memory = static_cast<int>(heap->old_space()->Available());
 CHECK(padding_size <= overall_free_memory || overall_free_memory == 0);
 } else {
@@ -175,12 +175,12 @@ void SimulateFullSpace(v8::internal::PagedSpace* space) {
 if (collector->sweeping_in_progress()) {
 collector->EnsureSweepingCompleted();
 }
-space->EmptyAllocationInfo();
+space->FreeLinearAllocationArea();
 space->ResetFreeList();
 }
 void AbandonCurrentlyFreeMemory(PagedSpace* space) {
-space->EmptyAllocationInfo();
+space->FreeLinearAllocationArea();
 for (Page* page : *space) {
 page->MarkNeverAllocateForTesting();
 }
@@ -204,7 +204,7 @@ void ForceEvacuationCandidate(Page* page) {
 int remaining = static_cast<int>(limit - top);
 space->heap()->CreateFillerObjectAt(top, remaining,
 ClearRecordedSlots::kNo);
-space->EmptyAllocationInfo();
+space->FreeLinearAllocationArea();
 }
 }

View File

@@ -1704,7 +1704,7 @@ static Address AlignOldSpace(AllocationAlignment alignment, int offset) {
 }
 Address top = *top_addr;
 // Now force the remaining allocation onto the free list.
-CcTest::heap()->old_space()->EmptyAllocationInfo();
+CcTest::heap()->old_space()->FreeLinearAllocationArea();
 return top;
 }

View File

@@ -331,7 +331,7 @@ TEST(Regress5829) {
 array->set_length(9);
 heap->CreateFillerObjectAt(old_end - kPointerSize, kPointerSize,
 ClearRecordedSlots::kNo);
-heap->old_space()->EmptyAllocationInfo();
+heap->old_space()->FreeLinearAllocationArea();
 Page* page = Page::FromAddress(array->address());
 IncrementalMarking::MarkingState* marking_state = marking->marking_state();
 for (auto object_and_size :

View File

@@ -676,7 +676,7 @@ TEST(ShrinkPageToHighWaterMarkFreeSpaceEnd) {
 // Reset space so high water mark is consistent.
 PagedSpace* old_space = CcTest::heap()->old_space();
-old_space->EmptyAllocationInfo();
+old_space->FreeLinearAllocationArea();
 old_space->ResetFreeList();
 HeapObject* filler =
@@ -705,7 +705,7 @@ TEST(ShrinkPageToHighWaterMarkNoFiller) {
 // Reset space so high water mark and fillers are consistent.
 PagedSpace* old_space = CcTest::heap()->old_space();
 old_space->ResetFreeList();
-old_space->EmptyAllocationInfo();
+old_space->FreeLinearAllocationArea();
 size_t shrunk = old_space->ShrinkPageToHighWaterMark(page);
 CHECK_EQ(0u, shrunk);
@@ -727,7 +727,7 @@ TEST(ShrinkPageToHighWaterMarkOneWordFiller) {
 // Reset space so high water mark and fillers are consistent.
 PagedSpace* old_space = CcTest::heap()->old_space();
-old_space->EmptyAllocationInfo();
+old_space->FreeLinearAllocationArea();
 old_space->ResetFreeList();
 HeapObject* filler =
@@ -754,7 +754,7 @@ TEST(ShrinkPageToHighWaterMarkTwoWordFiller) {
 // Reset space so high water mark and fillers are consistent.
 PagedSpace* old_space = CcTest::heap()->old_space();
-old_space->EmptyAllocationInfo();
+old_space->FreeLinearAllocationArea();
 old_space->ResetFreeList();
 HeapObject* filler =