[heap] New live byte tracking.
This patch changes how space size and capacity are updated in GC:
- space capacity changes only when a page is added to or removed from the space.
- space size is reset to zero before sweeping and incremented by
  page->live_bytes_count_ for each to-be-swept page.
- space size is refined after sweeping using the accurate
  page->allocated_bytes counter produced by the sweeper.

Invariants:
1. space.capacity = sum [page.size | for page in space].
2. After marking, before sweeping:
   a) space.size = sum [page.live_bytes_count | for page in space].
3. After sweeping, before marking ends:
   a) space.size = sum [page.allocated_bytes | for page in space].
   b) page.allocated_bytes >= (sum [object.size | for object in page] +
      page.linear_allocation_area).
   c) page.area_size = (page.allocated_bytes + page.wasted_memory +
      sum [free_list_entry.size | for free_list_entry in page]).

3.b becomes an equality if the mutator does no array trimming or object
slack tracking during sweeping.

Bug: chromium:694255
Change-Id: Ic8d16a8171187a113fee2df8bf3c2a4c5e77bc08
Reviewed-on: https://chromium-review.googlesource.com/618889
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47409}
parent 5c741cbd6b
commit dfc6b4ddaa
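The accounting protocol described above can be summarized in code. The following standalone sketch is not part of the patch; it restates the commit-message invariants over hypothetical stand-in types (SpaceModel, PageModel, and every other name here is invented for illustration), with capacity summed over page area sizes the way the new verification code does.

// Standalone sketch (not part of the patch): the commit-message invariants
// expressed as checks over hypothetical stand-in types. Capacity is summed
// over page area sizes, matching PagedSpace::VerifyCountersAfterSweeping().
#include <cassert>
#include <cstddef>
#include <vector>

struct PageModel {
  size_t area_size;        // Usable object area of the page.
  size_t allocated_bytes;  // Objects + linear allocation area.
  size_t wasted_memory;    // Freed bytes not put on a free list.
  std::vector<size_t> free_list_entries;
};

struct SpaceModel {
  size_t capacity = 0;
  size_t size = 0;
  std::vector<PageModel> pages;

  // Invariant 1: capacity changes only when pages are added or removed.
  void VerifyCapacity() const {
    size_t sum = 0;
    for (const PageModel& p : pages) sum += p.area_size;
    assert(capacity == sum);
  }

  // Invariants 3a and 3c: after sweeping, the space size is the sum of the
  // per-page allocated_bytes counters, and every page area decomposes into
  // allocated bytes, wasted memory, and free-list entries.
  void VerifyAfterSweeping() const {
    size_t total = 0;
    for (const PageModel& p : pages) {
      size_t free_bytes = 0;
      for (size_t entry : p.free_list_entries) free_bytes += entry;
      assert(p.area_size == p.allocated_bytes + p.wasted_memory + free_bytes);
      total += p.allocated_bytes;
    }
    assert(size == total);
  }
};

int main() {
  SpaceModel space;
  space.pages.push_back({500 * 1024, 100 * 1024, 0, {400 * 1024}});
  space.capacity = 500 * 1024;  // Set when the page was added.
  space.size = 100 * 1024;      // Refined after sweeping.
  space.VerifyCapacity();
  space.VerifyAfterSweeping();
  return 0;
}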
@@ -5257,6 +5257,23 @@ void Heap::VerifyRememberedSetFor(HeapObject* object) {
 }
 #endif
 
+#ifdef DEBUG
+void Heap::VerifyCountersAfterSweeping() {
+  PagedSpaces spaces(this);
+  for (PagedSpace* space = spaces.next(); space != nullptr;
+       space = spaces.next()) {
+    space->VerifyCountersAfterSweeping();
+  }
+}
+
+void Heap::VerifyCountersBeforeConcurrentSweeping() {
+  PagedSpaces spaces(this);
+  for (PagedSpace* space = spaces.next(); space != nullptr;
+       space = spaces.next()) {
+    space->VerifyCountersBeforeConcurrentSweeping();
+  }
+}
+#endif
 
 void Heap::ZapFromSpace() {
   if (!new_space_->IsFromSpaceCommitted()) return;

@@ -1496,6 +1496,9 @@ class Heap {
 #endif
 
 #ifdef DEBUG
+  void VerifyCountersAfterSweeping();
+  void VerifyCountersBeforeConcurrentSweeping();
+
   void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
 
   void Print();

@@ -1044,6 +1044,9 @@ void IncrementalMarking::FinalizeSweeping() {
     heap_->mark_compact_collector()->EnsureSweepingCompleted();
   }
   if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
+#ifdef DEBUG
+    heap_->VerifyCountersAfterSweeping();
+#endif
     StartMarking();
   }
 }
@@ -863,7 +863,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
     CHECK_NULL(p->typed_slot_set<OLD_TO_OLD>());
     CHECK(p->SweepingDone());
     DCHECK(p->area_size() == area_size);
-    pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p));
+    pages.push_back(std::make_pair(p->allocated_bytes(), p));
   }
 
   int candidate_count = 0;

@@ -1044,6 +1044,10 @@ void MarkCompactCollector::Prepare() {
 void MarkCompactCollector::Finish() {
   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
 
+#ifdef DEBUG
+  heap()->VerifyCountersBeforeConcurrentSweeping();
+#endif
+
   if (!heap()->delay_sweeper_tasks_for_testing_) {
     sweeper().StartSweeperTasks();
   }

@@ -3703,10 +3707,15 @@ int MarkCompactCollector::Sweeper::RawSweep(
     skip_list->Clear();
   }
 
+  intptr_t live_bytes = 0;
   intptr_t freed_bytes = 0;
   intptr_t max_freed_bytes = 0;
   int curr_region = -1;
 
+  // Set the allocated_bytes counter to area_size. The free operations below
+  // will decrease the counter to actual live bytes.
+  p->ResetAllocatedBytes();
+
   for (auto object_and_size :
        LiveObjectRange<kBlackObjects>(p, marking_state_->bitmap(p))) {
     HeapObject* const object = object_and_size.first;

@@ -3738,6 +3747,7 @@ int MarkCompactCollector::Sweeper::RawSweep(
     }
     Map* map = object->synchronized_map();
     int size = object->SizeFromMap(map);
+    live_bytes += size;
     if (rebuild_skip_list) {
       int new_region_start = SkipList::RegionNumber(free_end);
       int new_region_end =

@@ -3788,9 +3798,18 @@ int MarkCompactCollector::Sweeper::RawSweep(
     }
   }
 
-  // Clear the mark bits of that page and reset live bytes count.
-  marking_state_->ClearLiveness(p);
-
+  marking_state_->bitmap(p)->Clear();
+  if (free_list_mode == IGNORE_FREE_LIST) {
+    marking_state_->SetLiveBytes(p, 0);
+    // We did not free memory, so have to adjust allocated bytes here.
+    intptr_t freed_bytes = p->area_size() - live_bytes;
+    p->DecreaseAllocatedBytes(freed_bytes);
+  } else {
+    // Keep the old live bytes counter of the page until RefillFreeList, where
+    // the space size is refined.
+    // The allocated_bytes() counter is precisely the total size of objects.
+    DCHECK_EQ(live_bytes, p->allocated_bytes());
+  }
   p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
   if (free_list_mode == IGNORE_FREE_LIST) return 0;
   return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
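The RawSweep changes above rely on a reset-then-decrement pattern: the page counter is seeded with the full area size, and every freed range is subtracted, so what remains is exactly the live bytes. A minimal sketch of that arithmetic follows; PageCounter is a hypothetical stand-in for Page, not the V8 class.

// Illustrative sketch (not from the patch): the reset-then-decrement
// pattern RawSweep uses for the per-page counter, with hypothetical types.
#include <cassert>
#include <cstddef>

class PageCounter {
 public:
  explicit PageCounter(size_t area_size) : area_size_(area_size) {}
  // Mirrors Page::ResetAllocatedBytes(): start from the full area...
  void Reset() { allocated_bytes_ = area_size_; }
  // ...and mirrors the Free()/DecreaseAllocatedBytes() path: every freed
  // range is subtracted, leaving exactly the live bytes.
  void Free(size_t bytes) {
    assert(allocated_bytes_ >= bytes);
    allocated_bytes_ -= bytes;
  }
  size_t allocated_bytes() const { return allocated_bytes_; }

 private:
  size_t area_size_;
  size_t allocated_bytes_ = 0;
};

int main() {
  // A 1000-byte page with live objects at [0,100) and [300,400):
  PageCounter page(1000);
  page.Reset();    // allocated_bytes == 1000
  page.Free(200);  // gap [100,300)
  page.Free(600);  // gap [400,1000)
  assert(page.allocated_bytes() == 200);  // == total live bytes
  return 0;
}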
@@ -4539,9 +4558,10 @@ void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space,
   page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
   DCHECK_GE(page->area_size(),
             static_cast<size_t>(marking_state_->live_bytes(page)));
-  size_t to_sweep = page->area_size() - marking_state_->live_bytes(page);
-  if (space != NEW_SPACE)
-    heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
+  if (space != NEW_SPACE) {
+    heap_->paged_space(space)->IncreaseAllocatedBytes(
+        marking_state_->live_bytes(page), page);
+  }
 }
 
 Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe(

@@ -4582,6 +4602,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
           Heap::ShouldZapGarbage()
               ? FreeSpaceTreatmentMode::ZAP_FREE_SPACE
               : FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
+      space->IncreaseAllocatedBytes(p->allocated_bytes(), p);
       continue;
     }
@@ -168,7 +168,8 @@ intptr_t PagedSpace::RelinkFreeListCategories(Page* page) {
     added += category->available();
     category->Relink();
   });
-  DCHECK_EQ(page->AvailableInFreeList(), page->available_in_free_list());
+  DCHECK_EQ(page->AvailableInFreeList(),
+            page->AvailableInFreeListFromAllocatedBytes());
   return added;
 }
@@ -561,7 +561,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
   chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
   chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
   chunk->mutex_ = new base::RecursiveMutex();
-  chunk->available_in_free_list_ = 0;
+  chunk->allocated_bytes_ = chunk->area_size();
   chunk->wasted_memory_ = 0;
   chunk->young_generation_bitmap_ = nullptr;
   chunk->set_next_chunk(nullptr);

@@ -597,6 +597,7 @@ Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
   // In the case we do not free the memory, we effectively account for the whole
   // page as allocated memory that cannot be used for further allocations.
   if (mode == kFreeMemory) {
+    owner->IncreaseAllocatedBytes(page->area_size(), page);
     owner->Free(page->area_start(), page->area_size());
   }
   page->InitializationMemoryFence();

@@ -867,10 +868,10 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
                                             executable, owner, &reservation);
 }
 
+void Page::ResetAllocatedBytes() { allocated_bytes_.SetValue(area_size()); }
+
 void Page::ResetFreeListStatistics() {
   wasted_memory_ = 0;
-  available_in_free_list_ = 0;
 }
 
 size_t Page::AvailableInFreeList() {

@@ -1372,6 +1373,7 @@ void Space::ResumeAllocationObservers() {
 
 void Space::AllocationStep(Address soon_object, int size) {
   if (!allocation_observers_paused_) {
+    heap()->CreateFillerObjectAt(soon_object, size, ClearRecordedSlots::kNo);
    for (int i = 0; i < allocation_observers_->length(); ++i) {
      AllocationObserver* o = (*allocation_observers_)[i];
      o->AllocationStep(size, soon_object, size);
@@ -1421,19 +1423,26 @@ void PagedSpace::RefillFreeList() {
       // Only during compaction pages can actually change ownership. This is
       // safe because there exists no other competing action on the page links
       // during compaction.
-      if (is_local() && (p->owner() != this)) {
-        base::LockGuard<base::Mutex> guard(
-            reinterpret_cast<PagedSpace*>(p->owner())->mutex());
+      if (is_local()) {
+        DCHECK_NE(this, p->owner());
+        PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
+        base::LockGuard<base::Mutex> guard(owner->mutex());
+        owner->RefineAllocatedBytesAfterSweeping(p);
         p->Unlink();
+        owner->AccountRemovedPage(p);
         p->set_owner(this);
         p->InsertAfter(anchor_.prev_page());
+        AccountAddedPage(p);
+      } else {
+        base::LockGuard<base::Mutex> guard(mutex());
+        DCHECK_EQ(this, p->owner());
+        RefineAllocatedBytesAfterSweeping(p);
       }
       added += RelinkFreeListCategories(p);
       added += p->wasted_memory();
       if (is_local() && (added > kCompactionMemoryWanted)) break;
     }
   }
-  accounting_stats_.IncreaseCapacity(added);
 }
 
 void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {

@@ -1446,29 +1455,26 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
 
   other->EmptyAllocationInfo();
 
-  // Update and clear accounting statistics.
-  accounting_stats_.Merge(other->accounting_stats_);
-  other->accounting_stats_.Clear();
-
   // The linear allocation area of {other} should be destroyed now.
   DCHECK(other->top() == nullptr);
   DCHECK(other->limit() == nullptr);
 
-  AccountCommitted(other->CommittedMemory());
-
   // Move over pages.
   for (auto it = other->begin(); it != other->end();) {
     Page* p = *(it++);
 
     // Relinking requires the category to be unlinked.
     other->UnlinkFreeListCategories(p);
 
     p->Unlink();
+    other->AccountRemovedPage(p);
     p->set_owner(this);
     p->InsertAfter(anchor_.prev_page());
+    this->AccountAddedPage(p);
     RelinkFreeListCategories(p);
-    DCHECK_EQ(p->AvailableInFreeList(), p->available_in_free_list());
+    DCHECK_EQ(p->AvailableInFreeList(),
+              p->AvailableInFreeListFromAllocatedBytes());
   }
   DCHECK_EQ(0u, other->Size());
   DCHECK_EQ(0u, other->Capacity());
 }
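Both RefillFreeList and MergeCompactionSpace now move pages between spaces with the same bracketing: account the page out of its old owner, relink it, account it into the new owner. A simplified sketch of that protocol follows; Space and Page here are bare stand-ins, not the V8 classes.

// Sketch (not the patch itself): the account-remove / move / account-add
// protocol used when a page changes owner. Types are simplified stand-ins.
#include <cassert>
#include <cstddef>
#include <list>

struct Page {
  size_t committed;        // page->size()
  size_t area_size;        // page->area_size()
  size_t allocated_bytes;  // page->allocated_bytes()
};

struct Space {
  size_t committed = 0, capacity = 0, size = 0;
  std::list<Page*> pages;

  // Mirrors PagedSpace::AccountAddedPage().
  void AccountAddedPage(Page* p) {
    committed += p->committed;
    capacity += p->area_size;
    size += p->allocated_bytes;
    pages.push_back(p);
  }
  // Mirrors PagedSpace::AccountRemovedPage().
  void AccountRemovedPage(Page* p) {
    size -= p->allocated_bytes;
    capacity -= p->area_size;
    committed -= p->committed;
    pages.remove(p);
  }
};

// Move a page from |from| to |to|; every counter stays balanced.
void MovePage(Space* from, Space* to, Page* p) {
  from->AccountRemovedPage(p);
  to->AccountAddedPage(p);
}

int main() {
  Page p{512 * 1024, 500 * 1024, 100 * 1024};
  Space a, b;
  a.AccountAddedPage(&p);
  MovePage(&a, &b, &p);
  assert(a.capacity == 0 && a.size == 0 && b.size == 100 * 1024);
  return 0;
}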
@@ -1490,9 +1496,34 @@ bool PagedSpace::ContainsSlow(Address addr) {
   return false;
 }
 
+void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
+  CHECK(page->SweepingDone());
+  auto marking_state =
+      heap()->incremental_marking()->non_atomic_marking_state();
+  // The live_byte on the page was accounted in the space allocated
+  // bytes counter. After sweeping allocated_bytes() contains the
+  // accurate live byte count on the page.
+  DecreaseAllocatedBytes(marking_state->live_bytes(page), page);
+  IncreaseAllocatedBytes(page->allocated_bytes(), page);
+  marking_state->SetLiveBytes(page, 0);
+}
+
+void PagedSpace::AccountAddedPage(Page* page) {
+  CHECK(page->SweepingDone());
+  AccountCommitted(page->size());
+  IncreaseCapacity(page->area_size());
+  IncreaseAllocatedBytes(page->allocated_bytes(), page);
+}
+
+void PagedSpace::AccountRemovedPage(Page* page) {
+  CHECK(page->SweepingDone());
+  DecreaseAllocatedBytes(page->allocated_bytes(), page);
+  DecreaseCapacity(page->area_size());
+  AccountUncommitted(page->size());
+}
+
 Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
   base::LockGuard<base::Mutex> guard(mutex());
 
   // Check for pages that still contain free list entries. Bail out for smaller
   // categories.
   const int minimum_category =
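The refinement step above swaps a page's marking estimate out of the space size and swaps the sweeper's accurate counter in. A sketch under simplified assumptions follows: PageStats and SpaceStats are hypothetical types, and the 96 KB / 64 KB figures are invented for illustration (the counters can diverge when the mutator trims arrays while sweeping is in progress).

// Sketch of the refinement step (simplified, hypothetical types): swap the
// page's marking estimate out of the space size and the sweeper's accurate
// count in, as RefineAllocatedBytesAfterSweeping does.
#include <cassert>
#include <cstddef>

struct PageStats {
  size_t live_bytes;       // From marking (estimate used before sweeping).
  size_t allocated_bytes;  // Accurate counter after the page is swept.
};

struct SpaceStats {
  size_t size = 0;

  void RefineAfterSweeping(PageStats* page) {
    // The estimate was added when the page was queued for sweeping;
    // replace it with the precise counter and retire the estimate.
    size -= page->live_bytes;
    size += page->allocated_bytes;
    page->live_bytes = 0;
  }
};

int main() {
  // Marking saw 96 KB live; trimming during sweeping left 64 KB allocated.
  PageStats page{96 * 1024, 64 * 1024};
  SpaceStats space;
  space.size = page.live_bytes;  // Accounted when sweeping started.
  space.RefineAfterSweeping(&page);
  assert(space.size == 64 * 1024);
  return 0;
}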
@@ -1509,22 +1540,24 @@ Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
   if (!page && static_cast<int>(kTiniest) >= minimum_category)
     page = free_list()->GetPageForCategoryType(kTiniest);
   if (!page) return nullptr;
 
-  AccountUncommitted(page->size());
-  accounting_stats_.DeallocateBytes(page->LiveBytesFromFreeList());
-  accounting_stats_.DecreaseCapacity(page->area_size());
   page->Unlink();
+  AccountRemovedPage(page);
   UnlinkFreeListCategories(page);
   return page;
 }
 
 void PagedSpace::AddPage(Page* page) {
-  AccountCommitted(page->size());
-  accounting_stats_.IncreaseCapacity(page->area_size());
-  accounting_stats_.AllocateBytes(page->LiveBytesFromFreeList());
   page->set_owner(this);
-  RelinkFreeListCategories(page);
   page->InsertAfter(anchor()->prev_page());
+  AccountAddedPage(page);
+  RelinkFreeListCategories(page);
 }
 
+size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
+  size_t unused = page->ShrinkToHighWaterMark();
+  accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
+  AccountUncommitted(unused);
+  return unused;
+}
+
 void PagedSpace::ShrinkImmortalImmovablePages() {

@@ -1535,9 +1568,7 @@ void PagedSpace::ShrinkImmortalImmovablePages() {
 
   for (Page* page : *this) {
     DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
-    size_t unused = page->ShrinkToHighWaterMark();
-    accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
-    AccountUncommitted(unused);
+    ShrinkPageToHighWaterMark(page);
   }
 }

@@ -1640,10 +1671,6 @@ void PagedSpace::EmptyAllocationInfo() {
     Free(current_top, current_limit - current_top);
   }
 
-void PagedSpace::IncreaseCapacity(size_t bytes) {
-  accounting_stats_.ExpandSpace(bytes);
-}
-
 void PagedSpace::ReleasePage(Page* page) {
   DCHECK_EQ(
       0, heap()->incremental_marking()->non_atomic_marking_state()->live_bytes(

@@ -1662,9 +1689,8 @@ void PagedSpace::ReleasePage(Page* page) {
     DCHECK(page->prev_chunk() != NULL);
     page->Unlink();
   }
 
   AccountUncommitted(page->size());
-  accounting_stats_.ShrinkSpace(page->area_size());
+  accounting_stats_.DecreaseCapacity(page->area_size());
   heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
 }
@@ -1724,9 +1750,53 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
     CHECK_LE(black_size, marking_state->live_bytes(page));
   }
   CHECK(allocation_pointer_found_in_space);
+  VerifyCountersAfterSweeping();
 }
 #endif  // VERIFY_HEAP
 
+#ifdef DEBUG
+void PagedSpace::VerifyCountersAfterSweeping() {
+  size_t total_capacity = 0;
+  size_t total_allocated = 0;
+  for (Page* page : *this) {
+    CHECK(page->SweepingDone());
+    total_capacity += page->area_size();
+    HeapObjectIterator it(page);
+    size_t real_allocated = 0;
+    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+      if (!object->IsFiller()) {
+        real_allocated += object->Size();
+      }
+    }
+    total_allocated += page->allocated_bytes();
+    // The real size can be smaller than the accounted size if array trimming,
+    // object slack tracking happened after sweeping.
+    DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
+    DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
+  }
+  DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
+  DCHECK_EQ(total_allocated, accounting_stats_.Size());
+}
+
+void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
+  size_t total_capacity = 0;
+  size_t total_allocated = 0;
+  auto marking_state =
+      heap()->incremental_marking()->non_atomic_marking_state();
+  for (Page* page : *this) {
+    size_t page_allocated =
+        page->SweepingDone()
+            ? page->allocated_bytes()
+            : static_cast<size_t>(marking_state->live_bytes(page));
+    total_capacity += page->area_size();
+    total_allocated += page_allocated;
+    DCHECK_EQ(page_allocated, accounting_stats_.AllocatedOnPage(page));
+  }
+  DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
+  DCHECK_EQ(total_allocated, accounting_stats_.Size());
+}
+#endif
 
 // -----------------------------------------------------------------------------
 // NewSpace implementation

@@ -2384,8 +2454,6 @@ void SemiSpace::Verify() {
       CHECK(
           !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
     }
-    // TODO(gc): Check that the live_bytes_count_ field matches the
-    // black marking on the page (if we make it match in new-space).
     }
     CHECK_EQ(page->prev_page()->next_page(), page);
     page = page->next_page();
@@ -2654,17 +2722,15 @@ FreeSpace* FreeListCategory::SearchForNodeInList(size_t minimum_size,
   return nullptr;
 }
 
-bool FreeListCategory::Free(FreeSpace* free_space, size_t size_in_bytes,
+void FreeListCategory::Free(FreeSpace* free_space, size_t size_in_bytes,
                             FreeMode mode) {
-  if (!page()->CanAllocate()) return false;
-
+  CHECK(page()->CanAllocate());
   free_space->set_next(top());
   set_top(free_space);
   available_ += size_in_bytes;
   if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
     owner()->AddCategory(this);
   }
-  return true;
 }

@@ -2687,7 +2753,6 @@ void FreeListCategory::Relink() {
 }
 
 void FreeListCategory::Invalidate() {
-  page()->remove_available_in_free_list(available());
   Reset();
   type_ = kInvalidCategory;
 }

@@ -2716,6 +2781,7 @@ size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
                                        ClearRecordedSlots::kNo);
 
   Page* page = Page::FromAddress(start);
+  page->DecreaseAllocatedBytes(size_in_bytes);
 
   // Blocks have to be a minimum size to hold free list items.
   if (size_in_bytes < kMinBlockSize) {

@@ -2728,10 +2794,9 @@ size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
   // Insert other blocks at the head of a free list of the appropriate
   // magnitude.
   FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
-  if (page->free_list_category(type)->Free(free_space, size_in_bytes, mode)) {
-    page->add_available_in_free_list(size_in_bytes);
-  }
-  DCHECK_EQ(page->AvailableInFreeList(), page->available_in_free_list());
+  page->free_list_category(type)->Free(free_space, size_in_bytes, mode);
+  DCHECK_EQ(page->AvailableInFreeList(),
+            page->AvailableInFreeListFromAllocatedBytes());
   return 0;
 }

@@ -2742,8 +2807,6 @@ FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, size_t* node_size) {
     FreeListCategory* current = it.Next();
     node = current->PickNodeFromList(node_size);
     if (node != nullptr) {
-      Page::FromAddress(node->address())
-          ->remove_available_in_free_list(*node_size);
       DCHECK(IsVeryLong() || Available() == SumFreeLists());
       return node;
     }

@@ -2758,8 +2821,6 @@ FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type, size_t* node_size,
   FreeSpace* node =
       categories_[type]->TryPickNodeFromList(minimum_size, node_size);
   if (node != nullptr) {
-    Page::FromAddress(node->address())
-        ->remove_available_in_free_list(*node_size);
     DCHECK(IsVeryLong() || Available() == SumFreeLists());
   }
   return node;

@@ -2774,8 +2835,6 @@ FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
     FreeListCategory* current = it.Next();
     node = current->SearchForNodeInList(minimum_size, node_size);
     if (node != nullptr) {
-      Page::FromAddress(node->address())
-          ->remove_available_in_free_list(*node_size);
       DCHECK(IsVeryLong() || Available() == SumFreeLists());
       return node;
     }
@@ -2793,28 +2852,27 @@ FreeSpace* FreeList::FindNodeFor(size_t size_in_bytes, size_t* node_size) {
   // size of a free list category. This operation is constant time.
   FreeListCategoryType type =
       SelectFastAllocationFreeListCategoryType(size_in_bytes);
-  for (int i = type; i < kHuge; i++) {
+  for (int i = type; i < kHuge && node == nullptr; i++) {
     node = FindNodeIn(static_cast<FreeListCategoryType>(i), node_size);
-    if (node != nullptr) return node;
   }
 
-  // Next search the huge list for free list nodes. This takes linear time in
-  // the number of huge elements.
-  node = SearchForNodeInList(kHuge, node_size, size_in_bytes);
-  if (node != nullptr) {
-    DCHECK(IsVeryLong() || Available() == SumFreeLists());
-    return node;
+  if (node == nullptr) {
+    // Next search the huge list for free list nodes. This takes linear time in
+    // the number of huge elements.
+    node = SearchForNodeInList(kHuge, node_size, size_in_bytes);
   }
 
-  // We need a huge block of memory, but we didn't find anything in the huge
-  // list.
-  if (type == kHuge) return nullptr;
+  if (node == nullptr && type != kHuge) {
+    // We didn't find anything in the huge list. Now search the best fitting
+    // free list for a node that has at least the requested size.
+    type = SelectFreeListCategoryType(size_in_bytes);
+    node = TryFindNodeIn(type, node_size, size_in_bytes);
+  }
 
-  // Now search the best fitting free list for a node that has at least the
-  // requested size.
-  type = SelectFreeListCategoryType(size_in_bytes);
-  node = TryFindNodeIn(type, node_size, size_in_bytes);
+  if (node != nullptr) {
+    Page::FromAddress(node->address())->IncreaseAllocatedBytes(*node_size);
+  }
 
   DCHECK(IsVeryLong() || Available() == SumFreeLists());
   return node;
 }
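The rewritten FindNodeFor above funnels all three search stages into a single exit point, so the winning page's allocated-bytes counter is bumped exactly once. A compact, compilable sketch of that control-flow shape follows; FreeListSketch and its trivial stage stubs are hypothetical stand-ins, not V8's real free list.

// Control-flow sketch (hypothetical stand-in, not V8's real free list):
// three search stages fall through to one exit that does the accounting.
#include <cassert>
#include <cstddef>

struct Node { size_t size; };

class FreeListSketch {
 public:
  Node* FindNodeFor(size_t size_in_bytes, size_t* node_size) {
    Node* node = FindFast(size_in_bytes, node_size);
    if (node == nullptr) {
      node = SearchHuge(size_in_bytes, node_size);
    }
    if (node == nullptr) {
      node = TryBestFit(size_in_bytes, node_size);
    }
    if (node != nullptr) {
      // Single accounting point, as in the patch: memory handed to the
      // allocator counts as allocated on its page.
      allocated_bytes_ += *node_size;
    }
    return node;
  }
  size_t allocated_bytes() const { return allocated_bytes_; }

 private:
  // Trivial stand-ins for the real category searches.
  Node* FindFast(size_t, size_t*) { return nullptr; }
  Node* SearchHuge(size_t size, size_t* node_size) {
    if (huge_.size >= size) {
      *node_size = huge_.size;
      return &huge_;
    }
    return nullptr;
  }
  Node* TryBestFit(size_t, size_t*) { return nullptr; }

  Node huge_{4096};
  size_t allocated_bytes_ = 0;
};

int main() {
  FreeListSketch list;
  size_t node_size = 0;
  Node* node = list.FindNodeFor(1024, &node_size);
  assert(node != nullptr && node_size == 4096);
  assert(list.allocated_bytes() == 4096);
  return 0;
}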
@@ -2868,7 +2926,8 @@ HeapObject* FreeList::Allocate(size_t size_in_bytes) {
 
   // Memory in the linear allocation area is counted as allocated. We may free
   // a little of this again immediately - see below.
-  owner_->AccountAllocatedBytes(new_node_size);
+  owner_->IncreaseAllocatedBytes(new_node_size,
+                                 Page::FromAddress(new_node->address()));
 
   if (owner_->heap()->inline_allocation_disabled()) {
     // Keep the linear allocation area empty if requested to do so, just

@@ -184,7 +184,7 @@ class FreeListCategory {
   // category is currently unlinked.
   void Relink();
 
-  bool Free(FreeSpace* node, size_t size_in_bytes, FreeMode mode);
+  void Free(FreeSpace* node, size_t size_in_bytes, FreeMode mode);
 
   // Picks a node from the list and stores its size in |node_size|. Returns
   // nullptr if the category is empty.

@@ -677,8 +677,10 @@ class MemoryChunk {
   base::AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
 
   // PagedSpace free-list statistics.
-  base::AtomicNumber<intptr_t> available_in_free_list_;
+  // Byte allocated on the page, which includes all objects on the page
+  // and the linear allocation area.
+  base::AtomicNumber<intptr_t> allocated_bytes_;
   // Freed memory that was not added to the free list.
   base::AtomicNumber<intptr_t> wasted_memory_;
 
   // next_chunk_ holds a pointer of type MemoryChunk

@@ -704,6 +706,7 @@ class MemoryChunk {
   friend class MemoryChunkValidator;
   friend class MinorMarkingState;
   friend class MinorNonAtomicMarkingState;
+  friend class PagedSpace;
 };
 
 static_assert(kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory,

@@ -801,9 +804,9 @@ class Page : public MemoryChunk {
 
   size_t AvailableInFreeList();
 
-  size_t LiveBytesFromFreeList() {
-    DCHECK_GE(area_size(), wasted_memory() + available_in_free_list());
-    return area_size() - wasted_memory() - available_in_free_list();
+  size_t AvailableInFreeListFromAllocatedBytes() {
+    DCHECK_GE(area_size(), wasted_memory() + allocated_bytes());
+    return area_size() - wasted_memory() - allocated_bytes();
   }
 
   FreeListCategory* free_list_category(FreeListCategoryType type) {

@@ -814,17 +817,19 @@ class Page : public MemoryChunk {
 
   size_t wasted_memory() { return wasted_memory_.Value(); }
   void add_wasted_memory(size_t waste) { wasted_memory_.Increment(waste); }
-  size_t available_in_free_list() { return available_in_free_list_.Value(); }
-  void add_available_in_free_list(size_t available) {
-    DCHECK_LE(available, area_size());
-    available_in_free_list_.Increment(available);
+  size_t allocated_bytes() { return allocated_bytes_.Value(); }
+  void IncreaseAllocatedBytes(size_t bytes) {
+    DCHECK_LE(bytes, area_size());
+    allocated_bytes_.Increment(bytes);
   }
-  void remove_available_in_free_list(size_t available) {
-    DCHECK_LE(available, area_size());
-    DCHECK_GE(available_in_free_list(), available);
-    available_in_free_list_.Decrement(available);
+  void DecreaseAllocatedBytes(size_t bytes) {
+    DCHECK_LE(bytes, area_size());
+    DCHECK_GE(allocated_bytes(), bytes);
+    allocated_bytes_.Decrement(bytes);
   }
 
+  void ResetAllocatedBytes();
+
   size_t ShrinkToHighWaterMark();
 
   V8_EXPORT_PRIVATE void CreateBlackArea(Address start, Address end);
@@ -1611,47 +1616,39 @@ class AllocationStats BASE_EMBEDDED {
   void Clear() {
     capacity_ = 0;
     max_capacity_ = 0;
-    size_ = 0;
+    ClearSize();
   }
 
-  void ClearSize() { size_ = capacity_; }
+  void ClearSize() {
+    size_ = 0;
+#ifdef DEBUG
+    allocated_on_page_.clear();
+#endif
+  }
 
   // Accessors for the allocation statistics.
   size_t Capacity() { return capacity_; }
   size_t MaxCapacity() { return max_capacity_; }
   size_t Size() { return size_; }
+#ifdef DEBUG
+  size_t AllocatedOnPage(Page* page) { return allocated_on_page_[page]; }
+#endif
 
-  // Grow the space by adding available bytes. They are initially marked as
-  // being in use (part of the size), but will normally be immediately freed,
-  // putting them on the free list and removing them from size_.
-  void ExpandSpace(size_t bytes) {
-    DCHECK_GE(size_ + bytes, size_);
-    DCHECK_GE(capacity_ + bytes, capacity_);
-    capacity_ += bytes;
-    size_ += bytes;
-    if (capacity_ > max_capacity_) {
-      max_capacity_ = capacity_;
-    }
-  }
-
-  // Shrink the space by removing available bytes. Since shrinking is done
-  // during sweeping, bytes have been marked as being in use (part of the size)
-  // and are hereby freed.
-  void ShrinkSpace(size_t bytes) {
-    DCHECK_GE(capacity_, bytes);
-    DCHECK_GE(size_, bytes);
-    capacity_ -= bytes;
-    size_ -= bytes;
-  }
-
-  void AllocateBytes(size_t bytes) {
+  void IncreaseAllocatedBytes(size_t bytes, Page* page) {
     DCHECK_GE(size_ + bytes, size_);
     size_ += bytes;
+#ifdef DEBUG
+    allocated_on_page_[page] += bytes;
+#endif
   }
 
-  void DeallocateBytes(size_t bytes) {
+  void DecreaseAllocatedBytes(size_t bytes, Page* page) {
     DCHECK_GE(size_, bytes);
     size_ -= bytes;
+#ifdef DEBUG
+    DCHECK_GE(allocated_on_page_[page], bytes);
+    allocated_on_page_[page] -= bytes;
+#endif
   }
 
   void DecreaseCapacity(size_t bytes) {

@@ -1663,16 +1660,8 @@ class AllocationStats BASE_EMBEDDED {
   void IncreaseCapacity(size_t bytes) {
     DCHECK_GE(capacity_ + bytes, capacity_);
     capacity_ += bytes;
-  }
-
-  // Merge |other| into |this|.
-  void Merge(const AllocationStats& other) {
-    DCHECK_GE(capacity_ + other.capacity_, capacity_);
-    DCHECK_GE(size_ + other.size_, size_);
-    capacity_ += other.capacity_;
-    size_ += other.size_;
-    if (other.max_capacity_ > max_capacity_) {
-      max_capacity_ = other.max_capacity_;
+    if (capacity_ > max_capacity_) {
+      max_capacity_ = capacity_;
     }
   }

@@ -1686,6 +1675,10 @@ class AllocationStats BASE_EMBEDDED {
 
   // |size_|: The number of allocated bytes.
   size_t size_;
 
+#ifdef DEBUG
+  std::unordered_map<Page*, size_t, Page::Hasher> allocated_on_page_;
+#endif
+
 };
 
 // A free list maintaining free blocks of memory. The free list is organized in
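In DEBUG builds, AllocationStats now mirrors every size_ update in a per-page map so the verifiers can attribute the space size page by page. A sketch of that pattern follows, under simplifying assumptions: an int page id replaces the real Page* key with its custom hasher, and all names are hypothetical.

// Sketch of the DEBUG-only attribution map (simplified stand-in): every
// size_ update is mirrored per page, so a verifier can check each page's
// contribution to the space size.
#include <cassert>
#include <cstddef>
#include <unordered_map>

class AllocationStatsSketch {
 public:
  void IncreaseAllocatedBytes(size_t bytes, int page_id) {
    size_ += bytes;
#ifdef DEBUG
    allocated_on_page_[page_id] += bytes;
#endif
  }
  void DecreaseAllocatedBytes(size_t bytes, int page_id) {
    assert(size_ >= bytes);
    size_ -= bytes;
#ifdef DEBUG
    assert(allocated_on_page_[page_id] >= bytes);
    allocated_on_page_[page_id] -= bytes;
#endif
  }
  size_t Size() const { return size_; }
#ifdef DEBUG
  size_t AllocatedOnPage(int page_id) { return allocated_on_page_[page_id]; }
#endif

 private:
  size_t size_ = 0;
#ifdef DEBUG
  std::unordered_map<int, size_t> allocated_on_page_;
#endif
};

int main() {
  AllocationStatsSketch stats;
  stats.IncreaseAllocatedBytes(4096, /*page_id=*/1);
  stats.DecreaseAllocatedBytes(1024, /*page_id=*/1);
  assert(stats.Size() == 3072);
  return 0;
}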
@@ -2062,7 +2055,8 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
   // no attempt to add area to free list is made.
   size_t Free(Address start, size_t size_in_bytes) {
     size_t wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
-    accounting_stats_.DeallocateBytes(size_in_bytes);
+    Page* page = Page::FromAddress(start);
+    accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
     DCHECK_GE(size_in_bytes, wasted);
     return size_in_bytes - wasted;
   }

@@ -2093,15 +2087,26 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
   void MarkAllocationInfoBlack();
   void UnmarkAllocationInfo();
 
-  void AccountAllocatedBytes(size_t bytes) {
-    accounting_stats_.AllocateBytes(bytes);
+  void DecreaseAllocatedBytes(size_t bytes, Page* page) {
+    accounting_stats_.DecreaseAllocatedBytes(bytes, page);
+  }
+  void IncreaseAllocatedBytes(size_t bytes, Page* page) {
+    accounting_stats_.IncreaseAllocatedBytes(bytes, page);
+  }
+  void DecreaseCapacity(size_t bytes) {
+    accounting_stats_.DecreaseCapacity(bytes);
+  }
+  void IncreaseCapacity(size_t bytes) {
+    accounting_stats_.IncreaseCapacity(bytes);
   }
 
-  void IncreaseCapacity(size_t bytes);
-
   // Releases an unused page and shrinks the space.
   void ReleasePage(Page* page);
 
+  void AccountAddedPage(Page* page);
+  void AccountRemovedPage(Page* page);
+  void RefineAllocatedBytesAfterSweeping(Page* page);
+
   // The dummy page that anchors the linked list of pages.
   Page* anchor() { return &anchor_; }

@@ -2116,6 +2121,8 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
 #endif
 
 #ifdef DEBUG
+  void VerifyCountersAfterSweeping();
+  void VerifyCountersBeforeConcurrentSweeping();
   // Print meta info and objects in this space.
   void Print() override;

@@ -2162,6 +2169,8 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
   // using the high water mark.
   void ShrinkImmortalImmovablePages();
 
+  size_t ShrinkPageToHighWaterMark(Page* page);
+
   std::unique_ptr<ObjectIterator> GetObjectIterator() override;
 
   // Remove a page if it has at least |size_in_bytes| bytes available that can
@@ -176,7 +176,6 @@ void SimulateFullSpace(v8::internal::PagedSpace* space) {
   }
   space->EmptyAllocationInfo();
   space->ResetFreeList();
-  space->ClearStats();
 }
 
 void AbandonCurrentlyFreeMemory(PagedSpace* space) {

@@ -609,13 +609,14 @@ TEST(ShrinkPageToHighWaterMarkFreeSpaceEnd) {
   Page* page = Page::FromAddress(array->address());
 
   // Reset space so high water mark is consistent.
-  CcTest::heap()->old_space()->ResetFreeList();
-  CcTest::heap()->old_space()->EmptyAllocationInfo();
+  PagedSpace* old_space = CcTest::heap()->old_space();
+  old_space->ResetFreeList();
+  old_space->EmptyAllocationInfo();
 
   HeapObject* filler =
       HeapObject::FromAddress(array->address() + array->Size());
   CHECK(filler->IsFreeSpace());
-  size_t shrunk = page->ShrinkToHighWaterMark();
+  size_t shrunk = old_space->ShrinkPageToHighWaterMark(page);
   size_t should_have_shrunk =
       RoundDown(static_cast<size_t>(Page::kAllocatableMemory - array->Size()),
                 base::OS::CommitPageSize());

@@ -636,10 +637,11 @@ TEST(ShrinkPageToHighWaterMarkNoFiller) {
   CHECK_EQ(page->area_end(), array->address() + array->Size() + kFillerSize);
 
   // Reset space so high water mark and fillers are consistent.
-  CcTest::heap()->old_space()->ResetFreeList();
-  CcTest::heap()->old_space()->EmptyAllocationInfo();
+  PagedSpace* old_space = CcTest::heap()->old_space();
+  old_space->ResetFreeList();
+  old_space->EmptyAllocationInfo();
 
-  const size_t shrunk = page->ShrinkToHighWaterMark();
+  size_t shrunk = old_space->ShrinkPageToHighWaterMark(page);
   CHECK_EQ(0u, shrunk);
 }

@@ -658,14 +660,15 @@ TEST(ShrinkPageToHighWaterMarkOneWordFiller) {
   CHECK_EQ(page->area_end(), array->address() + array->Size() + kFillerSize);
 
   // Reset space so high water mark and fillers are consistent.
-  CcTest::heap()->old_space()->ResetFreeList();
-  CcTest::heap()->old_space()->EmptyAllocationInfo();
+  PagedSpace* old_space = CcTest::heap()->old_space();
+  old_space->ResetFreeList();
+  old_space->EmptyAllocationInfo();
 
   HeapObject* filler =
       HeapObject::FromAddress(array->address() + array->Size());
   CHECK_EQ(filler->map(), CcTest::heap()->one_pointer_filler_map());
 
-  const size_t shrunk = page->ShrinkToHighWaterMark();
+  size_t shrunk = old_space->ShrinkPageToHighWaterMark(page);
   CHECK_EQ(0u, shrunk);
 }

@@ -684,14 +687,15 @@ TEST(ShrinkPageToHighWaterMarkTwoWordFiller) {
   CHECK_EQ(page->area_end(), array->address() + array->Size() + kFillerSize);
 
   // Reset space so high water mark and fillers are consistent.
-  CcTest::heap()->old_space()->ResetFreeList();
-  CcTest::heap()->old_space()->EmptyAllocationInfo();
+  PagedSpace* old_space = CcTest::heap()->old_space();
+  old_space->ResetFreeList();
+  old_space->EmptyAllocationInfo();
 
   HeapObject* filler =
       HeapObject::FromAddress(array->address() + array->Size());
   CHECK_EQ(filler->map(), CcTest::heap()->two_pointer_filler_map());
 
-  const size_t shrunk = page->ShrinkToHighWaterMark();
+  size_t shrunk = old_space->ShrinkPageToHighWaterMark(page);
   CHECK_EQ(0u, shrunk);
 }

@@ -53,6 +53,7 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
       static_cast<PagedSpace*>(heap()->old_space()),
       Executability::NOT_EXECUTABLE);
   heap()->old_space()->UnlinkFreeListCategories(page);
+  heap()->old_space()->AccountRemovedPage(page);
   EXPECT_NE(nullptr, page);
   const int page_size = getpagesize();
   void* start_address = static_cast<void*>(page->address());

@@ -72,6 +73,7 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
      static_cast<PagedSpace*>(heap()->old_space()),
      Executability::NOT_EXECUTABLE);
   heap()->old_space()->UnlinkFreeListCategories(page);
+  heap()->old_space()->AccountRemovedPage(page);
   EXPECT_NE(nullptr, page);
   const int page_size = getpagesize();
   void* start_address = static_cast<void*>(page->address());