[heap] Use size_t in free list and evacuation candidate selection.
BUG=chromium:652721
Review-Url: https://codereview.chromium.org/2406363002
Cr-Commit-Position: refs/heads/master@{#40250}

parent e89eef3029
commit 5831264356
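This change converts byte counts in the free list and in evacuation candidate selection from int (and intptr_t) to size_t. Two patterns recur throughout the diff: every size_t subtraction is preceded by a DCHECK/CHECK that the result cannot underflow, and every remaining int-based API is bridged with an explicit static_cast at the call boundary. A minimal sketch of the hazard being addressed (illustrative values, not part of the commit):

    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t area_size = 512 * 1024;   // page payload, illustrative
      size_t live_bytes = 300 * 1024;
      // Unsigned subtraction wraps instead of going negative, so the
      // commit guards each subtraction with DCHECK_GE/CHECK_GT.
      if (area_size >= live_bytes) {
        size_t free_bytes = area_size - live_bytes;
        std::printf("free_bytes=%zu\n", free_bytes);  // %zu matches size_t
      }
      return 0;
    }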
src/globals.h

@@ -141,19 +141,20 @@ const int kMinUInt16 = 0;
 const uint32_t kMaxUInt32 = 0xFFFFFFFFu;
 const int kMinUInt32 = 0;
 
-const int kCharSize = sizeof(char);        // NOLINT
-const int kShortSize = sizeof(short);      // NOLINT
-const int kIntSize = sizeof(int);          // NOLINT
-const int kInt32Size = sizeof(int32_t);    // NOLINT
-const int kInt64Size = sizeof(int64_t);    // NOLINT
-const int kFloatSize = sizeof(float);      // NOLINT
-const int kDoubleSize = sizeof(double);    // NOLINT
-const int kIntptrSize = sizeof(intptr_t);  // NOLINT
-const int kPointerSize = sizeof(void*);    // NOLINT
+const int kCharSize = sizeof(char);
+const int kShortSize = sizeof(short);  // NOLINT
+const int kIntSize = sizeof(int);
+const int kInt32Size = sizeof(int32_t);
+const int kInt64Size = sizeof(int64_t);
+const int kSizetSize = sizeof(size_t);
+const int kFloatSize = sizeof(float);
+const int kDoubleSize = sizeof(double);
+const int kIntptrSize = sizeof(intptr_t);
+const int kPointerSize = sizeof(void*);
 
 #if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
 const int kRegisterSize = kPointerSize + kPointerSize;
 #else
 const int kRegisterSize = kPointerSize;
 #endif
 const int kPCOnStackSize = kRegisterSize;
 const int kFPOnStackSize = kRegisterSize;
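The new kSizetSize constant gives layout arithmetic an explicit term for size_t fields; it is used below in FreeListCategory::kSize and MemoryChunk::kMinHeaderSize. A compile-time sketch of the idiom (hypothetical struct and field names, not V8 code):

    #include <cstddef>

    const int kIntSize = sizeof(int);
    const int kSizetSize = sizeof(size_t);
    const int kPointerSize = sizeof(void*);

    struct Example {
      int type;          // kIntSize
      int padding;       // kIntSize, keeps available aligned on 64-bit
      size_t available;  // kSizetSize
      void* top;         // kPointerSize
    };

    static_assert(sizeof(Example) ==
                      kIntSize + kIntSize + kSizetSize + kPointerSize,
                  "layout constant must track the struct");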
src/heap/mark-compact.cc

@@ -575,22 +575,21 @@ const char* AllocationSpaceName(AllocationSpace space) {
   return NULL;
 }
 
 void MarkCompactCollector::ComputeEvacuationHeuristics(
-    int area_size, int* target_fragmentation_percent,
-    int* max_evacuated_bytes) {
+    size_t area_size, int* target_fragmentation_percent,
+    size_t* max_evacuated_bytes) {
   // For memory reducing and optimize for memory mode we directly define both
   // constants.
   const int kTargetFragmentationPercentForReduceMemory = 20;
-  const int kMaxEvacuatedBytesForReduceMemory = 12 * MB;
+  const size_t kMaxEvacuatedBytesForReduceMemory = 12 * MB;
   const int kTargetFragmentationPercentForOptimizeMemory = 20;
-  const int kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
+  const size_t kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
 
   // For regular mode (which is latency critical) we define less aggressive
   // defaults to start and switch to a trace-based (using compaction speed)
   // approach as soon as we have enough samples.
   const int kTargetFragmentationPercent = 70;
-  const int kMaxEvacuatedBytes = 4 * MB;
+  const size_t kMaxEvacuatedBytes = 4 * MB;
   // Time to take for a single area (=payload of page). Used as soon as there
   // exist enough compaction speed samples.
   const float kTargetMsPerArea = .5;
@@ -629,10 +628,10 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
 
   int number_of_pages = space->CountTotalPages();
-  int area_size = space->AreaSize();
+  size_t area_size = space->AreaSize();
 
   // Pairs of (live_bytes_in_page, page).
-  typedef std::pair<int, Page*> LiveBytesPagePair;
+  typedef std::pair<size_t, Page*> LiveBytesPagePair;
   std::vector<LiveBytesPagePair> pages;
   pages.reserve(number_of_pages);
@@ -651,7 +650,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   }
 
   int candidate_count = 0;
-  int total_live_bytes = 0;
+  size_t total_live_bytes = 0;
 
   const bool reduce_memory = heap()->ShouldReduceMemory();
   if (FLAG_manual_evacuation_candidates_selection) {
@@ -687,12 +686,12 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   // them starting with the page with the most free memory, adding them to the
   // set of evacuation candidates as long as both conditions (fragmentation
   // and quota) hold.
-  int max_evacuated_bytes;
+  size_t max_evacuated_bytes;
   int target_fragmentation_percent;
   ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
                               &max_evacuated_bytes);
 
-  const intptr_t free_bytes_threshold =
+  const size_t free_bytes_threshold =
       target_fragmentation_percent * (area_size / 100);
 
   // Sort pages from the most free to the least free, then select
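The threshold arithmetic divides before multiplying: with area_size = 500 KiB and a 70% target, 70 * (512000 / 100) = 358400 bytes. Dividing first keeps the intermediate value small, and the int percentage is promoted to size_t in the mixed expression. A sketch with illustrative numbers:

    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t area_size = 500 * 1024;         // ~500 KiB payload
      int target_fragmentation_percent = 70;
      size_t free_bytes_threshold =
          target_fragmentation_percent * (area_size / 100);
      std::printf("threshold=%zu\n", free_bytes_threshold);  // 358400
      return 0;
    }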
@@ -705,8 +704,9 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
     return a.first < b.first;
   });
   for (size_t i = 0; i < pages.size(); i++) {
-    int live_bytes = pages[i].first;
-    int free_bytes = area_size - live_bytes;
+    size_t live_bytes = pages[i].first;
+    DCHECK_GE(area_size, live_bytes);
+    size_t free_bytes = area_size - live_bytes;
     if (FLAG_always_compact ||
         ((free_bytes >= free_bytes_threshold) &&
          ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
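This is the commit's recurring subtraction pattern: assert that the minuend dominates before subtracting, because a size_t result silently wraps to a huge value instead of going negative. A standalone sketch (assert stands in for DCHECK_GE):

    #include <cassert>
    #include <cstddef>

    size_t FreeBytes(size_t area_size, size_t live_bytes) {
      assert(area_size >= live_bytes);  // stands in for DCHECK_GE
      return area_size - live_bytes;    // safe: cannot underflow here
    }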
@@ -715,10 +715,10 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
     }
     if (FLAG_trace_fragmentation_verbose) {
       PrintIsolate(isolate(),
-                   "compaction-selection-page: space=%s free_bytes_page=%d "
+                   "compaction-selection-page: space=%s free_bytes_page=%zu "
                    "fragmentation_limit_kb=%" V8PRIdPTR
-                   " fragmentation_limit_percent=%d sum_compaction_kb=%d "
-                   "compaction_limit_kb=%d\n",
+                   " fragmentation_limit_percent=%d sum_compaction_kb=%zu "
+                   "compaction_limit_kb=%zu\n",
                    AllocationSpaceName(space->identity()), free_bytes / KB,
                    free_bytes_threshold / KB, target_fragmentation_percent,
                    total_live_bytes / KB, max_evacuated_bytes / KB);
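The format strings change together with the types: passing a size_t where %d expects int is undefined behavior, so every converted counter switches to %zu. Minimal sketch:

    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t free_bytes = 123456;
      std::printf("free_bytes_page=%zu\n", free_bytes);  // %zu for size_t
      return 0;
    }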
@@ -726,7 +726,8 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   }
   // How many pages we will allocate for the evacuated objects
   // in the worst case: ceil(total_live_bytes / area_size)
-  int estimated_new_pages = (total_live_bytes + area_size - 1) / area_size;
+  int estimated_new_pages =
+      static_cast<int>((total_live_bytes + area_size - 1) / area_size);
   DCHECK_LE(estimated_new_pages, candidate_count);
   int estimated_released_pages = candidate_count - estimated_new_pages;
   // Avoid (compact -> expand) cycles.
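The estimate uses the integer ceiling idiom ceil(n / d) == (n + d - 1) / d for d > 0, now computed in size_t and narrowed once at the end. Worked sketch with illustrative values:

    #include <cstddef>
    #include <cstdio>

    size_t CeilDiv(size_t n, size_t d) { return (n + d - 1) / d; }

    int main() {
      size_t total_live_bytes = 5 * 1024 * 1024;  // 5 MiB live data
      size_t area_size = 500 * 1024;              // ~500 KiB per page
      // Plain division would give 10; the idiom rounds up to 11.
      std::printf("pages=%zu\n", CeilDiv(total_live_bytes, area_size));
      return 0;
    }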
@@ -741,7 +742,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   if (FLAG_trace_fragmentation) {
     PrintIsolate(isolate(),
                  "compaction-selection: space=%s reduce_memory=%d pages=%d "
-                 "total_live_bytes=%d\n",
+                 "total_live_bytes=%zu\n",
                  AllocationSpaceName(space->identity()), reduce_memory,
                  candidate_count, total_live_bytes / KB);
   }
@@ -3354,7 +3355,8 @@ int MarkCompactCollector::Sweeper::RawSweep(
     DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
     Address free_end = object->address();
     if (free_end != free_start) {
-      int size = static_cast<int>(free_end - free_start);
+      CHECK_GT(free_end, free_start);
+      size_t size = static_cast<size_t>(free_end - free_start);
       if (free_space_mode == ZAP_FREE_SPACE) {
         memset(free_start, 0xcc, size);
       }
@@ -3363,7 +3365,7 @@ int MarkCompactCollector::Sweeper::RawSweep(
                                 free_start, size);
         max_freed_bytes = Max(freed_bytes, max_freed_bytes);
       } else {
-        p->heap()->CreateFillerObjectAt(free_start, size,
+        p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
                                         ClearRecordedSlots::kNo);
       }
     }
@@ -3385,7 +3387,8 @@ int MarkCompactCollector::Sweeper::RawSweep(
   p->ClearLiveness();
 
   if (free_start != p->area_end()) {
-    int size = static_cast<int>(p->area_end() - free_start);
+    CHECK_GT(p->area_end(), free_start);
+    size_t size = static_cast<size_t>(p->area_end() - free_start);
     if (free_space_mode == ZAP_FREE_SPACE) {
       memset(free_start, 0xcc, size);
     }
@@ -3394,7 +3397,7 @@ int MarkCompactCollector::Sweeper::RawSweep(
                               free_start, size);
       max_freed_bytes = Max(freed_bytes, max_freed_bytes);
     } else {
-      p->heap()->CreateFillerObjectAt(free_start, size,
+      p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
                                       ClearRecordedSlots::kNo);
     }
   }
@@ -3856,7 +3859,8 @@ void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space,
 void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space,
                                                          Page* page) {
   page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
-  int to_sweep = page->area_size() - page->LiveBytes();
+  DCHECK_GE(page->area_size(), static_cast<size_t>(page->LiveBytes()));
+  size_t to_sweep = page->area_size() - page->LiveBytes();
   if (space != NEW_SPACE)
     heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
 }
src/heap/mark-compact.h

@@ -503,9 +503,9 @@ class MarkCompactCollector {
 
   bool WillBeDeoptimized(Code* code);
 
-  void ComputeEvacuationHeuristics(int area_size,
+  void ComputeEvacuationHeuristics(size_t area_size,
                                    int* target_fragmentation_percent,
-                                   int* max_evacuated_bytes);
+                                   size_t* max_evacuated_bytes);
 
   void VisitAllObjects(HeapObjectVisitor* visitor);

src/heap/spaces.cc

@@ -279,6 +279,7 @@ intptr_t PagedSpace::RelinkFreeListCategories(Page* page) {
     added += category->available();
     category->Relink();
   });
+  DCHECK_EQ(page->AvailableInFreeList(), page->available_in_free_list());
   return added;
 }
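The new DCHECK_EQ cross-checks the page's cached available_in_free_list_ counter against a full recomputation (the AvailableInFreeList() walk added in the next hunk). A sketch of the cached-counter pattern, with hypothetical names:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    class Ledger {
     public:
      void Add(size_t bytes) {
        blocks_.push_back(bytes);
        cached_total_ += bytes;  // fast-path bookkeeping
      }
      size_t Recompute() const {  // ground truth, linear walk
        size_t sum = 0;
        for (size_t b : blocks_) sum += b;
        return sum;
      }
      void CheckInvariant() const { assert(Recompute() == cached_total_); }

     private:
      std::vector<size_t> blocks_;
      size_t cached_total_ = 0;
    };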
@@ -776,6 +776,14 @@ void Page::ResetFreeListStatistics() {
   available_in_free_list_ = 0;
 }
 
+size_t Page::AvailableInFreeList() {
+  size_t sum = 0;
+  ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
+    sum += category->available();
+  });
+  return sum;
+}
+
 size_t Page::ShrinkToHighWaterMark() {
   // Shrink pages to high water mark. The water mark points either to a filler
   // or the area_end.
@@ -1245,6 +1253,7 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
     p->set_owner(this);
     p->InsertAfter(anchor_.prev_page());
     RelinkFreeListCategories(p);
+    DCHECK_EQ(p->AvailableInFreeList(), p->available_in_free_list());
   }
 }
@@ -1389,7 +1398,8 @@ void PagedSpace::EmptyAllocationInfo() {
   }
 
   SetTopAndLimit(NULL, NULL);
-  Free(current_top, static_cast<int>(current_limit - current_top));
+  DCHECK_GE(current_limit, current_top);
+  Free(current_top, current_limit - current_top);
 }
 
 void PagedSpace::IncreaseCapacity(size_t bytes) {
@@ -1599,7 +1609,7 @@ bool SemiSpace::EnsureCurrentCapacity() {
       current_page->SetFlags(anchor()->prev_page()->GetFlags(),
                              Page::kCopyAllFlags);
       heap()->CreateFillerObjectAt(current_page->area_start(),
-                                   current_page->area_size(),
+                                   static_cast<int>(current_page->area_size()),
                                    ClearRecordedSlots::kNo);
     }
   }
@@ -2338,7 +2348,7 @@ void FreeListCategory::Reset() {
   available_ = 0;
 }
 
-FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
+FreeSpace* FreeListCategory::PickNodeFromList(size_t* node_size) {
   DCHECK(page()->CanAllocate());
 
   FreeSpace* node = top();
@@ -2349,8 +2359,8 @@ FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
   return node;
 }
 
-FreeSpace* FreeListCategory::TryPickNodeFromList(int minimum_size,
-                                                 int* node_size) {
+FreeSpace* FreeListCategory::TryPickNodeFromList(size_t minimum_size,
+                                                 size_t* node_size) {
   DCHECK(page()->CanAllocate());
 
   FreeSpace* node = PickNodeFromList(node_size);
@@ -2362,15 +2372,16 @@ FreeSpace* FreeListCategory::TryPickNodeFromList(int minimum_size,
   return node;
 }
 
-FreeSpace* FreeListCategory::SearchForNodeInList(int minimum_size,
-                                                 int* node_size) {
+FreeSpace* FreeListCategory::SearchForNodeInList(size_t minimum_size,
+                                                 size_t* node_size) {
   DCHECK(page()->CanAllocate());
 
   FreeSpace* prev_non_evac_node = nullptr;
   for (FreeSpace* cur_node = top(); cur_node != nullptr;
        cur_node = cur_node->next()) {
-    int size = cur_node->size();
+    size_t size = cur_node->size();
     if (size >= minimum_size) {
+      DCHECK_GE(available_, size);
       available_ -= size;
       if (cur_node == top()) {
         set_top(cur_node->next());
@@ -2387,7 +2398,7 @@ FreeSpace* FreeListCategory::SearchForNodeInList(int minimum_size,
   return nullptr;
 }
 
-bool FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes,
+bool FreeListCategory::Free(FreeSpace* free_space, size_t size_in_bytes,
                             FreeMode mode) {
   if (!page()->CanAllocate()) return false;
@@ -2420,7 +2431,7 @@ void FreeListCategory::Relink() {
 }
 
 void FreeListCategory::Invalidate() {
-  page()->add_available_in_free_list(-available());
+  page()->remove_available_in_free_list(available());
   Reset();
   type_ = kInvalidCategory;
 }
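With available() now returning size_t, the old idiom add_available_in_free_list(-available()) would negate an unsigned value, which wraps modulo 2^N instead of producing a negative delta. The commit therefore introduces remove_available_in_free_list() and uses it at every former add(-x) site. Sketch of the wraparound:

    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t available = 64;
      size_t negated = -available;  // wraps: SIZE_MAX - 63, not -64
      std::printf("%zu\n", negated);
      return 0;
    }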
@@ -2442,10 +2453,10 @@ void FreeList::Reset() {
   ResetStats();
 }
 
-int FreeList::Free(Address start, int size_in_bytes, FreeMode mode) {
+size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
   if (size_in_bytes == 0) return 0;
 
-  owner()->heap()->CreateFillerObjectAt(start, size_in_bytes,
+  owner()->heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
                                         ClearRecordedSlots::kNo);
 
   Page* page = Page::FromAddress(start);
@@ -2464,10 +2475,11 @@ int FreeList::Free(Address start, int size_in_bytes, FreeMode mode) {
   if (page->free_list_category(type)->Free(free_space, size_in_bytes, mode)) {
     page->add_available_in_free_list(size_in_bytes);
   }
+  DCHECK_EQ(page->AvailableInFreeList(), page->available_in_free_list());
   return 0;
 }
 
-FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, int* node_size) {
+FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, size_t* node_size) {
   FreeListCategoryIterator it(this, type);
   FreeSpace* node = nullptr;
   while (it.HasNext()) {
@@ -2475,7 +2487,7 @@ FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, int* node_size) {
     node = current->PickNodeFromList(node_size);
     if (node != nullptr) {
       Page::FromAddress(node->address())
-          ->add_available_in_free_list(-(*node_size));
+          ->remove_available_in_free_list(*node_size);
       DCHECK(IsVeryLong() || Available() == SumFreeLists());
       return node;
     }
@@ -2484,21 +2496,22 @@ FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, int* node_size) {
   return node;
 }
 
-FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type, int* node_size,
-                                   int minimum_size) {
+FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type, size_t* node_size,
+                                   size_t minimum_size) {
   if (categories_[type] == nullptr) return nullptr;
   FreeSpace* node =
       categories_[type]->TryPickNodeFromList(minimum_size, node_size);
   if (node != nullptr) {
     Page::FromAddress(node->address())
-        ->add_available_in_free_list(-(*node_size));
+        ->remove_available_in_free_list(*node_size);
     DCHECK(IsVeryLong() || Available() == SumFreeLists());
   }
   return node;
 }
 
 FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
-                                         int* node_size, int minimum_size) {
+                                         size_t* node_size,
+                                         size_t minimum_size) {
   FreeListCategoryIterator it(this, type);
   FreeSpace* node = nullptr;
   while (it.HasNext()) {
@@ -2506,7 +2519,7 @@ FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
     node = current->SearchForNodeInList(minimum_size, node_size);
     if (node != nullptr) {
       Page::FromAddress(node->address())
-          ->add_available_in_free_list(-(*node_size));
+          ->remove_available_in_free_list(*node_size);
       DCHECK(IsVeryLong() || Available() == SumFreeLists());
       return node;
     }
@@ -2517,7 +2530,7 @@ FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
   return node;
 }
 
-FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
+FreeSpace* FreeList::FindNodeFor(size_t size_in_bytes, size_t* node_size) {
   FreeSpace* node = nullptr;
 
   // First try the allocation fast path: try to allocate the minimum element
@@ -2554,12 +2567,12 @@ FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
 // allocation space has been set up with the top and limit of the space. If
 // the allocation fails then NULL is returned, and the caller can perform a GC
 // or allocate a new page before retrying.
-HeapObject* FreeList::Allocate(int size_in_bytes) {
-  DCHECK(0 < size_in_bytes);
+HeapObject* FreeList::Allocate(size_t size_in_bytes) {
   DCHECK(size_in_bytes <= kMaxBlockSize);
   DCHECK(IsAligned(size_in_bytes, kPointerSize));
   // Don't free list allocate if there is linear space available.
-  DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
+  DCHECK_LT(static_cast<size_t>(owner_->limit() - owner_->top()),
+            size_in_bytes);
 
   // Mark the old linear allocation area with a free space map so it can be
   // skipped when scanning the heap. This also puts it back in the free list
@@ -2569,15 +2582,15 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   owner_->heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
       Heap::kNoGCFlags, kNoGCCallbackFlags);
 
-  int new_node_size = 0;
+  size_t new_node_size = 0;
   FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
   if (new_node == nullptr) return nullptr;
 
-  int bytes_left = new_node_size - size_in_bytes;
-  DCHECK(bytes_left >= 0);
+  DCHECK_GE(new_node_size, size_in_bytes);
+  size_t bytes_left = new_node_size - size_in_bytes;
 
 #ifdef DEBUG
-  for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
+  for (size_t i = 0; i < size_in_bytes / kPointerSize; i++) {
     reinterpret_cast<Object**>(new_node->address())[i] =
         Smi::FromInt(kCodeZapValue);
   }
@@ -2588,11 +2601,11 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   // candidate.
   DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
 
-  const int kThreshold = IncrementalMarking::kAllocatedThreshold;
+  const size_t kThreshold = IncrementalMarking::kAllocatedThreshold;
 
   // Memory in the linear allocation area is counted as allocated. We may free
   // a little of this again immediately - see below.
-  owner_->Allocate(new_node_size);
+  owner_->Allocate(static_cast<int>(new_node_size));
 
   if (owner_->heap()->inline_allocation_disabled()) {
     // Keep the linear allocation area empty if requested to do so, just
@@ -2603,17 +2616,17 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   } else if (bytes_left > kThreshold &&
              owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
              FLAG_incremental_marking) {
-    int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
+    size_t linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
     // We don't want to give too large linear areas to the allocator while
     // incremental marking is going on, because we won't check again whether
     // we want to do another increment until the linear area is used up.
+    DCHECK_GE(new_node_size, size_in_bytes + linear_size);
     owner_->Free(new_node->address() + size_in_bytes + linear_size,
                  new_node_size - size_in_bytes - linear_size);
     owner_->SetAllocationInfo(
         new_node->address() + size_in_bytes,
         new_node->address() + size_in_bytes + linear_size);
   } else {
-    DCHECK(bytes_left >= 0);
     // Normally we give the rest of the node to the allocator as its new
     // linear allocation area.
     owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
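Allocate() splits the found node into the requested object, an optional bounded linear allocation area, and a remainder returned to the free list; the new DCHECK_GE guarantees the size_t remainder arithmetic cannot underflow. An illustrative split helper (hypothetical names, not V8 code):

    #include <cassert>
    #include <cstddef>

    struct Split {
      size_t object_bytes;
      size_t linear_bytes;
      size_t returned_bytes;  // goes back on the free list
    };

    Split SplitNode(size_t new_node_size, size_t size_in_bytes,
                    size_t linear_size) {
      assert(new_node_size >= size_in_bytes + linear_size);  // DCHECK_GE
      return {size_in_bytes, linear_size,
              new_node_size - size_in_bytes - linear_size};
    }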
@@ -2623,8 +2636,8 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   return new_node;
 }
 
-intptr_t FreeList::EvictFreeListItems(Page* page) {
-  intptr_t sum = 0;
+size_t FreeList::EvictFreeListItems(Page* page) {
+  size_t sum = 0;
   page->ForAllFreeListCategories(
       [this, &sum, page](FreeListCategory* category) {
         DCHECK_EQ(this, category->owner());
@@ -2698,8 +2711,8 @@ void FreeList::PrintCategories(FreeListCategoryType type) {
 
 
 #ifdef DEBUG
-intptr_t FreeListCategory::SumFreeList() {
-  intptr_t sum = 0;
+size_t FreeListCategory::SumFreeList() {
+  size_t sum = 0;
   FreeSpace* cur = top();
   while (cur != NULL) {
     DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
@@ -2736,8 +2749,8 @@ bool FreeList::IsVeryLong() {
 // This can take a very long time because it is linear in the number of entries
 // on the free list, so it should not be called if FreeListLength returns
 // kVeryLongFreeList.
-intptr_t FreeList::SumFreeLists() {
-  intptr_t sum = 0;
+size_t FreeList::SumFreeLists() {
+  size_t sum = 0;
   ForAllFreeListCategories(
       [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
   return sum;
@@ -2776,10 +2789,12 @@ void PagedSpace::RepairFreeListsAfterDeserialization() {
   // Each page may have a small free space that is not tracked by a free list.
   // Update the maps for those free space objects.
   for (Page* page : *this) {
-    int size = static_cast<int>(page->wasted_memory());
+    size_t size = page->wasted_memory();
     if (size == 0) continue;
+    DCHECK_GE(static_cast<size_t>(Page::kPageSize), size);
     Address address = page->OffsetToAddress(Page::kPageSize - size);
-    heap()->CreateFillerObjectAt(address, size, ClearRecordedSlots::kNo);
+    heap()->CreateFillerObjectAt(address, static_cast<int>(size),
+                                 ClearRecordedSlots::kNo);
   }
 }
@@ -2821,7 +2836,6 @@ HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
   return nullptr;
 }
 
-
 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   const int kMaxPagesToSweep = 1;
 
@@ -2835,7 +2849,8 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
     RefillFreeList();
 
     // Retry the free list allocation.
-    HeapObject* object = free_list_.Allocate(size_in_bytes);
+    HeapObject* object =
+        free_list_.Allocate(static_cast<size_t>(size_in_bytes));
     if (object != NULL) return object;
 
     // If sweeping is still in progress try to sweep pages on the main thread.
@@ -2843,7 +2858,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
           identity(), size_in_bytes, kMaxPagesToSweep);
       RefillFreeList();
       if (max_freed >= size_in_bytes) {
-        object = free_list_.Allocate(size_in_bytes);
+        object = free_list_.Allocate(static_cast<size_t>(size_in_bytes));
         if (object != nullptr) return object;
       }
     }
@@ -2851,7 +2866,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   if (heap()->ShouldExpandOldGenerationOnAllocationFailure() && Expand()) {
     DCHECK((CountTotalPages() > 1) ||
            (size_in_bytes <= free_list_.Available()));
-    return free_list_.Allocate(size_in_bytes);
+    return free_list_.Allocate(static_cast<size_t>(size_in_bytes));
   }
 
   // If sweeper threads are active, wait for them at that point and steal
@@ -2971,7 +2986,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
   LargePage* page = heap()->memory_allocator()->AllocateLargePage(
       object_size, this, executable);
   if (page == NULL) return AllocationResult::Retry(identity());
-  DCHECK(page->area_size() >= object_size);
+  DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
 
   size_ += static_cast<int>(page->size());
   AccountCommitted(page->size());
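Callers such as PagedSpace::SlowAllocateRaw() still pass int, so the conversion is confined to the free list and bridged with casts exactly at the boundary: widening static_cast<size_t> on the way in, narrowing static_cast<int> on the way out, with DCHECKs keeping the narrowing honest in debug builds. A sketch of the two directions (hypothetical helper names):

    #include <cassert>
    #include <cstddef>
    #include <limits>

    size_t WidenForFreeList(int size_in_bytes) {
      assert(size_in_bytes >= 0);  // widening is safe when non-negative
      return static_cast<size_t>(size_in_bytes);
    }

    int NarrowForFiller(size_t size) {
      assert(size <= static_cast<size_t>(std::numeric_limits<int>::max()));
      return static_cast<int>(size);
    }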
src/heap/spaces.h

@@ -135,7 +135,8 @@ enum FreeMode { kLinkCategory, kDoNotLinkCategory };
 class FreeListCategory {
  public:
   static const int kSize = kIntSize +      // FreeListCategoryType type_
-                           kIntSize +      // int available_
+                           kIntSize +      // padding for type_
+                           kSizetSize +    // size_t available_
                            kPointerSize +  // FreeSpace* top_
                            kPointerSize +  // FreeListCategory* prev_
                            kPointerSize;   // FreeListCategory* next_
@@ -167,28 +168,28 @@ class FreeListCategory {
   // category is currently unlinked.
   void Relink();
 
-  bool Free(FreeSpace* node, int size_in_bytes, FreeMode mode);
+  bool Free(FreeSpace* node, size_t size_in_bytes, FreeMode mode);
 
   // Picks a node from the list and stores its size in |node_size|. Returns
   // nullptr if the category is empty.
-  FreeSpace* PickNodeFromList(int* node_size);
+  FreeSpace* PickNodeFromList(size_t* node_size);
 
   // Performs a single try to pick a node of at least |minimum_size| from the
   // category. Stores the actual size in |node_size|. Returns nullptr if no
   // node is found.
-  FreeSpace* TryPickNodeFromList(int minimum_size, int* node_size);
+  FreeSpace* TryPickNodeFromList(size_t minimum_size, size_t* node_size);
 
   // Picks a node of at least |minimum_size| from the category. Stores the
   // actual size in |node_size|. Returns nullptr if no node is found.
-  FreeSpace* SearchForNodeInList(int minimum_size, int* node_size);
+  FreeSpace* SearchForNodeInList(size_t minimum_size, size_t* node_size);
 
   inline FreeList* owner();
   inline bool is_linked();
   bool is_empty() { return top() == nullptr; }
-  int available() const { return available_; }
+  size_t available() const { return available_; }
 
 #ifdef DEBUG
-  intptr_t SumFreeList();
+  size_t SumFreeList();
   int FreeListLength();
 #endif
@@ -211,7 +212,7 @@ class FreeListCategory {
 
   // |available_|: Total available bytes in all blocks of this free list
   // category.
-  int available_;
+  size_t available_;
 
   // |top_|: Points to the top FreeSpace* in the free list category.
   FreeSpace* top_;
@ -320,26 +321,26 @@ class MemoryChunk {
|
||||
static const intptr_t kFlagsOffset = kSizeOffset + kPointerSize;
|
||||
|
||||
static const size_t kMinHeaderSize =
|
||||
kSizeOffset + kPointerSize // size_t size
|
||||
+ kIntptrSize // Flags flags_
|
||||
+ kPointerSize // Address area_start_
|
||||
+ kPointerSize // Address area_end_
|
||||
+ 2 * kPointerSize // base::VirtualMemory reservation_
|
||||
+ kPointerSize // Address owner_
|
||||
+ kPointerSize // Heap* heap_
|
||||
+ kIntSize // int progress_bar_
|
||||
+ kIntSize // int live_bytes_count_
|
||||
+ kPointerSize // SlotSet* old_to_new_slots_
|
||||
+ kPointerSize // SlotSet* old_to_old_slots_
|
||||
+ kPointerSize // TypedSlotSet* typed_old_to_new_slots_
|
||||
+ kPointerSize // TypedSlotSet* typed_old_to_old_slots_
|
||||
+ kPointerSize // SkipList* skip_list_
|
||||
+ kPointerSize // AtomicValue high_water_mark_
|
||||
+ kPointerSize // base::Mutex* mutex_
|
||||
+ kPointerSize // base::AtomicWord concurrent_sweeping_
|
||||
+ 2 * kPointerSize // AtomicNumber free-list statistics
|
||||
+ kPointerSize // AtomicValue next_chunk_
|
||||
+ kPointerSize // AtomicValue prev_chunk_
|
||||
kSizeOffset + kSizetSize // size_t size
|
||||
+ kIntptrSize // Flags flags_
|
||||
+ kPointerSize // Address area_start_
|
||||
+ kPointerSize // Address area_end_
|
||||
+ 2 * kPointerSize // base::VirtualMemory reservation_
|
||||
+ kPointerSize // Address owner_
|
||||
+ kPointerSize // Heap* heap_
|
||||
+ kIntSize // int progress_bar_
|
||||
+ kIntSize // int live_bytes_count_
|
||||
+ kPointerSize // SlotSet* old_to_new_slots_
|
||||
+ kPointerSize // SlotSet* old_to_old_slots_
|
||||
+ kPointerSize // TypedSlotSet* typed_old_to_new_slots_
|
||||
+ kPointerSize // TypedSlotSet* typed_old_to_old_slots_
|
||||
+ kPointerSize // SkipList* skip_list_
|
||||
+ kPointerSize // AtomicValue high_water_mark_
|
||||
+ kPointerSize // base::Mutex* mutex_
|
||||
+ kPointerSize // base::AtomicWord concurrent_sweeping_
|
||||
+ 2 * kSizetSize // AtomicNumber free-list statistics
|
||||
+ kPointerSize // AtomicValue next_chunk_
|
||||
+ kPointerSize // AtomicValue prev_chunk_
|
||||
// FreeListCategory categories_[kNumberOfCategories]
|
||||
+ FreeListCategory::kSize * kNumberOfCategories +
|
||||
kPointerSize // LocalArrayBufferTracker* local_tracker_
|
||||
@@ -459,7 +460,7 @@ class MemoryChunk {
 
   Address area_start() { return area_start_; }
   Address area_end() { return area_end_; }
-  int area_size() { return static_cast<int>(area_end() - area_start()); }
+  size_t area_size() { return static_cast<size_t>(area_end() - area_start()); }
 
   bool CommitArea(size_t requested);
@@ -749,13 +750,10 @@ class Page : public MemoryChunk {
   }
 
   // Returns the offset of a given address to this page.
-  inline int Offset(Address a) {
-    int offset = static_cast<int>(a - address());
-    return offset;
-  }
+  inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }
 
   // Returns the address for a given offset in this page.
-  Address OffsetToAddress(int offset) {
+  Address OffsetToAddress(size_t offset) {
     DCHECK_PAGE_OFFSET(offset);
     return address() + offset;
   }
@@ -775,9 +773,11 @@ class Page : public MemoryChunk {
 
   void ResetFreeListStatistics();
 
-  int LiveBytesFromFreeList() {
-    return static_cast<int>(area_size() - wasted_memory() -
-                            available_in_free_list());
+  size_t AvailableInFreeList();
+
+  size_t LiveBytesFromFreeList() {
+    DCHECK_GE(area_size(), wasted_memory() + available_in_free_list());
+    return area_size() - wasted_memory() - available_in_free_list();
   }
 
   FreeListCategory* free_list_category(FreeListCategoryType type) {
@@ -786,12 +786,18 @@ class Page : public MemoryChunk {
 
   bool is_anchor() { return IsFlagSet(Page::ANCHOR); }
 
-  intptr_t wasted_memory() { return wasted_memory_.Value(); }
-  void add_wasted_memory(intptr_t waste) { wasted_memory_.Increment(waste); }
-  intptr_t available_in_free_list() { return available_in_free_list_.Value(); }
-  void add_available_in_free_list(intptr_t available) {
+  size_t wasted_memory() { return wasted_memory_.Value(); }
+  void add_wasted_memory(size_t waste) { wasted_memory_.Increment(waste); }
+  size_t available_in_free_list() { return available_in_free_list_.Value(); }
+  void add_available_in_free_list(size_t available) {
+    DCHECK_LE(available, area_size());
     available_in_free_list_.Increment(available);
   }
+  void remove_available_in_free_list(size_t available) {
+    DCHECK_LE(available, area_size());
+    DCHECK_GE(available_in_free_list(), available);
+    available_in_free_list_.Decrement(available);
+  }
 
   size_t ShrinkToHighWaterMark();
@@ -1693,12 +1699,12 @@ class FreeList {
   // was too small. Bookkeeping information will be written to the block, i.e.,
   // its contents will be destroyed. The start address should be word aligned,
   // and the size should be a non-zero multiple of the word size.
-  int Free(Address start, int size_in_bytes, FreeMode mode);
+  size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
 
   // Allocate a block of size {size_in_bytes} from the free list. The block is
   // uninitialized. A failure is returned if no block is available. The size
   // should be a non-zero multiple of the word size.
-  MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
+  MUST_USE_RESULT HeapObject* Allocate(size_t size_in_bytes);
 
   // Clear the free list.
   void Reset();
@@ -1729,11 +1735,11 @@ class FreeList {
   // Used after booting the VM.
   void RepairLists(Heap* heap);
 
-  intptr_t EvictFreeListItems(Page* page);
+  size_t EvictFreeListItems(Page* page);
   bool ContainsPageFreeListItems(Page* page);
 
   PagedSpace* owner() { return owner_; }
-  intptr_t wasted_bytes() { return wasted_bytes_.Value(); }
+  size_t wasted_bytes() { return wasted_bytes_.Value(); }
 
   template <typename Callback>
   void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
@@ -1757,7 +1763,7 @@ class FreeList {
   void PrintCategories(FreeListCategoryType type);
 
 #ifdef DEBUG
-  intptr_t SumFreeLists();
+  size_t SumFreeLists();
   bool IsVeryLong();
 #endif
@@ -1781,33 +1787,33 @@ class FreeList {
   };
 
   // The size range of blocks, in bytes.
-  static const int kMinBlockSize = 3 * kPointerSize;
-  static const int kMaxBlockSize = Page::kAllocatableMemory;
+  static const size_t kMinBlockSize = 3 * kPointerSize;
+  static const size_t kMaxBlockSize = Page::kAllocatableMemory;
 
-  static const int kTiniestListMax = 0xa * kPointerSize;
-  static const int kTinyListMax = 0x1f * kPointerSize;
-  static const int kSmallListMax = 0xff * kPointerSize;
-  static const int kMediumListMax = 0x7ff * kPointerSize;
-  static const int kLargeListMax = 0x3fff * kPointerSize;
-  static const int kTinyAllocationMax = kTiniestListMax;
-  static const int kSmallAllocationMax = kTinyListMax;
-  static const int kMediumAllocationMax = kSmallListMax;
-  static const int kLargeAllocationMax = kMediumListMax;
+  static const size_t kTiniestListMax = 0xa * kPointerSize;
+  static const size_t kTinyListMax = 0x1f * kPointerSize;
+  static const size_t kSmallListMax = 0xff * kPointerSize;
+  static const size_t kMediumListMax = 0x7ff * kPointerSize;
+  static const size_t kLargeListMax = 0x3fff * kPointerSize;
+  static const size_t kTinyAllocationMax = kTiniestListMax;
+  static const size_t kSmallAllocationMax = kTinyListMax;
+  static const size_t kMediumAllocationMax = kSmallListMax;
+  static const size_t kLargeAllocationMax = kMediumListMax;
 
-  FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
+  FreeSpace* FindNodeFor(size_t size_in_bytes, size_t* node_size);
 
   // Walks all available categories for a given |type| and tries to retrieve
   // a node. Returns nullptr if the category is empty.
-  FreeSpace* FindNodeIn(FreeListCategoryType type, int* node_size);
+  FreeSpace* FindNodeIn(FreeListCategoryType type, size_t* node_size);
 
   // Tries to retrieve a node from the first category in a given |type|.
   // Returns nullptr if the category is empty.
-  FreeSpace* TryFindNodeIn(FreeListCategoryType type, int* node_size,
-                           int minimum_size);
+  FreeSpace* TryFindNodeIn(FreeListCategoryType type, size_t* node_size,
+                           size_t minimum_size);
 
   // Searches a given |type| for a node of at least |minimum_size|.
-  FreeSpace* SearchForNodeInList(FreeListCategoryType type, int* node_size,
-                                 int minimum_size);
+  FreeSpace* SearchForNodeInList(FreeListCategoryType type, size_t* node_size,
+                                 size_t minimum_size);
 
   FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
     if (size_in_bytes <= kTiniestListMax) {
@@ -1840,7 +1846,7 @@ class FreeList {
   FreeListCategory* top(FreeListCategoryType type) { return categories_[type]; }
 
   PagedSpace* owner_;
-  base::AtomicNumber<intptr_t> wasted_bytes_;
+  base::AtomicNumber<size_t> wasted_bytes_;
   FreeListCategory* categories_[kNumberOfCategories];
 
   friend class FreeListCategory;
@@ -2027,14 +2033,16 @@ class PagedSpace : public Space {
   // the free list or accounted as waste.
   // If add_to_freelist is false then just accounting stats are updated and
   // no attempt to add area to free list is made.
-  int Free(Address start, int size_in_bytes) {
-    int wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
+  size_t Free(Address start, size_t size_in_bytes) {
+    size_t wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
     accounting_stats_.DeallocateBytes(size_in_bytes);
+    DCHECK_GE(size_in_bytes, wasted);
     return size_in_bytes - wasted;
   }
 
-  int UnaccountedFree(Address start, int size_in_bytes) {
-    int wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
+  size_t UnaccountedFree(Address start, size_t size_in_bytes) {
+    size_t wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
+    DCHECK_GE(size_in_bytes, wasted);
     return size_in_bytes - wasted;
   }

test/cctest/heap/heap-utils.cc

@@ -170,6 +170,10 @@ void SimulateIncrementalMarking(i::Heap* heap, bool force_completion) {
 }
 
 void SimulateFullSpace(v8::internal::PagedSpace* space) {
+  i::MarkCompactCollector* collector = space->heap()->mark_compact_collector();
+  if (collector->sweeping_in_progress()) {
+    collector->EnsureSweepingCompleted();
+  }
   space->EmptyAllocationInfo();
   space->ResetFreeList();
   space->ClearStats();