[heap] Add more tasks for parallel compaction

- We now compute the number of parallel compaction tasks based on the length
  of the evacuation candidate list, the number of cores, and a hard limit
  (see the sketch below).
- Free memory is moved over to the compaction tasks (up to a limit; 1 MB per
  compaction free list in this patch).
- Memory is moved over by dividing the free list of a given space up among
  other free lists. Since this is potentially slow, we limit the maximum
  amount of moved memory.
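
For illustration only, a minimal standalone sketch of the capping formula: the
constants mirror the new NumberOfParallelCompactionTasks() below, while the
helper name and its parameters are invented for this example and are not part
of the change.

  #include <algorithm>

  // Cap the task count by a per-candidate heuristic, the available cores,
  // and a hard limit, mirroring the logic added in this patch.
  const int kPagesPerCompactionTask = 4;
  const int kMaxCompactionTasks = 8;

  int NumberOfTasks(int num_evacuation_candidates, int num_cores) {
    return std::min(
        kMaxCompactionTasks,
        std::min(1 + num_evacuation_candidates / kPagesPerCompactionTask,
                 std::max(1, num_cores - 1)));
  }

  // Example: 12 evacuation candidates on an 8-core machine yield
  // std::min(8, std::min(1 + 12 / 4, 7)) = 4 tasks, the main thread included.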

BUG=chromium:524425
LOG=N

Review URL: https://codereview.chromium.org/1354383002

Cr-Commit-Position: refs/heads/master@{#30886}
Author: mlippautz (committed by Commit bot), 2015-09-23 05:28:55 -07:00
Commit: 0e84241883
Parent: f5dc276a2c
5 changed files with 145 additions and 44 deletions

src/heap/mark-compact.cc

@@ -6,6 +6,7 @@
#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/sys-info.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/cpu-profiler.h"
@@ -3370,26 +3371,39 @@ bool MarkCompactCollector::EvacuateLiveObjectsFromPage(
}
int MarkCompactCollector::NumberOfParallelCompactionTasks() {
if (!FLAG_parallel_compaction) return 1;
// We cap the number of parallel compaction tasks by
// - (#cores - 1)
// - a value depending on the list of evacuation candidates
// - a hard limit
const int kPagesPerCompactionTask = 4;
const int kMaxCompactionTasks = 8;
return Min(kMaxCompactionTasks,
Min(1 + evacuation_candidates_.length() / kPagesPerCompactionTask,
Max(1, base::SysInfo::NumberOfProcessors() - 1)));
}
void MarkCompactCollector::EvacuatePagesInParallel() {
if (evacuation_candidates_.length() == 0) return;
int num_tasks = 1;
if (FLAG_parallel_compaction) {
num_tasks = NumberOfParallelCompactionTasks();
}
const int num_tasks = NumberOfParallelCompactionTasks();
// Set up compaction spaces.
CompactionSpaceCollection** compaction_spaces_for_tasks =
new CompactionSpaceCollection*[num_tasks];
FreeList** free_lists = new FreeList*[2 * num_tasks];
for (int i = 0; i < num_tasks; i++) {
compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
free_lists[i] = compaction_spaces_for_tasks[i]->Get(OLD_SPACE)->free_list();
free_lists[i + num_tasks] =
compaction_spaces_for_tasks[i]->Get(CODE_SPACE)->free_list();
}
compaction_spaces_for_tasks[0]->Get(OLD_SPACE)->MoveOverFreeMemory(
heap()->old_space());
compaction_spaces_for_tasks[0]
->Get(CODE_SPACE)
->MoveOverFreeMemory(heap()->code_space());
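// Distribute the free lists of the old and code space among the tasks' free
// lists. The 1 MB limit bounds the amount of memory moved, since nodes are
// moved one by one.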
heap()->old_space()->DivideFreeLists(free_lists, num_tasks, 1 * MB);
heap()->code_space()->DivideFreeLists(&free_lists[num_tasks], num_tasks,
1 * MB);
delete[] free_lists;
compaction_in_progress_ = true;
// Kick off parallel tasks.
@@ -3400,10 +3414,8 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
v8::Platform::kShortRunningTask);
}
// Contribute in main thread. Counter and signal are in principle not needed.
concurrent_compaction_tasks_active_++;
// Perform compaction on the main thread.
EvacuatePages(compaction_spaces_for_tasks[0], &migration_slots_buffer_);
pending_compaction_tasks_semaphore_.Signal();
WaitUntilCompactionCompleted();

src/heap/mark-compact.h

@@ -709,11 +709,8 @@ class MarkCompactCollector {
void EvacuatePagesInParallel();
int NumberOfParallelCompactionTasks() {
// TODO(hpayer, mlippautz): Figure out some logic to determine the number
// of compaction tasks.
return 1;
}
// The number of parallel compaction tasks, including the main thread.
int NumberOfParallelCompactionTasks();
void WaitUntilCompactionCompleted();

src/heap/spaces.cc

@@ -2218,6 +2218,40 @@ intptr_t FreeList::Concatenate(FreeList* free_list) {
}
FreeSpace* PagedSpace::TryRemoveMemory() {
FreeSpace* space = nullptr;
int node_size = 0;
space = free_list()->FindNodeIn(FreeList::kHuge, &node_size);
if (space == nullptr)
space = free_list()->FindNodeIn(FreeList::kLarge, &node_size);
if (space == nullptr)
space = free_list()->FindNodeIn(FreeList::kMedium, &node_size);
if (space == nullptr)
space = free_list()->FindNodeIn(FreeList::kSmall, &node_size);
if (space != nullptr) {
accounting_stats_.DecreaseCapacity(node_size);
}
return space;
}
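// Distributes nodes round-robin among {free_lists} until this space's free
// list is exhausted or a receiving list reaches {limit} bytes.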
void PagedSpace::DivideFreeLists(FreeList** free_lists, int num,
intptr_t limit) {
CHECK(num > 0);
CHECK(free_lists != nullptr);
if (limit == 0) {
limit = std::numeric_limits<intptr_t>::max();
}
int index = 0;
FreeSpace* space = nullptr;
while (((space = TryRemoveMemory()) != nullptr) &&
(free_lists[index]->available() < limit)) {
free_lists[index]->owner()->AddMemory(space->address(), space->size());
index = (index + 1) % num;
}
}
void FreeList::Reset() {
small_list_.Reset();
medium_list_.Reset();
@@ -2261,39 +2295,62 @@ int FreeList::Free(Address start, int size_in_bytes) {
}
void FreeList::UpdateFragmentationStats(FreeListCategoryType category,
Address address, int size) {
Page* page = Page::FromAddress(address);
switch (category) {
case kSmall:
page->add_available_in_small_free_list(size);
break;
case kMedium:
page->add_available_in_medium_free_list(size);
break;
case kLarge:
page->add_available_in_large_free_list(size);
break;
case kHuge:
page->add_available_in_huge_free_list(size);
break;
default:
UNREACHABLE();
}
}
FreeSpace* FreeList::FindNodeIn(FreeListCategoryType category, int* node_size) {
FreeSpace* node = GetFreeListCategory(category)->PickNodeFromList(node_size);
if (node != nullptr) {
UpdateFragmentationStats(category, node->address(), -(*node_size));
DCHECK(IsVeryLong() || available() == SumFreeLists());
}
return node;
}
FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
FreeSpace* node = NULL;
Page* page = NULL;
if (size_in_bytes <= kSmallAllocationMax) {
node = small_list_.PickNodeFromList(node_size);
if (node != NULL) {
DCHECK(size_in_bytes <= *node_size);
page = Page::FromAddress(node->address());
page->add_available_in_small_free_list(-(*node_size));
DCHECK(IsVeryLong() || available() == SumFreeLists());
node = FindNodeIn(kSmall, node_size);
if (node != nullptr) {
DCHECK(size_in_bytes <= node->size());
return node;
}
}
if (size_in_bytes <= kMediumAllocationMax) {
node = medium_list_.PickNodeFromList(node_size);
if (node != NULL) {
DCHECK(size_in_bytes <= *node_size);
page = Page::FromAddress(node->address());
page->add_available_in_medium_free_list(-(*node_size));
DCHECK(IsVeryLong() || available() == SumFreeLists());
node = FindNodeIn(kMedium, node_size);
if (node != nullptr) {
DCHECK(size_in_bytes <= node->size());
return node;
}
}
if (size_in_bytes <= kLargeAllocationMax) {
node = large_list_.PickNodeFromList(node_size);
if (node != NULL) {
DCHECK(size_in_bytes <= *node_size);
page = Page::FromAddress(node->address());
page->add_available_in_large_free_list(-(*node_size));
DCHECK(IsVeryLong() || available() == SumFreeLists());
node = FindNodeIn(kLarge, node_size);
if (node != nullptr) {
DCHECK(size_in_bytes <= node->size());
return node;
}
}

src/heap/spaces.h

@@ -1682,6 +1682,8 @@ class FreeList {
PagedSpace* owner() { return owner_; }
private:
enum FreeListCategoryType { kSmall, kMedium, kLarge, kHuge };
// The size range of blocks, in bytes.
static const int kMinBlockSize = 3 * kPointerSize;
static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
@@ -1695,6 +1697,27 @@
static const int kLargeAllocationMax = kMediumListMax;
FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
FreeSpace* FindNodeIn(FreeListCategoryType category, int* node_size);
FreeListCategory* GetFreeListCategory(FreeListCategoryType category) {
switch (category) {
case kSmall:
return &small_list_;
case kMedium:
return &medium_list_;
case kLarge:
return &large_list_;
case kHuge:
return &huge_list_;
default:
UNREACHABLE();
}
UNREACHABLE();
return nullptr;
}
void UpdateFragmentationStats(FreeListCategoryType category, Address address,
int size);
PagedSpace* owner_;
Heap* heap_;
@@ -1703,6 +1726,8 @@
FreeListCategory large_list_;
FreeListCategory huge_list_;
friend class PagedSpace;
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
};
@@ -1985,6 +2010,22 @@ class PagedSpace : public Space {
virtual bool is_local() { return false; }
// Divides the free lists of {this} space up among {other_free_lists}, moving
// up to {limit} bytes to each list. Note that this operation eventually needs
// to iterate over nodes one-by-one, making it a potentially slow operation.
void DivideFreeLists(FreeList** other_free_lists, int num, intptr_t limit);
// Adds memory starting at {start} of {size_in_bytes} to the space.
void AddMemory(Address start, int size_in_bytes) {
IncreaseCapacity(size_in_bytes);
Free(start, size_in_bytes);
}
// Tries to remove some memory from the free lists of {this} space. We try to
// remove as much memory as possible, i.e., we check the free list categories
// from huge to small.
FreeSpace* TryRemoveMemory();
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
@@ -2732,12 +2773,6 @@ class CompactionSpace : public PagedSpace {
CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
: PagedSpace(heap, id, executable) {}
// Adds external memory starting at {start} of {size_in_bytes} to the space.
void AddExternalMemory(Address start, int size_in_bytes) {
IncreaseCapacity(size_in_bytes);
Free(start, size_in_bytes);
}
virtual bool is_local() { return true; }
protected:

test/cctest/test-spaces.cc

@@ -507,8 +507,8 @@ TEST(CompactionSpaceUsingExternalMemory) {
CHECK_EQ(compaction_space->CountTotalPages(), 0);
CHECK_EQ(compaction_space->Capacity(), 0);
// Make the rest of memory available for compaction.
compaction_space->AddExternalMemory(HeapObject::cast(chunk)->address(),
static_cast<int>(rest));
compaction_space->AddMemory(HeapObject::cast(chunk)->address(),
static_cast<int>(rest));
CHECK_EQ(compaction_space->CountTotalPages(), 0);
CHECK_EQ(compaction_space->Capacity(), rest);
while (num_rest_objects-- > 0) {