[heap] Add support for multiple concurrent marking tasks.

BUG=chromium:694255

Change-Id: Ib0403a2d406428d2cd7896521abb6e95c3841c1c
Reviewed-on: https://chromium-review.googlesource.com/563364
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#46505}
Author: Ulan Degenbaev <ulan@chromium.org> (committed by Commit Bot)
Date: 2017-07-10 11:49:45 +02:00
Commit: 5359d8680a (parent: b2133cd615)
6 changed files with 65 additions and 34 deletions
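
In outline: Start() now fans out kTasks background tasks instead of one,
each task serializes with the scavenger through its own mutex rather than
the heap's relocation mutex, EnsureCompleted() waits on the semaphore once
per outstanding task, and the new PauseScope stops all tasks by taking
every per-task lock. A minimal standalone sketch of the fan-out/completion
protocol, with std::thread standing in for CallOnBackgroundThread and a
hypothetical condition-variable Semaphore standing in for base::Semaphore:

  #include <condition_variable>
  #include <mutex>
  #include <thread>
  #include <vector>

  // Hypothetical stand-in for V8's base::Semaphore.
  class Semaphore {
   public:
    void Signal() {
      std::lock_guard<std::mutex> guard(mutex_);
      ++count_;
      cv_.notify_one();
    }
    void Wait() {
      std::unique_lock<std::mutex> lock(mutex_);
      cv_.wait(lock, [this] { return count_ > 0; });
      --count_;
    }
   private:
    std::mutex mutex_;
    std::condition_variable cv_;
    int count_ = 0;
  };

  constexpr int kTasks = 4;

  int main() {
    Semaphore on_finish;
    std::vector<std::thread> tasks;

    // Start(): fan out one task per id; remember how many were started.
    int pending_task_count = kTasks;
    for (int i = 0; i < kTasks; i++) {
      int task_id = i + 1;  // Task ids start at 1, as in the commit.
      tasks.emplace_back([task_id, &on_finish] {
        (void)task_id;       // ... drain the shared marking worklist ...
        on_finish.Signal();  // Exactly one Signal() per finished task.
      });
    }

    // EnsureCompleted(): one Wait() per outstanding task.
    while (pending_task_count > 0) {
      on_finish.Wait();
      pending_task_count--;
    }
    for (auto& t : tasks) t.join();
  }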

--- a/src/heap/concurrent-marking.cc
+++ b/src/heap/concurrent-marking.cc

@@ -255,10 +255,11 @@ class ConcurrentMarkingVisitor final
 class ConcurrentMarking::Task : public CancelableTask {
  public:
   Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
-       base::Semaphore* on_finish, int task_id)
+       base::Semaphore* on_finish, base::Mutex* lock, int task_id)
       : CancelableTask(isolate),
         concurrent_marking_(concurrent_marking),
         on_finish_(on_finish),
+        lock_(lock),
         task_id_(task_id) {}
 
   virtual ~Task() {}
@@ -266,12 +267,13 @@ class ConcurrentMarking::Task : public CancelableTask {
  private:
   // v8::internal::CancelableTask overrides.
   void RunInternal() override {
-    concurrent_marking_->Run(task_id_);
+    concurrent_marking_->Run(task_id_, lock_);
     on_finish_->Signal();
   }
 
   ConcurrentMarking* concurrent_marking_;
   base::Semaphore* on_finish_;
+  base::Mutex* lock_;
   int task_id_;
   DISALLOW_COPY_AND_ASSIGN(Task);
 };
@@ -279,25 +281,24 @@ class ConcurrentMarking::Task : public CancelableTask {
 ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
                                      MarkingWorklist* bailout)
     : heap_(heap),
-      pending_task_semaphore_(0),
       shared_(shared),
       bailout_(bailout),
-      is_task_pending_(false) {
+      pending_task_semaphore_(0),
+      pending_task_count_(0) {
   // The runtime flag should be set only if the compile time flag was set.
 #ifndef V8_CONCURRENT_MARKING
   CHECK(!FLAG_concurrent_marking);
 #endif
 }
 
-void ConcurrentMarking::Run(int task_id) {
+void ConcurrentMarking::Run(int task_id, base::Mutex* lock) {
   ConcurrentMarkingVisitor visitor(shared_, bailout_, task_id);
   double time_ms = heap_->MonotonicallyIncreasingTimeInMs();
   size_t bytes_marked = 0;
-  base::Mutex* relocation_mutex = heap_->relocation_mutex();
   {
     TimedScope scope(&time_ms);
     while (true) {
-      base::LockGuard<base::Mutex> guard(relocation_mutex);
+      base::LockGuard<base::Mutex> guard(lock);
       HeapObject* object;
       if (!shared_->Pop(task_id, &object)) break;
       Address new_space_top = heap_->new_space()->original_top();
@@ -313,7 +314,7 @@ void ConcurrentMarking::Run(int task_id) {
   {
     // Take the lock to synchronize with worklist update after
     // young generation GC.
-    base::LockGuard<base::Mutex> guard(relocation_mutex);
+    base::LockGuard<base::Mutex> guard(lock);
     bailout_->FlushToGlobal(task_id);
   }
 }
@@ -324,25 +325,38 @@ void ConcurrentMarking::Run(int task_id) {
   }
 }
 
-void ConcurrentMarking::StartTask() {
-  const int kConcurrentMarkingTaskId = 1;
+void ConcurrentMarking::Start() {
   if (!FLAG_concurrent_marking) return;
-  is_task_pending_ = true;
-  V8::GetCurrentPlatform()->CallOnBackgroundThread(
-      new Task(heap_->isolate(), this, &pending_task_semaphore_,
-               kConcurrentMarkingTaskId),
-      v8::Platform::kShortRunningTask);
+  pending_task_count_ = kTasks;
+  for (int i = 0; i < kTasks; i++) {
+    int task_id = i + 1;
+    V8::GetCurrentPlatform()->CallOnBackgroundThread(
+        new Task(heap_->isolate(), this, &pending_task_semaphore_,
+                 &task_lock_[i].lock, task_id),
+        v8::Platform::kShortRunningTask);
+  }
 }
 
-void ConcurrentMarking::WaitForTaskToComplete() {
+void ConcurrentMarking::EnsureCompleted() {
   if (!FLAG_concurrent_marking) return;
-  pending_task_semaphore_.Wait();
-  is_task_pending_ = false;
+  while (pending_task_count_ > 0) {
+    pending_task_semaphore_.Wait();
+    pending_task_count_--;
+  }
 }
 
-void ConcurrentMarking::EnsureTaskCompleted() {
-  if (IsTaskPending()) {
-    WaitForTaskToComplete();
+ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
+    : concurrent_marking_(concurrent_marking) {
+  if (!FLAG_concurrent_marking) return;
+  for (int i = 0; i < kTasks; i++) {
+    concurrent_marking_->task_lock_[i].lock.Lock();
+  }
+}
+
+ConcurrentMarking::PauseScope::~PauseScope() {
+  if (!FLAG_concurrent_marking) return;
+  for (int i = kTasks - 1; i >= 0; i--) {
+    concurrent_marking_->task_lock_[i].lock.Unlock();
   }
 }
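
Two properties make the pause protocol sound: each marking task re-acquires
its lock for every object it pops, so a pauser can get in between any two
objects, and PauseScope takes the kTasks locks in ascending index order
(releasing in reverse), so the fixed acquisition order rules out deadlock
between concurrent pausers. A minimal RAII sketch of the same idea in
portable C++, with illustrative names:

  #include <mutex>

  constexpr int kTasks = 4;
  std::mutex task_locks[kTasks];  // Stand-ins for task_lock_[i].lock.

  // While an instance is alive, every worker is parked at its next
  // lock acquisition and none is touching shared state.
  class PauseAllScope {
   public:
    PauseAllScope() {
      for (int i = 0; i < kTasks; i++) task_locks[i].lock();
    }
    ~PauseAllScope() {
      for (int i = kTasks - 1; i >= 0; i--) task_locks[i].unlock();
    }
  };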

--- a/src/heap/concurrent-marking.h
+++ b/src/heap/concurrent-marking.h

@@ -20,24 +20,40 @@ class Worklist;
 class ConcurrentMarking {
  public:
+  // When the scope is entered, the concurrent marking tasks
+  // are paused and are not looking at the heap objects.
+  class PauseScope {
+   public:
+    explicit PauseScope(ConcurrentMarking* concurrent_marking);
+    ~PauseScope();
+
+   private:
+    ConcurrentMarking* concurrent_marking_;
+  };
+
+  static const int kTasks = 4;
   using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;
 
   ConcurrentMarking(Heap* heap, MarkingWorklist* shared_,
                     MarkingWorklist* bailout_);
 
-  void StartTask();
-  void WaitForTaskToComplete();
-  bool IsTaskPending() { return is_task_pending_; }
-  void EnsureTaskCompleted();
+  void Start();
+  bool IsRunning() { return pending_task_count_ > 0; }
+  void EnsureCompleted();
 
  private:
+  struct TaskLock {
+    base::Mutex lock;
+    char cache_line_padding[64];
+  };
   class Task;
-  void Run(int task_id);
+  void Run(int task_id, base::Mutex* lock);
   Heap* heap_;
-  base::Semaphore pending_task_semaphore_;
   MarkingWorklist* shared_;
   MarkingWorklist* bailout_;
-  bool is_task_pending_;
+  TaskLock task_lock_[kTasks];
+  base::Semaphore pending_task_semaphore_;
+  int pending_task_count_;
 };
 
 }  // namespace internal
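
The cache_line_padding member in TaskLock keeps each task's mutex on its
own cache line; without it, neighboring mutexes would share a line and
every lock or unlock by one worker would invalidate the line under its
siblings (false sharing). Assuming a 64-byte cache line, the same intent
can be expressed with alignas:

  #include <mutex>

  // Each element occupies a multiple of a full 64-byte cache line, so
  // workers hammering their own lock do not disturb a sibling's line.
  struct alignas(64) PaddedMutex {
    std::mutex lock;
  };
  static_assert(sizeof(PaddedMutex) % 64 == 0, "padded to line size");

  PaddedMutex task_locks[4];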

--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc

@@ -1176,7 +1176,7 @@ void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
   DCHECK(array->map() != fixed_cow_array_map());
   Object** dst = array->data_start() + dst_index;
   Object** src = array->data_start() + src_index;
-  if (FLAG_concurrent_marking && concurrent_marking()->IsTaskPending()) {
+  if (FLAG_concurrent_marking && concurrent_marking()->IsRunning()) {
     if (dst < src) {
       for (int i = 0; i < len; i++) {
         base::AsAtomicWord::Relaxed_Store(
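
The guard keeps its old meaning under the rename: while any marker task may
be scanning the array concurrently, MoveElements copies slot by slot with
relaxed atomic stores instead of a bulk memmove, so a concurrent reader
observes either the old or the new pointer, never a torn value. A sketch of
the pattern with std::atomic (V8 uses base::AsAtomicWord; the function name
here is hypothetical):

  #include <atomic>
  #include <cstddef>

  // Move len slots within an array that other threads may be reading:
  // per-slot relaxed stores keep each pointer-sized write indivisible.
  void MoveSlots(std::atomic<void*>* dst, std::atomic<void*>* src,
                 size_t len) {
    if (dst < src) {  // Copy forwards, as in Heap::MoveElements.
      for (size_t i = 0; i < len; i++) {
        dst[i].store(src[i].load(std::memory_order_relaxed),
                     std::memory_order_relaxed);
      }
    } else {  // Overlapping ranges: copy backwards, as memmove would.
      for (size_t i = len; i > 0; i--) {
        dst[i - 1].store(src[i - 1].load(std::memory_order_relaxed),
                         std::memory_order_relaxed);
      }
    }
  }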
@@ -1647,6 +1647,7 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
 void Heap::EvacuateYoungGeneration() {
   TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_EVACUATE);
   base::LockGuard<base::Mutex> guard(relocation_mutex());
+  ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
   if (!FLAG_concurrent_marking) {
     DCHECK(fast_promotion_mode_);
     DCHECK(CanExpandOldGeneration(new_space()->Size()));
@@ -1696,6 +1697,7 @@ static bool IsLogging(Isolate* isolate) {
 void Heap::Scavenge() {
   TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
   base::LockGuard<base::Mutex> guard(relocation_mutex());
+  ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
   // There are soft limits in the allocation code, designed to trigger a mark
   // sweep collection by failing allocations. There is no sense in trying to
   // trigger one during scavenge: scavenges allocation should always succeed.

--- a/src/heap/incremental-marking.cc
+++ b/src/heap/incremental-marking.cc

@@ -592,8 +592,7 @@ void IncrementalMarking::StartMarking() {
   heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
 
   if (FLAG_concurrent_marking) {
-    ConcurrentMarking* concurrent_marking = heap_->concurrent_marking();
-    concurrent_marking->StartTask();
+    heap_->concurrent_marking()->Start();
   }
 
   // Ready to start incremental marking.

--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc

@@ -931,7 +931,7 @@ void MarkCompactCollector::Prepare() {
   // them here.
   heap()->memory_allocator()->unmapper()->WaitUntilCompleted();
 
-  heap()->concurrent_marking()->EnsureTaskCompleted();
+  heap()->concurrent_marking()->EnsureCompleted();
 
   // Clear marking bits if incremental marking is aborted.
   if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {

--- a/test/cctest/heap/test-concurrent-marking.cc
+++ b/test/cctest/heap/test-concurrent-marking.cc

@@ -28,8 +28,8 @@ TEST(ConcurrentMarking) {
   CHECK(shared.Pop(0, &object));
   ConcurrentMarking* concurrent_marking =
       new ConcurrentMarking(heap, &shared, &bailout);
-  concurrent_marking->StartTask();
-  concurrent_marking->WaitForTaskToComplete();
+  concurrent_marking->Start();
+  concurrent_marking->EnsureCompleted();
   delete concurrent_marking;
 }