[heap] Reschedule concurrent marking tasks.

If the incremental marker discovers more marking work and the concurrent
marking tasks have already exited, then new concurrent marking tasks
are scheduled to help the main thread marker.

BUG=chromium:694255

Change-Id: I559af62790e6034b23a412d3308ba8b6ae3d27c9
Reviewed-on: https://chromium-review.googlesource.com/574170
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#46703}
Ulan Degenbaev authored 2017-07-17 13:00:33 +02:00, committed by Commit Bot
commit ce04f26abb (parent 14c5c4fde7)
7 changed files with 114 additions and 49 deletions
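For orientation before the diff, here is a minimal, self-contained sketch of the scheduling pattern this change introduces. It uses std:: primitives in place of V8's base:: wrappers, CancelableTask, and the platform thread pool; the names MarkerPool, AddWork, and work_items_ are illustrative stand-ins, not V8 API. A fixed set of task slots is tracked with an is_pending_ array and a pending_task_count_ under one mutex; finishing tasks signal a condition variable, and a new batch is scheduled only once the count has dropped to zero while work remains:

// Hypothetical sketch of the pattern; not V8's actual classes.
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

class MarkerPool {
 public:
  static constexpr int kTasks = 4;

  void AddWork(int items) { work_items_ += items; }

  // Start a background task for every slot that is not already running.
  void ScheduleTasks() {
    std::lock_guard<std::mutex> guard(pending_lock_);
    for (int i = 0; i < kTasks; i++) {
      if (!is_pending_[i]) {
        is_pending_[i] = true;
        ++pending_task_count_;
        threads_.emplace_back([this, i] { Run(i); });
      }
    }
  }

  // The fix this commit makes: if every task has already exited but the
  // main-thread marker discovered more work, schedule a fresh batch.
  void RescheduleTasksIfNeeded() {
    {
      std::lock_guard<std::mutex> guard(pending_lock_);
      if (pending_task_count_ > 0) return;  // Tasks are still running.
    }
    if (work_items_.load() > 0) ScheduleTasks();
  }

  // Block until every scheduled task has finished. The condition variable
  // replaces the old one-semaphore-signal-per-task handshake.
  void EnsureCompleted() {
    std::unique_lock<std::mutex> lock(pending_lock_);
    pending_condition_.wait(lock, [this] { return pending_task_count_ == 0; });
    std::vector<std::thread> done;
    done.swap(threads_);  // V8 hands tasks to a platform pool; here we join.
    lock.unlock();
    for (std::thread& t : done) t.join();
  }

 private:
  void Run(int task_id) {
    // Stand-in for draining the shared marking worklist.
    for (int n = work_items_.load(); n > 0; n = work_items_.load()) {
      work_items_.compare_exchange_weak(n, n - 1);
    }
    std::lock_guard<std::mutex> guard(pending_lock_);
    is_pending_[task_id] = false;
    --pending_task_count_;
    pending_condition_.notify_all();  // Wake a waiting EnsureCompleted().
  }

  std::mutex pending_lock_;
  std::condition_variable pending_condition_;
  int pending_task_count_ = 0;
  bool is_pending_[kTasks] = {};
  std::atomic<int> work_items_{0};
  std::vector<std::thread> threads_;
};

int main() {
  MarkerPool pool;
  pool.AddWork(1000);
  pool.ScheduleTasks();
  pool.EnsureCompleted();
  pool.AddWork(1000);              // New work appears after tasks exited...
  pool.RescheduleTasksIfNeeded();  // ...so a fresh batch gets scheduled.
  pool.EnsureCompleted();
}

Compared with the semaphore version this replaces, the shared pending_task_count_ under pending_lock_ makes the "all tasks have exited" state directly observable, which is exactly what RescheduleTasksIfNeeded needs to check.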

src/heap/concurrent-marking.cc

@@ -255,10 +255,9 @@ class ConcurrentMarkingVisitor final
 class ConcurrentMarking::Task : public CancelableTask {
  public:
   Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
-       base::Semaphore* on_finish, base::Mutex* lock, int task_id)
+       base::Mutex* lock, int task_id)
       : CancelableTask(isolate),
         concurrent_marking_(concurrent_marking),
-        on_finish_(on_finish),
         lock_(lock),
         task_id_(task_id) {}

@@ -268,11 +267,9 @@ class ConcurrentMarking::Task : public CancelableTask {
   // v8::internal::CancelableTask overrides.
   void RunInternal() override {
     concurrent_marking_->Run(task_id_, lock_);
-    on_finish_->Signal();
   }

   ConcurrentMarking* concurrent_marking_;
-  base::Semaphore* on_finish_;
   base::Mutex* lock_;
   int task_id_;
   DISALLOW_COPY_AND_ASSIGN(Task);
@@ -283,18 +280,24 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
     : heap_(heap),
       shared_(shared),
       bailout_(bailout),
-      pending_task_semaphore_(0),
       pending_task_count_(0) {
   // The runtime flag should be set only if the compile time flag was set.
 #ifndef V8_CONCURRENT_MARKING
   CHECK(!FLAG_concurrent_marking);
 #endif
+  for (int i = 0; i <= kTasks; i++) {
+    is_pending_[i] = false;
+  }
 }

 void ConcurrentMarking::Run(int task_id, base::Mutex* lock) {
   ConcurrentMarkingVisitor visitor(shared_, bailout_, task_id);
   double time_ms;
   size_t bytes_marked = 0;
+  if (FLAG_trace_concurrent_marking) {
+    heap_->isolate()->PrintWithTimestamp(
+        "Starting concurrent marking task %d\n", task_id);
+  }
   {
     TimedScope scope(&time_ms);
     while (true) {
@@ -317,48 +320,71 @@ void ConcurrentMarking::Run(int task_id, base::Mutex* lock) {
       base::LockGuard<base::Mutex> guard(lock);
       bailout_->FlushToGlobal(task_id);
     }
+    {
+      base::LockGuard<base::Mutex> guard(&pending_lock_);
+      is_pending_[task_id] = false;
+      --pending_task_count_;
+      pending_condition_.NotifyAll();
+    }
   }
   if (FLAG_trace_concurrent_marking) {
-    heap_->isolate()->PrintWithTimestamp("concurrently marked %dKB in %.2fms\n",
-                                         static_cast<int>(bytes_marked / KB),
-                                         time_ms);
+    heap_->isolate()->PrintWithTimestamp(
+        "Task %d concurrently marked %dKB in %.2fms\n", task_id,
+        static_cast<int>(bytes_marked / KB), time_ms);
   }
 }

-void ConcurrentMarking::Start() {
+void ConcurrentMarking::ScheduleTasks() {
   if (!FLAG_concurrent_marking) return;
-  if (FLAG_trace_concurrent_marking) {
-    heap_->isolate()->PrintWithTimestamp("Starting concurrent marking\n");
-  }
-  pending_task_count_ = kTasks;
-  for (int i = 0; i < kTasks; i++) {
-    int task_id = i + 1;
-    V8::GetCurrentPlatform()->CallOnBackgroundThread(
-        new Task(heap_->isolate(), this, &pending_task_semaphore_,
-                 &task_lock_[i].lock, task_id),
-        v8::Platform::kShortRunningTask);
-  }
+  base::LockGuard<base::Mutex> guard(&pending_lock_);
+  if (pending_task_count_ < kTasks) {
+    // Task id 0 is for the main thread.
+    for (int i = 1; i <= kTasks; i++) {
+      if (!is_pending_[i]) {
+        if (FLAG_trace_concurrent_marking) {
+          heap_->isolate()->PrintWithTimestamp(
+              "Scheduling concurrent marking task %d\n", i);
+        }
+        is_pending_[i] = true;
+        ++pending_task_count_;
+        V8::GetCurrentPlatform()->CallOnBackgroundThread(
+            new Task(heap_->isolate(), this, &task_lock_[i].lock, i),
+            v8::Platform::kShortRunningTask);
+      }
+    }
+  }
 }

+void ConcurrentMarking::RescheduleTasksIfNeeded() {
+  if (!FLAG_concurrent_marking) return;
+  {
+    base::LockGuard<base::Mutex> guard(&pending_lock_);
+    if (pending_task_count_ > 0) return;
+  }
+  if (!shared_->IsGlobalPoolEmpty()) {
+    ScheduleTasks();
+  }
+}
+
 void ConcurrentMarking::EnsureCompleted() {
   if (!FLAG_concurrent_marking) return;
+  base::LockGuard<base::Mutex> guard(&pending_lock_);
   while (pending_task_count_ > 0) {
-    pending_task_semaphore_.Wait();
-    pending_task_count_--;
+    pending_condition_.Wait(&pending_lock_);
   }
 }

 ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
     : concurrent_marking_(concurrent_marking) {
   if (!FLAG_concurrent_marking) return;
-  for (int i = 0; i < kTasks; i++) {
+  for (int i = 1; i <= kTasks; i++) {
     concurrent_marking_->task_lock_[i].lock.Lock();
   }
 }

 ConcurrentMarking::PauseScope::~PauseScope() {
   if (!FLAG_concurrent_marking) return;
-  for (int i = kTasks - 1; i >= 0; i--) {
+  for (int i = kTasks; i >= 1; i--) {
     concurrent_marking_->task_lock_[i].lock.Unlock();
   }
 }

src/heap/concurrent-marking.h

@@ -36,9 +36,9 @@ class ConcurrentMarking {
   ConcurrentMarking(Heap* heap, MarkingWorklist* shared_,
                     MarkingWorklist* bailout_);

-  void Start();
-  bool IsRunning() { return pending_task_count_ > 0; }
+  void ScheduleTasks();
   void EnsureCompleted();
+  void RescheduleTasksIfNeeded();

  private:
   struct TaskLock {
@@ -50,9 +50,11 @@ class ConcurrentMarking {
   Heap* heap_;
   MarkingWorklist* shared_;
   MarkingWorklist* bailout_;
-  TaskLock task_lock_[kTasks];
-  base::Semaphore pending_task_semaphore_;
+  TaskLock task_lock_[kTasks + 1];
+  base::Mutex pending_lock_;
+  base::ConditionVariable pending_condition_;
   int pending_task_count_;
+  bool is_pending_[kTasks + 1];
 };

 }  // namespace internal

src/heap/heap.cc

@@ -1177,7 +1177,7 @@ void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
   DCHECK(array->map() != fixed_cow_array_map());
   Object** dst = array->data_start() + dst_index;
   Object** src = array->data_start() + src_index;
-  if (FLAG_concurrent_marking && concurrent_marking()->IsRunning()) {
+  if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
     if (dst < src) {
       for (int i = 0; i < len; i++) {
         base::AsAtomicWord::Relaxed_Store(

src/heap/incremental-marking.cc

@@ -592,7 +592,7 @@ void IncrementalMarking::StartMarking() {
   heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

   if (FLAG_concurrent_marking) {
-    heap_->concurrent_marking()->Start();
+    heap_->concurrent_marking()->ScheduleTasks();
   }

   // Ready to start incremental marking.
@@ -1237,6 +1237,9 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
       }
     }
   }
+  if (FLAG_concurrent_marking) {
+    heap_->concurrent_marking()->RescheduleTasksIfNeeded();
+  }

   double end = heap_->MonotonicallyIncreasingTimeInMs();
   double duration = (end - start);

test/cctest/heap/test-concurrent-marking.cc

@@ -15,20 +15,41 @@
 namespace v8 {
 namespace internal {

+void PublishSegment(ConcurrentMarking::MarkingWorklist* worklist,
+                    HeapObject* object) {
+  for (int i = 0; i <= ConcurrentMarking::MarkingWorklist::kSegmentCapacity;
+       i++) {
+    worklist->Push(0, object);
+  }
+  CHECK(worklist->Pop(0, &object));
+}
+
 TEST(ConcurrentMarking) {
   if (!i::FLAG_concurrent_marking) return;
   CcTest::InitializeVM();
   Heap* heap = CcTest::heap();
   ConcurrentMarking::MarkingWorklist shared, bailout;
-  for (int i = 0; i <= ConcurrentMarking::MarkingWorklist::kSegmentCapacity;
-       i++) {
-    shared.Push(0, heap->undefined_value());
-  }
-  HeapObject* object;
-  CHECK(shared.Pop(0, &object));
   ConcurrentMarking* concurrent_marking =
       new ConcurrentMarking(heap, &shared, &bailout);
-  concurrent_marking->Start();
+  PublishSegment(&shared, heap->undefined_value());
+  concurrent_marking->ScheduleTasks();
   concurrent_marking->EnsureCompleted();
   delete concurrent_marking;
 }
+
+TEST(ConcurrentMarkingReschedule) {
+  if (!i::FLAG_concurrent_marking) return;
+  CcTest::InitializeVM();
+  Heap* heap = CcTest::heap();
+  ConcurrentMarking::MarkingWorklist shared, bailout;
+  ConcurrentMarking* concurrent_marking =
+      new ConcurrentMarking(heap, &shared, &bailout);
+  PublishSegment(&shared, heap->undefined_value());
+  concurrent_marking->ScheduleTasks();
+  concurrent_marking->EnsureCompleted();
+  PublishSegment(&shared, heap->undefined_value());
+  concurrent_marking->RescheduleTasksIfNeeded();
+  concurrent_marking->EnsureCompleted();
+  delete concurrent_marking;
+}

test/cctest/heap/test-heap.cc

@@ -2199,9 +2199,9 @@ TEST(InstanceOfStubWriteBarrier) {
   CHECK(f->IsOptimized());

-  while (
-      !ObjectMarking::IsBlack(f->code(), MarkingState::Internal(f->code())) &&
-      !marking->IsStopped()) {
+  while (!ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
+             f->code(), MarkingState::Internal(f->code())) &&
+         !marking->IsStopped()) {
     // Discard any pending GC requests otherwise we will get GC when we enter
     // code below.
     marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
@@ -4958,8 +4958,8 @@ TEST(Regress3631) {
       v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(result));
   Handle<JSWeakCollection> weak_map(reinterpret_cast<JSWeakCollection*>(*obj));
   HeapObject* weak_map_table = HeapObject::cast(weak_map->table());
-  while (!ObjectMarking::IsBlack(weak_map_table,
-                                 MarkingState::Internal(weak_map_table)) &&
+  while (!ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
+             weak_map_table, MarkingState::Internal(weak_map_table)) &&
          !marking->IsStopped()) {
     marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
                   IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
@@ -5740,7 +5740,8 @@ TEST(Regress598319) {
   // progress bar, we would fail here.
   for (int i = 0; i < arr.get()->length(); i++) {
     HeapObject* arr_value = HeapObject::cast(arr.get()->get(i));
-    CHECK(ObjectMarking::IsBlack(arr_value, MarkingState::Internal(arr_value)));
+    CHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
+        arr_value, MarkingState::Internal(arr_value)));
   }
 }
@@ -5887,13 +5888,15 @@ TEST(LeftTrimFixedArrayInBlackArea) {
       isolate->factory()->NewFixedArray(4, TENURED);
   Handle<FixedArray> array = isolate->factory()->NewFixedArray(50, TENURED);
   CHECK(heap->old_space()->Contains(*array));
-  CHECK(ObjectMarking::IsBlack(*array, MarkingState::Internal(*array)));
+  CHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
+      *array, MarkingState::Internal(*array)));

   // Now left trim the allocated black area. A filler has to be installed
   // for the trimmed area and all mark bits of the trimmed area have to be
   // cleared.
   FixedArrayBase* trimmed = heap->LeftTrimFixedArray(*array, 10);
-  CHECK(ObjectMarking::IsBlack(trimmed, MarkingState::Internal(trimmed)));
+  CHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
+      trimmed, MarkingState::Internal(trimmed)));

   heap::GcAndSweep(heap, OLD_SPACE);
 }
@@ -5930,7 +5933,8 @@ TEST(ContinuousLeftTrimFixedArrayInBlackArea) {
   Address start_address = array->address();
   Address end_address = start_address + array->Size();
   Page* page = Page::FromAddress(start_address);
-  CHECK(ObjectMarking::IsBlack(*array, MarkingState::Internal(*array)));
+  CHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
+      *array, MarkingState::Internal(*array)));
   CHECK(MarkingState::Internal(page).bitmap()->AllBitsSetInRange(
       page->AddressToMarkbitIndex(start_address),
       page->AddressToMarkbitIndex(end_address)));
@@ -5944,8 +5948,10 @@ TEST(ContinuousLeftTrimFixedArrayInBlackArea) {
     trimmed = heap->LeftTrimFixedArray(previous, 1);
     HeapObject* filler = HeapObject::FromAddress(previous->address());
     CHECK(filler->IsFiller());
-    CHECK(ObjectMarking::IsBlack(trimmed, MarkingState::Internal(trimmed)));
-    CHECK(ObjectMarking::IsBlack(previous, MarkingState::Internal(previous)));
+    CHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
+        trimmed, MarkingState::Internal(trimmed)));
+    CHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
+        previous, MarkingState::Internal(previous)));
     previous = trimmed;
   }
@@ -5955,8 +5961,10 @@ TEST(ContinuousLeftTrimFixedArrayInBlackArea) {
     trimmed = heap->LeftTrimFixedArray(previous, i);
     HeapObject* filler = HeapObject::FromAddress(previous->address());
     CHECK(filler->IsFiller());
-    CHECK(ObjectMarking::IsBlack(trimmed, MarkingState::Internal(trimmed)));
-    CHECK(ObjectMarking::IsBlack(previous, MarkingState::Internal(previous)));
+    CHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
+        trimmed, MarkingState::Internal(trimmed)));
+    CHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
+        previous, MarkingState::Internal(previous)));
     previous = trimmed;
   }
 }
@@ -5996,7 +6004,8 @@ TEST(ContinuousRightTrimFixedArrayInBlackArea) {
   Address start_address = array->address();
   Address end_address = start_address + array->Size();
   Page* page = Page::FromAddress(start_address);
-  CHECK(ObjectMarking::IsBlack(*array, MarkingState::Internal(*array)));
+  CHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
+      *array, MarkingState::Internal(*array)));
   CHECK(MarkingState::Internal(page).bitmap()->AllBitsSetInRange(
       page->AddressToMarkbitIndex(start_address),
       page->AddressToMarkbitIndex(end_address)));
test/cctest/heap/test-page-promotion.cc

@@ -84,6 +84,7 @@ UNINITIALIZED_TEST(PagePromotion_NewToOld) {
     CHECK(!heap->new_space()->ContainsSlow(to_be_promoted_page->address()));
     CHECK(heap->old_space()->ContainsSlow(to_be_promoted_page->address()));
   }
+  isolate->Dispose();
 }

 UNINITIALIZED_TEST(PagePromotion_NewToNew) {
@@ -111,6 +112,7 @@ UNINITIALIZED_TEST(PagePromotion_NewToNew) {
     CHECK(heap->new_space()->ToSpaceContainsSlow(last_object->address()));
     CHECK(to_be_promoted_page->Contains(last_object->address()));
   }
+  isolate->Dispose();
 }

 UNINITIALIZED_TEST(PagePromotion_NewToNewJSArrayBuffer) {
@@ -152,6 +154,7 @@ UNINITIALIZED_TEST(PagePromotion_NewToNewJSArrayBuffer) {
     CHECK(to_be_promoted_page->Contains(buffer->address()));
     CHECK(ArrayBufferTracker::IsTracked(*buffer));
   }
+  isolate->Dispose();
 }

 UNINITIALIZED_HEAP_TEST(Regress658718) {
@@ -188,6 +191,7 @@ UNINITIALIZED_HEAP_TEST(Regress658718) {
     heap->mark_compact_collector()->sweeper().StartSweeperTasks();
     heap->mark_compact_collector()->EnsureSweepingCompleted();
   }
+  isolate->Dispose();
 }

 }  // namespace internal