v8/test/cctest/heap/test-concurrent-marking.cc
Ulan Degenbaev 6b5bc5e931 [heap] Refactor marking worklists
This unifies marking worklists handling by the main thread marker and
by the concurrent markers. A new class called MarkingWorklistsHolder
owns all marking worklists: the default worklist, the on-hold worklist,
and the embedder worklist. Each thread creates a local view of the
marking worklists by creating an instance of MarkingWorklists.

Additionally, marking visitors now work on MarkingWorklists instead of
accessing each worklist individually.

Besides cleaning the code up, this CL provides a bottleneck for
implementing per-context worklists.

Bug: chromium:973627
Change-Id: I52ad65c94bc0695287ba7bf4d8a814a9035e2888
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1941947
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65421}
2019-12-11 16:46:40 +00:00

152 lines
5.1 KiB
C++
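
The commit message above introduces MarkingWorklistsHolder as the owner of the shared (default), on-hold, and embedder worklists, with the main-thread marker and every concurrent marker operating through a thread-local MarkingWorklists view, and marking visitors taking that view instead of the individual worklists. The following is a minimal, self-contained C++ sketch of that ownership shape only; the class and member names mirror the commit message, but SimpleWorklist, PushShared, VisitObject, and main are simplified stand-ins invented for illustration, not the actual V8 implementation, which uses segmented, lock-free worklists.

#include <vector>

// Hypothetical stand-in for a heap object; the real code deals in HeapObject.
struct HeapObject {};

// A trivial work list. The real MarkingWorklist is a segmented structure that
// multiple threads can push to and pop from concurrently.
class SimpleWorklist {
 public:
  void Push(HeapObject* object) { items_.push_back(object); }
  bool Pop(HeapObject** object) {
    if (items_.empty()) return false;
    *object = items_.back();
    items_.pop_back();
    return true;
  }

 private:
  std::vector<HeapObject*> items_;
};

// Owns all marking worklists, mirroring MarkingWorklistsHolder.
class MarkingWorklistsHolder {
 public:
  SimpleWorklist* shared() { return &shared_; }
  SimpleWorklist* on_hold() { return &on_hold_; }
  SimpleWorklist* embedder() { return &embedder_; }

 private:
  SimpleWorklist shared_;
  SimpleWorklist on_hold_;
  SimpleWorklist embedder_;
};

// Per-thread view, mirroring MarkingWorklists: both the main-thread marker and
// each concurrent marker construct one of these over the same holder.
class MarkingWorklists {
 public:
  explicit MarkingWorklists(MarkingWorklistsHolder* holder)
      : shared_(holder->shared()),
        on_hold_(holder->on_hold()),
        embedder_(holder->embedder()) {}

  void PushShared(HeapObject* object) { shared_->Push(object); }
  bool PopShared(HeapObject** object) { return shared_->Pop(object); }
  SimpleWorklist* on_hold() { return on_hold_; }
  SimpleWorklist* embedder() { return embedder_; }

 private:
  SimpleWorklist* shared_;
  SimpleWorklist* on_hold_;
  SimpleWorklist* embedder_;
};

// Marking visitors now receive the thread-local view rather than the
// individual worklists.
void VisitObject(MarkingWorklists* worklists, HeapObject* object) {
  worklists->PushShared(object);
}

int main() {
  MarkingWorklistsHolder holder;
  MarkingWorklists main_thread_view(&holder);
  MarkingWorklists concurrent_view(&holder);

  HeapObject object;
  VisitObject(&main_thread_view, &object);

  // Work published through one view is visible through the other, because
  // both views refer to the worklists owned by the holder.
  HeapObject* popped = nullptr;
  return concurrent_view.PopShared(&popped) ? 0 : 1;
}

Because every thread funnels its pushes and pops through the holder-owned worklists, the holder becomes the single place where additional worklists (for example, the per-context worklists mentioned in the commit message) could later be introduced.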

// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <stdlib.h>

#include "src/init/v8.h"

#include "src/heap/concurrent-marking.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/mark-compact.h"
#include "src/heap/worklist.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"

namespace v8 {
namespace internal {
namespace heap {

// Pushes kSegmentCapacity + 1 copies of |object| so that one full segment is
// published to the global pool of |worklist|, then pops the one entry that is
// still held privately. Only the published segment remains, which is what the
// concurrent marking tasks pick up.
void PublishSegment(MarkingWorklist* worklist, HeapObject object) {
  for (size_t i = 0; i <= MarkingWorklist::kSegmentCapacity; i++) {
    worklist->Push(0, object);
  }
  CHECK(worklist->Pop(0, &object));
}

// Publishes a segment of work, starts the concurrent marking tasks, and waits
// for them to complete.
TEST(ConcurrentMarking) {
  if (!i::FLAG_concurrent_marking) return;
  CcTest::InitializeVM();
  Heap* heap = CcTest::heap();
  CcTest::CollectAllGarbage();
  if (!heap->incremental_marking()->IsStopped()) return;
  MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
  if (collector->sweeping_in_progress()) {
    collector->EnsureSweepingCompleted();
  }
  MarkingWorklistsHolder marking_worklists_holder;
  WeakObjects weak_objects;
  ConcurrentMarking* concurrent_marking =
      new ConcurrentMarking(heap, &marking_worklists_holder, &weak_objects);
  PublishSegment(marking_worklists_holder.shared(),
                 ReadOnlyRoots(heap).undefined_value());
  concurrent_marking->ScheduleTasks();
  concurrent_marking->Stop(
      ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
  delete concurrent_marking;
}

// Stops the concurrent marking tasks after the ongoing work, publishes more
// work, and checks that the tasks can be rescheduled.
TEST(ConcurrentMarkingReschedule) {
  if (!i::FLAG_concurrent_marking) return;
  CcTest::InitializeVM();
  Heap* heap = CcTest::heap();
  CcTest::CollectAllGarbage();
  if (!heap->incremental_marking()->IsStopped()) return;
  MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
  if (collector->sweeping_in_progress()) {
    collector->EnsureSweepingCompleted();
  }
  MarkingWorklistsHolder marking_worklists_holder;
  WeakObjects weak_objects;
  ConcurrentMarking* concurrent_marking =
      new ConcurrentMarking(heap, &marking_worklists_holder, &weak_objects);
  PublishSegment(marking_worklists_holder.shared(),
                 ReadOnlyRoots(heap).undefined_value());
  concurrent_marking->ScheduleTasks();
  concurrent_marking->Stop(
      ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
  PublishSegment(marking_worklists_holder.shared(),
                 ReadOnlyRoots(heap).undefined_value());
  concurrent_marking->RescheduleTasksIfNeeded();
  concurrent_marking->Stop(
      ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
  delete concurrent_marking;
}

// Preempts the concurrent marking tasks while plenty of work is still pending,
// publishes more work, and reschedules the tasks.
TEST(ConcurrentMarkingPreemptAndReschedule) {
  if (!i::FLAG_concurrent_marking) return;
  CcTest::InitializeVM();
  Heap* heap = CcTest::heap();
  CcTest::CollectAllGarbage();
  if (!heap->incremental_marking()->IsStopped()) return;
  MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
  if (collector->sweeping_in_progress()) {
    collector->EnsureSweepingCompleted();
  }
  MarkingWorklistsHolder marking_worklists_holder;
  WeakObjects weak_objects;
  ConcurrentMarking* concurrent_marking =
      new ConcurrentMarking(heap, &marking_worklists_holder, &weak_objects);
  for (int i = 0; i < 5000; i++)
    PublishSegment(marking_worklists_holder.shared(),
                   ReadOnlyRoots(heap).undefined_value());
  concurrent_marking->ScheduleTasks();
  concurrent_marking->Stop(ConcurrentMarking::StopRequest::PREEMPT_TASKS);
  for (int i = 0; i < 5000; i++)
    PublishSegment(marking_worklists_holder.shared(),
                   ReadOnlyRoots(heap).undefined_value());
  concurrent_marking->RescheduleTasksIfNeeded();
  concurrent_marking->Stop(
      ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
  delete concurrent_marking;
}

// Checks that the bytes marked concurrently cover at least the large live
// array allocated by the test.
TEST(ConcurrentMarkingMarkedBytes) {
  if (!i::FLAG_concurrent_marking) return;
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = CcTest::heap();
  HandleScope sc(isolate);
  Handle<FixedArray> root = isolate->factory()->NewFixedArray(1000000);
  CcTest::CollectAllGarbage();
  if (!heap->incremental_marking()->IsStopped()) return;
  heap::SimulateIncrementalMarking(heap, false);
  heap->concurrent_marking()->Stop(
      ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
  CHECK_GE(heap->concurrent_marking()->TotalMarkedBytes(), root->Size());
}

// Starts incremental (and thus concurrent) marking and then disposes the
// isolate; teardown must stop the concurrent marking tasks.
UNINITIALIZED_TEST(ConcurrentMarkingStoppedOnTeardown) {
  if (!i::FLAG_concurrent_marking) return;

  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  {
    Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
    Factory* factory = i_isolate->factory();

    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();

    for (int i = 0; i < 10000; i++) {
      factory->NewJSWeakMap();
    }

    Heap* heap = i_isolate->heap();
    heap::SimulateIncrementalMarking(heap, false);
  }

  isolate->Dispose();
}

} // namespace heap
} // namespace internal
} // namespace v8