[Heap]: Make concurrent marking use the Jobs API.
StopRequest is removed in favor of:
  COMPLETE_TASKS_FOR_TESTING -> JoinForTesting()
  PREEMPT_TASKS -> Pause()
  COMPLETE_ONGOING_TASKS now has the same behavior as PREEMPT_TASKS;
  we should avoid waiting on the main thread as much as possible.

Change-Id: Icceeb4f0c0fda2ed234b2f26fe308b11410fcfb7
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2376166
Commit-Queue: Etienne Pierre-Doray <etiennep@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70037}
Parent: c1e1a6a4aa
Commit: 4a2b2b2e56
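For orientation before the diff: the CL moves concurrent marking from hand-scheduled CancelableTasks onto the v8::JobTask / v8::JobHandle API, where the platform decides how many workers to run from GetMaxConcurrency() and workers yield cooperatively via JobDelegate::ShouldYield(). Below is a minimal, self-contained sketch of that contract — not V8 heap code; SketchMarkingJob, the atomic work counter, and the use of libplatform's default platform are illustrative assumptions.

```cpp
// Sketch only (assumed names): illustrates the v8::JobTask contract that the
// CL below adopts for concurrent marking. Build against V8's include/ dir.
#include <atomic>
#include <cstddef>
#include <memory>

#include "libplatform/libplatform.h"  // v8::platform::NewDefaultPlatform
#include "v8-platform.h"              // v8::JobTask, v8::JobDelegate, v8::JobHandle

class SketchMarkingJob : public v8::JobTask {
 public:
  explicit SketchMarkingJob(std::atomic<size_t>* remaining)
      : remaining_(remaining) {}

  // Runs concurrently on worker threads chosen by the platform.
  void Run(v8::JobDelegate* delegate) override {
    while (!delegate->ShouldYield()) {  // cooperative preemption point
      size_t n = remaining_->load(std::memory_order_relaxed);
      if (n == 0) return;  // no work left, let the job retire
      if (remaining_->compare_exchange_weak(n, n - 1,
                                            std::memory_order_relaxed)) {
        // ...process ("mark") one unit of work here...
      }
    }
  }

  // Upper bound on useful workers; the job finishes once this reaches zero.
  // ConcurrentMarking::GetMaxConcurrency() in the diff additionally adds
  // worker_count, since work held locally by active workers is not visible
  // in the shared worklists.
  size_t GetMaxConcurrency(size_t /*worker_count*/) const override {
    return remaining_->load(std::memory_order_relaxed);
  }

 private:
  std::atomic<size_t>* const remaining_;
};

int main() {
  std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
  std::atomic<size_t> remaining{10000};

  std::unique_ptr<v8::JobHandle> handle = platform->PostJob(
      v8::TaskPriority::kUserVisible,
      std::make_unique<SketchMarkingJob>(&remaining));

  remaining += 5000;                    // publish more work...
  handle->NotifyConcurrencyIncrease();  // ...and tell the scheduler about it

  handle->Join();  // wait for completion; Cancel() would preempt instead
  return 0;
}
```

In the real CL, JoinForTesting() maps onto JobHandle::Join(), Pause() onto Cancel(), and RescheduleJobIfNeeded() onto NotifyConcurrencyIncrease() (or a fresh PostJob), as the diff below shows.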
@@ -347,27 +347,30 @@ FixedArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
   return FixedArray::unchecked_cast(object);
 }
 
-class ConcurrentMarking::Task : public CancelableTask {
+class ConcurrentMarking::JobTask : public v8::JobTask {
  public:
-  Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
-       TaskState* task_state, int task_id)
-      : CancelableTask(isolate),
-        concurrent_marking_(concurrent_marking),
-        task_state_(task_state),
-        task_id_(task_id) {}
+  JobTask(ConcurrentMarking* concurrent_marking, unsigned mark_compact_epoch,
+          bool is_forced_gc)
+      : concurrent_marking_(concurrent_marking),
+        mark_compact_epoch_(mark_compact_epoch),
+        is_forced_gc_(is_forced_gc) {}
 
-  ~Task() override = default;
+  ~JobTask() override = default;
 
- private:
-  // v8::internal::CancelableTask overrides.
-  void RunInternal() override {
-    concurrent_marking_->Run(task_id_, task_state_);
+  // v8::JobTask overrides.
+  void Run(JobDelegate* delegate) override {
+    concurrent_marking_->Run(delegate, mark_compact_epoch_, is_forced_gc_);
   }
 
+  size_t GetMaxConcurrency(size_t worker_count) const override {
+    return concurrent_marking_->GetMaxConcurrency(worker_count);
+  }
+
+ private:
   ConcurrentMarking* concurrent_marking_;
-  TaskState* task_state_;
-  int task_id_;
-  DISALLOW_COPY_AND_ASSIGN(Task);
+  const unsigned mark_compact_epoch_;
+  const bool is_forced_gc_;
+  DISALLOW_COPY_AND_ASSIGN(JobTask);
 };
 
 ConcurrentMarking::ConcurrentMarking(Heap* heap,
@@ -382,16 +385,19 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap,
 #endif
 }
 
-void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
+void ConcurrentMarking::Run(JobDelegate* delegate, unsigned mark_compact_epoch,
+                            bool is_forced_gc) {
   TRACE_BACKGROUND_GC(heap_->tracer(),
                       GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
   size_t kBytesUntilInterruptCheck = 64 * KB;
   int kObjectsUntilInterrupCheck = 1000;
+  uint8_t task_id = delegate->GetTaskId() + 1;
+  TaskState* task_state = &task_state_[task_id];
   MarkingWorklists::Local local_marking_worklists(marking_worklists_);
   ConcurrentMarkingVisitor visitor(
       task_id, &local_marking_worklists, weak_objects_, heap_,
-      task_state->mark_compact_epoch, Heap::GetBytecodeFlushMode(),
-      heap_->local_embedder_heap_tracer()->InUse(), task_state->is_forced_gc,
+      mark_compact_epoch, Heap::GetBytecodeFlushMode(),
+      heap_->local_embedder_heap_tracer()->InUse(), is_forced_gc,
       &task_state->memory_chunk_data);
   NativeContextInferrer& native_context_inferrer =
       task_state->native_context_inferrer;
@@ -457,7 +463,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
       marked_bytes += current_marked_bytes;
       base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
                                                 marked_bytes);
-      if (task_state->preemption_request) {
+      if (delegate->ShouldYield()) {
         TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                      "ConcurrentMarking::Run Preempted");
         break;
@@ -492,13 +498,6 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
     if (ephemeron_marked) {
       set_ephemeron_marked(true);
     }
-
-    {
-      base::MutexGuard guard(&pending_lock_);
-      is_pending_[task_id] = false;
-      --pending_task_count_;
-      pending_condition_.NotifyAll();
-    }
   }
   if (FLAG_trace_concurrent_marking) {
     heap_->isolate()->PrintWithTimestamp(
@@ -507,109 +506,62 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
   }
 }
 
-void ConcurrentMarking::ScheduleTasks() {
+size_t ConcurrentMarking::GetMaxConcurrency(size_t worker_count) {
+  // TODO(ulan): Iterate context_worklists() if other worklists are empty.
+  return std::min<size_t>(
+      kMaxTasks, worker_count + marking_worklists_->shared()->Size() +
+                     weak_objects_->current_ephemerons.GlobalPoolSize() +
+                     weak_objects_->discovered_ephemerons.GlobalPoolSize());
+}
+
+void ConcurrentMarking::ScheduleJob() {
   DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
   DCHECK(!heap_->IsTearingDown());
-  base::MutexGuard guard(&pending_lock_);
-  if (total_task_count_ == 0) {
-    static const int num_cores =
-        V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
-#if defined(V8_OS_MACOSX)
-    // Mac OSX 10.11 and prior seems to have trouble when doing concurrent
-    // marking on competing hyper-threads (regresses Octane/Splay). As such,
-    // only use num_cores/2, leaving one of those for the main thread.
-    // TODO(ulan): Use all cores on Mac 10.12+.
-    total_task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1));
-#else   // defined(V8_OS_MACOSX)
-    // On other platforms use all logical cores, leaving one for the main
-    // thread.
-    total_task_count_ = Max(1, Min(kMaxTasks, num_cores - 2));
-#endif  // defined(V8_OS_MACOSX)
-    if (FLAG_gc_experiment_reduce_concurrent_marking_tasks) {
-      // Use at most half of the cores in the experiment.
-      total_task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1));
-    }
-    DCHECK_LE(total_task_count_, kMaxTasks);
-  }
-  // Task id 0 is for the main thread.
-  for (int i = 1; i <= total_task_count_; i++) {
-    if (!is_pending_[i]) {
-      if (FLAG_trace_concurrent_marking) {
-        heap_->isolate()->PrintWithTimestamp(
-            "Scheduling concurrent marking task %d\n", i);
-      }
-      task_state_[i].preemption_request = false;
-      task_state_[i].mark_compact_epoch =
-          heap_->mark_compact_collector()->epoch();
-      task_state_[i].is_forced_gc = heap_->is_current_gc_forced();
-      is_pending_[i] = true;
-      ++pending_task_count_;
-      auto task =
-          std::make_unique<Task>(heap_->isolate(), this, &task_state_[i], i);
-      cancelable_id_[i] = task->id();
-      V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
-    }
-  }
-  DCHECK_EQ(total_task_count_, pending_task_count_);
+  DCHECK(!job_handle_ || !job_handle_->IsRunning());
+
+  job_handle_ = V8::GetCurrentPlatform()->PostJob(
+      TaskPriority::kUserVisible,
+      std::make_unique<JobTask>(this, heap_->mark_compact_collector()->epoch(),
+                                heap_->is_current_gc_forced()));
+  DCHECK(job_handle_->IsRunning());
 }
 
-void ConcurrentMarking::RescheduleTasksIfNeeded() {
+void ConcurrentMarking::RescheduleJobIfNeeded() {
   DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
   if (heap_->IsTearingDown()) return;
-  {
-    base::MutexGuard guard(&pending_lock_);
-    // The total task count is initialized in ScheduleTasks from
-    // NumberOfWorkerThreads of the platform.
-    if (total_task_count_ > 0 && pending_task_count_ == total_task_count_) {
-      return;
-    }
-  }
-  if (!marking_worklists_->shared()->IsEmpty() ||
-      !weak_objects_->current_ephemerons.IsGlobalPoolEmpty() ||
-      !weak_objects_->discovered_ephemerons.IsGlobalPoolEmpty()) {
-    ScheduleTasks();
-  }
+
+  if (marking_worklists_->shared()->IsEmpty() &&
+      weak_objects_->current_ephemerons.IsGlobalPoolEmpty() &&
+      weak_objects_->discovered_ephemerons.IsGlobalPoolEmpty()) {
+    return;
+  }
+  if (!job_handle_ || !job_handle_->IsRunning())
+    ScheduleJob();
+  else
+    job_handle_->NotifyConcurrencyIncrease();
 }
 
-bool ConcurrentMarking::Stop(StopRequest stop_request) {
+bool ConcurrentMarking::Pause() {
   DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
-  base::MutexGuard guard(&pending_lock_);
 
-  if (pending_task_count_ == 0) return false;
-
-  if (stop_request != StopRequest::COMPLETE_TASKS_FOR_TESTING) {
-    CancelableTaskManager* task_manager =
-        heap_->isolate()->cancelable_task_manager();
-    for (int i = 1; i <= total_task_count_; i++) {
-      if (is_pending_[i]) {
-        if (task_manager->TryAbort(cancelable_id_[i]) ==
-            TryAbortResult::kTaskAborted) {
-          is_pending_[i] = false;
-          --pending_task_count_;
-        } else if (stop_request == StopRequest::PREEMPT_TASKS) {
-          task_state_[i].preemption_request = true;
-        }
-      }
-    }
-  }
-  while (pending_task_count_ > 0) {
-    pending_condition_.Wait(&pending_lock_);
-  }
-  for (int i = 1; i <= total_task_count_; i++) {
-    DCHECK(!is_pending_[i]);
-  }
+  if (!job_handle_ || !job_handle_->IsRunning()) return false;
+  job_handle_->Cancel();
   return true;
 }
 
+void ConcurrentMarking::JoinForTesting() {
+  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
+  DCHECK(job_handle_ && job_handle_->IsRunning());
+  job_handle_->Join();
+}
+
 bool ConcurrentMarking::IsStopped() {
   if (!FLAG_concurrent_marking) return true;
 
-  base::MutexGuard guard(&pending_lock_);
-  return pending_task_count_ == 0;
+  return !job_handle_ || !job_handle_->IsRunning();
 }
 
 void ConcurrentMarking::FlushNativeContexts(NativeContextStats* main_stats) {
-  for (int i = 1; i <= total_task_count_; i++) {
+  for (int i = 1; i <= kMaxTasks; i++) {
     main_stats->Merge(task_state_[i].native_context_stats);
     task_state_[i].native_context_stats.Clear();
   }
@@ -617,8 +569,8 @@ void ConcurrentMarking::FlushNativeContexts(NativeContextStats* main_stats) {
 
 void ConcurrentMarking::FlushMemoryChunkData(
     MajorNonAtomicMarkingState* marking_state) {
-  DCHECK_EQ(pending_task_count_, 0);
-  for (int i = 1; i <= total_task_count_; i++) {
+  DCHECK(!job_handle_ || !job_handle_->IsRunning());
+  for (int i = 1; i <= kMaxTasks; i++) {
     MemoryChunkDataMap& memory_chunk_data = task_state_[i].memory_chunk_data;
     for (auto& pair : memory_chunk_data) {
       // ClearLiveness sets the live bytes to zero.
@@ -640,7 +592,7 @@ void ConcurrentMarking::FlushMemoryChunkData(
 }
 
 void ConcurrentMarking::ClearMemoryChunkData(MemoryChunk* chunk) {
-  for (int i = 1; i <= total_task_count_; i++) {
+  for (int i = 1; i <= kMaxTasks; i++) {
     auto it = task_state_[i].memory_chunk_data.find(chunk);
     if (it != task_state_[i].memory_chunk_data.end()) {
       it->second.live_bytes = 0;
@@ -651,7 +603,7 @@ void ConcurrentMarking::ClearMemoryChunkData(MemoryChunk* chunk) {
 
 size_t ConcurrentMarking::TotalMarkedBytes() {
   size_t result = 0;
-  for (int i = 1; i <= total_task_count_; i++) {
+  for (int i = 1; i <= kMaxTasks; i++) {
     result +=
         base::AsAtomicWord::Relaxed_Load<size_t>(&task_state_[i].marked_bytes);
   }
@@ -661,14 +613,12 @@ size_t ConcurrentMarking::TotalMarkedBytes() {
 
 ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
     : concurrent_marking_(concurrent_marking),
-      resume_on_exit_(FLAG_concurrent_marking &&
-                      concurrent_marking_->Stop(
-                          ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
+      resume_on_exit_(FLAG_concurrent_marking && concurrent_marking_->Pause()) {
   DCHECK_IMPLIES(resume_on_exit_, FLAG_concurrent_marking);
 }
 
 ConcurrentMarking::PauseScope::~PauseScope() {
-  if (resume_on_exit_) concurrent_marking_->RescheduleTasksIfNeeded();
+  if (resume_on_exit_) concurrent_marking_->ScheduleJob();
 }
 
 }  // namespace internal
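To read the new .cc control flow above at a glance: GetMaxConcurrency() makes concurrency demand-driven rather than fixed at startup. As a worked example (numbers illustrative), with 3 workers currently running, 120 entries in the shared worklist and 5 discovered ephemerons, the job asks for min(kMaxTasks, 3 + 120 + 5) = kMaxTasks workers (kMaxTasks stays at 7, per the TODO in the header below); as the worklists drain, the bound falls toward the number of still-active workers and finally to zero, which is how the job retires without the old pending_task_count_ / condition-variable handshake. Preemption is cooperative as well: Pause() calls job_handle_->Cancel(), which asks each worker to return at its next delegate->ShouldYield() check and waits for that, replacing the per-task preemption_request flag, while RescheduleJobIfNeeded() either posts a fresh job or bumps concurrency with NotifyConcurrencyIncrease() when new work is published.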
@@ -54,17 +54,6 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
     const bool resume_on_exit_;
   };
 
-  enum class StopRequest {
-    // Preempt ongoing tasks ASAP (and cancel unstarted tasks).
-    PREEMPT_TASKS,
-    // Wait for ongoing tasks to complete (and cancels unstarted tasks).
-    COMPLETE_ONGOING_TASKS,
-    // Wait for all scheduled tasks to complete (only use this in tests that
-    // control the full stack -- otherwise tasks cancelled by the platform can
-    // make this call hang).
-    COMPLETE_TASKS_FOR_TESTING,
-  };
-
   // TODO(gab): The only thing that prevents this being above 7 is
   // Worklist::kMaxNumTasks being maxed at 8 (concurrent marking doesn't use
   // task 0, reserved for the main thread).
@@ -76,13 +65,14 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
   // Schedules asynchronous tasks to perform concurrent marking. Objects in the
   // heap should not be moved while these are active (can be stopped safely via
   // Stop() or PauseScope).
-  void ScheduleTasks();
+  void ScheduleJob();
+  void RescheduleJobIfNeeded();
 
   // Stops concurrent marking per |stop_request|'s semantics. Returns true
   // if concurrent marking was in progress, false otherwise.
-  bool Stop(StopRequest stop_request);
+  bool Pause();
+  void JoinForTesting();
 
-  void RescheduleTasksIfNeeded();
   // Flushes native context sizes to the given table of the main thread.
   void FlushNativeContexts(NativeContextStats* main_stats);
   // Flushes memory chunk data using the given marking state.
@@ -103,31 +93,24 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
 
  private:
   struct TaskState {
-    // The main thread sets this flag to true when it wants the concurrent
-    // marker to give up the worker thread.
-    std::atomic<bool> preemption_request;
     size_t marked_bytes = 0;
-    unsigned mark_compact_epoch;
-    bool is_forced_gc;
     MemoryChunkDataMap memory_chunk_data;
     NativeContextInferrer native_context_inferrer;
    NativeContextStats native_context_stats;
     char cache_line_padding[64];
   };
-  class Task;
-  void Run(int task_id, TaskState* task_state);
+  class JobTask;
+  void Run(JobDelegate* delegate, unsigned mark_compact_epoch,
+           bool is_forced_gc);
+  size_t GetMaxConcurrency(size_t worker_count);
+
+  std::unique_ptr<JobHandle> job_handle_;
   Heap* const heap_;
   MarkingWorklists* const marking_worklists_;
   WeakObjects* const weak_objects_;
   TaskState task_state_[kMaxTasks + 1];
   std::atomic<size_t> total_marked_bytes_{0};
   std::atomic<bool> ephemeron_marked_{false};
-  base::Mutex pending_lock_;
-  base::ConditionVariable pending_condition_;
-  int pending_task_count_ = 0;
-  bool is_pending_[kMaxTasks + 1] = {};
-  CancelableTaskManager::Id cancelable_id_[kMaxTasks + 1] = {};
-  int total_task_count_ = 0;
 };
 
 }  // namespace internal
@@ -5564,6 +5564,10 @@ void Heap::TearDown() {
     stress_scavenge_observer_ = nullptr;
   }
 
+  if (FLAG_concurrent_marking || FLAG_parallel_marking) {
+    concurrent_marking_->Pause();
+  }
+
   if (mark_compact_collector_) {
     mark_compact_collector_->TearDown();
     mark_compact_collector_.reset();
@@ -5580,6 +5584,7 @@ void Heap::TearDown() {
   scavenger_collector_.reset();
   array_buffer_sweeper_.reset();
   incremental_marking_.reset();
 
+  concurrent_marking_.reset();
+
   gc_idle_time_handler_.reset();
@@ -246,7 +246,7 @@ void IncrementalMarking::StartMarking() {
   MarkRoots();
 
   if (FLAG_concurrent_marking && !heap_->IsTearingDown()) {
-    heap_->concurrent_marking()->ScheduleTasks();
+    heap_->concurrent_marking()->ScheduleJob();
   }
 
   // Ready to start incremental marking.
@@ -1104,7 +1104,7 @@ StepResult IncrementalMarking::Step(double max_step_size_in_ms,
     }
     if (FLAG_concurrent_marking) {
       local_marking_worklists()->ShareWork();
-      heap_->concurrent_marking()->RescheduleTasksIfNeeded();
+      heap_->concurrent_marking()->RescheduleJobIfNeeded();
     }
   }
   if (state_ == MARKING) {
@@ -904,12 +904,11 @@ void MarkCompactCollector::Prepare() {
   }
 }
 
-void MarkCompactCollector::FinishConcurrentMarking(
-    ConcurrentMarking::StopRequest stop_request) {
+void MarkCompactCollector::FinishConcurrentMarking() {
   // FinishConcurrentMarking is called for both, concurrent and parallel,
   // marking. It is safe to call this function when tasks are already finished.
   if (FLAG_parallel_marking || FLAG_concurrent_marking) {
-    heap()->concurrent_marking()->Stop(stop_request);
+    heap()->concurrent_marking()->Pause();
     heap()->concurrent_marking()->FlushMemoryChunkData(
         non_atomic_marking_state());
     heap()->concurrent_marking()->FlushNativeContexts(&native_context_stats_);
@@ -1664,12 +1663,11 @@ void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
              GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
 
     if (FLAG_parallel_marking) {
-      heap_->concurrent_marking()->RescheduleTasksIfNeeded();
+      heap_->concurrent_marking()->RescheduleJobIfNeeded();
     }
 
     work_to_do = ProcessEphemerons();
-    FinishConcurrentMarking(
-        ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
+    FinishConcurrentMarking();
   }
 
   CHECK(weak_objects_.current_ephemerons.IsEmpty());
@@ -1984,12 +1982,11 @@ void MarkCompactCollector::MarkLiveObjects() {
   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
     if (FLAG_parallel_marking) {
-      heap_->concurrent_marking()->RescheduleTasksIfNeeded();
+      heap_->concurrent_marking()->RescheduleJobIfNeeded();
     }
     DrainMarkingWorklist();
 
-    FinishConcurrentMarking(
-        ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
+    FinishConcurrentMarking();
     DrainMarkingWorklist();
   }
 
@@ -479,7 +479,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
 
   // Stop concurrent marking (either by preempting it right away or waiting for
   // it to complete as requested by |stop_request|).
-  void FinishConcurrentMarking(ConcurrentMarking::StopRequest stop_request);
+  void FinishConcurrentMarking();
 
   bool StartCompaction();
 
@@ -44,9 +44,8 @@ TEST(ConcurrentMarking) {
       new ConcurrentMarking(heap, &marking_worklists, &weak_objects);
   PublishSegment(marking_worklists.shared(),
                  ReadOnlyRoots(heap).undefined_value());
-  concurrent_marking->ScheduleTasks();
-  concurrent_marking->Stop(
-      ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
+  concurrent_marking->ScheduleJob();
+  concurrent_marking->JoinForTesting();
   delete concurrent_marking;
 }
@@ -67,14 +66,12 @@ TEST(ConcurrentMarkingReschedule) {
       new ConcurrentMarking(heap, &marking_worklists, &weak_objects);
   PublishSegment(marking_worklists.shared(),
                  ReadOnlyRoots(heap).undefined_value());
-  concurrent_marking->ScheduleTasks();
-  concurrent_marking->Stop(
-      ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
+  concurrent_marking->ScheduleJob();
+  concurrent_marking->Pause();
   PublishSegment(marking_worklists.shared(),
                  ReadOnlyRoots(heap).undefined_value());
-  concurrent_marking->RescheduleTasksIfNeeded();
-  concurrent_marking->Stop(
-      ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
+  concurrent_marking->RescheduleJobIfNeeded();
+  concurrent_marking->JoinForTesting();
   delete concurrent_marking;
 }
@@ -96,14 +93,13 @@ TEST(ConcurrentMarkingPreemptAndReschedule) {
   for (int i = 0; i < 5000; i++)
     PublishSegment(marking_worklists.shared(),
                    ReadOnlyRoots(heap).undefined_value());
-  concurrent_marking->ScheduleTasks();
-  concurrent_marking->Stop(ConcurrentMarking::StopRequest::PREEMPT_TASKS);
+  concurrent_marking->ScheduleJob();
+  concurrent_marking->Pause();
   for (int i = 0; i < 5000; i++)
     PublishSegment(marking_worklists.shared(),
                    ReadOnlyRoots(heap).undefined_value());
-  concurrent_marking->RescheduleTasksIfNeeded();
-  concurrent_marking->Stop(
-      ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
+  concurrent_marking->RescheduleJobIfNeeded();
+  concurrent_marking->JoinForTesting();
   delete concurrent_marking;
 }
@@ -117,8 +113,7 @@ TEST(ConcurrentMarkingMarkedBytes) {
   CcTest::CollectAllGarbage();
   if (!heap->incremental_marking()->IsStopped()) return;
   heap::SimulateIncrementalMarking(heap, false);
-  heap->concurrent_marking()->Stop(
-      ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
+  heap->concurrent_marking()->JoinForTesting();
   CHECK_GE(heap->concurrent_marking()->TotalMarkedBytes(), root->Size());
 }