From acf5e1aabb3e8491dd0f3a8287e5dda44a486e2d Mon Sep 17 00:00:00 2001 From: Ulan Degenbaev Date: Mon, 5 Oct 2020 14:05:16 +0200 Subject: [PATCH] Split v8_enable_concurrent_marking into two flags The new flags are - v8_enable_atomic_object_field_writes that makes field write operations relaxed atomic. - v8_enable_atomic_marking_state that makes the marking state and the write-barrier thread-safe. The motivation is that we want to disable atomic object fields while keeping the marking states thread-safe. This allows us to increase TSAN coverage for background compilation and streaming tasks while keeping the write-barrier used by the tasks thread-safe. Bug: v8:10988 Change-Id: I11d66954dda4bf36d24c5e6f14ee5bc7a0f86094 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2448467 Reviewed-by: Georg Neis Reviewed-by: Michael Achenbach Commit-Queue: Ulan Degenbaev Cr-Commit-Position: refs/heads/master@{#70329} --- BUILD.gn | 30 +++++++++++++++++++++++++++--- infra/mb/mb_config.pyl | 8 +++++++- src/flags/flag-definitions.h | 2 +- src/heap/concurrent-marking.cc | 8 ++++++-- src/heap/incremental-marking.h | 2 +- src/heap/mark-compact.h | 4 ++-- src/objects/tagged-field-inl.h | 4 ++-- 7 files changed, 46 insertions(+), 12 deletions(-) diff --git a/BUILD.gn b/BUILD.gn index 1942c7896a..cda68bf8c5 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -131,7 +131,15 @@ declare_args() { # Sets -dV8_TRACE_FEEDBACK_UPDATES. v8_enable_trace_feedback_updates = false - # Sets -dV8_CONCURRENT_MARKING + # Sets -dV8_ATOMIC_OBJECT_FIELD_WRITES and turns all field write operations + # into relaxed atomic operations. + v8_enable_atomic_object_field_writes = "" + + # Sets -dV8_ATOMIC_MARKING_STATE + v8_enable_atomic_marking_state = "" + + # Controls the default values of v8_enable_atomic_object_field_writes and + # v8_enable_atomic_marking_state. See the default setting code below. v8_enable_concurrent_marking = true # Runs mksnapshot with --turbo-profiling. 
After building in this @@ -318,6 +326,16 @@ if (v8_enable_heap_sandbox == "") { if (v8_enable_single_generation == "") { v8_enable_single_generation = v8_disable_write_barriers } +if (v8_enable_atomic_object_field_writes == "") { + v8_enable_atomic_object_field_writes = v8_enable_concurrent_marking +} +if (v8_enable_atomic_marking_state == "") { + v8_enable_atomic_marking_state = v8_enable_concurrent_marking +} +assert(!v8_enable_concurrent_marking || v8_enable_atomic_object_field_writes, + "Concurrent marking requires atomic object field writes.") +assert(!v8_enable_concurrent_marking || v8_enable_atomic_marking_state, + "Concurrent marking requires atomic marking state.") # Toggle pointer compression for correctness fuzzing when building the # clang_x64_pointer_compression toolchain. We'll correctness-compare the @@ -611,8 +629,11 @@ config("features") { if (v8_use_external_startup_data) { defines += [ "V8_USE_EXTERNAL_STARTUP_DATA" ] } - if (v8_enable_concurrent_marking) { - defines += [ "V8_CONCURRENT_MARKING" ] + if (v8_enable_atomic_object_field_writes) { + defines += [ "V8_ATOMIC_OBJECT_FIELD_WRITES" ] + } + if (v8_enable_atomic_marking_state) { + defines += [ "V8_ATOMIC_MARKING_STATE" ] } if (v8_enable_lazy_source_positions) { defines += [ "V8_ENABLE_LAZY_SOURCE_POSITIONS" ] @@ -1671,6 +1692,9 @@ action("v8_dump_build_config") { "is_ubsan_vptr=$is_ubsan_vptr", "target_cpu=\"$target_cpu\"", "v8_current_cpu=\"$v8_current_cpu\"", + "v8_enable_atomic_marking_state=$v8_enable_atomic_marking_state", + "v8_enable_atomic_object_field_writes=" + + "$v8_enable_atomic_object_field_writes", "v8_enable_concurrent_marking=$v8_enable_concurrent_marking", "v8_enable_i18n_support=$v8_enable_i18n_support", "v8_enable_verify_predictable=$v8_enable_verify_predictable", diff --git a/infra/mb/mb_config.pyl b/infra/mb/mb_config.pyl index 7d5cc73026..ed78a28ecf 100644 --- a/infra/mb/mb_config.pyl +++ b/infra/mb/mb_config.pyl @@ -661,7 +661,13 @@ }, 'disable_concurrent_marking': { 
- 'gn_args': 'v8_enable_concurrent_marking=false', + # Disable concurrent marking and atomic object field writes in order to + # increase the TSAN coverage for background tasks. We need to keep the + # atomic marking state enabled because that is needed for the concurrent + # write-barrier used by background compilation. + 'gn_args': 'v8_enable_concurrent_marking=false ' + 'v8_enable_atomic_object_field_writes=false ' + 'v8_enable_atomic_marking_state=true ', }, 'disable_pgo': { diff --git a/src/flags/flag-definitions.h b/src/flags/flag-definitions.h index a1f00c201c..704d6c08a4 100644 --- a/src/flags/flag-definitions.h +++ b/src/flags/flag-definitions.h @@ -999,7 +999,7 @@ DEFINE_BOOL(scavenge_separate_stack_scanning, false, "use a separate phase for stack scanning in scavenge") DEFINE_BOOL(trace_parallel_scavenge, false, "trace parallel scavenge") DEFINE_BOOL(write_protect_code_memory, true, "write protect code memory") -#ifdef V8_CONCURRENT_MARKING +#if defined(V8_ATOMIC_MARKING_STATE) && defined(V8_ATOMIC_OBJECT_FIELD_WRITES) #define V8_CONCURRENT_MARKING_BOOL true #else #define V8_CONCURRENT_MARKING_BOOL false diff --git a/src/heap/concurrent-marking.cc b/src/heap/concurrent-marking.cc index a5c3c10cbb..632e30eab9 100644 --- a/src/heap/concurrent-marking.cc +++ b/src/heap/concurrent-marking.cc @@ -375,10 +375,14 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap, : heap_(heap), marking_worklists_(marking_worklists), weak_objects_(weak_objects) { -// The runtime flag should be set only if the compile time flag was set. -#ifndef V8_CONCURRENT_MARKING +#ifndef V8_ATOMIC_MARKING_STATE + // Concurrent and parallel marking require atomic marking state. CHECK(!FLAG_concurrent_marking && !FLAG_parallel_marking); #endif +#ifndef V8_ATOMIC_OBJECT_FIELD_WRITES + // Concurrent marking requires atomic object field writes. 
+ CHECK(!FLAG_concurrent_marking); +#endif } void ConcurrentMarking::Run(int task_id, TaskState* task_state) { diff --git a/src/heap/incremental-marking.h b/src/heap/incremental-marking.h index b36e335ff5..5c54593d1a 100644 --- a/src/heap/incremental-marking.h +++ b/src/heap/incremental-marking.h @@ -81,7 +81,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking final { static constexpr size_t kGlobalActivationThreshold = 0; #endif -#ifdef V8_CONCURRENT_MARKING +#ifdef V8_ATOMIC_MARKING_STATE static const AccessMode kAtomicity = AccessMode::ATOMIC; #else static const AccessMode kAtomicity = AccessMode::NON_ATOMIC; diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h index 7c125cf5dc..6763e79f13 100644 --- a/src/heap/mark-compact.h +++ b/src/heap/mark-compact.h @@ -434,11 +434,11 @@ class MainMarkingVisitor final // Collector for young and old generation. class MarkCompactCollector final : public MarkCompactCollectorBase { public: -#ifdef V8_CONCURRENT_MARKING +#ifdef V8_ATOMIC_MARKING_STATE using MarkingState = MajorMarkingState; #else using MarkingState = MajorNonAtomicMarkingState; -#endif // V8_CONCURRENT_MARKING +#endif // V8_ATOMIC_MARKING_STATE using AtomicMarkingState = MajorAtomicMarkingState; using NonAtomicMarkingState = MajorNonAtomicMarkingState; diff --git a/src/objects/tagged-field-inl.h b/src/objects/tagged-field-inl.h index fed3192dd9..76c80192a5 100644 --- a/src/objects/tagged-field-inl.h +++ b/src/objects/tagged-field-inl.h @@ -70,7 +70,7 @@ T TaggedField::load(const Isolate* isolate, HeapObject host, // static template void TaggedField::store(HeapObject host, T value) { -#ifdef V8_CONCURRENT_MARKING +#ifdef V8_ATOMIC_OBJECT_FIELD_WRITES Relaxed_Store(host, value); #else *location(host) = full_to_tagged(value.ptr()); @@ -80,7 +80,7 @@ void TaggedField::store(HeapObject host, T value) { // static template void TaggedField::store(HeapObject host, int offset, T value) { -#ifdef V8_CONCURRENT_MARKING +#ifdef V8_ATOMIC_OBJECT_FIELD_WRITES 
Relaxed_Store(host, offset, value); #else *location(host, offset) = full_to_tagged(value.ptr());