Split v8_enable_concurrent_marking into two flags

The new flags are:
- v8_enable_atomic_object_field_writes, which turns field write operations
  into relaxed atomic operations.
- v8_enable_atomic_marking_state, which makes the marking state and the
  write barrier thread-safe.

The motivation is that we want to disable atomic object field writes while
keeping the marking state thread-safe. This lets us increase TSAN coverage
for background compilation and streaming tasks while the write barrier used
by those tasks stays thread-safe.
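
Concretely, a local TSAN build could now be configured like this (a sketch of
an args.gn, not part of the patch; the three V8 flag values mirror the
disable_concurrent_marking mixin updated below, and is_tsan is assumed to be
available from the shared Chromium build config):

# Keep the marking state and the write barrier thread-safe, but leave object
# field writes non-atomic so TSAN can observe the fields touched by
# background compilation and streaming tasks.
is_tsan = true
v8_enable_concurrent_marking = false
v8_enable_atomic_object_field_writes = false
v8_enable_atomic_marking_state = true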

Bug: v8:10988
Change-Id: I11d66954dda4bf36d24c5e6f14ee5bc7a0f86094
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2448467
Reviewed-by: Georg Neis <neis@chromium.org>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70329}
Author: Ulan Degenbaev <ulan@chromium.org>, 2020-10-05 14:05:16 +02:00 (committed by Commit Bot)
Parent: a10ec2be98
Commit: acf5e1aabb
7 changed files with 46 additions and 12 deletions

View File

@@ -131,7 +131,15 @@ declare_args() {
   # Sets -dV8_TRACE_FEEDBACK_UPDATES.
   v8_enable_trace_feedback_updates = false
 
-  # Sets -dV8_CONCURRENT_MARKING
+  # Sets -dV8_ATOMIC_OBJECT_FIELD_WRITES and turns all field write operations
+  # into relaxed atomic operations.
+  v8_enable_atomic_object_field_writes = ""
+
+  # Sets -dV8_ATOMIC_MARKING_STATE
+  v8_enable_atomic_marking_state = ""
+
+  # Controls the default values of v8_enable_atomic_object_field_writes and
+  # v8_enable_concurrent_marking_state. See the default setting code below.
   v8_enable_concurrent_marking = true
 
   # Runs mksnapshot with --turbo-profiling. After building in this
@@ -318,6 +326,16 @@ if (v8_enable_heap_sandbox == "") {
 if (v8_enable_single_generation == "") {
   v8_enable_single_generation = v8_disable_write_barriers
 }
+if (v8_enable_atomic_object_field_writes == "") {
+  v8_enable_atomic_object_field_writes = v8_enable_concurrent_marking
+}
+if (v8_enable_atomic_marking_state == "") {
+  v8_enable_atomic_marking_state = v8_enable_concurrent_marking
+}
+assert(!v8_enable_concurrent_marking || v8_enable_atomic_object_field_writes,
+       "Concurrent marking requires atomic object field writes.")
+assert(!v8_enable_concurrent_marking || v8_enable_atomic_marking_state,
+       "Concurrent marking requires atomic marking state.")
 
 # Toggle pointer compression for correctness fuzzing when building the
 # clang_x64_pointer_compression toolchain. We'll correctness-compare the
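
For illustration (a hypothetical args.gn fragment, not part of this patch),
the asserts above now reject a configuration that asks for concurrent marking
without atomic object field writes:

# gn gen fails with "Concurrent marking requires atomic object field writes."
v8_enable_concurrent_marking = true
v8_enable_atomic_object_field_writes = false
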
@@ -611,8 +629,11 @@ config("features") {
   if (v8_use_external_startup_data) {
     defines += [ "V8_USE_EXTERNAL_STARTUP_DATA" ]
   }
-  if (v8_enable_concurrent_marking) {
-    defines += [ "V8_CONCURRENT_MARKING" ]
+  if (v8_enable_atomic_object_field_writes) {
+    defines += [ "V8_ATOMIC_OBJECT_FIELD_WRITES" ]
+  }
+  if (v8_enable_atomic_marking_state) {
+    defines += [ "V8_ATOMIC_MARKING_STATE" ]
   }
   if (v8_enable_lazy_source_positions) {
     defines += [ "V8_ENABLE_LAZY_SOURCE_POSITIONS" ]
@@ -1671,6 +1692,9 @@ action("v8_dump_build_config") {
     "is_ubsan_vptr=$is_ubsan_vptr",
     "target_cpu=\"$target_cpu\"",
     "v8_current_cpu=\"$v8_current_cpu\"",
+    "v8_enable_atomic_marking_state=$v8_enable_atomic_marking_state",
+    "v8_enable_atomic_object_field_writes=" +
+        "$v8_enable_atomic_object_field_writes",
     "v8_enable_concurrent_marking=$v8_enable_concurrent_marking",
     "v8_enable_i18n_support=$v8_enable_i18n_support",
     "v8_enable_verify_predictable=$v8_enable_verify_predictable",

View File

@@ -661,7 +661,13 @@
   },
   'disable_concurrent_marking': {
-    'gn_args': 'v8_enable_concurrent_marking=false',
+    # Disable concurrent marking and atomic object field writes in order to
+    # increase the TSAN coverage for background tasks. We need to keep the
+    # atomic marking state enabled because that is needed for the concurrent
+    # write-barrier used by background compilation.
+    'gn_args': 'v8_enable_concurrent_marking=false '
+               'v8_enable_atomic_object_field_writes=false '
+               'v8_enable_atomic_marking_state=true ',
   },
   'disable_pgo': {

View File

@@ -999,7 +999,7 @@ DEFINE_BOOL(scavenge_separate_stack_scanning, false,
             "use a separate phase for stack scanning in scavenge")
 DEFINE_BOOL(trace_parallel_scavenge, false, "trace parallel scavenge")
 DEFINE_BOOL(write_protect_code_memory, true, "write protect code memory")
-#ifdef V8_CONCURRENT_MARKING
+#if defined(V8_ATOMIC_MARKING_STATE) && defined(V8_ATOMIC_OBJECT_FIELD_WRITES)
 #define V8_CONCURRENT_MARKING_BOOL true
 #else
 #define V8_CONCURRENT_MARKING_BOOL false

View File

@@ -375,10 +375,14 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap,
     : heap_(heap),
       marking_worklists_(marking_worklists),
       weak_objects_(weak_objects) {
-  // The runtime flag should be set only if the compile time flag was set.
-#ifndef V8_CONCURRENT_MARKING
+#ifndef V8_ATOMIC_MARKING_STATE
+  // Concurrent and parallel marking require atomic marking state.
   CHECK(!FLAG_concurrent_marking && !FLAG_parallel_marking);
 #endif
+#ifndef V8_ATOMIC_OBJECT_FIELD_WRITES
+  // Concurrent marking requires atomic object field writes.
+  CHECK(!FLAG_concurrent_marking);
+#endif
 }
 
 void ConcurrentMarking::Run(int task_id, TaskState* task_state) {

View File

@@ -81,7 +81,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
   static constexpr size_t kGlobalActivationThreshold = 0;
 #endif
 
-#ifdef V8_CONCURRENT_MARKING
+#ifdef V8_ATOMIC_MARKING_STATE
   static const AccessMode kAtomicity = AccessMode::ATOMIC;
 #else
   static const AccessMode kAtomicity = AccessMode::NON_ATOMIC;

View File

@@ -434,11 +434,11 @@ class MainMarkingVisitor final
 // Collector for young and old generation.
 class MarkCompactCollector final : public MarkCompactCollectorBase {
  public:
-#ifdef V8_CONCURRENT_MARKING
+#ifdef V8_ATOMIC_MARKING_STATE
   using MarkingState = MajorMarkingState;
 #else
   using MarkingState = MajorNonAtomicMarkingState;
-#endif  // V8_CONCURRENT_MARKING
+#endif  // V8_ATOMIC_MARKING_STATE
 
   using AtomicMarkingState = MajorAtomicMarkingState;
   using NonAtomicMarkingState = MajorNonAtomicMarkingState;

View File

@@ -70,7 +70,7 @@ T TaggedField<T, kFieldOffset>::load(const Isolate* isolate, HeapObject host,
 // static
 template <typename T, int kFieldOffset>
 void TaggedField<T, kFieldOffset>::store(HeapObject host, T value) {
-#ifdef V8_CONCURRENT_MARKING
+#ifdef V8_ATOMIC_OBJECT_FIELD_WRITES
   Relaxed_Store(host, value);
 #else
   *location(host) = full_to_tagged(value.ptr());
@@ -80,7 +80,7 @@ void TaggedField<T, kFieldOffset>::store(HeapObject host, T value) {
 // static
 template <typename T, int kFieldOffset>
 void TaggedField<T, kFieldOffset>::store(HeapObject host, int offset, T value) {
-#ifdef V8_CONCURRENT_MARKING
+#ifdef V8_ATOMIC_OBJECT_FIELD_WRITES
   Relaxed_Store(host, offset, value);
 #else
   *location(host, offset) = full_to_tagged(value.ptr());