diff --git a/src/heap/concurrent-marking.cc b/src/heap/concurrent-marking.cc
index 9c0e305186..4beade2b86 100644
--- a/src/heap/concurrent-marking.cc
+++ b/src/heap/concurrent-marking.cc
@@ -168,6 +168,13 @@ class ConcurrentMarkingVisitor final
   }
 
   void MarkObject(HeapObject* object) {
+#ifdef THREAD_SANITIZER
+    // Perform a dummy acquire load to tell TSAN that there is no data race
+    // in mark-bit initialization. See MemoryChunk::Initialize for the
+    // corresponding release store.
+    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+    CHECK_NE(chunk->synchronized_heap(), nullptr);
+#endif
     if (ObjectMarking::WhiteToGrey(
             object, marking_state(object))) {
       deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kShared);
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 24b78c728e..7cc0f0db04 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -512,6 +512,11 @@ void Page::InitializeAsAnchor(Space* space) {
   SetFlag(ANCHOR);
 }
 
+Heap* MemoryChunk::synchronized_heap() {
+  return reinterpret_cast<Heap*>(
+      base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(&heap_)));
+}
+
 MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                      Address area_start, Address area_end,
                                      Executability executable, Space* owner,
@@ -554,6 +559,14 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
   if (reservation != nullptr) {
     chunk->reservation_.TakeControl(reservation);
   }
+
+#ifdef THREAD_SANITIZER
+  // The mark-bit clearing function above emits a memory fence. Since TSAN
+  // does not process memory fences, we use the following annotation to tell
+  // TSAN that there is no data race in mark-bit clearing.
+  base::Release_Store(reinterpret_cast<base::AtomicWord*>(&chunk->heap_),
+                      reinterpret_cast<base::AtomicWord>(heap));
+#endif
   return chunk;
 }
 
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index c29c6a11c0..64300e4166 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -440,6 +440,8 @@ class MemoryChunk {
 
   inline Heap* heap() const { return heap_; }
 
+  Heap* synchronized_heap();
+
   inline SkipList* skip_list() { return skip_list_; }
 
   inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
diff --git a/test/cctest/heap/test-compaction.cc b/test/cctest/heap/test-compaction.cc
index 9b62fa2067..f34ac54375 100644
--- a/test/cctest/heap/test-compaction.cc
+++ b/test/cctest/heap/test-compaction.cc
@@ -41,6 +41,7 @@ void CheckAllObjectsOnPage(std::vector<Handle<FixedArray>>& handles,
 }  // namespace
 
 HEAP_TEST(CompactionFullAbortedPage) {
+  if (FLAG_never_compact) return;
   // Test the scenario where we reach OOM during compaction and the whole page
   // is aborted.
 
@@ -85,6 +86,7 @@
 
 
 HEAP_TEST(CompactionPartiallyAbortedPage) {
+  if (FLAG_never_compact) return;
   // Test the scenario where we reach OOM during compaction and parts of the
   // page have already been migrated to a new one.
 
@@ -159,7 +161,7 @@
 
 
 HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
-  if (!FLAG_never_compact) return;
+  if (FLAG_never_compact) return;
   // Test the scenario where we reach OOM during compaction and parts of the
   // page have already been migrated to a new one. Objects on the aborted page
   // are linked together. This test makes sure that intra-aborted page pointers
diff --git a/test/cctest/heap/test-heap.cc b/test/cctest/heap/test-heap.cc
index 5c7706caad..932323d513 100644
--- a/test/cctest/heap/test-heap.cc
+++ b/test/cctest/heap/test-heap.cc
@@ -5773,6 +5773,7 @@ HEAP_TEST(Regress589413) {
 
 TEST(Regress598319) {
   if (!FLAG_incremental_marking) return;
+  ManualGCScope manual_gc_scope;
   // This test ensures that no white objects can cross the progress bar of large
   // objects during incremental marking. It checks this by using Shift() during
   // incremental marking.
diff --git a/test/cctest/test-serialize.cc b/test/cctest/test-serialize.cc
index 1de3a71527..6dd01ec9f3 100644
--- a/test/cctest/test-serialize.cc
+++ b/test/cctest/test-serialize.cc
@@ -1127,6 +1127,7 @@ TEST(CodeSerializerLargeCodeObject) {
 }
 
 TEST(CodeSerializerLargeCodeObjectWithIncrementalMarking) {
+  if (FLAG_never_compact) return;
   FLAG_stress_incremental_marking = false;
   FLAG_serialize_toplevel = true;
   FLAG_always_opt = false;
diff --git a/test/cctest/test-unboxed-doubles.cc b/test/cctest/test-unboxed-doubles.cc
index 6c0c44cdb5..80339b0fd6 100644
--- a/test/cctest/test-unboxed-doubles.cc
+++ b/test/cctest/test-unboxed-doubles.cc
@@ -911,6 +911,7 @@ TEST(LayoutDescriptorAppendIfFastOrUseFullAllDoubles) {
 
 
 TEST(Regress436816) {
+  ManualGCScope manual_gc_scope;
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
   Factory* factory = isolate->factory();
@@ -960,6 +961,7 @@ TEST(Regress436816) {
 
 
 TEST(DescriptorArrayTrimming) {
+  ManualGCScope manual_gc_scope;
   CcTest::InitializeVM();
   v8::HandleScope scope(CcTest::isolate());
   Isolate* isolate = CcTest::i_isolate();
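The core of this patch is a standard TSAN idiom: the real ordering guarantee comes from the memory fence emitted when the mark bits are cleared, but TSAN does not model fences, so the patch adds a release store of heap_ in MemoryChunk::Initialize and a paired dummy acquire load (synchronized_heap()) in the concurrent marker, both compiled in only under THREAD_SANITIZER. Below is a minimal, self-contained sketch of the same release/acquire publication pattern. It is illustrative only, not V8 code: Chunk, InitializeChunk, and ConcurrentMark are hypothetical stand-ins for MemoryChunk::Initialize and ConcurrentMarkingVisitor::MarkObject, and std::atomic plays the role of v8::base::Release_Store / v8::base::Acquire_Load.

// Illustrative sketch (not V8 code) of the publication pattern above.
#include <atomic>
#include <thread>

struct Chunk {
  int mark_bits = 0;                   // stands in for the mark bitmap
  std::atomic<bool> published{false};  // plays the role of the heap_ field
};

Chunk chunk;

void InitializeChunk() {
  chunk.mark_bits = 0;  // plain, non-atomic initialization
  // Release store: every write above happens-before any acquire load that
  // observes published == true. TSAN understands this edge, whereas it
  // would ignore a bare memory fence.
  chunk.published.store(true, std::memory_order_release);
}

void ConcurrentMark() {
  // "Dummy" acquire load, analogous to synchronized_heap(): its purpose is
  // to establish the happens-before edge, not to compute anything.
  while (!chunk.published.load(std::memory_order_acquire)) {
    // Spin until the chunk is published.
  }
  chunk.mark_bits |= 1;  // race-free thanks to the acquire/release pairing
}

int main() {
  std::thread marker(ConcurrentMark);
  InitializeChunk();
  marker.join();
  return 0;
}

Because the acquire load contributes nothing to program logic, V8 guards both sides with #ifdef THREAD_SANITIZER, so regular builds pay no cost beyond the pre-existing fence.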