[heap] TSAN annotation for mark-bits initialization.
This patch also fixes several cctests that require manual GC.

BUG=chromium:694255
Change-Id: Ida93ed2498a6c5b0187ee78d2b1da27d2ff1906a
Reviewed-on: https://chromium-review.googlesource.com/533233
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#45913}
commit eb6b3408dc
parent 70c6830795
@@ -168,6 +168,13 @@ class ConcurrentMarkingVisitor final
   }
 
   void MarkObject(HeapObject* object) {
+#ifdef THREAD_SANITIZER
+    // Perform a dummy acquire load to tell TSAN that there is no data race
+    // in mark-bit initialization. See MemoryChunk::Initialize for the
+    // corresponding release store.
+    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+    CHECK_NE(chunk->synchronized_heap(), nullptr);
+#endif
     if (ObjectMarking::WhiteToGrey<MarkBit::AccessMode::ATOMIC>(
             object, marking_state(object))) {
       deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kShared);
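For context, the pattern above lets any number of marker threads race on the same object safely: only the thread that wins the atomic white-to-grey transition pushes the object onto the shared deque, so each object is enqueued at most once. Below is a minimal, self-contained sketch of that idea; Color, FakeObject, and Worklist are illustrative stand-ins, not V8's ObjectMarking or marking-deque APIs.

// Simplified analogue of the WhiteToGrey + Push pattern; not V8 code.
#include <atomic>
#include <cstdint>
#include <deque>
#include <mutex>

enum class Color : uint8_t { kWhite, kGrey, kBlack };

struct FakeObject {
  std::atomic<Color> color{Color::kWhite};
};

class Worklist {
 public:
  void Push(FakeObject* object) {
    std::lock_guard<std::mutex> guard(mutex_);
    objects_.push_back(object);
  }

 private:
  std::mutex mutex_;
  std::deque<FakeObject*> objects_;
};

// Returns true only for the thread that actually performs the transition.
bool WhiteToGrey(FakeObject* object) {
  Color expected = Color::kWhite;
  return object->color.compare_exchange_strong(expected, Color::kGrey);
}

void MarkObject(FakeObject* object, Worklist* worklist) {
  if (WhiteToGrey(object)) {
    worklist->Push(object);  // Only the winning thread enqueues the object.
  }
}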
@@ -512,6 +512,11 @@ void Page::InitializeAsAnchor(Space* space) {
   SetFlag(ANCHOR);
 }
 
+Heap* MemoryChunk::synchronized_heap() {
+  return reinterpret_cast<Heap*>(
+      base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(&heap_)));
+}
+
 MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                      Address area_start, Address area_end,
                                      Executability executable, Space* owner,
@@ -554,6 +559,14 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
   if (reservation != nullptr) {
     chunk->reservation_.TakeControl(reservation);
   }
+
+#ifdef THREAD_SANITIZER
+  // The mark-bit clearing function above emits a memory fence. Since TSAN
+  // does not process memory fences, we use the following annotation to tell
+  // TSAN that there is no data race in mark-bit clearing.
+  base::Release_Store(reinterpret_cast<base::AtomicWord*>(&chunk->heap_),
+                      reinterpret_cast<base::AtomicWord>(heap));
+#endif
   return chunk;
 }
 
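The two spaces.cc hunks above form a release/acquire pair: MemoryChunk::Initialize publishes heap_ with a release store after the mark bits have been cleared, and the concurrent marker re-reads heap_ through synchronized_heap()'s acquire load. TSAN does not model the standalone memory fence emitted by the mark-bit clearing code, but it does model this pair, which is why the otherwise redundant load suppresses the false data-race report. The following is a minimal standalone sketch of the same idea, using std::atomic in place of base::Release_Store/base::Acquire_Load; Chunk, published, and the field names are assumptions for illustration only.

// Sketch of a TSAN-visible publication pattern; not V8's MemoryChunk code.
#include <atomic>
#include <cassert>
#include <thread>

struct Chunk {
  int mark_bits[64] = {};              // Plain (non-atomic) data to publish.
  std::atomic<bool> published{false};  // Proxy variable carrying the ordering.
};

void InitializeChunk(Chunk* chunk) {
  for (int& bit : chunk->mark_bits) bit = 0;  // "Clear" the mark bits.
  // Release store: all writes above happen-before any thread that observes
  // published == true with an acquire load. TSAN tracks this pair, whereas it
  // would ignore a bare std::atomic_thread_fence.
  chunk->published.store(true, std::memory_order_release);
}

void ConcurrentMarker(Chunk* chunk) {
  // Dummy acquire load, analogous to synchronized_heap() in the diff above:
  // its only purpose is to make the happens-before edge visible to TSAN.
  while (!chunk->published.load(std::memory_order_acquire)) {
  }
  assert(chunk->mark_bits[0] == 0);  // Ordered after initialization.
}

int main() {
  Chunk chunk;
  std::thread marker(ConcurrentMarker, &chunk);
  InitializeChunk(&chunk);
  marker.join();
  return 0;
}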
@@ -440,6 +440,8 @@ class MemoryChunk {
 
   inline Heap* heap() const { return heap_; }
 
+  Heap* synchronized_heap();
+
   inline SkipList* skip_list() { return skip_list_; }
 
   inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
@@ -41,6 +41,7 @@ void CheckAllObjectsOnPage(std::vector<Handle<FixedArray>>& handles,
 }  // namespace
 
 HEAP_TEST(CompactionFullAbortedPage) {
+  if (FLAG_never_compact) return;
   // Test the scenario where we reach OOM during compaction and the whole page
   // is aborted.
 
@@ -85,6 +86,7 @@ HEAP_TEST(CompactionFullAbortedPage) {
 
 
 HEAP_TEST(CompactionPartiallyAbortedPage) {
+  if (FLAG_never_compact) return;
   // Test the scenario where we reach OOM during compaction and parts of the
   // page have already been migrated to a new one.
 
@@ -159,7 +161,7 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
 
 
 HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
-  if (!FLAG_never_compact) return;
+  if (FLAG_never_compact) return;
   // Test the scenario where we reach OOM during compaction and parts of the
   // page have already been migrated to a new one. Objects on the aborted page
   // are linked together. This test makes sure that intra-aborted page pointers
@@ -5773,6 +5773,7 @@ HEAP_TEST(Regress589413) {
 
 TEST(Regress598319) {
   if (!FLAG_incremental_marking) return;
+  ManualGCScope manual_gc_scope;
   // This test ensures that no white objects can cross the progress bar of large
   // objects during incremental marking. It checks this by using Shift() during
   // incremental marking.
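ManualGCScope itself is not part of this diff; it is a cctest helper that, for the lifetime of the scope, turns off the GC features that would otherwise run behind the test's back, so tests that drive marking manually (like Regress598319 above) see a deterministic heap. A hypothetical sketch of that RAII shape follows; the flag set and names are assumptions, not V8's actual helper.

// Hypothetical RAII guard in the spirit of ManualGCScope; the flag names and
// the saved/restored set are illustrative assumptions, not V8's real code.
struct TestFlags {
  bool concurrent_marking = true;
  bool stress_incremental_marking = true;
};

TestFlags g_flags;  // Stand-in for V8's global FLAG_* variables.

class ManualGCScopeSketch {
 public:
  ManualGCScopeSketch() : saved_(g_flags) {
    // Disable anything that could start GC work concurrently with the test,
    // so the test fully controls when marking happens.
    g_flags.concurrent_marking = false;
    g_flags.stress_incremental_marking = false;
  }
  ~ManualGCScopeSketch() { g_flags = saved_; }  // Restore flags on scope exit.

 private:
  TestFlags saved_;
};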
@@ -1127,6 +1127,7 @@ TEST(CodeSerializerLargeCodeObject) {
 }
 
 TEST(CodeSerializerLargeCodeObjectWithIncrementalMarking) {
+  if (FLAG_never_compact) return;
   FLAG_stress_incremental_marking = false;
   FLAG_serialize_toplevel = true;
   FLAG_always_opt = false;
@@ -911,6 +911,7 @@ TEST(LayoutDescriptorAppendIfFastOrUseFullAllDoubles) {
 
 
 TEST(Regress436816) {
+  ManualGCScope manual_gc_scope;
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
   Factory* factory = isolate->factory();
@@ -960,6 +961,7 @@ TEST(Regress436816) {
 
 
 TEST(DescriptorArrayTrimming) {
+  ManualGCScope manual_gc_scope;
   CcTest::InitializeVM();
   v8::HandleScope scope(CcTest::isolate());
   Isolate* isolate = CcTest::i_isolate();