[heap] Add concurrent marking write barrier

A LocalHeap creates and owns an instance of MarkingBarrier. A pointer to
the marking barrier is stored in a thread_local variable for quick access.
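
A minimal sketch of this ownership pattern (stand-in types for
illustration, not the actual V8 classes; the real changes are in the
diff below):

    #include <memory>

    struct MarkingBarrier {};  // stand-in for the per-thread barrier state

    // Set while the owning LocalHeap is alive on this thread.
    thread_local MarkingBarrier* current_marking_barrier = nullptr;

    class LocalHeap {
     public:
      LocalHeap() : marking_barrier_(std::make_unique<MarkingBarrier>()) {
        current_marking_barrier = marking_barrier_.get();  // register
      }
      ~LocalHeap() { current_marking_barrier = nullptr; }  // unregister

     private:
      std::unique_ptr<MarkingBarrier> marking_barrier_;
    };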

WriteBarrier::MarkingSlow reads the thread_local variable and invokes
the thread-local marking barrier if it is set. Otherwise, it falls back
to the main-thread barrier, heap()->marking_barrier().
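
Continuing the sketch above, the slow path amounts to a null check with
a main-thread fallback (Heap is again a stand-in; the real signatures
are in the diff):

    struct Heap {
      MarkingBarrier* marking_barrier();  // the main-thread barrier
    };

    MarkingBarrier* BarrierForCurrentThread(Heap* heap) {
      return current_marking_barrier ? current_marking_barrier
                                     : heap->marking_barrier();
    }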

Each marking barrier has its own local marking worklist, which is
published during a scavenge (so that its pointers can be updated) and at
finalization of incremental marking.
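
In shape, publishing looks like the following (condensed from
MarkingBarrier::Publish/PublishAll in the diff below; the
FLAG_local_heaps guard is omitted here):

    void MarkingBarrier::Publish() {
      if (is_activated_) {
        worklist_.Publish();  // move local worklist segments to the shared one
      }
    }

    // static: publishes the main-thread barrier and every LocalHeap's barrier.
    void MarkingBarrier::PublishAll(Heap* heap) {
      heap->marking_barrier()->Publish();
      heap->safepoint()->IterateLocalHeaps(
          [](LocalHeap* local_heap) { local_heap->marking_barrier()->Publish(); });
    }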

Typed-slot recording does not work yet because it is not thread-safe.
It will be fixed in a subsequent CL.

Bug: v8:10315
Change-Id: I221a906436cd91e7405a253ce0eb06cf68046f2c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2354809
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69448}
Author: Ulan Degenbaev <ulan@chromium.org>
Date: 2020-08-18 09:33:41 +02:00 (committed by Commit Bot)
parent ddc3672d91, commit 1dd7f3a953
12 changed files with 228 additions and 55 deletions

src/heap/heap-write-barrier.cc

@@ -14,26 +14,51 @@
 namespace v8 {
 namespace internal {
 
+namespace {
+thread_local MarkingBarrier* current_marking_barrier = nullptr;
+}  // namespace
+
+void WriteBarrier::SetForThread(MarkingBarrier* marking_barrier) {
+  DCHECK_NULL(current_marking_barrier);
+  current_marking_barrier = marking_barrier;
+}
+
+void WriteBarrier::ClearForThread(MarkingBarrier* marking_barrier) {
+  DCHECK_EQ(current_marking_barrier, marking_barrier);
+  current_marking_barrier = nullptr;
+}
+
 void WriteBarrier::MarkingSlow(Heap* heap, HeapObject host, HeapObjectSlot slot,
                                HeapObject value) {
-  heap->marking_barrier()->Write(host, slot, value);
+  MarkingBarrier* marking_barrier = current_marking_barrier
+                                        ? current_marking_barrier
+                                        : heap->marking_barrier();
+  marking_barrier->Write(host, slot, value);
 }
 
 void WriteBarrier::MarkingSlow(Heap* heap, Code host, RelocInfo* reloc_info,
                                HeapObject value) {
-  heap->marking_barrier()->Write(host, reloc_info, value);
+  MarkingBarrier* marking_barrier = current_marking_barrier
+                                        ? current_marking_barrier
+                                        : heap->marking_barrier();
+  marking_barrier->Write(host, reloc_info, value);
 }
 
 void WriteBarrier::MarkingSlow(Heap* heap, JSArrayBuffer host,
                                ArrayBufferExtension* extension) {
-  heap->marking_barrier()->Write(host, extension);
+  MarkingBarrier* marking_barrier = current_marking_barrier
+                                        ? current_marking_barrier
+                                        : heap->marking_barrier();
+  marking_barrier->Write(host, extension);
 }
 
 void WriteBarrier::MarkingSlow(Heap* heap, Map host,
                                DescriptorArray descriptor_array,
                                int number_of_own_descriptors) {
-  heap->marking_barrier()->Write(host, descriptor_array,
-                                 number_of_own_descriptors);
+  MarkingBarrier* marking_barrier = current_marking_barrier
+                                        ? current_marking_barrier
+                                        : heap->marking_barrier();
+  marking_barrier->Write(host, descriptor_array, number_of_own_descriptors);
 }
 
 int WriteBarrier::MarkingFromCode(Address raw_host, Address raw_slot) {

src/heap/heap-write-barrier.h

@@ -21,6 +21,7 @@ class Heap;
 class JSArrayBuffer;
 class Map;
 class MarkCompactCollector;
+class MarkingBarrier;
 class RelocInfo;
 
 // Note: In general it is preferred to use the macros defined in
@@ -55,6 +56,9 @@ class V8_EXPORT_PRIVATE WriteBarrier {
   // It is invoked from generated code and has to take raw addresses.
   static int MarkingFromCode(Address raw_host, Address raw_slot);
 
+  static void SetForThread(MarkingBarrier*);
+  static void ClearForThread(MarkingBarrier*);
+
  private:
   static void MarkingSlow(Heap* heap, HeapObject host, HeapObjectSlot,
                           HeapObject value);

src/heap/heap.cc

@@ -5251,8 +5251,7 @@ void Heap::SetUp() {
     concurrent_marking_.reset(new ConcurrentMarking(this, nullptr, nullptr));
   }
 
-  marking_barrier_.reset(new MarkingBarrier(this, mark_compact_collector(),
-                                            incremental_marking()));
+  marking_barrier_.reset(new MarkingBarrier(this));
 
   for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
     space_[i] = nullptr;

src/heap/incremental-marking.cc

@@ -237,7 +237,7 @@ void IncrementalMarking::StartMarking() {
   SetState(MARKING);
 
-  heap_->marking_barrier()->Activate(is_compacting_);
+  MarkingBarrier::ActivateAll(heap(), is_compacting_);
 
   heap_->isolate()->compilation_cache()->MarkCompactPrologue();
 
@@ -412,6 +412,8 @@ void IncrementalMarking::FinalizeIncrementally() {
   // so we can do it only once at the beginning of the finalization.
   RetainMaps();
 
+  MarkingBarrier::PublishAll(heap());
+
   finalize_marking_completed_ = true;
 
   if (FLAG_trace_incremental_marking) {
@@ -433,6 +435,7 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
 #endif  // ENABLE_MINOR_MC
 
   collector_->local_marking_worklists()->Publish();
+  MarkingBarrier::PublishAll(heap());
   collector_->marking_worklists()->Update(
       [
 #ifdef DEBUG

src/heap/incremental-marking.h

@@ -251,6 +251,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
   // increase chances of reusing of map transition tree in future.
   void RetainMaps();
 
+  void PublishWriteBarrierWorklists();
+
   // Updates scheduled_bytes_to_mark_ to ensure marking progress based on
   // time.
   void ScheduleBytesToMarkBasedOnTime(double time_ms);

src/heap/local-heap.cc

@@ -10,7 +10,9 @@
 #include "src/common/globals.h"
 #include "src/handles/local-handles.h"
 #include "src/heap/heap-inl.h"
+#include "src/heap/heap-write-barrier.h"
 #include "src/heap/local-heap-inl.h"
+#include "src/heap/marking-barrier.h"
 #include "src/heap/safepoint.h"
 
 namespace v8 {
@@ -32,6 +34,7 @@ LocalHeap::LocalHeap(Heap* heap,
       next_(nullptr),
       handles_(new LocalHandles),
       persistent_handles_(std::move(persistent_handles)),
+      marking_barrier_(new MarkingBarrier(this)),
       old_space_allocator_(this, heap->old_space()) {
   heap_->safepoint()->AddLocalHeap(this);
   if (persistent_handles_) {
@@ -39,9 +42,15 @@ LocalHeap::LocalHeap(Heap* heap,
   }
   DCHECK_NULL(current_local_heap);
   current_local_heap = this;
+  WriteBarrier::SetForThread(marking_barrier_.get());
+  if (heap_->incremental_marking()->IsMarking()) {
+    marking_barrier_->Activate(heap_->incremental_marking()->IsCompacting());
+  }
 }
 
 LocalHeap::~LocalHeap() {
+  marking_barrier_->Publish();
+  WriteBarrier::ClearForThread(marking_barrier_.get());
+
   // Give up LAB before parking thread
   old_space_allocator_.FreeLinearAllocationArea();

src/heap/local-heap.h

@@ -76,6 +76,7 @@ class V8_EXPORT_PRIVATE LocalHeap {
 
   Heap* heap() { return heap_; }
 
+  MarkingBarrier* marking_barrier() { return marking_barrier_.get(); }
   ConcurrentAllocator* old_space_allocator() { return &old_space_allocator_; }
 
   // Mark/Unmark linear allocation areas black. Used for black allocation.
@@ -155,6 +156,7 @@ class V8_EXPORT_PRIVATE LocalHeap {
 
   std::unique_ptr<LocalHandles> handles_;
   std::unique_ptr<PersistentHandles> persistent_handles_;
+  std::unique_ptr<MarkingBarrier> marking_barrier_;
 
   ConcurrentAllocator old_space_allocator_;

src/heap/mark-compact.cc

@@ -456,6 +456,8 @@ void MarkCompactCollector::TearDown() {
   AbortWeakObjects();
   if (heap()->incremental_marking()->IsMarking()) {
     local_marking_worklists()->Publish();
+    heap()->marking_barrier()->Publish();
+    // Marking barriers of LocalHeaps will be published in their destructors.
     marking_worklists()->Clear();
   }
 }
@@ -1954,6 +1956,7 @@ void MarkCompactCollector::MarkLiveObjects() {
   IncrementalMarking* incremental_marking = heap_->incremental_marking();
   if (was_marked_incrementally_) {
     incremental_marking->Finalize();
+    MarkingBarrier::DeactivateAll(heap());
   } else {
     CHECK(incremental_marking->IsStopped());
   }
@@ -2058,10 +2061,6 @@ void MarkCompactCollector::MarkLiveObjects() {
     }
   }
 
-  if (was_marked_incrementally_) {
-    heap()->marking_barrier()->Deactivate();
-  }
-
   epoch_++;
 }

src/heap/marking-barrier-inl.h

@@ -27,7 +27,7 @@ bool MarkingBarrier::MarkValue(HeapObject host, HeapObject value) {
     // visits the host object.
     return false;
   }
-  if (WhiteToGreyAndPush(value)) {
+  if (WhiteToGreyAndPush(value) && is_main_thread_barrier_) {
     incremental_marking_->RestartIfNotMarking();
   }
   return true;
@@ -35,7 +35,7 @@ bool MarkingBarrier::MarkValue(HeapObject host, HeapObject value) {
 
 bool MarkingBarrier::WhiteToGreyAndPush(HeapObject obj) {
   if (marking_state_.WhiteToGrey(obj)) {
-    collector_->local_marking_worklists()->Push(obj);
+    worklist_.Push(obj);
     return true;
   }
   return false;

src/heap/marking-barrier.cc

@@ -11,16 +11,29 @@
 #include "src/heap/mark-compact-inl.h"
 #include "src/heap/mark-compact.h"
 #include "src/heap/marking-barrier-inl.h"
+#include "src/heap/marking-worklist-inl.h"
+#include "src/heap/marking-worklist.h"
+#include "src/heap/safepoint.h"
 #include "src/objects/js-array-buffer.h"
 
 namespace v8 {
 namespace internal {
 
-MarkingBarrier::MarkingBarrier(Heap* heap, MarkCompactCollector* collector,
-                               IncrementalMarking* incremental_marking)
+MarkingBarrier::MarkingBarrier(Heap* heap)
     : heap_(heap),
-      collector_(collector),
-      incremental_marking_(incremental_marking) {}
+      collector_(heap_->mark_compact_collector()),
+      incremental_marking_(heap_->incremental_marking()),
+      worklist_(collector_->marking_worklists()->shared()),
+      is_main_thread_barrier_(true) {}
+
+MarkingBarrier::MarkingBarrier(LocalHeap* local_heap)
+    : heap_(local_heap->heap()),
+      collector_(heap_->mark_compact_collector()),
+      incremental_marking_(nullptr),
+      worklist_(collector_->marking_worklists()->shared()),
+      is_main_thread_barrier_(false) {}
+
+MarkingBarrier::~MarkingBarrier() { DCHECK(worklist_.IsLocalEmpty()); }
 
 void MarkingBarrier::Write(HeapObject host, HeapObjectSlot slot,
                            HeapObject value) {
@@ -32,6 +45,7 @@ void MarkingBarrier::Write(HeapObject host, HeapObjectSlot slot,
 }
 
 void MarkingBarrier::Write(Code host, RelocInfo* reloc_info, HeapObject value) {
+  DCHECK(is_main_thread_barrier_);
   if (MarkValue(host, value)) {
     if (is_compacting_) {
       collector_->RecordRelocSlot(host, reloc_info, value);
@@ -41,6 +55,7 @@ void MarkingBarrier::Write(Code host, RelocInfo* reloc_info, HeapObject value) {
 
 void MarkingBarrier::Write(JSArrayBuffer host,
                            ArrayBufferExtension* extension) {
+  DCHECK(is_main_thread_barrier_);
   if (!V8_CONCURRENT_MARKING_BOOL && marking_state_.IsBlack(host)) {
     // The extension will be marked when the marker visits the host object.
     return;
@@ -50,6 +65,7 @@ void MarkingBarrier::Write(JSArrayBuffer host,
 
 void MarkingBarrier::Write(Map host, DescriptorArray descriptor_array,
                            int number_of_own_descriptors) {
+  DCHECK(is_main_thread_barrier_);
   int16_t raw_marked = descriptor_array.raw_number_of_marked_descriptors();
   if (NumberOfMarkedDescriptors::decode(collector_->epoch(), raw_marked) <
       number_of_own_descriptors) {
@@ -58,44 +74,89 @@ void MarkingBarrier::Write(Map host, DescriptorArray descriptor_array,
   }
 }
 
-void MarkingBarrier::Deactivate(PagedSpace* space) {
+// static
+void MarkingBarrier::ActivateAll(Heap* heap, bool is_compacting) {
+  heap->marking_barrier()->Activate(is_compacting);
+  if (FLAG_local_heaps) {
+    heap->safepoint()->IterateLocalHeaps(
+        [is_compacting](LocalHeap* local_heap) {
+          local_heap->marking_barrier()->Activate(is_compacting);
+        });
+  }
+}
+
+// static
+void MarkingBarrier::DeactivateAll(Heap* heap) {
+  heap->marking_barrier()->Deactivate();
+  if (FLAG_local_heaps) {
+    heap->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
+      local_heap->marking_barrier()->Deactivate();
+    });
+  }
+}
+
+// static
+void MarkingBarrier::PublishAll(Heap* heap) {
+  heap->marking_barrier()->Publish();
+  if (FLAG_local_heaps) {
+    heap->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
+      local_heap->marking_barrier()->Publish();
+    });
+  }
+}
+
+void MarkingBarrier::Publish() {
+  if (is_activated_) {
+    worklist_.Publish();
+  }
+}
+
+void MarkingBarrier::DeactivateSpace(PagedSpace* space) {
+  DCHECK(is_main_thread_barrier_);
   for (Page* p : *space) {
     p->SetOldGenerationPageFlags(false);
   }
 }
 
-void MarkingBarrier::Deactivate(NewSpace* space) {
+void MarkingBarrier::DeactivateSpace(NewSpace* space) {
+  DCHECK(is_main_thread_barrier_);
   for (Page* p : *space) {
     p->SetYoungGenerationPageFlags(false);
   }
 }
 
 void MarkingBarrier::Deactivate() {
-  Deactivate(heap_->old_space());
-  Deactivate(heap_->map_space());
-  Deactivate(heap_->code_space());
-  Deactivate(heap_->new_space());
-  for (LargePage* p : *heap_->new_lo_space()) {
-    p->SetYoungGenerationPageFlags(false);
-    DCHECK(p->IsLargePage());
-  }
-  for (LargePage* p : *heap_->lo_space()) {
-    p->SetOldGenerationPageFlags(false);
-  }
-  for (LargePage* p : *heap_->code_lo_space()) {
-    p->SetOldGenerationPageFlags(false);
-  }
+  Publish();
   is_activated_ = false;
   is_compacting_ = false;
+  if (is_main_thread_barrier_) {
+    DeactivateSpace(heap_->old_space());
+    DeactivateSpace(heap_->map_space());
+    DeactivateSpace(heap_->code_space());
+    DeactivateSpace(heap_->new_space());
+    for (LargePage* p : *heap_->new_lo_space()) {
+      p->SetYoungGenerationPageFlags(false);
+      DCHECK(p->IsLargePage());
+    }
+    for (LargePage* p : *heap_->lo_space()) {
+      p->SetOldGenerationPageFlags(false);
+    }
+    for (LargePage* p : *heap_->code_lo_space()) {
+      p->SetOldGenerationPageFlags(false);
    }
+  }
+  DCHECK(worklist_.IsLocalEmpty());
 }
 
-void MarkingBarrier::Activate(PagedSpace* space) {
+void MarkingBarrier::ActivateSpace(PagedSpace* space) {
+  DCHECK(is_main_thread_barrier_);
   for (Page* p : *space) {
     p->SetOldGenerationPageFlags(true);
   }
 }
 
-void MarkingBarrier::Activate(NewSpace* space) {
+void MarkingBarrier::ActivateSpace(NewSpace* space) {
+  DCHECK(is_main_thread_barrier_);
   for (Page* p : *space) {
     p->SetYoungGenerationPageFlags(true);
   }
@@ -103,24 +164,27 @@ void MarkingBarrier::Activate(NewSpace* space) {
 
 void MarkingBarrier::Activate(bool is_compacting) {
   DCHECK(!is_activated_);
+  DCHECK(worklist_.IsLocalEmpty());
   is_compacting_ = is_compacting;
   is_activated_ = true;
-  Activate(heap_->old_space());
-  Activate(heap_->map_space());
-  Activate(heap_->code_space());
-  Activate(heap_->new_space());
-  for (LargePage* p : *heap_->new_lo_space()) {
-    p->SetYoungGenerationPageFlags(true);
-    DCHECK(p->IsLargePage());
-  }
-  for (LargePage* p : *heap_->lo_space()) {
-    p->SetOldGenerationPageFlags(true);
-  }
-  for (LargePage* p : *heap_->code_lo_space()) {
-    p->SetOldGenerationPageFlags(true);
+  if (is_main_thread_barrier_) {
+    ActivateSpace(heap_->old_space());
+    ActivateSpace(heap_->map_space());
+    ActivateSpace(heap_->code_space());
+    ActivateSpace(heap_->new_space());
+    for (LargePage* p : *heap_->new_lo_space()) {
+      p->SetYoungGenerationPageFlags(true);
+      DCHECK(p->IsLargePage());
+    }
+    for (LargePage* p : *heap_->lo_space()) {
+      p->SetOldGenerationPageFlags(true);
+    }
+    for (LargePage* p : *heap_->code_lo_space()) {
+      p->SetOldGenerationPageFlags(true);
+    }
   }
 }

src/heap/marking-barrier.h

@@ -14,14 +14,23 @@ namespace internal {
 
 class Heap;
 class IncrementalMarking;
+class LocalHeap;
 class PagedSpace;
 class NewSpace;
 
 class MarkingBarrier {
  public:
-  MarkingBarrier(Heap*, MarkCompactCollector*, IncrementalMarking*);
+  explicit MarkingBarrier(Heap*);
+  explicit MarkingBarrier(LocalHeap*);
+  ~MarkingBarrier();
 
   void Activate(bool is_compacting);
   void Deactivate();
+  void Publish();
+
+  static void ActivateAll(Heap* heap, bool is_compacting);
+  static void DeactivateAll(Heap* heap);
+  static void PublishAll(Heap* heap);
 
   void Write(HeapObject host, HeapObjectSlot, HeapObject value);
   void Write(Code host, RelocInfo*, HeapObject value);
@@ -36,18 +45,20 @@ class MarkingBarrier {
 
   inline bool WhiteToGreyAndPush(HeapObject value);
 
-  void Activate(PagedSpace*);
-  void Activate(NewSpace*);
-  void Deactivate(PagedSpace*);
-  void Deactivate(NewSpace*);
+  void ActivateSpace(PagedSpace*);
+  void ActivateSpace(NewSpace*);
+  void DeactivateSpace(PagedSpace*);
+  void DeactivateSpace(NewSpace*);
 
-  MarkingState marking_state_;
   Heap* heap_;
   MarkCompactCollector* collector_;
   IncrementalMarking* incremental_marking_;
+  MarkingWorklist::Local worklist_;
+  MarkingState marking_state_;
   bool is_compacting_ = false;
   bool is_activated_ = false;
+  bool is_main_thread_barrier_;
 };
 
 }  // namespace internal

test/cctest/heap/test-concurrent-allocation.cc

@@ -250,5 +250,60 @@ UNINITIALIZED_TEST(ConcurrentBlackAllocation) {
   isolate->Dispose();
 }
 
+class ConcurrentWriteBarrierThread final : public v8::base::Thread {
+ public:
+  explicit ConcurrentWriteBarrierThread(Heap* heap, FixedArray fixed_array,
+                                        HeapObject value)
+      : v8::base::Thread(base::Thread::Options("ThreadWithLocalHeap")),
+        heap_(heap),
+        fixed_array_(fixed_array),
+        value_(value) {}
+
+  void Run() override {
+    LocalHeap local_heap(heap_);
+    fixed_array_.set(0, value_);
+  }
+
+  Heap* heap_;
+  FixedArray fixed_array_;
+  HeapObject value_;
+};
+
+UNINITIALIZED_TEST(ConcurrentWriteBarrier) {
+  ManualGCScope manual_gc_scope;
+  FLAG_concurrent_allocation = true;
+  FLAG_local_heaps = true;
+
+  v8::Isolate::CreateParams create_params;
+  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+  v8::Isolate* isolate = v8::Isolate::New(create_params);
+  Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+  Heap* heap = i_isolate->heap();
+
+  FixedArray fixed_array;
+  HeapObject value;
+  {
+    HandleScope handle_scope(i_isolate);
+    Handle<FixedArray> fixed_array_handle(
+        i_isolate->factory()->NewFixedArray(1));
+    Handle<HeapNumber> value_handle(i_isolate->factory()->NewHeapNumber(1.1));
+    fixed_array = *fixed_array_handle;
+    value = *value_handle;
+  }
+  heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
+                                i::GarbageCollectionReason::kTesting);
+  CHECK(heap->incremental_marking()->marking_state()->IsWhite(value));
+
+  auto thread =
+      std::make_unique<ConcurrentWriteBarrierThread>(heap, fixed_array, value);
+  CHECK(thread->Start());
+  thread->Join();
+
+  CHECK(heap->incremental_marking()->marking_state()->IsBlackOrGrey(value));
+
+  isolate->Dispose();
+}
+
 }  // namespace internal
 }  // namespace v8