[heap] Move marking states to heap.
Detach the marking states from the collectors and move them to the heap.
Also update users to access them via the heap, reducing dependencies on
the collectors. This is a prerequisite for moving the sweeper to the
heap, which is needed for concurrent sweeping in MinorMC.

Bug: v8:12612
Change-Id: Ia0bb2b7566b24eeb0d75c911edbfd626f07dad0f
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3925548
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#83542}
Parent: 82708bbd0f
Commit: 8efa1719f0
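The recurring pattern in the diff below is that call sites which previously reached a marking state through a collector (or through incremental marking) now ask the heap directly, while the collectors and incremental marking only cache pointers owned by the heap. A minimal sketch of the before/after accessor usage, based on the accessors this change adds to Heap (simplified, not the full V8 API surface):

    // Before: marking states owned by the collectors / incremental marking.
    MarkingState* old_state = heap->mark_compact_collector()->marking_state();
    NonAtomicMarkingState* old_non_atomic =
        heap->mark_compact_collector()->non_atomic_marking_state();

    // After: the heap owns the states; collectors merely hold pointers to them.
    MarkingState* state = heap->marking_state();
    NonAtomicMarkingState* non_atomic = heap->non_atomic_marking_state();
    AtomicMarkingState* atomic = heap->atomic_marking_state();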
@@ -1499,6 +1499,8 @@ filegroup(
"src/heap/marking-barrier.cc",
"src/heap/marking-barrier.h",
"src/heap/marking-barrier-inl.h",
"src/heap/marking-state.h",
"src/heap/marking-state-inl.h",
"src/heap/marking-visitor-inl.h",
"src/heap/marking-visitor.h",
"src/heap/marking-worklist-inl.h",

BUILD.gn
@@ -3079,6 +3079,8 @@ v8_header_set("v8_internal_headers") {
"src/heap/mark-compact.h",
"src/heap/marking-barrier-inl.h",
"src/heap/marking-barrier.h",
"src/heap/marking-state-inl.h",
"src/heap/marking-state.h",
"src/heap/marking-visitor-inl.h",
"src/heap/marking-visitor.h",
"src/heap/marking-worklist-inl.h",

@@ -730,14 +730,13 @@ void GlobalHandles::TracedNode::Verify(const Address* const* slot) {
const TracedNode* node = FromLocation(*slot);
auto* global_handles = GlobalHandles::From(node);
DCHECK(node->IsInUse());
auto* incremental_marking =
global_handles->isolate()->heap()->incremental_marking();
Heap* heap = global_handles->isolate()->heap();
auto* incremental_marking = heap->incremental_marking();
if (incremental_marking && incremental_marking->IsMarking()) {
Object object = node->object();
if (object.IsHeapObject()) {
DCHECK_IMPLIES(node->markbit<AccessMode::ATOMIC>(),
!incremental_marking->marking_state()->IsWhite(
HeapObject::cast(object)));
!heap->marking_state()->IsWhite(HeapObject::cast(object)));
}
}
DCHECK_IMPLIES(ObjectInYoungGeneration(node->object()),
@@ -16,6 +16,7 @@
#include "src/heap/heap.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-state-inl.h"
#include "src/heap/marking-visitor-inl.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking.h"

@@ -696,8 +696,7 @@ void CppHeap::EnterFinalPause(cppgc::EmbedderStackState stack_state) {
auto& heap = *isolate()->heap();
marker.conservative_visitor().SetGlobalHandlesMarkingVisitor(
std::make_unique<GlobalHandleMarkingVisitor>(
heap, *heap.mark_compact_collector()->marking_state(),
*heap.mark_compact_collector()->local_marking_worklists()));
heap, *heap.mark_compact_collector()->local_marking_worklists()));
}
marker.EnterAtomicPause(stack_state);
if (isolate_ && *collection_type_ == CollectionType::kMinor) {

@@ -14,6 +14,7 @@
#include "src/heap/cppgc-js/unified-heap-marking-state.h"
#include "src/heap/heap.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-state-inl.h"
#include "src/heap/marking-worklist-inl.h"

namespace v8 {

@@ -13,8 +13,7 @@ namespace internal {
UnifiedHeapMarkingState::UnifiedHeapMarkingState(
Heap* heap, MarkingWorklists::Local* local_marking_worklist)
: heap_(heap),
marking_state_(heap_ ? heap_->mark_compact_collector()->marking_state()
: nullptr),
marking_state_(heap_ ? heap_->marking_state() : nullptr),
local_marking_worklist_(local_marking_worklist),
track_retaining_path_(v8_flags.track_retaining_path) {
DCHECK_IMPLIES(v8_flags.track_retaining_path,
@@ -4,16 +4,16 @@

#include "src/heap/global-handle-marking-visitor.h"

#include "src/heap/marking-state-inl.h"
#include "src/heap/marking-worklist-inl.h"

namespace v8 {
namespace internal {

GlobalHandleMarkingVisitor::GlobalHandleMarkingVisitor(
Heap& heap, MarkingState& marking_state,
MarkingWorklists::Local& local_marking_worklist)
Heap& heap, MarkingWorklists::Local& local_marking_worklist)
: heap_(heap),
marking_state_(marking_state),
marking_state_(*heap_.marking_state()),
local_marking_worklist_(local_marking_worklist),
traced_node_bounds_(
heap.isolate()->global_handles()->GetTracedNodeBounds()) {}

@@ -18,7 +18,7 @@ namespace internal {
// which requires them to be kept alive.
class GlobalHandleMarkingVisitor final : public ::heap::base::StackVisitor {
public:
GlobalHandleMarkingVisitor(Heap&, MarkingState&, MarkingWorklists::Local&);
GlobalHandleMarkingVisitor(Heap&, MarkingWorklists::Local&);
~GlobalHandleMarkingVisitor() override = default;

void VisitPointer(const void*) override;
@@ -26,6 +26,7 @@
#include "src/heap/heap-write-barrier.h"
#include "src/heap/heap.h"
#include "src/heap/large-spaces.h"
#include "src/heap/marking-state-inl.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-chunk.h"

@@ -63,6 +63,8 @@
#include "src/heap/mark-compact.h"
#include "src/heap/marking-barrier-inl.h"
#include "src/heap/marking-barrier.h"
#include "src/heap/marking-state-inl.h"
#include "src/heap/marking-state.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-measurement.h"

@@ -224,7 +226,10 @@ Heap::Heap()
allocation_type_for_in_place_internalizable_strings_(
isolate()->OwnsStringTables() ? AllocationType::kOld
: AllocationType::kSharedOld),
collection_barrier_(new CollectionBarrier(this)) {
collection_barrier_(new CollectionBarrier(this)),
marking_state_(isolate_),
non_atomic_marking_state_(isolate_),
atomic_marking_state_(isolate_) {
// Ensure old_generation_size_ is a multiple of kPageSize.
DCHECK_EQ(0, max_old_generation_size() & (Page::kPageSize - 1));

@@ -3680,9 +3685,9 @@ void Heap::CreateFillerForArray(T object, int elements_to_trim,
// Clear the mark bits of the black area that belongs now to the filler.
// This is an optimization. The sweeper will release black fillers anyway.
if (incremental_marking()->black_allocation() &&
incremental_marking()->marking_state()->IsBlackOrGrey(filler)) {
marking_state()->IsBlackOrGrey(filler)) {
Page* page = Page::FromAddress(new_end);
incremental_marking()->marking_state()->bitmap(page)->ClearRange(
marking_state()->bitmap(page)->ClearRange(
page->AddressToMarkbitIndex(new_end),
page->AddressToMarkbitIndex(new_end + bytes_to_trim));
}
@@ -31,6 +31,7 @@
#include "src/heap/base/stack.h"
#include "src/heap/gc-callbacks.h"
#include "src/heap/heap-allocator.h"
#include "src/heap/marking-state.h"
#include "src/init/heap-symbols.h"
#include "src/objects/allocation-site.h"
#include "src/objects/fixed-array.h"

@@ -1699,6 +1700,14 @@ class Heap {
return (current_gc_flags_ & kReduceMemoryFootprintMask) != 0;
}

MarkingState* marking_state() { return &marking_state_; }

NonAtomicMarkingState* non_atomic_marking_state() {
return &non_atomic_marking_state_;
}

AtomicMarkingState* atomic_marking_state() { return &atomic_marking_state_; }

private:
class AllocationTrackerForDebugging;

@@ -2454,6 +2463,10 @@ class Heap {

std::unique_ptr<third_party_heap::Heap> tp_heap_;

MarkingState marking_state_;
NonAtomicMarkingState non_atomic_marking_state_;
AtomicMarkingState atomic_marking_state_;

// Classes in "heap" can be friends.
friend class AlwaysAllocateScope;
friend class ArrayBufferCollector;
@@ -8,6 +8,7 @@
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/marking-state-inl.h"

namespace v8 {
namespace internal {

@@ -58,9 +58,8 @@ IncrementalMarking::IncrementalMarking(Heap* heap, WeakObjects* weak_objects)
incremental_marking_job_(heap),
new_generation_observer_(this, kYoungGenerationAllocatedThreshold),
old_generation_observer_(this, kOldGenerationAllocatedThreshold),
marking_state_(heap->isolate()),
atomic_marking_state_(heap->isolate()),
non_atomic_marking_state_(heap->isolate()) {}
marking_state_(heap->marking_state()),
atomic_marking_state_(heap->atomic_marking_state()) {}

void IncrementalMarking::MarkBlackAndVisitObjectDueToLayoutChange(
HeapObject obj) {

@@ -284,9 +283,8 @@ void IncrementalMarking::MarkRoots() {
->CreateJob(
v8::TaskPriority::kUserBlocking,
std::make_unique<YoungGenerationMarkingJob>(
heap_->isolate(), minor_collector_,
minor_collector_->marking_worklists(), std::move(marking_items),
YoungMarkingJobType::kIncremental))
heap_->isolate(), heap_, minor_collector_->marking_worklists(),
std::move(marking_items), YoungMarkingJobType::kIncremental))
->Join();
}
}
@@ -454,14 +452,13 @@ void IncrementalMarking::UpdateMarkingWorklistAfterYoungGenGC() {

Map filler_map = ReadOnlyRoots(heap_).one_pointer_filler_map();

MarkingState* minor_marking_state =
heap()->minor_mark_compact_collector()->marking_state();
MarkingState* marking_state = heap()->marking_state();

major_collector_->local_marking_worklists()->Publish();
MarkingBarrier::PublishAll(heap());
PtrComprCageBase cage_base(heap_->isolate());
major_collector_->marking_worklists()->Update([this, minor_marking_state,
cage_base, filler_map](
major_collector_->marking_worklists()->Update([this, marking_state, cage_base,
filler_map](
HeapObject obj,
HeapObject* out) -> bool {
DCHECK(obj.IsHeapObject());

@@ -479,7 +476,7 @@ void IncrementalMarking::UpdateMarkingWorklistAfterYoungGenGC() {
}
HeapObject dest = map_word.ToForwardingAddress();
USE(this);
DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj.IsFreeSpaceOrFiller());
DCHECK_IMPLIES(marking_state->IsWhite(obj), obj.IsFreeSpaceOrFiller());
if (dest.InSharedHeap()) {
// Object got promoted into the shared heap. Drop it from the client
// heap marking worklist.

@@ -497,7 +494,7 @@ void IncrementalMarking::UpdateMarkingWorklistAfterYoungGenGC() {
DCHECK_IMPLIES(
v8_flags.minor_mc,
!obj.map_word(cage_base, kRelaxedLoad).IsForwardingAddress());
if (minor_marking_state->IsWhite(obj)) {
if (marking_state->IsWhite(obj)) {
return false;
}
// Either a large object or an object marked by the minor

@@ -509,13 +506,13 @@ void IncrementalMarking::UpdateMarkingWorklistAfterYoungGenGC() {
// Only applicable during minor MC garbage collections.
if (!Heap::IsLargeObject(obj) &&
Page::FromHeapObject(obj)->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
if (minor_marking_state->IsWhite(obj)) {
if (marking_state->IsWhite(obj)) {
return false;
}
*out = obj;
return true;
}
DCHECK_IMPLIES(marking_state()->IsWhite(obj),
DCHECK_IMPLIES(marking_state->IsWhite(obj),
obj.IsFreeSpaceOrFiller(cage_base));
// Skip one word filler objects that appear on the
// stack when we perform in place array shift.
@@ -86,12 +86,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {

IncrementalMarking(Heap* heap, WeakObjects* weak_objects);

MarkingState* marking_state() { return &marking_state_; }
AtomicMarkingState* atomic_marking_state() { return &atomic_marking_state_; }
NonAtomicMarkingState* non_atomic_marking_state() {
return &non_atomic_marking_state_;
}

void NotifyLeftTrimming(HeapObject from, HeapObject to);

bool IsStopped() const { return !IsMarking(); }

@@ -169,6 +163,9 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
}

private:
MarkingState* marking_state() { return marking_state_; }
AtomicMarkingState* atomic_marking_state() { return atomic_marking_state_; }

class IncrementalMarkingRootMarkingVisitor;

class Observer : public AllocationObserver {

@@ -270,9 +267,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
Observer new_generation_observer_;
Observer old_generation_observer_;

MarkingState marking_state_;
AtomicMarkingState atomic_marking_state_;
NonAtomicMarkingState non_atomic_marking_state_;
MarkingState* const marking_state_;
AtomicMarkingState* const atomic_marking_state_;

base::Mutex background_live_bytes_mutex_;
std::unordered_map<MemoryChunk*, intptr_t> background_live_bytes_;
@@ -7,6 +7,7 @@

#include "src/base/logging.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/marking-state-inl.h"
#include "src/heap/spaces.h"
#include "src/objects/heap-object.h"
#include "src/objects/objects-inl.h"

@@ -34,7 +34,10 @@ InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToShared(

InvalidatedSlotsFilter::InvalidatedSlotsFilter(
MemoryChunk* chunk, InvalidatedSlots* invalidated_slots,
RememberedSetType remembered_set_type, LivenessCheck liveness_check) {
RememberedSetType remembered_set_type, LivenessCheck liveness_check)
: marking_state_(liveness_check == LivenessCheck::kYes
? chunk->heap()->non_atomic_marking_state()
: nullptr) {
USE(remembered_set_type);
invalidated_slots = invalidated_slots ? invalidated_slots : &empty_;

@@ -42,14 +45,6 @@ InvalidatedSlotsFilter::InvalidatedSlotsFilter(
iterator_end_ = invalidated_slots->end();
sentinel_ = chunk->area_end();

if (liveness_check == LivenessCheck::kYes) {
marking_state_ =
chunk->heap()->mark_compact_collector()->non_atomic_marking_state();
} else {
DCHECK_EQ(LivenessCheck::kNo, liveness_check);
marking_state_ = nullptr;
}

// Invoke NextInvalidatedObject twice, to initialize
// invalidated_start_ to the first invalidated object and
// next_invalidated_object_ to the second one.

@@ -64,7 +64,7 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
Address sentinel_;
InvalidatedObjectInfo current_{kNullAddress, 0, false};
InvalidatedObjectInfo next_{kNullAddress, 0, false};
NonAtomicMarkingState* marking_state_;
NonAtomicMarkingState* const marking_state_;
InvalidatedSlots empty_;
#ifdef DEBUG
Address last_slot_;
@@ -11,6 +11,7 @@
#include "src/heap/combined-heap.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/list.h"
#include "src/heap/marking-state-inl.h"
#include "src/heap/marking.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"

@@ -151,11 +152,10 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
heap()->GCFlagsForIncrementalMarking(),
kGCCallbackScheduleIdleGarbageCollection);
if (heap()->incremental_marking()->black_allocation()) {
heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
heap()->marking_state()->WhiteToBlack(object);
}
DCHECK_IMPLIES(
heap()->incremental_marking()->black_allocation(),
heap()->incremental_marking()->marking_state()->IsBlack(object));
DCHECK_IMPLIES(heap()->incremental_marking()->black_allocation(),
heap()->marking_state()->IsBlack(object));
page->InitializationMemoryFence();
heap()->NotifyOldGenerationExpansion(identity(), page);
AdvanceAndInvokeAllocationObservers(object.address(),

@@ -185,11 +185,10 @@ AllocationResult OldLargeObjectSpace::AllocateRawBackground(
HeapObject object = page->GetObject();
heap()->StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
if (heap()->incremental_marking()->black_allocation()) {
heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
heap()->marking_state()->WhiteToBlack(object);
}
DCHECK_IMPLIES(
heap()->incremental_marking()->black_allocation(),
heap()->incremental_marking()->marking_state()->IsBlack(object));
DCHECK_IMPLIES(heap()->incremental_marking()->black_allocation(),
heap()->marking_state()->IsBlack(object));
page->InitializationMemoryFence();
if (identity() == CODE_LO_SPACE) {
heap()->isolate()->AddCodeMemoryChunk(page);

@@ -504,10 +503,7 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
page->SetFlag(MemoryChunk::TO_PAGE);
UpdatePendingObject(result);
if (v8_flags.minor_mc) {
heap()
->minor_mark_compact_collector()
->non_atomic_marking_state()
->ClearLiveness(page);
heap()->non_atomic_marking_state()->ClearLiveness(page);
}
page->InitializationMemoryFence();
DCHECK(page->IsLargePage());
@@ -11,6 +11,7 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/index-generator.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-state-inl.h"
#include "src/heap/marking-worklist-inl.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/objects-visiting-inl.h"

@@ -43,7 +44,7 @@ void MarkCompactCollector::MarkRootObject(Root root, HeapObject obj) {

void MinorMarkCompactCollector::MarkRootObject(HeapObject obj) {
if (Heap::InYoungGeneration(obj) &&
non_atomic_marking_state_.WhiteToGrey(obj)) {
non_atomic_marking_state()->WhiteToGrey(obj)) {
local_marking_worklists_->Push(obj);
}
}

@@ -321,13 +322,12 @@ enum class YoungMarkingJobType { kAtomic, kIncremental };

class YoungGenerationMarkingJob : public v8::JobTask {
public:
YoungGenerationMarkingJob(Isolate* isolate,
MinorMarkCompactCollector* collector,
YoungGenerationMarkingJob(Isolate* isolate, Heap* heap,
MarkingWorklists* global_worklists,
std::vector<PageMarkingItem> marking_items,
YoungMarkingJobType young_marking_job_type)
: isolate_(isolate),
collector_(collector),
heap_(heap),
global_worklists_(global_worklists),
marking_items_(std::move(marking_items)),
remaining_marking_items_(marking_items_.size()),

@@ -345,7 +345,7 @@ class YoungGenerationMarkingJob : public v8::JobTask {
void ProcessMarkingItems(YoungGenerationMarkingTask* task);

Isolate* isolate_;
MinorMarkCompactCollector* collector_;
Heap* heap_;
MarkingWorklists* global_worklists_;
std::vector<PageMarkingItem> marking_items_;
std::atomic_size_t remaining_marking_items_{0};
@@ -36,6 +36,7 @@
#include "src/heap/large-spaces.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/marking-barrier.h"
#include "src/heap/marking-state-inl.h"
#include "src/heap/marking-visitor-inl.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/memory-chunk-layout.h"

@@ -218,8 +219,7 @@ class FullMarkingVerifier : public MarkingVerifier {
public:
explicit FullMarkingVerifier(Heap* heap)
: MarkingVerifier(heap),
marking_state_(
heap->mark_compact_collector()->non_atomic_marking_state()) {}
marking_state_(heap->non_atomic_marking_state()) {}

void Run() override {
VerifyRoots();

@@ -309,7 +309,7 @@ class FullMarkingVerifier : public MarkingVerifier {
}
}

NonAtomicMarkingState* marking_state_;
NonAtomicMarkingState* const marking_state_;
};

class EvacuationVerifier : public ObjectVisitorWithCageBases,

@@ -474,9 +474,9 @@ class FullEvacuationVerifier : public EvacuationVerifier {
} // namespace
#endif // VERIFY_HEAP

// =============================================================================
// MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector
// =============================================================================
// ==================================================================
// CollectorBase, MinorMarkCompactCollector, MarkCompactCollector
// ==================================================================

namespace {
@@ -502,8 +502,8 @@ int NumberOfParallelCompactionTasks(Heap* heap) {
CollectorBase::CollectorBase(Heap* heap, GarbageCollector collector)
: heap_(heap),
garbage_collector_(collector),
marking_state_(heap->isolate()),
non_atomic_marking_state_(heap->isolate()) {
marking_state_(heap_->marking_state()),
non_atomic_marking_state_(heap_->non_atomic_marking_state()) {
DCHECK_NE(GarbageCollector::SCAVENGER, garbage_collector_);
}

@@ -566,7 +566,7 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
uses_shared_heap_(isolate()->has_shared_heap() || isolate()->is_shared()),
is_shared_heap_isolate_(isolate()->is_shared_heap_isolate()),
should_record_old_to_shared_slots_(isolate()->has_shared_heap()),
sweeper_(new Sweeper(heap, non_atomic_marking_state())) {
sweeper_(new Sweeper(heap)) {
}

MarkCompactCollector::~MarkCompactCollector() { delete sweeper_; }

@@ -1455,7 +1455,7 @@ class InternalizedStringTableCleaner final : public RootVisitor {
OffHeapObjectSlot end) override {
DCHECK_EQ(root, Root::kStringTable);
// Visit all HeapObject pointers in [start, end).
auto* marking_state = heap_->mark_compact_collector()->marking_state();
auto* marking_state = heap_->marking_state();
Isolate* isolate = heap_->isolate();
for (OffHeapObjectSlot p = start; p < end; ++p) {
Object o = p.load(isolate);

@@ -1485,8 +1485,7 @@ class ExternalStringTableCleaner : public RootVisitor {
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
// Visit all HeapObject pointers in [start, end).
NonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
NonAtomicMarkingState* marking_state = heap_->non_atomic_marking_state();
Object the_hole = ReadOnlyRoots(heap_).the_hole_value();
for (FullObjectSlot p = start; p < end; ++p) {
Object o = *p;
@@ -1613,10 +1612,9 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
class RecordMigratedSlotVisitor : public ObjectVisitorWithCageBases {
public:
explicit RecordMigratedSlotVisitor(
MarkCompactCollector* collector,
EphemeronRememberedSet* ephemeron_remembered_set)
: ObjectVisitorWithCageBases(collector->isolate()),
collector_(collector),
Heap* heap, EphemeronRememberedSet* ephemeron_remembered_set)
: ObjectVisitorWithCageBases(heap->isolate()),
heap_(heap),
ephemeron_remembered_set_(ephemeron_remembered_set) {}

inline void VisitPointer(HeapObject host, ObjectSlot p) final {

@@ -1684,7 +1682,7 @@ class RecordMigratedSlotVisitor : public ObjectVisitorWithCageBases {
// the old-to-new remembered set.
DCHECK(!Heap::InYoungGeneration(target));
DCHECK(!target.InSharedWritableHeap());
collector_->RecordRelocSlot(host, rinfo, target);
heap_->mark_compact_collector()->RecordRelocSlot(host, rinfo, target);
}

inline void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {

@@ -1693,7 +1691,7 @@ class RecordMigratedSlotVisitor : public ObjectVisitorWithCageBases {
HeapObject object = rinfo->target_object(cage_base());
GenerationalBarrierForCode(host, rinfo, object);
WriteBarrier::Shared(host, rinfo, object);
collector_->RecordRelocSlot(host, rinfo, object);
heap_->mark_compact_collector()->RecordRelocSlot(host, rinfo, object);
}

// Entries that are skipped for recording.

@@ -1727,15 +1725,14 @@ class RecordMigratedSlotVisitor : public ObjectVisitorWithCageBases {
RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
MemoryChunk::FromHeapObject(host), slot);
}
} else if (p->InSharedHeap() &&
collector_->should_record_old_to_shared_slots()) {
} else if (p->InSharedHeap() && !heap_->IsShared()) {
RememberedSet<OLD_TO_SHARED>::Insert<AccessMode::NON_ATOMIC>(
MemoryChunk::FromHeapObject(host), slot);
}
}
}

MarkCompactCollector* collector_;
Heap* const heap_;
EphemeronRememberedSet* ephemeron_remembered_set_;
};
@@ -2152,8 +2149,7 @@ class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
}

inline bool Visit(HeapObject object, int size) override {
RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector(),
&heap_->ephemeron_remembered_set_);
RecordMigratedSlotVisitor visitor(heap_, &heap_->ephemeron_remembered_set_);
Map map = object.map(cage_base());
// Instead of calling object.IterateFast(cage_base(), &visitor) here
// we can shortcut and use the precomputed size value passed to the visitor.

@@ -2203,7 +2199,7 @@ void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
heap_->local_embedder_heap_tracer()->embedder_stack_state() ==
cppgc::EmbedderStackState::kMayContainHeapPointers) {
GlobalHandleMarkingVisitor global_handles_marker(
*heap_, marking_state_, *local_marking_worklists_);
*heap_, *local_marking_worklists_);
stack.IteratePointers(&global_handles_marker);
}
}

@@ -3016,8 +3012,7 @@ class StringForwardingTableCleaner final {
explicit StringForwardingTableCleaner(Heap* heap)
: heap_(heap),
isolate_(heap_->isolate()),
marking_state_(
heap_->mark_compact_collector()->non_atomic_marking_state()) {}
marking_state_(heap_->non_atomic_marking_state()) {}
void Run() {
StringForwardingTable* forwarding_table =
isolate_->string_forwarding_table();

@@ -3095,9 +3090,9 @@ class StringForwardingTableCleaner final {
ThinString::cast(original_string).RawField(ThinString::kActualOffset);
MarkCompactCollector::RecordSlot(original_string, slot, forward_string);
}
Heap* heap_;
Isolate* isolate_;
NonAtomicMarkingState* marking_state_;
Heap* const heap_;
Isolate* const isolate_;
NonAtomicMarkingState* const marking_state_;
};

} // namespace
@@ -4309,13 +4304,12 @@ void Evacuator::Finalize() {

class FullEvacuator : public Evacuator {
public:
explicit FullEvacuator(MarkCompactCollector* collector)
: Evacuator(collector->heap(), &record_visitor_, &local_allocator_,
explicit FullEvacuator(Heap* heap)
: Evacuator(heap, &record_visitor_, &local_allocator_,
AlwaysPromoteYoung::kYes),
record_visitor_(collector, &ephemeron_remembered_set_),
record_visitor_(heap_, &ephemeron_remembered_set_),
local_allocator_(heap_,
CompactionSpaceKind::kCompactionSpaceForMarkCompact),
collector_(collector) {}
CompactionSpaceKind::kCompactionSpaceForMarkCompact) {}

GCTracer::Scope::ScopeId GetBackgroundTracingScope() override {
return GCTracer::Scope::MC_BACKGROUND_EVACUATE_COPY;

@@ -4348,13 +4342,11 @@ class FullEvacuator : public Evacuator {
EphemeronRememberedSet ephemeron_remembered_set_;
RecordMigratedSlotVisitor record_visitor_;
EvacuationAllocator local_allocator_;

MarkCompactCollector* collector_;
};

bool FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
const EvacuationMode evacuation_mode = ComputeEvacuationMode(chunk);
NonAtomicMarkingState* marking_state = collector_->non_atomic_marking_state();
NonAtomicMarkingState* marking_state = heap_->non_atomic_marking_state();
*live_bytes = marking_state->live_bytes(chunk);
TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"FullEvacuator::RawEvacuatePage", "evacuation_mode",

@@ -4399,8 +4391,9 @@ bool FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
} else {
// Aborted compaction page. Actual processing happens on the main
// thread for simplicity reasons.
collector_->ReportAbortedEvacuationCandidateDueToOOM(
failed_object.address(), static_cast<Page*>(chunk));
heap_->mark_compact_collector()
->ReportAbortedEvacuationCandidateDueToOOM(
failed_object.address(), static_cast<Page*>(chunk));
return false;
}
}
@@ -4472,20 +4465,19 @@ class PageEvacuationJob : public v8::JobTask {
};

namespace {
template <class Evacuator, class Collector>
template <class Evacuator>
size_t CreateAndExecuteEvacuationTasks(
Collector* collector,
Heap* heap,
std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items,
MigrationObserver* migration_observer) {
base::Optional<ProfilingMigrationObserver> profiling_observer;
if (collector->isolate()->log_object_relocation()) {
profiling_observer.emplace(collector->heap());
if (heap->isolate()->log_object_relocation()) {
profiling_observer.emplace(heap);
}
std::vector<std::unique_ptr<v8::internal::Evacuator>> evacuators;
const int wanted_num_tasks =
NumberOfParallelCompactionTasks(collector->heap());
const int wanted_num_tasks = NumberOfParallelCompactionTasks(heap);
for (int i = 0; i < wanted_num_tasks; i++) {
auto evacuator = std::make_unique<Evacuator>(collector);
auto evacuator = std::make_unique<Evacuator>(heap);
if (profiling_observer) {
evacuator->AddObserver(&profiling_observer.value());
}

@@ -4497,7 +4489,7 @@ size_t CreateAndExecuteEvacuationTasks(
V8::GetCurrentPlatform()
->CreateJob(
v8::TaskPriority::kUserBlocking,
std::make_unique<PageEvacuationJob>(collector->isolate(), &evacuators,
std::make_unique<PageEvacuationJob>(heap->isolate(), &evacuators,
std::move(evacuation_items)))
->Join();
for (auto& evacuator : evacuators) {

@@ -4605,8 +4597,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {

// Promote young generation large objects.
if (auto* new_lo_space = heap()->new_lo_space()) {
auto* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
auto* marking_state = heap()->non_atomic_marking_state();
for (auto it = new_lo_space->begin(); it != new_lo_space->end();) {
LargePage* current = *(it++);
HeapObject object = current->GetObject();

@@ -4629,7 +4620,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
evacuation_items.size());

wanted_num_tasks = CreateAndExecuteEvacuationTasks<FullEvacuator>(
this, std::move(evacuation_items), nullptr);
heap(), std::move(evacuation_items), nullptr);
}

const size_t aborted_pages = PostProcessAbortedEvacuationCandidates();
@@ -5447,11 +5438,10 @@ void MarkCompactCollector::ReportAbortedEvacuationCandidateDueToFlags(

namespace {

void ReRecordPage(Heap* heap,
v8::internal::NonAtomicMarkingState* marking_state,
Address failed_start, Page* page) {
void ReRecordPage(Heap* heap, Address failed_start, Page* page) {
DCHECK(page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));

NonAtomicMarkingState* marking_state = heap->non_atomic_marking_state();
// Aborted compaction page. We have to record slots here, since we
// might not have recorded them in first place.

@@ -5502,12 +5492,10 @@ size_t MarkCompactCollector::PostProcessAbortedEvacuationCandidates() {
page->SetFlag(Page::COMPACTION_WAS_ABORTED);
}
for (auto start_and_page : aborted_evacuation_candidates_due_to_oom_) {
ReRecordPage(heap(), non_atomic_marking_state(), start_and_page.first,
start_and_page.second);
ReRecordPage(heap(), start_and_page.first, start_and_page.second);
}
for (auto start_and_page : aborted_evacuation_candidates_due_to_flags_) {
ReRecordPage(heap(), non_atomic_marking_state(), start_and_page.first,
start_and_page.second);
ReRecordPage(heap(), start_and_page.first, start_and_page.second);
}
const size_t aborted_pages =
aborted_evacuation_candidates_due_to_oom_.size() +

@@ -5545,8 +5533,7 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
}

void MarkCompactCollector::SweepLargeSpace(LargeObjectSpace* space) {
auto* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
auto* marking_state = heap()->non_atomic_marking_state();
PtrComprCageBase cage_base(heap()->isolate());
size_t surviving_object_size = 0;
for (auto it = space->begin(); it != space->end();) {
@@ -5624,8 +5611,7 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
public:
explicit YoungGenerationMarkingVerifier(Heap* heap)
: MarkingVerifier(heap),
marking_state_(
heap->minor_mark_compact_collector()->non_atomic_marking_state()) {}
marking_state_(heap->non_atomic_marking_state()) {}

ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
const MemoryChunk* chunk) override {

@@ -5689,7 +5675,7 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
}
}

NonAtomicMarkingState* marking_state_;
NonAtomicMarkingState* const marking_state_;
};

class YoungGenerationEvacuationVerifier : public EvacuationVerifier {

@@ -5755,9 +5741,8 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {

bool IsUnmarkedObjectForYoungGeneration(Heap* heap, FullObjectSlot p) {
DCHECK_IMPLIES(Heap::InYoungGeneration(*p), Heap::InToPage(*p));
return Heap::InYoungGeneration(*p) && !heap->minor_mark_compact_collector()
->non_atomic_marking_state()
->IsBlack(HeapObject::cast(*p));
return Heap::InYoungGeneration(*p) &&
!heap->non_atomic_marking_state()->IsBlack(HeapObject::cast(*p));
}

} // namespace
@@ -5802,7 +5787,7 @@ constexpr size_t MinorMarkCompactCollector::kMaxParallelTasks;
MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
: CollectorBase(heap, GarbageCollector::MINOR_MARK_COMPACTOR),
page_parallel_job_semaphore_(0),
sweeper_(std::make_unique<Sweeper>(heap_, non_atomic_marking_state())) {}
sweeper_(std::make_unique<Sweeper>(heap_)) {}

std::pair<size_t, size_t> MinorMarkCompactCollector::ProcessMarkingWorklist(
size_t bytes_to_process) {

@@ -5853,8 +5838,7 @@ class YoungGenerationMigrationObserver final : public MigrationObserver {
// Migrate color to old generation marking in case the object survived
// young generation garbage collection.
if (heap_->incremental_marking()->IsMarking()) {
DCHECK(
heap_->incremental_marking()->atomic_marking_state()->IsWhite(dst));
DCHECK(heap_->atomic_marking_state()->IsWhite(dst));
heap_->incremental_marking()->TransferColor(src, dst);
}
}
@@ -5867,9 +5851,8 @@ class YoungGenerationMigrationObserver final : public MigrationObserver {
class YoungGenerationRecordMigratedSlotVisitor final
: public RecordMigratedSlotVisitor {
public:
explicit YoungGenerationRecordMigratedSlotVisitor(
MarkCompactCollector* collector)
: RecordMigratedSlotVisitor(collector, nullptr) {}
explicit YoungGenerationRecordMigratedSlotVisitor(Heap* heap)
: RecordMigratedSlotVisitor(heap, nullptr) {}

void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {

@@ -5885,7 +5868,7 @@ class YoungGenerationRecordMigratedSlotVisitor final
// Only record slots for host objects that are considered as live by the
// full collector.
inline bool IsLive(HeapObject object) {
return collector_->non_atomic_marking_state()->IsBlack(object);
return heap_->non_atomic_marking_state()->IsBlack(object);
}

inline void RecordMigratedSlot(HeapObject host, MaybeObject value,
@@ -6059,7 +6042,7 @@ void MinorMarkCompactCollector::CollectGarbage() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS);
// Since we promote all surviving large objects immediately, all remaining
// large objects must be dead.
NonAtomicMarkingState* marking_state = non_atomic_marking_state();
NonAtomicMarkingState* const marking_state = non_atomic_marking_state();
heap()->new_lo_space()->FreeDeadObjects([marking_state](HeapObject obj) {
// New large object space is not swept and markbits for non-promoted
// objects are still in tact.

@@ -6081,7 +6064,6 @@ void MinorMarkCompactCollector::MakeIterable(
CHECK(!p->IsLargePage());
// We have to clear the full collectors markbits for the areas that we
// remove here.
MarkCompactCollector* full_collector = heap()->mark_compact_collector();
Address free_start = p->area_start();

for (auto object_and_size :

@@ -6092,7 +6074,7 @@ void MinorMarkCompactCollector::MakeIterable(
if (free_end != free_start) {
CHECK_GT(free_end, free_start);
size_t size = static_cast<size_t>(free_end - free_start);
full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
heap()->non_atomic_marking_state()->bitmap(p)->ClearRange(
p->AddressToMarkbitIndex(free_start),
p->AddressToMarkbitIndex(free_end));
if (free_space_mode == FreeSpaceTreatmentMode::kZapFreeSpace) {

@@ -6109,7 +6091,7 @@ void MinorMarkCompactCollector::MakeIterable(
if (free_start != p->area_end()) {
CHECK_GT(p->area_end(), free_start);
size_t size = static_cast<size_t>(p->area_end() - free_start);
full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
heap()->non_atomic_marking_state()->bitmap(p)->ClearRange(
p->AddressToMarkbitIndex(free_start),
p->AddressToMarkbitIndex(p->area_end()));
if (free_space_mode == FreeSpaceTreatmentMode::kZapFreeSpace) {
@@ -6124,10 +6106,8 @@ namespace {
// Helper class for pruning the string table.
class YoungGenerationExternalStringTableCleaner : public RootVisitor {
public:
explicit YoungGenerationExternalStringTableCleaner(
MinorMarkCompactCollector* collector)
: heap_(collector->heap()),
marking_state_(collector->non_atomic_marking_state()) {}
explicit YoungGenerationExternalStringTableCleaner(Heap* heap)
: heap_(heap), marking_state_(heap_->non_atomic_marking_state()) {}

void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {

@@ -6153,8 +6133,8 @@ class YoungGenerationExternalStringTableCleaner : public RootVisitor {
}

private:
Heap* heap_;
NonAtomicMarkingState* marking_state_;
Heap* const heap_;
NonAtomicMarkingState* const marking_state_;
};

} // namespace

@@ -6166,7 +6146,7 @@ void MinorMarkCompactCollector::ClearNonLiveReferences() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_STRING_TABLE);
// Internalized strings are always stored in old space, so there is no
// need to clean them here.
YoungGenerationExternalStringTableCleaner external_visitor(this);
YoungGenerationExternalStringTableCleaner external_visitor(heap());
heap()->external_string_table_.IterateYoung(&external_visitor);
heap()->external_string_table_.CleanUpYoung();
}
@@ -6203,12 +6183,11 @@ class RootMarkingItem;

class YoungGenerationMarkingTask {
public:
YoungGenerationMarkingTask(Isolate* isolate,
MinorMarkCompactCollector* collector,
YoungGenerationMarkingTask(Isolate* isolate, Heap* heap,
MarkingWorklists* global_worklists)
: marking_worklists_local_(
std::make_unique<MarkingWorklists::Local>(global_worklists)),
marking_state_(collector->marking_state()),
marking_state_(heap->marking_state()),
visitor_(isolate, marking_state_, marking_worklists_local()) {}

void MarkObject(Object object) {

@@ -6294,11 +6273,10 @@ V8_INLINE SlotCallbackResult PageMarkingItem::CheckAndMarkObject(

void YoungGenerationMarkingJob::Run(JobDelegate* delegate) {
if (delegate->IsJoiningThread()) {
TRACE_GC(collector_->heap()->tracer(),
GCTracer::Scope::MINOR_MC_MARK_PARALLEL);
TRACE_GC(heap_->tracer(), GCTracer::Scope::MINOR_MC_MARK_PARALLEL);
ProcessItems(delegate);
} else {
TRACE_GC_EPOCH(collector_->heap()->tracer(),
TRACE_GC_EPOCH(heap_->tracer(),
GCTracer::Scope::MINOR_MC_BACKGROUND_MARKING,
ThreadKind::kBackground);
ProcessItems(delegate);

@@ -6333,7 +6311,7 @@ void YoungGenerationMarkingJob::ProcessItems(JobDelegate* delegate) {
double marking_time = 0.0;
{
TimedScope scope(&marking_time);
YoungGenerationMarkingTask task(isolate_, collector_, global_worklists_);
YoungGenerationMarkingTask task(isolate_, heap_, global_worklists_);
ProcessMarkingItems(&task);
if (!incremental()) {
task.EmptyMarkingWorklist();

@@ -6342,8 +6320,8 @@ void YoungGenerationMarkingJob::ProcessItems(JobDelegate* delegate) {
}
}
if (v8_flags.trace_minor_mc_parallel_marking) {
PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
static_cast<void*>(this), marking_time);
PrintIsolate(isolate_, "marking[%p]: time=%f\n", static_cast<void*>(this),
marking_time);
}
}
@@ -6413,7 +6391,7 @@ void MinorMarkCompactCollector::MarkRootSetInParallel(
->CreateJob(
v8::TaskPriority::kUserBlocking,
std::make_unique<YoungGenerationMarkingJob>(
isolate(), this, marking_worklists(),
isolate(), heap(), marking_worklists(),
std::move(marking_items), YoungMarkingJobType::kAtomic))
->Join();

@@ -6585,13 +6563,13 @@ namespace {

class YoungGenerationEvacuator : public Evacuator {
public:
explicit YoungGenerationEvacuator(MinorMarkCompactCollector* collector)
: Evacuator(collector->heap(), &record_visitor_, &local_allocator_,
explicit YoungGenerationEvacuator(Heap* heap)
: Evacuator(heap, &record_visitor_, &local_allocator_,
AlwaysPromoteYoung::kNo),
record_visitor_(collector->heap()->mark_compact_collector()),
record_visitor_(heap_),
local_allocator_(
heap_, CompactionSpaceKind::kCompactionSpaceForMinorMarkCompact),
collector_(collector) {}
collector_(heap_->minor_mark_compact_collector()) {}

GCTracer::Scope::ScopeId GetBackgroundTracingScope() override {
return GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY;

@@ -6613,7 +6591,7 @@ bool YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
intptr_t* live_bytes) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"YoungGenerationEvacuator::RawEvacuatePage");
NonAtomicMarkingState* marking_state = collector_->non_atomic_marking_state();
NonAtomicMarkingState* marking_state = heap_->non_atomic_marking_state();
*live_bytes = marking_state->live_bytes(chunk);
DCHECK_EQ(kPageNewToOld, ComputeEvacuationMode(chunk));
LiveObjectVisitor::VisitBlackObjectsNoFail(chunk, marking_state,

@@ -6663,7 +6641,7 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
LargePage* current = *it;
it++;
HeapObject object = current->GetObject();
if (non_atomic_marking_state_.IsBlack(object)) {
if (non_atomic_marking_state()->IsBlack(object)) {
heap_->lo_space()->PromoteNewLargeObject(current);
current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
promoted_large_pages_.push_back(current);

@@ -6678,7 +6656,7 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
const auto pages_count = evacuation_items.size();
const auto wanted_num_tasks =
CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
this, std::move(evacuation_items), &observer);
heap(), std::move(evacuation_items), &observer);

if (v8_flags.trace_evacuation) {
TraceEvacuation(isolate(), pages_count, wanted_num_tasks, live_bytes, 0);
@@ -11,6 +11,7 @@
#include "include/v8-internal.h"
#include "src/heap/base/worklist.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/marking-state.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/marking.h"

@@ -178,85 +179,6 @@ enum class MemoryReductionMode { kNone, kShouldReduceMemory };
enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
enum class RememberedSetUpdatingMode { ALL, OLD_TO_NEW_ONLY };

// This is used by marking visitors.
class MarkingState final
: public MarkingStateBase<MarkingState, AccessMode::ATOMIC> {
public:
explicit MarkingState(PtrComprCageBase cage_base)
: MarkingStateBase(cage_base) {}

ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
const BasicMemoryChunk* chunk) const {
return chunk->marking_bitmap<AccessMode::ATOMIC>();
}

// Concurrent marking uses local live bytes so we may do these accesses
// non-atomically.
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
DCHECK_IMPLIES(V8_COMPRESS_POINTERS_8GB_BOOL,
IsAligned(by, kObjectAlignment8GbHeap));
chunk->live_byte_count_.fetch_add(by, std::memory_order_relaxed);
}

intptr_t live_bytes(const MemoryChunk* chunk) const {
return chunk->live_byte_count_.load(std::memory_order_relaxed);
}

void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
DCHECK_IMPLIES(V8_COMPRESS_POINTERS_8GB_BOOL,
IsAligned(value, kObjectAlignment8GbHeap));
chunk->live_byte_count_.store(value, std::memory_order_relaxed);
}
};

// This is used by Scavenger and Evacuator in TransferColor.
// Live byte increments have to be atomic.
class AtomicMarkingState final
: public MarkingStateBase<AtomicMarkingState, AccessMode::ATOMIC> {
public:
explicit AtomicMarkingState(PtrComprCageBase cage_base)
: MarkingStateBase(cage_base) {}

ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
const BasicMemoryChunk* chunk) const {
return chunk->marking_bitmap<AccessMode::ATOMIC>();
}

void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
DCHECK_IMPLIES(V8_COMPRESS_POINTERS_8GB_BOOL,
IsAligned(by, kObjectAlignment8GbHeap));
chunk->live_byte_count_.fetch_add(by);
}
};

class NonAtomicMarkingState final
: public MarkingStateBase<NonAtomicMarkingState, AccessMode::NON_ATOMIC> {
public:
explicit NonAtomicMarkingState(PtrComprCageBase cage_base)
: MarkingStateBase(cage_base) {}

ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
const BasicMemoryChunk* chunk) const {
return chunk->marking_bitmap<AccessMode::NON_ATOMIC>();
}

void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
DCHECK_IMPLIES(V8_COMPRESS_POINTERS_8GB_BOOL,
IsAligned(by, kObjectAlignment8GbHeap));
chunk->live_byte_count_.fetch_add(by, std::memory_order_relaxed);
}

intptr_t live_bytes(const MemoryChunk* chunk) const {
return chunk->live_byte_count_.load(std::memory_order_relaxed);
}

void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
DCHECK_IMPLIES(V8_COMPRESS_POINTERS_8GB_BOOL,
IsAligned(value, kObjectAlignment8GbHeap));
chunk->live_byte_count_.store(value, std::memory_order_relaxed);
}
};

// This visitor is used for marking on the main thread. It is cheaper than
// the concurrent marking visitor because it does not snapshot JSObjects.
template <typename MarkingState>

@@ -339,15 +261,6 @@ class CollectorBase {
virtual void Prepare() = 0;
virtual void StartMarking() = 0;

MarkingState* marking_state() { return &marking_state_; }

NonAtomicMarkingState* non_atomic_marking_state() {
return &non_atomic_marking_state_;
}

inline Heap* heap() const { return heap_; }
inline Isolate* isolate();

MarkingWorklists* marking_worklists() { return &marking_worklists_; }

MarkingWorklists::Local* local_marking_worklists() {

@@ -374,6 +287,15 @@ class CollectorBase {
std::vector<LargePage*> promoted_large_pages_;

protected:
inline Heap* heap() const { return heap_; }
inline Isolate* isolate();

MarkingState* marking_state() { return marking_state_; }

NonAtomicMarkingState* non_atomic_marking_state() {
return non_atomic_marking_state_;
}

void StartSweepSpace(Sweeper* sweeper, PagedSpaceBase* space);

Heap* heap_;

@@ -382,8 +304,8 @@ class CollectorBase {

std::unique_ptr<MarkingWorklists::Local> local_marking_worklists_;

MarkingState marking_state_;
NonAtomicMarkingState non_atomic_marking_state_;
MarkingState* const marking_state_;
NonAtomicMarkingState* const non_atomic_marking_state_;

explicit CollectorBase(Heap* heap, GarbageCollector collector);
virtual ~CollectorBase() = default;
src/heap/marking-state-inl.h (new file, 155 lines)
@ -0,0 +1,155 @@
|
||||
// Copyright 2022 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef V8_HEAP_MARKING_STATE_INL_H_
|
||||
#define V8_HEAP_MARKING_STATE_INL_H_
|
||||
|
||||
#include "src/heap/marking-state.h"
|
||||
#include "src/heap/memory-chunk.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
template <typename ConcreteState, AccessMode access_mode>
|
||||
MarkBit MarkingStateBase<ConcreteState, access_mode>::MarkBitFrom(
|
||||
const HeapObject obj) const {
|
||||
return MarkBitFrom(BasicMemoryChunk::FromHeapObject(obj), obj.ptr());
|
||||
}
|
||||
|
||||
template <typename ConcreteState, AccessMode access_mode>
|
||||
MarkBit MarkingStateBase<ConcreteState, access_mode>::MarkBitFrom(
|
||||
const BasicMemoryChunk* p, Address addr) const {
|
||||
return static_cast<const ConcreteState*>(this)->bitmap(p)->MarkBitFromIndex(
|
||||
p->AddressToMarkbitIndex(addr));
|
||||
}
|
||||
|
||||
template <typename ConcreteState, AccessMode access_mode>
|
    Marking::ObjectColor MarkingStateBase<ConcreteState, access_mode>::Color(
    const HeapObject obj) const {
  return Marking::Color(MarkBitFrom(obj));
}

template <typename ConcreteState, AccessMode access_mode>
bool MarkingStateBase<ConcreteState, access_mode>::IsImpossible(
    const HeapObject obj) const {
  return Marking::IsImpossible<access_mode>(MarkBitFrom(obj));
}

template <typename ConcreteState, AccessMode access_mode>
bool MarkingStateBase<ConcreteState, access_mode>::IsBlack(
    const HeapObject obj) const {
  return Marking::IsBlack<access_mode>(MarkBitFrom(obj));
}

template <typename ConcreteState, AccessMode access_mode>
bool MarkingStateBase<ConcreteState, access_mode>::IsWhite(
    const HeapObject obj) const {
  return Marking::IsWhite<access_mode>(MarkBitFrom(obj));
}

template <typename ConcreteState, AccessMode access_mode>
bool MarkingStateBase<ConcreteState, access_mode>::IsGrey(
    const HeapObject obj) const {
  return Marking::IsGrey<access_mode>(MarkBitFrom(obj));
}

template <typename ConcreteState, AccessMode access_mode>
bool MarkingStateBase<ConcreteState, access_mode>::IsBlackOrGrey(
    const HeapObject obj) const {
  return Marking::IsBlackOrGrey<access_mode>(MarkBitFrom(obj));
}

template <typename ConcreteState, AccessMode access_mode>
bool MarkingStateBase<ConcreteState, access_mode>::WhiteToGrey(HeapObject obj) {
  return Marking::WhiteToGrey<access_mode>(MarkBitFrom(obj));
}

template <typename ConcreteState, AccessMode access_mode>
bool MarkingStateBase<ConcreteState, access_mode>::WhiteToBlack(
    HeapObject obj) {
  return WhiteToGrey(obj) && GreyToBlack(obj);
}

template <typename ConcreteState, AccessMode access_mode>
bool MarkingStateBase<ConcreteState, access_mode>::GreyToBlack(HeapObject obj) {
  BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(obj);
  MarkBit markbit = MarkBitFrom(chunk, obj.address());
  if (!Marking::GreyToBlack<access_mode>(markbit)) return false;
  static_cast<ConcreteState*>(this)->IncrementLiveBytes(
      MemoryChunk::cast(chunk),
      ALIGN_TO_ALLOCATION_ALIGNMENT(obj.Size(cage_base())));
  return true;
}

template <typename ConcreteState, AccessMode access_mode>
bool MarkingStateBase<ConcreteState, access_mode>::GreyToBlackUnaccounted(
    HeapObject obj) {
  return Marking::GreyToBlack<access_mode>(MarkBitFrom(obj));
}

template <typename ConcreteState, AccessMode access_mode>
void MarkingStateBase<ConcreteState, access_mode>::ClearLiveness(
    MemoryChunk* chunk) {
  static_cast<ConcreteState*>(this)->bitmap(chunk)->Clear();
  static_cast<ConcreteState*>(this)->SetLiveBytes(chunk, 0);
}

ConcurrentBitmap<AccessMode::ATOMIC>* MarkingState::bitmap(
    const BasicMemoryChunk* chunk) const {
  return chunk->marking_bitmap<AccessMode::ATOMIC>();
}

void MarkingState::IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
  DCHECK_IMPLIES(V8_COMPRESS_POINTERS_8GB_BOOL,
                 IsAligned(by, kObjectAlignment8GbHeap));
  chunk->live_byte_count_.fetch_add(by, std::memory_order_relaxed);
}

intptr_t MarkingState::live_bytes(const MemoryChunk* chunk) const {
  return chunk->live_byte_count_.load(std::memory_order_relaxed);
}

void MarkingState::SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
  DCHECK_IMPLIES(V8_COMPRESS_POINTERS_8GB_BOOL,
                 IsAligned(value, kObjectAlignment8GbHeap));
  chunk->live_byte_count_.store(value, std::memory_order_relaxed);
}

ConcurrentBitmap<AccessMode::NON_ATOMIC>* NonAtomicMarkingState::bitmap(
    const BasicMemoryChunk* chunk) const {
  return chunk->marking_bitmap<AccessMode::NON_ATOMIC>();
}

void NonAtomicMarkingState::IncrementLiveBytes(MemoryChunk* chunk,
                                               intptr_t by) {
  DCHECK_IMPLIES(V8_COMPRESS_POINTERS_8GB_BOOL,
                 IsAligned(by, kObjectAlignment8GbHeap));
  chunk->live_byte_count_.fetch_add(by, std::memory_order_relaxed);
}

intptr_t NonAtomicMarkingState::live_bytes(const MemoryChunk* chunk) const {
  return chunk->live_byte_count_.load(std::memory_order_relaxed);
}

void NonAtomicMarkingState::SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
  DCHECK_IMPLIES(V8_COMPRESS_POINTERS_8GB_BOOL,
                 IsAligned(value, kObjectAlignment8GbHeap));
  chunk->live_byte_count_.store(value, std::memory_order_relaxed);
}

ConcurrentBitmap<AccessMode::ATOMIC>* AtomicMarkingState::bitmap(
    const BasicMemoryChunk* chunk) const {
  return chunk->marking_bitmap<AccessMode::ATOMIC>();
}

void AtomicMarkingState::IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
  DCHECK_IMPLIES(V8_COMPRESS_POINTERS_8GB_BOOL,
                 IsAligned(by, kObjectAlignment8GbHeap));
  chunk->live_byte_count_.fetch_add(by);
}

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MARKING_STATE_INL_H_
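A minimal sketch of the CRTP dispatch used by MarkingStateBase above, for readers less familiar with the pattern: the base class implements the colour transition once and defers live-byte accounting to the concrete state through static_cast<ConcreteState*>(this), so each concrete state picks its own bitmap and memory-ordering policy without virtual calls. The types below are illustrative stand-ins only, not V8's real classes.

// crtp_marking_state_sketch.cc - standalone toy, not part of this commit.
#include <atomic>
#include <cstdint>
#include <iostream>

// Stand-in for a memory chunk that tracks live bytes.
struct ChunkSketch {
  std::atomic<intptr_t> live_bytes{0};
};

template <typename ConcreteState>
class MarkingStateBaseSketch {
 public:
  // The transition logic lives in the base; the bookkeeping policy is
  // supplied by the derived class via CRTP, mirroring GreyToBlack above.
  bool GreyToBlack(ChunkSketch* chunk, intptr_t object_size) {
    static_cast<ConcreteState*>(this)->IncrementLiveBytes(chunk, object_size);
    return true;
  }
};

// Concrete state that may use relaxed increments (cf. MarkingState above).
class RelaxedMarkingStateSketch final
    : public MarkingStateBaseSketch<RelaxedMarkingStateSketch> {
 public:
  void IncrementLiveBytes(ChunkSketch* chunk, intptr_t by) {
    chunk->live_bytes.fetch_add(by, std::memory_order_relaxed);
  }
};

int main() {
  ChunkSketch chunk;
  RelaxedMarkingStateSketch state;
  state.GreyToBlack(&chunk, 64);
  std::cout << chunk.live_bytes.load() << "\n";  // prints 64
  return 0;
}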
137  src/heap/marking-state.h  Normal file
@ -0,0 +1,137 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARKING_STATE_H_
#define V8_HEAP_MARKING_STATE_H_

#include "src/common/globals.h"
#include "src/heap/marking.h"
#include "src/objects/heap-object.h"

namespace v8 {
namespace internal {

class BasicMemoryChunk;
class MemoryChunk;

template <typename ConcreteState, AccessMode access_mode>
class MarkingStateBase {
 public:
  // Declares that this marking state is not collecting retainers, so the
  // marking visitor may update the heap state to store information about
  // progress, and may avoid fully visiting an object if it is safe to do so.
  static constexpr bool kCollectRetainers = false;

  explicit MarkingStateBase(PtrComprCageBase cage_base)
#if V8_COMPRESS_POINTERS
      : cage_base_(cage_base)
#endif
  {
  }

  // The pointer compression cage base value used for decompression of all
  // tagged values except references to Code objects.
  V8_INLINE PtrComprCageBase cage_base() const {
#if V8_COMPRESS_POINTERS
    return cage_base_;
#else
    return PtrComprCageBase{};
#endif  // V8_COMPRESS_POINTERS
  }

  V8_INLINE MarkBit MarkBitFrom(const HeapObject obj) const;

  // {addr} may be tagged or aligned.
  V8_INLINE MarkBit MarkBitFrom(const BasicMemoryChunk* p, Address addr) const;

  V8_INLINE Marking::ObjectColor Color(const HeapObject obj) const;

  V8_INLINE bool IsImpossible(const HeapObject obj) const;

  V8_INLINE bool IsBlack(const HeapObject obj) const;

  V8_INLINE bool IsWhite(const HeapObject obj) const;

  V8_INLINE bool IsGrey(const HeapObject obj) const;

  V8_INLINE bool IsBlackOrGrey(const HeapObject obj) const;

  V8_INLINE bool WhiteToGrey(HeapObject obj);

  V8_INLINE bool WhiteToBlack(HeapObject obj);

  V8_INLINE bool GreyToBlack(HeapObject obj);

  V8_INLINE bool GreyToBlackUnaccounted(HeapObject obj);

  V8_INLINE void ClearLiveness(MemoryChunk* chunk);

  void AddStrongReferenceForReferenceSummarizer(HeapObject host,
                                                HeapObject obj) {
    // This is not a reference summarizer, so there is nothing to do here.
  }

  void AddWeakReferenceForReferenceSummarizer(HeapObject host, HeapObject obj) {
    // This is not a reference summarizer, so there is nothing to do here.
  }

 private:
#if V8_COMPRESS_POINTERS
  const PtrComprCageBase cage_base_;
#endif  // V8_COMPRESS_POINTERS
};

// This is used by marking visitors.
class MarkingState final
    : public MarkingStateBase<MarkingState, AccessMode::ATOMIC> {
 public:
  explicit MarkingState(PtrComprCageBase cage_base)
      : MarkingStateBase(cage_base) {}

  V8_INLINE ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
      const BasicMemoryChunk* chunk) const;

  // Concurrent marking uses local live bytes so we may do these accesses
  // non-atomically.
  V8_INLINE void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by);

  V8_INLINE intptr_t live_bytes(const MemoryChunk* chunk) const;

  V8_INLINE void SetLiveBytes(MemoryChunk* chunk, intptr_t value);
};

class NonAtomicMarkingState final
    : public MarkingStateBase<NonAtomicMarkingState, AccessMode::NON_ATOMIC> {
 public:
  explicit NonAtomicMarkingState(PtrComprCageBase cage_base)
      : MarkingStateBase(cage_base) {}

  V8_INLINE ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
      const BasicMemoryChunk* chunk) const;

  V8_INLINE void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by);

  V8_INLINE intptr_t live_bytes(const MemoryChunk* chunk) const;

  V8_INLINE void SetLiveBytes(MemoryChunk* chunk, intptr_t value);
};

// This is used by Scavenger and Evacuator in TransferColor.
// Live byte increments have to be atomic.
class AtomicMarkingState final
    : public MarkingStateBase<AtomicMarkingState, AccessMode::ATOMIC> {
 public:
  explicit AtomicMarkingState(PtrComprCageBase cage_base)
      : MarkingStateBase(cage_base) {}

  V8_INLINE ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
      const BasicMemoryChunk* chunk) const;

  V8_INLINE void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by);
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MARKING_STATE_H_
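The header above becomes the single home of these marking-state types; per the change description, users now obtain them from the heap rather than from a specific collector. A rough, hedged sketch of the ownership shape this enables (simplified stand-ins, not V8's actual Heap class or accessors):

// heap_owned_marking_states_sketch.cc - standalone toy, not part of this commit.
#include <iostream>

struct MarkingStateSketch {
  const char* use = "marking visitors (atomic bit operations)";
};
struct NonAtomicMarkingStateSketch {
  const char* use = "sweeping and verification (non-atomic bit operations)";
};
struct AtomicMarkingStateSketch {
  const char* use = "Scavenger/Evacuator TransferColor (atomic live bytes)";
};

// The heap owns one instance of each flavour; collectors, spaces and tests
// query the heap instead of holding their own pointers.
class HeapSketch {
 public:
  MarkingStateSketch* marking_state() { return &marking_state_; }
  NonAtomicMarkingStateSketch* non_atomic_marking_state() {
    return &non_atomic_marking_state_;
  }
  AtomicMarkingStateSketch* atomic_marking_state() {
    return &atomic_marking_state_;
  }

 private:
  MarkingStateSketch marking_state_;
  NonAtomicMarkingStateSketch non_atomic_marking_state_;
  AtomicMarkingStateSketch atomic_marking_state_;
};

int main() {
  HeapSketch heap;
  // Call sites in the hunks below move from collector- or incremental-marking-
  // owned accessors to direct heap accessors of this shape.
  std::cout << heap.marking_state()->use << "\n";
  std::cout << heap.non_atomic_marking_state()->use << "\n";
  std::cout << heap.atomic_marking_state()->use << "\n";
  return 0;
}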
@ -5,6 +5,7 @@
#ifndef V8_HEAP_MARKING_VISITOR_INL_H_
#define V8_HEAP_MARKING_VISITOR_INL_H_

#include "src/heap/marking-state-inl.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/objects-visiting-inl.h"

@ -6,9 +6,8 @@
#define V8_HEAP_MARKING_VISITOR_H_

#include "src/common/globals.h"
#include "src/heap/marking-state.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/marking.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
#include "src/heap/weak-object-worklists.h"
@ -22,107 +21,6 @@ struct EphemeronMarking {
  size_t newly_discovered_limit;
};

template <typename ConcreteState, AccessMode access_mode>
class MarkingStateBase {
 public:
  // Declares that this marking state is not collecting retainers, so the
  // marking visitor may update the heap state to store information about
  // progress, and may avoid fully visiting an object if it is safe to do so.
  static constexpr bool kCollectRetainers = false;

  explicit MarkingStateBase(PtrComprCageBase cage_base)
#if V8_COMPRESS_POINTERS
      : cage_base_(cage_base)
#endif
  {
  }

  // The pointer compression cage base value used for decompression of all
  // tagged values except references to Code objects.
  V8_INLINE PtrComprCageBase cage_base() const {
#if V8_COMPRESS_POINTERS
    return cage_base_;
#else
    return PtrComprCageBase{};
#endif  // V8_COMPRESS_POINTERS
  }

  V8_INLINE MarkBit MarkBitFrom(const HeapObject obj) const {
    return MarkBitFrom(BasicMemoryChunk::FromHeapObject(obj), obj.ptr());
  }

  // {addr} may be tagged or aligned.
  V8_INLINE MarkBit MarkBitFrom(const BasicMemoryChunk* p, Address addr) const {
    return static_cast<const ConcreteState*>(this)->bitmap(p)->MarkBitFromIndex(
        p->AddressToMarkbitIndex(addr));
  }

  Marking::ObjectColor Color(const HeapObject obj) const {
    return Marking::Color(MarkBitFrom(obj));
  }

  V8_INLINE bool IsImpossible(const HeapObject obj) const {
    return Marking::IsImpossible<access_mode>(MarkBitFrom(obj));
  }

  V8_INLINE bool IsBlack(const HeapObject obj) const {
    return Marking::IsBlack<access_mode>(MarkBitFrom(obj));
  }

  V8_INLINE bool IsWhite(const HeapObject obj) const {
    return Marking::IsWhite<access_mode>(MarkBitFrom(obj));
  }

  V8_INLINE bool IsGrey(const HeapObject obj) const {
    return Marking::IsGrey<access_mode>(MarkBitFrom(obj));
  }

  V8_INLINE bool IsBlackOrGrey(const HeapObject obj) const {
    return Marking::IsBlackOrGrey<access_mode>(MarkBitFrom(obj));
  }

  V8_INLINE bool WhiteToGrey(HeapObject obj) {
    return Marking::WhiteToGrey<access_mode>(MarkBitFrom(obj));
  }

  V8_INLINE bool WhiteToBlack(HeapObject obj) {
    return WhiteToGrey(obj) && GreyToBlack(obj);
  }

  V8_INLINE bool GreyToBlack(HeapObject obj) {
    BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(obj);
    MarkBit markbit = MarkBitFrom(chunk, obj.address());
    if (!Marking::GreyToBlack<access_mode>(markbit)) return false;
    static_cast<ConcreteState*>(this)->IncrementLiveBytes(
        MemoryChunk::cast(chunk),
        ALIGN_TO_ALLOCATION_ALIGNMENT(obj.Size(cage_base())));
    return true;
  }

  V8_INLINE bool GreyToBlackUnaccounted(HeapObject obj) {
    return Marking::GreyToBlack<access_mode>(MarkBitFrom(obj));
  }

  void ClearLiveness(MemoryChunk* chunk) {
    static_cast<ConcreteState*>(this)->bitmap(chunk)->Clear();
    static_cast<ConcreteState*>(this)->SetLiveBytes(chunk, 0);
  }

  void AddStrongReferenceForReferenceSummarizer(HeapObject host,
                                                HeapObject obj) {
    // This is not a reference summarizer, so there is nothing to do here.
  }

  void AddWeakReferenceForReferenceSummarizer(HeapObject host, HeapObject obj) {
    // This is not a reference summarizer, so there is nothing to do here.
  }

 private:
#if V8_COMPRESS_POINTERS
  const PtrComprCageBase cage_base_;
#endif  // V8_COMPRESS_POINTERS
};

// The base class for all marking visitors. It implements marking logic with
// support of bytecode flushing, embedder tracing, weak and references.
//
@ -9,6 +9,7 @@
#include "src/common/globals.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/marking-state-inl.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/memory-chunk-layout.h"
@ -162,8 +163,7 @@ MemoryChunk::MemoryChunk(Heap* heap, BaseSpace* space, size_t chunk_size,

  categories_ = nullptr;

  heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(this,
                                                                        0);
  heap->non_atomic_marking_state()->SetLiveBytes(this, 0);
  if (executable == EXECUTABLE) {
    SetFlag(IS_EXECUTABLE);
    if (heap->write_protect_code_memory()) {
@ -10,6 +10,7 @@
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-state-inl.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/paged-spaces.h"
#include "src/heap/safepoint.h"
@ -26,10 +27,7 @@ Page* SemiSpace::InitializePage(MemoryChunk* chunk) {
  page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
  page->list_node().Initialize();
  if (v8_flags.minor_mc) {
    heap()
        ->minor_mark_compact_collector()
        ->non_atomic_marking_state()
        ->ClearLiveness(page);
    heap()->non_atomic_marking_state()->ClearLiveness(page);
  }
  page->InitializationMemoryFence();
  return page;
@ -76,8 +74,7 @@ bool SemiSpace::EnsureCurrentCapacity() {
  }

  // Add more pages if we have less than expected_pages.
  NonAtomicMarkingState* marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  NonAtomicMarkingState* marking_state = heap()->non_atomic_marking_state();
  while (actual_pages < expected_pages) {
    actual_pages++;
    current_page = heap()->memory_allocator()->AllocatePage(
@ -183,8 +180,7 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
  DCHECK(IsAligned(delta, AllocatePageSize()));
  const int delta_pages = static_cast<int>(delta / Page::kPageSize);
  DCHECK(last_page());
  NonAtomicMarkingState* marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  NonAtomicMarkingState* marking_state = heap()->non_atomic_marking_state();
  for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
    Page* new_page = heap()->memory_allocator()->AllocatePage(
        MemoryAllocator::AllocationMode::kUsePool, this, NOT_EXECUTABLE);
@ -659,8 +655,7 @@ void SemiSpaceNewSpace::ResetLinearAllocationArea() {
  to_space_.Reset();
  UpdateLinearAllocationArea();
  // Clear all mark-bits in the to-space.
  NonAtomicMarkingState* marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  NonAtomicMarkingState* marking_state = heap()->non_atomic_marking_state();
  for (Page* p : to_space_) {
    marking_state->ClearLiveness(p);
    // Concurrent marking may have local live bytes for this page.
@ -940,10 +935,7 @@ Page* PagedSpaceForNewSpace::InitializePage(MemoryChunk* chunk) {
  page->ResetAllocationStatistics();
  page->SetFlags(Page::TO_PAGE);
  page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
  heap()
      ->minor_mark_compact_collector()
      ->non_atomic_marking_state()
      ->ClearLiveness(page);
  heap()->non_atomic_marking_state()->ClearLiveness(page);
  page->AllocateFreeListCategories();
  page->InitializeFreeListCategories();
  page->list_node().Initialize();
@ -15,6 +15,7 @@
#include "src/heap/combined-heap.h"
#include "src/heap/heap-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-state-inl.h"
#include "src/logging/counters.h"
#include "src/objects/compilation-cache-table-inl.h"
#include "src/objects/heap-object.h"
@ -448,9 +449,9 @@ class ObjectStatsCollectorImpl {
    return field_stats_collector_.cage_base();
  }

  Heap* heap_;
  ObjectStats* stats_;
  NonAtomicMarkingState* marking_state_;
  Heap* const heap_;
  ObjectStats* const stats_;
  NonAtomicMarkingState* const marking_state_;
  std::unordered_set<HeapObject, Object::Hasher, Object::KeyEqualSafe>
      virtual_objects_;
  std::unordered_set<Address> external_resources_;
@ -461,8 +462,7 @@ ObjectStatsCollectorImpl::ObjectStatsCollectorImpl(Heap* heap,
                                                   ObjectStats* stats)
    : heap_(heap),
      stats_(stats),
      marking_state_(
          heap->mark_compact_collector()->non_atomic_marking_state()),
      marking_state_(heap->non_atomic_marking_state()),
      field_stats_collector_(
          heap_, &stats->tagged_fields_count_, &stats->embedder_fields_count_,
          &stats->inobject_smi_fields_count_,
@ -1089,8 +1089,7 @@ class ObjectStatsVisitor {
                    ObjectStatsCollectorImpl::Phase phase)
      : live_collector_(live_collector),
        dead_collector_(dead_collector),
        marking_state_(
            heap->mark_compact_collector()->non_atomic_marking_state()),
        marking_state_(heap->non_atomic_marking_state()),
        phase_(phase) {}

  void Visit(HeapObject obj) {
@ -1105,9 +1104,9 @@ class ObjectStatsVisitor {
  }

 private:
  ObjectStatsCollectorImpl* live_collector_;
  ObjectStatsCollectorImpl* dead_collector_;
  NonAtomicMarkingState* marking_state_;
  ObjectStatsCollectorImpl* const live_collector_;
  ObjectStatsCollectorImpl* const dead_collector_;
  NonAtomicMarkingState* const marking_state_;
  ObjectStatsCollectorImpl::Phase phase_;
};

@ -18,6 +18,7 @@
#include "src/heap/gc-tracer.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/marking-state-inl.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/memory-chunk-layout.h"
@ -286,8 +287,7 @@ bool PagedSpaceBase::ContainsSlow(Address addr) const {

void PagedSpaceBase::RefineAllocatedBytesAfterSweeping(Page* page) {
  CHECK(page->SweepingDone());
  auto marking_state =
      heap()->mark_compact_collector()->non_atomic_marking_state();
  auto marking_state = heap()->non_atomic_marking_state();
  // The live_byte on the page was accounted in the space allocated
  // bytes counter. After sweeping allocated_bytes() contains the
  // accurate live byte count on the page.
@ -542,17 +542,15 @@ void PagedSpaceBase::FreeLinearAllocationArea() {
        GetUnprotectMemoryOrigin(is_compaction_space()));
  }

  DCHECK_IMPLIES(current_limit - current_top >= 2 * kTaggedSize,
                 heap()->incremental_marking()->marking_state()->IsWhite(
                     HeapObject::FromAddress(current_top)));
  DCHECK_IMPLIES(
      current_limit - current_top >= 2 * kTaggedSize,
      heap()->marking_state()->IsWhite(HeapObject::FromAddress(current_top)));
  Free(current_top, current_limit - current_top,
       SpaceAccountingMode::kSpaceAccounted);
}

void PagedSpaceBase::ReleasePage(Page* page) {
  DCHECK_EQ(
      0, heap()->incremental_marking()->non_atomic_marking_state()->live_bytes(
             page));
  DCHECK_EQ(0, heap()->non_atomic_marking_state()->live_bytes(page));
  DCHECK_EQ(page->owner(), this);

  DCHECK_IMPLIES(identity() == NEW_SPACE, page->IsFlagSet(Page::TO_PAGE));
@ -874,7 +872,7 @@ void PagedSpaceBase::Verify(Isolate* isolate, ObjectVisitor* visitor) const {
}

void PagedSpaceBase::VerifyLiveBytes() const {
  MarkingState* marking_state = heap()->incremental_marking()->marking_state();
  MarkingState* marking_state = heap()->marking_state();
  PtrComprCageBase cage_base(heap()->isolate());
  for (const Page* page : *this) {
    CHECK(page->SweepingDone());
@ -919,8 +917,7 @@ void PagedSpaceBase::VerifyCountersAfterSweeping(Heap* heap) const {
void PagedSpaceBase::VerifyCountersBeforeConcurrentSweeping() const {
  size_t total_capacity = 0;
  size_t total_allocated = 0;
  auto marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  auto marking_state = heap()->non_atomic_marking_state();
  for (const Page* page : *this) {
    size_t page_allocated =
        page->SweepingDone()
@ -15,6 +15,7 @@
#include "src/heap/allocation-stats.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/heap-inl.h"
#include "src/heap/marking-state-inl.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/read-only-heap.h"
#include "src/objects/objects-inl.h"
@ -335,10 +336,7 @@ ReadOnlyPage::ReadOnlyPage(Heap* heap, BaseSpace* space, size_t chunk_size,
                           std::move(reservation)) {
  allocated_bytes_ = 0;
  SetFlags(Flag::NEVER_EVACUATE | Flag::READ_ONLY_HEAP);
  heap->incremental_marking()
      ->non_atomic_marking_state()
      ->bitmap(this)
      ->MarkAllBits();
  heap->non_atomic_marking_state()->bitmap(this)->MarkAllBits();
}

void ReadOnlyPage::MakeHeaderRelocatable() {
@ -578,7 +576,7 @@ void ReadOnlySpace::FreeLinearAllocationArea() {

  // Clear the bits in the unused black area.
  ReadOnlyPage* page = pages_.back();
  heap()->incremental_marking()->marking_state()->bitmap(page)->ClearRange(
  heap()->marking_state()->bitmap(page)->ClearRange(
      page->AddressToMarkbitIndex(top_), page->AddressToMarkbitIndex(limit_));

  heap()->CreateFillerObjectAt(top_, static_cast<int>(limit_ - top_));
@ -690,7 +688,7 @@ AllocationResult ReadOnlySpace::AllocateRaw(int size_in_bytes,
                                    : AllocateRawUnaligned(size_in_bytes);
  HeapObject heap_obj;
  if (result.To(&heap_obj)) {
    DCHECK(heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
    DCHECK(heap()->marking_state()->IsBlack(heap_obj));
  }
  return result;
}
@ -8,6 +8,7 @@
#include "src/codegen/assembler-inl.h"
#include "src/heap/evacuation-allocator-inl.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/marking-state-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/new-spaces.h"
#include "src/heap/objects-visiting-inl.h"
@ -132,8 +133,7 @@ CopyAndForwardResult Scavenger::SemiSpaceCopyObject(

  HeapObject target;
  if (allocation.To(&target)) {
    DCHECK(heap()->incremental_marking()->non_atomic_marking_state()->IsWhite(
        target));
    DCHECK(heap()->non_atomic_marking_state()->IsWhite(target));
    const bool self_success =
        MigrateObject(map, object, target, object_size, kPromoteIntoLocalHeap);
    if (!self_success) {
@ -181,8 +181,7 @@ CopyAndForwardResult Scavenger::PromoteObject(Map map, THeapObjectSlot slot,

  HeapObject target;
  if (allocation.To(&target)) {
    DCHECK(heap()->incremental_marking()->non_atomic_marking_state()->IsWhite(
        target));
    DCHECK(heap()->non_atomic_marking_state()->IsWhite(target));
    const bool self_success =
        MigrateObject(map, object, target, object_size, promotion_heap_choice);
    if (!self_success) {

@ -540,8 +540,7 @@ void ScavengerCollector::SweepArrayBufferExtensions() {

void ScavengerCollector::HandleSurvivingNewLargeObjects() {
  const bool is_compacting = heap_->incremental_marking()->IsCompacting();
  AtomicMarkingState* marking_state =
      heap_->incremental_marking()->atomic_marking_state();
  AtomicMarkingState* marking_state = heap_->atomic_marking_state();

  for (SurvivingNewLargeObjectMapEntry update_info :
       surviving_new_large_objects_) {
@ -637,8 +636,7 @@ void Scavenger::IterateAndScavengePromotedObject(HeapObject target, Map map,
  // the end of collection it would be a violation of the invariant to record
  // its slots.
  const bool record_slots =
      is_compacting_ &&
      heap()->incremental_marking()->atomic_marking_state()->IsBlack(target);
      is_compacting_ && heap()->atomic_marking_state()->IsBlack(target);

  IterateAndScavengePromotedObjectsVisitor visitor(this, record_slots);

@ -174,7 +174,7 @@ void Page::CreateBlackArea(Address start, Address end) {
  DCHECK_EQ(Page::FromAddress(start), this);
  DCHECK_LT(start, end);
  DCHECK_EQ(Page::FromAddress(end - 1), this);
  MarkingState* marking_state = heap()->incremental_marking()->marking_state();
  MarkingState* marking_state = heap()->marking_state();
  marking_state->bitmap(this)->SetRange(AddressToMarkbitIndex(start),
                                        AddressToMarkbitIndex(end));
  marking_state->IncrementLiveBytes(this, static_cast<intptr_t>(end - start));
@ -186,8 +186,7 @@ void Page::CreateBlackAreaBackground(Address start, Address end) {
  DCHECK_EQ(Page::FromAddress(start), this);
  DCHECK_LT(start, end);
  DCHECK_EQ(Page::FromAddress(end - 1), this);
  AtomicMarkingState* marking_state =
      heap()->incremental_marking()->atomic_marking_state();
  AtomicMarkingState* marking_state = heap()->atomic_marking_state();
  marking_state->bitmap(this)->SetRange(AddressToMarkbitIndex(start),
                                        AddressToMarkbitIndex(end));
  heap()->incremental_marking()->IncrementLiveBytesBackground(
@ -200,7 +199,7 @@ void Page::DestroyBlackArea(Address start, Address end) {
  DCHECK_EQ(Page::FromAddress(start), this);
  DCHECK_LT(start, end);
  DCHECK_EQ(Page::FromAddress(end - 1), this);
  MarkingState* marking_state = heap()->incremental_marking()->marking_state();
  MarkingState* marking_state = heap()->marking_state();
  marking_state->bitmap(this)->ClearRange(AddressToMarkbitIndex(start),
                                          AddressToMarkbitIndex(end));
  marking_state->IncrementLiveBytes(this, -static_cast<intptr_t>(end - start));
@ -212,8 +211,7 @@ void Page::DestroyBlackAreaBackground(Address start, Address end) {
  DCHECK_EQ(Page::FromAddress(start), this);
  DCHECK_LT(start, end);
  DCHECK_EQ(Page::FromAddress(end - 1), this);
  AtomicMarkingState* marking_state =
      heap()->incremental_marking()->atomic_marking_state();
  AtomicMarkingState* marking_state = heap()->atomic_marking_state();
  marking_state->bitmap(this)->ClearRange(AddressToMarkbitIndex(start),
                                          AddressToMarkbitIndex(end));
  heap()->incremental_marking()->IncrementLiveBytesBackground(
@ -114,9 +114,9 @@ class Sweeper::SweeperJob final : public JobTask {
  GCTracer* const tracer_;
};

Sweeper::Sweeper(Heap* heap, NonAtomicMarkingState* marking_state)
Sweeper::Sweeper(Heap* heap)
    : heap_(heap),
      marking_state_(marking_state),
      marking_state_(heap_->non_atomic_marking_state()),
      sweeping_in_progress_(false),
      should_reduce_memory_(false),
      local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity) {}

@ -77,7 +77,7 @@ class Sweeper {
  enum AddPageMode { REGULAR, READD_TEMPORARY_REMOVED_PAGE };
  enum class SweepingMode { kEagerDuringGC, kLazyOrConcurrent };

  Sweeper(Heap* heap, NonAtomicMarkingState* marking_state);
  Sweeper(Heap* heap);
  ~Sweeper();

  bool sweeping_in_progress() const { return sweeping_in_progress_; }
@ -112,9 +112,9 @@ class Sweeper {

  Page* GetSweptPageSafe(PagedSpaceBase* space);

 private:
  NonAtomicMarkingState* marking_state() const { return marking_state_; }

 private:
  void AddPageImpl(AllocationSpace space, Page* page, AddPageMode mode);

  class ConcurrentSweeper;
@ -189,7 +189,7 @@ class Sweeper {
  int NumberOfConcurrentSweepers() const;

  Heap* const heap_;
  NonAtomicMarkingState* marking_state_;
  NonAtomicMarkingState* const marking_state_;
  std::unique_ptr<JobHandle> job_handle_;
  base::Mutex mutex_;
  base::ConditionVariable cv_page_swept_;
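The Sweeper hunks above drop the separate marking-state constructor argument: because the heap now owns the state, the sweeper derives it from the injected Heap pointer instead of being handed it explicitly. A small hedged sketch of that simplification (toy types, not the real Sweeper):

// sweeper_ctor_sketch.cc - standalone toy, not part of this commit.
#include <iostream>

struct NonAtomicMarkingStateSketch {};

struct HeapSketch {
  NonAtomicMarkingStateSketch* non_atomic_marking_state() { return &state_; }
  NonAtomicMarkingStateSketch state_;
};

class SweeperSketch {
 public:
  // One argument instead of two: the marking state is looked up from the
  // heap, mirroring Sweeper::Sweeper(Heap* heap) above.
  explicit SweeperSketch(HeapSketch* heap)
      : heap_(heap), marking_state_(heap_->non_atomic_marking_state()) {}

  NonAtomicMarkingStateSketch* marking_state() const { return marking_state_; }

 private:
  HeapSketch* const heap_;
  NonAtomicMarkingStateSketch* const marking_state_;
};

int main() {
  HeapSketch heap;
  SweeperSketch sweeper(&heap);
  std::cout << (sweeper.marking_state() == heap.non_atomic_marking_state())
            << "\n";  // prints 1
  return 0;
}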
@ -6,6 +6,7 @@
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-state-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/remembered-set-inl.h"
#include "src/objects/objects-inl.h"
@ -24,11 +25,7 @@ void CheckInvariantsOfAbortedPage(Page* page) {
  // 1) Markbits are cleared
  // 2) The page is not marked as evacuation candidate anymore
  // 3) The page is not marked as aborted compaction anymore.
  CHECK(page->heap()
            ->mark_compact_collector()
            ->non_atomic_marking_state()
            ->bitmap(page)
            ->IsClean());
  CHECK(page->heap()->non_atomic_marking_state()->bitmap(page)->IsClean());
  CHECK(!page->IsEvacuationCandidate());
  CHECK(!page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
}
@ -20,6 +20,7 @@
#include "src/heap/concurrent-allocator-inl.h"
#include "src/heap/heap.h"
#include "src/heap/local-heap-inl.h"
#include "src/heap/marking-state-inl.h"
#include "src/heap/parked-scope.h"
#include "src/heap/safepoint.h"
#include "src/objects/heap-number.h"
@ -371,9 +372,9 @@ UNINITIALIZED_TEST(ConcurrentBlackAllocation) {
      HeapObject object = HeapObject::FromAddress(address);

      if (i < kWhiteIterations * kObjectsAllocatedPerIteration) {
        CHECK(heap->incremental_marking()->marking_state()->IsWhite(object));
        CHECK(heap->marking_state()->IsWhite(object));
      } else {
        CHECK(heap->incremental_marking()->marking_state()->IsBlack(object));
        CHECK(heap->marking_state()->IsBlack(object));
      }
    }

@ -427,7 +428,7 @@ UNINITIALIZED_TEST(ConcurrentWriteBarrier) {
  }
  heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
                                i::GarbageCollectionReason::kTesting);
  CHECK(heap->incremental_marking()->marking_state()->IsWhite(value));
  CHECK(heap->marking_state()->IsWhite(value));

  auto thread =
      std::make_unique<ConcurrentWriteBarrierThread>(heap, fixed_array, value);
@ -435,7 +436,7 @@ UNINITIALIZED_TEST(ConcurrentWriteBarrier) {

  thread->Join();

  CHECK(heap->incremental_marking()->marking_state()->IsBlackOrGrey(value));
  CHECK(heap->marking_state()->IsBlackOrGrey(value));
  heap::InvokeMarkSweep(i_isolate);

  isolate->Dispose();
@ -513,7 +514,7 @@ UNINITIALIZED_TEST(ConcurrentRecordRelocSlot) {
  }
  heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
                                i::GarbageCollectionReason::kTesting);
  CHECK(heap->incremental_marking()->marking_state()->IsWhite(value));
  CHECK(heap->marking_state()->IsWhite(value));

  {
    // TODO(v8:13023): remove ResetPKUPermissionsForThreadSpawning in the
@ -527,7 +528,7 @@ UNINITIALIZED_TEST(ConcurrentRecordRelocSlot) {
    thread->Join();
  }

  CHECK(heap->incremental_marking()->marking_state()->IsBlackOrGrey(value));
  CHECK(heap->marking_state()->IsBlackOrGrey(value));
  heap::InvokeMarkSweep(i_isolate);
}
isolate->Dispose();
@ -50,6 +50,7 @@
#include "src/heap/large-spaces.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-barrier.h"
#include "src/heap/marking-state-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/memory-reducer.h"
#include "src/heap/parked-scope.h"
@ -2481,7 +2482,7 @@ TEST(InstanceOfStubWriteBarrier) {

  CHECK(f->HasAttachedOptimizedCode());

  MarkingState* marking_state = marking->marking_state();
  MarkingState* marking_state = CcTest::heap()->marking_state();

  const double kStepSizeInMs = 100;
  while (!marking_state->IsBlack(f->code())) {
@ -5756,7 +5757,7 @@ TEST(Regress598319) {

  CHECK(heap->lo_space()->Contains(arr.get()));
  IncrementalMarking* marking = heap->incremental_marking();
  MarkingState* marking_state = marking->marking_state();
  MarkingState* marking_state = heap->marking_state();
  CHECK(marking_state->IsWhite(arr.get()));
  for (int i = 0; i < arr.get().length(); i++) {
    HeapObject arr_value = HeapObject::cast(arr.get().get(i));
@ -5992,7 +5993,7 @@ TEST(LeftTrimFixedArrayInBlackArea) {
  Handle<FixedArray> array =
      isolate->factory()->NewFixedArray(50, AllocationType::kOld);
  CHECK(heap->old_space()->Contains(*array));
  MarkingState* marking_state = marking->marking_state();
  MarkingState* marking_state = heap->marking_state();
  CHECK(marking_state->IsBlack(*array));

  // Now left trim the allocated black area. A filler has to be installed
@ -6038,7 +6039,7 @@ TEST(ContinuousLeftTrimFixedArrayInBlackArea) {
  Address start_address = array->address();
  Address end_address = start_address + array->Size();
  Page* page = Page::FromAddress(start_address);
  NonAtomicMarkingState* marking_state = marking->non_atomic_marking_state();
  NonAtomicMarkingState* marking_state = heap->non_atomic_marking_state();
  CHECK(marking_state->IsBlack(*array));
  CHECK(marking_state->bitmap(page)->AllBitsSetInRange(
      page->AddressToMarkbitIndex(start_address),
@ -6103,13 +6104,12 @@ TEST(ContinuousRightTrimFixedArrayInBlackArea) {

  // Allocate the fixed array that will be trimmed later.
  Handle<FixedArray> array =
      CcTest::i_isolate()->factory()->NewFixedArray(100, AllocationType::kOld);
      isolate->factory()->NewFixedArray(100, AllocationType::kOld);
  Address start_address = array->address();
  Address end_address = start_address + array->Size();
  Page* page = Page::FromAddress(start_address);
  NonAtomicMarkingState* marking_state = marking->non_atomic_marking_state();
  NonAtomicMarkingState* marking_state = heap->non_atomic_marking_state();
  CHECK(marking_state->IsBlack(*array));

  CHECK(marking_state->bitmap(page)->AllBitsSetInRange(
      page->AddressToMarkbitIndex(start_address),
      page->AddressToMarkbitIndex(end_address)));
@ -471,7 +471,7 @@ TEST(Regress5829) {
  heap->CreateFillerObjectAt(old_end - kTaggedSize, kTaggedSize);
  heap->old_space()->FreeLinearAllocationArea();
  Page* page = Page::FromAddress(array->address());
  MarkingState* marking_state = marking->marking_state();
  MarkingState* marking_state = heap->marking_state();
  for (auto object_and_size :
       LiveObjectRange<kGreyObjects>(page, marking_state->bitmap(page))) {
    CHECK(!object_and_size.first.IsFreeSpaceOrFiller());
@ -4,6 +4,7 @@

#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-state-inl.h"
#include "src/heap/spaces.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/objects-inl.h"
@ -21,7 +22,7 @@ HEAP_TEST(WriteBarrier_Marking) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Factory* factory = isolate->factory();
  MarkCompactCollector* collector = isolate->heap()->mark_compact_collector();
  Heap* heap = isolate->heap();
  HandleScope outer(isolate);
  Handle<FixedArray> objects = factory->NewFixedArray(3);
  v8::Global<Value> global_objects(CcTest::isolate(), Utils::ToLocal(objects));
@ -40,20 +41,19 @@ HEAP_TEST(WriteBarrier_Marking) {
  FixedArray host = FixedArray::cast(objects->get(0));
  HeapObject value1 = HeapObject::cast(objects->get(1));
  HeapObject value2 = HeapObject::cast(objects->get(2));
  CHECK(collector->marking_state()->IsWhite(host));
  CHECK(collector->marking_state()->IsWhite(value1));
  CHECK(heap->marking_state()->IsWhite(host));
  CHECK(heap->marking_state()->IsWhite(value1));
  WriteBarrier::Marking(host, host.RawFieldOfElementAt(0), value1);
  CHECK_EQ(V8_CONCURRENT_MARKING_BOOL,
           collector->marking_state()->IsGrey(value1));
  collector->marking_state()->WhiteToGrey(host);
  collector->marking_state()->GreyToBlack(host);
  CHECK(collector->marking_state()->IsWhite(value2));
  CHECK_EQ(V8_CONCURRENT_MARKING_BOOL, heap->marking_state()->IsGrey(value1));
  heap->marking_state()->WhiteToGrey(host);
  heap->marking_state()->GreyToBlack(host);
  CHECK(heap->marking_state()->IsWhite(value2));
  WriteBarrier::Marking(host, host.RawFieldOfElementAt(0), value2);
  CHECK(collector->marking_state()->IsGrey(value2));
  CHECK(heap->marking_state()->IsGrey(value2));
  heap::SimulateIncrementalMarking(CcTest::heap(), true);
  CHECK(collector->marking_state()->IsBlack(host));
  CHECK(collector->marking_state()->IsBlack(value1));
  CHECK(collector->marking_state()->IsBlack(value2));
  CHECK(heap->marking_state()->IsBlack(host));
  CHECK(heap->marking_state()->IsBlack(value1));
  CHECK(heap->marking_state()->IsBlack(value2));
}

HEAP_TEST(WriteBarrier_MarkingExtension) {
@ -62,7 +62,7 @@ HEAP_TEST(WriteBarrier_MarkingExtension) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Factory* factory = isolate->factory();
  MarkCompactCollector* collector = isolate->heap()->mark_compact_collector();
  Heap* heap = isolate->heap();
  HandleScope outer(isolate);
  Handle<FixedArray> objects = factory->NewFixedArray(1);
  ArrayBufferExtension* extension;
@ -75,7 +75,7 @@ HEAP_TEST(WriteBarrier_MarkingExtension) {
  }
  heap::SimulateIncrementalMarking(CcTest::heap(), false);
  JSArrayBuffer host = JSArrayBuffer::cast(objects->get(0));
  CHECK(collector->marking_state()->IsWhite(host));
  CHECK(heap->marking_state()->IsWhite(host));
  CHECK(!extension->IsMarked());
  WriteBarrier::Marking(host, extension);
  // Concurrent marking barrier should mark this object.
@ -84,7 +84,7 @@ HEAP_TEST(WriteBarrier_MarkingExtension) {
  v8::Global<ArrayBuffer> global_host(CcTest::isolate(),
                                      Utils::ToLocal(handle(host, isolate)));
  heap::SimulateIncrementalMarking(CcTest::heap(), true);
  CHECK(collector->marking_state()->IsBlack(host));
  CHECK(heap->marking_state()->IsBlack(host));
  CHECK(extension->IsMarked());
}

@ -7,6 +7,7 @@
#include "src/api/api-inl.h"
#include "src/handles/global-handles.h"
#include "src/heap/cppgc/visitor.h"
#include "src/heap/marking-state-inl.h"
#include "test/unittests/heap/heap-utils.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
@ -12,6 +12,7 @@
#include "include/v8-object.h"
#include "src/handles/handles-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/marking-state-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/remembered-set.h"
#include "src/heap/safepoint.h"
@ -396,7 +397,7 @@ TEST_F(HeapTest, Regress978156) {
    marking->Start(GarbageCollector::MARK_COMPACTOR,
                   i::GarbageCollectionReason::kTesting);
  }
  MarkingState* marking_state = marking->marking_state();
  MarkingState* marking_state = heap->marking_state();
  // 6. Mark the filler black to access its two markbits. This triggers
  // an out-of-bounds access of the marking bitmap in a bad case.
  marking_state->WhiteToGrey(filler);
@ -3,6 +3,7 @@
// found in the LICENSE file.

#include "src/execution/isolate.h"
#include "src/heap/marking-state-inl.h"
#include "test/unittests/heap/heap-utils.h"

namespace v8 {
@ -64,8 +65,7 @@ TEST_F(PagePromotionTest, PagePromotion_NewToOld) {
  const int threshold_bytes = static_cast<int>(
      v8_flags.page_promotion_threshold *
      MemoryChunkLayout::AllocatableMemoryInDataPage() / 100);
  CHECK_GE(heap->incremental_marking()->marking_state()->live_bytes(
               to_be_promoted_page),
  CHECK_GE(heap->marking_state()->live_bytes(to_be_promoted_page),
           threshold_bytes);

  // Actual checks: The page is in new space first, but is moved to old space