[heap] Remove marking deque overflow handling
Removes
- SequentialMarkingDeque
- The ability to handle marking deque overflow
- BlackToGrey transitions

We switched to a different marking work list on M61 that fails in OOM upon
failing to allocate Segments used in the work list.

Bug: chromium:758570
Change-Id: I66e2ab912271bf84b085dccc9b4bdd96076b64fb
Reviewed-on: https://chromium-review.googlesource.com/632676
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#48078}
This commit is contained in:
parent 59e4b75187
commit 4e5db9a6c8
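For context on the behavioral change described in the commit message, here is a small, self-contained C++ sketch (toy types only; BoundedMarkingDeque, SegmentedMarkingWorklist, and HeapObjectStub are illustrative names, not V8 classes). It contrasts the old contract, where Push() could report overflow so the collector reverted objects from black to grey and rescanned the heap, with the new contract, where Push() is expected to always succeed and a failed segment allocation is treated as a fatal OOM.

// Toy model of the two marking work list contracts; not V8 code.
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <deque>
#include <new>

struct HeapObjectStub {};  // stand-in for v8::internal::HeapObject

// Before this commit: the marking deque had a fixed backing store, Push()
// reported overflow via its return value, and the collector reacted by
// turning the object back from black to grey and rescanning the heap later.
class BoundedMarkingDeque {
 public:
  explicit BoundedMarkingDeque(std::size_t capacity) : capacity_(capacity) {}
  bool Push(HeapObjectStub* obj) {
    if (deque_.size() == capacity_) {
      overflowed_ = true;  // caller reacts with BlackToGrey + heap rescan
      return false;
    }
    deque_.push_back(obj);
    return true;
  }
  bool overflowed() const { return overflowed_; }

 private:
  std::deque<HeapObjectStub*> deque_;
  const std::size_t capacity_;
  bool overflowed_ = false;
};

// After this commit: Push() must always succeed; if growing the work list
// fails, the process bails out with a fatal OOM instead of entering an
// overflow mode.
class SegmentedMarkingWorklist {
 public:
  void Push(HeapObjectStub* obj) {
    if (!TryPush(obj)) {
      std::fprintf(stderr, "Fatal process out of memory: marking worklist\n");
      std::abort();  // models V8 failing with OOM on Segment allocation
    }
  }

 private:
  bool TryPush(HeapObjectStub* obj) {
    // std::deque grows block-by-block, loosely analogous to the Segments in
    // the commit message; std::bad_alloc stands in for a failed allocation.
    try {
      deque_.push_back(obj);
      return true;
    } catch (const std::bad_alloc&) {
      return false;
    }
  }
  std::deque<HeapObjectStub*> deque_;
};

int main() {
  HeapObjectStub a, b;

  BoundedMarkingDeque old_style(1);
  bool first = old_style.Push(&a);   // fits
  bool second = old_style.Push(&b);  // overflows; object stays to be rescanned
  std::printf("first=%d second=%d overflowed=%d\n", first, second,
              old_style.overflowed());

  SegmentedMarkingWorklist new_style;
  new_style.Push(&a);  // either succeeds or terminates the process
  new_style.Push(&b);
  return 0;
}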
BUILD.gn
@@ -1649,8 +1649,6 @@ v8_source_set("v8_base") {
     "src/heap/scavenger-inl.h",
     "src/heap/scavenger.cc",
     "src/heap/scavenger.h",
-    "src/heap/sequential-marking-deque.cc",
-    "src/heap/sequential-marking-deque.h",
     "src/heap/slot-set.h",
     "src/heap/spaces-inl.h",
     "src/heap/spaces.cc",
@@ -1729,8 +1729,6 @@ void Heap::MarkCompactEpilogue() {
 
   PreprocessStackTraces();
   DCHECK(incremental_marking()->IsStopped());
-
-  mark_compact_collector()->marking_worklist()->StopUsing();
 }
@@ -122,8 +122,8 @@ void IncrementalMarking::MarkBlackAndPush(HeapObject* obj) {
   if (marking_state()->GreyToBlack(obj)) {
     if (FLAG_concurrent_marking) {
       marking_worklist()->PushBailout(obj);
-    } else if (!marking_worklist()->Push(obj)) {
-      non_atomic_marking_state()->BlackToGrey(obj);
+    } else {
+      marking_worklist()->Push(obj);
     }
   }
 }
@@ -217,24 +217,15 @@ class IncrementalMarkingMarkingVisitor final
       if (FLAG_concurrent_marking) {
         incremental_marking_->marking_worklist()->PushBailout(object);
       } else {
-        if (incremental_marking_->marking_state()->IsGrey(object)) {
-          incremental_marking_->marking_worklist()->Push(object);
-        } else {
-          DCHECK(incremental_marking_->marking_state()->IsBlack(object));
-          collector_->PushBlack(object);
-        }
+        incremental_marking_->marking_worklist()->Push(object);
       }
       int end_offset =
           Min(object_size, start_offset + kProgressBarScanningChunk);
       int already_scanned_offset = start_offset;
-      bool scan_until_end = false;
-      do {
-        VisitPointers(object, HeapObject::RawField(object, start_offset),
-                      HeapObject::RawField(object, end_offset));
-        start_offset = end_offset;
-        end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
-        scan_until_end = incremental_marking_->marking_worklist()->IsFull();
-      } while (scan_until_end && start_offset < object_size);
+      VisitPointers(object, HeapObject::RawField(object, start_offset),
+                    HeapObject::RawField(object, end_offset));
+      start_offset = end_offset;
+      end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
       chunk->set_progress_bar(start_offset);
       if (start_offset < object_size) {
         incremental_marking_->NotifyIncompleteScanOfObject(
@@ -543,8 +534,6 @@ void IncrementalMarking::StartMarking() {
 
   PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
 
-  marking_worklist()->StartUsing();
-
   ActivateIncrementalWriteBarrier();
 
   // Marking bits are cleared by the sweeper.
@@ -12,16 +12,9 @@
 namespace v8 {
 namespace internal {
 
-void MarkCompactCollector::PushBlack(HeapObject* obj) {
-  DCHECK(non_atomic_marking_state()->IsBlack(obj));
-  if (!marking_worklist()->Push(obj)) {
-    non_atomic_marking_state()->BlackToGrey(obj);
-  }
-}
-
 void MarkCompactCollector::MarkObject(HeapObject* host, HeapObject* obj) {
   if (non_atomic_marking_state()->WhiteToBlack(obj)) {
-    PushBlack(obj);
+    marking_worklist()->Push(obj);
     if (V8_UNLIKELY(FLAG_track_retaining_path)) {
       heap_->AddRetainer(host, obj);
     }
@@ -30,7 +23,7 @@ void MarkCompactCollector::MarkObject(HeapObject* host, HeapObject* obj) {
 
 void MarkCompactCollector::MarkRootObject(Root root, HeapObject* obj) {
   if (non_atomic_marking_state()->WhiteToBlack(obj)) {
-    PushBlack(obj);
+    marking_worklist()->Push(obj);
     if (V8_UNLIKELY(FLAG_track_retaining_path)) {
       heap_->AddRetainingRoot(root, obj);
     }
@@ -39,7 +32,7 @@ void MarkCompactCollector::MarkRootObject(Root root, HeapObject* obj) {
 
 void MarkCompactCollector::MarkExternallyReferencedObject(HeapObject* obj) {
   if (non_atomic_marking_state()->WhiteToBlack(obj)) {
-    PushBlack(obj);
+    marking_worklist()->Push(obj);
     if (V8_UNLIKELY(FLAG_track_retaining_path)) {
      heap_->AddRetainingRoot(Root::kWrapperTracing, obj);
     }
@@ -463,7 +463,6 @@ void MarkCompactCollector::SetUp() {
   DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
   DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
   DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-  marking_worklist()->SetUp();
 }
 
 void MinorMarkCompactCollector::SetUp() {}
@@ -1372,39 +1371,6 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
   MarkCompactCollector::NonAtomicMarkingState* marking_state_;
 };
 
-// Fill the marking stack with overflowed objects returned by the given
-// iterator. Stop when the marking stack is filled or the end of the space
-// is reached, whichever comes first.
-template <class T>
-void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
-  // The caller should ensure that the marking stack is initially not full,
-  // so that we don't waste effort pointlessly scanning for objects.
-  DCHECK(!marking_worklist()->IsFull());
-
-  Map* filler_map = heap()->one_pointer_filler_map();
-  for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
-    if ((object->map() != filler_map) &&
-        non_atomic_marking_state()->GreyToBlack(object)) {
-      PushBlack(object);
-      if (marking_worklist()->IsFull()) return;
-    }
-  }
-}
-
-void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
-  DCHECK(!marking_worklist()->IsFull());
-  for (auto object_and_size : LiveObjectRange<kGreyObjects>(
-           p, non_atomic_marking_state()->bitmap(p))) {
-    HeapObject* const object = object_and_size.first;
-    bool success = non_atomic_marking_state()->GreyToBlack(object);
-    DCHECK(success);
-    USE(success);
-    PushBlack(object);
-    if (marking_worklist()->IsFull()) return;
-  }
-}
-
 class RecordMigratedSlotVisitor : public ObjectVisitor {
  public:
  explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
@@ -1813,23 +1779,6 @@ class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
   Heap* heap_;
 };
 
-void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
-  for (Page* p : *space) {
-    DiscoverGreyObjectsOnPage(p);
-    if (marking_worklist()->IsFull()) return;
-  }
-}
-
-void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
-  NewSpace* space = heap()->new_space();
-  for (Page* page : PageRange(space->bottom(), space->top())) {
-    DiscoverGreyObjectsOnPage(page);
-    if (marking_worklist()->IsFull()) return;
-  }
-}
-
 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
   Object* o = *p;
   if (!o->IsHeapObject()) return false;
@@ -1860,12 +1809,6 @@ void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
   // Custom marking for string table and top optimized frame.
   MarkStringTable(custom_root_body_visitor);
   ProcessTopOptimizedFrame(custom_root_body_visitor);
-
-  // There may be overflowed objects in the heap. Visit them now.
-  while (marking_worklist()->overflowed()) {
-    RefillMarkingWorklist();
-    EmptyMarkingWorklist();
-  }
 }
 
 // Mark all objects reachable from the objects on the marking stack.
@@ -1888,42 +1831,12 @@ void MarkCompactCollector::EmptyMarkingWorklist() {
   DCHECK(marking_worklist()->IsEmpty());
 }
 
-// Sweep the heap for overflowed objects, clear their overflow bits, and
-// push them on the marking stack. Stop early if the marking stack fills
-// before sweeping completes. If sweeping completes, there are no remaining
-// overflowed objects in the heap so the overflow flag on the markings stack
-// is cleared.
-void MarkCompactCollector::RefillMarkingWorklist() {
-  isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow);
-  DCHECK(marking_worklist()->overflowed());
-
-  DiscoverGreyObjectsInNewSpace();
-  if (marking_worklist()->IsFull()) return;
-
-  DiscoverGreyObjectsInSpace(heap()->old_space());
-  if (marking_worklist()->IsFull()) return;
-  DiscoverGreyObjectsInSpace(heap()->code_space());
-  if (marking_worklist()->IsFull()) return;
-  DiscoverGreyObjectsInSpace(heap()->map_space());
-  if (marking_worklist()->IsFull()) return;
-  LargeObjectIterator lo_it(heap()->lo_space());
-  DiscoverGreyObjectsWithIterator(&lo_it);
-  if (marking_worklist()->IsFull()) return;
-
-  marking_worklist()->ClearOverflowed();
-}
-
 // Mark all objects reachable (transitively) from objects on the marking
 // stack. Before: the marking stack contains zero or more heap object
 // pointers. After: the marking stack is empty and there are no overflowed
 // objects in the heap.
 void MarkCompactCollector::ProcessMarkingWorklist() {
   EmptyMarkingWorklist();
-  while (marking_worklist()->overflowed()) {
-    RefillMarkingWorklist();
-    EmptyMarkingWorklist();
-  }
   DCHECK(marking_worklist()->IsEmpty());
 }
 
@@ -1931,7 +1844,7 @@ void MarkCompactCollector::ProcessMarkingWorklist() {
 // stack including references only considered in the atomic marking pause.
 void MarkCompactCollector::ProcessEphemeralMarking(
     bool only_process_harmony_weak_collections) {
-  DCHECK(marking_worklist()->IsEmpty() && !marking_worklist()->overflowed());
+  DCHECK(marking_worklist()->IsEmpty());
   bool work_to_do = true;
   while (work_to_do) {
     if (!only_process_harmony_weak_collections) {
@@ -2634,8 +2547,6 @@ void MarkCompactCollector::MarkLiveObjects() {
   state_ = MARK_LIVE_OBJECTS;
 #endif
 
-  marking_worklist()->StartUsing();
-
   heap_->local_embedder_heap_tracer()->EnterFinalPause();
 
   RootMarkingVisitor root_visitor(this);
@@ -9,7 +9,6 @@
 #include <vector>
 
 #include "src/heap/marking.h"
-#include "src/heap/sequential-marking-deque.h"
 #include "src/heap/spaces.h"
 #include "src/heap/worklist.h"
 
@@ -62,14 +61,6 @@ class MarkingStateBase {
     return Marking::IsBlackOrGrey<access_mode>(MarkBitFrom(obj));
   }
 
-  V8_INLINE bool BlackToGrey(HeapObject* obj) {
-    MemoryChunk* p = MemoryChunk::FromAddress(obj->address());
-    MarkBit markbit = MarkBitFrom(p, obj->address());
-    if (!Marking::BlackToGrey<access_mode>(markbit)) return false;
-    static_cast<ConcreteState*>(this)->IncrementLiveBytes(p, -obj->Size());
-    return true;
-  }
-
   V8_INLINE bool WhiteToGrey(HeapObject* obj) {
     return Marking::WhiteToGrey<access_mode>(MarkBitFrom(obj));
   }
@@ -496,10 +487,16 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
     // The heap parameter is not used but needed to match the sequential case.
     explicit MarkingWorklist(Heap* heap) {}
 
-    bool Push(HeapObject* object) { return shared_.Push(kMainThread, object); }
+    void Push(HeapObject* object) {
+      bool success = shared_.Push(kMainThread, object);
+      USE(success);
+      DCHECK(success);
+    }
 
-    bool PushBailout(HeapObject* object) {
-      return bailout_.Push(kMainThread, object);
+    void PushBailout(HeapObject* object) {
+      bool success = bailout_.Push(kMainThread, object);
+      USE(success);
+      DCHECK(success);
     }
 
     HeapObject* Pop() {
@@ -516,8 +513,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
       shared_.Clear();
     }
 
-    bool IsFull() { return false; }
-
     bool IsEmpty() {
       return bailout_.IsLocalEmpty(kMainThread) &&
              shared_.IsLocalEmpty(kMainThread) &&
@@ -542,15 +537,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
     ConcurrentMarkingWorklist* shared() { return &shared_; }
     ConcurrentMarkingWorklist* bailout() { return &bailout_; }
 
-    // These empty functions are needed to match the interface
-    // of the sequential marking deque.
-    void SetUp() {}
     void TearDown() { Clear(); }
-    void StartUsing() {}
-    void StopUsing() {}
-    void ClearOverflowed() {}
-    void SetOverflowed() {}
-    bool overflowed() const { return false; }
 
     void Print() {
       PrintWorklist("shared", &shared_);
@@ -772,9 +759,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
 
   void MarkLiveObjects() override;
 
-  // Pushes a black object onto the marking work list.
-  V8_INLINE void PushBlack(HeapObject* obj);
-
   // Marks the object black and adds it to the marking work list.
   // This is for non-incremental marking only.
   V8_INLINE void MarkObject(HeapObject* host, HeapObject* obj);
@@ -816,19 +800,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   // in the heap, in which case the marking stack's overflow flag will be set.
   void EmptyMarkingWorklist() override;
 
-  // Refill the marking stack with overflowed objects from the heap. This
-  // function either leaves the marking stack full or clears the overflow
-  // flag on the marking stack.
-  void RefillMarkingWorklist();
-
-  // Helper methods for refilling the marking stack by discovering grey objects
-  // on various pages of the heap. Used by {RefillMarkingWorklist} only.
-  template <class T>
-  void DiscoverGreyObjectsWithIterator(T* it);
-  void DiscoverGreyObjectsOnPage(MemoryChunk* p);
-  void DiscoverGreyObjectsInSpace(PagedSpace* space);
-  void DiscoverGreyObjectsInNewSpace();
-
   // Callback function for telling whether the object *p is an unmarked
   // heap object.
   static bool IsUnmarkedHeapObject(Object** p);
@@ -266,13 +266,6 @@ class Marking : public AllStatic {
     markbit.Next().Set<mode>();
   }
 
-  template <AccessMode mode = AccessMode::NON_ATOMIC>
-  INLINE(static bool BlackToGrey(MarkBit markbit)) {
-    STATIC_ASSERT(mode == AccessMode::NON_ATOMIC);
-    DCHECK(IsBlack(markbit));
-    return markbit.Next().Clear<mode>();
-  }
-
   template <AccessMode mode = AccessMode::NON_ATOMIC>
   INLINE(static bool WhiteToGrey(MarkBit markbit)) {
     return markbit.Set<mode>();
src/heap/sequential-marking-deque.cc (deleted)
@@ -1,100 +0,0 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/sequential-marking-deque.h"

#include "src/allocation.h"
#include "src/base/bits.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"

namespace v8 {
namespace internal {

void SequentialMarkingDeque::SetUp() {
  base::VirtualMemory reservation;
  if (!AllocVirtualMemory(kMaxSize, heap_->GetRandomMmapAddr(), &reservation)) {
    V8::FatalProcessOutOfMemory("SequentialMarkingDeque::SetUp");
  }
  backing_store_committed_size_ = 0;
  backing_store_.TakeControl(&reservation);
}

void SequentialMarkingDeque::TearDown() {
  if (backing_store_.IsReserved()) backing_store_.Release();
}

void SequentialMarkingDeque::StartUsing() {
  base::LockGuard<base::Mutex> guard(&mutex_);
  if (in_use_) {
    // This can happen in mark-compact GC if the incremental marker already
    // started using the marking deque.
    return;
  }
  in_use_ = true;
  EnsureCommitted();
  array_ = reinterpret_cast<HeapObject**>(backing_store_.address());
  size_t size = FLAG_force_marking_deque_overflows
                    ? 64 * kPointerSize
                    : backing_store_committed_size_;
  DCHECK(base::bits::IsPowerOfTwo(static_cast<uint32_t>(size / kPointerSize)));
  mask_ = static_cast<int>((size / kPointerSize) - 1);
  top_ = bottom_ = 0;
  overflowed_ = false;
}

void SequentialMarkingDeque::StopUsing() {
  base::LockGuard<base::Mutex> guard(&mutex_);
  if (!in_use_) return;
  DCHECK(IsEmpty());
  DCHECK(!overflowed_);
  top_ = bottom_ = mask_ = 0;
  in_use_ = false;
  if (FLAG_concurrent_sweeping) {
    StartUncommitTask();
  } else {
    Uncommit();
  }
}

void SequentialMarkingDeque::Clear() {
  DCHECK(in_use_);
  top_ = bottom_ = 0;
  overflowed_ = false;
}

void SequentialMarkingDeque::Uncommit() {
  DCHECK(!in_use_);
  bool success = backing_store_.Uncommit(backing_store_.address(),
                                         backing_store_committed_size_);
  backing_store_committed_size_ = 0;
  CHECK(success);
}

void SequentialMarkingDeque::EnsureCommitted() {
  DCHECK(in_use_);
  if (backing_store_committed_size_ > 0) return;

  for (size_t size = kMaxSize; size >= kMinSize; size /= 2) {
    if (backing_store_.Commit(backing_store_.address(), size, false)) {
      backing_store_committed_size_ = size;
      break;
    }
  }
  if (backing_store_committed_size_ == 0) {
    V8::FatalProcessOutOfMemory("SequentialMarkingDeque::EnsureCommitted");
  }
}

void SequentialMarkingDeque::StartUncommitTask() {
  if (!uncommit_task_pending_) {
    uncommit_task_pending_ = true;
    UncommitTask* task = new UncommitTask(heap_->isolate(), this);
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        task, v8::Platform::kShortRunningTask);
  }
}

}  // namespace internal
}  // namespace v8
src/heap/sequential-marking-deque.h (deleted)
@@ -1,156 +0,0 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SEQUENTIAL_MARKING_DEQUE_
#define V8_HEAP_SEQUENTIAL_MARKING_DEQUE_

#include <deque>

#include "src/base/platform/mutex.h"
#include "src/base/platform/platform.h"
#include "src/cancelable-task.h"

namespace v8 {
namespace internal {

class Heap;
class Isolate;
class HeapObject;

// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
class SequentialMarkingDeque {
 public:
  explicit SequentialMarkingDeque(Heap* heap)
      : backing_store_committed_size_(0),
        array_(nullptr),
        top_(0),
        bottom_(0),
        mask_(0),
        overflowed_(false),
        in_use_(false),
        uncommit_task_pending_(false),
        heap_(heap) {}

  void SetUp();
  void TearDown();

  // Ensures that the marking deque is committed and will stay committed until
  // StopUsing() is called.
  void StartUsing();
  void StopUsing();
  void Clear();

  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }

  inline bool IsEmpty() { return top_ == bottom_; }

  int Size() {
    // Return (top - bottom + capacity) % capacity, where capacity = mask + 1.
    return (top_ - bottom_ + mask_ + 1) & mask_;
  }

  bool overflowed() const { return overflowed_; }

  void ClearOverflowed() { overflowed_ = false; }

  void SetOverflowed() { overflowed_ = true; }

  // Push the object on the marking stack if there is room, otherwise mark the
  // deque as overflowed and wait for a rescan of the heap.
  INLINE(bool Push(HeapObject* object)) {
    if (IsFull()) {
      SetOverflowed();
      return false;
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
      return true;
    }
  }

  INLINE(HeapObject* Pop()) {
    if (IsEmpty()) return nullptr;
    top_ = ((top_ - 1) & mask_);
    HeapObject* object = array_[top_];
    return object;
  }

  // Calls the specified callback on each element of the deque and replaces
  // the element with the result of the callback. If the callback returns
  // nullptr then the element is removed from the deque.
  // The callback must accept HeapObject* and return HeapObject*.
  template <typename Callback>
  void Update(Callback callback) {
    int i = bottom_;
    int new_top = bottom_;
    while (i != top_) {
      if (callback(array_[i], &array_[new_top])) {
        new_top = (new_top + 1) & mask_;
      }
      i = (i + 1) & mask_;
    }
    top_ = new_top;
  }

 private:
  // This task uncommits the marking_deque backing store if
  // marking_deque->in_use_ is false.
  class UncommitTask : public CancelableTask {
   public:
    explicit UncommitTask(Isolate* isolate,
                          SequentialMarkingDeque* marking_deque)
        : CancelableTask(isolate), marking_deque_(marking_deque) {}

   private:
    // CancelableTask override.
    void RunInternal() override {
      base::LockGuard<base::Mutex> guard(&marking_deque_->mutex_);
      if (!marking_deque_->in_use_) {
        marking_deque_->Uncommit();
      }
      marking_deque_->uncommit_task_pending_ = false;
    }

    SequentialMarkingDeque* marking_deque_;
    DISALLOW_COPY_AND_ASSIGN(UncommitTask);
  };

  static const size_t kMaxSize = 4 * MB;
  static const size_t kMinSize = 256 * KB;

  // Must be called with mutex lock.
  void EnsureCommitted();

  // Must be called with mutex lock.
  void Uncommit();

  // Must be called with mutex lock.
  void StartUncommitTask();

  base::Mutex mutex_;

  base::VirtualMemory backing_store_;
  size_t backing_store_committed_size_;
  HeapObject** array_;
  // array_[(top - 1) & mask_] is the top element in the deque. The Deque is
  // empty when top_ == bottom_. It is full when top_ + 1 == bottom
  // (mod mask + 1).
  int top_;
  int bottom_;
  int mask_;
  bool overflowed_;
  // in_use_ == true after taking mutex lock implies that the marking deque is
  // committed and will stay committed at least until in_use_ == false.
  bool in_use_;
  bool uncommit_task_pending_;
  Heap* heap_;

  DISALLOW_COPY_AND_ASSIGN(SequentialMarkingDeque);
};

}  // namespace internal
}  // namespace v8

#endif  // V8_SEQUENTIAL_MARKING_DEQUE_
@@ -1037,8 +1037,6 @@
         'heap/scavenger-inl.h',
         'heap/scavenger.cc',
         'heap/scavenger.h',
-        'heap/sequential-marking-deque.cc',
-        'heap/sequential-marking-deque.h',
         'heap/slot-set.h',
         'heap/spaces-inl.h',
         'heap/spaces.cc',
@@ -42,7 +42,6 @@
 #include "src/global-handles.h"
 #include "src/heap/mark-compact-inl.h"
 #include "src/heap/mark-compact.h"
-#include "src/heap/sequential-marking-deque.h"
 #include "src/objects-inl.h"
 #include "test/cctest/cctest.h"
 #include "test/cctest/heap/heap-tester.h"
@@ -52,30 +51,6 @@ namespace v8 {
 namespace internal {
 namespace heap {
 
-TEST(SequentialMarkingDeque) {
-  CcTest::InitializeVM();
-  SequentialMarkingDeque s(CcTest::i_isolate()->heap());
-  s.SetUp();
-  s.StartUsing();
-  Address original_address = reinterpret_cast<Address>(&s);
-  Address current_address = original_address;
-  while (!s.IsFull()) {
-    s.Push(HeapObject::FromAddress(current_address));
-    current_address += kPointerSize;
-  }
-
-  while (!s.IsEmpty()) {
-    Address value = s.Pop()->address();
-    current_address -= kPointerSize;
-    CHECK_EQ(current_address, value);
-  }
-
-  CHECK_EQ(original_address, current_address);
-  s.StopUsing();
-  CcTest::i_isolate()->cancelable_task_manager()->CancelAndWait();
-  s.TearDown();
-}
-
 TEST(Promotion) {
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
@@ -32,7 +32,7 @@ TEST(Marking, TransitionWhiteBlackWhite) {
   free(bitmap);
 }
 
-TEST(Marking, TransitionWhiteGreyBlackGrey) {
+TEST(Marking, TransitionWhiteGreyBlack) {
   Bitmap* bitmap = reinterpret_cast<Bitmap*>(
       calloc(Bitmap::kSize / kPointerSize, kPointerSize));
   const int kLocationsSize = 3;
@@ -51,10 +51,6 @@ TEST(Marking, TransitionWhiteGreyBlackGrey) {
     CHECK(Marking::IsBlack(mark_bit));
     CHECK(Marking::IsBlackOrGrey(mark_bit));
     CHECK(!Marking::IsImpossible(mark_bit));
-    Marking::BlackToGrey(mark_bit);
-    CHECK(Marking::IsGrey(mark_bit));
-    CHECK(Marking::IsBlackOrGrey(mark_bit));
-    CHECK(!Marking::IsImpossible(mark_bit));
     Marking::MarkWhite(mark_bit);
     CHECK(Marking::IsWhite(mark_bit));
     CHECK(!Marking::IsImpossible(mark_bit));