diff --git a/BUILD.gn b/BUILD.gn
index 6d060be0f8..cb37b7abec 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -1601,6 +1601,8 @@ v8_source_set("v8_base") {
     "src/heap/scavenger-inl.h",
     "src/heap/scavenger.cc",
     "src/heap/scavenger.h",
+    "src/heap/sequential-marking-deque.cc",
+    "src/heap/sequential-marking-deque.h",
     "src/heap/slot-set.h",
     "src/heap/spaces-inl.h",
     "src/heap/spaces.cc",
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 6fae209336..f6d98bed55 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -2280,90 +2280,6 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(
   }
 }
 
-void MarkingDeque::SetUp() {
-  backing_store_ = new base::VirtualMemory(kMaxSize);
-  backing_store_committed_size_ = 0;
-  if (backing_store_ == nullptr) {
-    V8::FatalProcessOutOfMemory("MarkingDeque::SetUp");
-  }
-}
-
-void MarkingDeque::TearDown() {
-  delete backing_store_;
-}
-
-void MarkingDeque::StartUsing() {
-  base::LockGuard<base::Mutex> guard(&mutex_);
-  if (in_use_) {
-    // This can happen in mark-compact GC if the incremental marker already
-    // started using the marking deque.
-    return;
-  }
-  in_use_ = true;
-  EnsureCommitted();
-  array_ = reinterpret_cast<HeapObject**>(backing_store_->address());
-  size_t size = FLAG_force_marking_deque_overflows
-                    ? 64 * kPointerSize
-                    : backing_store_committed_size_;
-  DCHECK(
-      base::bits::IsPowerOfTwo32(static_cast<uint32_t>(size / kPointerSize)));
-  mask_ = static_cast<int>((size / kPointerSize) - 1);
-  top_ = bottom_ = 0;
-  overflowed_ = false;
-}
-
-void MarkingDeque::StopUsing() {
-  base::LockGuard<base::Mutex> guard(&mutex_);
-  if (!in_use_) return;
-  DCHECK(IsEmpty());
-  DCHECK(!overflowed_);
-  top_ = bottom_ = mask_ = 0;
-  in_use_ = false;
-  if (FLAG_concurrent_sweeping) {
-    StartUncommitTask();
-  } else {
-    Uncommit();
-  }
-}
-
-void MarkingDeque::Clear() {
-  DCHECK(in_use_);
-  top_ = bottom_ = 0;
-  overflowed_ = false;
-}
-
-void MarkingDeque::Uncommit() {
-  DCHECK(!in_use_);
-  bool success = backing_store_->Uncommit(backing_store_->address(),
-                                          backing_store_committed_size_);
-  backing_store_committed_size_ = 0;
-  CHECK(success);
-}
-
-void MarkingDeque::EnsureCommitted() {
-  DCHECK(in_use_);
-  if (backing_store_committed_size_ > 0) return;
-
-  for (size_t size = kMaxSize; size >= kMinSize; size /= 2) {
-    if (backing_store_->Commit(backing_store_->address(), size, false)) {
-      backing_store_committed_size_ = size;
-      break;
-    }
-  }
-  if (backing_store_committed_size_ == 0) {
-    V8::FatalProcessOutOfMemory("MarkingDeque::EnsureCommitted");
-  }
-}
-
-void MarkingDeque::StartUncommitTask() {
-  if (!uncommit_task_pending_) {
-    uncommit_task_pending_ = true;
-    UncommitTask* task = new UncommitTask(heap_->isolate(), this);
-    V8::GetCurrentPlatform()->CallOnBackgroundThread(
-        task, v8::Platform::kShortRunningTask);
-  }
-}
-
 class ObjectStatsVisitor : public HeapObjectVisitor {
  public:
   ObjectStatsVisitor(Heap* heap, ObjectStats* live_stats,
diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h
index 640a7f9dff..c63c0b0a66 100644
--- a/src/heap/mark-compact.h
+++ b/src/heap/mark-compact.h
@@ -11,6 +11,7 @@
 #include "src/base/platform/condition-variable.h"
 #include "src/cancelable-task.h"
 #include "src/heap/marking.h"
+#include "src/heap/sequential-marking-deque.h"
 #include "src/heap/spaces.h"
 #include "src/heap/store-buffer.h"
 
@@ -30,6 +31,8 @@ class PageParallelJob;
 class RecordMigratedSlotVisitor;
 class ThreadLocalTop;
 
+using MarkingDeque = SequentialMarkingDeque;
+
 class ObjectMarking : public AllStatic {
  public:
   V8_INLINE static MarkBit MarkBitFrom(HeapObject* obj,
@@ -113,148 +116,6 @@ class ObjectMarking : public AllStatic {
   DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectMarking);
 };
 
-// ----------------------------------------------------------------------------
-// Marking deque for tracing live objects.
-class MarkingDeque {
- public:
-  explicit MarkingDeque(Heap* heap)
-      : backing_store_(nullptr),
-        backing_store_committed_size_(0),
-        array_(nullptr),
-        top_(0),
-        bottom_(0),
-        mask_(0),
-        overflowed_(false),
-        in_use_(false),
-        uncommit_task_pending_(false),
-        heap_(heap) {}
-
-  void SetUp();
-  void TearDown();
-
-  // Ensures that the marking deque is committed and will stay committed until
-  // StopUsing() is called.
-  void StartUsing();
-  void StopUsing();
-  void Clear();
-
-  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
-
-  inline bool IsEmpty() { return top_ == bottom_; }
-
-  bool overflowed() const { return overflowed_; }
-
-  void ClearOverflowed() { overflowed_ = false; }
-
-  void SetOverflowed() { overflowed_ = true; }
-
-  // Push the object on the marking stack if there is room, otherwise mark the
-  // deque as overflowed and wait for a rescan of the heap.
-  INLINE(bool Push(HeapObject* object)) {
-    DCHECK(object->IsHeapObject());
-    if (IsFull()) {
-      SetOverflowed();
-      return false;
-    } else {
-      array_[top_] = object;
-      top_ = ((top_ + 1) & mask_);
-      return true;
-    }
-  }
-
-  INLINE(HeapObject* Pop()) {
-    DCHECK(!IsEmpty());
-    top_ = ((top_ - 1) & mask_);
-    HeapObject* object = array_[top_];
-    DCHECK(object->IsHeapObject());
-    return object;
-  }
-
-  // Unshift the object into the marking stack if there is room, otherwise mark
-  // the deque as overflowed and wait for a rescan of the heap.
-  INLINE(bool Unshift(HeapObject* object)) {
-    DCHECK(object->IsHeapObject());
-    if (IsFull()) {
-      SetOverflowed();
-      return false;
-    } else {
-      bottom_ = ((bottom_ - 1) & mask_);
-      array_[bottom_] = object;
-      return true;
-    }
-  }
-
-  template <typename Callback>
-  void Iterate(Callback callback) {
-    int i = bottom_;
-    while (i != top_) {
-      callback(array_[i]);
-      i = (i + 1) & mask_;
-    }
-  }
-
-  HeapObject** array() { return array_; }
-  int bottom() { return bottom_; }
-  int top() { return top_; }
-  int mask() { return mask_; }
-  void set_top(int top) { top_ = top; }
-
- private:
-  // This task uncommits the marking_deque backing store if
-  // markin_deque->in_use_ is false.
-  class UncommitTask : public CancelableTask {
-   public:
-    explicit UncommitTask(Isolate* isolate, MarkingDeque* marking_deque)
-        : CancelableTask(isolate), marking_deque_(marking_deque) {}
-
-   private:
-    // CancelableTask override.
-    void RunInternal() override {
-      base::LockGuard<base::Mutex> guard(&marking_deque_->mutex_);
-      if (!marking_deque_->in_use_) {
-        marking_deque_->Uncommit();
-      }
-      marking_deque_->uncommit_task_pending_ = false;
-    }
-
-    MarkingDeque* marking_deque_;
-    DISALLOW_COPY_AND_ASSIGN(UncommitTask);
-  };
-
-  static const size_t kMaxSize = 4 * MB;
-  static const size_t kMinSize = 256 * KB;
-
-  // Must be called with mutex lock.
-  void EnsureCommitted();
-
-  // Must be called with mutex lock.
-  void Uncommit();
-
-  // Must be called with mutex lock.
-  void StartUncommitTask();
-
-  base::Mutex mutex_;
-
-  base::VirtualMemory* backing_store_;
-  size_t backing_store_committed_size_;
-  HeapObject** array_;
-  // array_[(top - 1) & mask_] is the top element in the deque. The Deque is
-  // empty when top_ == bottom_. It is full when top_ + 1 == bottom
-  // (mod mask + 1).
-  int top_;
-  int bottom_;
-  int mask_;
-  bool overflowed_;
-  // in_use_ == true after taking mutex lock implies that the marking deque is
-  // committed and will stay committed at least until in_use_ == false.
-  bool in_use_;
-  bool uncommit_task_pending_;
-  Heap* heap_;
-
-  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
-};
-
-
 // CodeFlusher collects candidates for code flushing during marking and
 // processes those candidates after marking has completed in order to
 // reset those functions referencing code objects that would otherwise
diff --git a/src/heap/sequential-marking-deque.cc b/src/heap/sequential-marking-deque.cc
new file mode 100644
index 0000000000..a715b3fd85
--- /dev/null
+++ b/src/heap/sequential-marking-deque.cc
@@ -0,0 +1,98 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/sequential-marking-deque.h"
+
+#include "src/allocation.h"
+#include "src/base/bits.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
+
+namespace v8 {
+namespace internal {
+
+void SequentialMarkingDeque::SetUp() {
+  backing_store_ = new base::VirtualMemory(kMaxSize);
+  backing_store_committed_size_ = 0;
+  if (backing_store_ == nullptr) {
+    V8::FatalProcessOutOfMemory("SequentialMarkingDeque::SetUp");
+  }
+}
+
+void SequentialMarkingDeque::TearDown() { delete backing_store_; }
+
+void SequentialMarkingDeque::StartUsing() {
+  base::LockGuard<base::Mutex> guard(&mutex_);
+  if (in_use_) {
+    // This can happen in mark-compact GC if the incremental marker already
+    // started using the marking deque.
+    return;
+  }
+  in_use_ = true;
+  EnsureCommitted();
+  array_ = reinterpret_cast<HeapObject**>(backing_store_->address());
+  size_t size = FLAG_force_marking_deque_overflows
+                    ? 64 * kPointerSize
+                    : backing_store_committed_size_;
+  DCHECK(
+      base::bits::IsPowerOfTwo32(static_cast<uint32_t>(size / kPointerSize)));
+  mask_ = static_cast<int>((size / kPointerSize) - 1);
+  top_ = bottom_ = 0;
+  overflowed_ = false;
+}
+
+void SequentialMarkingDeque::StopUsing() {
+  base::LockGuard<base::Mutex> guard(&mutex_);
+  if (!in_use_) return;
+  DCHECK(IsEmpty());
+  DCHECK(!overflowed_);
+  top_ = bottom_ = mask_ = 0;
+  in_use_ = false;
+  if (FLAG_concurrent_sweeping) {
+    StartUncommitTask();
+  } else {
+    Uncommit();
+  }
+}
+
+void SequentialMarkingDeque::Clear() {
+  DCHECK(in_use_);
+  top_ = bottom_ = 0;
+  overflowed_ = false;
+}
+
+void SequentialMarkingDeque::Uncommit() {
+  DCHECK(!in_use_);
+  bool success = backing_store_->Uncommit(backing_store_->address(),
+                                          backing_store_committed_size_);
+  backing_store_committed_size_ = 0;
+  CHECK(success);
+}
+
+void SequentialMarkingDeque::EnsureCommitted() {
+  DCHECK(in_use_);
+  if (backing_store_committed_size_ > 0) return;
+
+  for (size_t size = kMaxSize; size >= kMinSize; size /= 2) {
+    if (backing_store_->Commit(backing_store_->address(), size, false)) {
+      backing_store_committed_size_ = size;
+      break;
+    }
+  }
+  if (backing_store_committed_size_ == 0) {
+    V8::FatalProcessOutOfMemory("SequentialMarkingDeque::EnsureCommitted");
+  }
+}
+
+void SequentialMarkingDeque::StartUncommitTask() {
+  if (!uncommit_task_pending_) {
+    uncommit_task_pending_ = true;
+    UncommitTask* task = new UncommitTask(heap_->isolate(), this);
+    V8::GetCurrentPlatform()->CallOnBackgroundThread(
+        task, v8::Platform::kShortRunningTask);
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/heap/sequential-marking-deque.h b/src/heap/sequential-marking-deque.h
new file mode 100644
index 0000000000..a727048173
--- /dev/null
+++ b/src/heap/sequential-marking-deque.h
@@ -0,0 +1,163 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_SEQUENTIAL_MARKING_DEQUE_
+#define V8_HEAP_SEQUENTIAL_MARKING_DEQUE_
+
+#include <deque>
+
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/platform.h"
+#include "src/cancelable-task.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+class Isolate;
+class HeapObject;
+
+// ----------------------------------------------------------------------------
+// Marking deque for tracing live objects.
+class SequentialMarkingDeque {
+ public:
+  explicit SequentialMarkingDeque(Heap* heap)
+      : backing_store_(nullptr),
+        backing_store_committed_size_(0),
+        array_(nullptr),
+        top_(0),
+        bottom_(0),
+        mask_(0),
+        overflowed_(false),
+        in_use_(false),
+        uncommit_task_pending_(false),
+        heap_(heap) {}
+
+  void SetUp();
+  void TearDown();
+
+  // Ensures that the marking deque is committed and will stay committed until
+  // StopUsing() is called.
+  void StartUsing();
+  void StopUsing();
+  void Clear();
+
+  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
+
+  inline bool IsEmpty() { return top_ == bottom_; }
+
+  bool overflowed() const { return overflowed_; }
+
+  void ClearOverflowed() { overflowed_ = false; }
+
+  void SetOverflowed() { overflowed_ = true; }
+
+  // Push the object on the marking stack if there is room, otherwise mark the
+  // deque as overflowed and wait for a rescan of the heap.
+  INLINE(bool Push(HeapObject* object)) {
+    if (IsFull()) {
+      SetOverflowed();
+      return false;
+    } else {
+      array_[top_] = object;
+      top_ = ((top_ + 1) & mask_);
+      return true;
+    }
+  }
+
+  INLINE(HeapObject* Pop()) {
+    DCHECK(!IsEmpty());
+    top_ = ((top_ - 1) & mask_);
+    HeapObject* object = array_[top_];
+    return object;
+  }
+
+  // Unshift the object into the marking stack if there is room, otherwise mark
+  // the deque as overflowed and wait for a rescan of the heap.
+  INLINE(bool Unshift(HeapObject* object)) {
+    if (IsFull()) {
+      SetOverflowed();
+      return false;
+    } else {
+      bottom_ = ((bottom_ - 1) & mask_);
+      array_[bottom_] = object;
+      return true;
+    }
+  }
+
+  template <typename Callback>
+  void Iterate(Callback callback) {
+    int i = bottom_;
+    while (i != top_) {
+      callback(array_[i]);
+      i = (i + 1) & mask_;
+    }
+  }
+
+  HeapObject** array() { return array_; }
+  int bottom() { return bottom_; }
+  int top() { return top_; }
+  int mask() { return mask_; }
+  void set_top(int top) { top_ = top; }
+
+ private:
+  // This task uncommits the marking deque backing store if
+  // marking_deque->in_use_ is false.
+  class UncommitTask : public CancelableTask {
+   public:
+    explicit UncommitTask(Isolate* isolate,
+                          SequentialMarkingDeque* marking_deque)
+        : CancelableTask(isolate), marking_deque_(marking_deque) {}
+
+   private:
+    // CancelableTask override.
+    void RunInternal() override {
+      base::LockGuard<base::Mutex> guard(&marking_deque_->mutex_);
+      if (!marking_deque_->in_use_) {
+        marking_deque_->Uncommit();
+      }
+      marking_deque_->uncommit_task_pending_ = false;
+    }
+
+    SequentialMarkingDeque* marking_deque_;
+    DISALLOW_COPY_AND_ASSIGN(UncommitTask);
+  };
+
+  static const size_t kMaxSize = 4 * MB;
+  static const size_t kMinSize = 256 * KB;
+
+  // Must be called with mutex lock.
+  void EnsureCommitted();
+
+  // Must be called with mutex lock.
+  void Uncommit();
+
+  // Must be called with mutex lock.
+  void StartUncommitTask();
+
+  base::Mutex mutex_;
+
+  base::VirtualMemory* backing_store_;
+  size_t backing_store_committed_size_;
+  HeapObject** array_;
+  // array_[(top_ - 1) & mask_] is the top element in the deque. The deque is
+  // empty when top_ == bottom_. It is full when top_ + 1 == bottom_
+  // (mod mask_ + 1).
+  int top_;
+  int bottom_;
+  int mask_;
+  bool overflowed_;
+  // in_use_ == true after taking the mutex lock implies that the marking deque
+  // is committed and will stay committed at least until in_use_ == false.
+  bool in_use_;
+  bool uncommit_task_pending_;
+  Heap* heap_;
+
+  DISALLOW_COPY_AND_ASSIGN(SequentialMarkingDeque);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_HEAP_SEQUENTIAL_MARKING_DEQUE_
diff --git a/src/v8.gyp b/src/v8.gyp
index 92994391a3..a24f0e5312 100644
--- a/src/v8.gyp
+++ b/src/v8.gyp
@@ -985,6 +985,8 @@
         'heap/scavenger-inl.h',
         'heap/scavenger.cc',
         'heap/scavenger.h',
+        'heap/sequential-marking-deque.cc',
+        'heap/sequential-marking-deque.h',
         'heap/slot-set.h',
         'heap/spaces-inl.h',
         'heap/spaces.cc',
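Note on the deque above: it is a fixed-capacity ring buffer whose committed size is always a power of two, so top_ and bottom_ wrap via a bitwise AND with mask_, and one slot is kept unused so a full deque (((top_ + 1) & mask_) == bottom_) can be distinguished from an empty one (top_ == bottom_). A minimal standalone sketch of that indexing scheme follows; the RingDeque name, void* elements, and plain new[] storage are illustrative stand-ins, not V8 API:

#include <cassert>
#include <cstddef>

// Minimal sketch of the ring-buffer indexing used above. `capacity` must be
// a power of two so that `& mask_` performs the modulo wraparound.
class RingDeque {
 public:
  explicit RingDeque(size_t capacity)
      : array_(new void*[capacity]), mask_(static_cast<int>(capacity - 1)) {
    assert((capacity & (capacity - 1)) == 0);  // power of two
  }
  ~RingDeque() { delete[] array_; }

  bool IsEmpty() const { return top_ == bottom_; }
  // One slot stays unused: "full" means advancing top_ would hit bottom_.
  bool IsFull() const { return ((top_ + 1) & mask_) == bottom_; }
  bool overflowed() const { return overflowed_; }

  // Push at the top end; on overflow the caller rescans later, mirroring the
  // SetOverflowed() contract above.
  bool Push(void* object) {
    if (IsFull()) {
      overflowed_ = true;
      return false;
    }
    array_[top_] = object;
    top_ = (top_ + 1) & mask_;
    return true;
  }

  // Pop from the top end (LIFO with respect to Push).
  void* Pop() {
    assert(!IsEmpty());
    top_ = (top_ - 1) & mask_;
    return array_[top_];
  }

  // Insert at the bottom end, like Unshift() above.
  bool Unshift(void* object) {
    if (IsFull()) {
      overflowed_ = true;
      return false;
    }
    bottom_ = (bottom_ - 1) & mask_;
    array_[bottom_] = object;
    return true;
  }

 private:
  void** array_;
  int top_ = 0;
  int bottom_ = 0;
  int mask_;
  bool overflowed_ = false;
};

int main() {
  int a = 1, b = 2;
  RingDeque deque(4);  // capacity 4 => at most 3 elements before overflow
  deque.Push(&a);
  deque.Unshift(&b);
  assert(deque.Pop() == &a);  // Pop takes from the top end first
  assert(deque.Pop() == &b);  // then reaches the element at the bottom
  assert(deque.IsEmpty());
  return 0;
}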
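A second detail worth illustrating is EnsureCommitted(): it asks for the full 4 MB backing store first and halves the request down to the 256 KB minimum until a commit succeeds, so the deque degrades gracefully under memory pressure and only a total failure is fatal. A minimal sketch of that sizing loop, with a hypothetical TryCommit standing in for base::VirtualMemory::Commit:

#include <cstddef>
#include <cstdio>

namespace {

constexpr size_t KB = 1024;
constexpr size_t MB = 1024 * KB;
constexpr size_t kMaxSize = 4 * MB;    // preferred committed size
constexpr size_t kMinSize = 256 * KB;  // smallest acceptable size

// Hypothetical stand-in for base::VirtualMemory::Commit; here it simulates
// an OS that refuses to commit more than 1 MB at a time.
bool TryCommit(size_t size) { return size <= 1 * MB; }

// Mirrors the halving loop in EnsureCommitted(): returns the size actually
// committed, or 0 if even kMinSize failed (treated above as fatal OOM).
size_t CommitBackingStore() {
  for (size_t size = kMaxSize; size >= kMinSize; size /= 2) {
    if (TryCommit(size)) return size;
  }
  return 0;
}

}  // namespace

int main() {
  std::printf("committed %zu KB\n", CommitBackingStore() / KB);  // 1024 KB
  return 0;
}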