[heap] Extract marking deque to separate file.

BUG=chromium:694255

Review-Url: https://codereview.chromium.org/2852953004
Cr-Commit-Position: refs/heads/master@{#45030}
ulan 2017-05-02 05:48:04 -07:00 committed by Commit bot
parent 85f04b0eaf
commit 41af9bc51b
6 changed files with 268 additions and 226 deletions

BUILD.gn

@@ -1601,6 +1601,8 @@ v8_source_set("v8_base") {
"src/heap/scavenger-inl.h",
"src/heap/scavenger.cc",
"src/heap/scavenger.h",
"src/heap/sequential-marking-deque.cc",
"src/heap/sequential-marking-deque.h",
"src/heap/slot-set.h", "src/heap/slot-set.h",
"src/heap/spaces-inl.h", "src/heap/spaces-inl.h",
"src/heap/spaces.cc", "src/heap/spaces.cc",

src/heap/mark-compact.cc

@@ -2280,90 +2280,6 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(
}
}
void MarkingDeque::SetUp() {
backing_store_ = new base::VirtualMemory(kMaxSize);
backing_store_committed_size_ = 0;
if (backing_store_ == nullptr) {
V8::FatalProcessOutOfMemory("MarkingDeque::SetUp");
}
}
void MarkingDeque::TearDown() {
delete backing_store_;
}
void MarkingDeque::StartUsing() {
base::LockGuard<base::Mutex> guard(&mutex_);
if (in_use_) {
// This can happen in mark-compact GC if the incremental marker already
// started using the marking deque.
return;
}
in_use_ = true;
EnsureCommitted();
array_ = reinterpret_cast<HeapObject**>(backing_store_->address());
size_t size = FLAG_force_marking_deque_overflows
? 64 * kPointerSize
: backing_store_committed_size_;
DCHECK(
base::bits::IsPowerOfTwo32(static_cast<uint32_t>(size / kPointerSize)));
mask_ = static_cast<int>((size / kPointerSize) - 1);
top_ = bottom_ = 0;
overflowed_ = false;
}
void MarkingDeque::StopUsing() {
base::LockGuard<base::Mutex> guard(&mutex_);
if (!in_use_) return;
DCHECK(IsEmpty());
DCHECK(!overflowed_);
top_ = bottom_ = mask_ = 0;
in_use_ = false;
if (FLAG_concurrent_sweeping) {
StartUncommitTask();
} else {
Uncommit();
}
}
void MarkingDeque::Clear() {
DCHECK(in_use_);
top_ = bottom_ = 0;
overflowed_ = false;
}
void MarkingDeque::Uncommit() {
DCHECK(!in_use_);
bool success = backing_store_->Uncommit(backing_store_->address(),
backing_store_committed_size_);
backing_store_committed_size_ = 0;
CHECK(success);
}
void MarkingDeque::EnsureCommitted() {
DCHECK(in_use_);
if (backing_store_committed_size_ > 0) return;
for (size_t size = kMaxSize; size >= kMinSize; size /= 2) {
if (backing_store_->Commit(backing_store_->address(), size, false)) {
backing_store_committed_size_ = size;
break;
}
}
if (backing_store_committed_size_ == 0) {
V8::FatalProcessOutOfMemory("MarkingDeque::EnsureCommitted");
}
}
void MarkingDeque::StartUncommitTask() {
if (!uncommit_task_pending_) {
uncommit_task_pending_ = true;
UncommitTask* task = new UncommitTask(heap_->isolate(), this);
V8::GetCurrentPlatform()->CallOnBackgroundThread(
task, v8::Platform::kShortRunningTask);
}
}
class ObjectStatsVisitor : public HeapObjectVisitor {
public:
ObjectStatsVisitor(Heap* heap, ObjectStats* live_stats,
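
Note: the Push()/SetOverflowed() pair removed above defines the deque's overflow protocol. When the ring buffer is full, the object is not stored; overflowed_ is set and the collector must later rescan the heap to recover the dropped work. The following is a minimal sketch of a drain loop under that protocol; ProcessObject() and RescanGreyObjects() are hypothetical placeholders for the collector's real visitation code, not V8 functions.

    // Hedged sketch of draining under the overflow protocol.
    // ProcessObject()/RescanGreyObjects() are illustrative placeholders.
    void DrainMarkingDeque(MarkingDeque* deque) {
      do {
        while (!deque->IsEmpty()) {
          HeapObject* object = deque->Pop();
          ProcessObject(object);  // May Push() more work; Push() can overflow.
        }
        if (deque->overflowed()) {
          deque->ClearOverflowed();
          RescanGreyObjects(deque);  // Refill with the dropped objects.
        }
      } while (!deque->IsEmpty());
    }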

src/heap/mark-compact.h

@@ -11,6 +11,7 @@
#include "src/base/platform/condition-variable.h"
#include "src/cancelable-task.h"
#include "src/heap/marking.h"
#include "src/heap/sequential-marking-deque.h"
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"
@@ -30,6 +31,8 @@ class PageParallelJob;
class RecordMigratedSlotVisitor;
class ThreadLocalTop;
using MarkingDeque = SequentialMarkingDeque;
class ObjectMarking : public AllStatic {
public:
V8_INLINE static MarkBit MarkBitFrom(HeapObject* obj,
@@ -113,148 +116,6 @@ class ObjectMarking : public AllStatic {
DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectMarking);
};
// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
class MarkingDeque {
public:
explicit MarkingDeque(Heap* heap)
: backing_store_(nullptr),
backing_store_committed_size_(0),
array_(nullptr),
top_(0),
bottom_(0),
mask_(0),
overflowed_(false),
in_use_(false),
uncommit_task_pending_(false),
heap_(heap) {}
void SetUp();
void TearDown();
// Ensures that the marking deque is committed and will stay committed until
// StopUsing() is called.
void StartUsing();
void StopUsing();
void Clear();
inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
inline bool IsEmpty() { return top_ == bottom_; }
bool overflowed() const { return overflowed_; }
void ClearOverflowed() { overflowed_ = false; }
void SetOverflowed() { overflowed_ = true; }
// Push the object on the marking stack if there is room, otherwise mark the
// deque as overflowed and wait for a rescan of the heap.
INLINE(bool Push(HeapObject* object)) {
DCHECK(object->IsHeapObject());
if (IsFull()) {
SetOverflowed();
return false;
} else {
array_[top_] = object;
top_ = ((top_ + 1) & mask_);
return true;
}
}
INLINE(HeapObject* Pop()) {
DCHECK(!IsEmpty());
top_ = ((top_ - 1) & mask_);
HeapObject* object = array_[top_];
DCHECK(object->IsHeapObject());
return object;
}
// Unshift the object into the marking stack if there is room, otherwise mark
// the deque as overflowed and wait for a rescan of the heap.
INLINE(bool Unshift(HeapObject* object)) {
DCHECK(object->IsHeapObject());
if (IsFull()) {
SetOverflowed();
return false;
} else {
bottom_ = ((bottom_ - 1) & mask_);
array_[bottom_] = object;
return true;
}
}
template <typename Callback>
void Iterate(Callback callback) {
int i = bottom_;
while (i != top_) {
callback(array_[i]);
i = (i + 1) & mask_;
}
}
HeapObject** array() { return array_; }
int bottom() { return bottom_; }
int top() { return top_; }
int mask() { return mask_; }
void set_top(int top) { top_ = top; }
private:
// This task uncommits the marking_deque backing store if
// marking_deque->in_use_ is false.
class UncommitTask : public CancelableTask {
public:
explicit UncommitTask(Isolate* isolate, MarkingDeque* marking_deque)
: CancelableTask(isolate), marking_deque_(marking_deque) {}
private:
// CancelableTask override.
void RunInternal() override {
base::LockGuard<base::Mutex> guard(&marking_deque_->mutex_);
if (!marking_deque_->in_use_) {
marking_deque_->Uncommit();
}
marking_deque_->uncommit_task_pending_ = false;
}
MarkingDeque* marking_deque_;
DISALLOW_COPY_AND_ASSIGN(UncommitTask);
};
static const size_t kMaxSize = 4 * MB;
static const size_t kMinSize = 256 * KB;
// Must be called with mutex lock.
void EnsureCommitted();
// Must be called with mutex lock.
void Uncommit();
// Must be called with mutex lock.
void StartUncommitTask();
base::Mutex mutex_;
base::VirtualMemory* backing_store_;
size_t backing_store_committed_size_;
HeapObject** array_;
// array_[(top_ - 1) & mask_] is the top element in the deque. The deque is
// empty when top_ == bottom_. It is full when top_ + 1 == bottom_
// (mod mask_ + 1).
int top_;
int bottom_;
int mask_;
bool overflowed_;
// in_use_ == true after taking mutex lock implies that the marking deque is
// committed and will stay committed at least until in_use_ == false.
bool in_use_;
bool uncommit_task_pending_;
Heap* heap_;
DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};
// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
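
The alias using MarkingDeque = SequentialMarkingDeque; added in this header is what lets the class move out of mark-compact.h without touching its call sites: existing code keeps naming MarkingDeque while the definition now lives in sequential-marking-deque.h. A toy illustration of the pattern, with stand-in types rather than V8 code:

    // New name, defined in its own header after the extraction.
    struct SequentialWorklist {
      void Push(int value) { /* ... */ }
    };

    // Old name preserved as an alias, so pre-existing call sites
    // compile unchanged.
    using Worklist = SequentialWorklist;

    void ExistingCallSite() {
      Worklist worklist;
      worklist.Push(42);
    }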

src/heap/sequential-marking-deque.cc

@@ -0,0 +1,98 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/sequential-marking-deque.h"
#include "src/allocation.h"
#include "src/base/bits.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
namespace v8 {
namespace internal {
void SequentialMarkingDeque::SetUp() {
backing_store_ = new base::VirtualMemory(kMaxSize);
backing_store_committed_size_ = 0;
if (backing_store_ == nullptr) {
V8::FatalProcessOutOfMemory("SequentialMarkingDeque::SetUp");
}
}
void SequentialMarkingDeque::TearDown() { delete backing_store_; }
void SequentialMarkingDeque::StartUsing() {
base::LockGuard<base::Mutex> guard(&mutex_);
if (in_use_) {
// This can happen in mark-compact GC if the incremental marker already
// started using the marking deque.
return;
}
in_use_ = true;
EnsureCommitted();
array_ = reinterpret_cast<HeapObject**>(backing_store_->address());
size_t size = FLAG_force_marking_deque_overflows
? 64 * kPointerSize
: backing_store_committed_size_;
DCHECK(
base::bits::IsPowerOfTwo32(static_cast<uint32_t>(size / kPointerSize)));
mask_ = static_cast<int>((size / kPointerSize) - 1);
top_ = bottom_ = 0;
overflowed_ = false;
}
void SequentialMarkingDeque::StopUsing() {
base::LockGuard<base::Mutex> guard(&mutex_);
if (!in_use_) return;
DCHECK(IsEmpty());
DCHECK(!overflowed_);
top_ = bottom_ = mask_ = 0;
in_use_ = false;
if (FLAG_concurrent_sweeping) {
StartUncommitTask();
} else {
Uncommit();
}
}
void SequentialMarkingDeque::Clear() {
DCHECK(in_use_);
top_ = bottom_ = 0;
overflowed_ = false;
}
void SequentialMarkingDeque::Uncommit() {
DCHECK(!in_use_);
bool success = backing_store_->Uncommit(backing_store_->address(),
backing_store_committed_size_);
backing_store_committed_size_ = 0;
CHECK(success);
}
void SequentialMarkingDeque::EnsureCommitted() {
DCHECK(in_use_);
if (backing_store_committed_size_ > 0) return;
for (size_t size = kMaxSize; size >= kMinSize; size /= 2) {
if (backing_store_->Commit(backing_store_->address(), size, false)) {
backing_store_committed_size_ = size;
break;
}
}
if (backing_store_committed_size_ == 0) {
V8::FatalProcessOutOfMemory("SequentialMarkingDeque::EnsureCommitted");
}
}
void SequentialMarkingDeque::StartUncommitTask() {
if (!uncommit_task_pending_) {
uncommit_task_pending_ = true;
UncommitTask* task = new UncommitTask(heap_->isolate(), this);
V8::GetCurrentPlatform()->CallOnBackgroundThread(
task, v8::Platform::kShortRunningTask);
}
}
} // namespace internal
} // namespace v8
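
Read end to end, these methods define the deque's lifecycle: SetUp() reserves the kMaxSize region of virtual memory once; StartUsing() commits physical pages under the mutex, with EnsureCommitted() retrying at halved sizes (4 MB, 2 MB, 1 MB, 512 KB, 256 KB) until one commit succeeds; StopUsing() requires an empty deque and uncommits either inline or, when FLAG_concurrent_sweeping is on, via the background UncommitTask, which re-checks in_use_ under the same mutex. A sketch of the intended call order, assuming heap is a valid Heap*:

    // Sketch of the lifecycle implied by the implementation above.
    SequentialMarkingDeque deque(heap);
    deque.SetUp();       // Reserve kMaxSize (4 MB) of virtual memory once.

    deque.StartUsing();  // Commit pages, compute mask_, reset top_/bottom_.
    // ... Push()/Pop()/Unshift() objects during a marking phase ...
    deque.StopUsing();   // Deque must be empty; uncommits, possibly on a
                         // background task when FLAG_concurrent_sweeping is set.

    deque.TearDown();    // Release the reserved virtual memory.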

src/heap/sequential-marking-deque.h

@@ -0,0 +1,163 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_SEQUENTIAL_MARKING_DEQUE_
#define V8_HEAP_SEQUENTIAL_MARKING_DEQUE_
#include <deque>
#include "src/base/platform/mutex.h"
#include "src/base/platform/platform.h"
#include "src/cancelable-task.h"
namespace v8 {
namespace internal {
class Heap;
class Isolate;
class HeapObject;
// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
class SequentialMarkingDeque {
public:
explicit SequentialMarkingDeque(Heap* heap)
: backing_store_(nullptr),
backing_store_committed_size_(0),
array_(nullptr),
top_(0),
bottom_(0),
mask_(0),
overflowed_(false),
in_use_(false),
uncommit_task_pending_(false),
heap_(heap) {}
void SetUp();
void TearDown();
// Ensures that the marking deque is committed and will stay committed until
// StopUsing() is called.
void StartUsing();
void StopUsing();
void Clear();
inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
inline bool IsEmpty() { return top_ == bottom_; }
bool overflowed() const { return overflowed_; }
void ClearOverflowed() { overflowed_ = false; }
void SetOverflowed() { overflowed_ = true; }
// Push the object on the marking stack if there is room, otherwise mark the
// deque as overflowed and wait for a rescan of the heap.
INLINE(bool Push(HeapObject* object)) {
if (IsFull()) {
SetOverflowed();
return false;
} else {
array_[top_] = object;
top_ = ((top_ + 1) & mask_);
return true;
}
}
INLINE(HeapObject* Pop()) {
DCHECK(!IsEmpty());
top_ = ((top_ - 1) & mask_);
HeapObject* object = array_[top_];
return object;
}
// Unshift the object into the marking stack if there is room, otherwise mark
// the deque as overflowed and wait for a rescan of the heap.
INLINE(bool Unshift(HeapObject* object)) {
if (IsFull()) {
SetOverflowed();
return false;
} else {
bottom_ = ((bottom_ - 1) & mask_);
array_[bottom_] = object;
return true;
}
}
template <typename Callback>
void Iterate(Callback callback) {
int i = bottom_;
while (i != top_) {
callback(array_[i]);
i = (i + 1) & mask_;
}
}
HeapObject** array() { return array_; }
int bottom() { return bottom_; }
int top() { return top_; }
int mask() { return mask_; }
void set_top(int top) { top_ = top; }
private:
// This task uncommits the marking_deque backing store if
// marking_deque->in_use_ is false.
class UncommitTask : public CancelableTask {
public:
explicit UncommitTask(Isolate* isolate,
SequentialMarkingDeque* marking_deque)
: CancelableTask(isolate), marking_deque_(marking_deque) {}
private:
// CancelableTask override.
void RunInternal() override {
base::LockGuard<base::Mutex> guard(&marking_deque_->mutex_);
if (!marking_deque_->in_use_) {
marking_deque_->Uncommit();
}
marking_deque_->uncommit_task_pending_ = false;
}
SequentialMarkingDeque* marking_deque_;
DISALLOW_COPY_AND_ASSIGN(UncommitTask);
};
static const size_t kMaxSize = 4 * MB;
static const size_t kMinSize = 256 * KB;
// Must be called with mutex lock.
void EnsureCommitted();
// Must be called with mutex lock.
void Uncommit();
// Must be called with mutex lock.
void StartUncommitTask();
base::Mutex mutex_;
base::VirtualMemory* backing_store_;
size_t backing_store_committed_size_;
HeapObject** array_;
// array_[(top_ - 1) & mask_] is the top element in the deque. The deque is
// empty when top_ == bottom_. It is full when top_ + 1 == bottom_
// (mod mask_ + 1).
int top_;
int bottom_;
int mask_;
bool overflowed_;
// in_use_ == true after taking mutex lock implies that the marking deque is
// committed and will stay committed at least until in_use_ == false.
bool in_use_;
bool uncommit_task_pending_;
Heap* heap_;
DISALLOW_COPY_AND_ASSIGN(SequentialMarkingDeque);
};
} // namespace internal
} // namespace v8
#endif // V8_HEAP_SEQUENTIAL_MARKING_DEQUE_
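
The top_/bottom_/mask_ bookkeeping above is standard power-of-two ring-buffer arithmetic: because the capacity is a power of two, & mask_ replaces a modulo; one slot is sacrificed so that full ((top_ + 1) & mask_ == bottom_) stays distinguishable from empty (top_ == bottom_); and (index - 1) & mask_ wraps correctly even at zero. A self-contained demonstration of the index math (plain C++, not V8 code):

    #include <cassert>

    int main() {
      const int mask = 7;  // Capacity 8, mirroring mask_ = size / kPointerSize - 1.
      int top = 0, bottom = 0;

      assert(top == bottom);  // Empty: top == bottom.

      // Pushing advances top; the buffer reports full after 7 pushes,
      // because one slot stays free to distinguish full from empty.
      for (int i = 0; i < 7; i++) top = (top + 1) & mask;
      assert(((top + 1) & mask) == bottom);  // Full.

      // Popping retreats top with (top - 1) & mask, which wraps at zero:
      top = bottom = 0;        // Reset to empty for the demonstration.
      top = (top - 1) & mask;  // -1 & 7 == 7 in two's-complement arithmetic.
      assert(top == 7);
      return 0;
    }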

src/v8.gyp

@@ -985,6 +985,8 @@
'heap/scavenger-inl.h',
'heap/scavenger.cc',
'heap/scavenger.h',
'heap/sequential-marking-deque.cc',
'heap/sequential-marking-deque.h',
'heap/slot-set.h',
'heap/spaces-inl.h',
'heap/spaces.cc',