[heap] Use local live byte counters in concurrent marking.

Each concurrent marking task now accumulates live byte counts in a task-local map, which the main thread flushes into the marking state after the tasks have completed. As a result, live byte count updates on the main thread no longer need to be atomic.

Bug: chromium:694255
TBR: mlippautz@chromium.org
Change-Id: I84da2b0647f63ad9d8f2be757d305d58945a00ff
Reviewed-on: https://chromium-review.googlesource.com/613623
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47468}
Ulan Degenbaev 2017-08-19 14:02:41 +02:00 committed by Commit Bot
parent 7daf8cf3ee
commit 87613860c6
7 changed files with 138 additions and 57 deletions
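
The change, reduced to a minimal standalone sketch (the types, thread setup, and byte counts below are illustrative assumptions, not V8's classes): each marking worker accumulates per-chunk live-byte deltas in its own map, and only the main thread folds those maps into the shared per-chunk counter once the workers have stopped, so that counter no longer needs atomic updates.

// Minimal sketch of task-local live-byte accumulation (illustrative only).
#include <cstdint>
#include <thread>
#include <unordered_map>
#include <vector>

struct Chunk {
  intptr_t live_byte_count = 0;  // Written only by the main thread.
};

using LiveBytesMap = std::unordered_map<Chunk*, intptr_t>;

struct TaskState {
  LiveBytesMap live_bytes;  // Task-local; no synchronization while marking.
};

void MarkSomeObjects(TaskState* state, Chunk* chunk) {
  // A worker touches only its own map, so a plain increment suffices.
  state->live_bytes[chunk] += 64;
}

// Runs on the main thread after all workers have joined.
void FlushLiveBytes(std::vector<TaskState>* tasks) {
  for (TaskState& task : *tasks) {
    for (auto& pair : task.live_bytes) {
      pair.first->live_byte_count += pair.second;  // Non-atomic is now safe.
    }
    task.live_bytes.clear();
  }
}

int main() {
  Chunk chunk;
  std::vector<TaskState> tasks(4);
  std::vector<std::thread> workers;
  for (TaskState& task : tasks) {
    workers.emplace_back(MarkSomeObjects, &task, &chunk);
  }
  for (std::thread& worker : workers) worker.join();
  FlushLiveBytes(&tasks);  // Only now does the shared counter change.
  return chunk.live_byte_count == 4 * 64 ? 0 : 1;
}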


@@ -27,25 +27,22 @@ namespace internal {
class ConcurrentMarkingState final
: public MarkingStateBase<ConcurrentMarkingState, AccessMode::ATOMIC> {
public:
explicit ConcurrentMarkingState(LiveBytesMap* live_bytes)
: live_bytes_(live_bytes) {}
Bitmap* bitmap(const MemoryChunk* chunk) {
return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
reinterpret_cast<base::AtomicNumber<intptr_t>*>(&chunk->live_byte_count_)
->Increment(by);
(*live_bytes_)[chunk] += by;
}
intptr_t live_bytes(MemoryChunk* chunk) {
return reinterpret_cast<base::AtomicNumber<intptr_t>*>(
&chunk->live_byte_count_)
->Value();
}
// The live_bytes and SetLiveBytes methods of the marking state are
// not used by the concurrent marker.
void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
reinterpret_cast<base::AtomicNumber<intptr_t>*>(&chunk->live_byte_count_)
->SetValue(value);
}
private:
LiveBytesMap* live_bytes_;
};
// Helper class for storing in-object slot addresses and values.
@@ -76,10 +73,12 @@ class ConcurrentMarkingVisitor final
explicit ConcurrentMarkingVisitor(ConcurrentMarking::MarkingWorklist* shared,
ConcurrentMarking::MarkingWorklist* bailout,
LiveBytesMap* live_bytes,
WeakObjects* weak_objects, int task_id)
: shared_(shared, task_id),
bailout_(bailout, task_id),
weak_objects_(weak_objects),
marking_state_(live_bytes),
task_id_(task_id) {}
bool ShouldVisit(HeapObject* object) {
@@ -325,10 +324,10 @@ class ConcurrentMarkingVisitor final
class ConcurrentMarking::Task : public CancelableTask {
public:
Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
TaskInterrupt* interrupt, int task_id)
TaskState* task_state, int task_id)
: CancelableTask(isolate),
concurrent_marking_(concurrent_marking),
interrupt_(interrupt),
task_state_(task_state),
task_id_(task_id) {}
virtual ~Task() {}
@@ -336,11 +335,11 @@ class ConcurrentMarking::Task : public CancelableTask {
private:
// v8::internal::CancelableTask overrides.
void RunInternal() override {
concurrent_marking_->Run(task_id_, interrupt_);
concurrent_marking_->Run(task_id_, task_state_);
}
ConcurrentMarking* concurrent_marking_;
TaskInterrupt* interrupt_;
TaskState* task_state_;
int task_id_;
DISALLOW_COPY_AND_ASSIGN(Task);
};
@@ -362,10 +361,16 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
}
}
void ConcurrentMarking::Run(int task_id, TaskInterrupt* interrupt) {
void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
size_t kBytesUntilInterruptCheck = 64 * KB;
int kObjectsUntilInterrupCheck = 1000;
ConcurrentMarkingVisitor visitor(shared_, bailout_, weak_objects_, task_id);
LiveBytesMap* live_bytes = nullptr;
{
base::LockGuard<base::Mutex> guard(&task_state->lock);
live_bytes = &task_state->live_bytes;
}
ConcurrentMarkingVisitor visitor(shared_, bailout_, live_bytes, weak_objects_,
task_id);
double time_ms;
size_t total_bytes_marked = 0;
if (FLAG_trace_concurrent_marking) {
@@ -376,7 +381,7 @@ void ConcurrentMarking::Run(int task_id, TaskInterrupt* interrupt) {
TimedScope scope(&time_ms);
bool done = false;
while (!done) {
base::LockGuard<base::Mutex> guard(&interrupt->lock);
base::LockGuard<base::Mutex> guard(&task_state->lock);
size_t bytes_marked = 0;
int objects_processed = 0;
while (bytes_marked < kBytesUntilInterruptCheck &&
@@ -398,14 +403,14 @@ }
}
}
total_bytes_marked += bytes_marked;
if (interrupt->request.Value()) {
interrupt->condition.Wait(&interrupt->lock);
if (task_state->interrupt_request.Value()) {
task_state->interrupt_condition.Wait(&task_state->lock);
}
}
{
// Take the lock to synchronize with worklist update after
// young generation GC.
base::LockGuard<base::Mutex> guard(&interrupt->lock);
base::LockGuard<base::Mutex> guard(&task_state->lock);
bailout_->FlushToGlobal(task_id);
}
weak_objects_->weak_cells.FlushToGlobal(task_id);
@@ -435,11 +440,11 @@ void ConcurrentMarking::ScheduleTasks() {
heap_->isolate()->PrintWithTimestamp(
"Scheduling concurrent marking task %d\n", i);
}
task_interrupt_[i].request.SetValue(false);
task_state_[i].interrupt_request.SetValue(false);
is_pending_[i] = true;
++pending_task_count_;
V8::GetCurrentPlatform()->CallOnBackgroundThread(
new Task(heap_->isolate(), this, &task_interrupt_[i], i),
new Task(heap_->isolate(), this, &task_state_[i], i),
v8::Platform::kShortRunningTask);
}
}
@@ -465,25 +470,45 @@ void ConcurrentMarking::EnsureCompleted() {
}
}
void ConcurrentMarking::FlushLiveBytes(
MajorNonAtomicMarkingState* marking_state) {
DCHECK_EQ(pending_task_count_, 0);
for (int i = 1; i <= kTasks; i++) {
LiveBytesMap& live_bytes = task_state_[i].live_bytes;
for (auto pair : live_bytes) {
marking_state->IncrementLiveBytes(pair.first, pair.second);
}
live_bytes.clear();
}
}
void ConcurrentMarking::ClearLiveness(MemoryChunk* chunk) {
for (int i = 1; i <= kTasks; i++) {
if (task_state_[i].live_bytes.count(chunk)) {
task_state_[i].live_bytes[chunk] = 0;
}
}
}
ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
: concurrent_marking_(concurrent_marking) {
if (!FLAG_concurrent_marking) return;
// Request interrupt for all tasks.
// Set the interrupt_request flag for all tasks.
for (int i = 1; i <= kTasks; i++) {
concurrent_marking_->task_interrupt_[i].request.SetValue(true);
concurrent_marking_->task_state_[i].interrupt_request.SetValue(true);
}
// Now take a lock to ensure that the tasks are waiting.
for (int i = 1; i <= kTasks; i++) {
concurrent_marking_->task_interrupt_[i].lock.Lock();
concurrent_marking_->task_state_[i].lock.Lock();
}
}
ConcurrentMarking::PauseScope::~PauseScope() {
if (!FLAG_concurrent_marking) return;
for (int i = kTasks; i >= 1; i--) {
concurrent_marking_->task_interrupt_[i].request.SetValue(false);
concurrent_marking_->task_interrupt_[i].condition.NotifyAll();
concurrent_marking_->task_interrupt_[i].lock.Unlock();
concurrent_marking_->task_state_[i].interrupt_request.SetValue(false);
concurrent_marking_->task_state_[i].interrupt_condition.NotifyAll();
concurrent_marking_->task_state_[i].lock.Unlock();
}
}
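
The PauseScope handshake above boils down to the following standalone sketch, written with std:: primitives rather than V8's base:: wrappers (the names and the batching loop are illustrative assumptions): the main thread raises each worker's interrupt_request flag and then acquires that worker's lock; a worker checks the flag between marking batches while holding its lock and, if it is set, waits on the condition variable, which releases the lock and lets the main thread proceed knowing the worker is parked.

// Simplified pause/resume handshake between the main thread and one worker.
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>

struct TaskState {
  std::mutex lock;
  std::condition_variable interrupt_condition;
  std::atomic<bool> interrupt_request{false};
};

void WorkerLoop(TaskState* state, std::atomic<bool>* done) {
  while (!done->load()) {
    std::unique_lock<std::mutex> guard(state->lock);
    // ... mark a bounded batch of objects while holding the lock ...
    if (state->interrupt_request.load()) {
      // Waiting releases the lock, so the main thread can acquire it.
      state->interrupt_condition.wait(
          guard, [&] { return !state->interrupt_request.load(); });
    }
  }
}

void PauseWorker(TaskState* state) {
  state->interrupt_request.store(true);  // Ask the worker to yield.
  state->lock.lock();                    // Returns once the worker is parked
                                         // or between batches.
}

void ResumeWorker(TaskState* state) {
  state->interrupt_request.store(false);
  state->interrupt_condition.notify_all();
  state->lock.unlock();
}

int main() {
  TaskState state;
  std::atomic<bool> done{false};
  std::thread worker(WorkerLoop, &state, &done);
  PauseWorker(&state);   // Heap objects could now be moved safely.
  ResumeWorker(&state);
  done.store(true);
  worker.join();
}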


@@ -7,6 +7,7 @@
#include "src/allocation.h"
#include "src/cancelable-task.h"
#include "src/heap/spaces.h"
#include "src/heap/worklist.h"
#include "src/utils.h"
#include "src/v8.h"
@@ -16,8 +17,12 @@ namespace internal {
class Heap;
class Isolate;
class MajorNonAtomicMarkingState;
struct WeakObjects;
using LiveBytesMap =
std::unordered_map<MemoryChunk*, intptr_t, MemoryChunk::Hasher>;
class ConcurrentMarking {
public:
// When the scope is entered, the concurrent marking tasks
@@ -40,27 +45,33 @@ class ConcurrentMarking {
void ScheduleTasks();
void EnsureCompleted();
void RescheduleTasksIfNeeded();
// Flushes the local live bytes into the given marking state.
void FlushLiveBytes(MajorNonAtomicMarkingState* marking_state);
// This function is called for a new space page that was cleared after
// scavenge and is going to be re-used.
void ClearLiveness(MemoryChunk* chunk);
private:
struct TaskInterrupt {
struct TaskState {
// While a concurrent marking task holds this lock, objects in the heap
// are guaranteed not to move.
base::Mutex lock;
// The main thread sets this flag to true when it wants the concurrent
// marker to give up the lock.
base::AtomicValue<bool> request;
base::AtomicValue<bool> interrupt_request;
// The concurrent marker waits on this condition until the request
// flag is cleared by the main thread.
base::ConditionVariable condition;
base::ConditionVariable interrupt_condition;
LiveBytesMap live_bytes;
char cache_line_padding[64];
};
class Task;
void Run(int task_id, TaskInterrupt* interrupt);
void Run(int task_id, TaskState* task_state);
Heap* heap_;
MarkingWorklist* shared_;
MarkingWorklist* bailout_;
WeakObjects* weak_objects_;
TaskInterrupt task_interrupt_[kTasks + 1];
TaskState task_state_[kTasks + 1];
base::Mutex pending_lock_;
base::ConditionVariable pending_condition_;
int pending_task_count_;
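
The cache_line_padding member in TaskState is presumably there so that the per-task entries of the task_state_ array do not share cache lines: the workers write their fields frequently, and false sharing would bounce the same line between cores. A rough standalone equivalent using alignas (the 64-byte line size is an assumption) looks like this:

// Padding per-task state so adjacent array slots do not share a cache line.
#include <cstddef>

constexpr std::size_t kCacheLineSize = 64;  // Assumed, not queried at runtime.

struct alignas(kCacheLineSize) PaddedTaskState {
  long live_bytes_delta = 0;
  // alignas rounds sizeof(PaddedTaskState) up to a multiple of the alignment,
  // so neighbouring elements of the array below never share a cache line.
};

static_assert(sizeof(PaddedTaskState) % kCacheLineSize == 0,
              "each task state occupies whole cache lines");

PaddedTaskState task_state[8];  // One slot per worker; no false sharing.

int main() { return task_state[0].live_bytes_delta == 0 ? 0 : 1; }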


@@ -5904,6 +5904,19 @@ bool Heap::SetUp() {
mark_compact_collector_ = new MarkCompactCollector(this);
incremental_marking_ = new IncrementalMarking(this);
incremental_marking_->set_marking_worklist(
mark_compact_collector_->marking_worklist());
if (FLAG_concurrent_marking) {
MarkCompactCollector::MarkingWorklist* marking_worklist =
mark_compact_collector_->marking_worklist();
concurrent_marking_ = new ConcurrentMarking(
this, marking_worklist->shared(), marking_worklist->bailout(),
mark_compact_collector_->weak_objects());
} else {
concurrent_marking_ =
new ConcurrentMarking(this, nullptr, nullptr, nullptr);
}
for (int i = 0; i <= LAST_SPACE; i++) {
space_[i] = nullptr;
}
@@ -5940,18 +5953,6 @@ bool Heap::SetUp() {
}
tracer_ = new GCTracer(this);
incremental_marking_->set_marking_worklist(
mark_compact_collector_->marking_worklist());
if (FLAG_concurrent_marking) {
MarkCompactCollector::MarkingWorklist* marking_worklist =
mark_compact_collector_->marking_worklist();
concurrent_marking_ = new ConcurrentMarking(
this, marking_worklist->shared(), marking_worklist->bailout(),
mark_compact_collector_->weak_objects());
} else {
concurrent_marking_ =
new ConcurrentMarking(this, nullptr, nullptr, nullptr);
}
minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
gc_idle_time_handler_ = new GCIdleTimeHandler();
memory_reducer_ = new MemoryReducer(this);


@@ -22,6 +22,28 @@ class PagedSpace;
enum class StepOrigin { kV8, kTask };
// This marking state is used when concurrent marking is running.
class IncrementalMarkingState final
: public MarkingStateBase<IncrementalMarkingState, AccessMode::ATOMIC> {
public:
Bitmap* bitmap(const MemoryChunk* chunk) const {
return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
}
// Concurrent marking uses task-local live bytes, so the main thread may
// update this counter non-atomically.
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
chunk->live_byte_count_ += by;
}
intptr_t live_bytes(MemoryChunk* chunk) const {
return chunk->live_byte_count_;
}
void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
chunk->live_byte_count_ = value;
}
};
class V8_EXPORT_PRIVATE IncrementalMarking {
public:
enum State { STOPPED, SWEEPING, MARKING, COMPLETE };
@@ -33,7 +55,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
enum GCRequestType { NONE, COMPLETE_MARKING, FINALIZATION };
#ifdef V8_CONCURRENT_MARKING
using MarkingState = MajorAtomicMarkingState;
using MarkingState = IncrementalMarkingState;
#else
using MarkingState = MajorNonAtomicMarkingState;
#endif
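
With the concurrent workers confined to their task-local maps, the incremental marker on the main thread becomes the only writer of live_byte_count_, which is why IncrementalMarkingState above can use plain increments. The contrast, as a small illustrative sketch (types simplified, not the V8 API):

// Contrast between a single-writer plain counter and a shared atomic counter.
#include <atomic>
#include <cstdint>

struct Chunk {
  intptr_t live_byte_count = 0;                // Only the main thread writes.
  std::atomic<intptr_t> shared_live_bytes{0};  // Needed if many threads write.
};

// Fine after this change: concurrent markers no longer touch the field.
void MainThreadIncrement(Chunk* chunk, intptr_t by) {
  chunk->live_byte_count += by;
}

// Roughly what every thread had to do before, when the counter was shared.
void SharedIncrement(Chunk* chunk, intptr_t by) {
  chunk->shared_live_bytes.fetch_add(by, std::memory_order_relaxed);
}

int main() {
  Chunk chunk;
  MainThreadIncrement(&chunk, 32);
  SharedIncrement(&chunk, 32);
  return chunk.live_byte_count == 32 ? 0 : 1;
}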


@@ -1001,6 +1001,13 @@ void MarkCompactCollector::Prepare() {
heap()->memory_allocator()->unmapper()->WaitUntilCompleted();
heap()->concurrent_marking()->EnsureCompleted();
heap()->concurrent_marking()->FlushLiveBytes(non_atomic_marking_state());
#ifdef VERIFY_HEAP
heap()->old_space()->VerifyLiveBytes();
heap()->map_space()->VerifyLiveBytes();
heap()->code_space()->VerifyLiveBytes();
#endif
// Clear marking bits if incremental marking is aborted.
if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {


@@ -12,6 +12,7 @@
#include "src/base/platform/semaphore.h"
#include "src/counters.h"
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/slot-set.h"
@@ -1695,8 +1696,6 @@ void PagedSpace::Print() {}
void PagedSpace::Verify(ObjectVisitor* visitor) {
bool allocation_pointer_found_in_space =
(allocation_info_.top() == allocation_info_.limit());
IncrementalMarking::MarkingState* marking_state =
heap()->incremental_marking()->marking_state();
for (Page* page : *this) {
CHECK(page->owner() == this);
if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
@@ -1706,7 +1705,6 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
HeapObjectIterator it(page);
Address end_of_previous_object = page->area_start();
Address top = page->area_end();
int black_size = 0;
for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
CHECK(end_of_previous_object <= object->address());
@@ -1729,20 +1727,32 @@
// All the interior pointers should be contained in the heap.
int size = object->Size();
object->IterateBody(map->instance_type(), size, visitor);
if (marking_state->IsBlack(object)) {
black_size += size;
}
CHECK(object->address() + size <= top);
end_of_previous_object = object->address() + size;
}
CHECK_LE(black_size, marking_state->live_bytes(page));
}
CHECK(allocation_pointer_found_in_space);
#ifdef DEBUG
VerifyCountersAfterSweeping();
#endif
}
void PagedSpace::VerifyLiveBytes() {
IncrementalMarking::MarkingState* marking_state =
heap()->incremental_marking()->marking_state();
for (Page* page : *this) {
CHECK(page->SweepingDone());
HeapObjectIterator it(page);
int black_size = 0;
for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
// All the interior pointers should be contained in the heap.
if (marking_state->IsBlack(object)) {
black_size += object->Size();
}
}
CHECK_LE(black_size, marking_state->live_bytes(page));
}
}
#endif // VERIFY_HEAP
#ifdef DEBUG
@@ -1993,6 +2003,8 @@ void NewSpace::ResetAllocationInfo() {
heap()->incremental_marking()->non_atomic_marking_state();
for (Page* p : to_space_) {
marking_state->ClearLiveness(p);
// Concurrent marking may have local live bytes for this page.
heap()->concurrent_marking()->ClearLiveness(p);
}
InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
}


@@ -247,8 +247,8 @@ class MemoryChunk {
public:
// Use with std data structures.
struct Hasher {
size_t operator()(Page* const p) const {
return reinterpret_cast<size_t>(p) >> kPageSizeBits;
size_t operator()(MemoryChunk* const chunk) const {
return reinterpret_cast<size_t>(chunk) >> kPageSizeBits;
}
};
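
Widening Hasher from Page* to MemoryChunk* is what allows the LiveBytesMap declared in concurrent-marking.h to be keyed by any chunk. A standalone sketch of the same hashing idea follows (the kPageSizeBits value is an assumed stand-in for V8's constant): because chunks are allocated on page-aligned addresses, the low bits of the pointer carry no information, so shifting them away yields better-distributed hash values.

// Hashing page-aligned chunk pointers (constants here are assumptions).
#include <cstddef>
#include <cstdint>
#include <unordered_map>

constexpr int kPageSizeBits = 19;  // Stand-in for V8's page-size constant.

struct MemoryChunk {};

struct ChunkHasher {
  std::size_t operator()(MemoryChunk* const chunk) const {
    // In the real allocator chunks start on page-size boundaries, so the low
    // kPageSizeBits bits are always zero; drop them to spread the hash values.
    return reinterpret_cast<std::size_t>(chunk) >> kPageSizeBits;
  }
};

using LiveBytesMap = std::unordered_map<MemoryChunk*, intptr_t, ChunkHasher>;

int main() {
  MemoryChunk chunk;  // Stack object here; only the map plumbing matters.
  LiveBytesMap live_bytes;
  live_bytes[&chunk] += 128;  // Accumulate live bytes for this chunk.
  return live_bytes[&chunk] == 128 ? 0 : 1;
}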
@@ -700,6 +700,7 @@
void InitializeReservedMemory() { reservation_.Reset(); }
friend class ConcurrentMarkingState;
friend class IncrementalMarkingState;
friend class MajorAtomicMarkingState;
friend class MajorNonAtomicMarkingState;
friend class MemoryAllocator;
@@ -2120,6 +2121,8 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
// Verify integrity of this space.
virtual void Verify(ObjectVisitor* visitor);
void VerifyLiveBytes();
// Overridden by subclasses to verify space-specific object
// properties (e.g., only maps or free-list nodes are in map space).
virtual void VerifyObject(HeapObject* obj) {}