Synchronize on concurrent slot buffer entries during migration.

BUG=chromium:524425
LOG=n

Review URL: https://codereview.chromium.org/1314133004

Cr-Commit-Position: refs/heads/master@{#30423}
commit 7ab389a437 (parent a6754d8c3c)
Author: hpayer, 2015-08-27 09:54:05 -07:00 (committed by Commit bot)
2 changed files with 73 additions and 10 deletions
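
What the change does: whenever parallel_compaction_in_progress_ is set, slot
recording goes through a new SlotsBuffer::AddToSynchronized entry point that
acquires migration_slots_buffer_mutex_ before delegating to the existing
AddTo; the unlocked path is kept for single-threaded compaction. Below is a
minimal sketch of that pattern, using std::mutex and std::vector in place of
V8's base::Mutex and SlotsBuffer; the class and method names are
illustrative, not V8's.

// Sketch only: a shared slot buffer with a locked and an unlocked
// insertion path, mirroring SlotsBuffer::AddTo/AddToSynchronized.
#include <mutex>
#include <vector>

class MigrationSlotRecorder {
 public:
  // Fast path: safe only while a single thread performs compaction.
  void Add(void* slot) { slots_.push_back(slot); }

  // Concurrent path: several evacuation threads may append to the same
  // buffer, so each insertion holds the mutex (cf. base::LockGuard).
  void AddSynchronized(void* slot) {
    std::lock_guard<std::mutex> guard(mutex_);
    Add(slot);
  }

  // Mirrors RecordMigratedSlot's dispatch on the compaction mode.
  void Record(void* slot, bool parallel_compaction_in_progress) {
    if (parallel_compaction_in_progress) {
      AddSynchronized(slot);
    } else {
      Add(slot);  // Skips locking overhead when there is no contention.
    }
  }

 private:
  std::mutex mutex_;
  std::vector<void*> slots_;
};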

src/heap/mark-compact.cc

@@ -2692,6 +2692,8 @@ void MarkCompactCollector::AbortWeakCells() {
 
 
 void MarkCompactCollector::RecordMigratedSlot(Object* value, Address slot) {
+  // When parallel compaction is in progress, store and slots buffer entries
+  // require synchronization.
   if (heap_->InNewSpace(value)) {
     if (parallel_compaction_in_progress_) {
       heap_->store_buffer()->MarkSynchronized(slot);
@@ -2699,11 +2701,49 @@ void MarkCompactCollector::RecordMigratedSlot(Object* value, Address slot) {
       heap_->store_buffer()->Mark(slot);
     }
   } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
-    SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
-                       reinterpret_cast<Object**>(slot),
-                       SlotsBuffer::IGNORE_OVERFLOW);
+    if (parallel_compaction_in_progress_) {
+      SlotsBuffer::AddToSynchronized(
+          &slots_buffer_allocator_, &migration_slots_buffer_,
+          &migration_slots_buffer_mutex_, reinterpret_cast<Object**>(slot),
+          SlotsBuffer::IGNORE_OVERFLOW);
+    } else {
+      SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
+                         reinterpret_cast<Object**>(slot),
+                         SlotsBuffer::IGNORE_OVERFLOW);
+    }
   }
 }
 
 
+void MarkCompactCollector::RecordMigratedCodeEntrySlot(
+    Address code_entry, Address code_entry_slot) {
+  if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
+    if (parallel_compaction_in_progress_) {
+      SlotsBuffer::AddToSynchronized(
+          &slots_buffer_allocator_, &migration_slots_buffer_,
+          &migration_slots_buffer_mutex_, SlotsBuffer::CODE_ENTRY_SLOT,
+          code_entry_slot, SlotsBuffer::IGNORE_OVERFLOW);
+    } else {
+      SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
+                         SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
+                         SlotsBuffer::IGNORE_OVERFLOW);
+    }
+  }
+}
+
+
+void MarkCompactCollector::RecordMigratedCodeObjectSlot(Address code_object) {
+  if (parallel_compaction_in_progress_) {
+    SlotsBuffer::AddToSynchronized(
+        &slots_buffer_allocator_, &migration_slots_buffer_,
+        &migration_slots_buffer_mutex_, SlotsBuffer::RELOCATED_CODE_OBJECT,
+        code_object, SlotsBuffer::IGNORE_OVERFLOW);
+  } else {
+    SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
+                       SlotsBuffer::RELOCATED_CODE_OBJECT, code_object,
+                       SlotsBuffer::IGNORE_OVERFLOW);
+  }
+}
+
+
 // We scavenge new space simultaneously with sweeping. This is done in two
@@ -2745,19 +2785,12 @@ void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
     if (compacting_ && dst->IsJSFunction()) {
       Address code_entry_slot = dst->address() + JSFunction::kCodeEntryOffset;
       Address code_entry = Memory::Address_at(code_entry_slot);
-
-      if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
-        SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
-                           SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
-                           SlotsBuffer::IGNORE_OVERFLOW);
-      }
+      RecordMigratedCodeEntrySlot(code_entry, code_entry_slot);
     }
   } else if (dest == CODE_SPACE) {
     PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
     heap()->MoveBlock(dst_addr, src_addr, size);
-    SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
-                       SlotsBuffer::RELOCATED_CODE_OBJECT, dst_addr,
-                       SlotsBuffer::IGNORE_OVERFLOW);
+    RecordMigratedCodeObjectSlot(dst_addr);
     Code::cast(dst)->Relocate(dst_addr - src_addr);
   } else {
     DCHECK(dest == NEW_SPACE);
@@ -4489,6 +4522,15 @@ bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
 }
 
 
+bool SlotsBuffer::AddToSynchronized(SlotsBufferAllocator* allocator,
+                                    SlotsBuffer** buffer_address,
+                                    base::Mutex* buffer_mutex, SlotType type,
+                                    Address addr, AdditionMode mode) {
+  base::LockGuard<base::Mutex> lock_guard(buffer_mutex);
+  return AddTo(allocator, buffer_address, type, addr, mode);
+}
+
+
 bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
                         SlotsBuffer** buffer_address, SlotType type,
                         Address addr, AdditionMode mode) {

src/heap/mark-compact.h

@@ -374,6 +374,14 @@ class SlotsBuffer {
     return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
   }
 
+  INLINE(static bool AddToSynchronized(SlotsBufferAllocator* allocator,
+                                       SlotsBuffer** buffer_address,
+                                       base::Mutex* buffer_mutex,
+                                       ObjectSlot slot, AdditionMode mode)) {
+    base::LockGuard<base::Mutex> lock_guard(buffer_mutex);
+    return AddTo(allocator, buffer_address, slot, mode);
+  }
+
   INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
                            SlotsBuffer** buffer_address, ObjectSlot slot,
                            AdditionMode mode)) {
@@ -392,6 +400,11 @@ class SlotsBuffer {
   static bool IsTypedSlot(ObjectSlot slot);
 
+  static bool AddToSynchronized(SlotsBufferAllocator* allocator,
+                                SlotsBuffer** buffer_address,
+                                base::Mutex* buffer_mutex, SlotType type,
+                                Address addr, AdditionMode mode);
+
   static bool AddTo(SlotsBufferAllocator* allocator,
                     SlotsBuffer** buffer_address, SlotType type, Address addr,
                     AdditionMode mode);
@@ -722,6 +735,8 @@ class MarkCompactCollector {
   SlotsBuffer* migration_slots_buffer_;
+  base::Mutex migration_slots_buffer_mutex_;
 
   // Finishes GC, performs heap verification if enabled.
   void Finish();
@@ -897,6 +912,12 @@ class MarkCompactCollector {
   // Updates store buffer and slot buffer for a pointer in a migrating object.
   void RecordMigratedSlot(Object* value, Address slot);
 
+  // Adds the code entry slot to the slots buffer.
+  void RecordMigratedCodeEntrySlot(Address code_entry,
+                                   Address code_entry_slot);
+  // Adds the slot of a moved code object.
+  void RecordMigratedCodeObjectSlot(Address code_object);
+
 #ifdef DEBUG
   friend class MarkObjectVisitor;
   static void VisitObject(HeapObject* obj);
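
Why the mutex is needed at all: during parallel compaction several evacuation
threads migrate objects at the same time, and every migrated pointer is
recorded in the single shared migration_slots_buffer_. A hedged sketch of
that access pattern, reusing the hypothetical MigrationSlotRecorder from the
sketch above (not V8 code):

#include <thread>
#include <vector>

int main() {
  MigrationSlotRecorder recorder;
  std::vector<std::thread> evacuators;
  for (int i = 0; i < 4; ++i) {
    evacuators.emplace_back([&recorder] {
      // Each evacuator records many slots; going through the plain Add()
      // path here would be a data race on the shared buffer.
      for (intptr_t j = 1; j <= 1000; ++j) {
        recorder.Record(reinterpret_cast<void*>(j),
                        /*parallel_compaction_in_progress=*/true);
      }
    });
  }
  for (auto& t : evacuators) t.join();
  // With the synchronized path, all 4000 insertions land in the buffer
  // without corrupting its internal state.
}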