[heap] Parallel Scavenge

Bug: chromium:738865
Change-Id: Ie18574bb067438816238e2cf930e6d2a7bc5ecef
Reviewed-on: https://chromium-review.googlesource.com/570579
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#46798}
Michael Lippautz 2017-07-20 15:34:04 +02:00 committed by Commit Bot
parent ecd06ed09f
commit a45048e205
11 changed files with 227 additions and 98 deletions

View File

@@ -1804,10 +1804,11 @@ class ScavengingItem : public ItemParallelJob::Item {
class ScavengingTask final : public ItemParallelJob::Task {
public:
ScavengingTask(Heap* heap, Scavenger* scavenger)
ScavengingTask(Heap* heap, Scavenger* scavenger, Scavenger::Barrier* barrier)
: ItemParallelJob::Task(heap->isolate()),
heap_(heap),
scavenger_(scavenger) {}
scavenger_(scavenger),
barrier_(barrier) {}
void RunInParallel() final {
double scavenging_time = 0.0;
@@ -1818,6 +1819,10 @@ class ScavengingTask final : public ItemParallelJob::Task {
item->Process(scavenger_);
item->MarkFinished();
}
while (!barrier_->Done()) {
scavenger_->Process(barrier_);
barrier_->Wait();
}
scavenger_->Process();
}
if (FLAG_trace_parallel_scavenge) {
@@ -1831,6 +1836,7 @@ class ScavengingTask final : public ItemParallelJob::Task {
private:
Heap* const heap_;
Scavenger* const scavenger_;
Scavenger::Barrier* const barrier_;
};
class PageScavengingItem final : public ScavengingItem {
@@ -1868,8 +1874,14 @@ class PageScavengingItem final : public ScavengingItem {
};
int Heap::NumberOfScavengeTasks() {
CHECK(!FLAG_parallel_scavenge);
return 1;
if (!FLAG_parallel_scavenge) return 1;
const int num_scavenge_tasks =
static_cast<int>(new_space()->TotalCapacity()) / MB;
return Max(
1,
Min(Min(num_scavenge_tasks, kMaxScavengerTasks),
static_cast<int>(
V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads())));
}
void Heap::Scavenge() {
@@ -1910,12 +1922,13 @@ void Heap::Scavenge() {
const bool is_logging = IsLogging(isolate());
const bool is_incremental_marking = incremental_marking()->IsMarking();
const int num_scavenge_tasks = NumberOfScavengeTasks();
Scavenger::Barrier barrier(num_scavenge_tasks);
CopiedList copied_list(num_scavenge_tasks);
PromotionList promotion_list(num_scavenge_tasks);
for (int i = 0; i < num_scavenge_tasks; i++) {
scavengers[i] = new Scavenger(this, is_logging, is_incremental_marking,
&copied_list, &promotion_list, i);
job.AddTask(new ScavengingTask(this, scavengers[i]));
job.AddTask(new ScavengingTask(this, scavengers[i], &barrier));
}
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
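
Editor's note: as a quick illustration of the new NumberOfScavengeTasks() heuristic above, here is a minimal standalone sketch of the clamping it performs; the constants, parameter names, and std:: calls are assumptions for illustration, not the V8 helpers. The idea is roughly one task per MB of new-space capacity, never fewer than one and never more than min(kMaxScavengerTasks, available background threads).

#include <algorithm>
#include <cstddef>
#include <cstdio>

namespace {

constexpr int kMaxScavengerTasksSketch = 8;  // assumed cap; mirrors the non-ARM kMaxScavengerTasks below
constexpr size_t kMB = 1024 * 1024;

// One task per MB of new-space capacity, clamped to
// [1, min(kMaxScavengerTasksSketch, background_threads)].
int NumberOfScavengeTasksSketch(size_t new_space_capacity_bytes,
                                int background_threads,
                                bool parallel_scavenge_enabled) {
  if (!parallel_scavenge_enabled) return 1;
  const int by_capacity = static_cast<int>(new_space_capacity_bytes / kMB);
  return std::max(1, std::min({by_capacity, kMaxScavengerTasksSketch,
                               background_threads}));
}

}  // namespace

int main() {
  // 16 MB of new space and 4 background threads -> 4 tasks.
  std::printf("%d\n", NumberOfScavengeTasksSketch(16 * kMB, 4, true));
  return 0;
}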

View File

@@ -1631,7 +1631,11 @@ class Heap {
static const int kInitialFeedbackCapacity = 256;
static const int kMaxScavengerTasks = 1;
#ifdef V8_TARGET_ARCH_ARM
static const int kMaxScavengerTasks = 2;
#else
static const int kMaxScavengerTasks = 8;
#endif
Heap();

View File

@@ -55,6 +55,21 @@ class LocalAllocator {
}
}
void FreeLast(AllocationSpace space, HeapObject* object, int object_size) {
switch (space) {
case NEW_SPACE:
FreeLastInNewSpace(object, object_size);
return;
case OLD_SPACE:
FreeLastInOldSpace(object, object_size);
return;
default:
// Only new and old space supported.
UNREACHABLE();
break;
}
}
private:
AllocationResult AllocateInNewSpace(int object_size,
AllocationAlignment alignment) {
@@ -97,6 +112,22 @@
return allocation;
}
void FreeLastInNewSpace(HeapObject* object, int object_size) {
if (!new_space_lab_.TryFreeLast(object, object_size)) {
// We couldn't free the last object so we have to write a proper filler.
heap_->CreateFillerObjectAt(object->address(), object_size,
ClearRecordedSlots::kNo);
}
}
void FreeLastInOldSpace(HeapObject* object, int object_size) {
if (!compaction_spaces_.Get(OLD_SPACE)->TryFreeLast(object, object_size)) {
// We couldn't free the last object so we have to write a proper filler.
heap_->CreateFillerObjectAt(object->address(), object_size,
ClearRecordedSlots::kNo);
}
}
Heap* const heap_;
NewSpace* const new_space_;
CompactionSpaceCollection compaction_spaces_;
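
Editor's note: the FreeLast() path above exists to undo a speculative copy when another task wins the race to migrate an object (see the MigrateObject() change below). If the object is still the most recent allocation, the bump pointer can simply be rewound; otherwise the dead region has to be overwritten with a filler so the heap stays iterable. A simplified sketch of that pattern over a toy bump allocator; none of these types are the real V8 classes, and memset stands in for writing a proper filler object.

#include <cstdint>
#include <cstring>

// Toy bump allocator illustrating TryFreeLast and the filler fallback.
struct BumpBuffer {
  uint8_t* top;
  uint8_t* limit;

  uint8_t* Allocate(int size) {
    if (top + size > limit) return nullptr;
    uint8_t* result = top;
    top += size;
    return result;
  }

  // Succeeds only if |object| was the last allocation: rewind the bump pointer.
  bool TryFreeLast(uint8_t* object, int size) {
    if (top - size == object) {
      top = object;
      return true;
    }
    return false;
  }
};

// Mirrors the shape of LocalAllocator::FreeLastIn*Space(): rewind if possible,
// otherwise leave a placeholder "filler" in the dead region.
void FreeLast(BumpBuffer* lab, uint8_t* object, int size) {
  if (!lab->TryFreeLast(object, size)) {
    std::memset(object, 0, size);  // placeholder for CreateFillerObjectAt()
  }
}

int main() {
  uint8_t backing[64];
  BumpBuffer lab{backing, backing + sizeof(backing)};
  uint8_t* a = lab.Allocate(16);
  uint8_t* b = lab.Allocate(16);
  FreeLast(&lab, b, 16);  // b was the last allocation: rewinds
  FreeLast(&lab, a, 16);  // cannot rewind: writes the placeholder filler
  return 0;
}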

View File

@@ -34,13 +34,32 @@ bool ContainsOnlyData(VisitorId visitor_id) {
} // namespace
void Scavenger::MigrateObject(Map* map, HeapObject* source, HeapObject* target,
void Scavenger::PageMemoryFence(Object* object) {
#ifdef THREAD_SANITIZER
// Perform a dummy acquire load to tell TSAN that there is no data race
// with page initialization.
if (object->IsHeapObject()) {
MemoryChunk* chunk =
MemoryChunk::FromAddress(HeapObject::cast(object)->address());
CHECK_NOT_NULL(chunk->synchronized_heap());
}
#endif
}
bool Scavenger::MigrateObject(Map* map, HeapObject* source, HeapObject* target,
int size) {
// Copy the content of source to target.
heap()->CopyBlock(target->address(), source->address(), size);
target->set_map_word(MapWord::FromMap(map));
heap()->CopyBlock(target->address() + kPointerSize,
source->address() + kPointerSize, size - kPointerSize);
// Set the forwarding address.
source->set_map_word(MapWord::FromForwardingAddress(target));
HeapObject* old = base::AsAtomicWord::Release_CompareAndSwap(
reinterpret_cast<HeapObject**>(source->address()), map,
MapWord::FromForwardingAddress(target).ToMap());
if (old != map) {
// Other task migrated the object.
return false;
}
if (V8_UNLIKELY(is_logging_)) {
// Update NewSpace stats if necessary.
@@ -49,10 +68,12 @@ void Scavenger::MigrateObject(Map* map, HeapObject* source, HeapObject* target,
}
if (is_incremental_marking_) {
heap()->incremental_marking()->TransferColor(source, target);
heap()->incremental_marking()->TransferColor<AccessMode::ATOMIC>(source,
target);
}
heap()->UpdateAllocationSite<Heap::kCached>(map, source,
&local_pretenuring_feedback_);
return true;
}
bool Scavenger::SemiSpaceCopyObject(Map* map, HeapObject** slot,
@@ -66,10 +87,16 @@ bool Scavenger::SemiSpaceCopyObject(Map* map, HeapObject** slot,
if (allocation.To(&target)) {
DCHECK(ObjectMarking::IsWhite(
target, heap()->mark_compact_collector()->marking_state(target)));
MigrateObject(map, object, target, object_size);
const bool self_success = MigrateObject(map, object, target, object_size);
if (!self_success) {
allocator_.FreeLast(NEW_SPACE, target, object_size);
MapWord map_word = object->map_word();
*slot = map_word.ToForwardingAddress();
return true;
}
*slot = target;
copied_list_.Insert(target, object_size);
copied_list_.Push(ObjectAndSize(target, object_size));
copied_size_ += object_size;
return true;
}
@@ -86,7 +113,13 @@ bool Scavenger::PromoteObject(Map* map, HeapObject** slot, HeapObject* object,
if (allocation.To(&target)) {
DCHECK(ObjectMarking::IsWhite(
target, heap()->mark_compact_collector()->marking_state(target)));
MigrateObject(map, object, target, object_size);
const bool self_success = MigrateObject(map, object, target, object_size);
if (!self_success) {
allocator_.FreeLast(OLD_SPACE, target, object_size);
MapWord map_word = object->map_word();
*slot = map_word.ToForwardingAddress();
return true;
}
*slot = target;
if (!ContainsOnlyData(static_cast<VisitorId>(map->visitor_id()))) {
@@ -106,14 +139,10 @@ void Scavenger::EvacuateObjectDefault(Map* map, HeapObject** slot,
if (!heap()->ShouldBePromoted(object->address())) {
// A semi-space copy may fail due to fragmentation. In that case, we
// try to promote the object.
if (SemiSpaceCopyObject(map, slot, object, object_size)) {
return;
}
if (SemiSpaceCopyObject(map, slot, object, object_size)) return;
}
if (PromoteObject(map, slot, object, object_size)) {
return;
}
if (PromoteObject(map, slot, object, object_size)) return;
// If promotion failed, we try to copy the object to the other semi-space
if (SemiSpaceCopyObject(map, slot, object, object_size)) return;
@@ -124,12 +153,15 @@ void Scavenger::EvacuateObjectDefault(Map* map, HeapObject** slot,
void Scavenger::EvacuateThinString(Map* map, HeapObject** slot,
ThinString* object, int object_size) {
if (!is_incremental_marking_) {
// Loading actual is fine in a parallel setting as there is no write.
HeapObject* actual = object->actual();
*slot = actual;
// ThinStrings always refer to internalized strings, which are
// always in old space.
DCHECK(!heap()->InNewSpace(actual));
object->set_map_word(MapWord::FromForwardingAddress(actual));
base::AsAtomicWord::Relaxed_Store(
reinterpret_cast<Map**>(object->address()),
MapWord::FromForwardingAddress(actual).ToMap());
return;
}
@@ -146,7 +178,9 @@ void Scavenger::EvacuateShortcutCandidate(Map* map, HeapObject** slot,
*slot = first;
if (!heap()->InNewSpace(first)) {
object->set_map_word(MapWord::FromForwardingAddress(first));
base::AsAtomicWord::Relaxed_Store(
reinterpret_cast<Map**>(object->address()),
MapWord::FromForwardingAddress(first).ToMap());
return;
}
@@ -155,12 +189,16 @@ void Scavenger::EvacuateShortcutCandidate(Map* map, HeapObject** slot,
HeapObject* target = first_word.ToForwardingAddress();
*slot = target;
object->set_map_word(MapWord::FromForwardingAddress(target));
base::AsAtomicWord::Relaxed_Store(
reinterpret_cast<Map**>(object->address()),
MapWord::FromForwardingAddress(target).ToMap());
return;
}
EvacuateObject(slot, first_word.ToMap(), first);
object->set_map_word(MapWord::FromForwardingAddress(*slot));
Map* map = first_word.ToMap();
EvacuateObjectDefault(map, slot, first, first->SizeFromMap(map));
base::AsAtomicWord::Relaxed_Store(
reinterpret_cast<Map**>(object->address()),
MapWord::FromForwardingAddress(*slot).ToMap());
return;
}
@@ -172,12 +210,16 @@ void Scavenger::EvacuateObject(HeapObject** slot, Map* map,
SLOW_DCHECK(heap_->InFromSpace(source));
SLOW_DCHECK(!MapWord::FromMap(map).IsForwardingAddress());
int size = source->SizeFromMap(map);
// Cannot use ::cast() below because that would add checks in debug mode
// that require re-reading the map.
switch (static_cast<VisitorId>(map->visitor_id())) {
case kVisitThinString:
EvacuateThinString(map, slot, ThinString::cast(source), size);
EvacuateThinString(map, slot, reinterpret_cast<ThinString*>(source),
size);
break;
case kVisitShortcutCandidate:
EvacuateShortcutCandidate(map, slot, ConsString::cast(source), size);
EvacuateShortcutCandidate(map, slot,
reinterpret_cast<ConsString*>(source), size);
break;
default:
EvacuateObjectDefault(map, slot, source, size);
@@ -188,10 +230,7 @@ void Scavenger::EvacuateObject(HeapObject** slot, Map* map,
void Scavenger::ScavengeObject(HeapObject** p, HeapObject* object) {
DCHECK(heap()->InFromSpace(object));
// We use the first word (where the map pointer usually is) of a heap
// object to record the forwarding pointer. A forwarding pointer can
// point to an old space, the code space, or the to space of the new
// generation.
// Relaxed load here. We either load a forwarding pointer or the map.
MapWord first_word = object->map_word();
// If the first word is a forwarding address, the object has already been
@@ -225,9 +264,14 @@ SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap,
// callback in to space, the object is still live.
// Unfortunately, we do not know about the slot. It could be in a
// just freed free space object.
PageMemoryFence(object);
if (heap->InToSpace(object)) {
return KEEP_SLOT;
}
} else if (heap->InToSpace(object)) {
// Already updated slot. This can happen when processing of the work list
// is interleaved with processing roots.
return KEEP_SLOT;
}
// Slots can point to "to" space if the slot has been recorded multiple
// times in the remembered set. We remove the redundant slot now.
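
Editor's note: the central change in MigrateObject() above is that the forwarding pointer is now installed with a release compare-and-swap on the map word, so when two tasks race on the same object exactly one wins; the loser frees its speculative copy via FreeLast() and adopts the winner's forwarding address. A condensed, hedged sketch of that protocol with std::atomic; the ToyObject type and the pointer tagging are invented for the illustration, and visibility of the copy's payload (handled separately in V8) is ignored.

#include <atomic>
#include <cstdint>

// First word of a toy object: either the "map" value or, once evacuated,
// a tagged forwarding pointer to the surviving copy.
struct ToyObject {
  std::atomic<uintptr_t> map_word;
};

constexpr uintptr_t kForwardingTag = 1;

// Returns the surviving copy of |source|: |my_copy| if this thread installed
// the forwarding pointer, otherwise the copy installed by the winning thread.
ToyObject* TryMigrate(ToyObject* source, ToyObject* my_copy, uintptr_t map) {
  uintptr_t expected = map;
  const uintptr_t forwarding =
      reinterpret_cast<uintptr_t>(my_copy) | kForwardingTag;
  if (source->map_word.compare_exchange_strong(expected, forwarding,
                                               std::memory_order_release,
                                               std::memory_order_relaxed)) {
    return my_copy;  // We won the race.
  }
  // Another task migrated the object first; |expected| now holds its
  // forwarding word. A caller would FreeLast(my_copy) and use this instead.
  return reinterpret_cast<ToyObject*>(expected & ~kForwardingTag);
}

int main() {
  ToyObject source;
  ToyObject copy;
  source.map_word.store(42, std::memory_order_relaxed);
  ToyObject* survivor = TryMigrate(&source, &copy, 42);
  return survivor == &copy ? 0 : 1;
}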

View File

@@ -26,12 +26,15 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
slot_address += kPointerSize) {
Object** slot = reinterpret_cast<Object**>(slot_address);
Object* target = *slot;
scavenger_->PageMemoryFence(target);
if (target->IsHeapObject()) {
if (heap_->InFromSpace(target)) {
scavenger_->ScavengeObject(reinterpret_cast<HeapObject**>(slot),
HeapObject::cast(target));
target = *slot;
scavenger_->PageMemoryFence(target);
if (heap_->InNewSpace(target)) {
SLOW_DCHECK(target->IsHeapObject());
SLOW_DCHECK(heap_->InToSpace(target));
@@ -73,9 +76,9 @@ void Scavenger::IterateAndScavengePromotedObject(HeapObject* target, int size) {
// objects. Grey objects' slots would be rescanned.
// A white object might not survive until the end of the collection,
// so it would be a violation of the invariant to record its slots.
const bool record_slots =
heap()->incremental_marking()->IsCompacting() &&
ObjectMarking::IsBlack(target, MarkingState::Internal(target));
const bool record_slots = heap()->incremental_marking()->IsCompacting() &&
ObjectMarking::IsBlack<AccessMode::ATOMIC>(
target, MarkingState::Internal(target));
IterateAndScavengePromotedObjectsVisitor visitor(heap(), this, record_slots);
if (target->IsJSFunction()) {
// JSFunctions reachable through kNextFunctionLinkOffset are weak. Slots for
@@ -86,34 +89,41 @@ void Scavenger::IterateAndScavengePromotedObject(HeapObject* target, int size) {
}
}
void Scavenger::Process() {
void Scavenger::Process(Barrier* barrier) {
// Threshold when to switch processing the promotion list to avoid
// allocating too much backing store in the worklist.
const int kProcessPromotionListThreshold = kPromotionListSegmentSize / 2;
ScavengeVisitor scavenge_visitor(heap(), this);
const bool have_barrier = barrier != nullptr;
bool done;
size_t objects = 0;
do {
done = true;
AddressRange range;
ObjectAndSize object_and_size;
while ((promotion_list_.LocalPushSegmentSize() <
kProcessPromotionListThreshold) &&
copied_list_.Pop(&range)) {
for (Address current = range.first; current < range.second;) {
HeapObject* object = HeapObject::FromAddress(current);
int size = object->Size();
scavenge_visitor.Visit(object);
current += size;
}
copied_list_.Pop(&object_and_size)) {
scavenge_visitor.Visit(object_and_size.first);
done = false;
if (have_barrier && ((++objects % kInterruptThreshold) == 0)) {
if (!copied_list_.IsGlobalPoolEmpty()) {
barrier->NotifyAll();
}
}
}
ObjectAndSize object_and_size;
while (promotion_list_.Pop(&object_and_size)) {
HeapObject* target = object_and_size.first;
int size = object_and_size.second;
DCHECK(!target->IsMap());
IterateAndScavengePromotedObject(target, size);
done = false;
if (have_barrier && ((++objects % kInterruptThreshold) == 0)) {
if (!promotion_list_.IsGlobalPoolEmpty()) {
barrier->NotifyAll();
}
}
}
} while (!done);
}
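
Editor's note: Process() above now alternates between draining the copied list and the promotion list, and every kInterruptThreshold objects it wakes idle tasks if the shared pool still holds work, so a task parked in the barrier can resume instead of waiting for the end of the scavenge. A rough sketch of that loop shape, with std:: containers and callbacks standing in for the V8 worklists and barrier, and with the promotion-list switching threshold omitted:

#include <deque>
#include <functional>
#include <utility>

using ObjectAndSize = std::pair<void*, int>;

// |visit| may push more entries onto either list; |notify_waiters| stands in
// for barrier->NotifyAll().
void DrainSketch(std::deque<ObjectAndSize>& copied,
                 std::deque<ObjectAndSize>& promoted,
                 const std::function<void(ObjectAndSize)>& visit,
                 const std::function<void()>& notify_waiters) {
  const int kInterruptThreshold = 128;  // mirrors the constant in this patch
  int objects = 0;
  bool done;
  do {
    done = true;
    while (!copied.empty()) {
      ObjectAndSize entry = copied.front();
      copied.pop_front();
      visit(entry);
      done = false;
      // Periodically wake parked tasks, since visiting may have published
      // new shared work.
      if (++objects % kInterruptThreshold == 0) notify_waiters();
    }
    while (!promoted.empty()) {
      ObjectAndSize entry = promoted.front();
      promoted.pop_front();
      visit(entry);
      done = false;
      if (++objects % kInterruptThreshold == 0) notify_waiters();
    }
  } while (!done);
}

int main() {
  std::deque<ObjectAndSize> copied = {{nullptr, 8}, {nullptr, 16}};
  std::deque<ObjectAndSize> promoted;
  DrainSketch(copied, promoted, [](ObjectAndSize) { /* scavenge_visitor */ },
              [] { /* NotifyAll() */ });
  return 0;
}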

View File

@@ -5,6 +5,7 @@
#ifndef V8_HEAP_SCAVENGER_H_
#define V8_HEAP_SCAVENGER_H_
#include "src/base/platform/condition-variable.h"
#include "src/heap/local-allocator.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/slot-set.h"
@@ -13,62 +14,50 @@
namespace v8 {
namespace internal {
static const int kCopiedListSegmentSize = 64;
static const int kPromotionListSegmentSize = 64;
static const int kCopiedListSegmentSize = 256;
static const int kPromotionListSegmentSize = 256;
using AddressRange = std::pair<Address, Address>;
using CopiedList = Worklist<AddressRange, kCopiedListSegmentSize>;
using ObjectAndSize = std::pair<HeapObject*, int>;
using CopiedList = Worklist<ObjectAndSize, kCopiedListSegmentSize>;
using PromotionList = Worklist<ObjectAndSize, kPromotionListSegmentSize>;
// A list of copied ranges. Keeps the last consecutive range local and announces
// all other ranges to a global work list.
class CopiedRangesList {
public:
CopiedRangesList(CopiedList* copied_list, int task_id)
: current_start_(nullptr),
current_end_(nullptr),
copied_list_(copied_list, task_id) {}
~CopiedRangesList() {
CHECK_NULL(current_start_);
CHECK_NULL(current_end_);
}
void Insert(HeapObject* object, int size) {
const Address object_address = object->address();
if (current_end_ != object_address) {
if (current_start_ != nullptr) {
copied_list_.Push(AddressRange(current_start_, current_end_));
}
current_start_ = object_address;
current_end_ = current_start_ + size;
return;
}
DCHECK_EQ(current_end_, object_address);
current_end_ += size;
return;
}
bool Pop(AddressRange* entry) {
if (copied_list_.Pop(entry)) {
return true;
} else if (current_start_ != nullptr) {
*entry = AddressRange(current_start_, current_end_);
current_start_ = current_end_ = nullptr;
return true;
}
return false;
}
private:
Address current_start_;
Address current_end_;
CopiedList::View copied_list_;
};
class Scavenger {
public:
class Barrier {
public:
explicit Barrier(int tasks) : tasks_(tasks), waiting_(0), done_(false) {}
void NotifyAll() {
base::LockGuard<base::Mutex> guard(&mutex_);
if (waiting_ > 0) condition_.NotifyAll();
}
void Wait() {
base::LockGuard<base::Mutex> guard(&mutex_);
waiting_++;
if (waiting_ == tasks_) {
done_ = true;
condition_.NotifyAll();
} else {
// Spurious wakeup is ok here.
condition_.Wait(&mutex_);
}
waiting_--;
}
void Reset() { done_ = false; }
bool Done() { return done_; }
private:
base::ConditionVariable condition_;
base::Mutex mutex_;
int tasks_;
int waiting_;
bool done_;
};
Scavenger(Heap* heap, bool is_logging, bool is_incremental_marking,
CopiedList* copied_list, PromotionList* promotion_list, int task_id)
: heap_(heap),
@@ -92,7 +81,7 @@ class Scavenger {
// Processes remaining work (=objects) after single objects have been
// manually scavenged using ScavengeObject or CheckAndScavengeObject.
void Process();
void Process(Barrier* barrier = nullptr);
// Finalize the Scavenger. Needs to be called from the main thread.
void Finalize();
@@ -101,12 +90,17 @@ class Scavenger {
size_t bytes_promoted() const { return promoted_size_; }
private:
// Number of objects to process before interrupting for potentially waking
// up other tasks.
static const int kInterruptThreshold = 128;
static const int kInitialLocalPretenuringFeedbackCapacity = 256;
inline Heap* heap() { return heap_; }
inline void PageMemoryFence(Object* object);
// Copies |source| to |target| and sets the forwarding pointer in |source|.
V8_INLINE void MigrateObject(Map* map, HeapObject* source, HeapObject* target,
V8_INLINE bool MigrateObject(Map* map, HeapObject* source, HeapObject* target,
int size);
V8_INLINE bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
@@ -138,13 +132,15 @@ class Scavenger {
Heap* const heap_;
PromotionList::View promotion_list_;
CopiedRangesList copied_list_;
CopiedList::View copied_list_;
base::HashMap local_pretenuring_feedback_;
size_t copied_size_;
size_t promoted_size_;
LocalAllocator allocator_;
bool is_logging_;
bool is_incremental_marking_;
friend class IterateAndScavengePromotedObjectsVisitor;
};
// Helper class for turning the scavenger into an object visitor that is also
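
Editor's note: the Barrier above implements the termination protocol for the parallel tasks. A task that has drained its worklists parks in Wait() and is woken either by NotifyAll() from a task that just published new global work, or by the last task arriving, which marks the whole scavenge as done. Below is a self-contained sketch of the same protocol on std:: primitives; ParkingBarrier and the main() driver are illustrative stand-ins, with the actual worklist draining elided.

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

class ParkingBarrier {
 public:
  explicit ParkingBarrier(int tasks) : tasks_(tasks) {}

  // Called by a task that just published shared work: wake any parked tasks.
  void NotifyAll() {
    std::lock_guard<std::mutex> guard(mutex_);
    if (waiting_ > 0) condition_.notify_all();
  }

  // Park until woken; the last task to arrive marks the job as done.
  void Wait() {
    std::unique_lock<std::mutex> lock(mutex_);
    waiting_++;
    if (waiting_ == tasks_) {
      done_ = true;
      condition_.notify_all();
    } else {
      condition_.wait(lock);  // spurious wakeups are fine; callers re-check
    }
    waiting_--;
  }

  bool Done() {
    std::lock_guard<std::mutex> guard(mutex_);
    return done_;
  }

 private:
  std::condition_variable condition_;
  std::mutex mutex_;
  const int tasks_;
  int waiting_ = 0;
  bool done_ = false;
};

int main() {
  const int kTasks = 4;
  ParkingBarrier barrier(kTasks);
  std::vector<std::thread> tasks;
  for (int i = 0; i < kTasks; i++) {
    tasks.emplace_back([&barrier, i] {
      // Mirrors ScavengingTask::RunInParallel(): drain work, then park until
      // new work arrives or every task has drained.
      while (!barrier.Done()) {
        // ... Process(barrier) would drain the worklists here ...
        barrier.Wait();
      }
      std::printf("task %d finished\n", i);
    });
  }
  for (std::thread& t : tasks) t.join();
  return 0;
}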

View File

@@ -172,6 +172,17 @@ intptr_t PagedSpace::RelinkFreeListCategories(Page* page) {
return added;
}
bool PagedSpace::TryFreeLast(HeapObject* object, int object_size) {
if (allocation_info_.top() != nullptr) {
const Address object_address = object->address();
if ((allocation_info_.top() - object_size) == object_address) {
allocation_info_.set_top(object_address);
return true;
}
}
return false;
}
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
uintptr_t offset = addr - chunk->address();
@@ -519,6 +530,17 @@ bool LocalAllocationBuffer::TryMerge(LocalAllocationBuffer* other) {
return false;
}
bool LocalAllocationBuffer::TryFreeLast(HeapObject* object, int object_size) {
if (IsValid()) {
const Address object_address = object->address();
if ((allocation_info_.top() - object_size) == object_address) {
allocation_info_.set_top(object_address);
return true;
}
}
return false;
}
} // namespace internal
} // namespace v8

View File

@@ -1967,6 +1967,8 @@ class LocalAllocationBuffer {
// Returns true if the merge was successful, false otherwise.
inline bool TryMerge(LocalAllocationBuffer* other);
inline bool TryFreeLast(HeapObject* object, int object_size);
// Close a LAB, effectively invalidating it. Returns the unused area.
AllocationInfo Close();
@@ -2099,6 +2101,8 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
return size_in_bytes - wasted;
}
inline bool TryFreeLast(HeapObject* object, int object_size);
void ResetFreeList() { free_list_.Reset(); }
// Set space allocation info.

View File

@@ -514,7 +514,7 @@ String* ConsString::second() {
}
Object* ConsString::unchecked_second() {
return READ_FIELD(this, kSecondOffset);
return RELAXED_READ_FIELD(this, kSecondOffset);
}
void ConsString::set_second(String* value, WriteBarrierMode mode) {

View File

@@ -26,6 +26,8 @@ v8::Isolate* NewIsolateForPagePromotion(int min_semi_space_size = 8,
FLAG_parallel_compaction = false;
FLAG_page_promotion = true;
FLAG_page_promotion_threshold = 0;
// Parallel scavenge introduces too much fragmentation.
FLAG_parallel_scavenge = false;
FLAG_min_semi_space_size = min_semi_space_size;
// We cannot optimize for size as we require a new space with more than one
// page.

View File

@@ -47,6 +47,7 @@
#include "src/execution.h"
#include "src/futex-emulation.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/local-allocator.h"
#include "src/lookup.h"
#include "src/objects-inl.h"
#include "src/parsing/preparse-data.h"
@@ -7577,6 +7578,8 @@ static void SetFlag(const v8::WeakCallbackInfo<FlagAndPersistent>& data) {
static void IndependentWeakHandle(bool global_gc, bool interlinked) {
i::FLAG_stress_incremental_marking = false;
// Parallel scavenge introduces too much fragmentation.
i::FLAG_parallel_scavenge = false;
v8::Isolate* iso = CcTest::isolate();
v8::HandleScope scope(iso);
v8::Local<Context> context = Context::New(iso);