[heap] Refactor and simplify pretenuring infrastructure

Bug: 
Change-Id: I81132af45d8fb649d4239fa0e0ef75b95e148208
Reviewed-on: https://chromium-review.googlesource.com/633604
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47606}
Michael Lippautz authored on 2017-08-25 10:55:55 +02:00; committed by Commit Bot
commit 234d4307d7 (parent 08bfcb293c)
6 changed files with 47 additions and 87 deletions
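At a glance, the change replaces the heap-allocated base::HashMap that backed pretenuring feedback (keyed by raw Address, with counts punned through bit_cast) with a std::unordered_map<AllocationSite*, size_t> owned by the Heap directly, which also makes the per-GC PretenuringScope unnecessary. The following is a minimal, self-contained sketch of the resulting shape; it is illustrative only, not V8 code, and Site, RecordFeedback, and the capacity value are stand-ins:

    #include <cstddef>
    #include <unordered_map>

    struct Site {};  // stand-in for v8::internal::AllocationSite

    class Heap {
     public:
      // Mirrors the alias introduced in heap.h (AllocationSite* -> count).
      using PretenuringFeedbackMap = std::unordered_map<Site*, std::size_t>;

      Heap() : global_pretenuring_feedback_(kInitialFeedbackCapacity) {}

      // Per-task feedback recording: operator[] value-initializes the
      // mapped count to 0 on first use, so the old LookupOrInsert plus
      // bit_cast increment collapses to a single expression.
      static void RecordFeedback(Site* site, PretenuringFeedbackMap* local) {
        (*local)[site]++;
      }

     private:
      // Illustrative value; V8 defines its own kInitialFeedbackCapacity.
      static const int kInitialFeedbackCapacity = 256;
      // Owned by value for the heap's lifetime; no PretenuringScope that
      // news/deletes it around every GC.
      PretenuringFeedbackMap global_pretenuring_feedback_;
    };

    int main() {
      Heap heap;
      Heap::PretenuringFeedbackMap local;
      Site s;
      Heap::RecordFeedback(&s, &local);
      Heap::RecordFeedback(&s, &local);
      return local[&s] == 2 ? 0 : 1;
    }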


@@ -518,9 +518,9 @@ AllocationMemento* Heap::FindAllocationMemento(Map* map, HeapObject* object) {
UNREACHABLE();
}
template <Heap::UpdateAllocationSiteMode mode>
void Heap::UpdateAllocationSite(Map* map, HeapObject* object,
base::HashMap* pretenuring_feedback) {
PretenuringFeedbackMap* pretenuring_feedback) {
DCHECK_NE(pretenuring_feedback, &global_pretenuring_feedback_);
DCHECK(InFromSpace(object) ||
(InToSpace(object) &&
Page::FromAddress(object->address())
@@ -535,37 +535,16 @@ void Heap::UpdateAllocationSite(Map* map, HeapObject* object,
FindAllocationMemento<kForGC>(map, object);
if (memento_candidate == nullptr) return;
if (mode == kGlobal) {
DCHECK_EQ(pretenuring_feedback, global_pretenuring_feedback_);
// Entering global pretenuring feedback is only used in the scavenger, where
// we are allowed to actually touch the allocation site.
if (!memento_candidate->IsValid()) return;
AllocationSite* site = memento_candidate->GetAllocationSite();
DCHECK(!site->IsZombie());
// For inserting in the global pretenuring storage we need to first
// increment the memento found count on the allocation site.
if (site->IncrementMementoFoundCount()) {
global_pretenuring_feedback_->LookupOrInsert(site,
ObjectHash(site->address()));
}
} else {
DCHECK_EQ(mode, kCached);
DCHECK_NE(pretenuring_feedback, global_pretenuring_feedback_);
// Entering cached feedback is used in the parallel case. We are not allowed
// to dereference the allocation site and rather have to postpone all checks
// till actually merging the data.
Address key = memento_candidate->GetAllocationSiteUnchecked();
base::HashMap::Entry* e =
pretenuring_feedback->LookupOrInsert(key, ObjectHash(key));
DCHECK(e != nullptr);
(*bit_cast<intptr_t*>(&e->value))++;
}
// Entering cached feedback is used in the parallel case. We are not allowed
// to dereference the allocation site and rather have to postpone all checks
// till actually merging the data.
Address key = memento_candidate->GetAllocationSiteUnchecked();
(*pretenuring_feedback)[reinterpret_cast<AllocationSite*>(key)]++;
}
void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite* site) {
global_pretenuring_feedback_->Remove(
site, static_cast<uint32_t>(bit_cast<uintptr_t>(site)));
global_pretenuring_feedback_.erase(site);
}
Isolate* Heap::isolate() {
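Worth noting about the new heap-inl.h path above: std::unordered_map<AllocationSite*, size_t> hashes the pointer value itself via std::hash, so the explicit ObjectHash(site->address()) that every base::HashMap call needed disappears, and erase() replaces Remove(). A tiny standalone demonstration of the container behavior the new code relies on (Site is a placeholder type, not V8 code):

    #include <cassert>
    #include <cstddef>
    #include <unordered_map>

    struct Site {};  // stand-in for AllocationSite

    int main() {
      // std::hash<Site*> hashes the pointer value itself, so call sites no
      // longer need to pass ObjectHash(site->address()) explicitly.
      std::unordered_map<Site*, std::size_t> feedback;

      Site a, b;
      feedback[&a]++;  // first access value-initializes the count to 0
      feedback[&a]++;
      feedback[&b]++;
      assert(feedback[&a] == 2);
      assert(feedback.count(&b) == 1);

      feedback.erase(&a);  // replaces base::HashMap::Remove(site, hash)
      assert(feedback.count(&a) == 0);
      return 0;
    }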


@@ -164,7 +164,7 @@ Heap::Heap()
new_space_allocation_counter_(0),
old_generation_allocation_counter_at_last_gc_(0),
old_generation_size_at_last_gc_(0),
global_pretenuring_feedback_(nullptr),
global_pretenuring_feedback_(kInitialFeedbackCapacity),
is_marking_flag_(false),
ring_buffer_full_(false),
ring_buffer_end_(0),
@@ -603,13 +603,11 @@ void Heap::RepairFreeListsAfterDeserialization() {
}
void Heap::MergeAllocationSitePretenuringFeedback(
const base::HashMap& local_pretenuring_feedback) {
const PretenuringFeedbackMap& local_pretenuring_feedback) {
AllocationSite* site = nullptr;
for (base::HashMap::Entry* local_entry = local_pretenuring_feedback.Start();
local_entry != nullptr;
local_entry = local_pretenuring_feedback.Next(local_entry)) {
site = reinterpret_cast<AllocationSite*>(local_entry->key);
MapWord map_word = site->map_word();
for (auto& site_and_count : local_pretenuring_feedback) {
site = site_and_count.first;
MapWord map_word = site_and_count.first->map_word();
if (map_word.IsForwardingAddress()) {
site = AllocationSite::cast(map_word.ToForwardingAddress());
}
@@ -619,13 +617,11 @@ void Heap::MergeAllocationSitePretenuringFeedback(
// This is an inlined check of AllocationMemento::IsValid.
if (!site->IsAllocationSite() || site->IsZombie()) continue;
int value =
static_cast<int>(reinterpret_cast<intptr_t>(local_entry->value));
DCHECK_GT(value, 0);
const int value = static_cast<int>(site_and_count.second);
DCHECK_LT(0, value);
if (site->IncrementMementoFoundCount(value)) {
global_pretenuring_feedback_->LookupOrInsert(site,
ObjectHash(site->address()));
// For sites in the global map the count is accessed through the site.
global_pretenuring_feedback_.insert(std::make_pair(site, 0));
}
}
}
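One subtlety in the merge above: after merging, the real count lives on the AllocationSite itself, and the global map only records which sites have feedback, which is why a zero is inserted here and why ProcessPretenuringFeedback later DCHECKs the mapped value. A simplified standalone sketch of that contract; Site and its increment method are stand-ins with made-up semantics, and forwarding-pointer resolution is omitted:

    #include <cstddef>
    #include <unordered_map>
    #include <utility>

    // Stand-in for AllocationSite; the increment contract is invented for
    // this sketch, not the real AllocationSite API.
    struct Site {
      int memento_found_count = 0;
      bool IncrementMementoFoundCount(int value) {
        bool first_feedback = (memento_found_count == 0);
        memento_found_count += value;
        return first_feedback;
      }
    };

    using FeedbackMap = std::unordered_map<Site*, std::size_t>;

    void MergeFeedback(const FeedbackMap& local, FeedbackMap* global) {
      for (const auto& site_and_count : local) {
        Site* site = site_and_count.first;
        const int value = static_cast<int>(site_and_count.second);
        if (site->IncrementMementoFoundCount(value)) {
          // The global map only tracks membership; the count itself is
          // read back from the site, so the mapped value stays 0.
          global->insert(std::make_pair(site, 0));
        }
      }
    }

    int main() {
      Site s;
      FeedbackMap local, global;
      local[&s] = 3;
      MergeFeedback(local, &global);
      return (global.count(&s) == 1 && s.memento_found_count == 3) ? 0 : 1;
    }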
@@ -647,22 +643,6 @@ class Heap::SkipStoreBufferScope {
StoreBuffer* store_buffer_;
};
class Heap::PretenuringScope {
public:
explicit PretenuringScope(Heap* heap) : heap_(heap) {
heap_->global_pretenuring_feedback_ =
new base::HashMap(kInitialFeedbackCapacity);
}
~PretenuringScope() {
delete heap_->global_pretenuring_feedback_;
heap_->global_pretenuring_feedback_ = nullptr;
}
private:
Heap* heap_;
};
namespace {
inline bool MakePretenureDecision(
AllocationSite* site, AllocationSite::PretenureDecision current_decision,
@@ -736,10 +716,11 @@ void Heap::ProcessPretenuringFeedback() {
// Step 1: Digest feedback for recorded allocation sites.
bool maximum_size_scavenge = MaximumSizeScavenge();
for (base::HashMap::Entry* e = global_pretenuring_feedback_->Start();
e != nullptr; e = global_pretenuring_feedback_->Next(e)) {
for (auto& site_and_count : global_pretenuring_feedback_) {
allocation_sites++;
site = reinterpret_cast<AllocationSite*>(e->key);
site = site_and_count.first;
// Count is always accessed through the site.
DCHECK_EQ(0, site_and_count.second);
int found_count = site->memento_found_count();
// An entry in the storage does not imply that the count is > 0 because
// allocation sites might have been reset due to too many objects dying
@@ -790,6 +771,9 @@ void Heap::ProcessPretenuringFeedback() {
active_allocation_sites, allocation_mementos_found,
tenure_decisions, dont_tenure_decisions);
}
global_pretenuring_feedback_.clear();
global_pretenuring_feedback_.reserve(kInitialFeedbackCapacity);
}
}
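With the PretenuringScope shown earlier in this file now removed, the global map is no longer allocated and freed around each GC; it is reset in place at the end of feedback processing, as above. A small sketch of that reuse idiom; the capacity value is illustrative, not V8's:

    #include <cstddef>
    #include <unordered_map>

    struct Site {};
    using FeedbackMap = std::unordered_map<Site*, std::size_t>;

    // Illustrative value; V8 defines its own kInitialFeedbackCapacity.
    constexpr std::size_t kInitialFeedbackCapacity = 256;

    void ResetFeedback(FeedbackMap* feedback) {
      // clear() destroys all entries; typical implementations keep the
      // bucket array, so resetting per GC cycle avoids reallocating it.
      feedback->clear();
      // reserve() guarantees room for at least this many elements before
      // the next rehash, restoring the initial sizing of the member.
      feedback->reserve(kInitialFeedbackCapacity);
    }

    int main() {
      FeedbackMap feedback(kInitialFeedbackCapacity);
      Site s;
      feedback[&s] = 3;
      ResetFeedback(&feedback);
      return feedback.empty() ? 0 : 1;
    }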
@@ -1516,7 +1500,6 @@ bool Heap::PerformGarbageCollection(
int start_new_space_size = static_cast<int>(Heap::new_space()->Size());
{
Heap::PretenuringScope pretenuring_scope(this);
Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_);
switch (collector) {


@@ -6,6 +6,7 @@
#define V8_HEAP_HEAP_H_
#include <cmath>
#include <unordered_map>
#include <vector>
// Clients of this interface shouldn't depend on lots of heap internals.
@@ -554,7 +555,7 @@ class Heap {
enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT, MINOR_MARK_COMPACT };
enum UpdateAllocationSiteMode { kGlobal, kCached };
using PretenuringFeedbackMap = std::unordered_map<AllocationSite*, size_t>;
// Taking this mutex prevents the GC from entering a phase that relocates
// object references.
@@ -1473,14 +1474,11 @@ class Heap {
// Allocation site tracking. =================================================
// ===========================================================================
// Updates the AllocationSite of a given {object}. If the global pretenuring
// storage is passed as {pretenuring_feedback} the memento found count on
// the corresponding allocation site is immediately updated and an entry
// in the hash map is created. Otherwise the entry (including the count
// value) is cached on the local pretenuring feedback.
template <UpdateAllocationSiteMode mode>
inline void UpdateAllocationSite(Map* map, HeapObject* object,
base::HashMap* pretenuring_feedback);
// Updates the AllocationSite of a given {object}. The entry (including the
// count) is cached on the local pretenuring feedback.
inline void UpdateAllocationSite(
Map* map, HeapObject* object,
PretenuringFeedbackMap* pretenuring_feedback);
// Removes an entry from the global pretenuring storage.
inline void RemoveAllocationSitePretenuringFeedback(AllocationSite* site);
@@ -1489,7 +1487,7 @@ class Heap {
// method needs to be called after evacuation, as allocation sites may be
// evacuated and this method resolves forward pointers accordingly.
void MergeAllocationSitePretenuringFeedback(
const base::HashMap& local_pretenuring_feedback);
const PretenuringFeedbackMap& local_pretenuring_feedback);
// ===========================================================================
// Retaining path tracking. ==================================================
@@ -2375,7 +2373,7 @@ class Heap {
// storage is only alive temporarily during a GC. The invariant is that all
// pointers in this map are already fixed, i.e., they do not point to
// forwarding pointers.
base::HashMap* global_pretenuring_feedback_;
PretenuringFeedbackMap global_pretenuring_feedback_;
char trace_ring_buffer_[kTraceRingBufferSize];


@@ -1680,9 +1680,10 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
public:
explicit EvacuateNewSpaceVisitor(Heap* heap, LocalAllocator* local_allocator,
RecordMigratedSlotVisitor* record_visitor,
base::HashMap* local_pretenuring_feedback)
explicit EvacuateNewSpaceVisitor(
Heap* heap, LocalAllocator* local_allocator,
RecordMigratedSlotVisitor* record_visitor,
Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
: EvacuateVisitorBase(heap, local_allocator, record_visitor),
buffer_(LocalAllocationBuffer::InvalidBuffer()),
promoted_size_(0),
@@ -1696,8 +1697,8 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
promoted_size_ += size;
return true;
}
heap_->UpdateAllocationSite<Heap::kCached>(object->map(), object,
local_pretenuring_feedback_);
heap_->UpdateAllocationSite(object->map(), object,
local_pretenuring_feedback_);
HeapObject* target = nullptr;
AllocationSpace space = AllocateTargetObject(object, size, &target);
MigrateObject(HeapObject::cast(target), object, size, space);
@@ -1739,7 +1740,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
LocalAllocationBuffer buffer_;
intptr_t promoted_size_;
intptr_t semispace_copied_size_;
base::HashMap* local_pretenuring_feedback_;
Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
};
template <PageEvacuationMode mode>
@@ -1747,7 +1748,7 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
public:
explicit EvacuateNewSpacePageVisitor(
Heap* heap, RecordMigratedSlotVisitor* record_visitor,
base::HashMap* local_pretenuring_feedback)
Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
: heap_(heap),
record_visitor_(record_visitor),
moved_bytes_(0),
@@ -1771,8 +1772,8 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
inline bool Visit(HeapObject* object, int size) {
if (mode == NEW_TO_NEW) {
heap_->UpdateAllocationSite<Heap::kCached>(object->map(), object,
local_pretenuring_feedback_);
heap_->UpdateAllocationSite(object->map(), object,
local_pretenuring_feedback_);
} else if (mode == NEW_TO_OLD) {
object->IterateBodyFast(record_visitor_);
}
@@ -1786,7 +1787,7 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
Heap* heap_;
RecordMigratedSlotVisitor* record_visitor_;
intptr_t moved_bytes_;
base::HashMap* local_pretenuring_feedback_;
Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
};
class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
@@ -3290,7 +3291,7 @@ class Evacuator : public Malloced {
// Locally cached collector data.
LocalAllocator local_allocator_;
CompactionSpaceCollection compaction_spaces_;
base::HashMap local_pretenuring_feedback_;
Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
// Visitors for the corresponding spaces.
EvacuateNewSpaceVisitor new_space_visitor_;
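The mark-compact changes above are type plumbing: each evacuator and visitor keeps its own Heap::PretenuringFeedbackMap, fills it without touching shared state or dereferencing allocation sites, and the heap folds the local maps together afterwards via MergeAllocationSitePretenuringFeedback. A simplified sketch of that accumulate-locally/merge-later pattern; the std::thread setup and the mutex around the merge are illustrative, not V8's task or synchronization machinery:

    #include <cstddef>
    #include <mutex>
    #include <thread>
    #include <unordered_map>
    #include <vector>

    struct Site {};  // stand-in for AllocationSite
    using FeedbackMap = std::unordered_map<Site*, std::size_t>;

    int main() {
      std::vector<Site> sites(4);
      FeedbackMap global;
      std::mutex global_mutex;

      auto evacuation_task = [&](std::size_t begin, std::size_t end) {
        // Each task owns its local map: recording feedback never touches
        // shared state while objects are being visited.
        FeedbackMap local;
        for (std::size_t i = begin; i < end; ++i) local[&sites[i]]++;

        // Fold the local counts into the shared map once, at the end.
        // (The mutex only serializes this toy merge; V8 organizes the
        // real merge through its own GC machinery.)
        std::lock_guard<std::mutex> guard(global_mutex);
        for (const auto& entry : local) global[entry.first] += entry.second;
      };

      std::thread t1(evacuation_task, 0, 2);
      std::thread t2(evacuation_task, 2, 4);
      t1.join();
      t2.join();
      return global.size() == 4 ? 0 : 1;
    }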


@@ -70,8 +70,7 @@ bool Scavenger::MigrateObject(Map* map, HeapObject* source, HeapObject* target,
if (is_incremental_marking_) {
heap()->incremental_marking()->TransferColor(source, target);
}
heap()->UpdateAllocationSite<Heap::kCached>(map, source,
&local_pretenuring_feedback_);
heap()->UpdateAllocationSite(map, source, &local_pretenuring_feedback_);
return true;
}


@@ -129,7 +129,7 @@ class Scavenger {
Heap* const heap_;
PromotionList::View promotion_list_;
CopiedList::View copied_list_;
base::HashMap local_pretenuring_feedback_;
Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
size_t copied_size_;
size_t promoted_size_;
LocalAllocator allocator_;