[heap] Refactor pretenuring logic out of heap
This unblocks moving sweeper to the heap by resolving include cycles.

Bug: v8:12612
Change-Id: I555182206ee28190ebf23a7ae0b10ba6d532e330
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3932719
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#83543}
parent 8efa1719f0
commit 7717862546
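For orientation, here is a condensed, hand-written sketch of how call sites change with this refactor. It is illustrative only and not compilable outside the V8 tree; all names (including the PretenturingHandler spelling) are taken from the hunks below, while the surrounding variables (map, object, heap_) stand in for whatever the caller already has.

// Before this change: pretenuring feedback was collected and merged through Heap.
//   heap_->UpdateAllocationSite(map, object, &local_pretenuring_feedback_);
//   heap_->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);

// After this change: the same operations go through the handler owned by the heap.
PretenturingHandler* handler = heap_->pretenuring_handler();
PretenturingHandler::PretenuringFeedbackMap local_feedback(
    PretenturingHandler::kInitialFeedbackCapacity);
handler->UpdateAllocationSite(map, object, &local_feedback);
handler->MergeAllocationSitePretenuringFeedback(local_feedback);

Runtime callers change the same way: Heap::FindAllocationMemento<Heap::kForRuntime> becomes heap->pretenuring_handler()->FindAllocationMemento<PretenturingHandler::kForRuntime>, as in the objects and runtime hunks at the end of the diff.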
@@ -1533,6 +1533,9 @@ filegroup(
|
||||
"src/heap/paged-spaces.h",
|
||||
"src/heap/parallel-work-item.h",
|
||||
"src/heap/parked-scope.h",
|
||||
"src/heap/pretenuring-handler-inl.h",
|
||||
"src/heap/pretenuring-handler.cc",
|
||||
"src/heap/pretenuring-handler.h",
|
||||
"src/heap/progress-bar.h",
|
||||
"src/heap/read-only-heap-inl.h",
|
||||
"src/heap/read-only-heap.cc",
|
||||
|
BUILD.gn (3 changed lines)
@@ -3102,6 +3102,8 @@ v8_header_set("v8_internal_headers") {
|
||||
"src/heap/paged-spaces.h",
|
||||
"src/heap/parallel-work-item.h",
|
||||
"src/heap/parked-scope.h",
|
||||
"src/heap/pretenuring-handler-inl.h",
|
||||
"src/heap/pretenuring-handler.h",
|
||||
"src/heap/progress-bar.h",
|
||||
"src/heap/read-only-heap-inl.h",
|
||||
"src/heap/read-only-heap.h",
|
||||
@@ -4463,6 +4465,7 @@ v8_source_set("v8_base_without_compiler") {
|
||||
"src/heap/object-stats.cc",
|
||||
"src/heap/objects-visiting.cc",
|
||||
"src/heap/paged-spaces.cc",
|
||||
"src/heap/pretenuring-handler.cc",
|
||||
"src/heap/read-only-heap.cc",
|
||||
"src/heap/read-only-spaces.cc",
|
||||
"src/heap/safepoint.cc",
|
||||
|
src/DEPS (1 changed line)
@@ -32,6 +32,7 @@ include_rules = [
|
||||
"+src/heap/local-factory.h",
|
||||
"+src/heap/local-heap.h",
|
||||
"+src/heap/local-heap-inl.h",
|
||||
"+src/heap/pretenuring-handler-inl.h",
|
||||
# TODO(v8:10496): Don't expose memory chunk outside of heap/.
|
||||
"+src/heap/memory-chunk.h",
|
||||
"+src/heap/memory-chunk-inl.h",
|
||||
|
@@ -14,7 +14,6 @@
|
||||
#include "src/base/atomicops.h"
|
||||
#include "src/base/platform/mutex.h"
|
||||
#include "src/base/platform/platform.h"
|
||||
#include "src/base/sanitizer/msan.h"
|
||||
#include "src/common/assert-scope.h"
|
||||
#include "src/common/code-memory-access-inl.h"
|
||||
#include "src/execution/isolate-data.h"
|
||||
@@ -353,96 +352,6 @@ void Heap::CopyBlock(Address dst, Address src, int byte_size) {
|
||||
CopyTagged(dst, src, static_cast<size_t>(byte_size / kTaggedSize));
|
||||
}
|
||||
|
||||
template <Heap::FindMementoMode mode>
|
||||
AllocationMemento Heap::FindAllocationMemento(Map map, HeapObject object) {
|
||||
Address object_address = object.address();
|
||||
Address memento_address =
|
||||
object_address + ALIGN_TO_ALLOCATION_ALIGNMENT(object.SizeFromMap(map));
|
||||
Address last_memento_word_address = memento_address + kTaggedSize;
|
||||
// If the memento would be on another page, bail out immediately.
|
||||
if (!Page::OnSamePage(object_address, last_memento_word_address)) {
|
||||
return AllocationMemento();
|
||||
}
|
||||
HeapObject candidate = HeapObject::FromAddress(memento_address);
|
||||
ObjectSlot candidate_map_slot = candidate.map_slot();
|
||||
// This fast check may peek at an uninitialized word. However, the slow check
|
||||
// below (memento_address == top) ensures that this is safe. Mark the word as
|
||||
// initialized to silence MemorySanitizer warnings.
|
||||
MSAN_MEMORY_IS_INITIALIZED(candidate_map_slot.address(), kTaggedSize);
|
||||
if (!candidate_map_slot.contains_map_value(
|
||||
ReadOnlyRoots(this).allocation_memento_map().ptr())) {
|
||||
return AllocationMemento();
|
||||
}
|
||||
|
||||
// Bail out if the memento is below the age mark, which can happen when
|
||||
// mementos survived because a page got moved within new space.
|
||||
Page* object_page = Page::FromAddress(object_address);
|
||||
if (object_page->IsFlagSet(Page::NEW_SPACE_BELOW_AGE_MARK)) {
|
||||
Address age_mark =
|
||||
reinterpret_cast<SemiSpace*>(object_page->owner())->age_mark();
|
||||
if (!object_page->Contains(age_mark)) {
|
||||
return AllocationMemento();
|
||||
}
|
||||
// Do an exact check in the case where the age mark is on the same page.
|
||||
if (object_address < age_mark) {
|
||||
return AllocationMemento();
|
||||
}
|
||||
}
|
||||
|
||||
AllocationMemento memento_candidate = AllocationMemento::cast(candidate);
|
||||
|
||||
// Depending on what the memento is used for, we might need to perform
|
||||
// additional checks.
|
||||
Address top;
|
||||
switch (mode) {
|
||||
case Heap::kForGC:
|
||||
return memento_candidate;
|
||||
case Heap::kForRuntime:
|
||||
if (memento_candidate.is_null()) return AllocationMemento();
|
||||
// Either the object is the last object in the new space, or there is
|
||||
// another object of at least word size (the header map word) following
|
||||
// it, so suffices to compare ptr and top here.
|
||||
top = NewSpaceTop();
|
||||
DCHECK(memento_address >= new_space()->limit() ||
|
||||
memento_address +
|
||||
ALIGN_TO_ALLOCATION_ALIGNMENT(AllocationMemento::kSize) <=
|
||||
top);
|
||||
if ((memento_address != top) && memento_candidate.IsValid()) {
|
||||
return memento_candidate;
|
||||
}
|
||||
return AllocationMemento();
|
||||
default:
|
||||
UNREACHABLE();
|
||||
}
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
void Heap::UpdateAllocationSite(Map map, HeapObject object,
|
||||
PretenuringFeedbackMap* pretenuring_feedback) {
|
||||
DCHECK_NE(pretenuring_feedback, &global_pretenuring_feedback_);
|
||||
#ifdef DEBUG
|
||||
BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
|
||||
DCHECK_IMPLIES(chunk->IsToPage(),
|
||||
v8_flags.minor_mc ||
|
||||
chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION));
|
||||
DCHECK_IMPLIES(!chunk->InYoungGeneration(),
|
||||
chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION));
|
||||
#endif
|
||||
if (!v8_flags.allocation_site_pretenuring ||
|
||||
!AllocationSite::CanTrack(map.instance_type())) {
|
||||
return;
|
||||
}
|
||||
AllocationMemento memento_candidate =
|
||||
FindAllocationMemento<kForGC>(map, object);
|
||||
if (memento_candidate.is_null()) return;
|
||||
|
||||
// Entering cached feedback is used in the parallel case. We are not allowed
|
||||
// to dereference the allocation site and rather have to postpone all checks
|
||||
// till actually merging the data.
|
||||
Address key = memento_candidate.GetAllocationSiteUnchecked();
|
||||
(*pretenuring_feedback)[AllocationSite::unchecked_cast(Object(key))]++;
|
||||
}
|
||||
|
||||
bool Heap::IsPendingAllocationInternal(HeapObject object) {
|
||||
DCHECK(deserialization_complete());
|
||||
|
||||
|
src/heap/heap.cc (228 changed lines)
@@ -75,6 +75,7 @@
|
||||
#include "src/heap/objects-visiting.h"
|
||||
#include "src/heap/paged-spaces-inl.h"
|
||||
#include "src/heap/parked-scope.h"
|
||||
#include "src/heap/pretenuring-handler.h"
|
||||
#include "src/heap/read-only-heap.h"
|
||||
#include "src/heap/remembered-set.h"
|
||||
#include "src/heap/safepoint.h"
|
||||
@@ -220,7 +221,6 @@ Heap::Heap()
|
||||
: isolate_(isolate()),
|
||||
heap_allocator_(this),
|
||||
memory_pressure_level_(MemoryPressureLevel::kNone),
|
||||
global_pretenuring_feedback_(kInitialFeedbackCapacity),
|
||||
safepoint_(std::make_unique<IsolateSafepoint>(this)),
|
||||
external_string_table_(this),
|
||||
allocation_type_for_in_place_internalizable_strings_(
|
||||
@@ -229,7 +229,8 @@ Heap::Heap()
|
||||
collection_barrier_(new CollectionBarrier(this)),
|
||||
marking_state_(isolate_),
|
||||
non_atomic_marking_state_(isolate_),
|
||||
atomic_marking_state_(isolate_) {
|
||||
atomic_marking_state_(isolate_),
|
||||
pretenuring_handler_(this) {
|
||||
// Ensure old_generation_size_ is a multiple of kPageSize.
|
||||
DCHECK_EQ(0, max_old_generation_size() & (Page::kPageSize - 1));
|
||||
|
||||
@@ -1132,31 +1133,6 @@ size_t Heap::UsedGlobalHandlesSize() {
|
||||
return isolate_->global_handles()->UsedSize();
|
||||
}
|
||||
|
||||
void Heap::MergeAllocationSitePretenuringFeedback(
|
||||
const PretenuringFeedbackMap& local_pretenuring_feedback) {
|
||||
PtrComprCageBase cage_base(isolate());
|
||||
AllocationSite site;
|
||||
for (auto& site_and_count : local_pretenuring_feedback) {
|
||||
site = site_and_count.first;
|
||||
MapWord map_word = site.map_word(cage_base, kRelaxedLoad);
|
||||
if (map_word.IsForwardingAddress()) {
|
||||
site = AllocationSite::cast(map_word.ToForwardingAddress());
|
||||
}
|
||||
|
||||
// We have not validated the allocation site yet, since we have not
|
||||
// dereferenced the site during collecting information.
|
||||
// This is an inlined check of AllocationMemento::IsValid.
|
||||
if (!site.IsAllocationSite() || site.IsZombie()) continue;
|
||||
|
||||
const int value = static_cast<int>(site_and_count.second);
|
||||
DCHECK_LT(0, value);
|
||||
if (site.IncrementMementoFoundCount(value)) {
|
||||
// For sites in the global map the count is accessed through the site.
|
||||
global_pretenuring_feedback_.insert(std::make_pair(site, 0));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void Heap::AddAllocationObserversToAllSpaces(
|
||||
AllocationObserver* observer, AllocationObserver* new_space_observer) {
|
||||
DCHECK(observer && new_space_observer);
|
||||
@@ -1198,197 +1174,6 @@ void Heap::PublishPendingAllocations() {
|
||||
code_lo_space_->ResetPendingObject();
|
||||
}
|
||||
|
||||
namespace {
|
||||
inline bool MakePretenureDecision(
|
||||
AllocationSite site, AllocationSite::PretenureDecision current_decision,
|
||||
double ratio, bool maximum_size_scavenge) {
|
||||
// Here we just allow state transitions from undecided or maybe tenure
|
||||
// to don't tenure, maybe tenure, or tenure.
|
||||
if ((current_decision == AllocationSite::kUndecided ||
|
||||
current_decision == AllocationSite::kMaybeTenure)) {
|
||||
if (ratio >= AllocationSite::kPretenureRatio) {
|
||||
// We just transition into tenure state when the semi-space was at
|
||||
// maximum capacity.
|
||||
if (maximum_size_scavenge) {
|
||||
site.set_deopt_dependent_code(true);
|
||||
site.set_pretenure_decision(AllocationSite::kTenure);
|
||||
// Currently we just need to deopt when we make a state transition to
|
||||
// tenure.
|
||||
return true;
|
||||
}
|
||||
site.set_pretenure_decision(AllocationSite::kMaybeTenure);
|
||||
} else {
|
||||
site.set_pretenure_decision(AllocationSite::kDontTenure);
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// Clear feedback calculation fields until the next gc.
|
||||
inline void ResetPretenuringFeedback(AllocationSite site) {
|
||||
site.set_memento_found_count(0);
|
||||
site.set_memento_create_count(0);
|
||||
}
|
||||
|
||||
inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite site,
|
||||
bool maximum_size_scavenge) {
|
||||
bool deopt = false;
|
||||
int create_count = site.memento_create_count();
|
||||
int found_count = site.memento_found_count();
|
||||
bool minimum_mementos_created =
|
||||
create_count >= AllocationSite::kPretenureMinimumCreated;
|
||||
double ratio =
|
||||
minimum_mementos_created || v8_flags.trace_pretenuring_statistics
|
||||
? static_cast<double>(found_count) / create_count
|
||||
: 0.0;
|
||||
AllocationSite::PretenureDecision current_decision =
|
||||
site.pretenure_decision();
|
||||
|
||||
if (minimum_mementos_created) {
|
||||
deopt = MakePretenureDecision(site, current_decision, ratio,
|
||||
maximum_size_scavenge);
|
||||
}
|
||||
|
||||
if (v8_flags.trace_pretenuring_statistics) {
|
||||
PrintIsolate(isolate,
|
||||
"pretenuring: AllocationSite(%p): (created, found, ratio) "
|
||||
"(%d, %d, %f) %s => %s\n",
|
||||
reinterpret_cast<void*>(site.ptr()), create_count, found_count,
|
||||
ratio, site.PretenureDecisionName(current_decision),
|
||||
site.PretenureDecisionName(site.pretenure_decision()));
|
||||
}
|
||||
|
||||
ResetPretenuringFeedback(site);
|
||||
return deopt;
|
||||
}
|
||||
|
||||
bool PretenureAllocationSiteManually(Isolate* isolate, AllocationSite site) {
|
||||
AllocationSite::PretenureDecision current_decision =
|
||||
site.pretenure_decision();
|
||||
bool deopt = true;
|
||||
if (current_decision == AllocationSite::kUndecided ||
|
||||
current_decision == AllocationSite::kMaybeTenure) {
|
||||
site.set_deopt_dependent_code(true);
|
||||
site.set_pretenure_decision(AllocationSite::kTenure);
|
||||
} else {
|
||||
deopt = false;
|
||||
}
|
||||
if (v8_flags.trace_pretenuring_statistics) {
|
||||
PrintIsolate(isolate,
|
||||
"pretenuring manually requested: AllocationSite(%p): "
|
||||
"%s => %s\n",
|
||||
reinterpret_cast<void*>(site.ptr()),
|
||||
site.PretenureDecisionName(current_decision),
|
||||
site.PretenureDecisionName(site.pretenure_decision()));
|
||||
}
|
||||
|
||||
ResetPretenuringFeedback(site);
|
||||
return deopt;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite site) {
|
||||
global_pretenuring_feedback_.erase(site);
|
||||
}
|
||||
|
||||
bool Heap::DeoptMaybeTenuredAllocationSites() {
|
||||
return new_space_ && new_space_->IsAtMaximumCapacity() &&
|
||||
maximum_size_minor_gcs_ == 0;
|
||||
}
|
||||
|
||||
void Heap::ProcessPretenuringFeedback() {
|
||||
bool trigger_deoptimization = false;
|
||||
if (v8_flags.allocation_site_pretenuring) {
|
||||
int tenure_decisions = 0;
|
||||
int dont_tenure_decisions = 0;
|
||||
int allocation_mementos_found = 0;
|
||||
int allocation_sites = 0;
|
||||
int active_allocation_sites = 0;
|
||||
|
||||
AllocationSite site;
|
||||
|
||||
// Step 1: Digest feedback for recorded allocation sites.
|
||||
bool maximum_size_minor_gc = MaximumSizeMinorGC();
|
||||
for (auto& site_and_count : global_pretenuring_feedback_) {
|
||||
allocation_sites++;
|
||||
site = site_and_count.first;
|
||||
// Count is always access through the site.
|
||||
DCHECK_EQ(0, site_and_count.second);
|
||||
int found_count = site.memento_found_count();
|
||||
// An entry in the storage does not imply that the count is > 0 because
|
||||
// allocation sites might have been reset due to too many objects dying
|
||||
// in old space.
|
||||
if (found_count > 0) {
|
||||
DCHECK(site.IsAllocationSite());
|
||||
active_allocation_sites++;
|
||||
allocation_mementos_found += found_count;
|
||||
if (DigestPretenuringFeedback(isolate_, site, maximum_size_minor_gc)) {
|
||||
trigger_deoptimization = true;
|
||||
}
|
||||
if (site.GetAllocationType() == AllocationType::kOld) {
|
||||
tenure_decisions++;
|
||||
} else {
|
||||
dont_tenure_decisions++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Step 2: Pretenure allocation sites for manual requests.
|
||||
if (allocation_sites_to_pretenure_) {
|
||||
while (!allocation_sites_to_pretenure_->empty()) {
|
||||
auto pretenure_site = allocation_sites_to_pretenure_->Pop();
|
||||
if (PretenureAllocationSiteManually(isolate_, pretenure_site)) {
|
||||
trigger_deoptimization = true;
|
||||
}
|
||||
}
|
||||
allocation_sites_to_pretenure_.reset();
|
||||
}
|
||||
|
||||
// Step 3: Deopt maybe tenured allocation sites if necessary.
|
||||
bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
|
||||
if (deopt_maybe_tenured) {
|
||||
ForeachAllocationSite(
|
||||
allocation_sites_list(),
|
||||
[&allocation_sites, &trigger_deoptimization](AllocationSite site) {
|
||||
DCHECK(site.IsAllocationSite());
|
||||
allocation_sites++;
|
||||
if (site.IsMaybeTenure()) {
|
||||
site.set_deopt_dependent_code(true);
|
||||
trigger_deoptimization = true;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if (trigger_deoptimization) {
|
||||
isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
|
||||
}
|
||||
|
||||
if (v8_flags.trace_pretenuring_statistics &&
|
||||
(allocation_mementos_found > 0 || tenure_decisions > 0 ||
|
||||
dont_tenure_decisions > 0)) {
|
||||
PrintIsolate(isolate(),
|
||||
"pretenuring: deopt_maybe_tenured=%d visited_sites=%d "
|
||||
"active_sites=%d "
|
||||
"mementos=%d tenured=%d not_tenured=%d\n",
|
||||
deopt_maybe_tenured ? 1 : 0, allocation_sites,
|
||||
active_allocation_sites, allocation_mementos_found,
|
||||
tenure_decisions, dont_tenure_decisions);
|
||||
}
|
||||
|
||||
global_pretenuring_feedback_.clear();
|
||||
global_pretenuring_feedback_.reserve(kInitialFeedbackCapacity);
|
||||
}
|
||||
}
|
||||
|
||||
void Heap::PretenureAllocationSiteOnNextCollection(AllocationSite site) {
|
||||
if (!allocation_sites_to_pretenure_) {
|
||||
allocation_sites_to_pretenure_.reset(
|
||||
new GlobalHandleVector<AllocationSite>(this));
|
||||
}
|
||||
allocation_sites_to_pretenure_->Push(site);
|
||||
}
|
||||
|
||||
void Heap::InvalidateCodeDeoptimizationData(Code code) {
|
||||
CodePageMemoryModificationScope modification_scope(code);
|
||||
code.set_deoptimization_data(ReadOnlyRoots(this).empty_fixed_array());
|
||||
@@ -2390,7 +2175,7 @@ size_t Heap::PerformGarbageCollection(
|
||||
Scavenge();
|
||||
}
|
||||
|
||||
ProcessPretenuringFeedback();
|
||||
pretenuring_handler_.ProcessPretenuringFeedback();
|
||||
|
||||
UpdateSurvivalStatistics(static_cast<int>(start_young_generation_size));
|
||||
ConfigureInitialOldGenerationSize();
|
||||
@@ -3131,7 +2916,8 @@ void Heap::ResetAllAllocationSitesDependentCode(AllocationType allocation) {
|
||||
site.ResetPretenureDecision();
|
||||
site.set_deopt_dependent_code(true);
|
||||
marked = true;
|
||||
RemoveAllocationSitePretenuringFeedback(site);
|
||||
pretenuring_handler_
|
||||
.RemoveAllocationSitePretenuringFeedback(site);
|
||||
return;
|
||||
}
|
||||
});
|
||||
@@ -6022,7 +5808,7 @@ void Heap::TearDown() {
|
||||
|
||||
tracer_.reset();
|
||||
|
||||
allocation_sites_to_pretenure_.reset();
|
||||
pretenuring_handler_.reset();
|
||||
|
||||
shared_space_allocator_.reset();
|
||||
shared_map_allocator_.reset();
|
||||
|
@@ -32,6 +32,7 @@
|
||||
#include "src/heap/gc-callbacks.h"
|
||||
#include "src/heap/heap-allocator.h"
|
||||
#include "src/heap/marking-state.h"
|
||||
#include "src/heap/pretenuring-handler.h"
|
||||
#include "src/init/heap-symbols.h"
|
||||
#include "src/objects/allocation-site.h"
|
||||
#include "src/objects/fixed-array.h"
|
||||
@@ -99,8 +100,6 @@ class CppHeap;
|
||||
class GCIdleTimeHandler;
|
||||
class GCIdleTimeHeapState;
|
||||
class GCTracer;
|
||||
template <typename T>
|
||||
class GlobalHandleVector;
|
||||
class IsolateSafepoint;
|
||||
class HeapObjectAllocationTracker;
|
||||
class HeapObjectsFilter;
|
||||
@@ -257,7 +256,6 @@ class Heap {
|
||||
// and the key of the entry is in new-space. Such keys do not appear in the
|
||||
// usual OLD_TO_NEW remembered set.
|
||||
EphemeronRememberedSet ephemeron_remembered_set_;
|
||||
enum FindMementoMode { kForRuntime, kForGC };
|
||||
|
||||
enum class HeapGrowingMode { kSlow, kConservative, kMinimal, kDefault };
|
||||
|
||||
@@ -337,9 +335,6 @@ class Heap {
|
||||
std::atomic<int64_t> low_since_mark_compact_{0};
|
||||
};
|
||||
|
||||
using PretenuringFeedbackMap =
|
||||
std::unordered_map<AllocationSite, size_t, Object::Hasher>;
|
||||
|
||||
// Taking this mutex prevents the GC from entering a phase that relocates
|
||||
// object references.
|
||||
base::Mutex* relocation_mutex() { return &relocation_mutex_; }
|
||||
@@ -698,11 +693,6 @@ class Heap {
|
||||
|
||||
bool IsGCWithStack() const;
|
||||
|
||||
// If an object has an AllocationMemento trailing it, return it, otherwise
|
||||
// return a null AllocationMemento.
|
||||
template <FindMementoMode mode>
|
||||
inline AllocationMemento FindAllocationMemento(Map map, HeapObject object);
|
||||
|
||||
// Performs GC after background allocation failure.
|
||||
void CollectGarbageForBackground(LocalHeap* local_heap);
|
||||
|
||||
@@ -1186,8 +1176,6 @@ class Heap {
|
||||
|
||||
void DeoptMarkedAllocationSites();
|
||||
|
||||
bool DeoptMaybeTenuredAllocationSites();
|
||||
|
||||
// ===========================================================================
|
||||
// Embedder heap tracer support. =============================================
|
||||
// ===========================================================================
|
||||
@@ -1548,27 +1536,6 @@ class Heap {
|
||||
V8_EXPORT_PRIVATE void* AllocateExternalBackingStore(
|
||||
const std::function<void*(size_t)>& allocate, size_t byte_length);
|
||||
|
||||
// ===========================================================================
|
||||
// Allocation site tracking. =================================================
|
||||
// ===========================================================================
|
||||
|
||||
// Updates the AllocationSite of a given {object}. The entry (including the
|
||||
// count) is cached on the local pretenuring feedback.
|
||||
inline void UpdateAllocationSite(
|
||||
Map map, HeapObject object, PretenuringFeedbackMap* pretenuring_feedback);
|
||||
|
||||
// Merges local pretenuring feedback into the global one. Note that this
|
||||
// method needs to be called after evacuation, as allocation sites may be
|
||||
// evacuated and this method resolves forward pointers accordingly.
|
||||
void MergeAllocationSitePretenuringFeedback(
|
||||
const PretenuringFeedbackMap& local_pretenuring_feedback);
|
||||
|
||||
// Adds an allocation site to the list of sites to be pretenured during the
|
||||
// next collection. Added allocation sites are pretenured independent of
|
||||
// their feedback.
|
||||
V8_EXPORT_PRIVATE void PretenureAllocationSiteOnNextCollection(
|
||||
AllocationSite site);
|
||||
|
||||
// ===========================================================================
|
||||
// Allocation tracking. ======================================================
|
||||
// ===========================================================================
|
||||
@@ -1708,6 +1675,8 @@ class Heap {
|
||||
|
||||
AtomicMarkingState* atomic_marking_state() { return &atomic_marking_state_; }
|
||||
|
||||
PretenturingHandler* pretenuring_handler() { return &pretenuring_handler_; }
|
||||
|
||||
private:
|
||||
class AllocationTrackerForDebugging;
|
||||
|
||||
@@ -1788,8 +1757,6 @@ class Heap {
|
||||
|
||||
static const int kMaxMarkCompactsInIdleRound = 7;
|
||||
|
||||
static const int kInitialFeedbackCapacity = 256;
|
||||
|
||||
Heap();
|
||||
~Heap();
|
||||
|
||||
@@ -1925,18 +1892,6 @@ class Heap {
|
||||
void InvokeIncrementalMarkingPrologueCallbacks();
|
||||
void InvokeIncrementalMarkingEpilogueCallbacks();
|
||||
|
||||
// ===========================================================================
|
||||
// Pretenuring. ==============================================================
|
||||
// ===========================================================================
|
||||
|
||||
// Pretenuring decisions are made based on feedback collected during new space
|
||||
// evacuation. Note that between feedback collection and calling this method
|
||||
// object in old space must not move.
|
||||
void ProcessPretenuringFeedback();
|
||||
|
||||
// Removes an entry from the global pretenuring storage.
|
||||
void RemoveAllocationSitePretenuringFeedback(AllocationSite site);
|
||||
|
||||
// ===========================================================================
|
||||
// Actual GC. ================================================================
|
||||
// ===========================================================================
|
||||
@@ -2387,16 +2342,6 @@ class Heap {
|
||||
// The size of global memory after the last MarkCompact GC.
|
||||
size_t global_memory_at_last_gc_ = 0;
|
||||
|
||||
// The feedback storage is used to store allocation sites (keys) and how often
|
||||
// they have been visited (values) by finding a memento behind an object. The
|
||||
// storage is only alive temporary during a GC. The invariant is that all
|
||||
// pointers in this map are already fixed, i.e., they do not point to
|
||||
// forwarding pointers.
|
||||
PretenuringFeedbackMap global_pretenuring_feedback_;
|
||||
|
||||
std::unique_ptr<GlobalHandleVector<AllocationSite>>
|
||||
allocation_sites_to_pretenure_;
|
||||
|
||||
char trace_ring_buffer_[kTraceRingBufferSize];
|
||||
|
||||
// If it's not full then the data is from 0 to ring_buffer_end_. If it's
|
||||
@@ -2467,6 +2412,8 @@ class Heap {
|
||||
NonAtomicMarkingState non_atomic_marking_state_;
|
||||
AtomicMarkingState atomic_marking_state_;
|
||||
|
||||
PretenturingHandler pretenuring_handler_;
|
||||
|
||||
// Classes in "heap" can be friends.
|
||||
friend class AlwaysAllocateScope;
|
||||
friend class ArrayBufferCollector;
|
||||
@@ -2500,6 +2447,7 @@ class Heap {
|
||||
friend class ObjectStatsCollector;
|
||||
friend class Page;
|
||||
friend class PagedSpaceBase;
|
||||
friend class PretenturingHandler;
|
||||
friend class ReadOnlyRoots;
|
||||
friend class Scavenger;
|
||||
friend class ScavengerCollector;
|
||||
|
@@ -47,6 +47,8 @@
|
||||
#include "src/heap/object-stats.h"
|
||||
#include "src/heap/objects-visiting-inl.h"
|
||||
#include "src/heap/parallel-work-item.h"
|
||||
#include "src/heap/pretenuring-handler-inl.h"
|
||||
#include "src/heap/pretenuring-handler.h"
|
||||
#include "src/heap/read-only-heap.h"
|
||||
#include "src/heap/read-only-spaces.h"
|
||||
#include "src/heap/remembered-set.h"
|
||||
@@ -1947,13 +1949,14 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
|
||||
Heap* heap, EvacuationAllocator* local_allocator,
|
||||
ConcurrentAllocator* shared_old_allocator,
|
||||
RecordMigratedSlotVisitor* record_visitor,
|
||||
Heap::PretenuringFeedbackMap* local_pretenuring_feedback,
|
||||
PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback,
|
||||
AlwaysPromoteYoung always_promote_young)
|
||||
: EvacuateVisitorBase(heap, local_allocator, shared_old_allocator,
|
||||
record_visitor),
|
||||
buffer_(LocalAllocationBuffer::InvalidBuffer()),
|
||||
promoted_size_(0),
|
||||
semispace_copied_size_(0),
|
||||
pretenuring_handler_(heap_->pretenuring_handler()),
|
||||
local_pretenuring_feedback_(local_pretenuring_feedback),
|
||||
is_incremental_marking_(heap->incremental_marking()->IsMarking()),
|
||||
always_promote_young_(always_promote_young) {}
|
||||
@@ -1963,8 +1966,8 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
|
||||
HeapObject target_object;
|
||||
|
||||
if (always_promote_young_ == AlwaysPromoteYoung::kYes) {
|
||||
heap_->UpdateAllocationSite(object.map(), object,
|
||||
local_pretenuring_feedback_);
|
||||
pretenuring_handler_->UpdateAllocationSite(object.map(), object,
|
||||
local_pretenuring_feedback_);
|
||||
|
||||
if (!TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
|
||||
heap_->FatalProcessOutOfMemory(
|
||||
@@ -1985,8 +1988,8 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
|
||||
return true;
|
||||
}
|
||||
|
||||
heap_->UpdateAllocationSite(object.map(), object,
|
||||
local_pretenuring_feedback_);
|
||||
pretenuring_handler_->UpdateAllocationSite(object.map(), object,
|
||||
local_pretenuring_feedback_);
|
||||
|
||||
HeapObject target;
|
||||
AllocationSpace space = AllocateTargetObject(object, size, &target);
|
||||
@@ -2048,7 +2051,8 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
|
||||
LocalAllocationBuffer buffer_;
|
||||
intptr_t promoted_size_;
|
||||
intptr_t semispace_copied_size_;
|
||||
Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
|
||||
PretenturingHandler* const pretenuring_handler_;
|
||||
PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback_;
|
||||
bool is_incremental_marking_;
|
||||
AlwaysPromoteYoung always_promote_young_;
|
||||
};
|
||||
@@ -2058,10 +2062,11 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
|
||||
public:
|
||||
explicit EvacuateNewSpacePageVisitor(
|
||||
Heap* heap, RecordMigratedSlotVisitor* record_visitor,
|
||||
Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
|
||||
PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback)
|
||||
: heap_(heap),
|
||||
record_visitor_(record_visitor),
|
||||
moved_bytes_(0),
|
||||
pretenuring_handler_(heap_->pretenuring_handler()),
|
||||
local_pretenuring_feedback_(local_pretenuring_feedback) {}
|
||||
|
||||
static void Move(Page* page) {
|
||||
@@ -2080,12 +2085,12 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
|
||||
inline bool Visit(HeapObject object, int size) override {
|
||||
if (mode == NEW_TO_NEW) {
|
||||
DCHECK(!v8_flags.minor_mc);
|
||||
heap_->UpdateAllocationSite(object.map(), object,
|
||||
local_pretenuring_feedback_);
|
||||
pretenuring_handler_->UpdateAllocationSite(object.map(), object,
|
||||
local_pretenuring_feedback_);
|
||||
} else if (mode == NEW_TO_OLD) {
|
||||
if (v8_flags.minor_mc) {
|
||||
heap_->UpdateAllocationSite(object.map(), object,
|
||||
local_pretenuring_feedback_);
|
||||
pretenuring_handler_->UpdateAllocationSite(object.map(), object,
|
||||
local_pretenuring_feedback_);
|
||||
}
|
||||
DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(object));
|
||||
PtrComprCageBase cage_base = GetPtrComprCageBase(object);
|
||||
@@ -2104,7 +2109,8 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
|
||||
Heap* heap_;
|
||||
RecordMigratedSlotVisitor* record_visitor_;
|
||||
intptr_t moved_bytes_;
|
||||
Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
|
||||
PretenturingHandler* const pretenuring_handler_;
|
||||
PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback_;
|
||||
};
|
||||
|
||||
class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
|
||||
@@ -4189,7 +4195,8 @@ class Evacuator : public Malloced {
|
||||
EvacuationAllocator* local_allocator,
|
||||
AlwaysPromoteYoung always_promote_young)
|
||||
: heap_(heap),
|
||||
local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
|
||||
local_pretenuring_feedback_(
|
||||
PretenturingHandler::kInitialFeedbackCapacity),
|
||||
shared_old_allocator_(CreateSharedOldAllocator(heap_)),
|
||||
new_space_visitor_(heap_, local_allocator, shared_old_allocator_.get(),
|
||||
record_visitor, &local_pretenuring_feedback_,
|
||||
@@ -4222,8 +4229,6 @@ class Evacuator : public Malloced {
|
||||
virtual GCTracer::Scope::ScopeId GetTracingScope() = 0;
|
||||
|
||||
protected:
|
||||
static const int kInitialLocalPretenuringFeedbackCapacity = 256;
|
||||
|
||||
// |saved_live_bytes| returns the live bytes of the page that was processed.
|
||||
virtual bool RawEvacuatePage(MemoryChunk* chunk,
|
||||
intptr_t* saved_live_bytes) = 0;
|
||||
@@ -4237,7 +4242,7 @@ class Evacuator : public Malloced {
|
||||
|
||||
Heap* heap_;
|
||||
|
||||
Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
|
||||
PretenturingHandler::PretenuringFeedbackMap local_pretenuring_feedback_;
|
||||
|
||||
// Allocator for the shared heap.
|
||||
std::unique_ptr<ConcurrentAllocator> shared_old_allocator_;
|
||||
@@ -4299,7 +4304,8 @@ void Evacuator::Finalize() {
|
||||
new_space_visitor_.semispace_copied_size() +
|
||||
new_to_old_page_visitor_.moved_bytes() +
|
||||
new_to_new_page_visitor_.moved_bytes());
|
||||
heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
|
||||
heap()->pretenuring_handler()->MergeAllocationSitePretenuringFeedback(
|
||||
local_pretenuring_feedback_);
|
||||
}
|
||||
|
||||
class FullEvacuator : public Evacuator {
|
||||
|
src/heap/pretenuring-handler-inl.h (new file, 112 lines)
@@ -0,0 +1,112 @@
|
||||
// Copyright 2022 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef V8_HEAP_PRETENURING_HANDLER_INL_H_
|
||||
#define V8_HEAP_PRETENURING_HANDLER_INL_H_
|
||||
|
||||
#include "src/base/sanitizer/msan.h"
|
||||
#include "src/heap/memory-chunk.h"
|
||||
#include "src/heap/new-spaces.h"
|
||||
#include "src/heap/pretenuring-handler.h"
|
||||
#include "src/heap/spaces.h"
|
||||
#include "src/objects/allocation-site-inl.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
void PretenturingHandler::UpdateAllocationSite(
|
||||
Map map, HeapObject object, PretenuringFeedbackMap* pretenuring_feedback) {
|
||||
DCHECK_NE(pretenuring_feedback, &global_pretenuring_feedback_);
|
||||
#ifdef DEBUG
|
||||
BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
|
||||
DCHECK_IMPLIES(chunk->IsToPage(),
|
||||
v8_flags.minor_mc ||
|
||||
chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION));
|
||||
DCHECK_IMPLIES(!chunk->InYoungGeneration(),
|
||||
chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION));
|
||||
#endif
|
||||
if (!v8_flags.allocation_site_pretenuring ||
|
||||
!AllocationSite::CanTrack(map.instance_type())) {
|
||||
return;
|
||||
}
|
||||
AllocationMemento memento_candidate =
|
||||
FindAllocationMemento<kForGC>(map, object);
|
||||
if (memento_candidate.is_null()) return;
|
||||
|
||||
// Entering cached feedback is used in the parallel case. We are not allowed
|
||||
// to dereference the allocation site and rather have to postpone all checks
|
||||
// till actually merging the data.
|
||||
Address key = memento_candidate.GetAllocationSiteUnchecked();
|
||||
(*pretenuring_feedback)[AllocationSite::unchecked_cast(Object(key))]++;
|
||||
}
|
||||
|
||||
template <PretenturingHandler::FindMementoMode mode>
|
||||
AllocationMemento PretenturingHandler::FindAllocationMemento(
|
||||
Map map, HeapObject object) {
|
||||
Address object_address = object.address();
|
||||
Address memento_address =
|
||||
object_address + ALIGN_TO_ALLOCATION_ALIGNMENT(object.SizeFromMap(map));
|
||||
Address last_memento_word_address = memento_address + kTaggedSize;
|
||||
// If the memento would be on another page, bail out immediately.
|
||||
if (!Page::OnSamePage(object_address, last_memento_word_address)) {
|
||||
return AllocationMemento();
|
||||
}
|
||||
HeapObject candidate = HeapObject::FromAddress(memento_address);
|
||||
ObjectSlot candidate_map_slot = candidate.map_slot();
|
||||
// This fast check may peek at an uninitialized word. However, the slow check
|
||||
// below (memento_address == top) ensures that this is safe. Mark the word as
|
||||
// initialized to silence MemorySanitizer warnings.
|
||||
MSAN_MEMORY_IS_INITIALIZED(candidate_map_slot.address(), kTaggedSize);
|
||||
if (!candidate_map_slot.contains_map_value(
|
||||
ReadOnlyRoots(heap_).allocation_memento_map().ptr())) {
|
||||
return AllocationMemento();
|
||||
}
|
||||
|
||||
// Bail out if the memento is below the age mark, which can happen when
|
||||
// mementos survived because a page got moved within new space.
|
||||
Page* object_page = Page::FromAddress(object_address);
|
||||
if (object_page->IsFlagSet(Page::NEW_SPACE_BELOW_AGE_MARK)) {
|
||||
Address age_mark =
|
||||
reinterpret_cast<SemiSpace*>(object_page->owner())->age_mark();
|
||||
if (!object_page->Contains(age_mark)) {
|
||||
return AllocationMemento();
|
||||
}
|
||||
// Do an exact check in the case where the age mark is on the same page.
|
||||
if (object_address < age_mark) {
|
||||
return AllocationMemento();
|
||||
}
|
||||
}
|
||||
|
||||
AllocationMemento memento_candidate = AllocationMemento::cast(candidate);
|
||||
|
||||
// Depending on what the memento is used for, we might need to perform
|
||||
// additional checks.
|
||||
Address top;
|
||||
switch (mode) {
|
||||
case kForGC:
|
||||
return memento_candidate;
|
||||
case kForRuntime:
|
||||
if (memento_candidate.is_null()) return AllocationMemento();
|
||||
// Either the object is the last object in the new space, or there is
|
||||
// another object of at least word size (the header map word) following
|
||||
// it, so suffices to compare ptr and top here.
|
||||
top = heap_->NewSpaceTop();
|
||||
DCHECK(memento_address >= heap_->new_space()->limit() ||
|
||||
memento_address +
|
||||
ALIGN_TO_ALLOCATION_ALIGNMENT(AllocationMemento::kSize) <=
|
||||
top);
|
||||
if ((memento_address != top) && memento_candidate.IsValid()) {
|
||||
return memento_candidate;
|
||||
}
|
||||
return AllocationMemento();
|
||||
default:
|
||||
UNREACHABLE();
|
||||
}
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
} // namespace v8
|
||||
|
||||
#endif // V8_HEAP_PRETENURING_HANDLER_INL_H_
|
src/heap/pretenuring-handler.cc (new file, 244 lines)
@@ -0,0 +1,244 @@
|
||||
// Copyright 2022 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "src/heap/pretenuring-handler.h"
|
||||
|
||||
#include "src/execution/isolate.h"
|
||||
#include "src/handles/global-handles-inl.h"
|
||||
#include "src/heap/new-spaces.h"
|
||||
#include "src/objects/allocation-site-inl.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
PretenturingHandler::PretenturingHandler(Heap* heap)
|
||||
: heap_(heap), global_pretenuring_feedback_(kInitialFeedbackCapacity) {}
|
||||
|
||||
PretenturingHandler::~PretenturingHandler() = default;
|
||||
|
||||
void PretenturingHandler::MergeAllocationSitePretenuringFeedback(
|
||||
const PretenuringFeedbackMap& local_pretenuring_feedback) {
|
||||
PtrComprCageBase cage_base(heap_->isolate());
|
||||
AllocationSite site;
|
||||
for (auto& site_and_count : local_pretenuring_feedback) {
|
||||
site = site_and_count.first;
|
||||
MapWord map_word = site.map_word(cage_base, kRelaxedLoad);
|
||||
if (map_word.IsForwardingAddress()) {
|
||||
site = AllocationSite::cast(map_word.ToForwardingAddress());
|
||||
}
|
||||
|
||||
// We have not validated the allocation site yet, since we have not
|
||||
// dereferenced the site during collecting information.
|
||||
// This is an inlined check of AllocationMemento::IsValid.
|
||||
if (!site.IsAllocationSite() || site.IsZombie()) continue;
|
||||
|
||||
const int value = static_cast<int>(site_and_count.second);
|
||||
DCHECK_LT(0, value);
|
||||
if (site.IncrementMementoFoundCount(value)) {
|
||||
// For sites in the global map the count is accessed through the site.
|
||||
global_pretenuring_feedback_.insert(std::make_pair(site, 0));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool PretenturingHandler::DeoptMaybeTenuredAllocationSites() const {
|
||||
NewSpace* new_space = heap_->new_space();
|
||||
return new_space && new_space->IsAtMaximumCapacity() &&
|
||||
!heap_->MaximumSizeMinorGC();
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
inline bool MakePretenureDecision(
|
||||
AllocationSite site, AllocationSite::PretenureDecision current_decision,
|
||||
double ratio, bool maximum_size_scavenge) {
|
||||
// Here we just allow state transitions from undecided or maybe tenure
|
||||
// to don't tenure, maybe tenure, or tenure.
|
||||
if ((current_decision == AllocationSite::kUndecided ||
|
||||
current_decision == AllocationSite::kMaybeTenure)) {
|
||||
if (ratio >= AllocationSite::kPretenureRatio) {
|
||||
// We just transition into tenure state when the semi-space was at
|
||||
// maximum capacity.
|
||||
if (maximum_size_scavenge) {
|
||||
site.set_deopt_dependent_code(true);
|
||||
site.set_pretenure_decision(AllocationSite::kTenure);
|
||||
// Currently we just need to deopt when we make a state transition to
|
||||
// tenure.
|
||||
return true;
|
||||
}
|
||||
site.set_pretenure_decision(AllocationSite::kMaybeTenure);
|
||||
} else {
|
||||
site.set_pretenure_decision(AllocationSite::kDontTenure);
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// Clear feedback calculation fields until the next gc.
|
||||
inline void ResetPretenuringFeedback(AllocationSite site) {
|
||||
site.set_memento_found_count(0);
|
||||
site.set_memento_create_count(0);
|
||||
}
|
||||
|
||||
inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite site,
|
||||
bool maximum_size_scavenge) {
|
||||
bool deopt = false;
|
||||
int create_count = site.memento_create_count();
|
||||
int found_count = site.memento_found_count();
|
||||
bool minimum_mementos_created =
|
||||
create_count >= AllocationSite::kPretenureMinimumCreated;
|
||||
double ratio =
|
||||
minimum_mementos_created || v8_flags.trace_pretenuring_statistics
|
||||
? static_cast<double>(found_count) / create_count
|
||||
: 0.0;
|
||||
AllocationSite::PretenureDecision current_decision =
|
||||
site.pretenure_decision();
|
||||
|
||||
if (minimum_mementos_created) {
|
||||
deopt = MakePretenureDecision(site, current_decision, ratio,
|
||||
maximum_size_scavenge);
|
||||
}
|
||||
|
||||
if (v8_flags.trace_pretenuring_statistics) {
|
||||
PrintIsolate(isolate,
|
||||
"pretenuring: AllocationSite(%p): (created, found, ratio) "
|
||||
"(%d, %d, %f) %s => %s\n",
|
||||
reinterpret_cast<void*>(site.ptr()), create_count, found_count,
|
||||
ratio, site.PretenureDecisionName(current_decision),
|
||||
site.PretenureDecisionName(site.pretenure_decision()));
|
||||
}
|
||||
|
||||
ResetPretenuringFeedback(site);
|
||||
return deopt;
|
||||
}
|
||||
|
||||
bool PretenureAllocationSiteManually(Isolate* isolate, AllocationSite site) {
|
||||
AllocationSite::PretenureDecision current_decision =
|
||||
site.pretenure_decision();
|
||||
bool deopt = true;
|
||||
if (current_decision == AllocationSite::kUndecided ||
|
||||
current_decision == AllocationSite::kMaybeTenure) {
|
||||
site.set_deopt_dependent_code(true);
|
||||
site.set_pretenure_decision(AllocationSite::kTenure);
|
||||
} else {
|
||||
deopt = false;
|
||||
}
|
||||
if (v8_flags.trace_pretenuring_statistics) {
|
||||
PrintIsolate(isolate,
|
||||
"pretenuring manually requested: AllocationSite(%p): "
|
||||
"%s => %s\n",
|
||||
reinterpret_cast<void*>(site.ptr()),
|
||||
site.PretenureDecisionName(current_decision),
|
||||
site.PretenureDecisionName(site.pretenure_decision()));
|
||||
}
|
||||
|
||||
ResetPretenuringFeedback(site);
|
||||
return deopt;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
void PretenturingHandler::RemoveAllocationSitePretenuringFeedback(
|
||||
AllocationSite site) {
|
||||
global_pretenuring_feedback_.erase(site);
|
||||
}
|
||||
|
||||
void PretenturingHandler::ProcessPretenuringFeedback() {
|
||||
bool trigger_deoptimization = false;
|
||||
if (v8_flags.allocation_site_pretenuring) {
|
||||
int tenure_decisions = 0;
|
||||
int dont_tenure_decisions = 0;
|
||||
int allocation_mementos_found = 0;
|
||||
int allocation_sites = 0;
|
||||
int active_allocation_sites = 0;
|
||||
|
||||
AllocationSite site;
|
||||
|
||||
// Step 1: Digest feedback for recorded allocation sites.
|
||||
bool maximum_size_scavenge = heap_->MaximumSizeMinorGC();
|
||||
for (auto& site_and_count : global_pretenuring_feedback_) {
|
||||
allocation_sites++;
|
||||
site = site_and_count.first;
|
||||
// Count is always access through the site.
|
||||
DCHECK_EQ(0, site_and_count.second);
|
||||
int found_count = site.memento_found_count();
|
||||
// An entry in the storage does not imply that the count is > 0 because
|
||||
// allocation sites might have been reset due to too many objects dying
|
||||
// in old space.
|
||||
if (found_count > 0) {
|
||||
DCHECK(site.IsAllocationSite());
|
||||
active_allocation_sites++;
|
||||
allocation_mementos_found += found_count;
|
||||
if (DigestPretenuringFeedback(heap_->isolate(), site,
|
||||
maximum_size_scavenge)) {
|
||||
trigger_deoptimization = true;
|
||||
}
|
||||
if (site.GetAllocationType() == AllocationType::kOld) {
|
||||
tenure_decisions++;
|
||||
} else {
|
||||
dont_tenure_decisions++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Step 2: Pretenure allocation sites for manual requests.
|
||||
if (allocation_sites_to_pretenure_) {
|
||||
while (!allocation_sites_to_pretenure_->empty()) {
|
||||
auto pretenure_site = allocation_sites_to_pretenure_->Pop();
|
||||
if (PretenureAllocationSiteManually(heap_->isolate(), pretenure_site)) {
|
||||
trigger_deoptimization = true;
|
||||
}
|
||||
}
|
||||
allocation_sites_to_pretenure_.reset();
|
||||
}
|
||||
|
||||
// Step 3: Deopt maybe tenured allocation sites if necessary.
|
||||
bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
|
||||
if (deopt_maybe_tenured) {
|
||||
heap_->ForeachAllocationSite(
|
||||
heap_->allocation_sites_list(),
|
||||
[&allocation_sites, &trigger_deoptimization](AllocationSite site) {
|
||||
DCHECK(site.IsAllocationSite());
|
||||
allocation_sites++;
|
||||
if (site.IsMaybeTenure()) {
|
||||
site.set_deopt_dependent_code(true);
|
||||
trigger_deoptimization = true;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if (trigger_deoptimization) {
|
||||
heap_->isolate()->stack_guard()->RequestDeoptMarkedAllocationSites();
|
||||
}
|
||||
|
||||
if (v8_flags.trace_pretenuring_statistics &&
|
||||
(allocation_mementos_found > 0 || tenure_decisions > 0 ||
|
||||
dont_tenure_decisions > 0)) {
|
||||
PrintIsolate(heap_->isolate(),
|
||||
"pretenuring: deopt_maybe_tenured=%d visited_sites=%d "
|
||||
"active_sites=%d "
|
||||
"mementos=%d tenured=%d not_tenured=%d\n",
|
||||
deopt_maybe_tenured ? 1 : 0, allocation_sites,
|
||||
active_allocation_sites, allocation_mementos_found,
|
||||
tenure_decisions, dont_tenure_decisions);
|
||||
}
|
||||
|
||||
global_pretenuring_feedback_.clear();
|
||||
global_pretenuring_feedback_.reserve(kInitialFeedbackCapacity);
|
||||
}
|
||||
}
|
||||
|
||||
void PretenturingHandler::PretenureAllocationSiteOnNextCollection(
|
||||
AllocationSite site) {
|
||||
if (!allocation_sites_to_pretenure_) {
|
||||
allocation_sites_to_pretenure_.reset(
|
||||
new GlobalHandleVector<AllocationSite>(heap_));
|
||||
}
|
||||
allocation_sites_to_pretenure_->Push(site);
|
||||
}
|
||||
|
||||
void PretenturingHandler::reset() { allocation_sites_to_pretenure_.reset(); }
|
||||
|
||||
} // namespace internal
|
||||
} // namespace v8
|
src/heap/pretenuring-handler.h (new file, 90 lines)
@@ -0,0 +1,90 @@
|
||||
// Copyright 2022 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef V8_HEAP_PRETENURING_HANDLER_H_
|
||||
#define V8_HEAP_PRETENURING_HANDLER_H_
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "src/objects/allocation-site.h"
|
||||
#include "src/objects/heap-object.h"
|
||||
#include "src/objects/map.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
template <typename T>
|
||||
class GlobalHandleVector;
|
||||
class Heap;
|
||||
|
||||
class PretenturingHandler final {
|
||||
public:
|
||||
static const int kInitialFeedbackCapacity = 256;
|
||||
using PretenuringFeedbackMap =
|
||||
std::unordered_map<AllocationSite, size_t, Object::Hasher>;
|
||||
enum FindMementoMode { kForRuntime, kForGC };
|
||||
|
||||
explicit PretenturingHandler(Heap* heap);
|
||||
~PretenturingHandler();
|
||||
|
||||
void reset();
|
||||
|
||||
// If an object has an AllocationMemento trailing it, return it, otherwise
|
||||
// return a null AllocationMemento.
|
||||
template <FindMementoMode mode>
|
||||
inline AllocationMemento FindAllocationMemento(Map map, HeapObject object);
|
||||
|
||||
// ===========================================================================
|
||||
// Allocation site tracking. =================================================
|
||||
// ===========================================================================
|
||||
|
||||
// Updates the AllocationSite of a given {object}. The entry (including the
|
||||
// count) is cached on the local pretenuring feedback.
|
||||
inline void UpdateAllocationSite(
|
||||
Map map, HeapObject object, PretenuringFeedbackMap* pretenuring_feedback);
|
||||
|
||||
// Merges local pretenuring feedback into the global one. Note that this
|
||||
// method needs to be called after evacuation, as allocation sites may be
|
||||
// evacuated and this method resolves forward pointers accordingly.
|
||||
void MergeAllocationSitePretenuringFeedback(
|
||||
const PretenuringFeedbackMap& local_pretenuring_feedback);
|
||||
|
||||
// Adds an allocation site to the list of sites to be pretenured during the
|
||||
// next collection. Added allocation sites are pretenured independent of
|
||||
// their feedback.
|
||||
V8_EXPORT_PRIVATE void PretenureAllocationSiteOnNextCollection(
|
||||
AllocationSite site);
|
||||
|
||||
// ===========================================================================
|
||||
// Pretenuring. ==============================================================
|
||||
// ===========================================================================
|
||||
|
||||
// Pretenuring decisions are made based on feedback collected during new space
|
||||
// evacuation. Note that between feedback collection and calling this method
|
||||
// object in old space must not move.
|
||||
void ProcessPretenuringFeedback();
|
||||
|
||||
// Removes an entry from the global pretenuring storage.
|
||||
void RemoveAllocationSitePretenuringFeedback(AllocationSite site);
|
||||
|
||||
private:
|
||||
bool DeoptMaybeTenuredAllocationSites() const;
|
||||
|
||||
Heap* const heap_;
|
||||
|
||||
// The feedback storage is used to store allocation sites (keys) and how often
|
||||
// they have been visited (values) by finding a memento behind an object. The
|
||||
// storage is only alive temporary during a GC. The invariant is that all
|
||||
// pointers in this map are already fixed, i.e., they do not point to
|
||||
// forwarding pointers.
|
||||
PretenuringFeedbackMap global_pretenuring_feedback_;
|
||||
|
||||
std::unique_ptr<GlobalHandleVector<AllocationSite>>
|
||||
allocation_sites_to_pretenure_;
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
} // namespace v8
|
||||
|
||||
#endif // V8_HEAP_PRETENURING_HANDLER_H_
|
@@ -12,6 +12,7 @@
|
||||
#include "src/heap/memory-chunk.h"
|
||||
#include "src/heap/new-spaces.h"
|
||||
#include "src/heap/objects-visiting-inl.h"
|
||||
#include "src/heap/pretenuring-handler-inl.h"
|
||||
#include "src/heap/scavenger.h"
|
||||
#include "src/objects/map.h"
|
||||
#include "src/objects/objects-body-descriptors-inl.h"
|
||||
@@ -114,7 +115,8 @@ bool Scavenger::MigrateObject(Map map, HeapObject source, HeapObject target,
|
||||
(promotion_heap_choice != kPromoteIntoSharedHeap || mark_shared_heap_)) {
|
||||
heap()->incremental_marking()->TransferColor(source, target);
|
||||
}
|
||||
heap()->UpdateAllocationSite(map, source, &local_pretenuring_feedback_);
|
||||
pretenuring_handler_->UpdateAllocationSite(map, source,
|
||||
&local_pretenuring_feedback_);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
@@ -18,6 +18,7 @@
|
||||
#include "src/heap/memory-chunk-inl.h"
|
||||
#include "src/heap/memory-chunk.h"
|
||||
#include "src/heap/objects-visiting-inl.h"
|
||||
#include "src/heap/pretenuring-handler.h"
|
||||
#include "src/heap/remembered-set-inl.h"
|
||||
#include "src/heap/scavenger-inl.h"
|
||||
#include "src/heap/slot-set.h"
|
||||
@@ -614,7 +615,9 @@ Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
|
||||
promotion_list_local_(promotion_list),
|
||||
copied_list_local_(*copied_list),
|
||||
ephemeron_table_list_local_(*ephemeron_table_list),
|
||||
local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
|
||||
pretenuring_handler_(heap_->pretenuring_handler()),
|
||||
local_pretenuring_feedback_(
|
||||
PretenturingHandler::kInitialFeedbackCapacity),
|
||||
copied_size_(0),
|
||||
promoted_size_(0),
|
||||
allocator_(heap, CompactionSpaceKind::kCompactionSpaceForScavenge),
|
||||
@@ -811,7 +814,8 @@ void ScavengerCollector::ClearOldEphemerons() {
|
||||
}
|
||||
|
||||
void Scavenger::Finalize() {
|
||||
heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
|
||||
pretenuring_handler_->MergeAllocationSitePretenuringFeedback(
|
||||
local_pretenuring_feedback_);
|
||||
heap()->IncrementNewSpaceSurvivingObjectSize(copied_size_);
|
||||
heap()->IncrementPromotedObjectsSize(promoted_size_);
|
||||
collector_->MergeSurvivingNewLargeObjects(surviving_new_large_objects_);
|
||||
|
@@ -12,6 +12,7 @@
|
||||
#include "src/heap/memory-chunk.h"
|
||||
#include "src/heap/objects-visiting.h"
|
||||
#include "src/heap/parallel-work-item.h"
|
||||
#include "src/heap/pretenuring-handler.h"
|
||||
#include "src/heap/slot-set.h"
|
||||
|
||||
namespace v8 {
|
||||
@@ -116,7 +117,6 @@ class Scavenger {
|
||||
// Number of objects to process before interrupting for potentially waking
|
||||
// up other tasks.
|
||||
static const int kInterruptThreshold = 128;
|
||||
static const int kInitialLocalPretenuringFeedbackCapacity = 256;
|
||||
|
||||
inline Heap* heap() { return heap_; }
|
||||
|
||||
@@ -199,7 +199,8 @@ class Scavenger {
|
||||
PromotionList::Local promotion_list_local_;
|
||||
CopiedList::Local copied_list_local_;
|
||||
EphemeronTableList::Local ephemeron_table_list_local_;
|
||||
Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
|
||||
PretenturingHandler* const pretenuring_handler_;
|
||||
PretenturingHandler::PretenuringFeedbackMap local_pretenuring_feedback_;
|
||||
size_t copied_size_;
|
||||
size_t promoted_size_;
|
||||
EvacuationAllocator allocator_;
|
||||
|
@@ -19,21 +19,20 @@
|
||||
#include "src/heap/mark-compact-inl.h"
|
||||
#include "src/heap/new-spaces.h"
|
||||
#include "src/heap/paged-spaces.h"
|
||||
#include "src/heap/pretenuring-handler-inl.h"
|
||||
#include "src/heap/pretenuring-handler.h"
|
||||
#include "src/heap/remembered-set.h"
|
||||
#include "src/objects/objects-inl.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
namespace {
|
||||
static const int kInitialLocalPretenuringFeedbackCapacity = 256;
|
||||
} // namespace
|
||||
|
||||
class Sweeper::ConcurrentSweeper final {
|
||||
public:
|
||||
explicit ConcurrentSweeper(Sweeper* sweeper)
|
||||
: sweeper_(sweeper),
|
||||
local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity) {}
|
||||
local_pretenuring_feedback_(
|
||||
PretenturingHandler::kInitialFeedbackCapacity) {}
|
||||
|
||||
bool ConcurrentSweepSpace(AllocationSpace identity, JobDelegate* delegate) {
|
||||
while (!delegate->ShouldYield()) {
|
||||
@@ -45,13 +44,13 @@ class Sweeper::ConcurrentSweeper final {
|
||||
return false;
|
||||
}
|
||||
|
||||
Heap::PretenuringFeedbackMap* local_pretenuring_feedback() {
|
||||
PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback() {
|
||||
return &local_pretenuring_feedback_;
|
||||
}
|
||||
|
||||
private:
|
||||
Sweeper* const sweeper_;
|
||||
Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
|
||||
PretenturingHandler::PretenuringFeedbackMap local_pretenuring_feedback_;
|
||||
};
|
||||
|
||||
class Sweeper::SweeperJob final : public JobTask {
|
||||
@@ -119,7 +118,9 @@ Sweeper::Sweeper(Heap* heap)
|
||||
marking_state_(heap_->non_atomic_marking_state()),
|
||||
sweeping_in_progress_(false),
|
||||
should_reduce_memory_(false),
|
||||
local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity) {}
|
||||
pretenuring_handler_(heap_->pretenuring_handler()),
|
||||
local_pretenuring_feedback_(
|
||||
PretenturingHandler::kInitialFeedbackCapacity) {}
|
||||
|
||||
Sweeper::~Sweeper() {
|
||||
DCHECK(concurrent_sweepers_.empty());
|
||||
@@ -239,9 +240,10 @@ void Sweeper::EnsureCompleted(SweepingMode sweeping_mode) {
|
||||
CHECK(sweeping_list_[GetSweepSpaceIndex(space)].empty());
|
||||
});
|
||||
|
||||
heap_->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
|
||||
pretenuring_handler_->MergeAllocationSitePretenuringFeedback(
|
||||
local_pretenuring_feedback_);
|
||||
for (ConcurrentSweeper& concurrent_sweeper : concurrent_sweepers_) {
|
||||
heap_->MergeAllocationSitePretenuringFeedback(
|
||||
pretenuring_handler_->MergeAllocationSitePretenuringFeedback(
|
||||
*concurrent_sweeper.local_pretenuring_feedback());
|
||||
}
|
||||
local_pretenuring_feedback_.clear();
|
||||
@@ -351,7 +353,7 @@ void Sweeper::ClearMarkBitsAndHandleLivenessStatistics(Page* page,
|
||||
int Sweeper::RawSweep(
|
||||
Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
|
||||
SweepingMode sweeping_mode, const base::MutexGuard& page_guard,
|
||||
Heap::PretenuringFeedbackMap* local_pretenuring_feedback) {
|
||||
PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback) {
|
||||
Space* space = p->owner();
|
||||
DCHECK_NOT_NULL(space);
|
||||
DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE ||
|
||||
@@ -441,7 +443,8 @@ int Sweeper::RawSweep(
|
||||
free_start = free_end + size;
|
||||
|
||||
if (p->InYoungGeneration()) {
|
||||
heap_->UpdateAllocationSite(map, object, local_pretenuring_feedback);
|
||||
pretenuring_handler_->UpdateAllocationSite(map, object,
|
||||
local_pretenuring_feedback);
|
||||
}
|
||||
|
||||
if (active_system_pages_after_sweeping) {
|
||||
@@ -521,7 +524,7 @@ int Sweeper::ParallelSweepSpace(AllocationSpace identity,
|
||||
|
||||
int Sweeper::ParallelSweepPage(
|
||||
Page* page, AllocationSpace identity,
|
||||
Heap::PretenuringFeedbackMap* local_pretenuring_feedback,
|
||||
PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback,
|
||||
SweepingMode sweeping_mode) {
|
||||
DCHECK(IsValidSweepingSpace(identity));
|
||||
|
||||
|
@@ -13,7 +13,7 @@
|
||||
#include "src/base/platform/semaphore.h"
|
||||
#include "src/common/globals.h"
|
||||
#include "src/flags/flags.h"
|
||||
#include "src/heap/heap.h"
|
||||
#include "src/heap/pretenuring-handler.h"
|
||||
#include "src/heap/slot-set.h"
|
||||
#include "src/tasks/cancelable-task.h"
|
||||
|
||||
@@ -91,14 +91,15 @@ class Sweeper {
|
||||
int required_freed_bytes, int max_pages = 0);
|
||||
int ParallelSweepPage(
|
||||
Page* page, AllocationSpace identity,
|
||||
Heap::PretenuringFeedbackMap* local_pretenuring_feedback,
|
||||
PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback,
|
||||
SweepingMode sweeping_mode);
|
||||
|
||||
void EnsurePageIsSwept(Page* page);
|
||||
|
||||
int RawSweep(Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
|
||||
SweepingMode sweeping_mode, const base::MutexGuard& page_guard,
|
||||
Heap::PretenuringFeedbackMap* local_pretenuring_feedback);
|
||||
int RawSweep(
|
||||
Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
|
||||
SweepingMode sweeping_mode, const base::MutexGuard& page_guard,
|
||||
PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback);
|
||||
|
||||
// After calling this function sweeping is considered to be in progress
|
||||
// and the main thread can sweep lazily, but the background sweeper tasks
|
||||
@@ -200,7 +201,8 @@ class Sweeper {
|
||||
// path checks this flag to see whether it could support concurrent sweeping.
|
||||
std::atomic<bool> sweeping_in_progress_;
|
||||
bool should_reduce_memory_;
|
||||
Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
|
||||
PretenturingHandler* const pretenuring_handler_;
|
||||
PretenturingHandler::PretenuringFeedbackMap local_pretenuring_feedback_;
|
||||
base::Optional<GarbageCollector> current_collector_;
|
||||
};
|
||||
|
||||
|
@@ -16,6 +16,7 @@
|
||||
#include "src/heap/factory-inl.h"
|
||||
#include "src/heap/heap-inl.h"
|
||||
#include "src/heap/memory-chunk.h"
|
||||
#include "src/heap/pretenuring-handler-inl.h"
|
||||
#include "src/init/bootstrapper.h"
|
||||
#include "src/logging/counters.h"
|
||||
#include "src/logging/log.h"
|
||||
@@ -5329,8 +5330,11 @@ bool JSObject::UpdateAllocationSite(Handle<JSObject> object,
|
||||
DisallowGarbageCollection no_gc;
|
||||
|
||||
Heap* heap = object->GetHeap();
|
||||
PretenturingHandler* pretunring_handler = heap->pretenuring_handler();
|
||||
AllocationMemento memento =
|
||||
heap->FindAllocationMemento<Heap::kForRuntime>(object->map(), *object);
|
||||
pretunring_handler
|
||||
->FindAllocationMemento<PretenturingHandler::kForRuntime>(
|
||||
object->map(), *object);
|
||||
if (memento.is_null()) return false;
|
||||
|
||||
// Walk through to the Allocation Site
|
||||
|
@@ -20,8 +20,8 @@
|
||||
#include "src/execution/isolate-inl.h"
|
||||
#include "src/execution/protectors-inl.h"
|
||||
#include "src/execution/tiering-manager.h"
|
||||
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
|
||||
#include "src/heap/heap-write-barrier-inl.h"
|
||||
#include "src/heap/pretenuring-handler-inl.h"
|
||||
#include "src/ic/stub-cache.h"
|
||||
#include "src/objects/js-collection-inl.h"
|
||||
#ifdef V8_ENABLE_MAGLEV
|
||||
@@ -1379,12 +1379,15 @@ RUNTIME_FUNCTION(Runtime_PretenureAllocationSite) {
|
||||
return ReturnFuzzSafe(ReadOnlyRoots(isolate).false_value(), isolate);
|
||||
}
|
||||
|
||||
PretenturingHandler* pretenuring_handler = heap->pretenuring_handler();
|
||||
AllocationMemento memento =
|
||||
heap->FindAllocationMemento<Heap::kForRuntime>(object.map(), object);
|
||||
pretenuring_handler
|
||||
->FindAllocationMemento<PretenturingHandler::kForRuntime>(
|
||||
object.map(), object);
|
||||
if (memento.is_null())
|
||||
return ReturnFuzzSafe(ReadOnlyRoots(isolate).false_value(), isolate);
|
||||
AllocationSite site = memento.GetAllocationSite();
|
||||
heap->PretenureAllocationSiteOnNextCollection(site);
|
||||
pretenuring_handler->PretenureAllocationSiteOnNextCollection(site);
|
||||
return ReturnFuzzSafe(ReadOnlyRoots(isolate).true_value(), isolate);
|
||||
}
|
||||
|
||||
|