[heap] Extend and rename NewSpace base class

NewSpace is renamed to SemiSpaceNewSpace and NewSpaceBase is renamed to
NewSpace (the new PagedSpace-based new space implementation will be
named PagedNewSpace). Most use cases are updated to use the base class
rather than the concrete semispace-based implementation. To that end,
the base class is extended with additional virtual methods that
delegate to the concrete class.

This CL follows these guidelines:

(*) If a method callsite needs to know the exact new space
    implementation in use, we cast to the concrete class. This is the
    case, for example, for callsites in scavenger.*.
(*) If a method is called from outside the heap implementation, or
    should be present regardless of the concrete implementation, that
    method is made virtual.
(*) Other cases are usually methods that are specific to a concrete
    implementation, where the concrete implementation is not known at
    the callsite and there is no clear way to abstract the method
    nicely. In such cases we cast to the concrete SemiSpaceNewSpace
    implementation for now; we will revisit these cases once
    PagedNewSpace exists.

Bug: v8:12612
Change-Id: I7b85626774ce0d785b0257bf8d32b9f50eeaf292
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3625975
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80482}
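The pattern this CL establishes, reduced to a minimal, hypothetical
sketch (the real classes live in src/heap/new-spaces.h and carry many
more members; the stub bodies below are placeholders, not V8 code):

    #include <cstddef>
    #include <cstdint>

    using Address = uintptr_t;  // stand-in for V8's Address type

    // Base class: the interface most of the heap now programs against.
    class NewSpace {  // previously NewSpaceBase
     public:
      virtual ~NewSpace() = default;
      // Methods needed regardless of the concrete implementation are virtual.
      virtual size_t TotalCapacity() const = 0;
      virtual void Grow() = 0;
      virtual void Shrink() = 0;
      virtual bool ShouldBePromoted(Address addr) const = 0;
    };

    // Concrete semispace-based implementation.
    class SemiSpaceNewSpace final : public NewSpace {  // previously NewSpace
     public:
      // Unchecked downcast for callsites that must know the concrete
      // implementation; to be revisited once PagedNewSpace exists.
      static SemiSpaceNewSpace* From(NewSpace* space) {
        return static_cast<SemiSpaceNewSpace*>(space);
      }

      size_t TotalCapacity() const final { return 0; }              // stub
      void Grow() final {}                                          // stub
      void Shrink() final {}                                        // stub
      bool ShouldBePromoted(Address) const final { return false; }  // stub

      // Semispace-specific API, intentionally kept off the base class.
      void Flip() {}  // stub
    };

Generic callers stay on the NewSpace interface; callers that need
semispace-specific behavior go through SemiSpaceNewSpace::From().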
parent a680570338
commit b415cd7c05
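The two callsite styles, excerpted (lightly abridged) from the diff
below — one casts to the concrete class, the other stays on the
now-virtual base interface:

    // Style 1 (heap.cc): the concrete implementation is required,
    // so cast explicitly.
    void Heap::UncommitFromSpace() {
      DCHECK_NOT_NULL(new_space_);
      SemiSpaceNewSpace::From(new_space_)->UncommitFromSpace();
    }

    // Style 2 (mark-compact.cc): the query is implementation-agnostic,
    // so it goes through the virtual NewSpace interface.
    if (heap_->new_space()->ShouldBePromoted(object.address()) &&
        TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
      // ...
    }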
@@ -323,13 +323,6 @@ Heap* Heap::FromWritableHeapObject(HeapObject obj) {
   return heap;
 }
 
-bool Heap::ShouldBePromoted(Address old_address) {
-  Page* page = Page::FromAddress(old_address);
-  Address age_mark = new_space_->age_mark();
-  return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
-         (!page->ContainsLimit(age_mark) || old_address < age_mark);
-}
-
 void Heap::CopyBlock(Address dst, Address src, int byte_size) {
   DCHECK(IsAligned(byte_size, kTaggedSize));
   CopyTagged(dst, src, static_cast<size_t>(byte_size / kTaggedSize));
@@ -39,35 +39,37 @@ void HeapLayoutTracer::GCEpiloguePrintHeapLayout(v8::Isolate* isolate,
 
 // static
 void HeapLayoutTracer::PrintBasicMemoryChunk(std::ostream& os,
-                                             BasicMemoryChunk* chunk,
+                                             const BasicMemoryChunk& chunk,
                                              const char* owner_name) {
   os << "{owner:" << owner_name << ","
-     << "address:" << chunk << ","
-     << "size:" << chunk->size() << ","
-     << "allocated_bytes:" << chunk->allocated_bytes() << ","
-     << "wasted_memory:" << chunk->wasted_memory() << "}" << std::endl;
+     << "address:" << &chunk << ","
+     << "size:" << chunk.size() << ","
+     << "allocated_bytes:" << chunk.allocated_bytes() << ","
+     << "wasted_memory:" << chunk.wasted_memory() << "}" << std::endl;
 }
 
 // static
 void HeapLayoutTracer::PrintHeapLayout(std::ostream& os, Heap* heap) {
-  for (PageIterator it = heap->new_space()->to_space().begin();
-       it != heap->new_space()->to_space().end(); ++it) {
-    PrintBasicMemoryChunk(os, *it, "to_space");
+  const SemiSpaceNewSpace* semi_space_new_space =
+      SemiSpaceNewSpace::From(heap->new_space());
+  for (ConstPageIterator it = semi_space_new_space->to_space().begin();
+       it != semi_space_new_space->to_space().end(); ++it) {
+    PrintBasicMemoryChunk(os, **it, "to_space");
   }
 
-  for (PageIterator it = heap->new_space()->from_space().begin();
-       it != heap->new_space()->from_space().end(); ++it) {
-    PrintBasicMemoryChunk(os, *it, "from_space");
+  for (ConstPageIterator it = semi_space_new_space->from_space().begin();
+       it != semi_space_new_space->from_space().end(); ++it) {
+    PrintBasicMemoryChunk(os, **it, "from_space");
   }
 
   OldGenerationMemoryChunkIterator it(heap);
   MemoryChunk* chunk;
   while ((chunk = it.next()) != nullptr) {
-    PrintBasicMemoryChunk(os, chunk, chunk->owner()->name());
+    PrintBasicMemoryChunk(os, *chunk, chunk->owner()->name());
   }
 
   for (ReadOnlyPage* page : heap->read_only_space()->pages()) {
-    PrintBasicMemoryChunk(os, page, "ro_space");
+    PrintBasicMemoryChunk(os, *page, "ro_space");
   }
 }
 }  // namespace internal
@@ -24,7 +24,8 @@ class HeapLayoutTracer : AllStatic {
                                       v8::GCCallbackFlags flags, void* data);
 
  private:
-  static void PrintBasicMemoryChunk(std::ostream& os, BasicMemoryChunk* chunk,
+  static void PrintBasicMemoryChunk(std::ostream& os,
+                                    const BasicMemoryChunk& chunk,
                                     const char* owner_name);
   static void PrintHeapLayout(std::ostream& os, Heap* heap);
 };
@@ -49,6 +49,7 @@
 #include "src/heap/gc-idle-time-handler.h"
 #include "src/heap/gc-tracer-inl.h"
 #include "src/heap/gc-tracer.h"
+#include "src/heap/heap-allocator.h"
 #include "src/heap/heap-controller.h"
 #include "src/heap/heap-layout-tracer.h"
 #include "src/heap/heap-write-barrier-inl.h"
@@ -1015,7 +1016,10 @@ void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
   deferred_counters_[feature]++;
 }
 
-bool Heap::UncommitFromSpace() { return new_space_->UncommitFromSpace(); }
+void Heap::UncommitFromSpace() {
+  DCHECK_NOT_NULL(new_space_);
+  SemiSpaceNewSpace::From(new_space_)->UncommitFromSpace();
+}
 
 void Heap::GarbageCollectionPrologue(
     GarbageCollectionReason gc_reason,
@@ -2159,11 +2163,7 @@ void Heap::CopyRange(HeapObject dst_object, const TSlot dst_slot,
 
 void Heap::EnsureFromSpaceIsCommitted() {
   if (!new_space_) return;
-  if (new_space_->CommitFromSpaceIfNeeded()) return;
-
-  // Committing memory to from space failed.
-  // Memory is exhausted and we will die.
-  FatalProcessOutOfMemory("Committing semi space failed.");
+  SemiSpaceNewSpace::From(new_space_)->CommitFromSpaceIfNeeded();
 }
 
 bool Heap::CollectionRequested() {
@@ -2699,22 +2699,25 @@ void Heap::EvacuateYoungGeneration() {
     DCHECK(CanPromoteYoungAndExpandOldGeneration(0));
   }
 
+  SemiSpaceNewSpace* semi_space_new_space =
+      SemiSpaceNewSpace::From(new_space());
   // Move pages from new->old generation.
-  PageRange range(new_space()->first_allocatable_address(), new_space()->top());
+  PageRange range(semi_space_new_space->first_allocatable_address(),
+                  semi_space_new_space->top());
   for (auto it = range.begin(); it != range.end();) {
     Page* p = (*++it)->prev_page();
-    new_space()->from_space().RemovePage(p);
+    semi_space_new_space->from_space().RemovePage(p);
     Page::ConvertNewToOld(p);
     if (incremental_marking()->IsMarking())
       mark_compact_collector()->RecordLiveSlotsOnPage(p);
   }
 
   // Reset new space.
-  if (!new_space()->Rebalance()) {
+  if (!semi_space_new_space->Rebalance()) {
     FatalProcessOutOfMemory("NewSpace::Rebalance");
   }
-  new_space()->ResetLinearAllocationArea();
-  new_space()->set_age_mark(new_space()->top());
+  semi_space_new_space->ResetLinearAllocationArea();
+  semi_space_new_space->set_age_mark(semi_space_new_space->top());
 
   for (auto it = new_lo_space()->begin(); it != new_lo_space()->end();) {
     LargePage* page = *it;
@@ -2783,8 +2786,12 @@ void Heap::Scavenge() {
 
   // Flip the semispaces. After flipping, to space is empty, from space has
   // live objects.
-  new_space()->Flip();
-  new_space()->ResetLinearAllocationArea();
+  {
+    SemiSpaceNewSpace* semi_space_new_space =
+        SemiSpaceNewSpace::From(new_space());
+    semi_space_new_space->Flip();
+    semi_space_new_space->ResetLinearAllocationArea();
+  }
 
   // We also flip the young generation large object space. All large objects
   // will be in the from space.
@@ -3232,7 +3239,7 @@ void* Heap::AllocateExternalBackingStore(
     const std::function<void*(size_t)>& allocate, size_t byte_length) {
   if (!always_allocate() && new_space()) {
     size_t new_space_backing_store_bytes =
-        new_space()->ExternalBackingStoreBytes();
+        new_space()->ExternalBackingStoreOverallBytes();
     if (new_space_backing_store_bytes >= 2 * kMaxSemiSpaceSize &&
         new_space_backing_store_bytes >= byte_length) {
       // Performing a young generation GC amortizes over the allocated backing
@@ -4853,8 +4860,11 @@ void Heap::VerifyCommittedPhysicalMemory() {
 #endif  // DEBUG
 
 void Heap::ZapFromSpace() {
-  if (!new_space_ || !new_space_->IsFromSpaceCommitted()) return;
-  for (Page* page : PageRange(new_space_->from_space().first_page(), nullptr)) {
+  if (!new_space_) return;
+  SemiSpaceNewSpace* semi_space_new_space = SemiSpaceNewSpace::From(new_space_);
+  if (!semi_space_new_space->IsFromSpaceCommitted()) return;
+  for (Page* page :
+       PageRange(semi_space_new_space->from_space().first_page(), nullptr)) {
     memory_allocator()->ZapBlock(page->area_start(),
                                  page->HighWaterMark() - page->area_start(),
                                  ZapValue());
@@ -5831,7 +5841,7 @@ void Heap::SetUpSpaces(LinearAllocationArea* new_allocation_info,
   DCHECK_NOT_NULL(read_only_space_);
   const bool has_young_gen = !FLAG_single_generation && !IsShared();
   if (has_young_gen) {
-    space_[NEW_SPACE] = new_space_ = new NewSpace(
+    space_[NEW_SPACE] = new_space_ = new SemiSpaceNewSpace(
         this, memory_allocator_->data_page_allocator(), initial_semispace_size_,
         max_semi_space_size_, new_allocation_info);
     space_[NEW_LO_SPACE] = new_lo_space_ =
@@ -737,10 +737,6 @@ class Heap {
 
   void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
 
-  // An object should be promoted if the object has survived a
-  // scavenge operation.
-  inline bool ShouldBePromoted(Address old_address);
-
   void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
 
   inline int NextScriptId();
@@ -1818,7 +1814,7 @@ class Heap {
   void EnsureFromSpaceIsCommitted();
 
   // Uncommit unused semi space.
-  V8_EXPORT_PRIVATE bool UncommitFromSpace();
+  V8_EXPORT_PRIVATE void UncommitFromSpace();
 
   // Fill in bogus values in from space
   void ZapFromSpace();
@@ -40,6 +40,7 @@
 #include "src/heap/memory-chunk.h"
 #include "src/heap/memory-measurement-inl.h"
 #include "src/heap/memory-measurement.h"
+#include "src/heap/new-spaces.h"
 #include "src/heap/object-stats.h"
 #include "src/heap/objects-visiting-inl.h"
 #include "src/heap/parallel-work-item.h"
@@ -1860,7 +1861,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
       return true;
     }
 
-    if (heap_->ShouldBePromoted(object.address()) &&
+    if (heap_->new_space()->ShouldBePromoted(object.address()) &&
         TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
       promoted_size_ += size;
       return true;
@@ -1948,11 +1949,14 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
   static void Move(Page* page) {
     switch (mode) {
       case NEW_TO_NEW:
-        page->heap()->new_space()->MovePageFromSpaceToSpace(page);
+        SemiSpaceNewSpace::From(page->heap()->new_space())
+            ->MovePageFromSpaceToSpace(page);
         page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
         break;
       case NEW_TO_OLD: {
-        page->heap()->new_space()->from_space().RemovePage(page);
+        SemiSpaceNewSpace::From(page->heap()->new_space())
+            ->from_space()
+            .RemovePage(page);
         Page* new_page = Page::ConvertNewToOld(page);
         DCHECK(!new_page->InYoungGeneration());
         new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
@@ -3616,7 +3620,7 @@ void MarkCompactCollector::EvacuatePrologue() {
        PageRange(new_space->first_allocatable_address(), new_space->top())) {
     new_space_evacuation_pages_.push_back(p);
   }
-  new_space->Flip();
+  SemiSpaceNewSpace::From(new_space)->Flip();
   new_space->ResetLinearAllocationArea();
 
   DCHECK_EQ(new_space->Size(), 0);
@@ -3640,7 +3644,8 @@ void MarkCompactCollector::EvacuateEpilogue() {
 
   // New space.
   if (heap()->new_space()) {
-    heap()->new_space()->set_age_mark(heap()->new_space()->top());
+    SemiSpaceNewSpace::From(heap()->new_space())
+        ->set_age_mark(heap()->new_space()->top());
     DCHECK_EQ(0, heap()->new_space()->Size());
   }
 
@@ -3814,7 +3819,8 @@ void Evacuator::EvacuatePage(MemoryChunk* chunk) {
         chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
             chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
         chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
-        chunk->Contains(heap()->new_space()->age_mark()),
+        chunk->Contains(
+            SemiSpaceNewSpace::From(heap()->new_space())->age_mark()),
         saved_live_bytes, evacuation_time,
         chunk->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
 }
@@ -4030,7 +4036,8 @@ bool ShouldMovePage(Page* p, intptr_t live_bytes,
                     AlwaysPromoteYoung always_promote_young) {
   Heap* heap = p->heap();
   const bool reduce_memory = heap->ShouldReduceMemory();
-  const Address age_mark = heap->new_space()->age_mark();
+  const Address age_mark =
+      SemiSpaceNewSpace::From(heap->new_space())->age_mark();
   return !reduce_memory && !p->NeverEvacuate() &&
          (live_bytes > Evacuator::NewSpacePageEvacuationThreshold()) &&
          (always_promote_young == AlwaysPromoteYoung::kYes ||
@@ -4270,7 +4277,7 @@ void MarkCompactCollector::Evacuate() {
 
   if (heap()->new_space()) {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
-    if (!heap()->new_space()->Rebalance()) {
+    if (!SemiSpaceNewSpace::From(heap()->new_space())->Rebalance()) {
       heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
     }
   }
@@ -5543,8 +5550,10 @@ void MinorMarkCompactCollector::CollectGarbage() {
 
   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS);
-    for (Page* p :
-         PageRange(heap()->new_space()->from_space().first_page(), nullptr)) {
+    for (Page* p : PageRange(SemiSpaceNewSpace::From(heap()->new_space())
+                                 ->from_space()
+                                 .first_page(),
+                             nullptr)) {
       DCHECK_EQ(promoted_pages_.end(),
                 std::find(promoted_pages_.begin(), promoted_pages_.end(), p));
       non_atomic_marking_state()->ClearLiveness(p);
@@ -5703,7 +5712,7 @@ void MinorMarkCompactCollector::EvacuatePrologue() {
     new_space_evacuation_pages_.push_back(p);
   }
 
-  new_space->Flip();
+  SemiSpaceNewSpace::From(new_space)->Flip();
   new_space->ResetLinearAllocationArea();
 
   heap()->new_lo_space()->Flip();
@@ -5711,7 +5720,8 @@ void MinorMarkCompactCollector::EvacuatePrologue() {
 }
 
 void MinorMarkCompactCollector::EvacuateEpilogue() {
-  heap()->new_space()->set_age_mark(heap()->new_space()->top());
+  SemiSpaceNewSpace::From(heap()->new_space())
+      ->set_age_mark(heap()->new_space()->top());
   // Give pages that are queued to be freed back to the OS.
   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
 }
@@ -6096,7 +6106,7 @@ void MinorMarkCompactCollector::Evacuate() {
 
   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_REBALANCE);
-    if (!heap()->new_space()->Rebalance()) {
+    if (!SemiSpaceNewSpace::From(heap()->new_space())->Rebalance()) {
       heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
     }
   }
@@ -38,35 +38,28 @@ bool SemiSpace::ContainsSlow(Address a) const {
 }
 
 // --------------------------------------------------------------------------
-// NewSpaceBase
+// NewSpace
 
-bool NewSpaceBase::Contains(Object o) const {
+bool NewSpace::Contains(Object o) const {
   return o.IsHeapObject() && Contains(HeapObject::cast(o));
 }
 
-bool NewSpaceBase::Contains(HeapObject o) const {
+bool NewSpace::Contains(HeapObject o) const {
   return BasicMemoryChunk::FromHeapObject(o)->InNewSpace();
 }
 
-V8_WARN_UNUSED_RESULT inline AllocationResult
-NewSpaceBase::AllocateRawSynchronized(int size_in_bytes,
-                                      AllocationAlignment alignment,
-                                      AllocationOrigin origin) {
+V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
+    int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
   base::MutexGuard guard(&mutex_);
   return AllocateRaw(size_in_bytes, alignment, origin);
 }
 
 // -----------------------------------------------------------------------------
-// NewSpace
+// SemiSpaceNewSpace
 
-bool NewSpace::ContainsSlow(Address a) const {
-  return from_space_.ContainsSlow(a) || to_space_.ContainsSlow(a);
-}
-
-V8_INLINE bool NewSpace::EnsureAllocation(int size_in_bytes,
-                                          AllocationAlignment alignment,
-                                          AllocationOrigin origin,
-                                          int* out_max_aligned_size) {
+V8_INLINE bool SemiSpaceNewSpace::EnsureAllocation(
+    int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin,
+    int* out_max_aligned_size) {
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 #if DEBUG
   VerifyTop();
@@ -143,7 +143,7 @@ bool SemiSpace::Commit() {
   return true;
 }
 
-bool SemiSpace::Uncommit() {
+void SemiSpace::Uncommit() {
   DCHECK(IsCommitted());
   int actual_pages = 0;
   while (!memory_chunk_list_.Empty()) {
@@ -163,7 +163,6 @@ bool SemiSpace::Uncommit() {
   AccountUncommitted(removed_page_size);
   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
   DCHECK(!IsCommitted());
-  return true;
 }
 
 size_t SemiSpace::CommittedPhysicalMemory() const {
@@ -351,7 +350,7 @@ void SemiSpace::set_age_mark(Address mark) {
 }
 
 std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator(Heap* heap) {
-  // Use the NewSpace::NewObjectIterator to iterate the ToSpace.
+  // Use the SemiSpaceNewSpace::NewObjectIterator to iterate the ToSpace.
   UNREACHABLE();
 }
 
@@ -434,7 +433,8 @@ void SemiSpace::AssertValidRange(Address start, Address end) {
 // -----------------------------------------------------------------------------
 // SemiSpaceObjectIterator implementation.
 
-SemiSpaceObjectIterator::SemiSpaceObjectIterator(const NewSpace* space) {
+SemiSpaceObjectIterator::SemiSpaceObjectIterator(
+    const SemiSpaceNewSpace* space) {
   Initialize(space->first_allocatable_address(), space->top());
 }
 
@@ -445,16 +445,16 @@ void SemiSpaceObjectIterator::Initialize(Address start, Address end) {
 }
 
 // -----------------------------------------------------------------------------
-// NewSpaceBase implementation
+// NewSpace implementation
 
-NewSpaceBase::NewSpaceBase(Heap* heap, LinearAllocationArea* allocation_info)
+NewSpace::NewSpace(Heap* heap, LinearAllocationArea* allocation_info)
     : SpaceWithLinearArea(heap, NEW_SPACE, new NoFreeList(), allocation_info) {}
 
-void NewSpaceBase::ResetParkedAllocationBuffers() {
+void NewSpace::ResetParkedAllocationBuffers() {
   parked_allocation_buffers_.clear();
 }
 
-void NewSpaceBase::MaybeFreeUnusedLab(LinearAllocationArea info) {
+void NewSpace::MaybeFreeUnusedLab(LinearAllocationArea info) {
   if (allocation_info_->MergeIfAdjacent(info)) {
     original_top_.store(allocation_info_->top(), std::memory_order_release);
   }
@@ -464,11 +464,7 @@ void NewSpaceBase::MaybeFreeUnusedLab(LinearAllocationArea info) {
 #endif
 }
 
-std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator(Heap* heap) {
-  return std::unique_ptr<ObjectIterator>(new SemiSpaceObjectIterator(this));
-}
-
-void NewSpaceBase::MakeLinearAllocationAreaIterable() {
+void NewSpace::MakeLinearAllocationAreaIterable() {
   Address to_top = top();
   Page* page = Page::FromAddress(to_top - kTaggedSize);
   if (page->Contains(to_top)) {
@@ -477,13 +473,13 @@ void NewSpaceBase::MakeLinearAllocationAreaIterable() {
   }
 }
 
-void NewSpaceBase::FreeLinearAllocationArea() {
+void NewSpace::FreeLinearAllocationArea() {
   MakeLinearAllocationAreaIterable();
   UpdateInlineAllocationLimit(0);
 }
 
 #if DEBUG
-void NewSpaceBase::VerifyTop() const {
+void NewSpace::VerifyTop() const {
   SpaceWithLinearArea::VerifyTop();
 
   // Ensure that original_top_ always >= LAB start. The delta between start_
@@ -496,13 +492,14 @@ void NewSpaceBase::VerifyTop() const {
 #endif  // DEBUG
 
 // -----------------------------------------------------------------------------
-// NewSpace implementation
+// SemiSpaceNewSpace implementation
 
-NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
-                   size_t initial_semispace_capacity,
-                   size_t max_semispace_capacity,
-                   LinearAllocationArea* allocation_info)
-    : NewSpaceBase(heap, allocation_info),
+SemiSpaceNewSpace::SemiSpaceNewSpace(Heap* heap,
+                                     v8::PageAllocator* page_allocator,
+                                     size_t initial_semispace_capacity,
+                                     size_t max_semispace_capacity,
+                                     LinearAllocationArea* allocation_info)
+    : NewSpace(heap, allocation_info),
       to_space_(heap, kToSpace),
       from_space_(heap, kFromSpace) {
   DCHECK(initial_semispace_capacity <= max_semispace_capacity);
@@ -516,7 +513,7 @@ NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
   ResetLinearAllocationArea();
 }
 
-NewSpace::~NewSpace() {
+SemiSpaceNewSpace::~SemiSpaceNewSpace() {
   // Tears down the space. Heap memory was not allocated by the space, so it
   // is not deallocated here.
   allocation_info_->Reset(kNullAddress, kNullAddress);
@@ -525,9 +522,9 @@ NewSpace::~NewSpace() {
   from_space_.TearDown();
 }
 
-void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
+void SemiSpaceNewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
 
-void NewSpace::Grow() {
+void SemiSpaceNewSpace::Grow() {
   heap()->safepoint()->AssertActive();
   // Double the semispace size but only up to maximum capacity.
   DCHECK(TotalCapacity() < MaximumCapacity());
@@ -545,7 +542,7 @@ void NewSpace::Grow() {
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
 
-void NewSpace::Shrink() {
+void SemiSpaceNewSpace::Shrink() {
   size_t new_capacity = std::max(InitialTotalCapacity(), 2 * Size());
   size_t rounded_new_capacity = ::RoundUp(new_capacity, Page::kPageSize);
   if (rounded_new_capacity < TotalCapacity()) {
@@ -557,7 +554,7 @@ void NewSpace::Shrink() {
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
 
-size_t NewSpace::CommittedPhysicalMemory() const {
+size_t SemiSpaceNewSpace::CommittedPhysicalMemory() const {
   if (!base::OS::HasLazyCommits()) return CommittedMemory();
   BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
   size_t size = to_space_.CommittedPhysicalMemory();
@@ -567,13 +564,13 @@ size_t NewSpace::CommittedPhysicalMemory() const {
   return size;
 }
 
-bool NewSpace::Rebalance() {
+bool SemiSpaceNewSpace::Rebalance() {
   // Order here is important to make use of the page pool.
   return to_space_.EnsureCurrentCapacity() &&
          from_space_.EnsureCurrentCapacity();
 }
 
-void NewSpace::UpdateLinearAllocationArea(Address known_top) {
+void SemiSpaceNewSpace::UpdateLinearAllocationArea(Address known_top) {
   AdvanceAllocationObservers();
 
   Address new_top = known_top == 0 ? to_space_.page_low() : known_top;
@@ -593,7 +590,7 @@ void NewSpace::UpdateLinearAllocationArea(Address known_top) {
   UpdateInlineAllocationLimit(0);
 }
 
-void NewSpace::ResetLinearAllocationArea() {
+void SemiSpaceNewSpace::ResetLinearAllocationArea() {
   to_space_.Reset();
   UpdateLinearAllocationArea();
   // Clear all mark-bits in the to-space.
@@ -606,7 +603,7 @@ void NewSpace::ResetLinearAllocationArea() {
   }
 }
 
-void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
+void SemiSpaceNewSpace::UpdateInlineAllocationLimit(size_t min_size) {
   Address new_limit = ComputeLimit(top(), to_space_.page_high(), min_size);
   DCHECK_LE(top(), new_limit);
   DCHECK_LE(new_limit, to_space_.page_high());
@@ -618,7 +615,7 @@ void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
 #endif
 }
 
-bool NewSpace::AddFreshPage() {
+bool SemiSpaceNewSpace::AddFreshPage() {
   Address top = allocation_info_->top();
   DCHECK(!OldSpace::IsAtPageStart(top));
 
@@ -644,13 +641,8 @@ bool NewSpace::AddFreshPage() {
   return true;
 }
 
-bool NewSpace::AddFreshPageSynchronized() {
-  base::MutexGuard guard(&mutex_);
-  return AddFreshPage();
-}
-
-bool NewSpace::AddParkedAllocationBuffer(int size_in_bytes,
-                                         AllocationAlignment alignment) {
+bool SemiSpaceNewSpace::AddParkedAllocationBuffer(
+    int size_in_bytes, AllocationAlignment alignment) {
   int parked_size = 0;
   Address start = 0;
   for (auto it = parked_allocation_buffers_.begin();
@@ -674,8 +666,8 @@ bool NewSpace::AddParkedAllocationBuffer(int size_in_bytes,
 }
 
 #if DEBUG
-void NewSpace::VerifyTop() const {
-  NewSpaceBase::VerifyTop();
+void SemiSpaceNewSpace::VerifyTop() const {
+  NewSpace::VerifyTop();
 
   // Ensure that original_top_ always >= LAB start. The delta between start_
   // and top_ is still to be processed by allocation observers.
@@ -690,7 +682,7 @@ void NewSpace::VerifyTop() const {
 #ifdef VERIFY_HEAP
 // We do not use the SemiSpaceObjectIterator because verification doesn't assume
 // that it works (it depends on the invariants we are checking).
-void NewSpace::Verify(Isolate* isolate) const {
+void SemiSpaceNewSpace::Verify(Isolate* isolate) const {
   // The allocation pointer should be in the space or at the very end.
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 
@@ -774,5 +766,49 @@ void NewSpace::Verify(Isolate* isolate) const {
 }
 #endif
 
+bool SemiSpaceNewSpace::ShouldBePromoted(Address address) const {
+  Page* page = Page::FromAddress(address);
+  Address current_age_mark = age_mark();
+  return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
+         (!page->ContainsLimit(current_age_mark) || address < current_age_mark);
+}
+
+std::unique_ptr<ObjectIterator> SemiSpaceNewSpace::GetObjectIterator(
+    Heap* heap) {
+  return std::unique_ptr<ObjectIterator>(new SemiSpaceObjectIterator(this));
+}
+
+bool SemiSpaceNewSpace::ContainsSlow(Address a) const {
+  return from_space_.ContainsSlow(a) || to_space_.ContainsSlow(a);
+}
+
+size_t SemiSpaceNewSpace::AllocatedSinceLastGC() const {
+  const Address age_mark = to_space_.age_mark();
+  DCHECK_NE(age_mark, kNullAddress);
+  DCHECK_NE(top(), kNullAddress);
+  Page* const age_mark_page = Page::FromAllocationAreaAddress(age_mark);
+  Page* const last_page = Page::FromAllocationAreaAddress(top());
+  Page* current_page = age_mark_page;
+  size_t allocated = 0;
+  if (current_page != last_page) {
+    DCHECK_EQ(current_page, age_mark_page);
+    DCHECK_GE(age_mark_page->area_end(), age_mark);
+    allocated += age_mark_page->area_end() - age_mark;
+    current_page = current_page->next_page();
+  } else {
+    DCHECK_GE(top(), age_mark);
+    return top() - age_mark;
+  }
+  while (current_page != last_page) {
+    DCHECK_NE(current_page, age_mark_page);
+    allocated += MemoryChunkLayout::AllocatableMemoryInDataPage();
+    current_page = current_page->next_page();
+  }
+  DCHECK_GE(top(), current_page->area_start());
+  allocated += top() - current_page->area_start();
+  DCHECK_LE(allocated, Size());
+  return allocated;
+}
+
 }  // namespace internal
 }  // namespace v8
@@ -21,6 +21,7 @@ namespace internal {
 
 class Heap;
 class MemoryChunk;
+class SemiSpaceNewSpace;
 
 enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
 
@@ -33,7 +34,7 @@ using ParkedAllocationBuffersVector = std::vector<ParkedAllocationBuffer>;
 // A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
 // The mark-compact collector uses the memory of the first page in the from
 // space as a marking stack when tracing live objects.
-class SemiSpace : public Space {
+class SemiSpace final : public Space {
  public:
   using iterator = PageIterator;
   using const_iterator = ConstPageIterator;
@@ -58,7 +59,7 @@ class SemiSpace : public Space {
   void TearDown();
 
   bool Commit();
-  bool Uncommit();
+  void Uncommit();
   bool IsCommitted() const { return !memory_chunk_list_.Empty(); }
 
   // Grow the semispace to the new capacity. The new capacity requested must
@@ -106,7 +107,7 @@ class SemiSpace : public Space {
   void PrependPage(Page* page);
   void MovePageToTheEnd(Page* page);
 
-  Page* InitializePage(MemoryChunk* chunk) override;
+  Page* InitializePage(MemoryChunk* chunk) final;
 
   // Age mark accessors.
   Address age_mark() const { return age_mark_; }
@@ -127,28 +128,28 @@ class SemiSpace : public Space {
   SemiSpaceId id() const { return id_; }
 
   // Approximate amount of physical memory committed for this space.
-  size_t CommittedPhysicalMemory() const override;
+  size_t CommittedPhysicalMemory() const final;
 
   // If we don't have these here then SemiSpace will be abstract. However
   // they should never be called:
 
-  size_t Size() const override { UNREACHABLE(); }
+  size_t Size() const final { UNREACHABLE(); }
 
-  size_t SizeOfObjects() const override { return Size(); }
+  size_t SizeOfObjects() const final { return Size(); }
 
-  size_t Available() const override { UNREACHABLE(); }
+  size_t Available() const final { UNREACHABLE(); }
 
-  Page* first_page() override {
+  Page* first_page() final {
     return reinterpret_cast<Page*>(memory_chunk_list_.front());
   }
-  Page* last_page() override {
+  Page* last_page() final {
     return reinterpret_cast<Page*>(memory_chunk_list_.back());
   }
 
-  const Page* first_page() const override {
+  const Page* first_page() const final {
     return reinterpret_cast<const Page*>(memory_chunk_list_.front());
   }
-  const Page* last_page() const override {
+  const Page* last_page() const final {
     return reinterpret_cast<const Page*>(memory_chunk_list_.back());
   }
 
@@ -158,10 +159,10 @@ class SemiSpace : public Space {
   const_iterator begin() const { return const_iterator(first_page()); }
   const_iterator end() const { return const_iterator(nullptr); }
 
-  std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
+  std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) final;
 
 #ifdef DEBUG
-  V8_EXPORT_PRIVATE void Print() override;
+  V8_EXPORT_PRIVATE void Print() final;
   // Validate a range of addresses in a SemiSpace.
   // The "from" address must be on a page prior to the "to" address,
   // in the linked page order, or it must be earlier on the same page.
@@ -208,7 +209,7 @@ class SemiSpace : public Space {
 
   Page* current_page_;
 
-  friend class NewSpace;
+  friend class SemiSpaceNewSpace;
   friend class SemiSpaceObjectIterator;
 };
 
@@ -220,9 +221,9 @@ class SemiSpace : public Space {
 class SemiSpaceObjectIterator : public ObjectIterator {
  public:
   // Create an iterator over the allocated objects in the given to-space.
-  explicit SemiSpaceObjectIterator(const NewSpace* space);
+  explicit SemiSpaceObjectIterator(const SemiSpaceNewSpace* space);
 
-  inline HeapObject Next() override;
+  inline HeapObject Next() final;
 
  private:
   void Initialize(Address start, Address end);
@@ -233,15 +234,16 @@ class SemiSpaceObjectIterator : public ObjectIterator {
   Address limit_;
 };
 
-class NewSpaceBase : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
+class NewSpace : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
 public:
   using iterator = PageIterator;
   using const_iterator = ConstPageIterator;
 
-  NewSpaceBase(Heap* heap, LinearAllocationArea* allocation_info);
+  NewSpace(Heap* heap, LinearAllocationArea* allocation_info);
 
   inline bool Contains(Object o) const;
   inline bool Contains(HeapObject o) const;
+  virtual bool ContainsSlow(Address a) const = 0;
 
   void ResetParkedAllocationBuffers();
 
@@ -277,7 +279,49 @@ class NewSpaceBase : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
   void MakeLinearAllocationAreaIterable();
 
   // Creates a filler object in the linear allocation area and closes it.
-  void FreeLinearAllocationArea() override;
+  void FreeLinearAllocationArea() final;
+
+  bool IsAtMaximumCapacity() const {
+    return TotalCapacity() == MaximumCapacity();
+  }
+
+  size_t ExternalBackingStoreOverallBytes() const {
+    size_t result = 0;
+    for (int i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
+      result +=
+          ExternalBackingStoreBytes(static_cast<ExternalBackingStoreType>(i));
+    }
+    return result;
+  }
+
+  virtual size_t Capacity() const = 0;
+  virtual size_t TotalCapacity() const = 0;
+  virtual size_t MaximumCapacity() const = 0;
+  virtual size_t AllocatedSinceLastGC() const = 0;
+
+  // Grow the capacity of the space.
+  virtual void Grow() = 0;
+
+  // Shrink the capacity of the space.
+  virtual void Shrink() = 0;
+
+  virtual bool ShouldBePromoted(Address) const = 0;
+
+#ifdef VERIFY_HEAP
+  virtual void Verify(Isolate* isolate) const = 0;
+#endif
+
+  virtual iterator begin() = 0;
+  virtual iterator end() = 0;
+
+  virtual const_iterator begin() const = 0;
+  virtual const_iterator end() const = 0;
+
+  virtual Address first_allocatable_address() const = 0;
+
+  virtual void ResetLinearAllocationArea() = 0;
+
+  virtual bool AddFreshPage() = 0;
 
  protected:
   static const int kAllocationBufferParkingThreshold = 4 * KB;
@@ -295,7 +339,7 @@ class NewSpaceBase : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
 
   ParkedAllocationBuffersVector parked_allocation_buffers_;
 
-  bool SupportsAllocationObserver() const override { return true; }
+  bool SupportsAllocationObserver() const final { return true; }
 };
 
 // -----------------------------------------------------------------------------
@@ -304,25 +348,30 @@ class NewSpaceBase : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
 // The new space consists of a contiguous pair of semispaces. It simply
 // forwards most functions to the appropriate semispace.
 
-class V8_EXPORT_PRIVATE NewSpace final : public NewSpaceBase {
+class V8_EXPORT_PRIVATE SemiSpaceNewSpace final : public NewSpace {
  public:
-  NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
-           size_t initial_semispace_capacity, size_t max_semispace_capacity,
-           LinearAllocationArea* allocation_info);
+  static SemiSpaceNewSpace* From(NewSpace* space) {
+    return static_cast<SemiSpaceNewSpace*>(space);
+  }
 
-  ~NewSpace() override;
+  SemiSpaceNewSpace(Heap* heap, v8::PageAllocator* page_allocator,
+                    size_t initial_semispace_capacity,
+                    size_t max_semispace_capacity,
+                    LinearAllocationArea* allocation_info);
 
-  inline bool ContainsSlow(Address a) const;
+  ~SemiSpaceNewSpace() final;
+
+  bool ContainsSlow(Address a) const final;
 
   // Flip the pair of spaces.
   void Flip();
 
   // Grow the capacity of the semispaces. Assumes that they are not at
   // their maximum capacity.
-  void Grow();
+  void Grow() final;
 
   // Shrink the capacity of the semispaces.
-  void Shrink();
+  void Shrink() final;
 
   // Return the allocated bytes in the active semispace.
   size_t Size() const final {
@@ -335,7 +384,7 @@ class V8_EXPORT_PRIVATE NewSpace final : public NewSpaceBase {
   size_t SizeOfObjects() const final { return Size(); }
 
   // Return the allocatable capacity of a semispace.
-  size_t Capacity() const {
+  size_t Capacity() const final {
     SLOW_DCHECK(to_space_.target_capacity() == from_space_.target_capacity());
     return (to_space_.target_capacity() / Page::kPageSize) *
            MemoryChunkLayout::AllocatableMemoryInDataPage();
@@ -343,7 +392,7 @@ class V8_EXPORT_PRIVATE NewSpace final : public NewSpaceBase {
 
   // Return the current size of a semispace, allocatable and non-allocatable
   // memory.
-  size_t TotalCapacity() const {
+  size_t TotalCapacity() const final {
     DCHECK(to_space_.target_capacity() == from_space_.target_capacity());
     return to_space_.target_capacity();
   }
@@ -375,42 +424,7 @@ class V8_EXPORT_PRIVATE NewSpace final : public NewSpaceBase {
     return to_space_.ExternalBackingStoreBytes(type);
   }
 
-  size_t ExternalBackingStoreBytes() const {
-    size_t result = 0;
-    for (int i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
-      result +=
-          ExternalBackingStoreBytes(static_cast<ExternalBackingStoreType>(i));
-    }
-    return result;
-  }
-
-  size_t AllocatedSinceLastGC() const {
-    const Address age_mark = to_space_.age_mark();
-    DCHECK_NE(age_mark, kNullAddress);
-    DCHECK_NE(top(), kNullAddress);
-    Page* const age_mark_page = Page::FromAllocationAreaAddress(age_mark);
-    Page* const last_page = Page::FromAllocationAreaAddress(top());
-    Page* current_page = age_mark_page;
-    size_t allocated = 0;
-    if (current_page != last_page) {
-      DCHECK_EQ(current_page, age_mark_page);
-      DCHECK_GE(age_mark_page->area_end(), age_mark);
-      allocated += age_mark_page->area_end() - age_mark;
-      current_page = current_page->next_page();
-    } else {
-      DCHECK_GE(top(), age_mark);
-      return top() - age_mark;
-    }
-    while (current_page != last_page) {
-      DCHECK_NE(current_page, age_mark_page);
-      allocated += MemoryChunkLayout::AllocatableMemoryInDataPage();
-      current_page = current_page->next_page();
-    }
-    DCHECK_GE(top(), current_page->area_start());
-    allocated += top() - current_page->area_start();
-    DCHECK_LE(allocated, Size());
-    return allocated;
-  }
+  size_t AllocatedSinceLastGC() const final;
 
   void MovePageFromSpaceToSpace(Page* page) {
     DCHECK(page->IsFromPage());
@@ -421,15 +435,11 @@ class V8_EXPORT_PRIVATE NewSpace final : public NewSpaceBase {
   bool Rebalance();
 
   // Return the maximum capacity of a semispace.
-  size_t MaximumCapacity() const {
+  size_t MaximumCapacity() const final {
    DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
    return to_space_.maximum_capacity();
   }
 
-  bool IsAtMaximumCapacity() const {
-    return TotalCapacity() == MaximumCapacity();
-  }
-
  // Returns the initial capacity of a semispace.
  size_t InitialTotalCapacity() const {
    DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
@@ -442,7 +452,9 @@ class V8_EXPORT_PRIVATE NewSpace final : public NewSpaceBase {
 
   // Return the address of the first allocatable address in the active
   // semispace. This may be the address where the first object resides.
-  Address first_allocatable_address() const { return to_space_.space_start(); }
+  Address first_allocatable_address() const final {
+    return to_space_.space_start();
+  }
 
   // Get the age mark of the inactive semispace.
   Address age_mark() const { return from_space_.age_mark(); }
@@ -450,28 +462,27 @@ class V8_EXPORT_PRIVATE NewSpace final : public NewSpaceBase {
   void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
 
   // Reset the allocation pointer to the beginning of the active semispace.
-  void ResetLinearAllocationArea();
+  void ResetLinearAllocationArea() final;
 
   // When inline allocation stepping is active, either because of incremental
   // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
   // inline allocation every once in a while. This is done by setting
   // allocation_info_.limit to be lower than the actual limit and increasing
   // it in steps to guarantee that the observers are notified periodically.
-  void UpdateInlineAllocationLimit(size_t size_in_bytes) override;
+  void UpdateInlineAllocationLimit(size_t size_in_bytes) final;
 
   // Try to switch the active semispace to a new, empty, page.
   // Returns false if this isn't possible or reasonable (i.e., there
   // are no pages, or the current page is already empty), or true
   // if successful.
-  bool AddFreshPage();
-  bool AddFreshPageSynchronized();
+  bool AddFreshPage() final;
 
   bool AddParkedAllocationBuffer(int size_in_bytes,
                                  AllocationAlignment alignment);
 
 #ifdef VERIFY_HEAP
   // Verify the active semispace.
-  virtual void Verify(Isolate* isolate) const;
+  void Verify(Isolate* isolate) const final;
 #endif
 
 #ifdef DEBUG
@@ -481,35 +492,42 @@ class V8_EXPORT_PRIVATE NewSpace final : public NewSpaceBase {
 
   // Return whether the operation succeeded.
   bool CommitFromSpaceIfNeeded() {
-    if (from_space_.IsCommitted()) return true;
-    return from_space_.Commit();
+    if (from_space_.IsCommitted() || from_space_.Commit()) return true;
+
+    // Committing memory to from space failed.
+    // Memory is exhausted and we will die.
+    heap_->FatalProcessOutOfMemory("Committing semi space failed.");
  }
 
-  bool UncommitFromSpace() {
-    if (!from_space_.IsCommitted()) return true;
-    return from_space_.Uncommit();
+  void UncommitFromSpace() {
+    if (!from_space_.IsCommitted()) return;
+    from_space_.Uncommit();
   }
 
   bool IsFromSpaceCommitted() const { return from_space_.IsCommitted(); }
 
   SemiSpace* active_space() { return &to_space_; }
 
-  Page* first_page() override { return to_space_.first_page(); }
-  Page* last_page() override { return to_space_.last_page(); }
+  Page* first_page() final { return to_space_.first_page(); }
+  Page* last_page() final { return to_space_.last_page(); }
 
-  const Page* first_page() const override { return to_space_.first_page(); }
-  const Page* last_page() const override { return to_space_.last_page(); }
+  const Page* first_page() const final { return to_space_.first_page(); }
+  const Page* last_page() const final { return to_space_.last_page(); }
 
-  iterator begin() { return to_space_.begin(); }
-  iterator end() { return to_space_.end(); }
+  iterator begin() final { return to_space_.begin(); }
+  iterator end() final { return to_space_.end(); }
 
-  const_iterator begin() const { return to_space_.begin(); }
-  const_iterator end() const { return to_space_.end(); }
+  const_iterator begin() const final { return to_space_.begin(); }
+  const_iterator end() const final { return to_space_.end(); }
 
-  std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
+  std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) final;
 
   SemiSpace& from_space() { return from_space_; }
   const SemiSpace& from_space() const { return from_space_; }
   SemiSpace& to_space() { return to_space_; }
   const SemiSpace& to_space() const { return to_space_; }
 
+  bool ShouldBePromoted(Address address) const final;
+
  private:
   // Update linear allocation area to match the current to-space page.
@@ -8,6 +8,7 @@
 #include "src/heap/evacuation-allocator-inl.h"
 #include "src/heap/incremental-marking-inl.h"
 #include "src/heap/memory-chunk.h"
+#include "src/heap/new-spaces.h"
 #include "src/heap/scavenger.h"
 #include "src/objects/map.h"
 #include "src/objects/objects-inl.h"
@@ -245,7 +246,8 @@ SlotCallbackResult Scavenger::EvacuateObjectDefault(
   SLOW_DCHECK(static_cast<size_t>(object_size) <=
               MemoryChunkLayout::AllocatableMemoryInDataPage());
 
-  if (!heap()->ShouldBePromoted(object.address())) {
+  if (!SemiSpaceNewSpace::From(heap()->new_space())
+           ->ShouldBePromoted(object.address())) {
     // A semi-space copy may fail due to fragmentation. In that case, we
     // try to promote the object.
     result = SemiSpaceCopyObject(map, slot, object, object_size, object_fields);
@@ -404,11 +404,14 @@ void ScavengerCollector::CollectGarbage() {
     }
   }
 
+  SemiSpaceNewSpace* semi_space_new_space =
+      SemiSpaceNewSpace::From(heap_->new_space());
+
   if (FLAG_concurrent_marking) {
     // Ensure that concurrent marker does not track pages that are
     // going to be unmapped.
     for (Page* p :
-         PageRange(heap_->new_space()->from_space().first_page(), nullptr)) {
+         PageRange(semi_space_new_space->from_space().first_page(), nullptr)) {
       heap_->concurrent_marking()->ClearMemoryChunkData(p);
    }
  }
@@ -416,7 +419,7 @@ void ScavengerCollector::CollectGarbage() {
   ProcessWeakReferences(&ephemeron_table_list);
 
   // Set age mark.
-  heap_->new_space_->set_age_mark(heap_->new_space()->top());
+  semi_space_new_space->set_age_mark(semi_space_new_space->top());
 
   // Since we promote all surviving large objects immediately, all remaining
   // large objects must be dead.
@@ -532,7 +535,10 @@ void ScavengerCollector::MergeSurvivingNewLargeObjects(
 int ScavengerCollector::NumberOfScavengeTasks() {
   if (!FLAG_parallel_scavenge) return 1;
   const int num_scavenge_tasks =
-      static_cast<int>(heap_->new_space()->TotalCapacity()) / MB + 1;
+      static_cast<int>(
+          SemiSpaceNewSpace::From(heap_->new_space())->TotalCapacity()) /
+          MB +
+      1;
   static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
   int tasks = std::max(
       1, std::min({num_scavenge_tasks, kMaxScavengerTasks, num_cores}));
@@ -910,6 +910,16 @@ int FixedArrayLenFromSize(int size) {
                    FixedArray::kMaxRegularLength});
 }
 
+int GetSpaceRemainingOnCurrentPage(v8::internal::NewSpace* space) {
+  Address top = space->top();
+  if ((top & kPageAlignmentMask) == 0) {
+    // `top` pointing to the start of a page signifies that there is no room
+    // left on the current page.
+    return 0;
+  }
+  return static_cast<int>(Page::FromAddress(space->top())->area_end() - top);
+}
+
 void FillUpOneNewSpacePage(Isolate* isolate, Heap* heap) {
   DCHECK(!FLAG_single_generation);
   PauseAllocationObserversScope pause_observers(heap);
@@ -919,8 +929,7 @@ void FillUpOneNewSpacePage(Isolate* isolate, Heap* heap) {
   // the current allocation pointer.
   DCHECK_IMPLIES(!space->IsInlineAllocationEnabled(),
                  space->limit() == space->top());
-  int space_remaining =
-      static_cast<int>(space->to_space().page_high() - space->top());
+  int space_remaining = GetSpaceRemainingOnCurrentPage(space);
   while (space_remaining > 0) {
     int length = FixedArrayLenFromSize(space_remaining);
     if (length > 0) {
@@ -5,6 +5,7 @@
 #include "test/cctest/heap/heap-utils.h"
 
 #include "src/base/platform/mutex.h"
+#include "src/common/globals.h"
 #include "src/execution/isolate.h"
 #include "src/heap/factory.h"
 #include "src/heap/heap-inl.h"
@@ -134,6 +135,18 @@ bool FillCurrentPage(v8::internal::NewSpace* space,
   return heap::FillCurrentPageButNBytes(space, 0, out_handles);
 }
 
+namespace {
+int GetSpaceRemainingOnCurrentPage(v8::internal::NewSpace* space) {
+  Address top = space->top();
+  if ((top & kPageAlignmentMask) == 0) {
+    // `top` pointing to the start of a page signifies that there is no room
+    // left on the current page.
+    return 0;
+  }
+  return static_cast<int>(Page::FromAddress(space->top())->area_end() - top);
+}
+}  // namespace
+
 bool FillCurrentPageButNBytes(v8::internal::NewSpace* space, int extra_bytes,
                               std::vector<Handle<FixedArray>>* out_handles) {
   PauseAllocationObserversScope pause_observers(space->heap());
@@ -142,8 +155,7 @@ bool FillCurrentPageButNBytes(v8::internal::NewSpace* space, int extra_bytes,
   // the current allocation pointer.
   DCHECK_IMPLIES(!space->IsInlineAllocationEnabled(),
                  space->limit() == space->top());
-  int space_remaining =
-      static_cast<int>(space->to_space().page_high() - space->top());
+  int space_remaining = GetSpaceRemainingOnCurrentPage(space);
   CHECK(space_remaining >= extra_bytes);
   int new_linear_size = space_remaining - extra_bytes;
   if (new_linear_size == 0) return false;
@@ -78,7 +78,8 @@ UNINITIALIZED_TEST(PagePromotion_NewToOld) {
     CHECK_GT(handles.size(), 0u);
     Page* const to_be_promoted_page = FindLastPageInNewSpace(handles);
     CHECK_NOT_NULL(to_be_promoted_page);
-    CHECK(!to_be_promoted_page->Contains(heap->new_space()->age_mark()));
+    CHECK(!to_be_promoted_page->Contains(
+        SemiSpaceNewSpace::From(heap->new_space())->age_mark()));
     // To perform a sanity check on live bytes we need to mark the heap.
     heap::SimulateIncrementalMarking(heap, true);
     // Sanity check that the page meets the requirements for promotion.
@@ -289,7 +289,7 @@ TEST(ComputeDiscardMemoryAreas) {
   CHECK_EQ(memory_area.size(), page_size * 2);
 }
 
-TEST(NewSpace) {
+TEST(SemiSpaceNewSpace) {
   if (FLAG_single_generation) return;
   Isolate* isolate = CcTest::i_isolate();
   Heap* heap = isolate->heap();
@@ -297,10 +297,11 @@ TEST(NewSpace) {
   MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
   LinearAllocationArea allocation_info;
 
-  std::unique_ptr<NewSpace> new_space = std::make_unique<NewSpace>(
-      heap, memory_allocator->data_page_allocator(),
-      CcTest::heap()->InitialSemiSpaceSize(),
-      CcTest::heap()->InitialSemiSpaceSize(), &allocation_info);
+  std::unique_ptr<SemiSpaceNewSpace> new_space =
+      std::make_unique<SemiSpaceNewSpace>(
+          heap, memory_allocator->data_page_allocator(),
+          CcTest::heap()->InitialSemiSpaceSize(),
+          CcTest::heap()->InitialSemiSpaceSize(), &allocation_info);
   CHECK(new_space->MaximumCapacity());
 
   while (new_space->Available() >= kMaxRegularHeapObjectSize) {
@@ -313,7 +314,6 @@ TEST(SemiSpaceNewSpace) {
   memory_allocator->unmapper()->EnsureUnmappingCompleted();
 }
 
-
 TEST(OldSpace) {
   Isolate* isolate = CcTest::i_isolate();
   Heap* heap = isolate->heap();