[heap] Non-contiguous young generation
This change removes the large contiguous backing store from the young
generation and replaces it with regular pages. We keep a pool of pages that
are committed and uncommitted as the space grows and shrinks, so that
resizing does not have to create and destroy virtual memory mappings.

BUG=chromium:581412
LOG=N

Review URL: https://codereview.chromium.org/1853783002
Cr-Commit-Position: refs/heads/master@{#35261}
Commit: 3f92137209
Parent: cf951dfb37
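[Editor's note: the pooling idea in one self-contained sketch; this is not code
from the commit. V8's real mechanism is MemoryAllocator and base::VirtualMemory;
the names and the page size below are illustrative, and the calls are POSIX/Linux.]

    #include <sys/mman.h>
    #include <vector>

    static const size_t kPageSize = 512 * 1024;  // stand-in for Page::kPageSize

    // A pool of reserved-but-uncommitted pages. Growing the young generation
    // takes a page from the pool and commits it; shrinking uncommits the page
    // and returns it to the pool. The address range stays reserved, so resizing
    // never creates or destroys virtual memory mappings.
    class PagePool {
     public:
      void* Allocate() {
        void* page = nullptr;
        if (!pool_.empty()) {
          page = pool_.back();
          pool_.pop_back();
        } else {
          // Reserve an inaccessible range once; it is reused from then on.
          page = mmap(nullptr, kPageSize, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
          if (page == MAP_FAILED) return nullptr;
        }
        // Commit: make the reserved range readable and writable.
        if (mprotect(page, kPageSize, PROT_READ | PROT_WRITE) != 0)
          return nullptr;
        return page;
      }

      void Free(void* page) {
        // Uncommit: drop the backing memory but keep the reservation.
        madvise(page, kPageSize, MADV_DONTNEED);
        mprotect(page, kPageSize, PROT_NONE);
        pool_.push_back(page);
      }

     private:
      std::vector<void*> pool_;
    };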
@@ -291,6 +291,10 @@ class VirtualMemory {
   // by address().
   VirtualMemory(size_t size, size_t alignment);
 
+  // Construct a virtual memory by assigning it some already mapped address
+  // and size.
+  VirtualMemory(void* address, size_t size) : address_(address), size_(size) {}
+
   // Releases the reserved memory, if any, controlled by this VirtualMemory
   // object.
   ~VirtualMemory();
@@ -75,7 +75,6 @@ Heap::Heap()
       code_range_size_(0),
       // semispace_size_ should be a power of 2 and old_generation_size_ should
       // be a multiple of Page::kPageSize.
-      reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
       max_semi_space_size_(8 * (kPointerSize / 4) * MB),
       initial_semispace_size_(Page::kPageSize),
       max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
@@ -4915,32 +4914,10 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
     max_semi_space_size_ = Page::kPageSize;
   }
 
-  if (isolate()->snapshot_available()) {
-    // If we are using a snapshot we always reserve the default amount
-    // of memory for each semispace because code in the snapshot has
-    // write-barrier code that relies on the size and alignment of new
-    // space. We therefore cannot use a larger max semispace size
-    // than the default reserved semispace size.
-    if (max_semi_space_size_ > reserved_semispace_size_) {
-      max_semi_space_size_ = reserved_semispace_size_;
-      if (FLAG_trace_gc) {
-        PrintIsolate(isolate_,
-                     "Max semi-space size cannot be more than %d kbytes\n",
-                     reserved_semispace_size_ >> 10);
-      }
-    }
-  } else {
-    // If we are not using snapshots we reserve space for the actual
-    // max semispace size.
-    reserved_semispace_size_ = max_semi_space_size_;
-  }
-
   // The new space size must be a power of two to support single-bit testing
   // for containment.
   max_semi_space_size_ =
       base::bits::RoundUpToPowerOfTwo32(max_semi_space_size_);
-  reserved_semispace_size_ =
-      base::bits::RoundUpToPowerOfTwo32(reserved_semispace_size_);
 
   if (FLAG_min_semi_space_size > 0) {
     int initial_semispace_size = FLAG_min_semi_space_size * MB;
@@ -5284,7 +5261,7 @@ bool Heap::SetUp() {
   incremental_marking_ = new IncrementalMarking(this);
 
   // Set up new space.
-  if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) {
+  if (!new_space_.SetUp(initial_semispace_size_, max_semi_space_size_)) {
     return false;
   }
   new_space_top_after_last_gc_ = new_space()->top();
@@ -1174,16 +1174,11 @@ class Heap {
   // GC statistics. ============================================================
   // ===========================================================================
 
-  // Returns the maximum amount of memory reserved for the heap. For
-  // the young generation, we reserve 4 times the amount needed for a
-  // semi space. The young generation consists of two semi spaces and
-  // we reserve twice the amount needed for those in order to ensure
-  // that new space can be aligned to its size.
+  // Returns the maximum amount of memory reserved for the heap.
   intptr_t MaxReserved() {
-    return 4 * reserved_semispace_size_ + max_old_generation_size_;
+    return 2 * max_semi_space_size_ + max_old_generation_size_;
   }
   int MaxSemiSpaceSize() { return max_semi_space_size_; }
-  int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
   int InitialSemiSpaceSize() { return initial_semispace_size_; }
   intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
   intptr_t MaxExecutableSize() { return max_executable_size_; }
@@ -1995,10 +1990,8 @@ class Heap {
   Object* roots_[kRootListLength];
 
   size_t code_range_size_;
-  int reserved_semispace_size_;
   int max_semi_space_size_;
   int initial_semispace_size_;
-  int target_semispace_size_;
   intptr_t max_old_generation_size_;
   intptr_t initial_old_generation_size_;
   bool old_generation_size_configured_;
@@ -251,6 +251,19 @@ AllocationSpace AllocationResult::RetrySpace() {
   return static_cast<AllocationSpace>(Smi::cast(object_)->value());
 }
 
+NewSpacePage* NewSpacePage::Initialize(Heap* heap, MemoryChunk* chunk,
+                                       Executability executable,
+                                       SemiSpace* owner) {
+  DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
+  bool in_to_space = (owner->id() != kFromSpace);
+  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
+                             : MemoryChunk::IN_FROM_SPACE);
+  DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
+                                       : MemoryChunk::IN_TO_SPACE));
+  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
+  heap->incremental_marking()->SetNewSpacePageFlags(page);
+  return page;
+}
+
 // --------------------------------------------------------------------------
 // PagedSpace
@@ -261,6 +274,7 @@ Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
   page->mutex_ = new base::Mutex();
   DCHECK(page->area_size() <= kAllocatableMemory);
   DCHECK(chunk->owner() == owner);
 
+  owner->IncreaseCapacity(page->area_size());
   heap->incremental_marking()->SetOldSpacePageFlags(chunk);
 
@@ -315,15 +315,18 @@ bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
 
 
 void MemoryAllocator::TearDown() {
+  for (MemoryChunk* chunk : chunk_pool_) {
+    FreeMemory(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize,
+               NOT_EXECUTABLE);
+  }
   // Check that spaces were torn down before MemoryAllocator.
-  DCHECK(size_.Value() == 0);
+  DCHECK_EQ(size_.Value(), 0);
   // TODO(gc) this will be true again when we fix FreeMemory.
   // DCHECK(size_executable_ == 0);
   capacity_ = 0;
   capacity_executable_ = 0;
 }
 
 
 bool MemoryAllocator::CommitMemory(Address base, size_t size,
                                    Executability executable) {
   if (!base::VirtualMemory::CommitRegion(base, size,
@@ -335,20 +338,6 @@ bool MemoryAllocator::CommitMemory(Address base, size_t size,
 }
 
 
-void MemoryAllocator::FreeNewSpaceMemory(Address addr,
-                                         base::VirtualMemory* reservation,
-                                         Executability executable) {
-  LOG(isolate_, DeleteEvent("NewSpace", addr));
-
-  DCHECK(reservation->IsReserved());
-  const intptr_t size = static_cast<intptr_t>(reservation->size());
-  DCHECK(size_.Value() >= size);
-  size_.Increment(-size);
-  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
-  FreeMemory(reservation, NOT_EXECUTABLE);
-}
-
-
 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
                                  Executability executable) {
   // TODO(gc) make code_range part of memory allocator?
@@ -433,26 +422,6 @@ void Page::InitializeAsAnchor(PagedSpace* owner) {
   set_next_page(this);
 }
 
 
-NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start,
-                                       SemiSpace* semi_space) {
-  Address area_start = start + NewSpacePage::kObjectStartOffset;
-  Address area_end = start + Page::kPageSize;
-
-  MemoryChunk* chunk =
-      MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
-                              area_end, NOT_EXECUTABLE, semi_space, nullptr);
-  bool in_to_space = (semi_space->id() != kFromSpace);
-  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
-                             : MemoryChunk::IN_FROM_SPACE);
-  DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
-                                       : MemoryChunk::IN_TO_SPACE));
-  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
-  heap->incremental_marking()->SetNewSpacePageFlags(page);
-  return page;
-}
-
-
 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
   set_owner(semi_space);
   set_next_chunk(this);
@@ -715,15 +684,6 @@ void Page::ResetFreeListStatistics() {
   available_in_free_list_ = 0;
 }
 
 
-Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner,
-                                    Executability executable) {
-  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
-  if (chunk == NULL) return NULL;
-  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
-}
-
-
 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                               Space* owner,
                                               Executability executable) {
@@ -782,12 +742,75 @@ void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
   }
 }
 
 
+template <MemoryAllocator::AllocationMode mode>
 void MemoryAllocator::Free(MemoryChunk* chunk) {
-  PreFreeMemory(chunk);
-  PerformFreeMemory(chunk);
+  if (mode == kRegular) {
+    PreFreeMemory(chunk);
+    PerformFreeMemory(chunk);
+  } else {
+    DCHECK_EQ(mode, kPooled);
+    FreePooled(chunk);
+  }
 }
 
+template void MemoryAllocator::Free<MemoryAllocator::kRegular>(
+    MemoryChunk* chunk);
+
+template void MemoryAllocator::Free<MemoryAllocator::kPooled>(
+    MemoryChunk* chunk);
+
+template <typename PageType, MemoryAllocator::AllocationMode mode,
+          typename SpaceType>
+PageType* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
+                                        Executability executable) {
+  MemoryChunk* chunk = nullptr;
+  if (mode == kPooled) {
+    DCHECK_EQ(size, static_cast<intptr_t>(MemoryChunk::kAllocatableMemory));
+    DCHECK_EQ(executable, NOT_EXECUTABLE);
+    chunk = AllocatePagePooled(owner);
+  }
+  if (chunk == nullptr) {
+    chunk = AllocateChunk(size, size, executable, owner);
+  }
+  if (chunk == nullptr) return nullptr;
+  return PageType::Initialize(isolate_->heap(), chunk, executable, owner);
+}
+
+template Page* MemoryAllocator::AllocatePage<Page, MemoryAllocator::kRegular,
+                                             PagedSpace>(intptr_t, PagedSpace*,
+                                                         Executability);
+
+template NewSpacePage* MemoryAllocator::AllocatePage<
+    NewSpacePage, MemoryAllocator::kPooled, SemiSpace>(intptr_t, SemiSpace*,
+                                                       Executability);
+
+template <typename SpaceType>
+MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
+  if (chunk_pool_.is_empty()) return nullptr;
+  const int size = MemoryChunk::kPageSize;
+  MemoryChunk* chunk = chunk_pool_.RemoveLast();
+  const Address start = reinterpret_cast<Address>(chunk);
+  const Address area_start = start + MemoryChunk::kObjectStartOffset;
+  const Address area_end = start + size;
+  CommitBlock(reinterpret_cast<Address>(chunk), size, NOT_EXECUTABLE);
+  base::VirtualMemory reservation(start, size);
+  MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
+                          NOT_EXECUTABLE, owner, &reservation);
+  size_.Increment(size);
+  return chunk;
+}
+
+void MemoryAllocator::FreePooled(MemoryChunk* chunk) {
+  DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
+  DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
+  chunk_pool_.Add(chunk);
+  intptr_t chunk_size = static_cast<intptr_t>(chunk->size());
+  if (chunk->executable() == EXECUTABLE) {
+    size_executable_.Increment(-chunk_size);
+  }
+  size_.Increment(-chunk_size);
+  UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
+}
+
 bool MemoryAllocator::CommitBlock(Address start, size_t size,
                                   Executability executable) {
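[Editor's note: for orientation, the two explicit instantiations above match the
two call patterns introduced later in this diff. A sketch only, with hypothetical
variable names:

    // Old/code space: regular allocation; the chunk is mapped and unmapped
    // eagerly (see PagedSpace::Expand below).
    Page* page = allocator->AllocatePage<Page>(size, paged_space,
                                               paged_space->executable());
    allocator->Free(page);  // mode defaults to kRegular

    // New space: pooled allocation; the chunk comes from chunk_pool_ and is
    // merely committed, and Free<kPooled> uncommits it back into the pool
    // (see SemiSpace::Commit/Uncommit below).
    NewSpacePage* p =
        allocator->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
            NewSpacePage::kAllocatableMemory, semi_space, NOT_EXECUTABLE);
    allocator->Free<MemoryAllocator::kPooled>(p);
]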
@@ -1159,8 +1182,8 @@ bool PagedSpace::Expand() {
 
   if (!CanExpand(size)) return false;
 
-  Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
-                                                                executable());
+  Page* p = heap()->isolate()->memory_allocator()->AllocatePage<Page>(
+      size, this, executable());
   if (p == NULL) return false;
 
   AccountCommitted(static_cast<intptr_t>(p->size()));
@@ -1290,53 +1313,28 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
 // -----------------------------------------------------------------------------
 // NewSpace implementation
 
 
-bool NewSpace::SetUp(int reserved_semispace_capacity,
+bool NewSpace::SetUp(int initial_semispace_capacity,
                      int maximum_semispace_capacity) {
-  // Set up new space based on the preallocated memory block defined by
-  // start and size. The provided space is divided into two semi-spaces.
-  // To support fast containment testing in the new space, the size of
-  // this chunk must be a power of two and it must be aligned to its size.
-  int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
-
-  size_t size = 2 * reserved_semispace_capacity;
-  Address base = heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
-      size, size, &reservation_);
-  if (base == NULL) return false;
-
-  chunk_base_ = base;
-  chunk_size_ = static_cast<uintptr_t>(size);
-  LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
-
   DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
   DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));
 
+  to_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
+  from_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
+  if (!to_space_.Commit()) {
+    return false;
+  }
+  DCHECK(!from_space_.is_committed());  // No need to use memory yet.
+  ResetAllocationInfo();
+
   // Allocate and set up the histogram arrays if necessary.
   allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
   promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
 
 #define SET_NAME(name)                        \
   allocated_histogram_[name].set_name(#name); \
   promoted_histogram_[name].set_name(#name);
   INSTANCE_TYPE_LIST(SET_NAME)
 #undef SET_NAME
 
-  DCHECK(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
-  DCHECK(static_cast<intptr_t>(chunk_size_) >=
-         2 * heap()->ReservedSemiSpaceSize());
-  DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
-
-  to_space_.SetUp(chunk_base_, initial_semispace_capacity,
-                  maximum_semispace_capacity);
-  from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
-                    initial_semispace_capacity, maximum_semispace_capacity);
-  if (!to_space_.Commit()) {
-    return false;
-  }
-  DCHECK(!from_space_.is_committed());  // No need to use memory yet.
-
-  ResetAllocationInfo();
-
   return true;
 }
@@ -1355,12 +1353,6 @@ void NewSpace::TearDown() {
 
   to_space_.TearDown();
   from_space_.TearDown();
-
-  heap()->isolate()->memory_allocator()->FreeNewSpaceMemory(
-      chunk_base_, &reservation_, NOT_EXECUTABLE);
-
-  chunk_base_ = NULL;
-  chunk_size_ = 0;
 }
 
 
@@ -1677,43 +1669,41 @@ void NewSpace::Verify() {
 // -----------------------------------------------------------------------------
 // SemiSpace implementation
 
-void SemiSpace::SetUp(Address start, int initial_capacity,
-                      int maximum_capacity) {
+void SemiSpace::SetUp(int initial_capacity, int maximum_capacity) {
   DCHECK_GE(maximum_capacity, Page::kPageSize);
   minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
   current_capacity_ = minimum_capacity_;
   maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
   committed_ = false;
-  start_ = start;
-  age_mark_ = start_ + NewSpacePage::kObjectStartOffset;
 }
 
 
 void SemiSpace::TearDown() {
-  start_ = nullptr;
-  current_capacity_ = 0;
+  // Properly uncommit memory to keep the allocator counters in sync.
+  if (is_committed()) Uncommit();
+  current_capacity_ = maximum_capacity_ = 0;
 }
 
 
 bool SemiSpace::Commit() {
   DCHECK(!is_committed());
-  if (!heap()->isolate()->memory_allocator()->CommitBlock(
-          start_, current_capacity_, executable())) {
-    return false;
-  }
-  AccountCommitted(current_capacity_);
-
   NewSpacePage* current = anchor();
   const int num_pages = current_capacity_ / Page::kPageSize;
   for (int i = 0; i < num_pages; i++) {
     NewSpacePage* new_page =
-        NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
+        heap()
+            ->isolate()
+            ->memory_allocator()
+            ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
+                NewSpacePage::kAllocatableMemory, this, executable());
     new_page->InsertAfter(current);
     current = new_page;
   }
   Reset();
 
-  set_current_capacity(current_capacity_);
+  AccountCommitted(current_capacity_);
+  if (age_mark_ == nullptr) {
+    age_mark_ = first_page()->area_start();
+  }
   committed_ = true;
   return true;
 }
@@ -1721,16 +1711,14 @@ bool SemiSpace::Commit() {
 
 bool SemiSpace::Uncommit() {
   DCHECK(is_committed());
-  Address start = start_ + maximum_capacity_ - current_capacity_;
-  if (!heap()->isolate()->memory_allocator()->UncommitBlock(
-          start, current_capacity_)) {
-    return false;
+  NewSpacePageIterator it(this);
+  while (it.has_next()) {
+    heap()->isolate()->memory_allocator()->Free<MemoryAllocator::kPooled>(
+        it.next());
   }
-  AccountUncommitted(current_capacity_);
-
   anchor()->set_next_page(anchor());
   anchor()->set_prev_page(anchor());
-
+  AccountUncommitted(current_capacity_);
   committed_ = false;
   return true;
 }
@@ -1751,62 +1739,57 @@ bool SemiSpace::GrowTo(int new_capacity) {
   if (!is_committed()) {
     if (!Commit()) return false;
   }
-  DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
+  DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0);
   DCHECK_LE(new_capacity, maximum_capacity_);
   DCHECK_GT(new_capacity, current_capacity_);
-  int pages_before = current_capacity_ / Page::kPageSize;
-  int pages_after = new_capacity / Page::kPageSize;
-
-  size_t delta = new_capacity - current_capacity_;
-
+  const int delta = new_capacity - current_capacity_;
   DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
-  if (!heap()->isolate()->memory_allocator()->CommitBlock(
-          start_ + current_capacity_, delta, executable())) {
-    return false;
-  }
-  AccountCommitted(static_cast<intptr_t>(delta));
-  set_current_capacity(new_capacity);
+  int delta_pages = delta / NewSpacePage::kPageSize;
   NewSpacePage* last_page = anchor()->prev_page();
   DCHECK_NE(last_page, anchor());
-  for (int i = pages_before; i < pages_after; i++) {
-    Address page_address = start_ + i * Page::kPageSize;
+  while (delta_pages > 0) {
     NewSpacePage* new_page =
-        NewSpacePage::Initialize(heap(), page_address, this);
+        heap()
+            ->isolate()
+            ->memory_allocator()
+            ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
+                NewSpacePage::kAllocatableMemory, this, executable());
     new_page->InsertAfter(last_page);
     Bitmap::Clear(new_page);
     // Duplicate the flags that was set on the old page.
     new_page->SetFlags(last_page->GetFlags(),
                        NewSpacePage::kCopyOnFlipFlagsMask);
     last_page = new_page;
+    delta_pages--;
   }
+  AccountCommitted(static_cast<intptr_t>(delta));
+  current_capacity_ = new_capacity;
   return true;
 }
 
 
 bool SemiSpace::ShrinkTo(int new_capacity) {
-  DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
+  DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0);
   DCHECK_GE(new_capacity, minimum_capacity_);
   DCHECK_LT(new_capacity, current_capacity_);
   if (is_committed()) {
-    size_t delta = current_capacity_ - new_capacity;
+    const int delta = current_capacity_ - new_capacity;
     DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
-
-    MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
-    if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
-      return false;
-    }
+    int delta_pages = delta / NewSpacePage::kPageSize;
+    NewSpacePage* new_last_page;
+    NewSpacePage* last_page;
+    while (delta_pages > 0) {
+      last_page = anchor()->prev_page();
+      new_last_page = last_page->prev_page();
+      new_last_page->set_next_page(anchor());
+      anchor()->set_prev_page(new_last_page);
+      heap()->isolate()->memory_allocator()->Free<MemoryAllocator::kPooled>(
+          last_page);
+      delta_pages--;
    }
     AccountUncommitted(static_cast<intptr_t>(delta));
-
-    int pages_after = new_capacity / Page::kPageSize;
-    NewSpacePage* new_last_page =
-        NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
-    new_last_page->set_next_page(anchor());
-    anchor()->set_prev_page(new_last_page);
-    DCHECK((current_page_ >= first_page()) && (current_page_ <= new_last_page));
   }
-
-  set_current_capacity(new_capacity);
-
+  current_capacity_ = new_capacity;
   return true;
 }
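[Editor's note: the net effect of the new GrowTo/ShrinkTo paths, as a
hypothetical driver. Capacities are byte counts and must be page-aligned:

    // Assuming a committed SemiSpace* s with a current capacity of two pages:
    s->GrowTo(4 * NewSpacePage::kPageSize);    // takes two pages from the pool
                                               // and links them after last_page
    s->ShrinkTo(2 * NewSpacePage::kPageSize);  // unlinks the last two pages and
                                               // returns them to the pool
]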
@@ -1853,7 +1836,6 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
   std::swap(from->current_capacity_, to->current_capacity_);
   std::swap(from->maximum_capacity_, to->maximum_capacity_);
   std::swap(from->minimum_capacity_, to->minimum_capacity_);
-  std::swap(from->start_, to->start_);
   std::swap(from->age_mark_, to->age_mark_);
   std::swap(from->committed_, to->committed_);
   std::swap(from->anchor_, to->anchor_);
@@ -27,6 +27,7 @@ class FreeList;
 class Isolate;
 class MemoryAllocator;
 class MemoryChunk;
+class NewSpacePage;
 class Page;
 class PagedSpace;
 class SemiSpace;
@@ -1246,6 +1247,11 @@ class SkipList {
 //
 class MemoryAllocator {
  public:
+  enum AllocationMode {
+    kRegular,
+    kPooled,
+  };
+
   explicit MemoryAllocator(Isolate* isolate);
 
   // Initializes its internal bookkeeping structures.
@@ -1254,8 +1260,13 @@ class MemoryAllocator {
 
   void TearDown();
 
-  Page* AllocatePage(intptr_t size, PagedSpace* owner,
-                     Executability executable);
+  // Allocates either Page or NewSpacePage from the allocator. AllocationMode
+  // is used to indicate whether pooled allocation, which only works for
+  // MemoryChunk::kPageSize, should be tried first.
+  template <typename PageType, MemoryAllocator::AllocationMode mode = kRegular,
+            typename SpaceType>
+  PageType* AllocatePage(intptr_t size, SpaceType* owner,
+                         Executability executable);
 
   LargePage* AllocateLargePage(intptr_t object_size, Space* owner,
                                Executability executable);
@@ -1267,8 +1278,9 @@ class MemoryAllocator {
   // FreeMemory can be called concurrently when PreFree was executed before.
   void PerformFreeMemory(MemoryChunk* chunk);
 
-  // Free is a wrapper method, which calls PreFree and PerformFreeMemory
-  // together.
+  // Free is a wrapper method. For kRegular AllocationMode it calls PreFree and
+  // PerformFreeMemory together. For kPooled it will dispatch to pooled free.
+  template <MemoryAllocator::AllocationMode mode = kRegular>
   void Free(MemoryChunk* chunk);
 
   // Returns allocated spaces in bytes.
@@ -1322,8 +1334,6 @@ class MemoryAllocator {
 
   bool CommitMemory(Address addr, size_t size, Executability executable);
 
-  void FreeNewSpaceMemory(Address addr, base::VirtualMemory* reservation,
-                          Executability executable);
   void FreeMemory(base::VirtualMemory* reservation, Executability executable);
   void FreeMemory(Address addr, size_t size, Executability executable);
 
@@ -1376,6 +1386,14 @@ class MemoryAllocator {
                                 size_t reserved_size);
 
  private:
+  // See AllocatePage for public interface. Note that currently we only support
+  // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
+  template <typename SpaceType>
+  MemoryChunk* AllocatePagePooled(SpaceType* owner);
+
+  // Free that chunk into the pool.
+  void FreePooled(MemoryChunk* chunk);
+
   Isolate* isolate_;
 
   // Maximum space size in bytes.
@@ -1429,6 +1447,8 @@ class MemoryAllocator {
     } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
   }
 
+  List<MemoryChunk*> chunk_pool_;
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
 };
 
@@ -2269,6 +2289,10 @@ enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
 
 class NewSpacePage : public MemoryChunk {
  public:
+  static inline NewSpacePage* Initialize(Heap* heap, MemoryChunk* chunk,
+                                         Executability executable,
+                                         SemiSpace* owner);
+
   static bool IsAtStart(Address addr) {
     return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) ==
            kObjectStartOffset;
@@ -2326,9 +2350,6 @@ class NewSpacePage : public MemoryChunk {
   // for the doubly-linked list of real pages.
   explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); }
 
-  static NewSpacePage* Initialize(Heap* heap, Address start,
-                                  SemiSpace* semi_space);
-
   // Intialize a fake NewSpacePage used as sentinel at the ends
   // of a doubly-linked list of real NewSpacePages.
   // Only uses the prev/next links, and sets flags to not be in new-space.
@@ -2354,7 +2375,6 @@ class SemiSpace : public Space {
         current_capacity_(0),
         maximum_capacity_(0),
         minimum_capacity_(0),
-        start_(nullptr),
         age_mark_(nullptr),
         committed_(false),
         id_(semispace),
@@ -2365,39 +2385,38 @@ class SemiSpace : public Space {
   inline bool Contains(Object* o);
   inline bool ContainsSlow(Address a);
 
   // Creates a space in the young generation. The constructor does not
   // allocate memory from the OS.
-  void SetUp(Address start, int initial_capacity, int maximum_capacity);
-
-  // Tear down the space. Heap memory was not allocated by the space, so it
-  // is not deallocated here.
+  void SetUp(int initial_capacity, int maximum_capacity);
   void TearDown();
+  bool HasBeenSetUp() { return maximum_capacity_ != 0; }
 
-  // True if the space has been set up but not torn down.
-  bool HasBeenSetUp() { return start_ != nullptr; }
+  bool Commit();
+  bool Uncommit();
+  bool is_committed() { return committed_; }
 
-  // Grow the semispace to the new capacity. The new capacity
-  // requested must be larger than the current capacity and less than
-  // the maximum capacity.
+  // Grow the semispace to the new capacity. The new capacity requested must
+  // be larger than the current capacity and less than the maximum capacity.
   bool GrowTo(int new_capacity);
 
-  // Shrinks the semispace to the new capacity. The new capacity
-  // requested must be more than the amount of used memory in the
-  // semispace and less than the current capacity.
+  // Shrinks the semispace to the new capacity. The new capacity requested
+  // must be more than the amount of used memory in the semispace and less
+  // than the current capacity.
   bool ShrinkTo(int new_capacity);
 
   // Returns the start address of the first page of the space.
   Address space_start() {
-    DCHECK_NE(anchor_.next_page(), &anchor_);
+    DCHECK_NE(anchor_.next_page(), anchor());
     return anchor_.next_page()->area_start();
   }
 
-  // Returns the start address of the current page of the space.
-  Address page_low() { return current_page_->area_start(); }
+  NewSpacePage* first_page() { return anchor_.next_page(); }
+  NewSpacePage* current_page() { return current_page_; }
 
   // Returns one past the end address of the space.
   Address space_end() { return anchor_.prev_page()->area_end(); }
 
+  // Returns the start address of the current page of the space.
+  Address page_low() { return current_page_->area_start(); }
+
   // Returns one past the end address of the current page of the space.
   Address page_high() { return current_page_->area_end(); }
@@ -2415,17 +2434,10 @@ class SemiSpace : public Space {
   Address age_mark() { return age_mark_; }
   void set_age_mark(Address mark);
 
-  bool is_committed() { return committed_; }
-  bool Commit();
-  bool Uncommit();
-
-  NewSpacePage* first_page() { return anchor_.next_page(); }
-  NewSpacePage* current_page() { return current_page_; }
-
-  // Returns the current total capacity of the semispace.
+  // Returns the current capacity of the semispace.
   int current_capacity() { return current_capacity_; }
 
-  // Returns the maximum total capacity of the semispace.
+  // Returns the maximum capacity of the semispace.
   int maximum_capacity() { return maximum_capacity_; }
 
   // Returns the initial capacity of the semispace.
@@ -2467,11 +2479,7 @@ class SemiSpace : public Space {
 #endif
 
  private:
-  NewSpacePage* anchor() { return &anchor_; }
-
-  void set_current_capacity(int new_capacity) {
-    current_capacity_ = new_capacity;
-  }
+  inline NewSpacePage* anchor() { return &anchor_; }
 
   // Copies the flags into the masked positions on all pages in the space.
   void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
@@ -2482,11 +2490,9 @@ class SemiSpace : public Space {
   // The maximum capacity that can be used by this space.
   int maximum_capacity_;
 
-  // The mimnimum capacity for the space. A space cannot shrink below this size.
+  // The minimum capacity for the space. A space cannot shrink below this size.
   int minimum_capacity_;
 
-  // The start address of the space.
-  Address start_;
   // Used to govern object promotion during mark-compact collection.
   Address age_mark_;
 
@@ -2562,20 +2568,21 @@ class NewSpacePageIterator BASE_EMBEDDED {
 
 class NewSpace : public Space {
  public:
-  // Constructor.
   explicit NewSpace(Heap* heap)
       : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
         to_space_(heap, kToSpace),
         from_space_(heap, kFromSpace),
-        reservation_(),
-        top_on_previous_step_(0) {}
+        pages_used_(0),
+        top_on_previous_step_(0),
+        allocated_histogram_(nullptr),
+        promoted_histogram_(nullptr) {}
 
   inline bool Contains(HeapObject* o);
   inline bool ContainsSlow(Address a);
   inline bool Contains(Object* o);
 
-  // Sets up the new space using the given chunk.
-  bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
+  bool SetUp(int initial_semispace_capacity, int max_semispace_capacity);
 
   // Tears down the space. Heap memory was not allocated by the space, so it
   // is not deallocated here.
@@ -2638,22 +2645,40 @@ class NewSpace : public Space {
   // Return the available bytes without growing.
   intptr_t Available() override { return Capacity() - Size(); }
 
-  intptr_t PagesFromStart(Address addr) {
-    return static_cast<intptr_t>(addr - bottom()) / Page::kPageSize;
-  }
-
   size_t AllocatedSinceLastGC() {
-    intptr_t allocated = top() - to_space_.age_mark();
-    if (allocated < 0) {
-      // Runtime has lowered the top below the age mark.
-      return 0;
-    }
-    // Correctly account for non-allocatable regions at the beginning of
-    // each page from the age_mark() to the top().
-    intptr_t pages =
-        PagesFromStart(top()) - PagesFromStart(to_space_.age_mark());
-    allocated -= pages * (NewSpacePage::kObjectStartOffset);
-    DCHECK(0 <= allocated && allocated <= Size());
+    bool seen_age_mark = false;
+    Address age_mark = to_space_.age_mark();
+    NewSpacePage* current_page = to_space_.first_page();
+    NewSpacePage* age_mark_page = NewSpacePage::FromAddress(age_mark);
+    NewSpacePage* last_page = NewSpacePage::FromAddress(top() - kPointerSize);
+    if (age_mark_page == last_page) {
+      if (top() - age_mark >= 0) {
+        return top() - age_mark;
+      }
+      // Top was reset at some point, invalidating this metric.
+      return 0;
+    }
+    while (current_page != last_page) {
+      if (current_page == age_mark_page) {
+        seen_age_mark = true;
+        break;
+      }
+      current_page = current_page->next_page();
+    }
+    if (!seen_age_mark) {
+      // Top was reset at some point, invalidating this metric.
+      return 0;
+    }
+    intptr_t allocated = age_mark_page->area_end() - age_mark;
+    DCHECK_EQ(current_page, age_mark_page);
+    current_page = age_mark_page->next_page();
+    while (current_page != last_page) {
+      allocated += NewSpacePage::kAllocatableMemory;
+      current_page = current_page->next_page();
+    }
+    allocated += top() - current_page->area_start();
+    DCHECK_LE(0, allocated);
+    DCHECK_LE(allocated, Size());
     return static_cast<size_t>(allocated);
   }
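[Editor's note: a worked example of the new page-walking accounting, with
made-up numbers. Suppose to-space holds three pages, the age mark sits in
page 0, and top() is in page 2. Then

    allocated = (page 0: area_end() - age_mark)
              + (page 1: NewSpacePage::kAllocatableMemory)
              + (page 2: top() - area_start())

The old code subtracted pages * kObjectStartOffset from a raw
top() - age_mark() difference, which only works while the pages are
contiguous; the walk skips each page's non-allocatable header naturally and
also detects that top() was reset below the age mark.]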
@@ -2805,9 +2830,6 @@ class NewSpace : public Space {
 
   base::Mutex mutex_;
 
-  Address chunk_base_;
-  uintptr_t chunk_size_;
-
   // The semispaces.
   SemiSpace to_space_;
   SemiSpace from_space_;
@@ -2333,11 +2333,7 @@ TEST(GrowAndShrinkNewSpace) {
   Heap* heap = CcTest::heap();
   NewSpace* new_space = heap->new_space();
 
-  if (heap->ReservedSemiSpaceSize() == heap->InitialSemiSpaceSize() ||
-      heap->MaxSemiSpaceSize() == heap->InitialSemiSpaceSize()) {
-    // The max size cannot exceed the reserved size, since semispaces must be
-    // always within the reserved space. We can't test new space growing and
-    // shrinking if the reserved size is the same as the minimum (initial) size.
+  if (heap->MaxSemiSpaceSize() == heap->InitialSemiSpaceSize()) {
     return;
   }
 
@@ -2382,11 +2378,7 @@ TEST(GrowAndShrinkNewSpace) {
 TEST(CollectingAllAvailableGarbageShrinksNewSpace) {
   CcTest::InitializeVM();
   Heap* heap = CcTest::heap();
-  if (heap->ReservedSemiSpaceSize() == heap->InitialSemiSpaceSize() ||
-      heap->MaxSemiSpaceSize() == heap->InitialSemiSpaceSize()) {
-    // The max size cannot exceed the reserved size, since semispaces must be
-    // always within the reserved space. We can't test new space growing and
-    // shrinking if the reserved size is the same as the minimum (initial) size.
+  if (heap->MaxSemiSpaceSize() == heap->InitialSemiSpaceSize()) {
     return;
   }
 
@@ -317,8 +317,9 @@ TEST(MemoryAllocator) {
   {
     int total_pages = 0;
     OldSpace faked_space(heap, OLD_SPACE, NOT_EXECUTABLE);
-    Page* first_page = memory_allocator->AllocatePage(
-        faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);
+    Page* first_page = memory_allocator->AllocatePage<Page>(
+        faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
+        NOT_EXECUTABLE);
 
     first_page->InsertAfter(faked_space.anchor()->prev_page());
     CHECK(first_page->is_valid());
@@ -330,8 +331,9 @@ TEST(MemoryAllocator) {
   }
 
   // Again, we should get n or n - 1 pages.
-  Page* other = memory_allocator->AllocatePage(faked_space.AreaSize(),
-                                               &faked_space, NOT_EXECUTABLE);
+  Page* other = memory_allocator->AllocatePage<Page>(
+      faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
+      NOT_EXECUTABLE);
   CHECK(other->is_valid());
   total_pages++;
   other->InsertAfter(first_page);
@@ -362,8 +364,8 @@ TEST(NewSpace) {
 
   NewSpace new_space(heap);
 
-  CHECK(new_space.SetUp(CcTest::heap()->ReservedSemiSpaceSize(),
-                        CcTest::heap()->ReservedSemiSpaceSize()));
+  CHECK(new_space.SetUp(CcTest::heap()->InitialSemiSpaceSize(),
+                        CcTest::heap()->InitialSemiSpaceSize()));
   CHECK(new_space.HasBeenSetUp());
 
   while (new_space.Available() >= Page::kMaxRegularHeapObjectSize) {