[heap] Make ReadOnlySpace use bump pointer allocation
This changes ReadOnlySpace to no longer be a PagedSpace but instead it is now a BaseSpace. BaseSpace is a new base class that Space inherits from and which has no allocation methods and does not dictate how the pages should be held.

ReadOnlySpace, unlike Space, holds its pages as a std::vector<ReadOnlyPage*>, where ReadOnlyPage directly subclasses BasicMemoryChunk, meaning they do not have prev_ and next_ pointers and cannot be held in a heap::List. This is desirable since with pointer compression we would like to remap these pages to different memory addresses, which would be impossible with a heap::List.

Since ReadOnlySpace no longer uses most of the code from the other Spaces, it makes sense to simplify its memory allocation to use a simple bump pointer and always allocate a new page whenever an allocation exceeds the remaining space on the final page.

Change-Id: Iee6d9f96cfb174b4026ee671ee4f897909b38418
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2209060
Commit-Queue: Dan Elphick <delphick@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#68137}
commit 81c34968a7
parent 86fee30e25
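The bump-pointer scheme the message describes can be illustrated with a short, self-contained sketch (illustrative only: the class, fields, and page size below are invented and far simpler than V8's real ReadOnlySpace, which also tracks accounting stats and allocates through the MemoryAllocator). Each allocation bumps top_ forward; when a request would cross limit_, a fresh page is appended to a vector and top_/limit_ are reset to its bounds, mirroring the EnsureSpaceForAllocation/AllocateRawUnaligned pair in the diff below.

// A minimal bump-pointer space, assuming a fixed page size and allocations
// no larger than one page (both assumptions invented for this sketch).
#include <cstddef>
#include <cstdint>
#include <vector>

class BumpPointerSpace {
 public:
  // Returns size_in_bytes of fresh storage. Allocation is a pointer bump;
  // a new page is only taken when the request does not fit before limit_.
  void* Allocate(size_t size_in_bytes) {
    if (top_ + size_in_bytes > limit_) AllocateNewPage();
    uintptr_t result = top_;
    top_ += size_in_bytes;
    return reinterpret_cast<void*>(result);
  }

 private:
  static constexpr size_t kPageSize = 256 * 1024;  // Assumed, not V8's value.

  void AllocateNewPage() {
    // Pages live in a std::vector rather than an intrusive linked list, so
    // no prev_/next_ pointers are needed inside the pages themselves.
    pages_.emplace_back(kPageSize);
    top_ = reinterpret_cast<uintptr_t>(pages_.back().data());
    limit_ = top_ + kPageSize;
  }

  std::vector<std::vector<uint8_t>> pages_;
  uintptr_t top_ = 0;    // Next free byte on the final page.
  uintptr_t limit_ = 0;  // One past the end of the final page.
};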
@@ -8426,8 +8426,7 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
  i::ReadOnlySpace* ro_space = heap->read_only_space();
  heap_statistics->total_heap_size_ += ro_space->CommittedMemory();
  heap_statistics->total_physical_size_ += ro_space->CommittedPhysicalMemory();
  heap_statistics->total_available_size_ += ro_space->Available();
  heap_statistics->used_heap_size_ += ro_space->SizeOfObjects();
  heap_statistics->used_heap_size_ += ro_space->Size();
#endif  // V8_SHARED_RO_HEAP

  heap_statistics->total_heap_size_executable_ =

@@ -8461,18 +8460,26 @@ bool Isolate::GetHeapSpaceStatistics(HeapSpaceStatistics* space_statistics,

  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
  i::Heap* heap = isolate->heap();
  i::Space* space = heap->space(static_cast<int>(index));

  i::AllocationSpace allocation_space = static_cast<i::AllocationSpace>(index);
  space_statistics->space_name_ = i::Heap::GetSpaceName(allocation_space);

  if (allocation_space == i::RO_SPACE && V8_SHARED_RO_HEAP_BOOL) {
    // RO_SPACE memory is accounted for elsewhere when ReadOnlyHeap is shared.
    space_statistics->space_size_ = 0;
    space_statistics->space_used_size_ = 0;
    space_statistics->space_available_size_ = 0;
    space_statistics->physical_space_size_ = 0;
  if (allocation_space == i::RO_SPACE) {
    if (V8_SHARED_RO_HEAP_BOOL) {
      // RO_SPACE memory is accounted for elsewhere when ReadOnlyHeap is shared.
      space_statistics->space_size_ = 0;
      space_statistics->space_used_size_ = 0;
      space_statistics->space_available_size_ = 0;
      space_statistics->physical_space_size_ = 0;
    } else {
      i::ReadOnlySpace* space = heap->read_only_space();
      space_statistics->space_size_ = space->CommittedMemory();
      space_statistics->space_used_size_ = space->Size();
      space_statistics->space_available_size_ = 0;
      space_statistics->physical_space_size_ = space->CommittedPhysicalMemory();
    }
  } else {
    i::Space* space = heap->space(static_cast<int>(index));
    space_statistics->space_size_ = space->CommittedMemory();
    space_statistics->space_used_size_ = space->SizeOfObjects();
    space_statistics->space_available_size_ = space->Available();
@@ -3260,15 +3260,15 @@ void Isolate::AddCrashKeysForIsolateAndHeapPointers() {
                          AddressToString(isolate_address));

  const uintptr_t ro_space_firstpage_address =
      reinterpret_cast<uintptr_t>(heap()->read_only_space()->first_page());
      heap()->read_only_space()->FirstPageAddress();
  add_crash_key_callback_(v8::CrashKeyId::kReadonlySpaceFirstPageAddress,
                          AddressToString(ro_space_firstpage_address));
  const uintptr_t map_space_firstpage_address =
      reinterpret_cast<uintptr_t>(heap()->map_space()->first_page());
      heap()->map_space()->FirstPageAddress();
  add_crash_key_callback_(v8::CrashKeyId::kMapSpaceFirstPageAddress,
                          AddressToString(map_space_firstpage_address));
  const uintptr_t code_space_firstpage_address =
      reinterpret_cast<uintptr_t>(heap()->code_space()->first_page());
      heap()->code_space()->FirstPageAddress();
  add_crash_key_callback_(v8::CrashKeyId::kCodeSpaceFirstPageAddress,
                          AddressToString(code_space_firstpage_address));
}
@@ -42,7 +42,8 @@ void BasicMemoryChunk::ReleaseMarkingBitmap() {
// static
BasicMemoryChunk* BasicMemoryChunk::Initialize(Heap* heap, Address base,
                                               size_t size, Address area_start,
                                               Address area_end, Space* owner,
                                               Address area_end,
                                               BaseSpace* owner,
                                               VirtualMemory reservation) {
  BasicMemoryChunk* chunk = FromAddress(base);
  DCHECK_EQ(base, chunk->address());

@@ -6,6 +6,7 @@
#define V8_HEAP_BASIC_MEMORY_CHUNK_H_

#include <type_traits>
#include <unordered_map>

#include "src/base/atomic-utils.h"
#include "src/common/globals.h"

@@ -16,7 +17,7 @@
namespace v8 {
namespace internal {

class Space;
class BaseSpace;

class BasicMemoryChunk {
 public:

@@ -150,9 +151,9 @@ class BasicMemoryChunk {
  }

  // Gets the chunk's owner or null if the space has been detached.
  Space* owner() const { return owner_; }
  BaseSpace* owner() const { return owner_; }

  void set_owner(Space* space) { owner_ = space; }
  void set_owner(BaseSpace* space) { owner_ = space; }

  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
  void SetFlag(Flag flag) {

@@ -204,7 +205,8 @@ class BasicMemoryChunk {

  static BasicMemoryChunk* Initialize(Heap* heap, Address base, size_t size,
                                      Address area_start, Address area_end,
                                      Space* owner, VirtualMemory reservation);
                                      BaseSpace* owner,
                                      VirtualMemory reservation);

  size_t wasted_memory() { return wasted_memory_; }
  void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }

@@ -304,7 +306,7 @@ class BasicMemoryChunk {
  std::atomic<intptr_t> high_water_mark_;

  // The space owning this memory chunk.
  std::atomic<Space*> owner_;
  std::atomic<BaseSpace*> owner_;

  // If the chunk needs to remember its memory reservation, it is stored here.
  VirtualMemory reservation_;
@@ -241,8 +241,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
    DCHECK(!large_object);
    DCHECK(CanAllocateInReadOnlySpace());
    DCHECK_EQ(AllocationOrigin::kRuntime, origin);
    allocation =
        read_only_space_->AllocateRaw(size_in_bytes, alignment, origin);
    allocation = read_only_space_->AllocateRaw(size_in_bytes, alignment);
  } else {
    UNREACHABLE();
  }

@@ -482,8 +482,7 @@ void Heap::PrintShortHeapStatistics() {
               "Read-only space, used: %6zu KB"
               ", available: %6zu KB"
               ", committed: %6zu KB\n",
               read_only_space_->Size() / KB,
               read_only_space_->Available() / KB,
               read_only_space_->Size() / KB, size_t{0},
               read_only_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_,
               "New space, used: %6zu KB"

@@ -534,8 +533,8 @@ void Heap::PrintShortHeapStatistics() {
               "All spaces, used: %6zu KB"
               ", available: %6zu KB"
               ", committed: %6zu KB\n",
               (this->SizeOfObjects() + ro_space->SizeOfObjects()) / KB,
               (this->Available() + ro_space->Available()) / KB,
               (this->SizeOfObjects() + ro_space->Size()) / KB,
               (this->Available()) / KB,
               (this->CommittedMemory() + ro_space->CommittedMemory()) / KB);
  PrintIsolate(isolate_,
               "Unmapper buffering %zu chunks of committed: %6zu KB\n",

@@ -1967,6 +1966,9 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
#else
        if (space == NEW_SPACE) {
          allocation = new_space()->AllocateRawUnaligned(size);
        } else if (space == RO_SPACE) {
          allocation = read_only_space()->AllocateRaw(
              size, AllocationAlignment::kWordAligned);
        } else {
          // The deserializer will update the skip list.
          allocation = paged_space(space)->AllocateRawUnaligned(size);

@@ -3009,10 +3011,12 @@ HeapObject CreateFillerObjectAtImpl(ReadOnlyRoots roots, Address addr, int size,

#ifdef DEBUG
void VerifyNoNeedToClearSlots(Address start, Address end) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(start);
  BasicMemoryChunk* basic_chunk = BasicMemoryChunk::FromAddress(start);
  if (basic_chunk->InReadOnlySpace()) return;
  MemoryChunk* chunk = static_cast<MemoryChunk*>(basic_chunk);
  // TODO(ulan): Support verification of large pages.
  if (chunk->InYoungGeneration() || chunk->IsLargePage()) return;
  Space* space = chunk->owner();
  BaseSpace* space = chunk->owner();
  if (static_cast<PagedSpace*>(space)->is_off_thread_space()) return;
  space->heap()->VerifySlotRangeHasNoRecordedSlots(start, end);
}

@@ -4184,28 +4188,6 @@ bool Heap::IsValidAllocationSpace(AllocationSpace space) {
}

#ifdef VERIFY_HEAP
class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
 public:
  explicit VerifyReadOnlyPointersVisitor(Heap* heap)
      : VerifyPointersVisitor(heap) {}

 protected:
  void VerifyPointers(HeapObject host, MaybeObjectSlot start,
                      MaybeObjectSlot end) override {
    if (!host.is_null()) {
      CHECK(ReadOnlyHeap::Contains(host.map()));
    }
    VerifyPointersVisitor::VerifyPointers(host, start, end);

    for (MaybeObjectSlot current = start; current < end; ++current) {
      HeapObject heap_object;
      if ((*current)->GetHeapObject(&heap_object)) {
        CHECK(ReadOnlyHeap::Contains(heap_object));
      }
    }
  }
};

void Heap::Verify() {
  CHECK(HasBeenSetUp());
  SafepointScope safepoint_scope(this);

@@ -4250,8 +4232,7 @@ void Heap::Verify() {

void Heap::VerifyReadOnlyHeap() {
  CHECK(!read_only_space_->writable());
  VerifyReadOnlyPointersVisitor read_only_visitor(this);
  read_only_space_->Verify(isolate(), &read_only_visitor);
  read_only_space_->Verify(isolate());
}

class SlotVerifyingVisitor : public ObjectVisitor {

@@ -5323,13 +5304,15 @@ void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
  DCHECK_NOT_NULL(ro_heap);
  DCHECK_IMPLIES(read_only_space_ != nullptr,
                 read_only_space_ == ro_heap->read_only_space());
  space_[RO_SPACE] = read_only_space_ = ro_heap->read_only_space();
  space_[RO_SPACE] = nullptr;
  read_only_space_ = ro_heap->read_only_space();
}

void Heap::ReplaceReadOnlySpace(SharedReadOnlySpace* space) {
  CHECK(V8_SHARED_RO_HEAP_BOOL);
  delete read_only_space_;
  space_[RO_SPACE] = read_only_space_ = space;
  read_only_space_ = space;
}

void Heap::SetUpSpaces() {

@@ -5618,7 +5601,7 @@ void Heap::TearDown() {
  tracer_.reset();

  isolate()->read_only_heap()->OnHeapTearDown();
  space_[RO_SPACE] = read_only_space_ = nullptr;
  read_only_space_ = nullptr;
  for (int i = FIRST_MUTABLE_SPACE; i <= LAST_MUTABLE_SPACE; i++) {
    delete space_[i];
    space_[i] = nullptr;
@@ -293,7 +293,8 @@ class MinorNonAtomicMarkingState final
class MajorMarkingState final
    : public MarkingStateBase<MajorMarkingState, AccessMode::ATOMIC> {
 public:
  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) const {
  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
      const BasicMemoryChunk* chunk) const {
    DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
                  reinterpret_cast<intptr_t>(chunk),
              BasicMemoryChunk::kMarkBitmapOffset);

@@ -320,7 +321,8 @@ class MajorMarkingState final
class MajorAtomicMarkingState final
    : public MarkingStateBase<MajorAtomicMarkingState, AccessMode::ATOMIC> {
 public:
  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) const {
  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
      const BasicMemoryChunk* chunk) const {
    DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
                  reinterpret_cast<intptr_t>(chunk),
              BasicMemoryChunk::kMarkBitmapOffset);

@@ -337,7 +339,7 @@ class MajorNonAtomicMarkingState final
                            AccessMode::NON_ATOMIC> {
 public:
  ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
      const MemoryChunk* chunk) const {
      const BasicMemoryChunk* chunk) const {
    DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
                  reinterpret_cast<intptr_t>(chunk),
              BasicMemoryChunk::kMarkBitmapOffset);
@@ -7,11 +7,13 @@
#include <cinttypes>

#include "src/base/address-region.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/flags/flags.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-spaces.h"
#include "src/logging/log.h"

namespace v8 {

@@ -372,10 +374,9 @@ Address MemoryAllocator::AllocateAlignedMemory(
  return base;
}

MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
                                            size_t commit_area_size,
                                            Executability executable,
                                            Space* owner) {
V8_EXPORT_PRIVATE BasicMemoryChunk* MemoryAllocator::AllocateBasicChunk(
    size_t reserve_area_size, size_t commit_area_size, Executability executable,
    BaseSpace* owner) {
  DCHECK_LE(commit_area_size, reserve_area_size);

  size_t chunk_size;

@@ -483,19 +484,32 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
      size_executable_ -= chunk_size;
    }
    CHECK(last_chunk_.IsReserved());
    return AllocateChunk(reserve_area_size, commit_area_size, executable,
                         owner);
    return AllocateBasicChunk(reserve_area_size, commit_area_size, executable,
                              owner);
  }

  BasicMemoryChunk* chunk =
      BasicMemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
                                   owner, std::move(reservation));

  return chunk;
}

MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
                                            size_t commit_area_size,
                                            Executability executable,
                                            BaseSpace* owner) {
  BasicMemoryChunk* basic_chunk = AllocateBasicChunk(
      reserve_area_size, commit_area_size, executable, owner);
  MemoryChunk* chunk =
      MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
                              executable, owner, std::move(reservation));
      MemoryChunk::Initialize(basic_chunk, isolate_->heap(), executable);

  if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
  return chunk;
}

void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
void MemoryAllocator::PartialFreeMemory(BasicMemoryChunk* chunk,
                                        Address start_free,
                                        size_t bytes_to_free,
                                        Address new_area_end) {
  VirtualMemory* reservation = chunk->reserved_memory();

@@ -519,22 +533,42 @@ void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
  size_ -= released_bytes;
}

void MemoryAllocator::UnregisterMemory(MemoryChunk* chunk) {
void MemoryAllocator::UnregisterMemory(BasicMemoryChunk* chunk,
                                       Executability executable) {
  DCHECK(!chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
  VirtualMemory* reservation = chunk->reserved_memory();
  const size_t size =
      reservation->IsReserved() ? reservation->size() : chunk->size();
  DCHECK_GE(size_, static_cast<size_t>(size));
  size_ -= size;
  if (chunk->executable() == EXECUTABLE) {
  if (executable == EXECUTABLE) {
    DCHECK_GE(size_executable_, size);
    size_executable_ -= size;
    UnregisterExecutableMemoryChunk(static_cast<MemoryChunk*>(chunk));
  }

  if (chunk->executable()) UnregisterExecutableMemoryChunk(chunk);
  chunk->SetFlag(MemoryChunk::UNREGISTERED);
}

void MemoryAllocator::UnregisterMemory(MemoryChunk* chunk) {
  UnregisterMemory(chunk, chunk->executable());
}

void MemoryAllocator::FreeReadOnlyPage(ReadOnlyPage* chunk) {
  DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
  UnregisterMemory(chunk);
  chunk->SetFlag(MemoryChunk::PRE_FREED);
  chunk->ReleaseMarkingBitmap();

  VirtualMemory* reservation = chunk->reserved_memory();
  if (reservation->IsReserved()) {
    reservation->Free();
  } else {
    // Only read-only pages can have non-initialized reservation object.
    FreeMemory(page_allocator(NOT_EXECUTABLE), chunk->address(), chunk->size());
  }
}

void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
  DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));

@@ -547,20 +581,15 @@ void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
  DCHECK(chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
  DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  DCHECK(!chunk->InReadOnlySpace());
  chunk->ReleaseAllAllocatedMemory();

  VirtualMemory* reservation = chunk->reserved_memory();
  if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
    UncommitMemory(reservation);
  } else {
    if (reservation->IsReserved()) {
      reservation->Free();
    } else {
      // Only read-only pages can have non-initialized reservation object.
      DCHECK_EQ(RO_SPACE, chunk->owner_identity());
      FreeMemory(page_allocator(chunk->executable()), chunk->address(),
                 chunk->size());
    }
    DCHECK(reservation->IsReserved());
    reservation->Free();
  }
}

@@ -630,6 +659,16 @@ template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
    Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
        size_t size, SemiSpace* owner, Executability executable);

ReadOnlyPage* MemoryAllocator::AllocateReadOnlyPage(size_t size,
                                                    ReadOnlySpace* owner) {
  BasicMemoryChunk* chunk = nullptr;
  if (chunk == nullptr) {
    chunk = AllocateBasicChunk(size, size, NOT_EXECUTABLE, owner);
  }
  if (chunk == nullptr) return nullptr;
  return owner->InitializePage(chunk);
}

LargePage* MemoryAllocator::AllocateLargePage(size_t size,
                                              LargeObjectSpace* owner,
                                              Executability executable) {

@@ -655,8 +694,10 @@ MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
  if (Heap::ShouldZapGarbage()) {
    ZapBlock(start, size, kZapValue);
  }
  MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
                          NOT_EXECUTABLE, owner, std::move(reservation));
  BasicMemoryChunk* basic_chunk =
      BasicMemoryChunk::Initialize(isolate_->heap(), start, size, area_start,
                                   area_end, owner, std::move(reservation));
  MemoryChunk::Initialize(basic_chunk, isolate_->heap(), NOT_EXECUTABLE);
  size_ += size;
  return chunk;
}
@@ -27,6 +27,7 @@ namespace internal {

class Heap;
class Isolate;
class ReadOnlyPage;

// The process-wide singleton that keeps track of code range regions with the
// intention to reuse free code range regions as a workaround for CFG memory

@@ -192,9 +193,12 @@ class MemoryAllocator {
  LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
                               Executability executable);

  ReadOnlyPage* AllocateReadOnlyPage(size_t size, ReadOnlySpace* owner);

  template <MemoryAllocator::FreeMode mode = kFull>
  EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
  void Free(MemoryChunk* chunk);
  void FreeReadOnlyPage(ReadOnlyPage* chunk);

  // Returns allocated spaces in bytes.
  size_t Size() const { return size_; }

@@ -215,13 +219,20 @@ class MemoryAllocator {
           address >= highest_ever_allocated_;
  }

  // Returns a BasicMemoryChunk in which the memory region from commit_area_size
  // to reserve_area_size of the chunk area is reserved but not committed, it
  // could be committed later by calling MemoryChunk::CommitArea.
  V8_EXPORT_PRIVATE BasicMemoryChunk* AllocateBasicChunk(
      size_t reserve_area_size, size_t commit_area_size,
      Executability executable, BaseSpace* space);

  // Returns a MemoryChunk in which the memory region from commit_area_size to
  // reserve_area_size of the chunk area is reserved but not committed, it
  // could be committed later by calling MemoryChunk::CommitArea.
  V8_EXPORT_PRIVATE MemoryChunk* AllocateChunk(size_t reserve_area_size,
                                               size_t commit_area_size,
                                               Executability executable,
                                               Space* space);
                                               BaseSpace* space);

  Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
                                size_t alignment, Executability executable,

@@ -233,7 +244,7 @@ class MemoryAllocator {
  // internally memory is freed from |start_free| to the end of the reservation.
  // Additional memory beyond the page is not accounted though, so
  // |bytes_to_free| is computed by the caller.
  void PartialFreeMemory(MemoryChunk* chunk, Address start_free,
  void PartialFreeMemory(BasicMemoryChunk* chunk, Address start_free,
                         size_t bytes_to_free, Address new_area_end);

  // Checks if an allocated MemoryChunk was intended to be used for executable

@@ -290,21 +301,24 @@ class MemoryAllocator {
  // Performs all necessary bookkeeping to free the memory, but does not free
  // it.
  void UnregisterMemory(MemoryChunk* chunk);
  void UnregisterMemory(BasicMemoryChunk* chunk,
                        Executability executable = NOT_EXECUTABLE);

 private:
  void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,
                                   size_t requested);

  // PreFreeMemory logically frees the object, i.e., it unregisters the memory,
  // logs a delete event and adds the chunk to remembered unmapped pages.
  // PreFreeMemory logically frees the object, i.e., it unregisters the
  // memory, logs a delete event and adds the chunk to remembered unmapped
  // pages.
  void PreFreeMemory(MemoryChunk* chunk);

  // PerformFreeMemory can be called concurrently when PreFree was executed
  // before.
  void PerformFreeMemory(MemoryChunk* chunk);

  // See AllocatePage for public interface. Note that currently we only support
  // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
  // See AllocatePage for public interface. Note that currently we only
  // support pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
  template <typename SpaceType>
  MemoryChunk* AllocatePagePooled(SpaceType* owner);

@@ -350,15 +364,16 @@ class MemoryAllocator {
  VirtualMemory code_reservation_;

  // Page allocator used for allocating data pages. Depending on the
  // configuration it may be a page allocator instance provided by v8::Platform
  // or a BoundedPageAllocator (when pointer compression is enabled).
  // configuration it may be a page allocator instance provided by
  // v8::Platform or a BoundedPageAllocator (when pointer compression is
  // enabled).
  v8::PageAllocator* data_page_allocator_;

  // Page allocator used for allocating code pages. Depending on the
  // configuration it may be a page allocator instance provided by v8::Platform
  // or a BoundedPageAllocator (when pointer compression is enabled or
  // on those 64-bit architectures where pc-relative 32-bit displacement
  // can be used for call and jump instructions).
  // configuration it may be a page allocator instance provided by
  // v8::Platform or a BoundedPageAllocator (when pointer compression is
  // enabled or on those 64-bit architectures where pc-relative 32-bit
  // displacement can be used for call and jump instructions).
  v8::PageAllocator* code_page_allocator_;

  // A part of the |code_reservation_| that may contain executable code

@@ -371,10 +386,11 @@ class MemoryAllocator {
  // optionally existing page in the beginning of the |code_range_|.
  // So, summarizing all above, the following conditions hold:
  // 1) |code_reservation_| >= |code_range_|
  // 2) |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|.
  // 3) |code_reservation_| is AllocatePageSize()-aligned
  // 4) |code_page_allocator_instance_| is MemoryChunk::kAlignment-aligned
  // 5) |code_range_| is CommitPageSize()-aligned
  // 2) |code_range_| >= |optional RW pages| +
  // |code_page_allocator_instance_|. 3) |code_reservation_| is
  // AllocatePageSize()-aligned 4) |code_page_allocator_instance_| is
  // MemoryChunk::kAlignment-aligned 5) |code_range_| is
  // CommitPageSize()-aligned
  std::unique_ptr<base::BoundedPageAllocator> code_page_allocator_instance_;

  // Maximum space size in bytes.
@@ -167,14 +167,9 @@ PageAllocator::Permission DefaultWritableCodePermissions() {

}  // namespace

MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                     Address area_start, Address area_end,
                                     Executability executable, Space* owner,
                                     VirtualMemory reservation) {
  MemoryChunk* chunk = FromAddress(base);
  DCHECK_EQ(base, chunk->address());
  BasicMemoryChunk::Initialize(heap, base, size, area_start, area_end, owner,
                               std::move(reservation));
MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
                                     Executability executable) {
  MemoryChunk* chunk = static_cast<MemoryChunk*>(basic_chunk);

  base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
  base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);

@@ -202,14 +197,6 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,

  heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
                                                                        0);
  if (owner->identity() == RO_SPACE) {
    heap->incremental_marking()
        ->non_atomic_marking_state()
        ->bitmap(chunk)
        ->MarkAllBits();
    chunk->SetFlag(READ_ONLY_HEAP);
  }

  if (executable == EXECUTABLE) {
    chunk->SetFlag(IS_EXECUTABLE);
    if (heap->write_protect_code_memory()) {

@@ -225,7 +212,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
    }
  }

  if (owner->identity() == CODE_SPACE) {
  if (chunk->owner()->identity() == CODE_SPACE) {
    chunk->code_object_registry_ = new CodeObjectRegistry();
  } else {
    chunk->code_object_registry_ = nullptr;
@@ -286,17 +286,16 @@ class MemoryChunk : public BasicMemoryChunk {
  bool InOldSpace() const;
  V8_EXPORT_PRIVATE bool InLargeObjectSpace() const;

  // Gets the chunk's owner or null if the space has been detached.
  Space* owner() const { return owner_; }

  void set_owner(Space* space) { owner_ = space; }

  bool IsWritable() const {
    // If this is a read-only space chunk but heap_ is non-null, it has not yet
    // been sealed and can be written to.
    return !InReadOnlySpace() || heap_ != nullptr;
  }

  Space* owner() const {
    return reinterpret_cast<Space*>(BasicMemoryChunk::owner());
  }

  // Gets the chunk's allocation space, potentially dealing with a null owner_
  // (like read-only chunks have).
  inline AllocationSpace owner_identity() const;

@@ -331,10 +330,8 @@ class MemoryChunk : public BasicMemoryChunk {
  void ReleaseAllocatedMemoryNeededForWritableChunk();

 protected:
  static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
                                 Address area_start, Address area_end,
                                 Executability executable, Space* owner,
                                 VirtualMemory reservation);
  static MemoryChunk* Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
                                 Executability executable);

  // Release all memory allocated by the chunk. Should be called when memory
  // chunk is about to be freed.
@@ -380,7 +380,7 @@ void SemiSpaceObjectIterator::Initialize(Address start, Address end) {

size_t NewSpace::CommittedPhysicalMemory() {
  if (!base::OS::HasLazyCommits()) return CommittedMemory();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  size_t size = to_space_.CommittedPhysicalMemory();
  if (from_space_.is_committed()) {
    size += from_space_.CommittedPhysicalMemory();

@@ -469,7 +469,7 @@ void NewSpace::UpdateLinearAllocationArea() {
  DCHECK(!AllocationObserversActive() || top_on_previous_step_ == top());

  Address new_top = to_space_.page_low();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  allocation_info_.Reset(new_top, to_space_.page_high());
  // The order of the following two stores is important.
  // See the corresponding loads in ConcurrentMarking::Run.
@@ -33,9 +33,7 @@ HeapObject PagedSpaceObjectIterator::FromCurrentPage() {
      DCHECK_LE(cur_addr_, cur_end_);
      if (!obj.IsFreeSpaceOrFiller()) {
        if (obj.IsCode()) {
          DCHECK_IMPLIES(
              space_->identity() != CODE_SPACE,
              space_->identity() == RO_SPACE && Code::cast(obj).is_builtin());
          DCHECK_EQ(space_->identity(), CODE_SPACE);
          DCHECK_CODEOBJECT_SIZE(obj_size, space_);
        } else {
          DCHECK_OBJECT_SIZE(obj_size);

@@ -127,7 +125,6 @@ HeapObject PagedSpace::TryAllocateLinearlyAligned(

AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
                                                  AllocationOrigin origin) {
  DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
  if (!EnsureLinearAllocationArea(size_in_bytes, origin)) {
    return AllocationResult::Retry(identity());
  }

@@ -145,8 +142,7 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
                                                AllocationAlignment alignment,
                                                AllocationOrigin origin) {
  DCHECK(identity() == OLD_SPACE || identity() == RO_SPACE);
  DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
  DCHECK_EQ(identity(), OLD_SPACE);
  int allocation_size = size_in_bytes;
  HeapObject object = TryAllocateLinearlyAligned(&allocation_size, alignment);
  if (object.is_null()) {
@@ -49,8 +49,7 @@ PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
  heap->mark_compact_collector()->EnsureSweepingCompleted();
#ifdef DEBUG
  AllocationSpace owner = page->owner_identity();
  DCHECK(owner == RO_SPACE || owner == OLD_SPACE || owner == MAP_SPACE ||
         owner == CODE_SPACE);
  DCHECK(owner == OLD_SPACE || owner == MAP_SPACE || owner == CODE_SPACE);
#endif  // DEBUG
}

@@ -114,12 +113,11 @@ void PagedSpace::RefillFreeList() {
  // Any PagedSpace might invoke RefillFreeList. We filter all but our old
  // generation spaces out.
  if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
      identity() != MAP_SPACE && identity() != RO_SPACE) {
      identity() != MAP_SPACE) {
    return;
  }
  DCHECK_NE(local_space_kind(), LocalSpaceKind::kOffThreadSpace);
  DCHECK_IMPLIES(is_local_space(), is_compaction_space());
  DCHECK(!IsDetached());
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  size_t added = 0;

@@ -237,7 +235,7 @@ void PagedSpace::MergeLocalSpace(LocalSpace* other) {

size_t PagedSpace::CommittedPhysicalMemory() {
  if (!base::OS::HasLazyCommits()) return CommittedMemory();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  size_t size = 0;
  for (Page* page : *this) {
    size += page->CommittedPhysicalMemory();

@@ -323,7 +321,7 @@ void PagedSpace::ResetFreeList() {

void PagedSpace::ShrinkImmortalImmovablePages() {
  DCHECK(!heap()->deserialization_complete());
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  FreeLinearAllocationArea();
  ResetFreeList();
  for (Page* page : *this) {

@@ -692,15 +690,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
  }

  for (Page* page : *this) {
#ifdef V8_SHARED_RO_HEAP
    if (identity() == RO_SPACE) {
      CHECK_NULL(page->owner());
    } else {
      CHECK_EQ(page->owner(), this);
    }
#else
    CHECK_EQ(page->owner(), this);
#endif

    for (int i = 0; i < kNumTypes; i++) {
      external_page_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;

@@ -781,7 +771,6 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
}

void PagedSpace::VerifyLiveBytes() {
  DCHECK_NE(identity(), RO_SPACE);
  IncrementalMarking::MarkingState* marking_state =
      heap()->incremental_marking()->marking_state();
  for (Page* page : *this) {

@@ -317,7 +317,7 @@ class V8_EXPORT_PRIVATE PagedSpace
  void SetTopAndLimit(Address top, Address limit) {
    DCHECK(top == limit ||
           Page::FromAddress(top) == Page::FromAddress(limit - 1));
    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    allocation_info_.Reset(top, limit);
  }
  void DecreaseLimit(Address new_limit);
@@ -10,6 +10,7 @@
#include "src/base/lazy-instance.h"
#include "src/base/lsan.h"
#include "src/base/platform/mutex.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-spaces.h"

@@ -137,7 +138,7 @@ ReadOnlyHeap* ReadOnlyHeap::CreateAndAttachToIsolate(

void ReadOnlyHeap::InitFromIsolate(Isolate* isolate) {
  DCHECK(!init_complete_);
  read_only_space_->ShrinkImmortalImmovablePages();
  read_only_space_->ShrinkPages();
#ifdef V8_SHARED_RO_HEAP
  std::shared_ptr<ReadOnlyArtifacts> artifacts(*read_only_artifacts_.Pointer());
  read_only_space()->DetachPagesAndAddToArtifacts(artifacts);

@@ -174,7 +175,7 @@ void ReadOnlyHeap::PopulateReadOnlySpaceStatistics(
  if (artifacts) {
    auto ro_space = artifacts->shared_read_only_space();
    statistics->read_only_space_size_ = ro_space->CommittedMemory();
    statistics->read_only_space_used_size_ = ro_space->SizeOfObjects();
    statistics->read_only_space_used_size_ = ro_space->Size();
    statistics->read_only_space_physical_size_ =
        ro_space->CommittedPhysicalMemory();
  }

@@ -183,7 +184,7 @@ void ReadOnlyHeap::PopulateReadOnlySpaceStatistics(

// static
bool ReadOnlyHeap::Contains(Address address) {
  return MemoryChunk::FromAddress(address)->InReadOnlySpace();
  return BasicMemoryChunk::FromAddress(address)->InReadOnlySpace();
}

// static

@@ -214,30 +215,33 @@ ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(ReadOnlyHeap* ro_heap)

ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(ReadOnlySpace* ro_space)
    : ro_space_(ro_space),
      current_page_(V8_ENABLE_THIRD_PARTY_HEAP_BOOL ? nullptr
                                                    : ro_space->first_page()),
      current_page_(V8_ENABLE_THIRD_PARTY_HEAP_BOOL
                        ? std::vector<ReadOnlyPage*>::iterator()
                        : ro_space->pages().begin()),
      current_addr_(V8_ENABLE_THIRD_PARTY_HEAP_BOOL
                        ? Address()
                        : current_page_->area_start()) {}
                        : (*current_page_)->area_start()) {}

HeapObject ReadOnlyHeapObjectIterator::Next() {
  if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
    return HeapObject();  // Unsupported
  }

  if (current_page_ == nullptr) {
  if (current_page_ == ro_space_->pages().end()) {
    return HeapObject();
  }

  BasicMemoryChunk* current_page = *current_page_;
  for (;;) {
    DCHECK_LE(current_addr_, current_page_->area_end());
    if (current_addr_ == current_page_->area_end()) {
    DCHECK_LE(current_addr_, current_page->area_end());
    if (current_addr_ == current_page->area_end()) {
      // Progress to the next page.
      current_page_ = current_page_->next_page();
      if (current_page_ == nullptr) {
      ++current_page_;
      if (current_page_ == ro_space_->pages().end()) {
        return HeapObject();
      }
      current_addr_ = current_page_->area_start();
      current_page = *current_page_;
      current_addr_ = current_page->area_start();
    }

    if (current_addr_ == ro_space_->top() &&
@@ -7,6 +7,7 @@

#include <memory>
#include <utility>
#include <vector>

#include "src/base/macros.h"
#include "src/base/optional.h"

@@ -20,10 +21,12 @@ class SharedMemoryStatistics;

namespace internal {

class BasicMemoryChunk;
class Isolate;
class Page;
class ReadOnlyArtifacts;
class ReadOnlyDeserializer;
class ReadOnlyPage;
class ReadOnlySpace;

// This class transparently manages read-only space, roots and cache creation

@@ -116,7 +119,7 @@ class V8_EXPORT_PRIVATE ReadOnlyHeapObjectIterator {

 private:
  ReadOnlySpace* const ro_space_;
  Page* current_page_;
  std::vector<ReadOnlyPage*>::const_iterator current_page_;
  Address current_addr_;
};
@@ -4,8 +4,11 @@

#include "src/heap/read-only-spaces.h"

#include "include/v8-internal.h"
#include "src/base/lsan.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/combined-heap.h"
#include "src/heap/heap-inl.h"
#include "src/heap/memory-allocator.h"

@@ -21,20 +24,28 @@ namespace internal {
// ReadOnlySpace implementation

ReadOnlySpace::ReadOnlySpace(Heap* heap)
    : PagedSpace(heap, RO_SPACE, NOT_EXECUTABLE, FreeList::CreateFreeList()),
      is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()) {
    : BaseSpace(heap, RO_SPACE),
      top_(kNullAddress),
      limit_(kNullAddress),
      is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()),
      area_size_(MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE)) {}

ReadOnlySpace::~ReadOnlySpace() {
  Unseal();
  for (ReadOnlyPage* chunk : pages_) {
    heap()->memory_allocator()->FreeReadOnlyPage(chunk);
  }
  pages_.resize(0);
  accounting_stats_.Clear();
}

ReadOnlyArtifacts::~ReadOnlyArtifacts() {
  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();

  MemoryChunk* next_chunk;
  for (MemoryChunk* chunk = pages_.front(); chunk != nullptr;
       chunk = next_chunk) {
  for (ReadOnlyPage* chunk : pages_) {
    void* chunk_address = reinterpret_cast<void*>(chunk->address());
    page_allocator->SetPermissions(chunk_address, chunk->size(),
                                   PageAllocator::kReadWrite);
    next_chunk = chunk->list_node().next();
    size_t size = RoundUp(chunk->size(), page_allocator->AllocatePageSize());
    CHECK(page_allocator->FreePages(chunk_address, size));
  }

@@ -46,17 +57,19 @@ void ReadOnlyArtifacts::set_read_only_heap(
}

SharedReadOnlySpace::~SharedReadOnlySpace() {
  // Clear the memory chunk list before the space is deleted, so that the
  // inherited destructors don't try to destroy the MemoryChunks themselves.
  memory_chunk_list_ = heap::List<MemoryChunk>();
  // Clear the chunk list before the space is deleted, so that the inherited
  // destructors don't try to destroy the BasicMemoryChunks themselves.
  pages_.resize(0);
}

SharedReadOnlySpace::SharedReadOnlySpace(
    Heap* heap, std::shared_ptr<ReadOnlyArtifacts> artifacts)
    : ReadOnlySpace(heap) {
  artifacts->pages().ShallowCopyTo(&memory_chunk_list_);
  pages_ = artifacts->pages();
  is_marked_read_only_ = true;
  accounting_stats_ = artifacts->accounting_stats();
  top_ = kNullAddress;
  limit_ = kNullAddress;
}

void ReadOnlySpace::DetachPagesAndAddToArtifacts(

@@ -64,14 +77,13 @@ void ReadOnlySpace::DetachPagesAndAddToArtifacts(
  Heap* heap = ReadOnlySpace::heap();
  Seal(SealMode::kDetachFromHeapAndForget);
  artifacts->set_accounting_stats(accounting_stats_);
  artifacts->TransferPages(std::move(memory_chunk_list_));
  artifacts->TransferPages(std::move(pages_));
  artifacts->set_shared_read_only_space(
      std::make_unique<SharedReadOnlySpace>(heap, artifacts));
  heap->ReplaceReadOnlySpace(artifacts->shared_read_only_space());
}

void ReadOnlyPage::MakeHeaderRelocatable() {
  ReleaseAllocatedMemoryNeededForWritableChunk();
  // Detached read-only space needs to have a valid marking bitmap. Instruct
  // Lsan to ignore it if required.
  LSAN_IGNORE_OBJECT(marking_bitmap_);

@@ -81,12 +93,13 @@ void ReadOnlyPage::MakeHeaderRelocatable() {

void ReadOnlySpace::SetPermissionsForPages(MemoryAllocator* memory_allocator,
                                           PageAllocator::Permission access) {
  for (Page* p : *this) {
  for (BasicMemoryChunk* chunk : pages_) {
    // Read only pages don't have valid reservation object so we get proper
    // page allocator manually.
    v8::PageAllocator* page_allocator =
        memory_allocator->page_allocator(p->executable());
    CHECK(SetPermissions(page_allocator, p->address(), p->size(), access));
        memory_allocator->page_allocator(NOT_EXECUTABLE);
    CHECK(SetPermissions(page_allocator, chunk->address(), chunk->size(),
                         access));
  }
}

@@ -94,27 +107,20 @@ void ReadOnlySpace::SetPermissionsForPages(MemoryAllocator* memory_allocator,
// on the heap. If there was already a free list then the elements on it
// were created with the wrong FreeSpaceMap (normally nullptr), so we need to
// fix them.
void ReadOnlySpace::RepairFreeListsAfterDeserialization() {
  free_list_->RepairLists(heap());
void ReadOnlySpace::RepairFreeSpacesAfterDeserialization() {
  BasicMemoryChunk::UpdateHighWaterMark(top_);
  // Each page may have a small free space that is not tracked by a free list.
  // Those free spaces still contain null as their map pointer.
  // Overwrite them with new fillers.
  for (Page* page : *this) {
    int size = static_cast<int>(page->wasted_memory());
    if (size == 0) {
      // If there is no wasted memory then all free space is in the free list.
      continue;
  for (BasicMemoryChunk* chunk : pages_) {
    Address start = chunk->HighWaterMark();
    Address end = chunk->area_end();
    // Put a filler object in the gap between the end of the allocated objects
    // and the end of the allocatable area.
    if (start < end) {
      heap()->CreateFillerObjectAt(start, static_cast<int>(end - start),
                                   ClearRecordedSlots::kNo);
    }
    Address start = page->HighWaterMark();
    Address end = page->area_end();
    if (start < end - size) {
      // A region at the high watermark is already in free list.
      HeapObject filler = HeapObject::FromAddress(start);
      CHECK(filler.IsFreeSpaceOrFiller());
      start += filler.Size();
    }
    CHECK_EQ(size, static_cast<int>(end - start));
    heap()->CreateFillerObjectAt(start, size, ClearRecordedSlots::kNo);
  }
}
@@ -145,29 +151,373 @@ void ReadOnlySpace::Seal(SealMode ro_mode) {

  if (ro_mode == SealMode::kDetachFromHeapAndForget) {
    DetachFromHeap();
    for (Page* p : *this) {
      memory_allocator->UnregisterMemory(p);
      static_cast<ReadOnlyPage*>(p)->MakeHeaderRelocatable();
    }
  } else {
    for (Page* p : *this) {
      p->ReleaseAllocatedMemoryNeededForWritableChunk();
    for (BasicMemoryChunk* chunk : pages_) {
      memory_allocator->UnregisterMemory(chunk);
      static_cast<ReadOnlyPage*>(chunk)->MakeHeaderRelocatable();
    }
  }

  free_list_.reset();

  SetPermissionsForPages(memory_allocator, PageAllocator::kRead);
}

void ReadOnlySpace::Unseal() {
  DCHECK(is_marked_read_only_);
  if (HasPages()) {
  if (!pages_.empty()) {
    SetPermissionsForPages(heap()->memory_allocator(),
                           PageAllocator::kReadWrite);
  }
  is_marked_read_only_ = false;
}

bool ReadOnlySpace::ContainsSlow(Address addr) {
  BasicMemoryChunk* c = BasicMemoryChunk::FromAddress(addr);
  for (BasicMemoryChunk* chunk : pages_) {
    if (chunk == c) return true;
  }
  return false;
}

namespace {
// Only iterates over a single chunk as the chunk iteration is done externally.
class ReadOnlySpaceObjectIterator : public ObjectIterator {
 public:
  ReadOnlySpaceObjectIterator(Heap* heap, ReadOnlySpace* space,
                              BasicMemoryChunk* chunk)
      : cur_addr_(chunk->area_start()),
        cur_end_(chunk->area_end()),
        space_(space) {}

  // Advance to the next object, skipping free spaces and other fillers and
  // skipping the special garbage section of which there is one per space.
  // Returns nullptr when the iteration has ended.
  HeapObject Next() override {
    HeapObject next_obj = FromCurrentPage();
    if (!next_obj.is_null()) return next_obj;
    return HeapObject();
  }

 private:
  HeapObject FromCurrentPage() {
    while (cur_addr_ != cur_end_) {
      if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
        cur_addr_ = space_->limit();
        continue;
      }
      HeapObject obj = HeapObject::FromAddress(cur_addr_);
      const int obj_size = obj.Size();
      cur_addr_ += obj_size;
      DCHECK_LE(cur_addr_, cur_end_);
      if (!obj.IsFreeSpaceOrFiller()) {
        if (obj.IsCode()) {
          DCHECK(Code::cast(obj).is_builtin());
          DCHECK_CODEOBJECT_SIZE(obj_size, space_);
        } else {
          DCHECK_OBJECT_SIZE(obj_size);
        }
        return obj;
      }
    }
    return HeapObject();
  }

  Address cur_addr_;  // Current iteration point.
  Address cur_end_;   // End iteration point.
  ReadOnlySpace* space_;
};
}  // namespace

#ifdef VERIFY_HEAP
namespace {
class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
 public:
  explicit VerifyReadOnlyPointersVisitor(Heap* heap)
      : VerifyPointersVisitor(heap) {}

 protected:
  void VerifyPointers(HeapObject host, MaybeObjectSlot start,
                      MaybeObjectSlot end) override {
    if (!host.is_null()) {
      CHECK(ReadOnlyHeap::Contains(host.map()));
    }
    VerifyPointersVisitor::VerifyPointers(host, start, end);

    for (MaybeObjectSlot current = start; current < end; ++current) {
      HeapObject heap_object;
      if ((*current)->GetHeapObject(&heap_object)) {
        CHECK(ReadOnlyHeap::Contains(heap_object));
      }
    }
  }
};
}  // namespace

void ReadOnlySpace::Verify(Isolate* isolate) {
  bool allocation_pointer_found_in_space = top_ == limit_;
  VerifyReadOnlyPointersVisitor visitor(isolate->heap());

  for (BasicMemoryChunk* page : pages_) {
#ifdef V8_SHARED_RO_HEAP
    CHECK_NULL(page->owner());
#else
    CHECK_EQ(page->owner(), this);
#endif

    if (page == Page::FromAllocationAreaAddress(top_)) {
      allocation_pointer_found_in_space = true;
    }
    ReadOnlySpaceObjectIterator it(isolate->heap(), this, page);
    Address end_of_previous_object = page->area_start();
    Address top = page->area_end();

    for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
      CHECK(end_of_previous_object <= object.address());

      Map map = object.map();
      CHECK(map.IsMap());

      // The object itself should look OK.
      object.ObjectVerify(isolate);

      // All the interior pointers should be contained in the heap.
      int size = object.Size();
      object.IterateBody(map, size, &visitor);
      CHECK(object.address() + size <= top);
      end_of_previous_object = object.address() + size;

      CHECK(!object.IsExternalString());
      CHECK(!object.IsJSArrayBuffer());
    }
  }
  CHECK(allocation_pointer_found_in_space);

#ifdef DEBUG
  VerifyCounters(isolate->heap());
#endif
}

#ifdef DEBUG
void ReadOnlySpace::VerifyCounters(Heap* heap) {
  size_t total_capacity = 0;
  size_t total_allocated = 0;
  for (BasicMemoryChunk* page : pages_) {
    total_capacity += page->area_size();
    ReadOnlySpaceObjectIterator it(heap, this, page);
    size_t real_allocated = 0;
    for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
      if (!object.IsFreeSpaceOrFiller()) {
        real_allocated += object.Size();
      }
    }
    total_allocated += page->allocated_bytes();
    // The real size can be smaller than the accounted size if array trimming,
    // object slack tracking happened after sweeping.
    DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
    DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
  }
  DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
  DCHECK_EQ(total_allocated, accounting_stats_.Size());
}
#endif  // DEBUG
#endif  // VERIFY_HEAP

size_t ReadOnlySpace::CommittedPhysicalMemory() {
  if (!base::OS::HasLazyCommits()) return CommittedMemory();
  BasicMemoryChunk::UpdateHighWaterMark(top_);
  size_t size = 0;
  for (auto* chunk : pages_) {
    size += chunk->size();
  }

  return size;
}

void ReadOnlySpace::FreeLinearAllocationArea() {
  // Mark the old linear allocation area with a free space map so it can be
  // skipped when scanning the heap.
  if (top_ == kNullAddress) {
    DCHECK_EQ(kNullAddress, limit_);
    return;
  }

  // Clear the bits in the unused black area.
  ReadOnlyPage* page = pages_.back();
  heap()->incremental_marking()->marking_state()->bitmap(page)->ClearRange(
      page->AddressToMarkbitIndex(top_), page->AddressToMarkbitIndex(limit_));

  heap()->CreateFillerObjectAt(top_, static_cast<int>(limit_ - top_),
                               ClearRecordedSlots::kNo);

  BasicMemoryChunk::UpdateHighWaterMark(top_);

  top_ = kNullAddress;
  limit_ = kNullAddress;
}

void ReadOnlySpace::EnsureSpaceForAllocation(int size_in_bytes) {
  if (top_ + size_in_bytes <= limit_) {
    return;
  }

  DCHECK_GE(size_in_bytes, 0);

  FreeLinearAllocationArea();

  BasicMemoryChunk* chunk =
      heap()->memory_allocator()->AllocateReadOnlyPage(AreaSize(), this);

  accounting_stats_.IncreaseCapacity(chunk->area_size());
  AccountCommitted(chunk->size());
  CHECK_NOT_NULL(chunk);
  pages_.push_back(static_cast<ReadOnlyPage*>(chunk));

  heap()->CreateFillerObjectAt(chunk->area_start(),
                               static_cast<int>(chunk->area_size()),
                               ClearRecordedSlots::kNo);

  top_ = chunk->area_start();
  limit_ = chunk->area_end();
  return;
}
HeapObject ReadOnlySpace::TryAllocateLinearlyAligned(
    int size_in_bytes, AllocationAlignment alignment) {
  Address current_top = top_;
  int filler_size = Heap::GetFillToAlign(current_top, alignment);

  Address new_top = current_top + filler_size + size_in_bytes;
  if (new_top > limit_) return HeapObject();

  // Allocation always occurs in the last chunk for RO_SPACE.
  BasicMemoryChunk* chunk = pages_.back();
  int allocated_size = filler_size + size_in_bytes;
  accounting_stats_.IncreaseAllocatedBytes(allocated_size, chunk);
  chunk->IncreaseAllocatedBytes(allocated_size);

  top_ = new_top;
  if (filler_size > 0) {
    return Heap::PrecedeWithFiller(ReadOnlyRoots(heap()),
                                   HeapObject::FromAddress(current_top),
                                   filler_size);
  }

  return HeapObject::FromAddress(current_top);
}
|
||||
AllocationResult ReadOnlySpace::AllocateRawAligned(
|
||||
int size_in_bytes, AllocationAlignment alignment) {
|
||||
DCHECK(!IsDetached());
|
||||
int allocation_size = size_in_bytes;
|
||||
|
||||
HeapObject object = TryAllocateLinearlyAligned(allocation_size, alignment);
|
||||
if (object.is_null()) {
|
||||
// We don't know exactly how much filler we need to align until space is
|
||||
// allocated, so assume the worst case.
|
||||
EnsureSpaceForAllocation(allocation_size +
|
||||
Heap::GetMaximumFillToAlign(alignment));
|
||||
allocation_size = size_in_bytes;
|
||||
object = TryAllocateLinearlyAligned(size_in_bytes, alignment);
|
||||
CHECK(!object.is_null());
|
||||
}
|
||||
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
|
||||
|
||||
return object;
|
||||
}
|
||||
|
||||
AllocationResult ReadOnlySpace::AllocateRawUnaligned(int size_in_bytes) {
|
||||
DCHECK(!IsDetached());
|
||||
EnsureSpaceForAllocation(size_in_bytes);
|
||||
Address current_top = top_;
|
||||
Address new_top = current_top + size_in_bytes;
|
||||
DCHECK_LE(new_top, limit_);
|
||||
top_ = new_top;
|
||||
HeapObject object = HeapObject::FromAddress(current_top);
|
||||
|
||||
DCHECK(!object.is_null());
|
||||
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
|
||||
|
||||
// Allocation always occurs in the last chunk for RO_SPACE.
|
||||
BasicMemoryChunk* chunk = pages_.back();
|
||||
accounting_stats_.IncreaseAllocatedBytes(size_in_bytes, chunk);
|
||||
chunk->IncreaseAllocatedBytes(size_in_bytes);
|
||||
|
||||
return object;
|
||||
}
|
||||
|
||||
AllocationResult ReadOnlySpace::AllocateRaw(size_t size_in_bytes,
|
||||
AllocationAlignment alignment) {
|
||||
#ifdef V8_HOST_ARCH_32_BIT
|
||||
AllocationResult result = alignment != kWordAligned
|
||||
? AllocateRawAligned(size_in_bytes, alignment)
|
||||
: AllocateRawUnaligned(size_in_bytes);
|
||||
#else
|
||||
AllocationResult result =
|
||||
AllocateRawUnaligned(static_cast<int>(size_in_bytes));
|
||||
#endif
|
||||
HeapObject heap_obj;
|
||||
if (!result.IsRetry() && result.To(&heap_obj)) {
|
||||
DCHECK(heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
|
||||
}
|
||||
return result;
|
||||
}

size_t ReadOnlyPage::ShrinkToHighWaterMark() {
  // Shrink pages to high water mark. The water mark points either to a filler
  // or the area_end.
  HeapObject filler = HeapObject::FromAddress(HighWaterMark());
  if (filler.address() == area_end()) return 0;
  CHECK(filler.IsFreeSpaceOrFiller());
  DCHECK_EQ(filler.address() + filler.Size(), area_end());

  size_t unused = RoundDown(static_cast<size_t>(area_end() - filler.address()),
                            MemoryAllocator::GetCommitPageSize());
  if (unused > 0) {
    DCHECK_EQ(0u, unused % MemoryAllocator::GetCommitPageSize());
    if (FLAG_trace_gc_verbose) {
      PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
                   reinterpret_cast<void*>(this),
                   reinterpret_cast<void*>(area_end()),
                   reinterpret_cast<void*>(area_end() - unused));
    }
    heap()->CreateFillerObjectAt(
        filler.address(),
        static_cast<int>(area_end() - filler.address() - unused),
        ClearRecordedSlots::kNo);
    heap()->memory_allocator()->PartialFreeMemory(
        this, address() + size() - unused, unused, area_end() - unused);
    if (filler.address() != area_end()) {
      CHECK(filler.IsFreeSpaceOrFiller());
      CHECK_EQ(filler.address() + filler.Size(), area_end());
    }
  }
  return unused;
}
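
The RoundDown above exists because memory can only be returned to the OS in whole commit pages: the slack between the high water mark and area_end is trimmed down to a page multiple, and whatever cannot be uncommitted stays behind as a filler object. A small sketch of that rounding, assuming a 4 KiB commit page size:

#include <cstddef>

// Largest multiple of granularity that is <= value, as used above.
constexpr size_t RoundDown(size_t value, size_t granularity) {
  return value - value % granularity;
}

// With 7000 unused bytes past the high water mark, one whole 4 KiB page can
// be uncommitted and the remaining 2904 bytes stay as filler.
static_assert(RoundDown(7000, 4096) == 4096, "one commit page freed");
static_assert(RoundDown(4095, 4096) == 0, "less than a page frees nothing");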

void ReadOnlySpace::ShrinkPages() {
  DCHECK(!heap()->deserialization_complete());
  BasicMemoryChunk::UpdateHighWaterMark(top_);
  heap()->CreateFillerObjectAt(top_, static_cast<int>(limit_ - top_),
                               ClearRecordedSlots::kNo);

  for (ReadOnlyPage* chunk : pages_) {
    DCHECK(chunk->IsFlagSet(Page::NEVER_EVACUATE));
    size_t unused = chunk->ShrinkToHighWaterMark();
    accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
    AccountUncommitted(unused);
  }
  limit_ = pages_.back()->area_end();
}

ReadOnlyPage* ReadOnlySpace::InitializePage(BasicMemoryChunk* chunk) {
  ReadOnlyPage* page = reinterpret_cast<ReadOnlyPage*>(chunk);
  page->allocated_bytes_ = 0;
  page->SetFlag(BasicMemoryChunk::Flag::NEVER_EVACUATE);
  heap()
      ->incremental_marking()
      ->non_atomic_marking_state()
      ->bitmap(chunk)
      ->MarkAllBits();
  chunk->SetFlag(BasicMemoryChunk::READ_ONLY_HEAP);

  return page;
}
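
MarkAllBits pre-marks every object on a read-only page as live, which is what lets the IsBlack DCHECK in AllocateRaw above hold without these pages ever being visited by the marker. Setting a whole marking bitmap is essentially filling its cells, roughly as follows (illustrative only, not V8's bitmap class):

#include <cstdint>
#include <vector>

// Fill-style MarkAllBits: every mark bit set, so every object reads as live.
struct MarkingBitmap {
  std::vector<uint32_t> cells;
  void MarkAllBits() {
    for (uint32_t& cell : cells) cell = ~uint32_t{0};
  }
};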

}  // namespace internal
}  // namespace v8

@ -10,7 +10,9 @@

#include "include/v8-platform.h"
#include "src/base/macros.h"
#include "src/common/globals.h"
#include "src/heap/allocation-stats.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/list.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/paged-spaces.h"
@ -22,12 +24,14 @@ namespace internal {
class MemoryAllocator;
class ReadOnlyHeap;

class ReadOnlyPage : public Page {
class ReadOnlyPage : public BasicMemoryChunk {
 public:
  // Clears any pointers in the header that point out of the page that would
  // otherwise make the header non-relocatable.
  void MakeHeaderRelocatable();

  size_t ShrinkToHighWaterMark();

 private:
  friend class ReadOnlySpace;
};
@ -48,8 +52,8 @@ class ReadOnlyArtifacts {
    return shared_read_only_space_.get();
  }

  heap::List<MemoryChunk>& pages() { return pages_; }
  void TransferPages(heap::List<MemoryChunk>&& pages) {
  std::vector<ReadOnlyPage*>& pages() { return pages_; }
  void TransferPages(std::vector<ReadOnlyPage*>&& pages) {
    pages_ = std::move(pages);
  }

@ -59,7 +63,7 @@ class ReadOnlyArtifacts {
  ReadOnlyHeap* read_only_heap() { return read_only_heap_.get(); }

 private:
  heap::List<MemoryChunk> pages_;
  std::vector<ReadOnlyPage*> pages_;
  AllocationStats stats_;
  std::unique_ptr<SharedReadOnlySpace> shared_read_only_space_;
  std::unique_ptr<ReadOnlyHeap> read_only_heap_;
@ -67,7 +71,7 @@ class ReadOnlyArtifacts {

// -----------------------------------------------------------------------------
// Read Only space for all Immortal Immovable and Immutable objects
class ReadOnlySpace : public PagedSpace {
class ReadOnlySpace : public BaseSpace {
 public:
  explicit ReadOnlySpace(Heap* heap);

@ -76,13 +80,19 @@ class ReadOnlySpace : public PagedSpace {
  void DetachPagesAndAddToArtifacts(
      std::shared_ptr<ReadOnlyArtifacts> artifacts);

  ~ReadOnlySpace() override { Unseal(); }
  ~ReadOnlySpace() override;

  bool IsDetached() const { return heap_ == nullptr; }

  bool writable() const { return !is_marked_read_only_; }

  bool Contains(Address a) = delete;
  bool Contains(Object o) = delete;

  V8_EXPORT_PRIVATE
  AllocationResult AllocateRaw(size_t size_in_bytes,
                               AllocationAlignment alignment);

  V8_EXPORT_PRIVATE void ClearStringPaddingIfNeeded();

  enum class SealMode { kDetachFromHeapAndForget, kDoNotDetachFromHeap };
@ -93,10 +103,32 @@ class ReadOnlySpace : public PagedSpace {
  void Seal(SealMode ro_mode);

  // During boot the free_space_map is created, and afterwards we may need
  // to write it into the free list nodes that were already created.
  void RepairFreeListsAfterDeserialization();
  // to write it into the free space nodes that were already created.
  void RepairFreeSpacesAfterDeserialization();

  size_t Available() override { return 0; }
  size_t Size() override { return accounting_stats_.Size(); }
  size_t CommittedPhysicalMemory() override;

  const std::vector<ReadOnlyPage*>& pages() const { return pages_; }
  Address top() const { return top_; }
  Address limit() const { return limit_; }
  size_t Capacity() const { return capacity_; }

  bool ContainsSlow(Address addr);
  void ShrinkPages();
#ifdef VERIFY_HEAP
  void Verify(Isolate* isolate);
#ifdef DEBUG
  void VerifyCounters(Heap* heap);
#endif  // DEBUG
#endif  // VERIFY_HEAP

  // Return size of allocatable area on a page in this space.
  int AreaSize() { return static_cast<int>(area_size_); }

  ReadOnlyPage* InitializePage(BasicMemoryChunk* chunk);

  Address FirstPageAddress() const { return pages_.front()->address(); }

 protected:
  void SetPermissionsForPages(MemoryAllocator* memory_allocator,
@ -104,16 +136,36 @@ class ReadOnlySpace : public PagedSpace {

  bool is_marked_read_only_ = false;

  // Accounting information for this space.
  AllocationStats accounting_stats_;

  std::vector<ReadOnlyPage*> pages_;

  Address top_;
  Address limit_;

 private:
  // Unseal the space after is has been sealed, by making it writable.
  // TODO(v8:7464): Only possible if the space hasn't been detached.
  // Unseal the space after it has been sealed, by making it writable.
  void Unseal();

  //
  // String padding must be cleared just before serialization and therefore the
  // string padding in the space will already have been cleared if the space was
  // deserialized.
  void DetachFromHeap() { heap_ = nullptr; }

  AllocationResult AllocateRawUnaligned(int size_in_bytes);
  AllocationResult AllocateRawAligned(int size_in_bytes,
                                      AllocationAlignment alignment);

  HeapObject TryAllocateLinearlyAligned(int size_in_bytes,
                                        AllocationAlignment alignment);
  void EnsureSpaceForAllocation(int size_in_bytes);
  void FreeLinearAllocationArea();

  // String padding must be cleared just before serialization and therefore
  // the string padding in the space will already have been cleared if the
  // space was deserialized.
  bool is_string_padding_cleared_;

  size_t capacity_;
  size_t area_size_;
};

class SharedReadOnlySpace : public ReadOnlySpace {
@ -130,11 +130,6 @@ void Page::MergeOldToNewRememberedSets() {
  sweeping_slot_set_ = nullptr;
}

void Page::ResetAllocationStatistics() {
  allocated_bytes_ = area_size();
  wasted_memory_ = 0;
}

void Page::AllocateLocalTracker() {
  DCHECK_NULL(local_tracker_);
  local_tracker_ = new LocalArrayBufferTracker(this);
@ -363,15 +363,70 @@ class NoFreeList final : public FreeList {
};

// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class V8_EXPORT_PRIVATE Space : public Malloced {
// BaseSpace is the abstract superclass for all allocation spaces.
class V8_EXPORT_PRIVATE BaseSpace : public Malloced {
 public:
  Heap* heap() const {
    DCHECK_NOT_NULL(heap_);
    return heap_;
  }

  AllocationSpace identity() { return id_; }

  const char* name() { return Heap::GetSpaceName(id_); }

  void AccountCommitted(size_t bytes) {
    DCHECK_GE(committed_ + bytes, committed_);
    committed_ += bytes;
    if (committed_ > max_committed_) {
      max_committed_ = committed_;
    }
  }

  void AccountUncommitted(size_t bytes) {
    DCHECK_GE(committed_, committed_ - bytes);
    committed_ -= bytes;
  }

  // Return the total amount of committed memory for this space, i.e.,
  // allocatable memory and page headers.
  virtual size_t CommittedMemory() { return committed_; }

  virtual size_t MaximumCommittedMemory() { return max_committed_; }

  // Approximate amount of physical memory committed for this space.
  virtual size_t CommittedPhysicalMemory() = 0;

  // Returns allocated size.
  virtual size_t Size() = 0;

 protected:
  BaseSpace(Heap* heap, AllocationSpace id)
      : heap_(heap), id_(id), committed_(0), max_committed_(0) {}

  // Virtual so that subclasses can be deleted through a BaseSpace pointer.
  virtual ~BaseSpace() = default;

 protected:
  Heap* heap_;
  AllocationSpace id_;

  // Keeps track of committed memory in a space.
  std::atomic<size_t> committed_;
  size_t max_committed_;

  DISALLOW_COPY_AND_ASSIGN(BaseSpace);
};
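
This committed_/max_committed_ bookkeeping is now the entire contract between ReadOnlySpace and the rest of the space hierarchy; allocation observers, free lists, and page lists all stay in Space. A reduced model of the same accounting pattern, separate from the V8 classes above:

#include <atomic>
#include <cstddef>

// Reduced model of BaseSpace's committed-memory accounting; not V8 code.
// Like the original, the max_committed_ update is not atomic with the
// increment and assumes callers synchronize updates externally.
class CommittedCounter {
 public:
  void AccountCommitted(size_t bytes) {
    committed_ += bytes;
    if (committed_ > max_committed_) max_committed_ = committed_;
  }
  void AccountUncommitted(size_t bytes) { committed_ -= bytes; }

  size_t committed() const { return committed_; }
  size_t max_committed() const { return max_committed_; }

 private:
  std::atomic<size_t> committed_{0};
  size_t max_committed_ = 0;
};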

// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces that are not
// sealed after startup (i.e. not ReadOnlySpace).
class V8_EXPORT_PRIVATE Space : public BaseSpace {
 public:
  Space(Heap* heap, AllocationSpace id, FreeList* free_list)
      : allocation_observers_paused_(false),
        heap_(heap),
        id_(id),
        committed_(0),
        max_committed_(0),
      : BaseSpace(heap, id),
        allocation_observers_paused_(false),
        free_list_(std::unique_ptr<FreeList>(free_list)) {
    external_backing_store_bytes_ =
        new std::atomic<size_t>[ExternalBackingStoreType::kNumTypes];

@ -383,22 +438,11 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
  static inline void MoveExternalBackingStoreBytes(
      ExternalBackingStoreType type, Space* from, Space* to, size_t amount);

  virtual ~Space() {
  ~Space() override {
    delete[] external_backing_store_bytes_;
    external_backing_store_bytes_ = nullptr;
  }

  Heap* heap() const {
    DCHECK_NOT_NULL(heap_);
    return heap_;
  }

  bool IsDetached() const { return heap_ == nullptr; }

  AllocationSpace identity() { return id_; }

  const char* name() { return Heap::GetSpaceName(id_); }

  virtual void AddAllocationObserver(AllocationObserver* observer);

  virtual void RemoveAllocationObserver(AllocationObserver* observer);
@ -416,22 +460,10 @@
  // single allocation-folding group.
  void AllocationStepAfterMerge(Address first_object_in_chunk, int size);

  // Return the total amount committed memory for this space, i.e., allocatable
  // memory and page headers.
  virtual size_t CommittedMemory() { return committed_; }

  virtual size_t MaximumCommittedMemory() { return max_committed_; }

  // Returns allocated size.
  virtual size_t Size() = 0;

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see OldLargeObjectSpace).
  virtual size_t SizeOfObjects() { return Size(); }

  // Approximate amount of physical memory committed for this space.
  virtual size_t CommittedPhysicalMemory() = 0;

  // Return the available bytes without growing.
  virtual size_t Available() = 0;

@ -445,19 +477,6 @@

  virtual std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) = 0;

  void AccountCommitted(size_t bytes) {
    DCHECK_GE(committed_ + bytes, committed_);
    committed_ += bytes;
    if (committed_ > max_committed_) {
      max_committed_ = committed_;
    }
  }

  void AccountUncommitted(size_t bytes) {
    DCHECK_GE(committed_, committed_ - bytes);
    committed_ -= bytes;
  }

  inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                                 size_t amount);

@ -470,8 +489,6 @@
    return external_backing_store_bytes_[type];
  }

  void* GetRandomMmapAddr();

  MemoryChunk* first_page() { return memory_chunk_list_.front(); }
  MemoryChunk* last_page() { return memory_chunk_list_.back(); }

@ -482,6 +499,8 @@

  FreeList* free_list() { return free_list_.get(); }

  Address FirstPageAddress() const { return first_page()->address(); }

#ifdef DEBUG
  virtual void Print() = 0;
#endif

@ -492,8 +511,6 @@
    return !allocation_observers_paused_ && !allocation_observers_.empty();
  }

  void DetachFromHeap() { heap_ = nullptr; }

  std::vector<AllocationObserver*> allocation_observers_;

  // The List manages the pages that belong to the given space.

@ -503,12 +520,6 @@
  std::atomic<size_t>* external_backing_store_bytes_;

  bool allocation_observers_paused_;
  Heap* heap_;
  AllocationSpace id_;

  // Keeps track of committed memory in a space.
  std::atomic<size_t> committed_;
  size_t max_committed_;

  std::unique_ptr<FreeList> free_list_;

@ -585,17 +596,6 @@ class Page : public MemoryChunk {
    }
  }

  // Returns the offset of a given address to this page.
  inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }

  // Returns the address for a given offset in this page.
  Address OffsetToAddress(size_t offset) {
    Address address_in_page = address() + offset;
    DCHECK_GE(address_in_page, area_start());
    DCHECK_LT(address_in_page, area_end());
    return address_in_page;
  }

  void AllocateLocalTracker();
  inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
  bool contains_array_buffers();

@ -611,21 +611,6 @@
    return categories_[type];
  }

  size_t wasted_memory() { return wasted_memory_; }
  void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
  size_t allocated_bytes() { return allocated_bytes_; }
  void IncreaseAllocatedBytes(size_t bytes) {
    DCHECK_LE(bytes, area_size());
    allocated_bytes_ += bytes;
  }
  void DecreaseAllocatedBytes(size_t bytes) {
    DCHECK_LE(bytes, area_size());
    DCHECK_GE(allocated_bytes(), bytes);
    allocated_bytes_ -= bytes;
  }

  void ResetAllocationStatistics();

  size_t ShrinkToHighWaterMark();

  V8_EXPORT_PRIVATE void CreateBlackArea(Address start, Address end);
@ -107,14 +107,14 @@ bool CodeSerializer::SerializeReadOnlyObject(HeapObject obj) {
  // create a back reference that encodes the page number as the chunk_index and
  // the offset within the page as the chunk_offset.
  Address address = obj.address();
  Page* page = Page::FromAddress(address);
  BasicMemoryChunk* chunk = BasicMemoryChunk::FromAddress(address);
  uint32_t chunk_index = 0;
  ReadOnlySpace* const read_only_space = isolate()->heap()->read_only_space();
  for (Page* p : *read_only_space) {
    if (p == page) break;
  for (ReadOnlyPage* page : read_only_space->pages()) {
    if (chunk == page) break;
    ++chunk_index;
  }
  uint32_t chunk_offset = static_cast<uint32_t>(page->Offset(address));
  uint32_t chunk_offset = static_cast<uint32_t>(chunk->Offset(address));
  SerializerReference back_reference = SerializerReference::BackReference(
      SnapshotSpace::kReadOnlyHeap, chunk_index, chunk_offset);
  reference_map()->Add(reinterpret_cast<void*>(obj.ptr()), back_reference);
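
The serializer change above and the deserializer change below are two halves of the same scheme: a read-only object is recorded as (page index, offset within page), and because the pages now sit in a std::vector the deserializer can decode that pair by direct indexing instead of walking next_page() links. A minimal model of the round trip, with hypothetical types (the real code goes through SerializerReference and the snapshot byte stream):

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical model of the (chunk_index, chunk_offset) back reference.
struct BackRef {
  uint32_t chunk_index;   // position of the page in the space's page vector
  uint32_t chunk_offset;  // byte offset of the object within that page
};

// Encode: find the page containing addr, like the loop above that counts
// pages until `chunk == page`.
inline BackRef Encode(const std::vector<char*>& pages, size_t page_size,
                      const char* addr) {
  for (uint32_t i = 0; i < pages.size(); ++i) {
    const char* start = pages[i];
    if (addr >= start && addr < start + page_size) {
      return BackRef{i, static_cast<uint32_t>(addr - start)};
    }
  }
  return BackRef{0, 0};  // unreachable for valid addresses; real code CHECKs
}

// Decode: O(1) vector indexing; the old heap::List code had to follow
// next_page() chunk_index times.
inline const char* Decode(const std::vector<char*>& pages, const BackRef& ref) {
  return pages[ref.chunk_index] + ref.chunk_offset;
}
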
@ -364,12 +364,9 @@ HeapObject Deserializer::GetBackReferencedObject(SnapshotSpace space) {
  uint32_t chunk_index = source_.GetInt();
  uint32_t chunk_offset = source_.GetInt();
  if (is_off_thread() || isolate()->heap()->deserialization_complete()) {
    PagedSpace* read_only_space =
    ReadOnlySpace* read_only_space =
        local_isolate()->heap()->read_only_space();
    Page* page = read_only_space->first_page();
    for (uint32_t i = 0; i < chunk_index; ++i) {
      page = page->next_page();
    }
    ReadOnlyPage* page = read_only_space->pages()[chunk_index];
    Address address = page->OffsetToAddress(chunk_offset);
    obj = HeapObject::FromAddress(address);
  } else {
@ -39,7 +39,7 @@ void ReadOnlyDeserializer::DeserializeInto(Isolate* isolate) {
  ReadOnlyRoots roots(isolate);

  roots.Iterate(this);
  ro_heap->read_only_space()->RepairFreeListsAfterDeserialization();
  ro_heap->read_only_space()->RepairFreeSpacesAfterDeserialization();

  // Deserialize the Read-only Object Cache.
  for (size_t i = 0;; ++i) {
@ -182,8 +182,8 @@ TEST(GetObjectProperties) {
      : Contains(props->brief, "maybe EmptyFixedArray"));

  // Provide a heap first page so the API can be more sure.
  heap_addresses.read_only_space_first_page = reinterpret_cast<uintptr_t>(
      i_isolate->heap()->read_only_space()->first_page());
  heap_addresses.read_only_space_first_page =
      i_isolate->heap()->read_only_space()->FirstPageAddress();
  props =
      d::GetObjectProperties(properties_or_hash, &ReadMemory, heap_addresses);
  CHECK(props->type_check_result ==
@ -95,14 +95,20 @@ static void DumpKnownObject(FILE* out, i::Heap* heap, const char* space_name,
#undef RO_ROOT_LIST_CASE
}

static void DumpSpaceFirstPageAddress(FILE* out, i::PagedSpace* space) {
static void DumpSpaceFirstPageAddress(FILE* out, i::BaseSpace* space,
                                      i::Address first_page) {
  const char* name = space->name();
  i::Address first_page = reinterpret_cast<i::Address>(space->first_page());
  i::Tagged_t compressed = i::CompressTagged(first_page);
  uintptr_t unsigned_compressed = static_cast<uint32_t>(compressed);
  i::PrintF(out, " 0x%08" V8PRIxPTR ": \"%s\",\n", unsigned_compressed, name);
}

template <typename SpaceT>
static void DumpSpaceFirstPageAddress(FILE* out, SpaceT* space) {
  i::Address first_page = space->FirstPageAddress();
  DumpSpaceFirstPageAddress(out, space, first_page);
}

static int DumpHeapConstants(FILE* out, const char* argv0) {
  // Start up V8.
  std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();