Reland "[heap] Set read-only space's and its pages' heap_ to null."

It seems the CodeBuilder CL, not the original change, actually caused the breakage.

This is a reland of 964edc251f

Original change's description:
> [heap] Set read-only space's and its pages' heap_ to null.
>
> Various small changes are required to enable this.
>
> HeapObject::GetReadOnlyRoots no longer uses the Space's heap when
> possible (see comment in ReadOnlyHeap::GetReadOnlyRoots definition).
> This requires that ReadOnlyRoots be constructible using a raw pointer
> to the read-only space's roots array.
>
> Global read-only heap state is now cleared by tests where appropriate
> and extra DCHECKs in ReadOnlyHeap::SetUp should make catching future
> issues easier.
>
> String padding is now always cleared just before read-only space is
> sealed when not deserializing.
>
> Change-Id: I7d1db1c11567be5df06ff7066f3a699125f8b372
> Bug: v8:7464
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1535830
> Commit-Queue: Maciej Goszczycki <goszczycki@google.com>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Reviewed-by: Dan Elphick <delphick@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#61188}

Bug: v8:7464
Change-Id: If75bbd16c2e2af5b80cd60811dfd7866f8be8309
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1599186
Commit-Queue: Maciej Goszczycki <goszczycki@google.com>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Dan Elphick <delphick@chromium.org>
Cr-Commit-Position: refs/heads/master@{#61323}
Maciej Goszczycki, 2019-05-08 11:36:30 +01:00, committed by Commit Bot
parent 0ec79a9b4c
commit b672d08990
20 changed files with 278 additions and 142 deletions
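
For orientation before the per-file hunks: the core idea of this CL is that a single read-only heap, once sealed, can back many isolates, so its pages can no longer point at any one Heap. Below is a self-contained sketch of the sharing scheme; it is an analogue only, with std::call_once standing in for base::CallOnce and a plain address array standing in for the roots table.

#include <cstdint>
#include <cstring>
#include <mutex>

using Address = uintptr_t;
constexpr size_t kEntriesCount = 8;  // stand-in for RootIndex::kReadOnlyRootsCount

struct SharedReadOnlyHeap {
  Address read_only_roots_[kEntriesCount] = {};
};

static SharedReadOnlyHeap* shared_ro_heap = nullptr;
static std::once_flag setup_ro_heap_once;

struct Isolate {
  Address ro_roots[kEntriesCount] = {};  // this isolate's read-only roots slice
};

// The first isolate creates the shared heap and publishes its root pointers
// (the real CL deserializes the read-only objects first); every isolate then
// copies the shared pointers into its own roots table.
void SetUpSharedReadOnlyHeap(Isolate* isolate) {
  std::call_once(setup_ro_heap_once, [&] {
    shared_ro_heap = new SharedReadOnlyHeap();
    std::memcpy(shared_ro_heap->read_only_roots_, isolate->ro_roots,
                kEntriesCount * sizeof(Address));
  });
  std::memcpy(isolate->ro_roots, shared_ro_heap->read_only_roots_,
              kEntriesCount * sizeof(Address));
}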

View File

@@ -754,8 +754,6 @@ StartupData SnapshotCreator::CreateBlob(
isolate->heap()->CompactWeakArrayLists(internal::AllocationType::kOld);
}
isolate->heap()->read_only_space()->ClearStringPaddingIfNeeded();
if (function_code_handling == FunctionCodeHandling::kClear) {
// Clear out re-compilable data from all shared function infos. Any
// JSFunctions using these SFIs will have their code pointers reset by the

View File

@@ -3603,7 +3603,7 @@ const char* Heap::GarbageCollectionReasonToString(
bool Heap::Contains(HeapObject value) {
// Check RO_SPACE first because IsOutsideAllocatedSpace cannot account for a
// shared RO_SPACE.
// TODO(goszczycki): Exclude read-only space. Use ReadOnlyHeap::Contains where
// TODO(v8:7464): Exclude read-only space. Use ReadOnlyHeap::Contains where
// appropriate.
if (read_only_space_ != nullptr && read_only_space_->Contains(value)) {
return true;
@@ -3741,9 +3741,18 @@ void Heap::Verify() {
lo_space_->Verify(isolate());
code_lo_space_->Verify(isolate());
new_lo_space_->Verify(isolate());
}
void Heap::VerifyReadOnlyHeap() {
CHECK(!read_only_space_->writable());
// TODO(v8:7464): Always verify read-only space once PagedSpace::Verify
// supports verifying shared read-only space. Currently HeapObjectIterator is
// explicitly disabled for read-only space when sharing is enabled, because it
// relies on PagedSpace::heap_ being non-null.
#ifndef V8_SHARED_RO_HEAP
VerifyReadOnlyPointersVisitor read_only_visitor(this);
read_only_space_->Verify(isolate(), &read_only_visitor);
#endif
}
class SlotVerifyingVisitor : public ObjectVisitor {

View File

@@ -1281,6 +1281,9 @@ class Heap {
#ifdef VERIFY_HEAP
// Verify the heap is in its normal state before or after a GC.
V8_EXPORT_PRIVATE void Verify();
// Verify the read-only heap after all read-only heap objects have been
// created.
void VerifyReadOnlyHeap();
void VerifyRememberedSetFor(HeapObject object);
#endif

View File

@@ -23,6 +23,7 @@
#include "src/heap/mark-compact-inl.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/sweeper.h"
#include "src/heap/worklist.h"
@@ -511,10 +512,10 @@ void MarkCompactCollector::CollectGarbage() {
}
#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreDirty(PagedSpace* space) {
HeapObjectIterator iterator(space);
for (HeapObject object = iterator.Next(); !object.is_null();
object = iterator.Next()) {
void MarkCompactCollector::VerifyMarkbitsAreDirty(ReadOnlySpace* space) {
ReadOnlyHeapIterator iterator(space);
for (HeapObject object = iterator.next(); !object.is_null();
object = iterator.next()) {
CHECK(non_atomic_marking_state()->IsBlack(object));
}
}
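
The switch from HeapObjectIterator to ReadOnlyHeapIterator matters because the latter never consults the space's Heap. A typical caller, sketched using only names from this CL:

ReadOnlySpace* space = isolate->heap()->read_only_space();
ReadOnlyHeapIterator iterator(space);
for (HeapObject object = iterator.next(); !object.is_null();
     object = iterator.next()) {
  // Unlike HeapObjectIterator, nothing here touches space->heap(), so this
  // also works on a shared read-only space whose pages have heap_ == nullptr.
}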

View File

@@ -703,7 +703,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
#ifdef VERIFY_HEAP
void VerifyValidStoreAndSlotsBufferEntries();
void VerifyMarkbitsAreClean();
void VerifyMarkbitsAreDirty(PagedSpace* space);
void VerifyMarkbitsAreDirty(ReadOnlySpace* space);
void VerifyMarkbitsAreClean(PagedSpace* space);
void VerifyMarkbitsAreClean(NewSpace* space);
void VerifyMarkbitsAreClean(LargeObjectSpace* space);

View File

@@ -8,6 +8,7 @@
#include "src/base/once.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/spaces.h"
#include "src/objects-inl.h"
#include "src/objects/heap-object-inl.h"
@@ -23,50 +24,65 @@ ReadOnlyHeap* shared_ro_heap = nullptr;
// static
void ReadOnlyHeap::SetUp(Isolate* isolate, ReadOnlyDeserializer* des) {
DCHECK_NOT_NULL(isolate);
#ifdef V8_SHARED_RO_HEAP
void* isolate_ro_roots = reinterpret_cast<void*>(
isolate->roots_table().read_only_roots_begin().address());
base::CallOnce(&setup_ro_heap_once, [isolate, des, isolate_ro_roots]() {
shared_ro_heap = Init(isolate, des);
if (des != nullptr) {
std::memcpy(shared_ro_heap->read_only_roots_, isolate_ro_roots,
kEntriesCount * sizeof(Address));
}
// Make sure we are only sharing read-only space when deserializing. Otherwise
// we would be trying to create heap objects inside an already initialized
// read-only space. Use ClearSharedHeapForTest if you need a new read-only
// space.
DCHECK_IMPLIES(shared_ro_heap != nullptr, des != nullptr);
base::CallOnce(&setup_ro_heap_once, [isolate, des]() {
shared_ro_heap = CreateAndAttachToIsolate(isolate);
if (des != nullptr) shared_ro_heap->DeserializeIntoIsolate(isolate, des);
});
isolate->heap()->SetUpFromReadOnlyHeap(shared_ro_heap);
if (des != nullptr) {
void* const isolate_ro_roots = reinterpret_cast<void*>(
isolate->roots_table().read_only_roots_begin().address());
std::memcpy(isolate_ro_roots, shared_ro_heap->read_only_roots_,
kEntriesCount * sizeof(Address));
}
#else
Init(isolate, des);
auto* ro_heap = CreateAndAttachToIsolate(isolate);
if (des != nullptr) ro_heap->DeserializeIntoIsolate(isolate, des);
#endif // V8_SHARED_RO_HEAP
}
void ReadOnlyHeap::OnCreateHeapObjectsComplete() {
DCHECK(!deserializing_);
#ifdef V8_SHARED_RO_HEAP
read_only_space_->Forget();
#endif
read_only_space_->MarkAsReadOnly();
void ReadOnlyHeap::DeserializeIntoIsolate(Isolate* isolate,
ReadOnlyDeserializer* des) {
DCHECK_NOT_NULL(des);
des->DeserializeInto(isolate);
InitFromIsolate(isolate);
}
void ReadOnlyHeap::OnCreateHeapObjectsComplete(Isolate* isolate) {
DCHECK_NOT_NULL(isolate);
InitFromIsolate(isolate);
}
// static
ReadOnlyHeap* ReadOnlyHeap::Init(Isolate* isolate, ReadOnlyDeserializer* des) {
ReadOnlyHeap* ReadOnlyHeap::CreateAndAttachToIsolate(Isolate* isolate) {
auto* ro_heap = new ReadOnlyHeap(new ReadOnlySpace(isolate->heap()));
isolate->heap()->SetUpFromReadOnlyHeap(ro_heap);
if (des != nullptr) {
des->DeserializeInto(isolate);
ro_heap->deserializing_ = true;
#ifdef V8_SHARED_RO_HEAP
ro_heap->read_only_space_->Forget();
#endif
ro_heap->read_only_space_->MarkAsReadOnly();
}
return ro_heap;
}
void ReadOnlyHeap::InitFromIsolate(Isolate* isolate) {
DCHECK(!init_complete_);
#ifdef V8_SHARED_RO_HEAP
void* const isolate_ro_roots = reinterpret_cast<void*>(
isolate->roots_table().read_only_roots_begin().address());
std::memcpy(read_only_roots_, isolate_ro_roots,
kEntriesCount * sizeof(Address));
read_only_space_->Seal(ReadOnlySpace::SealMode::kDetachFromHeapAndForget);
#else
read_only_space_->Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
#endif
init_complete_ = true;
}
void ReadOnlyHeap::OnHeapTearDown() {
#ifndef V8_SHARED_RO_HEAP
delete read_only_space_;
@@ -74,11 +90,35 @@ void ReadOnlyHeap::OnHeapTearDown() {
#endif
}
// static
void ReadOnlyHeap::ClearSharedHeapForTest() {
#ifdef V8_SHARED_RO_HEAP
DCHECK_NOT_NULL(shared_ro_heap);
// TODO(v8:7464): Just leak read-only space for now. The paged-space heap
// is null so there isn't a nice way to do this.
delete shared_ro_heap;
shared_ro_heap = nullptr;
setup_ro_heap_once = 0;
#endif
}
// static
bool ReadOnlyHeap::Contains(HeapObject object) {
return Page::FromAddress(object.ptr())->owner()->identity() == RO_SPACE;
}
// static
ReadOnlyRoots ReadOnlyHeap::GetReadOnlyRoots(HeapObject object) {
#ifdef V8_SHARED_RO_HEAP
// This fails if we are creating heap objects and the roots haven't yet been
// copied into the read-only heap or it has been cleared for testing.
if (shared_ro_heap != nullptr && shared_ro_heap->init_complete_) {
return ReadOnlyRoots(shared_ro_heap->read_only_roots_);
}
#endif
return ReadOnlyRoots(GetHeapFromWritableObject(object));
}
ReadOnlyHeapIterator::ReadOnlyHeapIterator(ReadOnlyHeap* ro_heap)
: ReadOnlyHeapIterator(ro_heap->read_only_space()) {}
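
Taken together, these hunks leave two initialization paths. A condensed call graph, paraphrased from the code above with bodies elided:

// Deserializing boot:
//   ReadOnlyHeap::SetUp(isolate, des)
//     -> CreateAndAttachToIsolate(isolate)
//     -> DeserializeIntoIsolate(isolate, des)  // des->DeserializeInto(isolate)
//          -> InitFromIsolate(isolate)         // copy roots, then Seal(...)
//
// mksnapshot-style boot (create_heap_objects):
//   ReadOnlyHeap::SetUp(isolate, nullptr)
//     -> CreateAndAttachToIsolate(isolate)
//   ... caller creates the read-only heap objects ...
//   ReadOnlyHeap::OnCreateHeapObjectsComplete(isolate)
//     -> InitFromIsolate(isolate)              // copy roots, then Seal(...)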

View File

@@ -6,13 +6,14 @@
#define V8_HEAP_READ_ONLY_HEAP_H_
#include "src/base/macros.h"
#include "src/heap/heap.h"
#include "src/objects.h"
#include "src/objects/heap-object.h"
#include "src/roots.h"
namespace v8 {
namespace internal {
class Isolate;
class Page;
class ReadOnlyDeserializer;
class ReadOnlySpace;
@@ -24,20 +25,30 @@ class ReadOnlyHeap final {
static constexpr size_t kEntriesCount =
static_cast<size_t>(RootIndex::kReadOnlyRootsCount);
// If necessary create read-only heap and initialize its artifacts (if the
// deserializer is provided).
// TODO(goszczycki): Ideally we'd create this without needing a heap.
// If necessary, creates the read-only heap and initializes its artifacts
// (if a deserializer is provided). Then attaches the read-only heap to the
// isolate.
// TODO(v8:7464): Ideally we'd create this without needing a heap.
static void SetUp(Isolate* isolate, ReadOnlyDeserializer* des);
// Indicate that all read-only space objects have been created and will not
// be written to. This is not thread safe, and should really only be used as
// part of mksnapshot or when read-only heap sharing is disabled.
void OnCreateHeapObjectsComplete();
// Indicate that the current isolate no longer requires the read-only heap and
// it may be safely disposed of.
// Indicates that the isolate has been set up and all read-only space objects
// have been created and will not be written to. This is not thread safe, and
// should really only be used during snapshot creation or when read-only heap
// sharing is disabled.
void OnCreateHeapObjectsComplete(Isolate* isolate);
// Indicates that the current isolate no longer requires the read-only heap
// and it may be safely disposed of.
void OnHeapTearDown();
// Returns whether the object resides in the read-only space.
V8_EXPORT_PRIVATE static bool Contains(HeapObject object);
// Gets read-only roots from an appropriate root list: the shared read-only
// root list if the shared read-only heap has been initialized, or the
// isolate-specific roots table otherwise.
V8_EXPORT_PRIVATE static ReadOnlyRoots GetReadOnlyRoots(HeapObject object);
// Clears any shared read-only heap artifacts for testing, forcing the
// read-only heap to be re-created on the next SetUp.
V8_EXPORT_PRIVATE static void ClearSharedHeapForTest();
std::vector<Object>* read_only_object_cache() {
return &read_only_object_cache_;
@@ -45,9 +56,18 @@ class ReadOnlyHeap final {
ReadOnlySpace* read_only_space() const { return read_only_space_; }
private:
static ReadOnlyHeap* Init(Isolate* isolate, ReadOnlyDeserializer* des);
// Creates a new read-only heap and attaches it to the provided isolate.
static ReadOnlyHeap* CreateAndAttachToIsolate(Isolate* isolate);
// Runs the read-only deserializer and calls InitFromIsolate to complete
// read-only heap initialization.
void DeserializeIntoIsolate(Isolate* isolate, ReadOnlyDeserializer* des);
// Initializes read-only heap from an already set-up isolate, copying
// read-only roots from the isolate. This then seals the space off from
// further writes, marks it as read-only and detaches it from the heap (unless
// sharing is disabled).
void InitFromIsolate(Isolate* isolate);
bool deserializing_ = false;
bool init_complete_ = false;
ReadOnlySpace* read_only_space_ = nullptr;
std::vector<Object> read_only_object_cache_;

View File

@@ -50,7 +50,11 @@ HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
cur_end_(kNullAddress),
space_(space),
page_range_(space->first_page(), nullptr),
current_page_(page_range_.begin()) {}
current_page_(page_range_.begin()) {
#ifdef V8_SHARED_RO_HEAP
DCHECK_NE(space->identity(), RO_SPACE);
#endif
}
HeapObjectIterator::HeapObjectIterator(Page* page)
: cur_addr_(kNullAddress),
@@ -60,10 +64,14 @@ HeapObjectIterator::HeapObjectIterator(Page* page)
current_page_(page_range_.begin()) {
#ifdef DEBUG
Space* owner = page->owner();
DCHECK(owner == page->heap()->old_space() ||
owner == page->heap()->map_space() ||
owner == page->heap()->code_space() ||
owner == page->heap()->read_only_space());
// TODO(v8:7464): Always enforce this once PagedSpace::Verify is no longer
// used to verify read-only space for non-shared builds.
#ifdef V8_SHARED_RO_HEAP
DCHECK_NE(owner->identity(), RO_SPACE);
#endif
// Do not access the heap of the read-only space.
DCHECK(owner->identity() == RO_SPACE || owner->identity() == OLD_SPACE ||
owner->identity() == MAP_SPACE || owner->identity() == CODE_SPACE);
#endif // DEBUG
}
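
These DCHECKs guard the pattern at the heart of the CL: a space's (and page's) back-pointer to its Heap may now be null. Reduced to its essentials, as a sketch rather than V8 code:

#include <cassert>

class Heap;

class Space {
 public:
  explicit Space(Heap* heap) : heap_(heap) {}
  // All access is funneled through a checked accessor, so any code that
  // touches a detached (shared read-only) space's heap fails fast in debug.
  Heap* heap() const {
    assert(heap_ != nullptr);  // DCHECK_NOT_NULL(heap_) in the real code
    return heap_;
  }
  void DetachFromHeap() { heap_ = nullptr; }

 private:
  Heap* heap_;
};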
@@ -73,17 +81,19 @@ bool HeapObjectIterator::AdvanceToNextPage() {
DCHECK_EQ(cur_addr_, cur_end_);
if (current_page_ == page_range_.end()) return false;
Page* cur_page = *(current_page_++);
Heap* heap = space_->heap();
heap->mark_compact_collector()->sweeper()->EnsurePageIsIterable(cur_page);
#ifdef ENABLE_MINOR_MC
if (cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE))
Heap* heap = space_->heap();
heap->mark_compact_collector()->sweeper()->EnsurePageIsIterable(cur_page);
if (cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
heap->minor_mark_compact_collector()->MakeIterable(
cur_page, MarkingTreatmentMode::CLEAR,
FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
}
#else
DCHECK(!cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE));
#endif // ENABLE_MINOR_MC
cur_addr_ = cur_page->area_start();
cur_end_ = cur_page->area_end();
DCHECK(cur_page->SweepingDone());
@@ -1112,13 +1122,8 @@ void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
static_cast<int>(released_bytes));
}
void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
chunk->IsEvacuationCandidate());
void MemoryAllocator::UnregisterMemory(MemoryChunk* chunk) {
DCHECK(!chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
VirtualMemory* reservation = chunk->reserved_memory();
const size_t size =
reservation->IsReserved() ? reservation->size() : chunk->size();
@@ -1130,13 +1135,21 @@ void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
size_executable_ -= size;
}
chunk->SetFlag(MemoryChunk::PRE_FREED);
if (chunk->executable()) UnregisterExecutableMemoryChunk(chunk);
chunk->SetFlag(MemoryChunk::UNREGISTERED);
}
void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
UnregisterMemory(chunk);
isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
chunk->IsEvacuationCandidate());
chunk->SetFlag(MemoryChunk::PRE_FREED);
}
void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
DCHECK(chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
chunk->ReleaseAllocatedMemory();
@@ -2029,8 +2042,8 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
// be in map space.
Map map = object->map();
CHECK(map->IsMap());
CHECK(heap()->map_space()->Contains(map) ||
heap()->read_only_space()->Contains(map));
CHECK(isolate->heap()->map_space()->Contains(map) ||
ReadOnlyHeap::Contains(map));
// Perform space-specific object verification.
VerifyObject(object);
@@ -2039,7 +2052,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
object->ObjectVerify(isolate);
if (!FLAG_verify_heap_skip_remembered_set) {
heap()->VerifyRememberedSetFor(object);
isolate->heap()->VerifyRememberedSetFor(object);
}
// All the interior pointers should be contained in the heap.
@@ -3335,34 +3348,22 @@ ReadOnlySpace::ReadOnlySpace(Heap* heap)
void ReadOnlyPage::MakeHeaderRelocatable() {
if (mutex_ != nullptr) {
// TODO(v8:7464): heap_ and owner_ need to be cleared as well.
delete mutex_;
heap_ = nullptr;
mutex_ = nullptr;
local_tracker_ = nullptr;
reservation_.Reset();
}
}
void ReadOnlySpace::Forget() {
void ReadOnlySpace::SetPermissionsForPages(MemoryAllocator* memory_allocator,
PageAllocator::Permission access) {
for (Page* p : *this) {
heap()->memory_allocator()->PreFreeMemory(p);
}
}
void ReadOnlySpace::SetPermissionsForPages(PageAllocator::Permission access) {
MemoryAllocator* memory_allocator = heap()->memory_allocator();
for (Page* p : *this) {
ReadOnlyPage* page = static_cast<ReadOnlyPage*>(p);
if (access == PageAllocator::kRead) {
page->MakeHeaderRelocatable();
}
// Read-only pages don't have a valid reservation object, so we get the
// proper page allocator manually.
v8::PageAllocator* page_allocator =
memory_allocator->page_allocator(page->executable());
CHECK(
SetPermissions(page_allocator, page->address(), page->size(), access));
memory_allocator->page_allocator(p->executable());
CHECK(SetPermissions(page_allocator, p->address(), p->size(), access));
}
}
@@ -3397,7 +3398,6 @@ void ReadOnlySpace::RepairFreeListsAfterDeserialization() {
void ReadOnlySpace::ClearStringPaddingIfNeeded() {
if (is_string_padding_cleared_) return;
WritableScope writable_scope(this);
ReadOnlyHeapIterator iterator(this);
for (HeapObject o = iterator.next(); !o.is_null(); o = iterator.next()) {
if (o->IsSeqOneByteString()) {
@@ -3409,16 +3409,27 @@ void ReadOnlySpace::ClearStringPaddingIfNeeded() {
is_string_padding_cleared_ = true;
}
void ReadOnlySpace::MarkAsReadOnly() {
void ReadOnlySpace::Seal(SealMode ro_mode) {
DCHECK(!is_marked_read_only_);
FreeLinearAllocationArea();
is_marked_read_only_ = true;
SetPermissionsForPages(PageAllocator::kRead);
auto* memory_allocator = heap()->memory_allocator();
if (ro_mode == SealMode::kDetachFromHeapAndForget) {
DetachFromHeap();
for (Page* p : *this) {
memory_allocator->UnregisterMemory(p);
static_cast<ReadOnlyPage*>(p)->MakeHeaderRelocatable();
}
}
SetPermissionsForPages(memory_allocator, PageAllocator::kRead);
}
void ReadOnlySpace::MarkAsReadWrite() {
void ReadOnlySpace::Unseal() {
DCHECK(is_marked_read_only_);
SetPermissionsForPages(PageAllocator::kReadWrite);
SetPermissionsForPages(heap()->memory_allocator(), PageAllocator::kReadWrite);
is_marked_read_only_ = false;
}
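
SetPermissionsForPages ultimately flips OS-level page protections. An operating-system analogue of Seal/Unseal, assuming a POSIX system (illustrative only, not V8 code):

#include <cassert>
#include <sys/mman.h>
#include <unistd.h>

int main() {
  const size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  void* page = mmap(nullptr, page_size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(page != MAP_FAILED);
  static_cast<char*>(page)[0] = 42;                   // writable: OK
  mprotect(page, page_size, PROT_READ);               // "Seal": now read-only
  // static_cast<char*>(page)[0] = 43;                // would fault here
  mprotect(page, page_size, PROT_READ | PROT_WRITE);  // "Unseal"
  munmap(page, page_size);
  return 0;
}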

View File

@@ -318,7 +318,11 @@ class MemoryChunk {
// |INCREMENTAL_MARKING|: Indicates whether incremental marking is currently
// enabled.
INCREMENTAL_MARKING = 1u << 18,
NEW_SPACE_BELOW_AGE_MARK = 1u << 19
NEW_SPACE_BELOW_AGE_MARK = 1u << 19,
// The memory chunk freeing bookkeeping has been performed but the chunk has
// not yet been freed.
UNREGISTERED = 1u << 20
};
using Flags = uintptr_t;
@@ -475,7 +479,10 @@ class MemoryChunk {
size_t size() const { return size_; }
void set_size(size_t size) { size_ = size; }
inline Heap* heap() const { return heap_; }
inline Heap* heap() const {
DCHECK_NOT_NULL(heap_);
return heap_;
}
Heap* synchronized_heap();
@@ -1007,7 +1014,10 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
external_backing_store_bytes_ = nullptr;
}
Heap* heap() const { return heap_; }
Heap* heap() const {
DCHECK_NOT_NULL(heap_);
return heap_;
}
// Identity used in error reporting.
AllocationSpace identity() { return id_; }
@@ -1097,6 +1107,8 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
return !allocation_observers_paused_ && !allocation_observers_.empty();
}
void DetachFromHeap() { heap_ = nullptr; }
std::vector<AllocationObserver*> allocation_observers_;
// The List manages the pages that belong to the given space.
@@ -1442,15 +1454,20 @@ class MemoryAllocator {
Unmapper* unmapper() { return &unmapper_; }
// PreFree logically frees the object, i.e., it takes care of the size
// bookkeeping and calls the allocation callback.
void PreFreeMemory(MemoryChunk* chunk);
// Performs all necessary bookkeeping to free the memory, but does not free
// it.
void UnregisterMemory(MemoryChunk* chunk);
private:
void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,
size_t requested);
// FreeMemory can be called concurrently when PreFree was executed before.
// PreFreeMemory logically frees the object, i.e., it unregisters the memory,
// logs a delete event and adds the chunk to remembered unmapped pages.
void PreFreeMemory(MemoryChunk* chunk);
// PerformFreeMemory can be called concurrently when PreFree was executed
// before.
void PerformFreeMemory(MemoryChunk* chunk);
// See AllocatePage for public interface. Note that currently we only support
@@ -2976,41 +2993,36 @@ class MapSpace : public PagedSpace {
class ReadOnlySpace : public PagedSpace {
public:
class WritableScope {
public:
explicit WritableScope(ReadOnlySpace* space) : space_(space) {
space_->MarkAsReadWrite();
}
~WritableScope() { space_->MarkAsReadOnly(); }
private:
ReadOnlySpace* space_;
};
explicit ReadOnlySpace(Heap* heap);
// TODO(v8:7464): Remove this once PagedSpace::TearDown no longer writes to
// TODO(v8:7464): Remove this once PagedSpace::Unseal no longer writes to
// memory_chunk_list_.
~ReadOnlySpace() override { MarkAsReadWrite(); }
~ReadOnlySpace() override { Unseal(); }
bool writable() const { return !is_marked_read_only_; }
V8_EXPORT_PRIVATE void ClearStringPaddingIfNeeded();
void MarkAsReadOnly();
// Make the heap forget the space for memory bookkeeping purposes
// (e.g. prevent space's memory from registering as leaked).
void Forget();
enum class SealMode { kDetachFromHeapAndForget, kDoNotDetachFromHeap };
// Seal the space by marking it read-only, optionally detaching it
// from the heap and forgetting it for memory bookkeeping purposes (e.g.
// prevent space's memory from registering as leaked).
void Seal(SealMode ro_mode);
// During boot the free_space_map is created, and afterwards we may need
// to write it into the free list nodes that were already created.
void RepairFreeListsAfterDeserialization();
private:
void MarkAsReadWrite();
void SetPermissionsForPages(PageAllocator::Permission access);
// Unseal the space after it has been sealed, by making it writable.
// TODO(v8:7464): Only possible if the space hasn't been detached.
void Unseal();
void SetPermissionsForPages(MemoryAllocator* memory_allocator,
PageAllocator::Permission access);
bool is_marked_read_only_ = false;
//
// String padding must be cleared just before serialization and therefore the
// string padding in the space will already have been cleared if the space was
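
The new UNREGISTERED flag splits chunk teardown into ordered stages so that bookkeeping can happen long before, or entirely without, the actual free. A toy model of the protocol (flags and assertions only; the real code also adjusts size counters, logs, and unmaps):

#include <cassert>

struct MemoryChunk {
  bool unregistered = false;
  bool pre_freed = false;
};

void UnregisterMemory(MemoryChunk* chunk) {  // bookkeeping only, nothing freed
  assert(!chunk->unregistered);
  chunk->unregistered = true;
}

void PreFreeMemory(MemoryChunk* chunk) {     // unregister + log + remember page
  assert(!chunk->pre_freed);
  UnregisterMemory(chunk);
  chunk->pre_freed = true;
}

void PerformFreeMemory(MemoryChunk* chunk) { // the actual free, possibly later
  assert(chunk->unregistered && chunk->pre_freed);
}

Sealed read-only pages take only the first step: Seal(kDetachFromHeapAndForget) calls UnregisterMemory on each page and never frees it, which is what the "forget" in the mode name means.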

View File

@@ -3442,16 +3442,24 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
AlwaysAllocateScope always_allocate(this);
CodeSpaceMemoryModificationScope modification_scope(&heap_);
if (!create_heap_objects) {
startup_deserializer->DeserializeInto(this);
if (create_heap_objects) {
heap_.read_only_space()->ClearStringPaddingIfNeeded();
heap_.read_only_heap()->OnCreateHeapObjectsComplete(this);
} else {
heap_.read_only_heap()->OnCreateHeapObjectsComplete();
startup_deserializer->DeserializeInto(this);
}
load_stub_cache_->Initialize();
store_stub_cache_->Initialize();
interpreter_->Initialize();
heap_.NotifyDeserializationComplete();
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
heap_.VerifyReadOnlyHeap();
}
#endif
delete setup_delegate_;
setup_delegate_ = nullptr;
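
Condensed, the reworked boot sequence in Isolate::Init is (comments added; see the hunk above and the commit message):

if (create_heap_objects) {
  // String padding is now cleared here, immediately before the read-only
  // space is sealed (it used to be cleared in CreateBlob and test helpers).
  heap_.read_only_space()->ClearStringPaddingIfNeeded();
  heap_.read_only_heap()->OnCreateHeapObjectsComplete(this);
} else {
  // When deserializing, the read-only heap was already set up and sealed in
  // ReadOnlyHeap::SetUp.
  startup_deserializer->DeserializeInto(this);
}

Either way, Init then runs the new Heap::VerifyReadOnlyHeap when FLAG_verify_heap is set.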

View File

@@ -26,6 +26,7 @@
#include "src/handles.h"
#include "src/heap/factory.h"
#include "src/heap/heap.h"
#include "src/heap/read-only-heap.h"
#include "src/isolate-allocator.h"
#include "src/isolate-data.h"
#include "src/messages.h"

View File

@@ -631,9 +631,7 @@ void HeapObject::VerifySmiField(int offset) {
#endif
ReadOnlyRoots HeapObject::GetReadOnlyRoots() const {
// TODO(v8:7464): When RO_SPACE is embedded, this will access a global
// variable instead.
return ReadOnlyRoots(GetHeapFromWritableObject(*this));
return ReadOnlyHeap::GetReadOnlyRoots(*this);
}
Map HeapObject::map() const { return map_word().ToMap(); }
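
Why the indirection: GetHeapFromWritableObject finds the heap by masking the object's address down to its page and reading the page header, which stops working for read-only pages once their heap_ is nulled. A toy of that page-masking lookup (the page size here is an illustrative assumption, not V8's):

#include <cstdint>

constexpr uintptr_t kPageSize = uintptr_t{1} << 18;  // assumed, for illustration

struct PageHeader {
  void* heap;  // nullptr for detached read-only pages after this CL
};

void* HeapFromObjectAddress(uintptr_t object_addr) {
  auto* page = reinterpret_cast<PageHeader*>(object_addr & ~(kPageSize - 1));
  return page->heap;
}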

View File

@@ -98,7 +98,9 @@ void HeapObject::PrintHeader(std::ostream& os, const char* id) { // NOLINT
os << map()->instance_type();
}
os << "]";
if (GetHeapFromWritableObject(*this)->InOldSpace(*this)) {
if (ReadOnlyHeap::Contains(*this)) {
os << " in ReadOnlySpace";
} else if (GetHeapFromWritableObject(*this)->InOldSpace(*this)) {
os << " in OldSpace";
}
if (!IsMap()) os << "\n - map: " << Brief(map());

View File

@@ -9,6 +9,7 @@
#include "src/feedback-vector.h"
#include "src/handles.h"
#include "src/heap/read-only-heap.h"
#include "src/isolate.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/descriptor-array.h"
@@ -57,24 +58,26 @@ bool RootsTable::IsRootHandle(Handle<T> handle, RootIndex* index) const {
}
ReadOnlyRoots::ReadOnlyRoots(Heap* heap)
: roots_table_(Isolate::FromHeap(heap)->roots_table()) {}
: ReadOnlyRoots(Isolate::FromHeap(heap)) {}
ReadOnlyRoots::ReadOnlyRoots(Isolate* isolate)
: roots_table_(isolate->roots_table()) {}
: read_only_roots_(reinterpret_cast<Address*>(
isolate->roots_table().read_only_roots_begin().address())) {}
ReadOnlyRoots::ReadOnlyRoots(Address* ro_roots) : read_only_roots_(ro_roots) {}
// We use unchecked_cast below because we trust our read-only roots to
// have the right type, and to avoid the heavy #includes that would be
// required for checked casts.
#define ROOT_ACCESSOR(Type, name, CamelName) \
Type ReadOnlyRoots::name() const { \
DCHECK(CheckType(RootIndex::k##CamelName)); \
return Type::unchecked_cast( \
Object(roots_table_[RootIndex::k##CamelName])); \
} \
Handle<Type> ReadOnlyRoots::name##_handle() const { \
DCHECK(CheckType(RootIndex::k##CamelName)); \
return Handle<Type>(&roots_table_[RootIndex::k##CamelName]); \
#define ROOT_ACCESSOR(Type, name, CamelName) \
Type ReadOnlyRoots::name() const { \
DCHECK(CheckType(RootIndex::k##CamelName)); \
return Type::unchecked_cast(Object(at(RootIndex::k##CamelName))); \
} \
Handle<Type> ReadOnlyRoots::name##_handle() const { \
DCHECK(CheckType(RootIndex::k##CamelName)); \
return Handle<Type>(&at(RootIndex::k##CamelName)); \
}
READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
@@ -83,13 +86,13 @@ READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
Map ReadOnlyRoots::MapForFixedTypedArray(ExternalArrayType array_type) {
RootIndex root_index = RootsTable::RootIndexForFixedTypedArray(array_type);
DCHECK(CheckType(root_index));
return Map::unchecked_cast(Object(roots_table_[root_index]));
return Map::unchecked_cast(Object(at(root_index)));
}
Map ReadOnlyRoots::MapForFixedTypedArray(ElementsKind elements_kind) {
RootIndex root_index = RootsTable::RootIndexForFixedTypedArray(elements_kind);
DCHECK(CheckType(root_index));
return Map::unchecked_cast(Object(roots_table_[root_index]));
return Map::unchecked_cast(Object(at(root_index)));
}
FixedTypedArrayBase ReadOnlyRoots::EmptyFixedTypedArrayForTypedArray(
@@ -97,7 +100,13 @@ FixedTypedArrayBase ReadOnlyRoots::EmptyFixedTypedArrayForTypedArray(
RootIndex root_index =
RootsTable::RootIndexForEmptyFixedTypedArray(elements_kind);
DCHECK(CheckType(root_index));
return FixedTypedArrayBase::unchecked_cast(Object(roots_table_[root_index]));
return FixedTypedArrayBase::unchecked_cast(Object(at(root_index)));
}
Address& ReadOnlyRoots::at(RootIndex root_index) const {
size_t index = static_cast<size_t>(root_index);
DCHECK_LT(index, kEntriesCount);
return read_only_roots_[index];
}
} // namespace internal
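
The net effect on ReadOnlyRoots: it is now a thin view over a raw Address array rather than a wrapper around RootsTable. A self-contained analogue:

#include <cassert>
#include <cstddef>
#include <cstdint>

using Address = uintptr_t;
enum class RootIndex : uint16_t { kReadOnlyRootsCount = 8 };

class ReadOnlyRootsView {
 public:
  // Constructible from any roots storage: an isolate's roots table or the
  // shared read-only heap's copy (the new path in this CL).
  explicit ReadOnlyRootsView(Address* ro_roots) : read_only_roots_(ro_roots) {}

  // Returns a reference, not a copy, so callers can hand out stable handle
  // locations, as the name##_handle() accessors above do.
  Address& at(RootIndex root_index) const {
    size_t index = static_cast<size_t>(root_index);
    assert(index < static_cast<size_t>(RootIndex::kReadOnlyRootsCount));
    return read_only_roots_[index];
  }

 private:
  Address* read_only_roots_;
};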

View File

@@ -61,15 +61,15 @@ RootIndex RootsTable::RootIndexForEmptyFixedTypedArray(
void ReadOnlyRoots::Iterate(RootVisitor* visitor) {
visitor->VisitRootPointers(Root::kReadOnlyRootList, nullptr,
roots_table_.read_only_roots_begin(),
roots_table_.read_only_roots_end());
FullObjectSlot(read_only_roots_),
FullObjectSlot(&read_only_roots_[kEntriesCount]));
visitor->Synchronize(VisitorSynchronization::kReadOnlyRootList);
}
#ifdef DEBUG
bool ReadOnlyRoots::CheckType(RootIndex index) const {
Object root(roots_table_[index]);
Object root(at(index));
switch (index) {
#define CHECKTYPE(Type, name, CamelName) \
case RootIndex::k##CamelName: \

View File

@@ -24,9 +24,10 @@ class Heap;
class Isolate;
class Map;
class PropertyCell;
class ReadOnlyHeap;
class RootVisitor;
class String;
class Symbol;
class RootVisitor;
// Defines all the read-only roots in Heap.
#define STRONG_READ_ONLY_ROOT_LIST(V) \
@@ -509,6 +510,9 @@ class RootsTable {
class ReadOnlyRoots {
public:
static constexpr size_t kEntriesCount =
static_cast<size_t>(RootIndex::kReadOnlyRootsCount);
V8_INLINE explicit ReadOnlyRoots(Heap* heap);
V8_INLINE explicit ReadOnlyRoots(Isolate* isolate);
@@ -534,7 +538,13 @@
#endif
private:
RootsTable& roots_table_;
V8_INLINE explicit ReadOnlyRoots(Address* ro_roots);
V8_INLINE Address& at(RootIndex root_index) const;
Address* read_only_roots_;
friend class ReadOnlyHeap;
};
} // namespace internal

View File

@@ -57,8 +57,6 @@ ScriptCompiler::CachedData* CodeSerializer::Serialize(
// context independent.
if (script->ContainsAsmModule()) return nullptr;
isolate->heap()->read_only_space()->ClearStringPaddingIfNeeded();
// Serialize code object.
Handle<String> source(String::cast(script->source()), isolate);
CodeSerializer cs(isolate, SerializedCodeData::SourceHash(

View File

@@ -6267,6 +6267,7 @@ UNINITIALIZED_TEST(ReinitializeStringHashSeed) {
CHECK(!context.IsEmpty());
v8::Context::Scope context_scope(context);
}
ReadOnlyHeap::ClearSharedHeapForTest();
isolate->Dispose();
}
}
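
This test change and the serializer-test changes that follow all use one pattern: once the last isolate using the shared read-only heap is disposed, the global state must be cleared before a fresh snapshot or isolate is created. Sketched with names from this CL:

isolate->Dispose();                      // last user of the shared RO heap
ReadOnlyHeap::ClearSharedHeapForTest();  // drop shared_ro_heap, reset once-flag
// The next Isolate or SnapshotCreator re-runs ReadOnlyHeap::SetUp from
// scratch, re-creating and re-sealing the read-only space.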

View File

@@ -95,7 +95,6 @@ class TestSerializer {
v8::Isolate::Scope isolate_scope(v8_isolate);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
isolate->Init(nullptr, nullptr);
isolate->heap()->read_only_space()->ClearStringPaddingIfNeeded();
return v8_isolate;
}
@@ -204,6 +203,7 @@ v8::StartupData WarmUpSnapshotDataBlob(v8::StartupData cold_snapshot_blob,
result = snapshot_creator.CreateBlob(
v8::SnapshotCreator::FunctionCodeHandling::kKeep);
}
ReadOnlyHeap::ClearSharedHeapForTest();
return result;
}
@@ -279,6 +279,7 @@ void TestStartupSerializerOnceImpl() {
v8::Isolate* isolate = TestSerializer::NewIsolateInitialized();
StartupBlobs blobs = Serialize(isolate);
isolate->Dispose();
ReadOnlyHeap::ClearSharedHeapForTest();
isolate = Deserialize(blobs);
{
v8::HandleScope handle_scope(isolate);
@@ -383,6 +384,7 @@ UNINITIALIZED_TEST(StartupSerializerTwice) {
StartupBlobs blobs2 = Serialize(isolate);
isolate->Dispose();
blobs1.Dispose();
ReadOnlyHeap::ClearSharedHeapForTest();
isolate = Deserialize(blobs2);
{
v8::Isolate::Scope isolate_scope(isolate);
@@ -403,6 +405,7 @@ UNINITIALIZED_TEST(StartupSerializerOnceRunScript) {
v8::Isolate* isolate = TestSerializer::NewIsolateInitialized();
StartupBlobs blobs = Serialize(isolate);
isolate->Dispose();
ReadOnlyHeap::ClearSharedHeapForTest();
isolate = Deserialize(blobs);
{
v8::Isolate::Scope isolate_scope(isolate);
@@ -431,6 +434,7 @@ UNINITIALIZED_TEST(StartupSerializerTwiceRunScript) {
StartupBlobs blobs2 = Serialize(isolate);
isolate->Dispose();
blobs1.Dispose();
ReadOnlyHeap::ClearSharedHeapForTest();
isolate = Deserialize(blobs2);
{
v8::Isolate::Scope isolate_scope(isolate);
@@ -510,6 +514,7 @@ static void PartiallySerializeContext(Vector<const byte>* startup_blob_out,
*read_only_blob_out = WritePayload(read_only_snapshot.RawData());
}
v8_isolate->Dispose();
ReadOnlyHeap::ClearSharedHeapForTest();
}
UNINITIALIZED_TEST(PartialSerializerContext) {
@@ -639,6 +644,7 @@ static void PartiallySerializeCustomContext(
*read_only_blob_out = WritePayload(read_only_snapshot.RawData());
}
v8_isolate->Dispose();
ReadOnlyHeap::ClearSharedHeapForTest();
}
UNINITIALIZED_TEST(PartialSerializerCustomContext) {
@@ -1381,6 +1387,7 @@ UNINITIALIZED_TEST(CustomSnapshotDataBlobWithLocker) {
const char* source1 = "function f() { return 42; }";
DisableEmbeddedBlobRefcounting();
ReadOnlyHeap::ClearSharedHeapForTest();
v8::StartupData data1 = CreateSnapshotDataBlob(source1);
v8::Isolate::CreateParams params1;
@@ -3328,6 +3335,7 @@ UNINITIALIZED_TEST(SnapshotCreatorAddData) {
}
{
ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -3408,6 +3416,7 @@ UNINITIALIZED_TEST(SnapshotCreatorAddData) {
isolate->Dispose();
}
{
ReadOnlyHeap::ClearSharedHeapForTest();
SnapshotCreator creator(nullptr, &blob);
v8::Isolate* isolate = creator.GetIsolate();
{
@@ -3434,6 +3443,7 @@ UNINITIALIZED_TEST(SnapshotCreatorAddData) {
creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
}
{
ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -3778,6 +3788,7 @@ UNINITIALIZED_TEST(ReinitializeHashSeedRehashable) {
CHECK(blob.CanBeRehashed());
}
ReadOnlyHeap::ClearSharedHeapForTest();
i::FLAG_hash_seed = 1337;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();

View File

@@ -267,6 +267,9 @@ TrackingPageAllocator* SequentialUnmapperTest::tracking_page_allocator_ =
v8::PageAllocator* SequentialUnmapperTest::old_page_allocator_ = nullptr;
bool SequentialUnmapperTest::old_flag_;
// TODO(v8:7464): Enable these once there is a good way to free the shared
// read-only space.
#ifndef V8_SHARED_RO_HEAP
// See v8:5945.
TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
Page* page = allocator()->AllocatePage(
@@ -326,6 +329,7 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
tracking_page_allocator()->CheckIsFree(page->address(), page_size);
}
}
#endif // V8_SHARED_RO_HEAP
} // namespace internal
} // namespace v8