Revert "[heap] Set read-only space's and its pages' heap_ to null."

This reverts commit 964edc251f.

Reason for revert: chromium:959190

Original change's description:
> [heap] Set read-only space's and its pages' heap_ to null.
>
> Various small changes are required to enable this.
>
> HeapObject::GetReadOnlyRoots no longer uses the Space's heap when
> possible (see comment in ReadOnlyHeap::GetReadOnlyRoots definition).
> This requires that ReadOnlyRoots be construct-able using a raw pointer
> to the read-only space's roots array.
>
> Global read-only heap state is now cleared by tests where appropriate
> and extra DCHECKs in ReadOnlyHeap::SetUp should make catching future
> issues easier.
>
> String padding is now always cleared just before read-only space is
> sealed when not deserializing.
>
> Change-Id: I7d1db1c11567be5df06ff7066f3a699125f8b372
> Bug: v8:7464
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1535830
> Commit-Queue: Maciej Goszczycki <goszczycki@google.com>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Reviewed-by: Dan Elphick <delphick@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#61188}

TBR=ulan@chromium.org,hpayer@chromium.org,delphick@chromium.org,goszczycki@google.com

Change-Id: I53cecf3976dfeabae309040313351385f651f010
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:7464, chromium:959190
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1591608
Reviewed-by: Adam Klein <adamk@chromium.org>
Commit-Queue: Adam Klein <adamk@chromium.org>
Cr-Commit-Position: refs/heads/master@{#61217}
Author: Maciej Goszczycki <goszczycki@google.com>
Date: 2019-05-03 17:26:53 +00:00 (committed by Commit Bot)
Parent: 314d68b858
Commit: fa4b433f32
20 changed files with 139 additions and 276 deletions
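For orientation before the diffs: the central mechanism being reverted is the construction of ReadOnlyRoots from a raw pointer into a shared read-only roots array, populated once via std::memcpy from an isolate's roots table. A minimal standalone sketch of that pattern (all names here are simplified stand-ins, not the actual V8 classes):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>

using Address = uintptr_t;
constexpr std::size_t kEntriesCount = 4;

// Analogue of the reverted ReadOnlyRoots(Address*) constructor: a roots
// view backed by a raw pointer into a shared, immutable array.
class ReadOnlyRootsView {
 public:
  explicit ReadOnlyRootsView(const Address* ro_roots) : roots_(ro_roots) {}
  Address at(std::size_t index) const { return roots_[index]; }

 private:
  const Address* roots_;
};

int main() {
  // Isolate-local roots table (dummy addresses for illustration).
  Address isolate_roots[kEntriesCount] = {0x10, 0x20, 0x30, 0x40};

  // The reverted ReadOnlyHeap::SetUp copied the isolate's read-only roots
  // into a process-wide array with std::memcpy, as the diff below shows.
  Address shared_roots[kEntriesCount];
  std::memcpy(shared_roots, isolate_roots, kEntriesCount * sizeof(Address));

  ReadOnlyRootsView roots(shared_roots);
  std::cout << std::hex << roots.at(2) << "\n";  // prints 30
  return 0;
}
```

The revert returns ReadOnlyRoots to holding a RootsTable& obtained from the isolate, as the roots-inl.h hunks further down show.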


@@ -754,6 +754,8 @@ StartupData SnapshotCreator::CreateBlob(
isolate->heap()->CompactWeakArrayLists(internal::AllocationType::kOld);
}
isolate->heap()->read_only_space()->ClearStringPaddingIfNeeded();
if (function_code_handling == FunctionCodeHandling::kClear) {
// Clear out re-compilable data from all shared function infos. Any
// JSFunctions using these SFIs will have their code pointers reset by the


@@ -3593,7 +3593,7 @@ const char* Heap::GarbageCollectionReasonToString(
bool Heap::Contains(HeapObject value) {
// Check RO_SPACE first because IsOutsideAllocatedSpace cannot account for a
// shared RO_SPACE.
// TODO(v8:7464): Exclude read-only space. Use ReadOnlyHeap::Contains where
// TODO(goszczycki): Exclude read-only space. Use ReadOnlyHeap::Contains where
// appropriate.
if (read_only_space_ != nullptr && read_only_space_->Contains(value)) {
return true;
@@ -3731,18 +3731,9 @@ void Heap::Verify() {
lo_space_->Verify(isolate());
code_lo_space_->Verify(isolate());
new_lo_space_->Verify(isolate());
}
void Heap::VerifyReadOnlyHeap() {
CHECK(!read_only_space_->writable());
// TODO(v8:7464): Always verify read-only space once PagedSpace::Verify
// supports verifying shared read-only space. Currently HeapObjectIterator is
// explicitly disabled for read-only space when sharing is enabled, because it
// relies on PagedSpace::heap_ being non-null.
#ifndef V8_SHARED_RO_HEAP
VerifyReadOnlyPointersVisitor read_only_visitor(this);
read_only_space_->Verify(isolate(), &read_only_visitor);
#endif
}
class SlotVerifyingVisitor : public ObjectVisitor {


@@ -1281,9 +1281,6 @@ class Heap {
#ifdef VERIFY_HEAP
// Verify the heap is in its normal state before or after a GC.
V8_EXPORT_PRIVATE void Verify();
// Verify the read-only heap after all read-only heap objects have been
// created.
void VerifyReadOnlyHeap();
void VerifyRememberedSetFor(HeapObject object);
#endif


@@ -23,7 +23,6 @@
#include "src/heap/mark-compact-inl.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/sweeper.h"
#include "src/heap/worklist.h"
@@ -512,10 +511,10 @@ void MarkCompactCollector::CollectGarbage() {
}
#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreDirty(ReadOnlySpace* space) {
ReadOnlyHeapIterator iterator(space);
for (HeapObject object = iterator.next(); !object.is_null();
object = iterator.next()) {
void MarkCompactCollector::VerifyMarkbitsAreDirty(PagedSpace* space) {
HeapObjectIterator iterator(space);
for (HeapObject object = iterator.Next(); !object.is_null();
object = iterator.Next()) {
CHECK(non_atomic_marking_state()->IsBlack(object));
}
}


@@ -703,7 +703,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
#ifdef VERIFY_HEAP
void VerifyValidStoreAndSlotsBufferEntries();
void VerifyMarkbitsAreClean();
void VerifyMarkbitsAreDirty(ReadOnlySpace* space);
void VerifyMarkbitsAreDirty(PagedSpace* space);
void VerifyMarkbitsAreClean(PagedSpace* space);
void VerifyMarkbitsAreClean(NewSpace* space);
void VerifyMarkbitsAreClean(LargeObjectSpace* space);


@@ -8,7 +8,6 @@
#include "src/base/once.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/spaces.h"
#include "src/objects-inl.h"
#include "src/objects/heap-object-inl.h"
@@ -24,63 +23,48 @@ ReadOnlyHeap* shared_ro_heap = nullptr;
// static
void ReadOnlyHeap::SetUp(Isolate* isolate, ReadOnlyDeserializer* des) {
DCHECK_NOT_NULL(isolate);
#ifdef V8_SHARED_RO_HEAP
// Make sure we are only sharing read-only space when deserializing. Otherwise
// we would be trying to create heap objects inside an already initialized
// read-only space. Use ClearSharedHeapForTest if you need a new read-only
// space.
DCHECK_IMPLIES(shared_ro_heap != nullptr, des != nullptr);
base::CallOnce(&setup_ro_heap_once, [isolate, des]() {
shared_ro_heap = CreateAndAttachToIsolate(isolate);
if (des != nullptr) shared_ro_heap->DeseralizeIntoIsolate(isolate, des);
void* isolate_ro_roots = reinterpret_cast<void*>(
isolate->roots_table().read_only_roots_begin().address());
base::CallOnce(&setup_ro_heap_once, [isolate, des, isolate_ro_roots]() {
shared_ro_heap = Init(isolate, des);
if (des != nullptr) {
std::memcpy(shared_ro_heap->read_only_roots_, isolate_ro_roots,
kEntriesCount * sizeof(Address));
}
});
isolate->heap()->SetUpFromReadOnlyHeap(shared_ro_heap);
if (des != nullptr) {
void* const isolate_ro_roots = reinterpret_cast<void*>(
isolate->roots_table().read_only_roots_begin().address());
std::memcpy(isolate_ro_roots, shared_ro_heap->read_only_roots_,
kEntriesCount * sizeof(Address));
}
#else
auto* ro_heap = CreateAndAttachToIsolate(isolate);
if (des != nullptr) ro_heap->DeseralizeIntoIsolate(isolate, des);
Init(isolate, des);
#endif // V8_SHARED_RO_HEAP
}
void ReadOnlyHeap::DeseralizeIntoIsolate(Isolate* isolate,
ReadOnlyDeserializer* des) {
DCHECK_NOT_NULL(des);
des->DeserializeInto(isolate);
InitFromIsolate(isolate);
}
void ReadOnlyHeap::OnCreateHeapObjectsComplete(Isolate* isolate) {
DCHECK_NOT_NULL(isolate);
InitFromIsolate(isolate);
void ReadOnlyHeap::OnCreateHeapObjectsComplete() {
DCHECK(!deserializing_);
#ifdef V8_SHARED_RO_HEAP
read_only_space_->Forget();
#endif
read_only_space_->MarkAsReadOnly();
}
// static
ReadOnlyHeap* ReadOnlyHeap::CreateAndAttachToIsolate(Isolate* isolate) {
ReadOnlyHeap* ReadOnlyHeap::Init(Isolate* isolate, ReadOnlyDeserializer* des) {
auto* ro_heap = new ReadOnlyHeap(new ReadOnlySpace(isolate->heap()));
isolate->heap()->SetUpFromReadOnlyHeap(ro_heap);
return ro_heap;
}
void ReadOnlyHeap::InitFromIsolate(Isolate* isolate) {
DCHECK(!init_complete_);
if (des != nullptr) {
des->DeserializeInto(isolate);
ro_heap->deserializing_ = true;
#ifdef V8_SHARED_RO_HEAP
void* const isolate_ro_roots = reinterpret_cast<void*>(
isolate->roots_table().read_only_roots_begin().address());
std::memcpy(read_only_roots_, isolate_ro_roots,
kEntriesCount * sizeof(Address));
read_only_space_->Seal(ReadOnlySpace::SealMode::kDetachFromHeapAndForget);
#else
read_only_space_->Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
ro_heap->read_only_space_->Forget();
#endif
init_complete_ = true;
ro_heap->read_only_space_->MarkAsReadOnly();
}
return ro_heap;
}
void ReadOnlyHeap::OnHeapTearDown() {
@@ -90,35 +74,11 @@ void ReadOnlyHeap::OnHeapTearDown() {
#endif
}
// static
void ReadOnlyHeap::ClearSharedHeapForTest() {
#ifdef V8_SHARED_RO_HEAP
DCHECK_NOT_NULL(shared_ro_heap);
// TODO(v8:7464): Just leak read-only space for now. The paged-space heap
// is null so there isn't a nice way to do this.
delete shared_ro_heap;
shared_ro_heap = nullptr;
setup_ro_heap_once = 0;
#endif
}
// static
bool ReadOnlyHeap::Contains(HeapObject object) {
return Page::FromAddress(object.ptr())->owner()->identity() == RO_SPACE;
}
// static
ReadOnlyRoots ReadOnlyHeap::GetReadOnlyRoots(HeapObject object) {
#ifdef V8_SHARED_RO_HEAP
// This fails if we are creating heap objects and the roots haven't yet been
// copied into the read-only heap.
if (shared_ro_heap->init_complete_) {
return ReadOnlyRoots(shared_ro_heap->read_only_roots_);
}
#endif
return ReadOnlyRoots(GetHeapFromWritableObject(object));
}
ReadOnlyHeapIterator::ReadOnlyHeapIterator(ReadOnlyHeap* ro_heap)
: ReadOnlyHeapIterator(ro_heap->read_only_space()) {}
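Note on the SetUp path above: creation of the process-wide shared_ro_heap is guarded by base::CallOnce, so only the first isolate builds the shared read-only heap and every later isolate attaches to it. A standalone analogue using std::call_once (placeholder types; V8's base::CallOnce serves the same one-shot purpose here):

```cpp
#include <mutex>

// Placeholder for the shared read-only heap state.
struct SharedReadOnlyHeap {};

std::once_flag setup_ro_heap_once;
SharedReadOnlyHeap* shared_ro_heap = nullptr;

// The first caller creates the shared heap; every later caller just
// observes the already-initialized pointer, as in ReadOnlyHeap::SetUp.
SharedReadOnlyHeap* EnsureSharedReadOnlyHeap() {
  std::call_once(setup_ro_heap_once,
                 [] { shared_ro_heap = new SharedReadOnlyHeap(); });
  return shared_ro_heap;
}

int main() {
  SharedReadOnlyHeap* a = EnsureSharedReadOnlyHeap();
  SharedReadOnlyHeap* b = EnsureSharedReadOnlyHeap();
  return a == b ? 0 : 1;  // always 0: both callers see the same instance
}
```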


@@ -6,14 +6,13 @@
#define V8_HEAP_READ_ONLY_HEAP_H_
#include "src/base/macros.h"
#include "src/heap/heap.h"
#include "src/objects.h"
#include "src/objects/heap-object.h"
#include "src/roots.h"
namespace v8 {
namespace internal {
class Isolate;
class Page;
class ReadOnlyDeserializer;
class ReadOnlySpace;
@@ -25,30 +24,20 @@ class ReadOnlyHeap final {
static constexpr size_t kEntriesCount =
static_cast<size_t>(RootIndex::kReadOnlyRootsCount);
// If necessary creates read-only heap and initializes its artifacts (if
// the deserializer is provided). Then attaches the read-only heap to the
// isolate.
// TODO(v8:7464): Ideally we'd create this without needing a heap.
// If necessary create read-only heap and initialize its artifacts (if the
// deserializer is provided).
// TODO(goszczycki): Ideally we'd create this without needing a heap.
static void SetUp(Isolate* isolate, ReadOnlyDeserializer* des);
// Indicates that the isolate has been set up and all read-only space objects
// have been created and will not be written to. This is not thread safe, and
// should really only be used during snapshot creation or when read-only heap
// sharing is disabled.
void OnCreateHeapObjectsComplete(Isolate* isolate);
// Indicates that the current isolate no longer requires the read-only heap
// and it may be safely disposed of.
// Indicate that all read-only space objects have been created and will not
// be written to. This is not thread safe, and should really only be used as
// part of mksnapshot or when read-only heap sharing is disabled.
void OnCreateHeapObjectsComplete();
// Indicate that the current isolate no longer requires the read-only heap and
// it may be safely disposed of.
void OnHeapTearDown();
// Returns whether the object resides in the read-only space.
V8_EXPORT_PRIVATE static bool Contains(HeapObject object);
// Gets read-only roots from an appropriate root list: shared read-only root
// list if the shared read-only heap has been initialized or the isolate
// specific roots table.
V8_EXPORT_PRIVATE static ReadOnlyRoots GetReadOnlyRoots(HeapObject object);
// Clears any shared read-only heap artifacts for testing, forcing read-only
// heap to be re-created on next set up.
V8_EXPORT_PRIVATE static void ClearSharedHeapForTest();
std::vector<Object>* read_only_object_cache() {
return &read_only_object_cache_;
@@ -56,18 +45,9 @@ class ReadOnlyHeap final {
ReadOnlySpace* read_only_space() const { return read_only_space_; }
private:
// Creates a new read-only heap and attaches it to the provided isolate.
static ReadOnlyHeap* CreateAndAttachToIsolate(Isolate* isolate);
// Runs the read-only deserailizer and calls InitFromIsolate to complete
// read-only heap initialization.
void DeseralizeIntoIsolate(Isolate* isolate, ReadOnlyDeserializer* des);
// Initializes read-only heap from an already set-up isolate, copying
// read-only roots from the isolate. This then seals the space off from
// further writes, marks it as read-only and detaches it from the heap (unless
// sharing is disabled).
void InitFromIsolate(Isolate* isolate);
static ReadOnlyHeap* Init(Isolate* isolate, ReadOnlyDeserializer* des);
bool init_complete_ = false;
bool deserializing_ = false;
ReadOnlySpace* read_only_space_ = nullptr;
std::vector<Object> read_only_object_cache_;


@@ -50,11 +50,7 @@ HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
cur_end_(kNullAddress),
space_(space),
page_range_(space->first_page(), nullptr),
current_page_(page_range_.begin()) {
#ifdef V8_SHARED_RO_HEAP
DCHECK_NE(space->identity(), RO_SPACE);
#endif
}
current_page_(page_range_.begin()) {}
HeapObjectIterator::HeapObjectIterator(Page* page)
: cur_addr_(kNullAddress),
@@ -64,14 +60,10 @@ HeapObjectIterator::HeapObjectIterator(Page* page)
current_page_(page_range_.begin()) {
#ifdef DEBUG
Space* owner = page->owner();
// TODO(v8:7464): Always enforce this once PagedSpace::Verify is no longer
// used to verify read-only space for non-shared builds.
#ifdef V8_SHARED_RO_HEAP
DCHECK_NE(owner->identity(), RO_SPACE);
#endif
// Do not access the heap of the read-only space.
DCHECK(owner->identity() == RO_SPACE || owner->identity() == OLD_SPACE ||
owner->identity() == MAP_SPACE || owner->identity() == CODE_SPACE);
DCHECK(owner == page->heap()->old_space() ||
owner == page->heap()->map_space() ||
owner == page->heap()->code_space() ||
owner == page->heap()->read_only_space());
#endif // DEBUG
}
@@ -81,19 +73,17 @@ bool HeapObjectIterator::AdvanceToNextPage() {
DCHECK_EQ(cur_addr_, cur_end_);
if (current_page_ == page_range_.end()) return false;
Page* cur_page = *(current_page_++);
#ifdef ENABLE_MINOR_MC
Heap* heap = space_->heap();
heap->mark_compact_collector()->sweeper()->EnsurePageIsIterable(cur_page);
if (cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
#ifdef ENABLE_MINOR_MC
if (cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE))
heap->minor_mark_compact_collector()->MakeIterable(
cur_page, MarkingTreatmentMode::CLEAR,
FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
}
#else
DCHECK(!cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE));
#endif // ENABLE_MINOR_MC
cur_addr_ = cur_page->area_start();
cur_end_ = cur_page->area_end();
DCHECK(cur_page->SweepingDone());
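The AdvanceToNextPage hunk above keeps the same iteration shape in both versions: walk the space's page list and reset the [cur_addr_, cur_end_) window to each page's usable area. The pattern in miniature, with flat hypothetical stand-ins for Page and the page list:

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

using Address = uintptr_t;

// Flat stand-ins for Page and the space's page list.
struct Page {
  Address area_start;
  Address area_end;
};

class PageCursor {
 public:
  explicit PageCursor(const std::vector<Page>& pages) : pages_(pages) {}

  // Mirrors AdvanceToNextPage(): when the current range is exhausted,
  // step to the next page and reset the [cur, end) window.
  bool AdvanceToNextPage() {
    if (index_ == pages_.size()) return false;
    const Page& page = pages_[index_++];
    cur_ = page.area_start;
    end_ = page.area_end;
    return true;
  }

  Address cur() const { return cur_; }
  Address end() const { return end_; }

 private:
  const std::vector<Page>& pages_;
  std::size_t index_ = 0;
  Address cur_ = 0;
  Address end_ = 0;
};

int main() {
  std::vector<Page> pages = {{0x1000, 0x2000}, {0x3000, 0x4000}};
  PageCursor cursor(pages);
  while (cursor.AdvanceToNextPage()) {
    std::cout << std::hex << cursor.cur() << "-" << cursor.end() << "\n";
  }
  return 0;
}
```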
@@ -1122,8 +1112,13 @@ void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
static_cast<int>(released_bytes));
}
void MemoryAllocator::UnregisterMemory(MemoryChunk* chunk) {
DCHECK(!chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
chunk->IsEvacuationCandidate());
VirtualMemory* reservation = chunk->reserved_memory();
const size_t size =
reservation->IsReserved() ? reservation->size() : chunk->size();
@@ -1135,21 +1130,13 @@ void MemoryAllocator::UnregisterMemory(MemoryChunk* chunk) {
size_executable_ -= size;
}
chunk->SetFlag(MemoryChunk::PRE_FREED);
if (chunk->executable()) UnregisterExecutableMemoryChunk(chunk);
chunk->SetFlag(MemoryChunk::UNREGISTERED);
}
void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
UnregisterMemory(chunk);
isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
chunk->IsEvacuationCandidate());
chunk->SetFlag(MemoryChunk::PRE_FREED);
}
void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
DCHECK(chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
chunk->ReleaseAllocatedMemory();
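The two hunks above fold UnregisterMemory back into PreFreeMemory, restoring a two-phase free: PreFreeMemory does the bookkeeping eagerly, while PerformFreeMemory releases the memory later, possibly on another thread. A toy sketch of that split (hypothetical Chunk type; the real state bits are the MemoryChunk::PRE_FREED and UNREGISTERED flags):

```cpp
#include <cassert>
#include <cstdlib>

// Hypothetical chunk carrying a PRE_FREED-style flag.
struct Chunk {
  void* memory = nullptr;
  bool pre_freed = false;
};

// Phase 1: logically free the chunk. Size bookkeeping, logging, and
// unregistration happen here, but the memory stays mapped.
void PreFreeMemory(Chunk* chunk) {
  assert(!chunk->pre_freed);
  // ... update accounting, log the delete event ...
  chunk->pre_freed = true;
}

// Phase 2: actually release the memory. Safe to run later (or
// concurrently) once phase 1 has completed, mirroring PerformFreeMemory.
void PerformFreeMemory(Chunk* chunk) {
  assert(chunk->pre_freed);
  std::free(chunk->memory);
  chunk->memory = nullptr;
}

int main() {
  Chunk chunk{std::malloc(64), false};
  PreFreeMemory(&chunk);      // bookkeeping only
  PerformFreeMemory(&chunk);  // actual release
  return 0;
}
```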
@@ -2042,8 +2029,8 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
// be in map space.
Map map = object->map();
CHECK(map->IsMap());
CHECK(isolate->heap()->map_space()->Contains(map) ||
ReadOnlyHeap::Contains(map));
CHECK(heap()->map_space()->Contains(map) ||
heap()->read_only_space()->Contains(map));
// Perform space-specific object verification.
VerifyObject(object);
@@ -2052,7 +2039,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
object->ObjectVerify(isolate);
if (!FLAG_verify_heap_skip_remembered_set) {
isolate->heap()->VerifyRememberedSetFor(object);
heap()->VerifyRememberedSetFor(object);
}
// All the interior pointers should be contained in the heap.
@@ -3348,22 +3335,34 @@ ReadOnlySpace::ReadOnlySpace(Heap* heap)
void ReadOnlyPage::MakeHeaderRelocatable() {
if (mutex_ != nullptr) {
// TODO(v8:7464): heap_ and owner_ need to be cleared as well.
delete mutex_;
heap_ = nullptr;
mutex_ = nullptr;
local_tracker_ = nullptr;
reservation_.Reset();
}
}
void ReadOnlySpace::SetPermissionsForPages(MemoryAllocator* memory_allocator,
PageAllocator::Permission access) {
void ReadOnlySpace::Forget() {
for (Page* p : *this) {
heap()->memory_allocator()->PreFreeMemory(p);
}
}
void ReadOnlySpace::SetPermissionsForPages(PageAllocator::Permission access) {
MemoryAllocator* memory_allocator = heap()->memory_allocator();
for (Page* p : *this) {
ReadOnlyPage* page = static_cast<ReadOnlyPage*>(p);
if (access == PageAllocator::kRead) {
page->MakeHeaderRelocatable();
}
// Read only pages don't have valid reservation object so we get proper
// page allocator manually.
v8::PageAllocator* page_allocator =
memory_allocator->page_allocator(p->executable());
CHECK(SetPermissions(page_allocator, p->address(), p->size(), access));
memory_allocator->page_allocator(page->executable());
CHECK(
SetPermissions(page_allocator, page->address(), page->size(), access));
}
}
@@ -3398,6 +3397,7 @@ void ReadOnlySpace::RepairFreeListsAfterDeserialization() {
void ReadOnlySpace::ClearStringPaddingIfNeeded() {
if (is_string_padding_cleared_) return;
WritableScope writable_scope(this);
ReadOnlyHeapIterator iterator(this);
for (HeapObject o = iterator.next(); !o.is_null(); o = iterator.next()) {
if (o->IsSeqOneByteString()) {
@@ -3409,27 +3409,16 @@
is_string_padding_cleared_ = true;
}
void ReadOnlySpace::Seal(SealMode ro_mode) {
void ReadOnlySpace::MarkAsReadOnly() {
DCHECK(!is_marked_read_only_);
FreeLinearAllocationArea();
is_marked_read_only_ = true;
auto* memory_allocator = heap()->memory_allocator();
if (ro_mode == SealMode::kDetachFromHeapAndForget) {
DetachFromHeap();
for (Page* p : *this) {
memory_allocator->UnregisterMemory(p);
static_cast<ReadOnlyPage*>(p)->MakeHeaderRelocatable();
}
}
SetPermissionsForPages(memory_allocator, PageAllocator::kRead);
SetPermissionsForPages(PageAllocator::kRead);
}
void ReadOnlySpace::Unseal() {
void ReadOnlySpace::MarkAsReadWrite() {
DCHECK(is_marked_read_only_);
SetPermissionsForPages(heap()->memory_allocator(), PageAllocator::kReadWrite);
SetPermissionsForPages(PageAllocator::kReadWrite);
is_marked_read_only_ = false;
}


@@ -318,11 +318,7 @@ class MemoryChunk {
// |INCREMENTAL_MARKING|: Indicates whether incremental marking is currently
// enabled.
INCREMENTAL_MARKING = 1u << 18,
NEW_SPACE_BELOW_AGE_MARK = 1u << 19,
// The memory chunk freeing bookkeeping has been performed but the chunk has
// not yet been freed.
UNREGISTERED = 1u << 20
NEW_SPACE_BELOW_AGE_MARK = 1u << 19
};
using Flags = uintptr_t;
@@ -479,10 +475,7 @@ class MemoryChunk {
size_t size() const { return size_; }
void set_size(size_t size) { size_ = size; }
inline Heap* heap() const {
DCHECK_NOT_NULL(heap_);
return heap_;
}
inline Heap* heap() const { return heap_; }
Heap* synchronized_heap();
@@ -1014,10 +1007,7 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
external_backing_store_bytes_ = nullptr;
}
Heap* heap() const {
DCHECK_NOT_NULL(heap_);
return heap_;
}
Heap* heap() const { return heap_; }
// Identity used in error reporting.
AllocationSpace identity() { return id_; }
@@ -1107,8 +1097,6 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
return !allocation_observers_paused_ && !allocation_observers_.empty();
}
void DetachFromHeap() { heap_ = nullptr; }
std::vector<AllocationObserver*> allocation_observers_;
// The List manages the pages that belong to the given space.
@@ -1454,20 +1442,15 @@ class MemoryAllocator {
Unmapper* unmapper() { return &unmapper_; }
// Performs all necessary bookkeeping to free the memory, but does not free
// it.
void UnregisterMemory(MemoryChunk* chunk);
// PreFree logically frees the object, i.e., it takes care of the size
// bookkeeping and calls the allocation callback.
void PreFreeMemory(MemoryChunk* chunk);
private:
void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,
size_t requested);
// PreFreeMemory logically frees the object, i.e., it unregisters the memory,
// logs a delete event and adds the chunk to remembered unmapped pages.
void PreFreeMemory(MemoryChunk* chunk);
// PerformFreeMemory can be called concurrently when PreFree was executed
// before.
// FreeMemory can be called concurrently when PreFree was executed before.
void PerformFreeMemory(MemoryChunk* chunk);
// See AllocatePage for public interface. Note that currently we only support
@@ -2993,36 +2976,41 @@ class MapSpace : public PagedSpace {
class ReadOnlySpace : public PagedSpace {
public:
class WritableScope {
public:
explicit WritableScope(ReadOnlySpace* space) : space_(space) {
space_->MarkAsReadWrite();
}
~WritableScope() { space_->MarkAsReadOnly(); }
private:
ReadOnlySpace* space_;
};
explicit ReadOnlySpace(Heap* heap);
// TODO(v8:7464): Remove this once PagedSpace::Unseal no longer writes to
// TODO(v8:7464): Remove this once PagedSpace::TearDown no longer writes to
// memory_chunk_list_.
~ReadOnlySpace() override { Unseal(); }
~ReadOnlySpace() override { MarkAsReadWrite(); }
bool writable() const { return !is_marked_read_only_; }
V8_EXPORT_PRIVATE void ClearStringPaddingIfNeeded();
enum class SealMode { kDetachFromHeapAndForget, kDoNotDetachFromHeap };
// Seal the space by marking it read-only, optionally detaching it
// from the heap and forgetting it for memory bookkeeping purposes (e.g.
// prevent space's memory from registering as leaked).
void Seal(SealMode ro_mode);
void MarkAsReadOnly();
// Make the heap forget the space for memory bookkeeping purposes
// (e.g. prevent space's memory from registering as leaked).
void Forget();
// During boot the free_space_map is created, and afterwards we may need
// to write it into the free list nodes that were already created.
void RepairFreeListsAfterDeserialization();
private:
// Unseal the space after is has been sealed, by making it writable.
// TODO(v8:7464): Only possible if the space hasn't been detached.
void Unseal();
void SetPermissionsForPages(MemoryAllocator* memory_allocator,
PageAllocator::Permission access);
void MarkAsReadWrite();
void SetPermissionsForPages(PageAllocator::Permission access);
bool is_marked_read_only_ = false;
//
// String padding must be cleared just before serialization and therefore the
// string padding in the space will already have been cleared if the space was
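WritableScope, restored in the hunk above, is a plain RAII guard: the constructor makes the space writable and the destructor seals it again, so a temporary write (such as clearing string padding) cannot leave the space unprotected. The same shape in a self-contained sketch (stand-in Space type; the real class flips page permissions on a ReadOnlySpace):

```cpp
#include <cassert>

// Minimal stand-in for ReadOnlySpace's permission state.
struct Space {
  bool read_only = true;
  void MarkAsReadWrite() { read_only = false; }
  void MarkAsReadOnly() { read_only = true; }
};

// Same shape as WritableScope: writable on entry, sealed again on exit,
// even if the scope is left early.
class WritableScope {
 public:
  explicit WritableScope(Space* space) : space_(space) {
    space_->MarkAsReadWrite();
  }
  ~WritableScope() { space_->MarkAsReadOnly(); }

 private:
  Space* space_;
};

int main() {
  Space space;
  {
    WritableScope scope(&space);
    assert(!space.read_only);  // temporary writes allowed here
  }
  assert(space.read_only);  // automatically sealed again
  return 0;
}
```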


@@ -3444,21 +3444,13 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
if (!create_heap_objects) {
startup_deserializer->DeserializeInto(this);
} else {
heap_.read_only_space()->ClearStringPaddingIfNeeded();
heap_.read_only_heap()->OnCreateHeapObjectsComplete(this);
heap_.read_only_heap()->OnCreateHeapObjectsComplete();
}
load_stub_cache_->Initialize();
store_stub_cache_->Initialize();
interpreter_->Initialize();
heap_.NotifyDeserializationComplete();
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
heap_.VerifyReadOnlyHeap();
}
#endif
delete setup_delegate_;
setup_delegate_ = nullptr;


@@ -26,7 +26,6 @@
#include "src/handles.h"
#include "src/heap/factory.h"
#include "src/heap/heap.h"
#include "src/heap/read-only-heap.h"
#include "src/isolate-allocator.h"
#include "src/isolate-data.h"
#include "src/messages.h"


@@ -631,7 +631,9 @@ void HeapObject::VerifySmiField(int offset) {
#endif
ReadOnlyRoots HeapObject::GetReadOnlyRoots() const {
return ReadOnlyHeap::GetReadOnlyRoots(*this);
// TODO(v8:7464): When RO_SPACE is embedded, this will access a global
// variable instead.
return ReadOnlyRoots(GetHeapFromWritableObject(*this));
}
Map HeapObject::map() const { return map_word().ToMap(); }


@@ -98,9 +98,7 @@ void HeapObject::PrintHeader(std::ostream& os, const char* id) { // NOLINT
os << map()->instance_type();
}
os << "]";
if (ReadOnlyHeap::Contains(*this)) {
os << " in ReadOnlySpace";
} else if (GetHeapFromWritableObject(*this)->InOldSpace(*this)) {
if (GetHeapFromWritableObject(*this)->InOldSpace(*this)) {
os << " in OldSpace";
}
if (!IsMap()) os << "\n - map: " << Brief(map());


@@ -9,7 +9,6 @@
#include "src/feedback-vector.h"
#include "src/handles.h"
#include "src/heap/read-only-heap.h"
#include "src/isolate.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/descriptor-array.h"
@@ -58,26 +57,24 @@ bool RootsTable::IsRootHandle(Handle<T> handle, RootIndex* index) const {
}
ReadOnlyRoots::ReadOnlyRoots(Heap* heap)
: ReadOnlyRoots(Isolate::FromHeap(heap)) {}
: roots_table_(Isolate::FromHeap(heap)->roots_table()) {}
ReadOnlyRoots::ReadOnlyRoots(Isolate* isolate)
: read_only_roots_(reinterpret_cast<Address*>(
isolate->roots_table().read_only_roots_begin().address())) {}
ReadOnlyRoots::ReadOnlyRoots(Address* ro_roots) : read_only_roots_(ro_roots) {}
: roots_table_(isolate->roots_table()) {}
// We use unchecked_cast below because we trust our read-only roots to
// have the right type, and to avoid the heavy #includes that would be
// required for checked casts.
#define ROOT_ACCESSOR(Type, name, CamelName) \
Type ReadOnlyRoots::name() const { \
DCHECK(CheckType(RootIndex::k##CamelName)); \
return Type::unchecked_cast(Object(at(RootIndex::k##CamelName))); \
} \
Handle<Type> ReadOnlyRoots::name##_handle() const { \
DCHECK(CheckType(RootIndex::k##CamelName)); \
return Handle<Type>(&at(RootIndex::k##CamelName)); \
#define ROOT_ACCESSOR(Type, name, CamelName) \
Type ReadOnlyRoots::name() const { \
DCHECK(CheckType(RootIndex::k##CamelName)); \
return Type::unchecked_cast( \
Object(roots_table_[RootIndex::k##CamelName])); \
} \
Handle<Type> ReadOnlyRoots::name##_handle() const { \
DCHECK(CheckType(RootIndex::k##CamelName)); \
return Handle<Type>(&roots_table_[RootIndex::k##CamelName]); \
}
READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
@@ -86,13 +83,13 @@ READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
Map ReadOnlyRoots::MapForFixedTypedArray(ExternalArrayType array_type) {
RootIndex root_index = RootsTable::RootIndexForFixedTypedArray(array_type);
DCHECK(CheckType(root_index));
return Map::unchecked_cast(Object(at(root_index)));
return Map::unchecked_cast(Object(roots_table_[root_index]));
}
Map ReadOnlyRoots::MapForFixedTypedArray(ElementsKind elements_kind) {
RootIndex root_index = RootsTable::RootIndexForFixedTypedArray(elements_kind);
DCHECK(CheckType(root_index));
return Map::unchecked_cast(Object(at(root_index)));
return Map::unchecked_cast(Object(roots_table_[root_index]));
}
FixedTypedArrayBase ReadOnlyRoots::EmptyFixedTypedArrayForTypedArray(
@@ -100,13 +97,7 @@ FixedTypedArrayBase ReadOnlyRoots::EmptyFixedTypedArrayForTypedArray(
RootIndex root_index =
RootsTable::RootIndexForEmptyFixedTypedArray(elements_kind);
DCHECK(CheckType(root_index));
return FixedTypedArrayBase::unchecked_cast(Object(at(root_index)));
}
Address& ReadOnlyRoots::at(RootIndex root_index) const {
size_t index = static_cast<size_t>(root_index);
DCHECK_LT(index, kEntriesCount);
return read_only_roots_[index];
return FixedTypedArrayBase::unchecked_cast(Object(roots_table_[root_index]));
}
} // namespace internal
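Both accessor variants above reduce to indexing a roots array and unchecked-casting the slot to the declared type; the removed at() helper additionally bounds-checked the index against kEntriesCount. A simplified, hypothetical analogue of that helper:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

using Address = uintptr_t;
constexpr std::size_t kEntriesCount = 4;

// Simplified analogue of ReadOnlyRoots::at(): check the index against the
// read-only entry count, then return a reference into the raw array.
class RootsView {
 public:
  explicit RootsView(Address* roots) : roots_(roots) {}

  Address& at(std::size_t index) const {
    assert(index < kEntriesCount);  // DCHECK_LT in the original
    return roots_[index];
  }

 private:
  Address* roots_;
};

int main() {
  Address roots[kEntriesCount] = {1, 2, 3, 4};
  RootsView view(roots);
  return view.at(3) == 4 ? 0 : 1;
}
```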


@@ -61,15 +61,15 @@ RootIndex RootsTable::RootIndexForEmptyFixedTypedArray(
void ReadOnlyRoots::Iterate(RootVisitor* visitor) {
visitor->VisitRootPointers(Root::kReadOnlyRootList, nullptr,
FullObjectSlot(read_only_roots_),
FullObjectSlot(&read_only_roots_[kEntriesCount]));
roots_table_.read_only_roots_begin(),
roots_table_.read_only_roots_end());
visitor->Synchronize(VisitorSynchronization::kReadOnlyRootList);
}
#ifdef DEBUG
bool ReadOnlyRoots::CheckType(RootIndex index) const {
Object root(at(index));
Object root(roots_table_[index]);
switch (index) {
#define CHECKTYPE(Type, name, CamelName) \
case RootIndex::k##CamelName: \


@@ -24,10 +24,9 @@ class Heap;
class Isolate;
class Map;
class PropertyCell;
class ReadOnlyHeap;
class RootVisitor;
class String;
class Symbol;
class RootVisitor;
// Defines all the read-only roots in Heap.
#define STRONG_READ_ONLY_ROOT_LIST(V) \
@@ -510,9 +509,6 @@ class RootsTable {
class ReadOnlyRoots {
public:
static constexpr size_t kEntriesCount =
static_cast<size_t>(RootIndex::kReadOnlyRootsCount);
V8_INLINE explicit ReadOnlyRoots(Heap* heap);
V8_INLINE explicit ReadOnlyRoots(Isolate* isolate);
@@ -538,13 +534,7 @@
#endif
private:
V8_INLINE explicit ReadOnlyRoots(Address* ro_roots);
V8_INLINE Address& at(RootIndex root_index) const;
Address* read_only_roots_;
friend class ReadOnlyHeap;
RootsTable& roots_table_;
};
} // namespace internal


@@ -57,6 +57,8 @@ ScriptCompiler::CachedData* CodeSerializer::Serialize(
// context independent.
if (script->ContainsAsmModule()) return nullptr;
isolate->heap()->read_only_space()->ClearStringPaddingIfNeeded();
// Serialize code object.
Handle<String> source(String::cast(script->source()), isolate);
CodeSerializer cs(isolate, SerializedCodeData::SourceHash(


@@ -6267,7 +6267,6 @@ UNINITIALIZED_TEST(ReinitializeStringHashSeed) {
CHECK(!context.IsEmpty());
v8::Context::Scope context_scope(context);
}
ReadOnlyHeap::ClearSharedHeapForTest();
isolate->Dispose();
}
}


@@ -95,6 +95,7 @@ class TestSerializer {
v8::Isolate::Scope isolate_scope(v8_isolate);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
isolate->Init(nullptr, nullptr);
isolate->heap()->read_only_space()->ClearStringPaddingIfNeeded();
return v8_isolate;
}
@@ -189,7 +190,6 @@ v8::StartupData CreateSnapshotDataBlob(
}
result = snapshot_creator.CreateBlob(function_code_handling);
}
ReadOnlyHeap::ClearSharedHeapForTest();
return result;
}
@@ -228,7 +228,6 @@ v8::StartupData WarmUpSnapshotDataBlob(v8::StartupData cold_snapshot_blob,
result = snapshot_creator.CreateBlob(
v8::SnapshotCreator::FunctionCodeHandling::kKeep);
}
ReadOnlyHeap::ClearSharedHeapForTest();
return result;
}
@@ -304,7 +303,6 @@ void TestStartupSerializerOnceImpl() {
v8::Isolate* isolate = TestSerializer::NewIsolateInitialized();
StartupBlobs blobs = Serialize(isolate);
isolate->Dispose();
ReadOnlyHeap::ClearSharedHeapForTest();
isolate = Deserialize(blobs);
{
v8::HandleScope handle_scope(isolate);
@@ -409,7 +407,6 @@ UNINITIALIZED_TEST(StartupSerializerTwice) {
StartupBlobs blobs2 = Serialize(isolate);
isolate->Dispose();
blobs1.Dispose();
ReadOnlyHeap::ClearSharedHeapForTest();
isolate = Deserialize(blobs2);
{
v8::Isolate::Scope isolate_scope(isolate);
@@ -430,7 +427,6 @@ UNINITIALIZED_TEST(StartupSerializerOnceRunScript) {
v8::Isolate* isolate = TestSerializer::NewIsolateInitialized();
StartupBlobs blobs = Serialize(isolate);
isolate->Dispose();
ReadOnlyHeap::ClearSharedHeapForTest();
isolate = Deserialize(blobs);
{
v8::Isolate::Scope isolate_scope(isolate);
@@ -459,7 +455,6 @@ UNINITIALIZED_TEST(StartupSerializerTwiceRunScript) {
StartupBlobs blobs2 = Serialize(isolate);
isolate->Dispose();
blobs1.Dispose();
ReadOnlyHeap::ClearSharedHeapForTest();
isolate = Deserialize(blobs2);
{
v8::Isolate::Scope isolate_scope(isolate);
@@ -539,7 +534,6 @@ static void PartiallySerializeContext(Vector<const byte>* startup_blob_out,
*read_only_blob_out = WritePayload(read_only_snapshot.RawData());
}
v8_isolate->Dispose();
ReadOnlyHeap::ClearSharedHeapForTest();
}
UNINITIALIZED_TEST(PartialSerializerContext) {
@@ -669,7 +663,6 @@ static void PartiallySerializeCustomContext(
*read_only_blob_out = WritePayload(read_only_snapshot.RawData());
}
v8_isolate->Dispose();
ReadOnlyHeap::ClearSharedHeapForTest();
}
UNINITIALIZED_TEST(PartialSerializerCustomContext) {
@@ -1404,7 +1397,6 @@ UNINITIALIZED_TEST(CustomSnapshotDataBlobWithLocker) {
const char* source1 = "function f() { return 42; }";
ReadOnlyHeap::ClearSharedHeapForTest();
v8::StartupData data1 = CreateSnapshotDataBlob(source1);
v8::Isolate::CreateParams params1;
@@ -3348,7 +3340,6 @@ UNINITIALIZED_TEST(SnapshotCreatorAddData) {
}
{
ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -3429,7 +3420,6 @@ UNINITIALIZED_TEST(SnapshotCreatorAddData) {
isolate->Dispose();
}
{
ReadOnlyHeap::ClearSharedHeapForTest();
SnapshotCreator creator(nullptr, &blob);
v8::Isolate* isolate = creator.GetIsolate();
{
@@ -3456,7 +3446,6 @@ UNINITIALIZED_TEST(SnapshotCreatorAddData) {
creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
}
{
ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -3801,7 +3790,6 @@ UNINITIALIZED_TEST(ReinitializeHashSeedRehashable) {
CHECK(blob.CanBeRehashed());
}
ReadOnlyHeap::ClearSharedHeapForTest();
i::FLAG_hash_seed = 1337;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();


@@ -267,9 +267,6 @@ TrackingPageAllocator* SequentialUnmapperTest::tracking_page_allocator_ =
v8::PageAllocator* SequentialUnmapperTest::old_page_allocator_ = nullptr;
bool SequentialUnmapperTest::old_flag_;
// TODO(v8:7464): Enable these once there is a good way to free the shared
// read-only space.
#ifndef V8_SHARED_RO_HEAP
// See v8:5945.
TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
Page* page = allocator()->AllocatePage(
@@ -329,7 +326,6 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
tracking_page_allocator()->CheckIsFree(page->address(), page_size);
}
}
#endif // V8_SHARED_RO_HEAP
} // namespace internal
} // namespace v8