[heap] Split out LargeObject* from spaces.h

Creates new large-spaces.h and large-spaces.cc files to hold LargeObjectSpace
and its subclasses, moved out of spaces.h and spaces.cc.

Bug: v8:10473
Change-Id: Ifdb4eac9df5c8213f992d549e04b612b62f6df0b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2170826
Commit-Queue: Dan Elphick <delphick@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67456}
Dan Elphick 2020-04-28 17:38:57 +01:00 committed by Commit Bot
parent a3fbabc726
commit d398af189f
12 changed files with 787 additions and 728 deletions
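For orientation, the type hierarchy that moves into the new header looks roughly like the sketch below. This is an illustrative outline only, with stub base types so it compiles on its own; the real declarations (with their members) are in the large-spaces.h diff further down.

// Sketch only: stub bases stand in for V8's real MemoryChunk and Space classes.
struct MemoryChunk {};  // stand-in for V8's MemoryChunk
struct Space {};        // stand-in for V8's Space base class

struct LargePage : MemoryChunk {};                       // one large object per page
struct LargeObjectSpace : Space {};                      // shared large-object logic
struct OldLargeObjectSpace : LargeObjectSpace {};        // LO_SPACE
struct NewLargeObjectSpace : LargeObjectSpace {};        // NEW_LO_SPACE
struct CodeLargeObjectSpace : OldLargeObjectSpace {};    // CODE_LO_SPACE
struct OffThreadLargeObjectSpace : LargeObjectSpace {};  // off-thread allocation

int main() { return 0; }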


@@ -2411,6 +2411,8 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/invalidated-slots.h",
"src/heap/item-parallel-job.cc",
"src/heap/item-parallel-job.h",
"src/heap/large-spaces.cc",
"src/heap/large-spaces.h",
"src/heap/list.h",
"src/heap/local-allocator-inl.h",
"src/heap/local-allocator.h",


@@ -6,6 +6,7 @@
#include "src/codegen/code-comments.h"
#include "src/codegen/reloc-info.h"
#include "src/heap/large-spaces.h"
#include "src/heap/spaces-inl.h" // For PagedSpaceObjectIterator.
#include "src/objects/objects-inl.h"


@@ -40,6 +40,7 @@
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/large-spaces.h"
#include "src/heap/local-heap.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"

src/heap/large-spaces.cc (new file, 546 lines)

@@ -0,0 +1,546 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/large-spaces.h"
#include "src/execution/isolate.h"
#include "src/heap/combined-heap.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/list.h"
#include "src/heap/marking.h"
#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces-inl.h"
#include "src/logging/log.h"
#include "src/objects/objects-inl.h"
#include "src/sanitizer/msan.h"
#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
// This check is here to ensure that the lower 32 bits of any real heap object
// can't overlap with the lower 32 bits of cleared weak reference value and
// therefore it's enough to compare only the lower 32 bits of a MaybeObject in
// order to figure out if it's a cleared weak reference or not.
STATIC_ASSERT(kClearedWeakHeapObjectLower32 < LargePage::kHeaderSize);
LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
Executability executable) {
if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
FATAL("Code page is too large.");
}
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
LargePage* page = static_cast<LargePage*>(chunk);
page->SetFlag(MemoryChunk::LARGE_PAGE);
page->list_node().Initialize();
return page;
}
size_t LargeObjectSpace::Available() {
// We return zero here since we cannot take advantage of already allocated
// large object memory.
return 0;
}
Address LargePage::GetAddressToShrink(Address object_address,
size_t object_size) {
if (executable() == EXECUTABLE) {
return 0;
}
size_t used_size = ::RoundUp((object_address - address()) + object_size,
MemoryAllocator::GetCommitPageSize());
if (used_size < CommittedPhysicalMemory()) {
return address() + used_size;
}
return 0;
}
void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
DCHECK_NULL(this->sweeping_slot_set());
RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(this, free_start, area_end());
RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(this, free_start, area_end());
}
// -----------------------------------------------------------------------------
// LargeObjectSpaceObjectIterator
LargeObjectSpaceObjectIterator::LargeObjectSpaceObjectIterator(
LargeObjectSpace* space) {
current_ = space->first_page();
}
HeapObject LargeObjectSpaceObjectIterator::Next() {
if (current_ == nullptr) return HeapObject();
HeapObject object = current_->GetObject();
current_ = current_->next_page();
return object;
}
// -----------------------------------------------------------------------------
// OldLargeObjectSpace
LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
: Space(heap, id, new NoFreeList()),
size_(0),
page_count_(0),
objects_size_(0) {}
void LargeObjectSpace::TearDown() {
while (!memory_chunk_list_.Empty()) {
LargePage* page = first_page();
LOG(heap()->isolate(),
DeleteEvent("LargeObjectChunk",
reinterpret_cast<void*>(page->address())));
memory_chunk_list_.Remove(page);
heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
}
}
AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size) {
return AllocateRaw(object_size, NOT_EXECUTABLE);
}
AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
Executability executable) {
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
if (!heap()->CanExpandOldGeneration(object_size) ||
!heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
return AllocationResult::Retry(identity());
}
LargePage* page = AllocateLargePage(object_size, executable);
if (page == nullptr) return AllocationResult::Retry(identity());
page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
HeapObject object = page->GetObject();
heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
heap()->GCFlagsForIncrementalMarking(),
kGCCallbackScheduleIdleGarbageCollection);
if (heap()->incremental_marking()->black_allocation()) {
heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
}
DCHECK_IMPLIES(
heap()->incremental_marking()->black_allocation(),
heap()->incremental_marking()->marking_state()->IsBlack(object));
page->InitializationMemoryFence();
heap()->NotifyOldGenerationExpansion();
AllocationStep(object_size, object.address(), object_size);
return object;
}
LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
Executability executable) {
LargePage* page = heap()->memory_allocator()->AllocateLargePage(
object_size, this, executable);
if (page == nullptr) return nullptr;
DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
AddPage(page, object_size);
HeapObject object = page->GetObject();
heap()->CreateFillerObjectAt(object.address(), object_size,
ClearRecordedSlots::kNo);
return page;
}
size_t LargeObjectSpace::CommittedPhysicalMemory() {
// On a platform that provides lazy committing of memory, we over-account
// the actually committed memory. There is no easy way right now to support
// precise accounting of committed memory in large object space.
return CommittedMemory();
}
LargePage* CodeLargeObjectSpace::FindPage(Address a) {
const Address key = MemoryChunk::FromAddress(a)->address();
auto it = chunk_map_.find(key);
if (it != chunk_map_.end()) {
LargePage* page = it->second;
CHECK(page->Contains(a));
return page;
}
return nullptr;
}
void OldLargeObjectSpace::ClearMarkingStateOfLiveObjects() {
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
LargeObjectSpaceObjectIterator it(this);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
if (marking_state->IsBlackOrGrey(obj)) {
Marking::MarkWhite(marking_state->MarkBitFrom(obj));
MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
chunk->ResetProgressBar();
marking_state->SetLiveBytes(chunk, 0);
}
DCHECK(marking_state->IsWhite(obj));
}
}
void CodeLargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
for (Address current = reinterpret_cast<Address>(page);
current < reinterpret_cast<Address>(page) + page->size();
current += MemoryChunk::kPageSize) {
chunk_map_[current] = page;
}
}
void CodeLargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
for (Address current = page->address();
current < reinterpret_cast<Address>(page) + page->size();
current += MemoryChunk::kPageSize) {
chunk_map_.erase(current);
}
}
void OldLargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
DCHECK(page->IsLargePage());
DCHECK(page->IsFlagSet(MemoryChunk::FROM_PAGE));
DCHECK(!page->IsFlagSet(MemoryChunk::TO_PAGE));
size_t object_size = static_cast<size_t>(page->GetObject().Size());
static_cast<LargeObjectSpace*>(page->owner())->RemovePage(page, object_size);
page->ClearFlag(MemoryChunk::FROM_PAGE);
AddPage(page, object_size);
}
void LargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
size_ += static_cast<int>(page->size());
AccountCommitted(page->size());
objects_size_ += object_size;
page_count_++;
memory_chunk_list_.PushBack(page);
page->set_owner(this);
page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
}
void LargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
size_ -= static_cast<int>(page->size());
AccountUncommitted(page->size());
objects_size_ -= object_size;
page_count_--;
memory_chunk_list_.Remove(page);
page->set_owner(nullptr);
}
void LargeObjectSpace::FreeUnmarkedObjects() {
LargePage* current = first_page();
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
// Right-trimming does not update the objects_size_ counter. We are lazily
// updating it after every GC.
size_t surviving_object_size = 0;
while (current) {
LargePage* next_current = current->next_page();
HeapObject object = current->GetObject();
DCHECK(!marking_state->IsGrey(object));
size_t size = static_cast<size_t>(object.Size());
if (marking_state->IsBlack(object)) {
Address free_start;
surviving_object_size += size;
if ((free_start = current->GetAddressToShrink(object.address(), size)) !=
0) {
DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
current->ClearOutOfLiveRangeSlots(free_start);
const size_t bytes_to_free =
current->size() - (free_start - current->address());
heap()->memory_allocator()->PartialFreeMemory(
current, free_start, bytes_to_free,
current->area_start() + object.Size());
size_ -= bytes_to_free;
AccountUncommitted(bytes_to_free);
}
} else {
RemovePage(current, size);
heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
current);
}
current = next_current;
}
objects_size_ = surviving_object_size;
}
bool LargeObjectSpace::Contains(HeapObject object) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
bool owned = (chunk->owner() == this);
SLOW_DCHECK(!owned || ContainsSlow(object.address()));
return owned;
}
bool LargeObjectSpace::ContainsSlow(Address addr) {
for (LargePage* page : *this) {
if (page->Contains(addr)) return true;
}
return false;
}
std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator(
Heap* heap) {
return std::unique_ptr<ObjectIterator>(
new LargeObjectSpaceObjectIterator(this));
}
#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify(Isolate* isolate) {
size_t external_backing_store_bytes[kNumTypes];
for (int i = 0; i < kNumTypes; i++) {
external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
}
for (LargePage* chunk = first_page(); chunk != nullptr;
chunk = chunk->next_page()) {
// Each chunk contains an object that starts at the large object page's
// object area start.
HeapObject object = chunk->GetObject();
Page* page = Page::FromHeapObject(object);
CHECK(object.address() == page->area_start());
// The first word should be a map, and we expect all map pointers to be
// in map space or read-only space.
Map map = object.map();
CHECK(map.IsMap());
CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
// We have only the following types in the large object space:
if (!(object.IsAbstractCode() || object.IsSeqString() ||
object.IsExternalString() || object.IsThinString() ||
object.IsFixedArray() || object.IsFixedDoubleArray() ||
object.IsWeakFixedArray() || object.IsWeakArrayList() ||
object.IsPropertyArray() || object.IsByteArray() ||
object.IsFeedbackVector() || object.IsBigInt() ||
object.IsFreeSpace() || object.IsFeedbackMetadata() ||
object.IsContext() || object.IsUncompiledDataWithoutPreparseData() ||
object.IsPreparseData()) &&
!FLAG_young_generation_large_objects) {
FATAL("Found invalid Object (instance_type=%i) in large object space.",
object.map().instance_type());
}
// The object itself should look OK.
object.ObjectVerify(isolate);
if (!FLAG_verify_heap_skip_remembered_set) {
heap()->VerifyRememberedSetFor(object);
}
// Byte arrays and strings don't have interior pointers.
if (object.IsAbstractCode()) {
VerifyPointersVisitor code_visitor(heap());
object.IterateBody(map, object.Size(), &code_visitor);
} else if (object.IsFixedArray()) {
FixedArray array = FixedArray::cast(object);
for (int j = 0; j < array.length(); j++) {
Object element = array.get(j);
if (element.IsHeapObject()) {
HeapObject element_object = HeapObject::cast(element);
CHECK(IsValidHeapObject(heap(), element_object));
CHECK(element_object.map().IsMap());
}
}
} else if (object.IsPropertyArray()) {
PropertyArray array = PropertyArray::cast(object);
for (int j = 0; j < array.length(); j++) {
Object property = array.get(j);
if (property.IsHeapObject()) {
HeapObject property_object = HeapObject::cast(property);
CHECK(heap()->Contains(property_object));
CHECK(property_object.map().IsMap());
}
}
}
for (int i = 0; i < kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
external_backing_store_bytes[t] += chunk->ExternalBackingStoreBytes(t);
}
}
for (int i = 0; i < kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
}
}
#endif
#ifdef DEBUG
void LargeObjectSpace::Print() {
StdoutStream os;
LargeObjectSpaceObjectIterator it(this);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
obj.Print(os);
}
}
#endif // DEBUG
OldLargeObjectSpace::OldLargeObjectSpace(Heap* heap)
: LargeObjectSpace(heap, LO_SPACE) {}
OldLargeObjectSpace::OldLargeObjectSpace(Heap* heap, AllocationSpace id)
: LargeObjectSpace(heap, id) {}
void OldLargeObjectSpace::MergeOffThreadSpace(
OffThreadLargeObjectSpace* other) {
DCHECK(identity() == other->identity());
while (!other->memory_chunk_list().Empty()) {
LargePage* page = other->first_page();
HeapObject object = page->GetObject();
int size = object.Size();
other->RemovePage(page, size);
AddPage(page, size);
// TODO(leszeks): Here we should AllocationStep, see the TODO in
// PagedSpace::MergeOffThreadSpace.
if (heap()->incremental_marking()->black_allocation()) {
heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
}
DCHECK_IMPLIES(
heap()->incremental_marking()->black_allocation(),
heap()->incremental_marking()->marking_state()->IsBlack(object));
}
}
NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap, size_t capacity)
: LargeObjectSpace(heap, NEW_LO_SPACE),
pending_object_(0),
capacity_(capacity) {}
AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
// Do not allocate more objects if promoting the existing object would exceed
// the old generation capacity.
if (!heap()->CanExpandOldGeneration(SizeOfObjects())) {
return AllocationResult::Retry(identity());
}
// Allocation for the first object must succeed independent from the capacity.
if (SizeOfObjects() > 0 && static_cast<size_t>(object_size) > Available()) {
return AllocationResult::Retry(identity());
}
LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
if (page == nullptr) return AllocationResult::Retry(identity());
// The size of the first object may exceed the capacity.
capacity_ = Max(capacity_, SizeOfObjects());
HeapObject result = page->GetObject();
page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
page->SetFlag(MemoryChunk::TO_PAGE);
pending_object_.store(result.address(), std::memory_order_relaxed);
#ifdef ENABLE_MINOR_MC
if (FLAG_minor_mc) {
page->AllocateYoungGenerationBitmap();
heap()
->minor_mark_compact_collector()
->non_atomic_marking_state()
->ClearLiveness(page);
}
#endif // ENABLE_MINOR_MC
page->InitializationMemoryFence();
DCHECK(page->IsLargePage());
DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
AllocationStep(object_size, result.address(), object_size);
return result;
}
size_t NewLargeObjectSpace::Available() { return capacity_ - SizeOfObjects(); }
void NewLargeObjectSpace::Flip() {
for (LargePage* chunk = first_page(); chunk != nullptr;
chunk = chunk->next_page()) {
chunk->SetFlag(MemoryChunk::FROM_PAGE);
chunk->ClearFlag(MemoryChunk::TO_PAGE);
}
}
void NewLargeObjectSpace::FreeDeadObjects(
const std::function<bool(HeapObject)>& is_dead) {
bool is_marking = heap()->incremental_marking()->IsMarking();
size_t surviving_object_size = 0;
bool freed_pages = false;
for (auto it = begin(); it != end();) {
LargePage* page = *it;
it++;
HeapObject object = page->GetObject();
size_t size = static_cast<size_t>(object.Size());
if (is_dead(object)) {
freed_pages = true;
RemovePage(page, size);
heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
if (FLAG_concurrent_marking && is_marking) {
heap()->concurrent_marking()->ClearMemoryChunkData(page);
}
} else {
surviving_object_size += size;
}
}
// Right-trimming does not update the objects_size_ counter. We are lazily
// updating it after every GC.
objects_size_ = surviving_object_size;
if (freed_pages) {
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
}
void NewLargeObjectSpace::SetCapacity(size_t capacity) {
capacity_ = Max(capacity, SizeOfObjects());
}
CodeLargeObjectSpace::CodeLargeObjectSpace(Heap* heap)
: OldLargeObjectSpace(heap, CODE_LO_SPACE),
chunk_map_(kInitialChunkMapCapacity) {}
AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) {
return OldLargeObjectSpace::AllocateRaw(object_size, EXECUTABLE);
}
void CodeLargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
OldLargeObjectSpace::AddPage(page, object_size);
InsertChunkMapEntries(page);
heap()->isolate()->AddCodeMemoryChunk(page);
}
void CodeLargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
RemoveChunkMapEntries(page);
heap()->isolate()->RemoveCodeMemoryChunk(page);
OldLargeObjectSpace::RemovePage(page, object_size);
}
OffThreadLargeObjectSpace::OffThreadLargeObjectSpace(Heap* heap)
: LargeObjectSpace(heap, LO_SPACE) {
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
// OffThreadLargeObjectSpace doesn't work with third-party heap.
UNREACHABLE();
#endif
}
AllocationResult OffThreadLargeObjectSpace::AllocateRaw(int object_size) {
LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
if (page == nullptr) return AllocationResult::Retry(identity());
return page->GetObject();
}
void OffThreadLargeObjectSpace::FreeUnmarkedObjects() {
// We should never try to free objects in this space.
UNREACHABLE();
}
} // namespace internal
} // namespace v8
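The chunk_map_ bookkeeping above (InsertChunkMapEntries/FindPage) is worth calling out: every kPageSize-aligned address covered by a large page is registered, so finding the page for an arbitrary interior address is a single hash lookup. Below is a minimal stand-alone model of that idea; FakeLargePage and the kPageSize constant are hypothetical stand-ins, not the V8 classes.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <unordered_map>

// Stand-in for MemoryChunk::kPageSize; the exact value is irrelevant here.
constexpr std::uintptr_t kPageSize = 256 * 1024;

struct FakeLargePage {
  std::uintptr_t start;  // page start address
  std::size_t size;      // total page size (spans several kPageSize steps)
};

int main() {
  std::unordered_map<std::uintptr_t, FakeLargePage*> chunk_map;
  FakeLargePage page{4 * kPageSize, 3 * kPageSize};

  // InsertChunkMapEntries: register every page-aligned step of the chunk.
  for (std::uintptr_t a = page.start; a < page.start + page.size; a += kPageSize) {
    chunk_map[a] = &page;
  }

  // FindPage: round an interior address down to a page boundary, then look up.
  std::uintptr_t addr = page.start + kPageSize + 42;
  std::uintptr_t key = addr & ~(kPageSize - 1);
  std::cout << (chunk_map.count(key) ? "found page" : "not found") << "\n";
  return 0;
}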

src/heap/large-spaces.h (new file, 231 lines)

@@ -0,0 +1,231 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_LARGE_SPACES_H_
#define V8_HEAP_LARGE_SPACES_H_
#include <atomic>
#include <functional>
#include <memory>
#include <unordered_map>
#include "src/base/macros.h"
#include "src/common/globals.h"
#include "src/heap/heap.h"
#include "src/heap/spaces.h"
#include "src/objects/heap-object.h"
namespace v8 {
namespace internal {
class Isolate;
class LargePage : public MemoryChunk {
public:
// A limit to guarantee that we do not overflow typed slot offset in the old
// to old remembered set. Note that this limit is higher than what assembler
// already imposes on x64 and ia32 architectures.
static const int kMaxCodePageSize = 512 * MB;
static LargePage* FromHeapObject(HeapObject o) {
return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
}
HeapObject GetObject() { return HeapObject::FromAddress(area_start()); }
LargePage* next_page() { return static_cast<LargePage*>(list_node_.next()); }
// Uncommit memory that is not in use anymore by the object. If the object
// cannot be shrunk 0 is returned.
Address GetAddressToShrink(Address object_address, size_t object_size);
void ClearOutOfLiveRangeSlots(Address free_start);
private:
static LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
Executability executable);
friend class MemoryAllocator;
};
STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
// -----------------------------------------------------------------------------
// Large objects ( > kMaxRegularHeapObjectSize ) are allocated and managed by
// the large object space. Large objects do not move during garbage collections.
class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
public:
using iterator = LargePageIterator;
~LargeObjectSpace() override { TearDown(); }
// Releases internal resources, frees objects in this space.
void TearDown();
// Available bytes for objects in this space.
size_t Available() override;
size_t Size() override { return size_; }
size_t SizeOfObjects() override { return objects_size_; }
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() override;
int PageCount() { return page_count_; }
// Frees unmarked objects.
virtual void FreeUnmarkedObjects();
// Checks whether a heap object is in this space; O(1).
bool Contains(HeapObject obj);
// Checks whether an address is in the object area in this space. Iterates all
// objects in the space. May be slow.
bool ContainsSlow(Address addr);
// Checks whether the space is empty.
bool IsEmpty() { return first_page() == nullptr; }
virtual void AddPage(LargePage* page, size_t object_size);
virtual void RemovePage(LargePage* page, size_t object_size);
LargePage* first_page() {
return reinterpret_cast<LargePage*>(Space::first_page());
}
iterator begin() { return iterator(first_page()); }
iterator end() { return iterator(nullptr); }
std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
virtual bool is_off_thread() const { return false; }
#ifdef VERIFY_HEAP
virtual void Verify(Isolate* isolate);
#endif
#ifdef DEBUG
void Print() override;
#endif
protected:
LargeObjectSpace(Heap* heap, AllocationSpace id);
LargePage* AllocateLargePage(int object_size, Executability executable);
size_t size_; // allocated bytes
int page_count_; // number of chunks
size_t objects_size_; // size of objects
private:
friend class LargeObjectSpaceObjectIterator;
};
class OffThreadLargeObjectSpace;
class OldLargeObjectSpace : public LargeObjectSpace {
public:
explicit OldLargeObjectSpace(Heap* heap);
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
AllocateRaw(int object_size);
// Clears the marking state of live objects.
void ClearMarkingStateOfLiveObjects();
void PromoteNewLargeObject(LargePage* page);
V8_EXPORT_PRIVATE void MergeOffThreadSpace(OffThreadLargeObjectSpace* other);
protected:
explicit OldLargeObjectSpace(Heap* heap, AllocationSpace id);
V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
Executability executable);
};
class NewLargeObjectSpace : public LargeObjectSpace {
public:
NewLargeObjectSpace(Heap* heap, size_t capacity);
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
AllocateRaw(int object_size);
// Available bytes for objects in this space.
size_t Available() override;
void Flip();
void FreeDeadObjects(const std::function<bool(HeapObject)>& is_dead);
void SetCapacity(size_t capacity);
// The last allocated object that is not guaranteed to be initialized when the
// concurrent marker visits it.
Address pending_object() {
return pending_object_.load(std::memory_order_relaxed);
}
void ResetPendingObject() { pending_object_.store(0); }
private:
std::atomic<Address> pending_object_;
size_t capacity_;
};
class CodeLargeObjectSpace : public OldLargeObjectSpace {
public:
explicit CodeLargeObjectSpace(Heap* heap);
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
AllocateRaw(int object_size);
// Finds a large object page containing the given address, returns nullptr if
// such a page doesn't exist.
LargePage* FindPage(Address a);
protected:
void AddPage(LargePage* page, size_t object_size) override;
void RemovePage(LargePage* page, size_t object_size) override;
private:
static const size_t kInitialChunkMapCapacity = 1024;
void InsertChunkMapEntries(LargePage* page);
void RemoveChunkMapEntries(LargePage* page);
// Page-aligned addresses to their corresponding LargePage.
std::unordered_map<Address, LargePage*> chunk_map_;
};
class V8_EXPORT_PRIVATE OffThreadLargeObjectSpace : public LargeObjectSpace {
public:
explicit OffThreadLargeObjectSpace(Heap* heap);
V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size);
void FreeUnmarkedObjects() override;
bool is_off_thread() const override { return true; }
protected:
// OldLargeObjectSpace can mess with OffThreadLargeObjectSpace during merging.
friend class OldLargeObjectSpace;
V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
Executability executable);
};
class LargeObjectSpaceObjectIterator : public ObjectIterator {
public:
explicit LargeObjectSpaceObjectIterator(LargeObjectSpace* space);
HeapObject Next() override;
private:
LargePage* current_;
};
} // namespace internal
} // namespace v8
#endif // V8_HEAP_LARGE_SPACES_H_
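One detail of the header above that is easy to miss: NewLargeObjectSpace tracks a soft capacity, and AllocateRaw lets the very first object exceed it, while later objects must fit into Available() (capacity minus bytes already allocated). A minimal stand-alone model of that rule follows; FakeNewLargeObjectSpace is a hypothetical stand-in and ignores the old-generation expansion checks the real code also performs.

#include <algorithm>
#include <cstddef>
#include <iostream>

struct FakeNewLargeObjectSpace {
  std::size_t capacity;          // soft limit, cf. SetCapacity()
  std::size_t objects_size = 0;  // bytes of objects currently allocated

  std::size_t Available() const { return capacity - objects_size; }

  bool AllocateRaw(std::size_t object_size) {
    // Only the first object may exceed the remaining capacity.
    if (objects_size > 0 && object_size > Available()) return false;
    // The first object may be oversized, so the capacity grows to cover it.
    capacity = std::max(capacity, objects_size + object_size);
    objects_size += object_size;
    return true;
  }
};

int main() {
  FakeNewLargeObjectSpace space{1 << 20};     // 1 MB capacity (arbitrary)
  std::cout << space.AllocateRaw(2u << 20)    // succeeds: first object
            << space.AllocateRaw(1) << "\n";  // fails: no capacity left
  return 0;
}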


@@ -20,6 +20,7 @@
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/item-parallel-job.h"
#include "src/heap/large-spaces.h"
#include "src/heap/local-allocator-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/marking-visitor-inl.h"


@@ -7,6 +7,7 @@
#include <vector>
#include "src/common/globals.h"
#include "src/heap/large-spaces.h"
#include "src/heap/spaces.h"
namespace v8 {


@@ -265,10 +265,6 @@ void Page::ClearEvacuationCandidate() {
InitializeFreeListCategories();
}
HeapObject LargePage::GetObject() {
return HeapObject::FromAddress(area_start());
}
OldGenerationMemoryChunkIterator::OldGenerationMemoryChunkIterator(Heap* heap)
: heap_(heap),
state_(kOldSpaceState),


@@ -24,6 +24,7 @@
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/large-spaces.h"
#include "src/heap/mark-compact.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h"
@@ -48,7 +49,6 @@ namespace internal {
// in order to figure out if it's a cleared weak reference or not.
STATIC_ASSERT(kClearedWeakHeapObjectLower32 > 0);
STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);
STATIC_ASSERT(kClearedWeakHeapObjectLower32 < LargePage::kHeaderSize);
// ----------------------------------------------------------------------------
// PagedSpaceObjectIterator
@@ -792,21 +792,6 @@ Page* SemiSpace::InitializePage(MemoryChunk* chunk) {
return page;
}
LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
Executability executable) {
if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
FATAL("Code page is too large.");
}
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
LargePage* page = static_cast<LargePage*>(chunk);
page->SetFlag(MemoryChunk::LARGE_PAGE);
page->list_node().Initialize();
return page;
}
void Page::AllocateFreeListCategories() {
DCHECK_NULL(categories_);
categories_ = new FreeListCategory*[free_list()->number_of_categories()]();
@@ -2652,12 +2637,6 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
return true;
}
size_t LargeObjectSpace::Available() {
// We return zero here since we cannot take advantage of already allocated
// large object memory.
return 0;
}
void SpaceWithLinearArea::StartNextInlineAllocationStep() {
if (heap()->allocation_step_in_progress()) {
// If we are mid-way through an existing step, don't start a new one.
@@ -4167,500 +4146,5 @@ void ReadOnlySpace::Unseal() {
is_marked_read_only_ = false;
}
Address LargePage::GetAddressToShrink(Address object_address,
size_t object_size) {
if (executable() == EXECUTABLE) {
return 0;
}
size_t used_size = ::RoundUp((object_address - address()) + object_size,
MemoryAllocator::GetCommitPageSize());
if (used_size < CommittedPhysicalMemory()) {
return address() + used_size;
}
return 0;
}
void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
DCHECK_NULL(this->sweeping_slot_set());
RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(this, free_start, area_end());
RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(this, free_start, area_end());
}
// -----------------------------------------------------------------------------
// LargeObjectSpaceObjectIterator
LargeObjectSpaceObjectIterator::LargeObjectSpaceObjectIterator(
LargeObjectSpace* space) {
current_ = space->first_page();
}
HeapObject LargeObjectSpaceObjectIterator::Next() {
if (current_ == nullptr) return HeapObject();
HeapObject object = current_->GetObject();
current_ = current_->next_page();
return object;
}
// -----------------------------------------------------------------------------
// OldLargeObjectSpace
LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
: Space(heap, id, new NoFreeList()),
size_(0),
page_count_(0),
objects_size_(0) {}
void LargeObjectSpace::TearDown() {
while (!memory_chunk_list_.Empty()) {
LargePage* page = first_page();
LOG(heap()->isolate(),
DeleteEvent("LargeObjectChunk",
reinterpret_cast<void*>(page->address())));
memory_chunk_list_.Remove(page);
heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
}
}
AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size) {
return AllocateRaw(object_size, NOT_EXECUTABLE);
}
AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
Executability executable) {
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
if (!heap()->CanExpandOldGeneration(object_size) ||
!heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
return AllocationResult::Retry(identity());
}
LargePage* page = AllocateLargePage(object_size, executable);
if (page == nullptr) return AllocationResult::Retry(identity());
page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
HeapObject object = page->GetObject();
heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
heap()->GCFlagsForIncrementalMarking(),
kGCCallbackScheduleIdleGarbageCollection);
if (heap()->incremental_marking()->black_allocation()) {
heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
}
DCHECK_IMPLIES(
heap()->incremental_marking()->black_allocation(),
heap()->incremental_marking()->marking_state()->IsBlack(object));
page->InitializationMemoryFence();
heap()->NotifyOldGenerationExpansion();
AllocationStep(object_size, object.address(), object_size);
return object;
}
LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
Executability executable) {
LargePage* page = heap()->memory_allocator()->AllocateLargePage(
object_size, this, executable);
if (page == nullptr) return nullptr;
DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
AddPage(page, object_size);
HeapObject object = page->GetObject();
heap()->CreateFillerObjectAt(object.address(), object_size,
ClearRecordedSlots::kNo);
return page;
}
size_t LargeObjectSpace::CommittedPhysicalMemory() {
// On a platform that provides lazy committing of memory, we over-account
// the actually committed memory. There is no easy way right now to support
// precise accounting of committed memory in large object space.
return CommittedMemory();
}
LargePage* CodeLargeObjectSpace::FindPage(Address a) {
const Address key = MemoryChunk::FromAddress(a)->address();
auto it = chunk_map_.find(key);
if (it != chunk_map_.end()) {
LargePage* page = it->second;
CHECK(page->Contains(a));
return page;
}
return nullptr;
}
void OldLargeObjectSpace::ClearMarkingStateOfLiveObjects() {
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
LargeObjectSpaceObjectIterator it(this);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
if (marking_state->IsBlackOrGrey(obj)) {
Marking::MarkWhite(marking_state->MarkBitFrom(obj));
MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
chunk->ResetProgressBar();
marking_state->SetLiveBytes(chunk, 0);
}
DCHECK(marking_state->IsWhite(obj));
}
}
void CodeLargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
for (Address current = reinterpret_cast<Address>(page);
current < reinterpret_cast<Address>(page) + page->size();
current += MemoryChunk::kPageSize) {
chunk_map_[current] = page;
}
}
void CodeLargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
for (Address current = page->address();
current < reinterpret_cast<Address>(page) + page->size();
current += MemoryChunk::kPageSize) {
chunk_map_.erase(current);
}
}
void OldLargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
DCHECK(page->IsLargePage());
DCHECK(page->IsFlagSet(MemoryChunk::FROM_PAGE));
DCHECK(!page->IsFlagSet(MemoryChunk::TO_PAGE));
size_t object_size = static_cast<size_t>(page->GetObject().Size());
static_cast<LargeObjectSpace*>(page->owner())->RemovePage(page, object_size);
page->ClearFlag(MemoryChunk::FROM_PAGE);
AddPage(page, object_size);
}
void LargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
size_ += static_cast<int>(page->size());
AccountCommitted(page->size());
objects_size_ += object_size;
page_count_++;
memory_chunk_list_.PushBack(page);
page->set_owner(this);
page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
}
void LargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
size_ -= static_cast<int>(page->size());
AccountUncommitted(page->size());
objects_size_ -= object_size;
page_count_--;
memory_chunk_list_.Remove(page);
page->set_owner(nullptr);
}
void LargeObjectSpace::FreeUnmarkedObjects() {
LargePage* current = first_page();
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
// Right-trimming does not update the objects_size_ counter. We are lazily
// updating it after every GC.
size_t surviving_object_size = 0;
while (current) {
LargePage* next_current = current->next_page();
HeapObject object = current->GetObject();
DCHECK(!marking_state->IsGrey(object));
size_t size = static_cast<size_t>(object.Size());
if (marking_state->IsBlack(object)) {
Address free_start;
surviving_object_size += size;
if ((free_start = current->GetAddressToShrink(object.address(), size)) !=
0) {
DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
current->ClearOutOfLiveRangeSlots(free_start);
const size_t bytes_to_free =
current->size() - (free_start - current->address());
heap()->memory_allocator()->PartialFreeMemory(
current, free_start, bytes_to_free,
current->area_start() + object.Size());
size_ -= bytes_to_free;
AccountUncommitted(bytes_to_free);
}
} else {
RemovePage(current, size);
heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
current);
}
current = next_current;
}
objects_size_ = surviving_object_size;
}
bool LargeObjectSpace::Contains(HeapObject object) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
bool owned = (chunk->owner() == this);
SLOW_DCHECK(!owned || ContainsSlow(object.address()));
return owned;
}
bool LargeObjectSpace::ContainsSlow(Address addr) {
for (LargePage* page : *this) {
if (page->Contains(addr)) return true;
}
return false;
}
std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator(
Heap* heap) {
return std::unique_ptr<ObjectIterator>(
new LargeObjectSpaceObjectIterator(this));
}
#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify(Isolate* isolate) {
size_t external_backing_store_bytes[kNumTypes];
for (int i = 0; i < kNumTypes; i++) {
external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
}
for (LargePage* chunk = first_page(); chunk != nullptr;
chunk = chunk->next_page()) {
// Each chunk contains an object that starts at the large object page's
// object area start.
HeapObject object = chunk->GetObject();
Page* page = Page::FromHeapObject(object);
CHECK(object.address() == page->area_start());
// The first word should be a map, and we expect all map pointers to be
// in map space or read-only space.
Map map = object.map();
CHECK(map.IsMap());
CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
// We have only the following types in the large object space:
if (!(object.IsAbstractCode() || object.IsSeqString() ||
object.IsExternalString() || object.IsThinString() ||
object.IsFixedArray() || object.IsFixedDoubleArray() ||
object.IsWeakFixedArray() || object.IsWeakArrayList() ||
object.IsPropertyArray() || object.IsByteArray() ||
object.IsFeedbackVector() || object.IsBigInt() ||
object.IsFreeSpace() || object.IsFeedbackMetadata() ||
object.IsContext() || object.IsUncompiledDataWithoutPreparseData() ||
object.IsPreparseData()) &&
!FLAG_young_generation_large_objects) {
FATAL("Found invalid Object (instance_type=%i) in large object space.",
object.map().instance_type());
}
// The object itself should look OK.
object.ObjectVerify(isolate);
if (!FLAG_verify_heap_skip_remembered_set) {
heap()->VerifyRememberedSetFor(object);
}
// Byte arrays and strings don't have interior pointers.
if (object.IsAbstractCode()) {
VerifyPointersVisitor code_visitor(heap());
object.IterateBody(map, object.Size(), &code_visitor);
} else if (object.IsFixedArray()) {
FixedArray array = FixedArray::cast(object);
for (int j = 0; j < array.length(); j++) {
Object element = array.get(j);
if (element.IsHeapObject()) {
HeapObject element_object = HeapObject::cast(element);
CHECK(IsValidHeapObject(heap(), element_object));
CHECK(element_object.map().IsMap());
}
}
} else if (object.IsPropertyArray()) {
PropertyArray array = PropertyArray::cast(object);
for (int j = 0; j < array.length(); j++) {
Object property = array.get(j);
if (property.IsHeapObject()) {
HeapObject property_object = HeapObject::cast(property);
CHECK(heap()->Contains(property_object));
CHECK(property_object.map().IsMap());
}
}
}
for (int i = 0; i < kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
external_backing_store_bytes[t] += chunk->ExternalBackingStoreBytes(t);
}
}
for (int i = 0; i < kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
}
}
#endif
#ifdef DEBUG
void LargeObjectSpace::Print() {
StdoutStream os;
LargeObjectSpaceObjectIterator it(this);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
obj.Print(os);
}
}
#endif // DEBUG
OldLargeObjectSpace::OldLargeObjectSpace(Heap* heap)
: LargeObjectSpace(heap, LO_SPACE) {}
OldLargeObjectSpace::OldLargeObjectSpace(Heap* heap, AllocationSpace id)
: LargeObjectSpace(heap, id) {}
void OldLargeObjectSpace::MergeOffThreadSpace(
OffThreadLargeObjectSpace* other) {
DCHECK(identity() == other->identity());
while (!other->memory_chunk_list().Empty()) {
LargePage* page = other->first_page();
HeapObject object = page->GetObject();
int size = object.Size();
other->RemovePage(page, size);
AddPage(page, size);
// TODO(leszeks): Here we should AllocationStep, see the TODO in
// PagedSpace::MergeOffThreadSpace.
if (heap()->incremental_marking()->black_allocation()) {
heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
}
DCHECK_IMPLIES(
heap()->incremental_marking()->black_allocation(),
heap()->incremental_marking()->marking_state()->IsBlack(object));
}
}
NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap, size_t capacity)
: LargeObjectSpace(heap, NEW_LO_SPACE),
pending_object_(0),
capacity_(capacity) {}
AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
// Do not allocate more objects if promoting the existing object would exceed
// the old generation capacity.
if (!heap()->CanExpandOldGeneration(SizeOfObjects())) {
return AllocationResult::Retry(identity());
}
// Allocation for the first object must succeed independent from the capacity.
if (SizeOfObjects() > 0 && static_cast<size_t>(object_size) > Available()) {
return AllocationResult::Retry(identity());
}
LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
if (page == nullptr) return AllocationResult::Retry(identity());
// The size of the first object may exceed the capacity.
capacity_ = Max(capacity_, SizeOfObjects());
HeapObject result = page->GetObject();
page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
page->SetFlag(MemoryChunk::TO_PAGE);
pending_object_.store(result.address(), std::memory_order_relaxed);
#ifdef ENABLE_MINOR_MC
if (FLAG_minor_mc) {
page->AllocateYoungGenerationBitmap();
heap()
->minor_mark_compact_collector()
->non_atomic_marking_state()
->ClearLiveness(page);
}
#endif // ENABLE_MINOR_MC
page->InitializationMemoryFence();
DCHECK(page->IsLargePage());
DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
AllocationStep(object_size, result.address(), object_size);
return result;
}
size_t NewLargeObjectSpace::Available() { return capacity_ - SizeOfObjects(); }
void NewLargeObjectSpace::Flip() {
for (LargePage* chunk = first_page(); chunk != nullptr;
chunk = chunk->next_page()) {
chunk->SetFlag(MemoryChunk::FROM_PAGE);
chunk->ClearFlag(MemoryChunk::TO_PAGE);
}
}
void NewLargeObjectSpace::FreeDeadObjects(
const std::function<bool(HeapObject)>& is_dead) {
bool is_marking = heap()->incremental_marking()->IsMarking();
size_t surviving_object_size = 0;
bool freed_pages = false;
for (auto it = begin(); it != end();) {
LargePage* page = *it;
it++;
HeapObject object = page->GetObject();
size_t size = static_cast<size_t>(object.Size());
if (is_dead(object)) {
freed_pages = true;
RemovePage(page, size);
heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
if (FLAG_concurrent_marking && is_marking) {
heap()->concurrent_marking()->ClearMemoryChunkData(page);
}
} else {
surviving_object_size += size;
}
}
// Right-trimming does not update the objects_size_ counter. We are lazily
// updating it after every GC.
objects_size_ = surviving_object_size;
if (freed_pages) {
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
}
void NewLargeObjectSpace::SetCapacity(size_t capacity) {
capacity_ = Max(capacity, SizeOfObjects());
}
CodeLargeObjectSpace::CodeLargeObjectSpace(Heap* heap)
: OldLargeObjectSpace(heap, CODE_LO_SPACE),
chunk_map_(kInitialChunkMapCapacity) {}
AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) {
return OldLargeObjectSpace::AllocateRaw(object_size, EXECUTABLE);
}
void CodeLargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
OldLargeObjectSpace::AddPage(page, object_size);
InsertChunkMapEntries(page);
heap()->isolate()->AddCodeMemoryChunk(page);
}
void CodeLargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
RemoveChunkMapEntries(page);
heap()->isolate()->RemoveCodeMemoryChunk(page);
OldLargeObjectSpace::RemovePage(page, object_size);
}
OffThreadLargeObjectSpace::OffThreadLargeObjectSpace(Heap* heap)
: LargeObjectSpace(heap, LO_SPACE) {
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
// OffThreadLargeObjectSpace doesn't work with third-party heap.
UNREACHABLE();
#endif
}
AllocationResult OffThreadLargeObjectSpace::AllocateRaw(int object_size) {
LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
if (page == nullptr) return AllocationResult::Retry(identity());
return page->GetObject();
}
void OffThreadLargeObjectSpace::FreeUnmarkedObjects() {
// We should never try to free objects in this space.
UNREACHABLE();
}
} // namespace internal
} // namespace v8


@@ -51,6 +51,7 @@ class CompactionSpaceCollection;
class FreeList;
class Isolate;
class LargeObjectSpace;
class LargePage;
class LinearAllocationArea;
class LocalArrayBufferTracker;
class LocalSpace;
@@ -1126,41 +1127,9 @@ class ReadOnlyPage : public Page {
friend class ReadOnlySpace;
};
class LargePage : public MemoryChunk {
public:
// A limit to guarantee that we do not overflow typed slot offset in
// the old to old remembered set.
// Note that this limit is higher than what assembler already imposes on
// x64 and ia32 architectures.
static const int kMaxCodePageSize = 512 * MB;
static LargePage* FromHeapObject(HeapObject o) {
return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
}
inline HeapObject GetObject();
inline LargePage* next_page() {
return static_cast<LargePage*>(list_node_.next());
}
// Uncommit memory that is not in use anymore by the object. If the object
// cannot be shrunk 0 is returned.
Address GetAddressToShrink(Address object_address, size_t object_size);
void ClearOutOfLiveRangeSlots(Address free_start);
private:
static LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
Executability executable);
friend class MemoryAllocator;
};
// Validate our estimates on the header size.
STATIC_ASSERT(sizeof(BasicMemoryChunk) <= BasicMemoryChunk::kHeaderSize);
STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
// The process-wide singleton that keeps track of code range regions with the
@@ -3326,181 +3295,6 @@ class SharedReadOnlySpace : public ReadOnlySpace {
~SharedReadOnlySpace() override;
};
// -----------------------------------------------------------------------------
// Large objects ( > kMaxRegularHeapObjectSize ) are allocated and
// managed by the large object space.
// Large objects do not move during garbage collections.
class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
public:
using iterator = LargePageIterator;
~LargeObjectSpace() override { TearDown(); }
// Releases internal resources, frees objects in this space.
void TearDown();
// Available bytes for objects in this space.
size_t Available() override;
size_t Size() override { return size_; }
size_t SizeOfObjects() override { return objects_size_; }
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() override;
int PageCount() { return page_count_; }
// Frees unmarked objects.
virtual void FreeUnmarkedObjects();
// Checks whether a heap object is in this space; O(1).
bool Contains(HeapObject obj);
// Checks whether an address is in the object area in this space. Iterates
// all objects in the space. May be slow.
bool ContainsSlow(Address addr);
// Checks whether the space is empty.
bool IsEmpty() { return first_page() == nullptr; }
virtual void AddPage(LargePage* page, size_t object_size);
virtual void RemovePage(LargePage* page, size_t object_size);
LargePage* first_page() {
return reinterpret_cast<LargePage*>(Space::first_page());
}
iterator begin() { return iterator(first_page()); }
iterator end() { return iterator(nullptr); }
std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
virtual bool is_off_thread() const { return false; }
#ifdef VERIFY_HEAP
virtual void Verify(Isolate* isolate);
#endif
#ifdef DEBUG
void Print() override;
#endif
protected:
LargeObjectSpace(Heap* heap, AllocationSpace id);
LargePage* AllocateLargePage(int object_size, Executability executable);
size_t size_; // allocated bytes
int page_count_; // number of chunks
size_t objects_size_; // size of objects
private:
friend class LargeObjectSpaceObjectIterator;
};
class OffThreadLargeObjectSpace;
class OldLargeObjectSpace : public LargeObjectSpace {
public:
explicit OldLargeObjectSpace(Heap* heap);
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
AllocateRaw(int object_size);
// Clears the marking state of live objects.
void ClearMarkingStateOfLiveObjects();
void PromoteNewLargeObject(LargePage* page);
V8_EXPORT_PRIVATE void MergeOffThreadSpace(OffThreadLargeObjectSpace* other);
protected:
explicit OldLargeObjectSpace(Heap* heap, AllocationSpace id);
V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
Executability executable);
};
class NewLargeObjectSpace : public LargeObjectSpace {
public:
NewLargeObjectSpace(Heap* heap, size_t capacity);
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
AllocateRaw(int object_size);
// Available bytes for objects in this space.
size_t Available() override;
void Flip();
void FreeDeadObjects(const std::function<bool(HeapObject)>& is_dead);
void SetCapacity(size_t capacity);
// The last allocated object that is not guaranteed to be initialized when
// the concurrent marker visits it.
Address pending_object() {
return pending_object_.load(std::memory_order_relaxed);
}
void ResetPendingObject() { pending_object_.store(0); }
private:
std::atomic<Address> pending_object_;
size_t capacity_;
};
class CodeLargeObjectSpace : public OldLargeObjectSpace {
public:
explicit CodeLargeObjectSpace(Heap* heap);
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
AllocateRaw(int object_size);
// Finds a large object page containing the given address, returns nullptr
// if such a page doesn't exist.
LargePage* FindPage(Address a);
protected:
void AddPage(LargePage* page, size_t object_size) override;
void RemovePage(LargePage* page, size_t object_size) override;
private:
static const size_t kInitialChunkMapCapacity = 1024;
void InsertChunkMapEntries(LargePage* page);
void RemoveChunkMapEntries(LargePage* page);
// Page-aligned addresses to their corresponding LargePage.
std::unordered_map<Address, LargePage*> chunk_map_;
};
class V8_EXPORT_PRIVATE OffThreadLargeObjectSpace : public LargeObjectSpace {
public:
explicit OffThreadLargeObjectSpace(Heap* heap);
V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size);
void FreeUnmarkedObjects() override;
bool is_off_thread() const override { return true; }
protected:
// OldLargeObjectSpace can mess with OffThreadLargeObjectSpace during merging.
friend class OldLargeObjectSpace;
V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
Executability executable);
};
class LargeObjectSpaceObjectIterator : public ObjectIterator {
public:
explicit LargeObjectSpaceObjectIterator(LargeObjectSpace* space);
HeapObject Next() override;
private:
LargePage* current_;
};
// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space or to evacuation candidates.
class OldGenerationMemoryChunkIterator {


@@ -30,6 +30,7 @@
#include "src/base/bounded-page-allocator.h"
#include "src/base/platform/platform.h"
#include "src/heap/factory.h"
#include "src/heap/large-spaces.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/spaces.h"
#include "src/objects/free-space.h"


@@ -9,6 +9,7 @@
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/heap.h"
#include "src/heap/large-spaces.h"
#include "src/heap/spaces-inl.h"
#include "test/unittests/test-utils.h"