heap: Mark space methods as const
A lot of the space/chunk methods can be trivially marked as const. There are
more methods that can be made const but these will require creating new const
object iterators, so those are left out for now.

Bug: v8:12612
Change-Id: I753b8b3f7a200ecf255596c7825917e4eb600b81
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3571815
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79824}
parent ea1b92ceac
commit 0dc4d88c60
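The change is mechanical: accessors that only read members gain a const qualifier, and the page iteration those accessors rely on gains const-qualified overloads. A minimal sketch of the pattern, using a hypothetical ExampleSpace class rather than V8's types:

    class ExampleSpace {
     public:
      // Reading a member does not mutate the object, so the getter can be const.
      size_t committed() const { return committed_; }

      // Mutators stay non-const.
      void AccountCommitted(size_t bytes) { committed_ += bytes; }

     private:
      size_t committed_ = 0;
    };

Once the getters are const, verification and accounting code that only holds a const pointer to a space can still call them.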
@@ -29,12 +29,12 @@ class V8_EXPORT_PRIVATE BaseSpace : public Malloced {
     return heap_;
   }
 
-  AllocationSpace identity() { return id_; }
+  AllocationSpace identity() const { return id_; }
 
   // Returns name of the space.
   static const char* GetSpaceName(AllocationSpace space);
 
-  const char* name() { return GetSpaceName(id_); }
+  const char* name() const { return GetSpaceName(id_); }
 
   void AccountCommitted(size_t bytes) {
     DCHECK_GE(committed_ + bytes, committed_);
@@ -51,15 +51,15 @@ class V8_EXPORT_PRIVATE BaseSpace : public Malloced {
 
   // Return the total amount committed memory for this space, i.e., allocatable
   // memory and page headers.
-  virtual size_t CommittedMemory() { return committed_; }
+  virtual size_t CommittedMemory() const { return committed_; }
 
-  virtual size_t MaximumCommittedMemory() { return max_committed_; }
+  virtual size_t MaximumCommittedMemory() const { return max_committed_; }
 
   // Approximate amount of physical memory committed for this space.
-  virtual size_t CommittedPhysicalMemory() = 0;
+  virtual size_t CommittedPhysicalMemory() const = 0;
 
   // Returns allocated size.
-  virtual size_t Size() = 0;
+  virtual size_t Size() const = 0;
 
  protected:
   BaseSpace(Heap* heap, AllocationSpace id)
@@ -138,7 +138,9 @@ class BasicMemoryChunk {
   Address address() const { return reinterpret_cast<Address>(this); }
 
   // Returns the offset of a given address to this page.
-  inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }
+  inline size_t Offset(Address a) const {
+    return static_cast<size_t>(a - address());
+  }
 
   // Some callers rely on the fact that this can operate on both
   // tagged and aligned object addresses.
@@ -200,11 +202,11 @@ class BasicMemoryChunk {
     return IsFlagSet(READ_ONLY_HEAP);
   }
 
-  bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
+  bool NeverEvacuate() const { return IsFlagSet(NEVER_EVACUATE); }
 
   void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
 
-  bool CanAllocate() {
+  bool CanAllocate() const {
     return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
   }
 
@@ -219,7 +221,7 @@ class BasicMemoryChunk {
            ((flags & COMPACTION_WAS_ABORTED) == 0);
   }
 
-  Executability executable() {
+  Executability executable() const {
     return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
   }
 
@@ -288,7 +290,7 @@ class BasicMemoryChunk {
         Bitmap::FromAddress(address() + kMarkingBitmapOffset));
   }
 
-  Address HighWaterMark() { return address() + high_water_mark_; }
+  Address HighWaterMark() const { return address() + high_water_mark_; }
 
   static inline void UpdateHighWaterMark(Address mark) {
     if (mark == kNullAddress) return;
@@ -60,7 +60,7 @@ LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
   return page;
 }
 
-size_t LargeObjectSpace::Available() {
+size_t LargeObjectSpace::Available() const {
   // We return zero here since we cannot take advantage of already allocated
   // large object memory.
   return 0;
@@ -226,7 +226,7 @@ LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
   return page;
 }
 
-size_t LargeObjectSpace::CommittedPhysicalMemory() {
+size_t LargeObjectSpace::CommittedPhysicalMemory() const {
   // On a platform that provides lazy committing of memory, we over-account
   // the actually committed memory. There is no easy way right now to support
   // precise accounting of committed memory in large object space.
@@ -347,7 +347,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
   objects_size_ = surviving_object_size;
 }
 
-bool LargeObjectSpace::Contains(HeapObject object) {
+bool LargeObjectSpace::Contains(HeapObject object) const {
   BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
 
   bool owned = (chunk->owner() == this);
@@ -357,8 +357,8 @@ bool LargeObjectSpace::Contains(HeapObject object) {
   return owned;
 }
 
-bool LargeObjectSpace::ContainsSlow(Address addr) {
-  for (LargePage* page : *this) {
+bool LargeObjectSpace::ContainsSlow(Address addr) const {
+  for (const LargePage* page : *this) {
     if (page->Contains(addr)) return true;
   }
   return false;
@@ -536,7 +536,9 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
   return AllocationResult::FromObject(result);
 }
 
-size_t NewLargeObjectSpace::Available() { return capacity_ - SizeOfObjects(); }
+size_t NewLargeObjectSpace::Available() const {
+  return capacity_ - SizeOfObjects();
+}
 
 void NewLargeObjectSpace::Flip() {
   for (LargePage* chunk = first_page(); chunk != nullptr;
@@ -43,6 +43,9 @@ class LargePage : public MemoryChunk {
   HeapObject GetObject() { return HeapObject::FromAddress(area_start()); }
 
   LargePage* next_page() { return static_cast<LargePage*>(list_node_.next()); }
+  const LargePage* next_page() const {
+    return static_cast<const LargePage*>(list_node_.next());
+  }
 
   // Uncommit memory that is not in use anymore by the object. If the object
   // cannot be shrunk 0 is returned.
@@ -66,6 +69,7 @@ STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
 class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
  public:
   using iterator = LargePageIterator;
+  using const_iterator = ConstLargePageIterator;
 
   ~LargeObjectSpace() override { TearDown(); }
 
@@ -73,27 +77,27 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
   void TearDown();
 
   // Available bytes for objects in this space.
-  size_t Available() override;
+  size_t Available() const override;
 
-  size_t Size() override { return size_; }
-  size_t SizeOfObjects() override { return objects_size_; }
+  size_t Size() const override { return size_; }
+  size_t SizeOfObjects() const override { return objects_size_; }
 
   // Approximate amount of physical memory committed for this space.
-  size_t CommittedPhysicalMemory() override;
+  size_t CommittedPhysicalMemory() const override;
 
-  int PageCount() { return page_count_; }
+  int PageCount() const { return page_count_; }
 
   // Frees unmarked objects.
   virtual void FreeUnmarkedObjects();
 
   // Checks whether a heap object is in this space; O(1).
-  bool Contains(HeapObject obj);
+  bool Contains(HeapObject obj) const;
   // Checks whether an address is in the object area in this space. Iterates all
   // objects in the space. May be slow.
-  bool ContainsSlow(Address addr);
+  bool ContainsSlow(Address addr) const;
 
   // Checks whether the space is empty.
-  bool IsEmpty() { return first_page() == nullptr; }
+  bool IsEmpty() const { return first_page() == nullptr; }
 
   virtual void AddPage(LargePage* page, size_t object_size);
   virtual void RemovePage(LargePage* page, size_t object_size);
@@ -101,10 +105,16 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
   LargePage* first_page() override {
     return reinterpret_cast<LargePage*>(memory_chunk_list_.front());
   }
+  const LargePage* first_page() const override {
+    return reinterpret_cast<const LargePage*>(memory_chunk_list_.front());
+  }
 
   iterator begin() { return iterator(first_page()); }
   iterator end() { return iterator(nullptr); }
 
+  const_iterator begin() const { return const_iterator(first_page()); }
+  const_iterator end() const { return const_iterator(nullptr); }
+
   std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
 
   virtual bool is_off_thread() const { return false; }
@@ -119,7 +129,7 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
 
   // The last allocated object that is not guaranteed to be initialized when the
   // concurrent marker visits it.
-  Address pending_object() {
+  Address pending_object() const {
     return pending_object_.load(std::memory_order_acquire);
   }
 
@@ -187,7 +197,7 @@ class NewLargeObjectSpace : public LargeObjectSpace {
   AllocateRaw(int object_size);
 
   // Available bytes for objects in this space.
-  size_t Available() override;
+  size_t Available() const override;
 
   void Flip();
 
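The LargeObjectSpace hunks above also introduce a const iteration path: a ConstLargePageIterator alias plus const overloads of first_page(), begin(), and end(), which is what lets loops like `for (const LargePage* page : *this)` compile inside const methods such as ContainsSlow(). A minimal, self-contained sketch of that shape, with hypothetical Page/PageIter/PageList names rather than V8's actual types:

    struct Page {
      Page* next = nullptr;
    };

    template <typename PageT>
    class PageIter {
     public:
      explicit PageIter(PageT* page) : page_(page) {}
      PageT* operator*() const { return page_; }
      bool operator!=(const PageIter& other) const { return page_ != other.page_; }
      PageIter& operator++() {
        page_ = page_->next;
        return *this;
      }

     private:
      PageT* page_;
    };

    class PageList {
     public:
      using iterator = PageIter<Page>;
      using const_iterator = PageIter<const Page>;

      iterator begin() { return iterator(first_); }
      iterator end() { return iterator(nullptr); }
      // The const overloads are what allow range-based for loops over a
      // const PageList, and hence iteration inside const member functions.
      const_iterator begin() const { return const_iterator(first_); }
      const_iterator end() const { return const_iterator(nullptr); }

     private:
      Page* first_ = nullptr;
    };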
@@ -197,7 +197,7 @@ MemoryChunk::MemoryChunk(Heap* heap, BaseSpace* space, size_t chunk_size,
 #endif
 }
 
-size_t MemoryChunk::CommittedPhysicalMemory() {
+size_t MemoryChunk::CommittedPhysicalMemory() const {
   if (!base::OS::HasLazyCommits() || IsLargePage()) return size();
   return active_system_pages_.Size(MemoryAllocator::GetCommitPageSizeBits());
 }
@@ -98,7 +98,7 @@ class MemoryChunk : public BasicMemoryChunk {
     return static_cast<ConcurrentSweepingState>(concurrent_sweeping_.load());
   }
 
-  bool SweepingDone() {
+  bool SweepingDone() const {
     return concurrent_sweeping_ == ConcurrentSweepingState::kDone;
   }
 
@@ -158,7 +158,7 @@ class MemoryChunk : public BasicMemoryChunk {
   int FreeListsLength();
 
   // Approximate amount of physical memory committed for this chunk.
-  V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory();
+  V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory() const;
 
   class ProgressBar& ProgressBar() {
     return progress_bar_;
@@ -171,7 +171,7 @@ class MemoryChunk : public BasicMemoryChunk {
   inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                                  size_t amount);
 
-  size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) {
+  size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const {
     return external_backing_store_bytes_[type];
   }
 
@@ -167,7 +167,7 @@ bool SemiSpace::Uncommit() {
   return true;
 }
 
-size_t SemiSpace::CommittedPhysicalMemory() {
+size_t SemiSpace::CommittedPhysicalMemory() const {
   if (!IsCommitted()) return 0;
   if (!base::OS::HasLazyCommits()) return CommittedMemory();
   return committed_physical_memory_;
@@ -361,7 +361,7 @@ void SemiSpace::Print() {}
 #endif
 
 #ifdef VERIFY_HEAP
-void SemiSpace::Verify() {
+void SemiSpace::Verify() const {
   bool is_from_space = (id_ == kFromSpace);
   size_t external_backing_store_bytes[kNumTypes];
 
@@ -372,7 +372,7 @@ void SemiSpace::Verify() {
   int actual_pages = 0;
   size_t computed_committed_physical_memory = 0;
 
-  for (Page* page : *this) {
+  for (const Page* page : *this) {
     CHECK_EQ(page->owner(), this);
     CHECK(page->InNewSpace());
     CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::FROM_PAGE
@@ -435,7 +435,7 @@ void SemiSpace::AssertValidRange(Address start, Address end) {
 // -----------------------------------------------------------------------------
 // SemiSpaceObjectIterator implementation.
 
-SemiSpaceObjectIterator::SemiSpaceObjectIterator(NewSpace* space) {
+SemiSpaceObjectIterator::SemiSpaceObjectIterator(const NewSpace* space) {
   Initialize(space->first_allocatable_address(), space->top());
 }
 
@@ -445,7 +445,7 @@ void SemiSpaceObjectIterator::Initialize(Address start, Address end) {
   limit_ = end;
 }
 
-size_t NewSpace::CommittedPhysicalMemory() {
+size_t NewSpace::CommittedPhysicalMemory() const {
   if (!base::OS::HasLazyCommits()) return CommittedMemory();
   BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
   size_t size = to_space_.CommittedPhysicalMemory();
@@ -736,7 +736,7 @@ void NewSpace::FreeLinearAllocationArea() {
   UpdateInlineAllocationLimit(0);
 }
 
-void NewSpace::VerifyTop() {
+void NewSpace::VerifyTop() const {
   // Ensure validity of LAB: start <= top <= limit
   DCHECK_LE(allocation_info_->start(), allocation_info_->top());
   DCHECK_LE(allocation_info_->top(), allocation_info_->limit());
@@ -754,7 +754,7 @@ void NewSpace::VerifyTop() {
 #ifdef VERIFY_HEAP
 // We do not use the SemiSpaceObjectIterator because verification doesn't assume
 // that it works (it depends on the invariants we are checking).
-void NewSpace::Verify(Isolate* isolate) {
+void NewSpace::Verify(Isolate* isolate) const {
   // The allocation pointer should be in the space or at the very end.
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 
@@ -59,7 +59,7 @@ class SemiSpace : public Space {
 
   bool Commit();
   bool Uncommit();
-  bool IsCommitted() { return !memory_chunk_list_.Empty(); }
+  bool IsCommitted() const { return !memory_chunk_list_.Empty(); }
 
   // Grow the semispace to the new capacity. The new capacity requested must
   // be larger than the current capacity and less than the maximum capacity.
@@ -73,7 +73,7 @@ class SemiSpace : public Space {
   bool EnsureCurrentCapacity();
 
   // Returns the start address of the first page of the space.
-  Address space_start() {
+  Address space_start() const {
     DCHECK_NE(memory_chunk_list_.front(), nullptr);
     return memory_chunk_list_.front()->area_start();
   }
@@ -81,10 +81,10 @@ class SemiSpace : public Space {
   Page* current_page() { return current_page_; }
 
   // Returns the start address of the current page of the space.
-  Address page_low() { return current_page_->area_start(); }
+  Address page_low() const { return current_page_->area_start(); }
 
   // Returns one past the end address of the current page of the space.
-  Address page_high() { return current_page_->area_end(); }
+  Address page_high() const { return current_page_->area_end(); }
 
   bool AdvancePage() {
     Page* next_page = current_page_->next_page();
@@ -109,34 +109,34 @@ class SemiSpace : public Space {
   Page* InitializePage(MemoryChunk* chunk) override;
 
   // Age mark accessors.
-  Address age_mark() { return age_mark_; }
+  Address age_mark() const { return age_mark_; }
   void set_age_mark(Address mark);
 
   // Returns the current capacity of the semispace.
-  size_t current_capacity() { return current_capacity_; }
+  size_t current_capacity() const { return current_capacity_; }
 
   // Returns the target capacity of the semispace.
-  size_t target_capacity() { return target_capacity_; }
+  size_t target_capacity() const { return target_capacity_; }
 
   // Returns the maximum capacity of the semispace.
-  size_t maximum_capacity() { return maximum_capacity_; }
+  size_t maximum_capacity() const { return maximum_capacity_; }
 
   // Returns the initial capacity of the semispace.
-  size_t minimum_capacity() { return minimum_capacity_; }
+  size_t minimum_capacity() const { return minimum_capacity_; }
 
-  SemiSpaceId id() { return id_; }
+  SemiSpaceId id() const { return id_; }
 
   // Approximate amount of physical memory committed for this space.
-  size_t CommittedPhysicalMemory() override;
+  size_t CommittedPhysicalMemory() const override;
 
   // If we don't have these here then SemiSpace will be abstract. However
   // they should never be called:
 
-  size_t Size() override { UNREACHABLE(); }
+  size_t Size() const override { UNREACHABLE(); }
 
-  size_t SizeOfObjects() override { return Size(); }
+  size_t SizeOfObjects() const override { return Size(); }
 
-  size_t Available() override { UNREACHABLE(); }
+  size_t Available() const override { UNREACHABLE(); }
 
   Page* first_page() override {
     return reinterpret_cast<Page*>(memory_chunk_list_.front());
@@ -172,7 +172,7 @@ class SemiSpace : public Space {
 #endif
 
 #ifdef VERIFY_HEAP
-  virtual void Verify();
+  virtual void Verify() const;
 #endif
 
   void AddRangeToActiveSystemPages(Address start, Address end);
@@ -220,7 +220,7 @@ class SemiSpace : public Space {
 class SemiSpaceObjectIterator : public ObjectIterator {
  public:
   // Create an iterator over the allocated objects in the given to-space.
-  explicit SemiSpaceObjectIterator(NewSpace* space);
+  explicit SemiSpaceObjectIterator(const NewSpace* space);
 
   inline HeapObject Next() override;
 
@@ -239,7 +239,7 @@ class SemiSpaceObjectIterator : public ObjectIterator {
 // The new space consists of a contiguous pair of semispaces. It simply
 // forwards most functions to the appropriate semispace.
 
-class V8_EXPORT_PRIVATE NewSpace
+class V8_EXPORT_PRIVATE NewSpace final
     : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
  public:
   using iterator = PageIterator;
@@ -268,17 +268,17 @@ class V8_EXPORT_PRIVATE NewSpace
   void Shrink();
 
   // Return the allocated bytes in the active semispace.
-  size_t Size() final {
+  size_t Size() const final {
     DCHECK_GE(top(), to_space_.page_low());
     return (to_space_.current_capacity() - Page::kPageSize) / Page::kPageSize *
                MemoryChunkLayout::AllocatableMemoryInDataPage() +
            static_cast<size_t>(top() - to_space_.page_low());
   }
 
-  size_t SizeOfObjects() final { return Size(); }
+  size_t SizeOfObjects() const final { return Size(); }
 
   // Return the allocatable capacity of a semispace.
-  size_t Capacity() {
+  size_t Capacity() const {
     SLOW_DCHECK(to_space_.target_capacity() == from_space_.target_capacity());
     return (to_space_.target_capacity() / Page::kPageSize) *
            MemoryChunkLayout::AllocatableMemoryInDataPage();
@@ -286,27 +286,27 @@ class V8_EXPORT_PRIVATE NewSpace
 
   // Return the current size of a semispace, allocatable and non-allocatable
   // memory.
-  size_t TotalCapacity() {
+  size_t TotalCapacity() const {
     DCHECK(to_space_.target_capacity() == from_space_.target_capacity());
     return to_space_.target_capacity();
   }
 
   // Committed memory for NewSpace is the committed memory of both semi-spaces
   // combined.
-  size_t CommittedMemory() final {
+  size_t CommittedMemory() const final {
     return from_space_.CommittedMemory() + to_space_.CommittedMemory();
   }
 
-  size_t MaximumCommittedMemory() final {
+  size_t MaximumCommittedMemory() const final {
     return from_space_.MaximumCommittedMemory() +
            to_space_.MaximumCommittedMemory();
   }
 
   // Approximate amount of physical memory committed for this space.
-  size_t CommittedPhysicalMemory() final;
+  size_t CommittedPhysicalMemory() const final;
 
   // Return the available bytes without growing.
-  size_t Available() final {
+  size_t Available() const final {
     DCHECK_GE(Capacity(), Size());
     return Capacity() - Size();
   }
@@ -318,7 +318,7 @@ class V8_EXPORT_PRIVATE NewSpace
     return to_space_.ExternalBackingStoreBytes(type);
   }
 
-  size_t ExternalBackingStoreBytes() {
+  size_t ExternalBackingStoreBytes() const {
     size_t result = 0;
     for (int i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
       result +=
@@ -327,7 +327,7 @@ class V8_EXPORT_PRIVATE NewSpace
     return result;
   }
 
-  size_t AllocatedSinceLastGC() {
+  size_t AllocatedSinceLastGC() const {
     const Address age_mark = to_space_.age_mark();
     DCHECK_NE(age_mark, kNullAddress);
     DCHECK_NE(top(), kNullAddress);
@@ -364,34 +364,36 @@ class V8_EXPORT_PRIVATE NewSpace
   bool Rebalance();
 
   // Return the maximum capacity of a semispace.
-  size_t MaximumCapacity() {
+  size_t MaximumCapacity() const {
     DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
     return to_space_.maximum_capacity();
   }
 
-  bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
+  bool IsAtMaximumCapacity() const {
+    return TotalCapacity() == MaximumCapacity();
+  }
 
   // Returns the initial capacity of a semispace.
-  size_t InitialTotalCapacity() {
+  size_t InitialTotalCapacity() const {
     DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
     return to_space_.minimum_capacity();
   }
 
-  void VerifyTop();
+  void VerifyTop() const;
 
-  Address original_top_acquire() {
+  Address original_top_acquire() const {
     return original_top_.load(std::memory_order_acquire);
   }
-  Address original_limit_relaxed() {
+  Address original_limit_relaxed() const {
    return original_limit_.load(std::memory_order_relaxed);
   }
 
   // Return the address of the first allocatable address in the active
   // semispace. This may be the address where the first object resides.
-  Address first_allocatable_address() { return to_space_.space_start(); }
+  Address first_allocatable_address() const { return to_space_.space_start(); }
 
   // Get the age mark of the inactive semispace.
-  Address age_mark() { return from_space_.age_mark(); }
+  Address age_mark() const { return from_space_.age_mark(); }
   // Set the age mark in the active semispace.
   void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
 
@@ -433,7 +435,7 @@ class V8_EXPORT_PRIVATE NewSpace
 
 #ifdef VERIFY_HEAP
   // Verify the active semispace.
-  virtual void Verify(Isolate* isolate);
+  virtual void Verify(Isolate* isolate) const;
 #endif
 
 #ifdef DEBUG
@@ -452,7 +454,7 @@ class V8_EXPORT_PRIVATE NewSpace
     return from_space_.Uncommit();
   }
 
-  bool IsFromSpaceCommitted() { return from_space_.IsCommitted(); }
+  bool IsFromSpaceCommitted() const { return from_space_.IsCommitted(); }
 
   SemiSpace* active_space() { return &to_space_; }
 
@@ -532,7 +534,7 @@ class V8_EXPORT_PRIVATE NewSpace
       int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
 
   bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
-  bool SupportsAllocationObserver() override { return true; }
+  bool SupportsAllocationObserver() const override { return true; }
 
   friend class SemiSpaceObjectIterator;
 };
@@ -205,7 +205,7 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
   DCHECK_EQ(0u, other->Capacity());
 }
 
-size_t PagedSpace::CommittedPhysicalMemory() {
+size_t PagedSpace::CommittedPhysicalMemory() const {
   if (!base::OS::HasLazyCommits()) {
     DCHECK_EQ(0, committed_physical_memory());
     return CommittedMemory();
@@ -231,10 +231,10 @@ void PagedSpace::DecrementCommittedPhysicalMemory(size_t decrement_value) {
 }
 
 #if DEBUG
-void PagedSpace::VerifyCommittedPhysicalMemory() {
+void PagedSpace::VerifyCommittedPhysicalMemory() const {
   heap()->safepoint()->AssertActive();
   size_t size = 0;
-  for (Page* page : *this) {
+  for (const Page* page : *this) {
     DCHECK(page->SweepingDone());
     size += page->CommittedPhysicalMemory();
   }
@@ -371,9 +371,9 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::ExpandBackground(
   return std::make_pair(object_start, size_in_bytes);
 }
 
-int PagedSpace::CountTotalPages() {
+int PagedSpace::CountTotalPages() const {
   int count = 0;
-  for (Page* page : *this) {
+  for (const Page* page : *this) {
     count++;
     USE(page);
   }
@@ -447,7 +447,7 @@ void PagedSpace::MakeLinearAllocationAreaIterable() {
   }
 }
 
-size_t PagedSpace::Available() {
+size_t PagedSpace::Available() const {
   ConcurrentAllocationMutex guard(this);
   return free_list_->Available();
 }
@@ -719,7 +719,7 @@ PagedSpace::TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
   return std::make_pair(start, used_size_in_bytes);
 }
 
-bool PagedSpace::IsSweepingAllowedOnThread(LocalHeap* local_heap) {
+bool PagedSpace::IsSweepingAllowedOnThread(LocalHeap* local_heap) const {
   // Code space sweeping is only allowed on main thread.
   return (local_heap && local_heap->is_main_thread()) ||
          identity() != CODE_SPACE;
@@ -1085,7 +1085,7 @@ void MapSpace::SortFreeList() {
 }
 
 #ifdef VERIFY_HEAP
-void MapSpace::VerifyObject(HeapObject object) { CHECK(object.IsMap()); }
+void MapSpace::VerifyObject(HeapObject object) const { CHECK(object.IsMap()); }
 #endif
 
 }  // namespace internal
@@ -69,7 +69,7 @@ class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
 
   Address cur_addr_;  // Current iteration point.
   Address cur_end_;   // End iteration point.
-  PagedSpace* space_;
+  const PagedSpace* const space_;
   PageRange page_range_;
   PageRange::iterator current_page_;
 #if V8_COMPRESS_POINTERS
@@ -99,19 +99,19 @@ class V8_EXPORT_PRIVATE PagedSpace
   bool ContainsSlow(Address addr) const;
 
   // Does the space need executable memory?
-  Executability executable() { return executable_; }
+  Executability executable() const { return executable_; }
 
   // Prepares for a mark-compact GC.
   void PrepareForMarkCompact();
 
   // Current capacity without growing (Size() + Available()).
-  size_t Capacity() { return accounting_stats_.Capacity(); }
+  size_t Capacity() const { return accounting_stats_.Capacity(); }
 
   // Approximate amount of physical memory committed for this space.
-  size_t CommittedPhysicalMemory() override;
+  size_t CommittedPhysicalMemory() const override;
 
 #if DEBUG
-  void VerifyCommittedPhysicalMemory();
+  void VerifyCommittedPhysicalMemory() const;
 #endif  // DEBUG
 
   void IncrementCommittedPhysicalMemory(size_t increment_value);
@@ -131,17 +131,17 @@ class V8_EXPORT_PRIVATE PagedSpace
   // The bytes in the linear allocation area are not included in this total
   // because updating the stats would slow down allocation. New pages are
   // immediately added to the free list so they show up here.
-  size_t Available() override;
+  size_t Available() const override;
 
   // Allocated bytes in this space. Garbage bytes that were not found due to
   // concurrent sweeping are counted as being allocated! The bytes in the
   // current linear allocation area (between top and limit) are also counted
   // here.
-  size_t Size() override { return accounting_stats_.Size(); }
+  size_t Size() const override { return accounting_stats_.Size(); }
 
   // Wasted bytes in this space. These are just the bytes that were thrown away
   // due to being too small to use for allocation.
-  virtual size_t Waste() { return free_list_->wasted_bytes(); }
+  virtual size_t Waste() const { return free_list_->wasted_bytes(); }
 
   // Allocate the requested number of bytes in the space if possible, return a
   // failure object if not.
@@ -257,7 +257,7 @@ class V8_EXPORT_PRIVATE PagedSpace
 
   // Overridden by subclasses to verify space-specific object
   // properties (e.g., only maps or free-list nodes are in map space).
-  virtual void VerifyObject(HeapObject obj) {}
+  virtual void VerifyObject(HeapObject obj) const {}
 #endif
 
 #ifdef DEBUG
@@ -271,19 +271,21 @@ class V8_EXPORT_PRIVATE PagedSpace
   static void ResetCodeStatistics(Isolate* isolate);
 #endif
 
-  bool CanExpand(size_t size);
+  bool CanExpand(size_t size) const;
 
   // Returns the number of total pages in this space.
-  int CountTotalPages();
+  int CountTotalPages() const;
 
   // Return size of allocatable area on a page in this space.
-  inline int AreaSize() { return static_cast<int>(area_size_); }
+  inline int AreaSize() const { return static_cast<int>(area_size_); }
 
-  bool is_compaction_space() {
+  bool is_compaction_space() const {
     return compaction_space_kind_ != CompactionSpaceKind::kNone;
   }
 
-  CompactionSpaceKind compaction_space_kind() { return compaction_space_kind_; }
+  CompactionSpaceKind compaction_space_kind() const {
+    return compaction_space_kind_;
+  }
 
   // Merges {other} into the current space. Note that this modifies {other},
   // e.g., removes its bump pointer area and resets statistics.
@@ -321,9 +323,9 @@ class V8_EXPORT_PRIVATE PagedSpace
 
   void SetLinearAllocationArea(Address top, Address limit);
 
-  Address original_top() { return original_top_; }
+  Address original_top() const { return original_top_; }
 
-  Address original_limit() { return original_limit_; }
+  Address original_limit() const { return original_limit_; }
 
   void MoveOriginalTopForward() {
     base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_);
@@ -343,7 +345,7 @@ class V8_EXPORT_PRIVATE PagedSpace
 private:
  class ConcurrentAllocationMutex {
   public:
-   explicit ConcurrentAllocationMutex(PagedSpace* space) {
+   explicit ConcurrentAllocationMutex(const PagedSpace* space) {
     if (space->SupportsConcurrentAllocation()) {
      guard_.emplace(&space->space_mutex_);
    }
@@ -352,13 +354,15 @@ class V8_EXPORT_PRIVATE PagedSpace
    base::Optional<base::MutexGuard> guard_;
  };
 
-  bool SupportsConcurrentAllocation() { return !is_compaction_space(); }
+  bool SupportsConcurrentAllocation() const { return !is_compaction_space(); }
 
   // Set space linear allocation area.
   void SetTopAndLimit(Address top, Address limit);
   void DecreaseLimit(Address new_limit);
   void UpdateInlineAllocationLimit(size_t min_size) override;
-  bool SupportsAllocationObserver() override { return !is_compaction_space(); }
+  bool SupportsAllocationObserver() const override {
+    return !is_compaction_space();
+  }
 
   // Slow path of allocation function
   V8_WARN_UNUSED_RESULT AllocationResult
@@ -368,13 +372,13 @@ class V8_EXPORT_PRIVATE PagedSpace
  protected:
   // PagedSpaces that should be included in snapshots have different, i.e.,
   // smaller, initial pages.
-  virtual bool snapshotable() { return true; }
+  virtual bool snapshotable() const { return true; }
 
-  bool HasPages() { return first_page() != nullptr; }
+  bool HasPages() const { return first_page() != nullptr; }
 
   // Returns whether sweeping of this space is safe on this thread. Code space
   // sweeping is only allowed on the main thread.
-  bool IsSweepingAllowedOnThread(LocalHeap* local_heap);
+  bool IsSweepingAllowedOnThread(LocalHeap* local_heap) const;
 
   // Cleans up the space, frees all pages in this space except those belonging
   // to the initial chunk, uncommits addresses in the initial chunk.
@@ -448,7 +452,7 @@ class V8_EXPORT_PRIVATE PagedSpace
   AllocationStats accounting_stats_;
 
   // Mutex guarding any concurrent access to the space.
-  base::Mutex space_mutex_;
+  mutable base::Mutex space_mutex_;
 
   // The top and the limit at the time of setting the linear allocation area.
   // These values are protected by pending_allocation_mutex_.
@@ -490,7 +494,7 @@ class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
 
   Page* Expand() override;
   // The space is temporary and not included in any snapshots.
-  bool snapshotable() override { return false; }
+  bool snapshotable() const override { return false; }
   // Pages that were allocated in this local space and need to be merged
   // to the main space.
   std::vector<Page*> new_pages_;
@@ -576,7 +580,7 @@ class MapSpace : public PagedSpace {
       : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE, FreeList::CreateFreeList(),
                    &paged_allocation_info_) {}
 
-  int RoundSizeDownToObjectAlignment(int size) override {
+  int RoundSizeDownToObjectAlignment(int size) const override {
     if (base::bits::IsPowerOfTwo(Map::kSize)) {
       return RoundDown(size, Map::kSize);
     } else {
@@ -587,7 +591,7 @@ class MapSpace : public PagedSpace {
   void SortFreeList();
 
 #ifdef VERIFY_HEAP
-  void VerifyObject(HeapObject obj) override;
+  void VerifyObject(HeapObject obj) const override;
 #endif
 
  private:
@@ -612,7 +616,7 @@ class OldGenerationMemoryChunkIterator {
     kCodeLargeObjectState,
     kFinishedState
   };
-  Heap* heap_;
+  Heap* const heap_;
   State state_;
   PageIterator old_iterator_;
   PageIterator code_iterator_;
@@ -253,10 +253,12 @@ size_t ReadOnlyHeap::read_only_object_cache_size() const {
   return read_only_object_cache_.size();
 }
 
-ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(ReadOnlyHeap* ro_heap)
+ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(
+    const ReadOnlyHeap* ro_heap)
     : ReadOnlyHeapObjectIterator(ro_heap->read_only_space()) {}
 
-ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(ReadOnlySpace* ro_space)
+ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(
+    const ReadOnlySpace* ro_space)
     : ro_space_(ro_space),
       current_page_(V8_ENABLE_THIRD_PARTY_HEAP_BOOL
                         ? std::vector<ReadOnlyPage*>::iterator()
@@ -147,13 +147,13 @@ class SoleReadOnlyHeap : public ReadOnlyHeap {
 // This class enables iterating over all read-only heap objects.
 class V8_EXPORT_PRIVATE ReadOnlyHeapObjectIterator {
  public:
-  explicit ReadOnlyHeapObjectIterator(ReadOnlyHeap* ro_heap);
-  explicit ReadOnlyHeapObjectIterator(ReadOnlySpace* ro_space);
+  explicit ReadOnlyHeapObjectIterator(const ReadOnlyHeap* ro_heap);
+  explicit ReadOnlyHeapObjectIterator(const ReadOnlySpace* ro_space);
 
   HeapObject Next();
 
  private:
-  ReadOnlySpace* const ro_space_;
+  const ReadOnlySpace* const ro_space_;
   std::vector<ReadOnlyPage*>::const_iterator current_page_;
   Address current_addr_;
 };
@@ -430,7 +430,7 @@ void ReadOnlySpace::Unseal() {
   is_marked_read_only_ = false;
 }
 
-bool ReadOnlySpace::ContainsSlow(Address addr) {
+bool ReadOnlySpace::ContainsSlow(Address addr) const {
   BasicMemoryChunk* c = BasicMemoryChunk::FromAddress(addr);
   for (BasicMemoryChunk* chunk : pages_) {
     if (chunk == c) return true;
@@ -442,7 +442,7 @@ namespace {
 // Only iterates over a single chunk as the chunk iteration is done externally.
 class ReadOnlySpaceObjectIterator : public ObjectIterator {
  public:
-  ReadOnlySpaceObjectIterator(Heap* heap, ReadOnlySpace* space,
+  ReadOnlySpaceObjectIterator(const Heap* heap, const ReadOnlySpace* space,
                               BasicMemoryChunk* chunk)
       : cur_addr_(kNullAddress), cur_end_(kNullAddress), space_(space) {}
 
@@ -481,7 +481,7 @@ class ReadOnlySpaceObjectIterator : public ObjectIterator {
 
   Address cur_addr_;  // Current iteration point.
   Address cur_end_;   // End iteration point.
-  ReadOnlySpace* space_;
+  const ReadOnlySpace* const space_;
 };
 }  // namespace
 
@@ -510,7 +510,7 @@ class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
 };
 }  // namespace
 
-void ReadOnlySpace::Verify(Isolate* isolate) {
+void ReadOnlySpace::Verify(Isolate* isolate) const {
   bool allocation_pointer_found_in_space = top_ == limit_;
   VerifyReadOnlyPointersVisitor visitor(isolate->heap());
 
@@ -558,7 +558,7 @@ void ReadOnlySpace::Verify(Isolate* isolate) {
 }
 
 #ifdef DEBUG
-void ReadOnlySpace::VerifyCounters(Heap* heap) {
+void ReadOnlySpace::VerifyCounters(Heap* heap) const {
   size_t total_capacity = 0;
   size_t total_allocated = 0;
   for (BasicMemoryChunk* page : pages_) {
@@ -582,7 +582,7 @@ void ReadOnlySpace::VerifyCounters(Heap* heap) {
 #endif  // DEBUG
 #endif  // VERIFY_HEAP
 
-size_t ReadOnlySpace::CommittedPhysicalMemory() {
+size_t ReadOnlySpace::CommittedPhysicalMemory() const {
   if (!base::OS::HasLazyCommits()) return CommittedMemory();
   BasicMemoryChunk::UpdateHighWaterMark(top_);
   size_t size = 0;
@@ -212,20 +212,20 @@ class ReadOnlySpace : public BaseSpace {
   // to write it into the free space nodes that were already created.
   void RepairFreeSpacesAfterDeserialization();
 
-  size_t Size() override { return accounting_stats_.Size(); }
-  V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory() override;
+  size_t Size() const override { return accounting_stats_.Size(); }
+  V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory() const override;
 
   const std::vector<ReadOnlyPage*>& pages() const { return pages_; }
   Address top() const { return top_; }
   Address limit() const { return limit_; }
   size_t Capacity() const { return capacity_; }
 
-  bool ContainsSlow(Address addr);
+  bool ContainsSlow(Address addr) const;
   V8_EXPORT_PRIVATE void ShrinkPages();
 #ifdef VERIFY_HEAP
-  void Verify(Isolate* isolate);
+  void Verify(Isolate* isolate) const;
 #ifdef DEBUG
-  void VerifyCounters(Heap* heap);
+  void VerifyCounters(Heap* heap) const;
 #endif  // DEBUG
 #endif  // VERIFY_HEAP
 
@@ -236,7 +236,7 @@ void Space::PauseAllocationObservers() { allocation_counter_.Pause(); }
 void Space::ResumeAllocationObservers() { allocation_counter_.Resume(); }
 
 Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
-                                          size_t min_size) {
+                                          size_t min_size) const {
   DCHECK_GE(end - start, min_size);
 
   if (!use_lab_) {
@@ -288,7 +288,7 @@ void SpaceWithLinearArea::UpdateAllocationOrigins(AllocationOrigin origin) {
   allocations_origins_[static_cast<int>(origin)]++;
 }
 
-void SpaceWithLinearArea::PrintAllocationsOrigins() {
+void SpaceWithLinearArea::PrintAllocationsOrigins() const {
   PrintIsolate(
       heap()->isolate(),
       "Allocations Origins for %s: GeneratedCode:%zu - Runtime:%zu - GC:%zu\n",
@@ -145,12 +145,12 @@ class V8_EXPORT_PRIVATE Space : public BaseSpace {
 
   // Returns size of objects. Can differ from the allocated size
   // (e.g. see OldLargeObjectSpace).
-  virtual size_t SizeOfObjects() { return Size(); }
+  virtual size_t SizeOfObjects() const { return Size(); }
 
   // Return the available bytes without growing.
-  virtual size_t Available() = 0;
+  virtual size_t Available() const = 0;
 
-  virtual int RoundSizeDownToObjectAlignment(int size) {
+  virtual int RoundSizeDownToObjectAlignment(int size) const {
     if (id_ == CODE_SPACE) {
       return RoundDown(size, kCodeAlignment);
     } else {
@@ -372,6 +372,7 @@ class PageIteratorImpl
 using PageIterator = PageIteratorImpl<Page>;
 using ConstPageIterator = PageIteratorImpl<const Page>;
 using LargePageIterator = PageIteratorImpl<LargePage>;
+using ConstLargePageIterator = PageIteratorImpl<const LargePage>;
 
 class PageRange {
  public:
@@ -466,7 +467,7 @@ class SpaceWithLinearArea : public Space {
                       LinearAllocationArea* allocation_info)
       : Space(heap, id, free_list), allocation_info_(allocation_info) {}
 
-  virtual bool SupportsAllocationObserver() = 0;
+  virtual bool SupportsAllocationObserver() const = 0;
 
   // Returns the allocation pointer in this space.
   Address top() const { return allocation_info_->top(); }
@@ -504,7 +505,7 @@ class SpaceWithLinearArea : public Space {
   // area bounded by [start, end), this function computes the limit to use to
   // allow proper observation based on existing observers. min_size specifies
   // the minimum size that the limited area should have.
-  Address ComputeLimit(Address start, Address end, size_t min_size);
+  Address ComputeLimit(Address start, Address end, size_t min_size) const;
   V8_EXPORT_PRIVATE virtual void UpdateInlineAllocationLimit(
       size_t min_size) = 0;
 
@@ -512,7 +513,7 @@ class SpaceWithLinearArea : public Space {
   void EnableInlineAllocation();
   bool IsInlineAllocationEnabled() const { return use_lab_; }
 
-  void PrintAllocationsOrigins();
+  void PrintAllocationsOrigins() const;
 
  protected:
   V8_EXPORT_PRIVATE void UpdateAllocationOrigins(AllocationOrigin origin);