[heap] Clean-up MemoryChunk allocation area constants.
Change-Id: I8ba59546ab93c7af98bc5ece2f0160628844dd92
Reviewed-on: https://chromium-review.googlesource.com/c/1280584
Reviewed-by: Yang Guo <yangguo@chromium.org>
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56908}
Parent: ec969ea3b1
Commit: 1d83709303
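The clean-up is largely mechanical: per-space allocation area constants and helpers are folded into a single MemoryChunkLayout query class, plus a rename of Evacuator::PageEvacuationThreshold() to NewSpacePageEvacuationThreshold(). As a rough map, condensed from the hunks below (the mapping itself is not part of the diff):

// Old constant / helper                         New MemoryChunkLayout query
// Page::kObjectStartOffset                   -> ObjectStartOffsetInDataPage()
// Page::kAllocatableMemory                   -> AllocatableMemoryInDataPage()
// MemoryAllocator::CodePageGuardStartOffset  -> CodePageGuardStartOffset()
// MemoryAllocator::CodePageGuardSize         -> CodePageGuardSize()
// MemoryAllocator::CodePageAreaStartOffset   -> ObjectStartOffsetInCodePage()
// MemoryAllocator::CodePageAreaEndOffset     -> ObjectEndOffsetInCodePage()
// MemoryAllocator::CodePageAreaSize          -> AllocatableMemoryInCodePage()
// MemoryAllocator::PageAreaSize(space)       -> AllocatableMemoryInMemoryChunk(space)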
@@ -148,9 +148,8 @@ Heap::Heap()
 }

 size_t Heap::MaxReserved() {
-  const double kFactor = Page::kPageSize * 1.0 / Page::kAllocatableMemory;
-  return static_cast<size_t>(
-      (2 * max_semi_space_size_ + max_old_generation_size_) * kFactor);
+  return static_cast<size_t>(2 * max_semi_space_size_ +
+                             max_old_generation_size_);
 }

 size_t Heap::ComputeMaxOldGenerationSize(uint64_t physical_memory) {
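The dropped kFactor used to inflate the bound by kPageSize / kAllocatableMemory (slightly above 1) to cover per-page header overhead; presumably, with allocatable sizes now differing between data and code pages, there is no single constant factor, so the unscaled sum serves as the bound. A back-of-the-envelope check, with assumed figures (512 KB pages, a header on the order of a couple of KB; neither number is taken from the diff):

// kFactor = 524288.0 / (524288 - header_overhead)  // barely above 1.0
// header_overhead = 2048  =>  kFactor ~= 1.004
// so dropping the factor shifts MaxReserved() by well under one percent.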
@@ -240,6 +239,8 @@ size_t Heap::Available() {
   for (SpaceIterator it(this); it.has_next();) {
     total += it.next()->Available();
   }
+
+  total += memory_allocator()->Available();
   return total;
 }

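Heap::Available() now also counts memory the allocator could still hand out, which pairs with LargeObjectSpace::Available() returning 0 in a later hunk. Conceptually (a sketch of the accounting, not the literal code):

// available = sum over spaces of space->Available()  // free bytes on committed pages
//           + memory_allocator()->Available();       // capacity not yet committed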
@@ -1514,7 +1515,7 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
       AllocationResult allocation;
       int size = chunk.size;
       DCHECK_LE(static_cast<size_t>(size),
-                MemoryAllocator::PageAreaSize(
+                MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
                     static_cast<AllocationSpace>(space)));
       if (space == NEW_SPACE) {
         allocation = new_space()->AllocateRawUnaligned(size);
@@ -2366,10 +2366,11 @@ class Evacuator : public Malloced {

   // NewSpacePages with more live bytes than this threshold qualify for fast
   // evacuation.
-  static int PageEvacuationThreshold() {
+  static intptr_t NewSpacePageEvacuationThreshold() {
     if (FLAG_page_promotion)
-      return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
-    return Page::kAllocatableMemory + kPointerSize;
+      return FLAG_page_promotion_threshold *
+             MemoryChunkLayout::AllocatableMemoryInDataPage() / 100;
+    return MemoryChunkLayout::AllocatableMemoryInDataPage() + kPointerSize;
   }

   Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor)
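The rename makes the new-space-only scope explicit. With page promotion enabled, the threshold is a percentage of a data page's allocatable bytes; with it disabled, the fall-through value is one word larger than any possible live-byte count, so no page ever qualifies. For instance, assuming FLAG_page_promotion_threshold keeps its usual default of 70 (an assumption, not stated in the diff):

// flag on:  threshold = 70 * AllocatableMemoryInDataPage() / 100
//                    ~= 0.7 * (512 KB - header)  ~= 357 KB of live bytes
// flag off: threshold = AllocatableMemoryInDataPage() + kPointerSize  // unreachable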
@@ -2619,7 +2620,7 @@ bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes) {
   const bool reduce_memory = heap()->ShouldReduceMemory();
   const Address age_mark = heap()->new_space()->age_mark();
   return !reduce_memory && !p->NeverEvacuate() &&
-         (live_bytes > Evacuator::PageEvacuationThreshold()) &&
+         (live_bytes > Evacuator::NewSpacePageEvacuationThreshold()) &&
          !p->Contains(age_mark) && heap()->CanExpandOldGeneration(live_bytes);
 }

@@ -242,7 +242,8 @@ SlotCallbackResult Scavenger::EvacuateObjectDefault(Map* map,
                                                     HeapObjectReference** slot,
                                                     HeapObject* object,
                                                     int object_size) {
-  SLOW_DCHECK(object_size <= Page::kAllocatableMemory);
+  SLOW_DCHECK(static_cast<size_t>(object_size) <=
+              MemoryChunkLayout::AllocatableMemoryInDataPage());
   SLOW_DCHECK(object->SizeFromMap(map) == object_size);
   CopyAndForwardResult result;

@@ -458,6 +458,62 @@ void MemoryChunk::DiscardUnusedMemory(Address addr, size_t size) {
   }
 }

+size_t MemoryChunkLayout::CodePageGuardStartOffset() {
+  // We are guarding code pages: the first OS page after the header
+  // will be protected as non-writable.
+  return ::RoundUp(Page::kHeaderSize, MemoryAllocator::GetCommitPageSize());
+}
+
+size_t MemoryChunkLayout::CodePageGuardSize() {
+  return MemoryAllocator::GetCommitPageSize();
+}
+
+intptr_t MemoryChunkLayout::ObjectStartOffsetInCodePage() {
+  // We are guarding code pages: the first OS page after the header
+  // will be protected as non-writable.
+  return CodePageGuardStartOffset() + CodePageGuardSize();
+}
+
+intptr_t MemoryChunkLayout::ObjectEndOffsetInCodePage() {
+  // We are guarding code pages: the last OS page will be protected as
+  // non-writable.
+  return Page::kPageSize -
+         static_cast<int>(MemoryAllocator::GetCommitPageSize());
+}
+
+size_t MemoryChunkLayout::AllocatableMemoryInCodePage() {
+  size_t memory = ObjectEndOffsetInCodePage() - ObjectStartOffsetInCodePage();
+  DCHECK_LE(kMaxRegularHeapObjectSize, memory);
+  return memory;
+}
+
+intptr_t MemoryChunkLayout::ObjectStartOffsetInDataPage() {
+  return MemoryChunk::kHeaderSize +
+         (kPointerSize - MemoryChunk::kHeaderSize % kPointerSize);
+}
+
+size_t MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
+    AllocationSpace space) {
+  if (space == CODE_SPACE) {
+    return ObjectStartOffsetInCodePage();
+  }
+  return ObjectStartOffsetInDataPage();
+}
+
+size_t MemoryChunkLayout::AllocatableMemoryInDataPage() {
+  size_t memory = MemoryChunk::kPageSize - ObjectStartOffsetInDataPage();
+  DCHECK_LE(kMaxRegularHeapObjectSize, memory);
+  return memory;
+}
+
+size_t MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
+    AllocationSpace space) {
+  if (space == CODE_SPACE) {
+    return AllocatableMemoryInCodePage();
+  }
+  return AllocatableMemoryInDataPage();
+}
+
 Heap* MemoryChunk::synchronized_heap() {
   return reinterpret_cast<Heap*>(
       base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(&heap_)));
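Taken together, these queries pin down the chunk layout. A worked example, assuming a 4 KB commit page size and a chunk header under 4 KB (typical but configuration-dependent values, not taken from the diff):

// Code page (Page::kPageSize = 512 KB):
//   [0 KB,   4 KB)   header + padding   CodePageGuardStartOffset() == RoundUp(kHeaderSize, 4 KB)
//   [4 KB,   8 KB)   guard page (inaccessible)
//   [8 KB, 508 KB)   object area        ObjectStartOffsetInCodePage() == 8 KB
//   [508 KB, 512 KB) trailing guard     ObjectEndOffsetInCodePage() == 508 KB
//   => AllocatableMemoryInCodePage() == 500 KB
// Data page:
//   object area starts at kHeaderSize rounded up to kPointerSize;
//   AllocatableMemoryInDataPage() == 512 KB - ObjectStartOffsetInDataPage()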
@@ -491,7 +547,7 @@ void MemoryChunk::SetReadAndExecutable() {
   DCHECK_LT(write_unprotect_counter_, kMaxWriteUnprotectCounter);
   if (write_unprotect_counter_ == 0) {
     Address protect_start =
-        address() + MemoryAllocator::CodePageAreaStartOffset();
+        address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
     size_t page_size = MemoryAllocator::GetCommitPageSize();
     DCHECK(IsAligned(protect_start, page_size));
     size_t protect_size = RoundUp(area_size(), page_size);
@@ -510,7 +566,7 @@ void MemoryChunk::SetReadAndWritable() {
   DCHECK_LE(write_unprotect_counter_, kMaxWriteUnprotectCounter);
   if (write_unprotect_counter_ == 1) {
     Address unprotect_start =
-        address() + MemoryAllocator::CodePageAreaStartOffset();
+        address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
     size_t page_size = MemoryAllocator::GetCommitPageSize();
     DCHECK(IsAligned(unprotect_start, page_size));
     size_t unprotect_size = RoundUp(area_size(), page_size);
@@ -597,7 +653,9 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,

 Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
   Page* page = static_cast<Page*>(chunk);
-  DCHECK_GE(Page::kAllocatableMemory, page->area_size());
+  DCHECK_EQ(MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
+                page->owner()->identity()),
+            page->area_size());
   // Make sure that categories are initialized before freeing the area.
   page->ResetAllocatedBytes();
   page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
@@ -733,7 +791,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
 // Non-executable
 // +----------------------------+<- base aligned with MemoryChunk::kAlignment
 // |          Header            |
-// +----------------------------+<- area_start_ (base + kObjectStartOffset)
+// +----------------------------+<- area_start_ (base + area_start_)
 // |           Area             |
 // +----------------------------+<- area_end_ (area_start + commit_area_size)
 // |  Committed but not used    |
@@ -743,13 +801,15 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
 //

   if (executable == EXECUTABLE) {
-    chunk_size = ::RoundUp(
-        CodePageAreaStartOffset() + reserve_area_size + CodePageGuardSize(),
-        GetCommitPageSize());
+    chunk_size = ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInCodePage() +
+                               reserve_area_size +
+                               MemoryChunkLayout::CodePageGuardSize(),
+                           GetCommitPageSize());

     // Size of header (not executable) plus area (executable).
     size_t commit_size = ::RoundUp(
-        CodePageGuardStartOffset() + commit_area_size, GetCommitPageSize());
+        MemoryChunkLayout::CodePageGuardStartOffset() + commit_area_size,
+        GetCommitPageSize());
     base =
         AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
                               executable, address_hint, &reservation);
@@ -758,18 +818,20 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
     size_executable_ += reservation.size();

     if (Heap::ShouldZapGarbage()) {
-      ZapBlock(base, CodePageGuardStartOffset(), kZapValue);
-      ZapBlock(base + CodePageAreaStartOffset(), commit_area_size, kZapValue);
+      ZapBlock(base, MemoryChunkLayout::CodePageGuardStartOffset(), kZapValue);
+      ZapBlock(base + MemoryChunkLayout::ObjectStartOffsetInCodePage(),
+               commit_area_size, kZapValue);
     }

-    area_start = base + CodePageAreaStartOffset();
+    area_start = base + MemoryChunkLayout::ObjectStartOffsetInCodePage();
     area_end = area_start + commit_area_size;
   } else {
-    chunk_size = ::RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
-                           GetCommitPageSize());
-    size_t commit_size =
-        ::RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
-                  GetCommitPageSize());
+    chunk_size = ::RoundUp(
+        MemoryChunkLayout::ObjectStartOffsetInDataPage() + reserve_area_size,
+        GetCommitPageSize());
+    size_t commit_size = ::RoundUp(
+        MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
+        GetCommitPageSize());
     base =
         AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
                               executable, address_hint, &reservation);
@@ -777,10 +839,13 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
     if (base == kNullAddress) return nullptr;

     if (Heap::ShouldZapGarbage()) {
-      ZapBlock(base, Page::kObjectStartOffset + commit_area_size, kZapValue);
+      ZapBlock(
+          base,
+          MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
+          kZapValue);
     }

-    area_start = base + Page::kObjectStartOffset;
+    area_start = base + MemoryChunkLayout::ObjectStartOffsetInDataPage();
     area_end = area_start + commit_area_size;
   }

@@ -954,7 +1019,7 @@ void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
     size_t page_size = GetCommitPageSize();
     DCHECK_EQ(0, chunk->area_end_ % static_cast<Address>(page_size));
     DCHECK_EQ(chunk->address() + chunk->size(),
-              chunk->area_end() + CodePageGuardSize());
+              chunk->area_end() + MemoryChunkLayout::CodePageGuardSize());
     reservation->SetPermissions(chunk->area_end_, page_size,
                                 PageAllocator::kNoAccess);
   }
@@ -1053,7 +1118,9 @@ Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
                                     Executability executable) {
   MemoryChunk* chunk = nullptr;
   if (alloc_mode == kPooled) {
-    DCHECK_EQ(size, static_cast<size_t>(MemoryChunk::kAllocatableMemory));
+    DCHECK_EQ(size, static_cast<size_t>(
+                        MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
+                            owner->identity())));
     DCHECK_EQ(executable, NOT_EXECUTABLE);
     chunk = AllocatePagePooled(owner);
   }
@@ -1088,7 +1155,9 @@ MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
   if (chunk == nullptr) return nullptr;
   const int size = MemoryChunk::kPageSize;
   const Address start = reinterpret_cast<Address>(chunk);
-  const Address area_start = start + MemoryChunk::kObjectStartOffset;
+  const Address area_start =
+      start +
+      MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(owner->identity());
   const Address area_end = start + size;
   // Pooled pages are always regular data pages.
   DCHECK_NE(CODE_SPACE, owner->identity());
@@ -1112,26 +1181,6 @@ void MemoryAllocator::ZapBlock(Address start, size_t size,
   }
 }

-size_t MemoryAllocator::CodePageGuardStartOffset() {
-  // We are guarding code pages: the first OS page after the header
-  // will be protected as non-writable.
-  return ::RoundUp(Page::kObjectStartOffset, GetCommitPageSize());
-}
-
-size_t MemoryAllocator::CodePageGuardSize() { return GetCommitPageSize(); }
-
-size_t MemoryAllocator::CodePageAreaStartOffset() {
-  // We are guarding code pages: the first OS page after the header
-  // will be protected as non-writable.
-  return CodePageGuardStartOffset() + CodePageGuardSize();
-}
-
-size_t MemoryAllocator::CodePageAreaEndOffset() {
-  // We are guarding code pages: the last OS page will be protected as
-  // non-writable.
-  return Page::kPageSize - static_cast<int>(GetCommitPageSize());
-}
-
 intptr_t MemoryAllocator::GetCommitPageSize() {
   if (FLAG_v8_os_page_size != 0) {
     DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
@@ -1162,9 +1211,10 @@ bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
   DCHECK(IsAligned(start, page_size));
   DCHECK_EQ(0, commit_size % page_size);
   DCHECK_EQ(0, reserved_size % page_size);
-  const size_t guard_size = CodePageGuardSize();
-  const size_t pre_guard_offset = CodePageGuardStartOffset();
-  const size_t code_area_offset = CodePageAreaStartOffset();
+  const size_t guard_size = MemoryChunkLayout::CodePageGuardSize();
+  const size_t pre_guard_offset = MemoryChunkLayout::CodePageGuardStartOffset();
+  const size_t code_area_offset =
+      MemoryChunkLayout::ObjectStartOffsetInCodePage();
   // reserved_size includes two guard regions, commit_size does not.
   DCHECK_LE(commit_size, reserved_size - 2 * guard_size);
   const Address pre_guard_page = start + pre_guard_offset;
@@ -1419,7 +1469,7 @@ intptr_t Space::GetNextInlineAllocationStepSize() {
 PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                        Executability executable)
     : SpaceWithLinearArea(heap, space), executable_(executable) {
-  area_size_ = MemoryAllocator::PageAreaSize(space);
+  area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
   accounting_stats_.Clear();
 }

@@ -2111,7 +2161,8 @@ bool SemiSpace::EnsureCurrentCapacity() {
       actual_pages++;
       current_page =
           heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
-              Page::kAllocatableMemory, this, NOT_EXECUTABLE);
+              MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
+              NOT_EXECUTABLE);
       if (current_page == nullptr) return false;
       DCHECK_NOT_NULL(current_page);
       memory_chunk_list_.PushBack(current_page);
@@ -2213,7 +2264,7 @@ void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {

 bool NewSpace::AddFreshPage() {
   Address top = allocation_info_.top();
-  DCHECK(!Page::IsAtObjectStart(top));
+  DCHECK(!OldSpace::IsAtPageStart(top));

   // Do a step to account for memory allocated on previous page.
   InlineAllocationStep(top, top, kNullAddress, 0);
@@ -2273,7 +2324,9 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
 }

 size_t LargeObjectSpace::Available() {
-  return ObjectSizeFor(heap()->memory_allocator()->Available());
+  // We return zero here since we cannot take advantage of already allocated
+  // large object memory.
+  return 0;
 }

 void SpaceWithLinearArea::StartNextInlineAllocationStep() {
@@ -2450,7 +2503,8 @@ bool SemiSpace::Commit() {
   for (int pages_added = 0; pages_added < num_pages; pages_added++) {
     Page* new_page =
         heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
-            Page::kAllocatableMemory, this, NOT_EXECUTABLE);
+            MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
+            NOT_EXECUTABLE);
     if (new_page == nullptr) {
       if (pages_added) RewindPages(pages_added);
       return false;
@@ -2507,7 +2561,8 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
   for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
     Page* new_page =
         heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
-            Page::kAllocatableMemory, this, NOT_EXECUTABLE);
+            MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
+            NOT_EXECUTABLE);
     if (new_page == nullptr) {
       if (pages_added) RewindPages(pages_added);
       return false;
@@ -3219,7 +3274,8 @@ void ReadOnlyPage::MakeHeaderRelocatable() {

 void ReadOnlySpace::SetPermissionsForPages(PageAllocator::Permission access) {
   const size_t page_size = MemoryAllocator::GetCommitPageSize();
-  const size_t area_start_offset = RoundUp(Page::kObjectStartOffset, page_size);
+  const size_t area_start_offset =
+      RoundUp(MemoryChunkLayout::ObjectStartOffsetInDataPage(), page_size);
   MemoryAllocator* memory_allocator = heap()->memory_allocator();
   for (Page* p : *this) {
     ReadOnlyPage* page = static_cast<ReadOnlyPage*>(p);
@@ -45,6 +45,7 @@ class LinearAllocationArea;
 class LocalArrayBufferTracker;
 class MemoryAllocator;
 class MemoryChunk;
+class MemoryChunkLayout;
 class Page;
 class PagedSpace;
 class SemiSpace;
@@ -121,9 +122,6 @@ class Space;
 #define DCHECK_CODEOBJECT_SIZE(size, code_space) \
   DCHECK((0 < size) && (size <= code_space->AreaSize()))

-#define DCHECK_PAGE_OFFSET(offset) \
-  DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))
-
 enum FreeListCategoryType {
   kTiniest,
   kTiny,
@@ -239,6 +237,19 @@ class FreeListCategory {
   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListCategory);
 };

+class MemoryChunkLayout {
+ public:
+  static size_t CodePageGuardStartOffset();
+  static size_t CodePageGuardSize();
+  static intptr_t ObjectStartOffsetInCodePage();
+  static intptr_t ObjectEndOffsetInCodePage();
+  static size_t AllocatableMemoryInCodePage();
+  static intptr_t ObjectStartOffsetInDataPage();
+  V8_EXPORT_PRIVATE static size_t AllocatableMemoryInDataPage();
+  static size_t ObjectStartOffsetInMemoryChunk(AllocationSpace space);
+  static size_t AllocatableMemoryInMemoryChunk(AllocationSpace space);
+};
+
 // MemoryChunk represents a memory region owned by a specific space.
 // It is divided into the header and the body. Chunk start is always
 // 1MB aligned. Start of the body is aligned so it can accommodate
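Call sites swap the old constants for these queries; a representative before/after pair, taken from the deserializer hunks further down:

// before:
//   DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
// after:
//   DCHECK_LE(builtin_size, MemoryChunkLayout::AllocatableMemoryInCodePage());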
@@ -349,7 +360,7 @@ class MemoryChunk {
   static const intptr_t kMarkBitmapOffset = kFlagsOffset + kPointerSize;
   static const intptr_t kReservationOffset = kMarkBitmapOffset + kPointerSize;

-  static const size_t kMinHeaderSize =
+  static const size_t kHeaderSize =
       kSizeOffset         // NOLINT
       + kSizetSize        // size_t size
       + kUIntptrSize      // uintptr_t flags_
@@ -382,17 +393,9 @@ class MemoryChunk {
       + kIntptrSize   // std::atomic<intptr_t> young_generation_live_byte_count_
       + kPointerSize; // Bitmap* young_generation_bitmap_

-  static const size_t kHeaderSize = kMinHeaderSize;
-
-  // TODO(hpayer): Fix kObjectStartOffset and kAllocatableMemory for code pages.
-  static const int kObjectStartOffset =
-      kHeaderSize + (kPointerSize - kHeaderSize % kPointerSize);
-
   // Page size in bytes. This must be a multiple of the OS page size.
   static const int kPageSize = 1 << kPageSizeBits;

-  static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
-
   // Maximum number of nested code memory modification scopes.
   // TODO(6792,mstarzinger): Drop to 3 or lower once WebAssembly is off heap.
   static const int kMaxWriteUnprotectCounter = 4;
@@ -748,10 +751,6 @@ class MemoryChunk {
 static_assert(sizeof(std::atomic<intptr_t>) == kPointerSize,
               "sizeof(std::atomic<intptr_t>) == kPointerSize");

-static_assert(kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory,
-              "kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory");
-
-
 // -----------------------------------------------------------------------------
 // A page is a memory chunk of a size 512K. Large object pages may be larger.
 //
@@ -782,7 +781,7 @@ class Page : public MemoryChunk {
   // Returns the page containing the address provided. The address can
   // potentially point righter after the page. To be also safe for tagged values
   // we subtract a hole word. The valid address ranges from
-  // [page_addr + kObjectStartOffset .. page_addr + kPageSize + kPointerSize].
+  // [page_addr + area_start_ .. page_addr + kPageSize + kPointerSize].
   static Page* FromAllocationAreaAddress(Address address) {
     return Page::FromAddress(address - kPointerSize);
   }
@@ -797,10 +796,6 @@ class Page : public MemoryChunk {
     return (addr & kPageAlignmentMask) == 0;
   }

-  static bool IsAtObjectStart(Address addr) {
-    return (addr & kPageAlignmentMask) == kObjectStartOffset;
-  }
-
   static Page* ConvertNewToOld(Page* old_page);

   inline void MarkNeverAllocateForTesting();
@@ -822,8 +817,10 @@ class Page : public MemoryChunk {

   // Returns the address for a given offset to the this page.
   Address OffsetToAddress(size_t offset) {
-    DCHECK_PAGE_OFFSET(offset);
-    return address() + offset;
+    Address address_in_page = address() + offset;
+    DCHECK_GE(address_in_page, area_start_);
+    DCHECK_LT(address_in_page, area_end_);
+    return address_in_page;
   }

   // WaitUntilSweepingCompleted only works when concurrent sweeping is in
@@ -1269,24 +1266,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
     kPooledAndQueue,
   };

-  static size_t CodePageGuardStartOffset();
-
-  static size_t CodePageGuardSize();
-
-  static size_t CodePageAreaStartOffset();
-
-  static size_t CodePageAreaEndOffset();
-
-  static size_t CodePageAreaSize() {
-    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
-  }
-
-  static size_t PageAreaSize(AllocationSpace space) {
-    DCHECK_NE(LO_SPACE, space);
-    return (space == CODE_SPACE) ? CodePageAreaSize()
-                                 : Page::kAllocatableMemory;
-  }
-
   static intptr_t GetCommitPageSize();

   // Computes the memory area of discardable memory within a given memory area
@@ -1325,11 +1304,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
     return capacity_ < size ? 0 : capacity_ - size;
   }

-  // Returns maximum available bytes that the old space can have.
-  size_t MaxAvailable() {
-    return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
-  }
-
   // Returns an indication of whether a pointer is in a space that has
   // been allocated by this MemoryAllocator.
   V8_INLINE bool IsOutsideAllocatedSpace(Address address) {
@@ -1910,7 +1884,10 @@ class V8_EXPORT_PRIVATE FreeList {

   // The size range of blocks, in bytes.
   static const size_t kMinBlockSize = 3 * kPointerSize;
-  static const size_t kMaxBlockSize = Page::kAllocatableMemory;
+
+  // This is a conservative upper bound. The actual maximum block size takes
+  // padding and alignment of data and code pages into account.
+  static const size_t kMaxBlockSize = Page::kPageSize;

   static const size_t kTiniestListMax = 0xa * kPointerSize;
   static const size_t kTinyListMax = 0x1f * kPointerSize;
@@ -2606,7 +2583,8 @@ class NewSpace : public SpaceWithLinearArea {
   // Return the allocated bytes in the active semispace.
   size_t Size() override {
     DCHECK_GE(top(), to_space_.page_low());
-    return to_space_.pages_used() * Page::kAllocatableMemory +
+    return to_space_.pages_used() *
+               MemoryChunkLayout::AllocatableMemoryInDataPage() +
           static_cast<size_t>(top() - to_space_.page_low());
   }

@@ -2616,7 +2594,7 @@ class NewSpace : public SpaceWithLinearArea {
   size_t Capacity() {
     SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
     return (to_space_.current_capacity() / Page::kPageSize) *
-           Page::kAllocatableMemory;
+           MemoryChunkLayout::AllocatableMemoryInDataPage();
   }

   // Return the current size of a semispace, allocatable and non-allocatable
@@ -2671,7 +2649,7 @@ class NewSpace : public SpaceWithLinearArea {
     }
     while (current_page != last_page) {
       DCHECK_NE(current_page, age_mark_page);
-      allocated += Page::kAllocatableMemory;
+      allocated += MemoryChunkLayout::AllocatableMemoryInDataPage();
       current_page = current_page->next_page();
     }
     DCHECK_GE(top(), current_page->area_start());
@@ -2876,6 +2854,11 @@ class OldSpace : public PagedSpace {
   // Creates an old space object. The constructor does not allocate pages
   // from OS.
   explicit OldSpace(Heap* heap) : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE) {}
+
+  static bool IsAtPageStart(Address addr) {
+    return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
+           MemoryChunkLayout::ObjectStartOffsetInDataPage();
+  }
 };

 // -----------------------------------------------------------------------------
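OldSpace::IsAtPageStart() takes over from the removed Page::IsAtObjectStart() (compare the NewSpace::AddFreshPage() hunk earlier), now testing against the data page object start instead of a chunk-wide constant. Usage sketch for a page whose 512 KB aligned base is B:

// IsAtPageStart(B + MemoryChunkLayout::ObjectStartOffsetInDataPage()) == true
//     (first allocatable byte of the page)
// IsAtPageStart(B) == false   (inside the header)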
@@ -2888,7 +2871,6 @@ class CodeSpace : public PagedSpace {
   explicit CodeSpace(Heap* heap) : PagedSpace(heap, CODE_SPACE, EXECUTABLE) {}
 };

-
 // For contiguous spaces, top should be in the space (or at the end) and limit
 // should be the end of the space.
 #define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
@@ -2956,9 +2938,7 @@ class ReadOnlySpace : public PagedSpace {

 // -----------------------------------------------------------------------------
 // Large objects ( > kMaxRegularHeapObjectSize ) are allocated and
-// managed by the large object space. A large object is allocated from OS
-// heap with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
-// A large object always starts at Page::kObjectStartOffset to a page.
+// managed by the large object space.
 // Large objects do not move during garbage collections.

 class LargeObjectSpace : public Space {
@@ -2973,11 +2953,6 @@ class LargeObjectSpace : public Space {
   // Releases internal resources, frees objects in this space.
   void TearDown();

-  static size_t ObjectSizeFor(size_t chunk_size) {
-    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
-    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
-  }
-
   V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
                                                      Executability executable);

@@ -47,7 +47,7 @@ BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltins() {
     DCHECK(!Builtins::IsLazy(Builtins::kDeserializeLazy));
     uint32_t builtin_size =
         deserializer()->ExtractCodeObjectSize(Builtins::kDeserializeLazy);
-    DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
+    DCHECK_LE(builtin_size, MemoryChunkLayout::AllocatableMemoryInCodePage());
     result.push_back({builtin_size, kNullAddress, kNullAddress});
   }

@@ -61,7 +61,7 @@ BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltins() {
     }

     uint32_t builtin_size = deserializer()->ExtractCodeObjectSize(i);
-    DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
+    DCHECK_LE(builtin_size, MemoryChunkLayout::AllocatableMemoryInCodePage());
     result.push_back({builtin_size, kNullAddress, kNullAddress});
   }

@@ -126,7 +126,7 @@ void BuiltinDeserializerAllocator::ReserveAndInitializeBuiltinsTableForBuiltin(

   const uint32_t builtin_size =
       deserializer()->ExtractCodeObjectSize(builtin_id);
-  DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
+  DCHECK_LE(builtin_size, MemoryChunkLayout::AllocatableMemoryInCodePage());

   Handle<HeapObject> o =
       isolate()->factory()->NewCodeForDeserialization(builtin_size);
@@ -26,7 +26,8 @@ void DefaultSerializerAllocator::UseCustomChunkSize(uint32_t chunk_size) {

 static uint32_t PageSizeOfSpace(int space) {
   return static_cast<uint32_t>(
-      MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(space)));
+      MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
+          static_cast<AllocationSpace>(space)));
 }

 uint32_t DefaultSerializerAllocator::TargetChunkSize(int space) {
@@ -39,10 +39,12 @@ std::vector<Handle<FixedArray>> FillOldSpacePageWithFixedArrays(Heap* heap,
   Handle<FixedArray> array;
   int allocated = 0;
   do {
-    if (allocated + kArraySize * 2 > MemoryChunk::kAllocatableMemory) {
+    if (allocated + kArraySize * 2 >
+        static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage())) {
       int size =
           kArraySize * 2 -
-          ((allocated + kArraySize * 2) - MemoryChunk::kAllocatableMemory) -
+          ((allocated + kArraySize * 2) -
+           static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage())) -
           remainder;
       int last_array_len = heap::FixedArrayLenFromSize(size);
       array = isolate->factory()->NewFixedArray(last_array_len, TENURED);
@@ -59,7 +61,8 @@ std::vector<Handle<FixedArray>> FillOldSpacePageWithFixedArrays(Heap* heap,
              Page::FromAddress(array->address())->area_start());
     }
     handles.push_back(array);
-  } while (allocated < MemoryChunk::kAllocatableMemory);
+  } while (allocated <
+           static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()));
   return handles;
 }

@@ -59,8 +59,10 @@ HEAP_TEST(CompactionFullAbortedPage) {
     {
       HandleScope scope2(isolate);
       CHECK(heap->old_space()->Expand());
-      auto compaction_page_handles =
-          heap::CreatePadding(heap, Page::kAllocatableMemory, TENURED);
+      auto compaction_page_handles = heap::CreatePadding(
+          heap,
+          static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()),
+          TENURED);
       Page* to_be_aborted_page =
           Page::FromAddress(compaction_page_handles.front()->address());
       to_be_aborted_page->SetFlag(
@@ -93,7 +95,9 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
   FLAG_manual_evacuation_candidates_selection = true;

   const int objects_per_page = 10;
-  const int object_size = Page::kAllocatableMemory / objects_per_page;
+  const int object_size =
+      static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()) /
+      objects_per_page;

   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
@@ -109,7 +113,9 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
       // properly adjusted).
      CHECK(heap->old_space()->Expand());
       auto compaction_page_handles = heap::CreatePadding(
-          heap, Page::kAllocatableMemory, TENURED, object_size);
+          heap,
+          static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()),
+          TENURED, object_size);
       Page* to_be_aborted_page =
           Page::FromAddress(compaction_page_handles.front()->address());
       to_be_aborted_page->SetFlag(
@@ -168,7 +174,9 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
   FLAG_manual_evacuation_candidates_selection = true;

   const int objects_per_page = 10;
-  const int object_size = Page::kAllocatableMemory / objects_per_page;
+  const int object_size =
+      static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()) /
+      objects_per_page;

   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
@@ -187,8 +195,11 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
       // properly adjusted).
       CHECK(heap->old_space()->Expand());
       std::vector<Handle<FixedArray>> compaction_page_handles =
-          heap::CreatePadding(heap, Page::kAllocatableMemory, TENURED,
-                              object_size);
+          heap::CreatePadding(
+              heap,
+              static_cast<int>(
+                  MemoryChunkLayout::AllocatableMemoryInDataPage()),
+              TENURED, object_size);
       to_be_aborted_page =
           Page::FromAddress(compaction_page_handles.front()->address());
       to_be_aborted_page->SetFlag(
@@ -257,7 +268,9 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
   FLAG_manual_evacuation_candidates_selection = true;

   const int objects_per_page = 10;
-  const int object_size = Page::kAllocatableMemory / objects_per_page;
+  const int object_size =
+      static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()) /
+      objects_per_page;

   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
@@ -275,7 +288,9 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
       // properly adjusted).
       CHECK(heap->old_space()->Expand());
       auto compaction_page_handles = heap::CreatePadding(
-          heap, Page::kAllocatableMemory, TENURED, object_size);
+          heap,
+          static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()),
+          TENURED, object_size);
       // Sanity check that we have enough space for linking up arrays.
       CHECK_GE(compaction_page_handles.front()->length(), 2);
       to_be_aborted_page =
@@ -4597,7 +4597,8 @@ TEST(Regress388880) {
   // Allocate padding objects in old pointer space so, that object allocated
   // afterwards would end at the end of the page.
   heap::SimulateFullSpace(heap->old_space());
-  size_t padding_size = desired_offset - Page::kObjectStartOffset;
+  size_t padding_size =
+      desired_offset - MemoryChunkLayout::ObjectStartOffsetInDataPage();
   heap::CreatePadding(heap, static_cast<int>(padding_size), TENURED);

   Handle<JSObject> o = factory->NewJSObjectFromMap(map1, TENURED);
@@ -6132,7 +6133,8 @@ size_t NearHeapLimitCallback(void* raw_state, size_t current_heap_limit,

 size_t MemoryAllocatorSizeFromHeapCapacity(size_t capacity) {
   // Size to capacity factor.
-  double factor = Page::kPageSize * 1.0 / Page::kAllocatableMemory;
+  double factor =
+      Page::kPageSize * 1.0 / MemoryChunkLayout::AllocatableMemoryInDataPage();
   // Some tables (e.g. deoptimization table) are allocated directly with the
   // memory allocator. Allow some slack to account for them.
   size_t slack = 5 * MB;
@@ -76,8 +76,9 @@ UNINITIALIZED_TEST(PagePromotion_NewToOld) {
     // To perform a sanity check on live bytes we need to mark the heap.
     heap::SimulateIncrementalMarking(heap, true);
     // Sanity check that the page meets the requirements for promotion.
-    const int threshold_bytes =
-        FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
+    const int threshold_bytes = static_cast<int>(
+        FLAG_page_promotion_threshold *
+        MemoryChunkLayout::AllocatableMemoryInDataPage() / 100);
     CHECK_GE(heap->incremental_marking()->marking_state()->live_bytes(
                  to_be_promoted_page),
              threshold_bytes);
@@ -98,19 +98,19 @@ static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
   v8::PageAllocator* page_allocator =
       memory_allocator->page_allocator(executable);

-  size_t header_size = (executable == EXECUTABLE)
-                           ? MemoryAllocator::CodePageGuardStartOffset()
-                           : MemoryChunk::kObjectStartOffset;
+  size_t allocatable_memory_area_offset =
+      MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(space->identity());
   size_t guard_size =
-      (executable == EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
+      (executable == EXECUTABLE) ? MemoryChunkLayout::CodePageGuardSize() : 0;

   MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(
       reserve_area_size, commit_area_size, executable, space);
   size_t reserved_size =
       ((executable == EXECUTABLE))
-          ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
-                    page_allocator->CommitPageSize())
-          : RoundUp(header_size + reserve_area_size,
+          ? allocatable_memory_area_offset +
+                RoundUp(reserve_area_size, page_allocator->CommitPageSize()) +
+                guard_size
+          : RoundUp(allocatable_memory_area_offset + reserve_area_size,
                     page_allocator->CommitPageSize());
   CHECK(memory_chunk->size() == reserved_size);
   CHECK(memory_chunk->area_start() <
|
||||
CHECK(lo->Contains(ho));
|
||||
|
||||
while (true) {
|
||||
size_t available = lo->Available();
|
||||
{ AllocationResult allocation = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
|
||||
if (allocation.IsRetry()) break;
|
||||
}
|
||||
// The available value is conservative such that it may report
|
||||
// zero prior to heap exhaustion.
|
||||
CHECK(lo->Available() < available || available == 0);
|
||||
}
|
||||
|
||||
CHECK(!lo->IsEmpty());
|
||||
@@ -670,9 +666,10 @@ TEST(ShrinkPageToHighWaterMarkFreeSpaceEnd) {
       HeapObject::FromAddress(array->address() + array->Size());
   CHECK(filler->IsFreeSpace());
   size_t shrunk = old_space->ShrinkPageToHighWaterMark(page);
-  size_t should_have_shrunk =
-      RoundDown(static_cast<size_t>(Page::kAllocatableMemory - array->Size()),
-                CommitPageSize());
+  size_t should_have_shrunk = RoundDown(
+      static_cast<size_t>(MemoryChunkLayout::AllocatableMemoryInDataPage() -
+                          array->Size()),
+      CommitPageSize());
   CHECK_EQ(should_have_shrunk, shrunk);
 }

@@ -19322,6 +19322,8 @@ TEST(GetHeapSpaceStatistics) {
     CHECK_GT(space_statistics.physical_space_size(), 0u);
     total_physical_size += space_statistics.physical_space_size();
   }
+  total_available_size += CcTest::heap()->memory_allocator()->Available();
+
   CHECK_EQ(total_size, heap_statistics.total_heap_size());
   CHECK_EQ(total_used_size, heap_statistics.used_heap_size());
   CHECK_EQ(total_available_size, heap_statistics.total_available_size());
@@ -48,10 +48,10 @@ bool SequentialUnmapperTest::old_flag_;

 // See v8:5945.
 TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
-  Page* page =
-      allocator()->AllocatePage(MemoryAllocator::PageAreaSize(OLD_SPACE),
-                                static_cast<PagedSpace*>(heap()->old_space()),
-                                Executability::NOT_EXECUTABLE);
+  Page* page = allocator()->AllocatePage(
+      MemoryChunkLayout::AllocatableMemoryInDataPage(),
+      static_cast<PagedSpace*>(heap()->old_space()),
+      Executability::NOT_EXECUTABLE);
   EXPECT_NE(nullptr, page);
   const int page_size = getpagesize();
   void* start_address = reinterpret_cast<void*>(page->address());
@@ -66,10 +66,10 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {

 // See v8:5945.
 TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
-  Page* page =
-      allocator()->AllocatePage(MemoryAllocator::PageAreaSize(OLD_SPACE),
-                                static_cast<PagedSpace*>(heap()->old_space()),
-                                Executability::NOT_EXECUTABLE);
+  Page* page = allocator()->AllocatePage(
+      MemoryChunkLayout::AllocatableMemoryInDataPage(),
+      static_cast<PagedSpace*>(heap()->old_space()),
+      Executability::NOT_EXECUTABLE);
   EXPECT_NE(nullptr, page);
   const int page_size = getpagesize();
   void* start_address = reinterpret_cast<void*>(page->address());