Use MemoryChunk-based allocation for deoptimization entry code
This is done by first committing the deoptimization entry code with a minimal area size (OS::CommitPageSize) and later using CommitArea to adjust the size.

Review URL: https://codereview.chromium.org/11566011
Patch from Haitao Feng <haitao.feng@intel.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@13494 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Parent: 3c52343738
Commit: de17ce7701
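
Below is a minimal, self-contained sketch of the reserve-then-commit pattern this commit adopts for the deoptimization entry table, expressed with plain POSIX mmap/mprotect rather than V8's internal MemoryAllocator/MemoryChunk API; all sizes and names are invented for illustration.

#include <sys/mman.h>
#include <unistd.h>
#include <cassert>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  const size_t reserve_size = 64 * page;  // role of Deoptimizer::GetMaxDeoptTableSize()
  size_t committed = page;                // role of OS::CommitPageSize()

  // Reserve the whole address range up front without backing it with
  // accessible memory (PROT_NONE keeps it effectively uncommitted).
  void* mem = mmap(NULL, reserve_size, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(mem != MAP_FAILED);
  char* base = static_cast<char*>(mem);

  // Commit only the minimal initial area.
  assert(mprotect(base, committed, PROT_READ | PROT_WRITE) == 0);

  // Later, grow the committed area in place (the role of MemoryChunk::CommitArea).
  size_t needed = 8 * page;  // e.g. the size of the generated entry code
  if (needed > committed) {
    assert(mprotect(base, needed, PROT_READ | PROT_WRITE) == 0);
    committed = needed;
  }

  printf("reserved %zu bytes, committed %zu bytes\n", reserve_size, committed);
  munmap(base, reserve_size);
  return 0;
}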
src/deoptimizer.cc

@@ -44,8 +44,18 @@ DeoptimizerData::DeoptimizerData() {
   eager_deoptimization_entry_code_entries_ = -1;
   lazy_deoptimization_entry_code_entries_ = -1;
   size_t deopt_table_size = Deoptimizer::GetMaxDeoptTableSize();
-  eager_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
-  lazy_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
+  MemoryAllocator* allocator = Isolate::Current()->memory_allocator();
+  size_t initial_commit_size = OS::CommitPageSize();
+  eager_deoptimization_entry_code_ =
+      allocator->AllocateChunk(deopt_table_size,
+                               initial_commit_size,
+                               EXECUTABLE,
+                               NULL);
+  lazy_deoptimization_entry_code_ =
+      allocator->AllocateChunk(deopt_table_size,
+                               initial_commit_size,
+                               EXECUTABLE,
+                               NULL);
   current_ = NULL;
   deoptimizing_code_list_ = NULL;
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -55,9 +65,11 @@ DeoptimizerData::DeoptimizerData() {
 
 
 DeoptimizerData::~DeoptimizerData() {
-  delete eager_deoptimization_entry_code_;
+  Isolate::Current()->memory_allocator()->Free(
+      eager_deoptimization_entry_code_);
   eager_deoptimization_entry_code_ = NULL;
-  delete lazy_deoptimization_entry_code_;
+  Isolate::Current()->memory_allocator()->Free(
+      lazy_deoptimization_entry_code_);
   lazy_deoptimization_entry_code_ = NULL;
 
   DeoptimizingCodeListNode* current = deoptimizing_code_list_;
@@ -617,7 +629,7 @@ Address Deoptimizer::GetDeoptimizationEntry(int id,
                                             GetEntryMode mode) {
   ASSERT(id >= 0);
   if (id >= kMaxNumberOfEntries) return NULL;
-  VirtualMemory* base = NULL;
+  MemoryChunk* base = NULL;
   if (mode == ENSURE_ENTRY_CODE) {
     EnsureCodeForDeoptimizationEntry(type, id);
   } else {
@@ -629,28 +641,27 @@ Address Deoptimizer::GetDeoptimizationEntry(int id,
   } else {
     base = data->lazy_deoptimization_entry_code_;
   }
-  return
-      static_cast<Address>(base->address()) + (id * table_entry_size_);
+  return base->area_start() + (id * table_entry_size_);
 }
 
 
 int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
-  VirtualMemory* base = NULL;
+  MemoryChunk* base = NULL;
   DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
   if (type == EAGER) {
     base = data->eager_deoptimization_entry_code_;
   } else {
     base = data->lazy_deoptimization_entry_code_;
   }
-  Address base_casted = reinterpret_cast<Address>(base->address());
+  Address start = base->area_start();
   if (base == NULL ||
-      addr < base->address() ||
-      addr >= base_casted + (kMaxNumberOfEntries * table_entry_size_)) {
+      addr < start ||
+      addr >= start + (kMaxNumberOfEntries * table_entry_size_)) {
     return kNotDeoptimizationEntry;
   }
   ASSERT_EQ(0,
-            static_cast<int>(addr - base_casted) % table_entry_size_);
-  return static_cast<int>(addr - base_casted) / table_entry_size_;
+            static_cast<int>(addr - start) % table_entry_size_);
+  return static_cast<int>(addr - start) / table_entry_size_;
 }
@@ -1569,14 +1580,14 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(BailoutType type,
   CodeDesc desc;
   masm.GetCode(&desc);
 
-  VirtualMemory* memory = type == EAGER
+  MemoryChunk* chunk = type == EAGER
       ? data->eager_deoptimization_entry_code_
       : data->lazy_deoptimization_entry_code_;
-  size_t table_size = Deoptimizer::GetMaxDeoptTableSize();
-  ASSERT(static_cast<int>(table_size) >= desc.instr_size);
-  memory->Commit(memory->address(), table_size, true);
-  memcpy(memory->address(), desc.buffer, desc.instr_size);
-  CPU::FlushICache(memory->address(), desc.instr_size);
+  ASSERT(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >=
+         desc.instr_size);
+  chunk->CommitArea(desc.instr_size);
+  memcpy(chunk->area_start(), desc.buffer, desc.instr_size);
+  CPU::FlushICache(chunk->area_start(), desc.instr_size);
 
   if (type == EAGER) {
     data->eager_deoptimization_entry_code_entries_ = entry_count;
src/deoptimizer.h

@@ -113,8 +113,8 @@ class DeoptimizerData {
  private:
   int eager_deoptimization_entry_code_entries_;
   int lazy_deoptimization_entry_code_entries_;
-  VirtualMemory* eager_deoptimization_entry_code_;
-  VirtualMemory* lazy_deoptimization_entry_code_;
+  MemoryChunk* eager_deoptimization_entry_code_;
+  MemoryChunk* lazy_deoptimization_entry_code_;
   Deoptimizer* current_;
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
src/isolate.h

@@ -1311,6 +1311,7 @@ class Isolate {
   friend class StackGuard;
   friend class ThreadId;
   friend class TestMemoryAllocatorScope;
+  friend class TestCodeRangeScope;
   friend class v8::Isolate;
   friend class v8::Locker;
   friend class v8::Unlocker;
src/spaces.cc
@@ -206,17 +206,18 @@ void CodeRange::GetNextAllocationBlock(size_t requested) {
 }
 
 
-Address CodeRange::AllocateRawMemory(const size_t requested,
+Address CodeRange::AllocateRawMemory(const size_t requested_size,
+                                     const size_t commit_size,
                                      size_t* allocated) {
+  ASSERT(commit_size <= requested_size);
   ASSERT(current_allocation_block_index_ < allocation_list_.length());
-  if (requested > allocation_list_[current_allocation_block_index_].size) {
+  if (requested_size > allocation_list_[current_allocation_block_index_].size) {
     // Find an allocation block large enough.  This function call may
     // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
-    GetNextAllocationBlock(requested);
+    GetNextAllocationBlock(requested_size);
   }
   // Commit the requested memory at the start of the current allocation block.
-  size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment);
+  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
   FreeBlock current = allocation_list_[current_allocation_block_index_];
   if (aligned_requested >= (current.size - Page::kPageSize)) {
     // Don't leave a small free block, useless for a large object or chunk.
@@ -226,9 +227,10 @@ Address CodeRange::AllocateRawMemory(const size_t requested,
   }
   ASSERT(*allocated <= current.size);
   ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
-  if (!MemoryAllocator::CommitCodePage(code_range_,
-                                       current.start,
-                                       *allocated)) {
+  if (!MemoryAllocator::CommitExecutableMemory(code_range_,
+                                               current.start,
+                                               commit_size,
+                                               *allocated)) {
     *allocated = 0;
     return NULL;
   }
@@ -241,6 +243,16 @@ Address CodeRange::AllocateRawMemory(const size_t requested,
 }
 
 
+bool CodeRange::CommitRawMemory(Address start, size_t length) {
+  return code_range_->Commit(start, length, true);
+}
+
+
+bool CodeRange::UncommitRawMemory(Address start, size_t length) {
+  return code_range_->Uncommit(start, length);
+}
+
+
 void CodeRange::FreeRawMemory(Address address, size_t length) {
   ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
   free_list_.Add(FreeBlock(address, length));
@@ -345,27 +357,31 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size,
 
   if (!reservation.IsReserved()) return NULL;
   size_ += reservation.size();
-  Address base = RoundUp(static_cast<Address>(reservation.address()),
-                         alignment);
+  Address base = static_cast<Address>(reservation.address());
   controller->TakeControl(&reservation);
   return base;
 }
 
 
-Address MemoryAllocator::AllocateAlignedMemory(size_t size,
+Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
+                                               size_t commit_size,
                                                size_t alignment,
                                                Executability executable,
                                                VirtualMemory* controller) {
+  ASSERT(commit_size <= reserve_size);
   VirtualMemory reservation;
-  Address base = ReserveAlignedMemory(size, alignment, &reservation);
+  Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
   if (base == NULL) return NULL;
 
   if (executable == EXECUTABLE) {
-    if (!CommitCodePage(&reservation, base, size)) {
+    if (!CommitExecutableMemory(&reservation,
+                                base,
+                                commit_size,
+                                reserve_size)) {
       base = NULL;
     }
   } else {
-    if (!reservation.Commit(base, size, false)) {
+    if (!reservation.Commit(base, commit_size, false)) {
       base = NULL;
     }
   }
@@ -469,6 +485,53 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
 }
 
 
+// Commit MemoryChunk area to the requested size.
+bool MemoryChunk::CommitArea(size_t requested) {
+  size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
+                      MemoryAllocator::CodePageGuardSize() : 0;
+  size_t header_size = area_start() - address() - guard_size;
+  size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize());
+  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
+                                  OS::CommitPageSize());
+
+  if (commit_size > committed_size) {
+    // Commit size should be less or equal than the reserved size.
+    ASSERT(commit_size <= size() - 2 * guard_size);
+    // Append the committed area.
+    Address start = address() + committed_size + guard_size;
+    size_t length = commit_size - committed_size;
+    if (reservation_.IsReserved()) {
+      if (!reservation_.Commit(start, length, IsFlagSet(IS_EXECUTABLE))) {
+        return false;
+      }
+    } else {
+      CodeRange* code_range = heap_->isolate()->code_range();
+      ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
+      if (!code_range->CommitRawMemory(start, length)) return false;
+    }
+
+    if (Heap::ShouldZapGarbage()) {
+      heap_->isolate()->memory_allocator()->ZapBlock(start, length);
+    }
+  } else if (commit_size < committed_size) {
+    ASSERT(commit_size > 0);
+    // Shrink the committed area.
+    size_t length = committed_size - commit_size;
+    Address start = address() + committed_size + guard_size - length;
+    if (reservation_.IsReserved()) {
+      if (!reservation_.Uncommit(start, length)) return false;
+    } else {
+      CodeRange* code_range = heap_->isolate()->code_range();
+      ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
+      if (!code_range->UncommitRawMemory(start, length)) return false;
+    }
+  }
+
+  area_end_ = area_start_ + requested;
+  return true;
+}
+
+
 void MemoryChunk::InsertAfter(MemoryChunk* other) {
   next_chunk_ = other->next_chunk_;
   prev_chunk_ = other;
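
As a side note, the grow path of CommitArea above rounds both the currently committed area and the requested area up to OS commit-page granularity and commits only the difference, offset past the header and, for executable chunks, the leading guard page. A standalone arithmetic sketch, with sizes invented purely for illustration (4 KB commit pages, a 1 KB header, a 4 KB guard):

#include <cstddef>
#include <cstdio>

static size_t RoundUpTo(size_t value, size_t granularity) {
  return ((value + granularity - 1) / granularity) * granularity;
}

int main() {
  const size_t kPage = 4096;         // stand-in for OS::CommitPageSize()
  const size_t header_size = 1024;   // area_start() - address() - guard_size
  const size_t guard_size = 4096;    // leading guard page (executable chunks)
  size_t old_area_size = 4096;       // area_end() - area_start() before the call
  size_t requested = 12000;          // new area size passed to CommitArea

  size_t committed_size = RoundUpTo(header_size + old_area_size, kPage);  // 8192
  size_t commit_size = RoundUpTo(header_size + requested, kPage);         // 16384

  if (commit_size > committed_size) {
    // Only the delta beyond the already-committed pages is committed; the
    // leading guard page shifts the start of the newly committed range.
    size_t offset = committed_size + guard_size;   // 12288 bytes into the chunk
    size_t length = commit_size - committed_size;  // 8192 bytes to commit
    printf("commit %zu bytes at chunk offset %zu\n", length, offset);
  }
  return 0;
}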
@@ -489,9 +552,12 @@ void MemoryChunk::Unlink() {
 }
 
 
-MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
+MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
+                                            intptr_t commit_area_size,
                                             Executability executable,
                                             Space* owner) {
+  ASSERT(commit_area_size <= reserve_area_size);
+
   size_t chunk_size;
   Heap* heap = isolate_->heap();
   Address base = NULL;
@@ -499,8 +565,38 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
   Address area_start = NULL;
   Address area_end = NULL;
 
+  //
+  // MemoryChunk layout:
+  //
+  //             Executable
+  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
+  // |           Header           |
+  // +----------------------------+<- base + CodePageGuardStartOffset
+  // |           Guard            |
+  // +----------------------------+<- area_start_
+  // |            Area            |
+  // +----------------------------+<- area_end_ (area_start + commit_area_size)
+  // |   Committed but not used   |
+  // +----------------------------+<- aligned at OS page boundary
+  // | Reserved but not committed |
+  // +----------------------------+<- aligned at OS page boundary
+  // |           Guard            |
+  // +----------------------------+<- base + chunk_size
+  //
+  //           Non-executable
+  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
+  // |           Header           |
+  // +----------------------------+<- area_start_ (base + kObjectStartOffset)
+  // |            Area            |
+  // +----------------------------+<- area_end_ (area_start + commit_area_size)
+  // |   Committed but not used   |
+  // +----------------------------+<- aligned at OS page boundary
+  // | Reserved but not committed |
+  // +----------------------------+<- base + chunk_size
+  //
+
   if (executable == EXECUTABLE) {
-    chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
+    chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
                          OS::CommitPageSize()) + CodePageGuardSize();
 
     // Check executable memory limit.
@@ -511,10 +607,15 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
       return NULL;
     }
 
+    // Size of header (not executable) plus area (executable).
+    size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
+                                 OS::CommitPageSize());
     // Allocate executable memory either from code range or from the
     // OS.
     if (isolate_->code_range()->exists()) {
-      base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
+      base = isolate_->code_range()->AllocateRawMemory(chunk_size,
+                                                       commit_size,
+                                                       &chunk_size);
       ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
                        MemoryChunk::kAlignment));
       if (base == NULL) return NULL;
@@ -523,6 +624,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
       size_executable_ += chunk_size;
     } else {
       base = AllocateAlignedMemory(chunk_size,
+                                   commit_size,
                                    MemoryChunk::kAlignment,
                                    executable,
                                    &reservation);
@@ -533,14 +635,18 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
 
     if (Heap::ShouldZapGarbage()) {
       ZapBlock(base, CodePageGuardStartOffset());
-      ZapBlock(base + CodePageAreaStartOffset(), body_size);
+      ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
     }
 
     area_start = base + CodePageAreaStartOffset();
-    area_end = area_start + body_size;
+    area_end = area_start + commit_area_size;
   } else {
-    chunk_size = MemoryChunk::kObjectStartOffset + body_size;
+    chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
+                         OS::CommitPageSize());
+    size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
+                                 commit_area_size, OS::CommitPageSize());
     base = AllocateAlignedMemory(chunk_size,
+                                 commit_size,
                                  MemoryChunk::kAlignment,
                                  executable,
                                  &reservation);
@@ -548,13 +654,15 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
     if (base == NULL) return NULL;
 
     if (Heap::ShouldZapGarbage()) {
-      ZapBlock(base, chunk_size);
+      ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
     }
 
     area_start = base + Page::kObjectStartOffset;
-    area_end = base + chunk_size;
+    area_end = area_start + commit_area_size;
   }
 
+  // Use chunk_size for statistics and callbacks because we assume that they
+  // treat reserved but not-yet committed memory regions of chunks as allocated.
   isolate_->counters()->memory_allocated()->
       Increment(static_cast<int>(chunk_size));
 
@@ -579,7 +687,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
 Page* MemoryAllocator::AllocatePage(intptr_t size,
                                     PagedSpace* owner,
                                     Executability executable) {
-  MemoryChunk* chunk = AllocateChunk(size, executable, owner);
+  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
 
   if (chunk == NULL) return NULL;
 
@@ -590,7 +698,10 @@ Page* MemoryAllocator::AllocatePage(intptr_t size,
 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                               Space* owner,
                                               Executability executable) {
-  MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
+  MemoryChunk* chunk = AllocateChunk(object_size,
+                                     object_size,
+                                     executable,
+                                     owner);
   if (chunk == NULL) return NULL;
   return LargePage::Initialize(isolate_->heap(), chunk);
 }
@@ -732,9 +843,10 @@ int MemoryAllocator::CodePageAreaEndOffset() {
 }
 
 
-bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
-                                     Address start,
-                                     size_t size) {
+bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
+                                             Address start,
+                                             size_t commit_size,
+                                             size_t reserved_size) {
   // Commit page header (not executable).
   if (!vm->Commit(start,
                   CodePageGuardStartOffset(),
@@ -748,15 +860,14 @@ bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
   }
 
   // Commit page body (executable).
-  size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize();
   if (!vm->Commit(start + CodePageAreaStartOffset(),
-                  area_size,
+                  commit_size - CodePageGuardStartOffset(),
                   true)) {
     return false;
   }
 
-  // Create guard page after the allocatable area.
-  if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) {
+  // Create guard page before the end.
+  if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
     return false;
   }
 
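
For orientation, here is an illustrative POSIX sketch (not V8 code) of the protection layout that CommitExecutableMemory establishes above: a non-executable header, a guard page after the header, an executable body whose extent is driven by commit_size, and a guard page kept at the very end of the reservation. All offsets and sizes are invented for the example.

#include <sys/mman.h>
#include <unistd.h>
#include <cassert>
#include <cstddef>

int main() {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  const size_t header_size = page;         // role of CodePageGuardStartOffset()
  const size_t reserved_size = 16 * page;  // whole chunk reservation
  const size_t body_commit = 4 * page;     // initially committed executable body

  // Reserve the whole range inaccessible first.
  void* mem = mmap(NULL, reserved_size, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(mem != MAP_FAILED);
  char* start = static_cast<char*>(mem);

  // Commit the page header (readable/writable, not executable).
  assert(mprotect(start, header_size, PROT_READ | PROT_WRITE) == 0);

  // The page right after the header stays PROT_NONE: the guard after the header.

  // Commit the executable body that follows the leading guard page.
  assert(mprotect(start + header_size + page, body_commit,
                  PROT_READ | PROT_WRITE | PROT_EXEC) == 0);

  // The last page of the reservation also stays PROT_NONE: the guard page
  // "before the end", which faults on any access past the code area.
  munmap(start, reserved_size);
  return 0;
}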
src/spaces.h
@@ -645,6 +645,7 @@ class MemoryChunk {
   int area_size() {
     return static_cast<int>(area_end() - area_start());
   }
+  bool CommitArea(size_t requested);
 
   // Approximate amount of physical memory committed for this chunk.
   size_t CommittedPhysicalMemory() {
@@ -887,8 +888,11 @@ class CodeRange {
   // Allocates a chunk of memory from the large-object portion of
   // the code range. On platforms with no separate code range, should
   // not be called.
-  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested,
+  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
+                                            const size_t commit_size,
                                             size_t* allocated);
+  bool CommitRawMemory(Address start, size_t length);
+  bool UncommitRawMemory(Address start, size_t length);
   void FreeRawMemory(Address buf, size_t length);
 
  private:
@@ -1036,14 +1040,19 @@ class MemoryAllocator {
   void ReportStatistics();
 #endif
 
-  MemoryChunk* AllocateChunk(intptr_t body_size,
+  // Returns a MemoryChunk in which the memory region from commit_area_size to
+  // reserve_area_size of the chunk area is reserved but not committed, it
+  // could be committed later by calling MemoryChunk::CommitArea.
+  MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
+                             intptr_t commit_area_size,
                              Executability executable,
                              Space* space);
 
   Address ReserveAlignedMemory(size_t requested,
                                size_t alignment,
                                VirtualMemory* controller);
-  Address AllocateAlignedMemory(size_t requested,
+  Address AllocateAlignedMemory(size_t reserve_size,
+                                size_t commit_size,
                                 size_t alignment,
                                 Executability executable,
                                 VirtualMemory* controller);
@@ -1093,9 +1102,10 @@ class MemoryAllocator {
     return CodePageAreaEndOffset() - CodePageAreaStartOffset();
   }
 
-  MUST_USE_RESULT static bool CommitCodePage(VirtualMemory* vm,
-                                             Address start,
-                                             size_t size);
+  MUST_USE_RESULT static bool CommitExecutableMemory(VirtualMemory* vm,
+                                                     Address start,
+                                                     size_t commit_size,
+                                                     size_t reserved_size);
 
  private:
   Isolate* isolate_;
test/cctest/test-alloc.cc

@@ -204,7 +204,9 @@ TEST(CodeRange) {
         (Page::kMaxNonCodeHeapObjectSize << (Pseudorandom() % 3)) +
         Pseudorandom() % 5000 + 1;
     size_t allocated = 0;
-    Address base = code_range->AllocateRawMemory(requested, &allocated);
+    Address base = code_range->AllocateRawMemory(requested,
+                                                 requested,
+                                                 &allocated);
     CHECK(base != NULL);
     blocks.Add(Block(base, static_cast<int>(allocated)));
     current_allocated += static_cast<int>(allocated);
test/cctest/test-spaces.cc

@@ -121,9 +121,148 @@ class TestMemoryAllocatorScope {
   DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
 };
 
 
+// Temporarily sets a given code range in an isolate.
+class TestCodeRangeScope {
+ public:
+  TestCodeRangeScope(Isolate* isolate, CodeRange* code_range)
+      : isolate_(isolate),
+        old_code_range_(isolate->code_range_) {
+    isolate->code_range_ = code_range;
+  }
+
+  ~TestCodeRangeScope() {
+    isolate_->code_range_ = old_code_range_;
+  }
+
+ private:
+  Isolate* isolate_;
+  CodeRange* old_code_range_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestCodeRangeScope);
+};
+
 } }  // namespace v8::internal
 
 
+static void VerifyMemoryChunk(Isolate* isolate,
+                              Heap* heap,
+                              CodeRange* code_range,
+                              size_t reserve_area_size,
+                              size_t commit_area_size,
+                              size_t second_commit_area_size,
+                              Executability executable) {
+  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
+  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
+                                heap->MaxExecutableSize()));
+  TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
+  TestCodeRangeScope test_code_range_scope(isolate, code_range);
+
+  size_t header_size = (executable == EXECUTABLE)
+                       ? MemoryAllocator::CodePageGuardStartOffset()
+                       : MemoryChunk::kObjectStartOffset;
+  size_t guard_size = (executable == EXECUTABLE)
+                       ? MemoryAllocator::CodePageGuardSize()
+                       : 0;
+
+  MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(reserve_area_size,
+                                                              commit_area_size,
+                                                              executable,
+                                                              NULL);
+  size_t alignment = code_range->exists() ?
+                     MemoryChunk::kAlignment : OS::CommitPageSize();
+  size_t reserved_size = ((executable == EXECUTABLE))
+      ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
+                alignment)
+      : RoundUp(header_size + reserve_area_size, OS::CommitPageSize());
+  CHECK(memory_chunk->size() == reserved_size);
+  CHECK(memory_chunk->area_start() < memory_chunk->address() +
+                                     memory_chunk->size());
+  CHECK(memory_chunk->area_end() <= memory_chunk->address() +
+                                    memory_chunk->size());
+  CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);
+
+  Address area_start = memory_chunk->area_start();
+
+  memory_chunk->CommitArea(second_commit_area_size);
+  CHECK(area_start == memory_chunk->area_start());
+  CHECK(memory_chunk->area_start() < memory_chunk->address() +
+                                     memory_chunk->size());
+  CHECK(memory_chunk->area_end() <= memory_chunk->address() +
+                                    memory_chunk->size());
+  CHECK(static_cast<size_t>(memory_chunk->area_size()) ==
+        second_commit_area_size);
+
+  memory_allocator->Free(memory_chunk);
+  memory_allocator->TearDown();
+  delete memory_allocator;
+}
+
+
+static unsigned int Pseudorandom() {
+  static uint32_t lo = 2345;
+  lo = 18273 * (lo & 0xFFFFF) + (lo >> 16);
+  return lo & 0xFFFFF;
+}
+
+
+TEST(MemoryChunk) {
+  OS::SetUp();
+  Isolate* isolate = Isolate::Current();
+  isolate->InitializeLoggingAndCounters();
+  Heap* heap = isolate->heap();
+  CHECK(heap->ConfigureHeapDefault());
+
+  size_t reserve_area_size = 1 * MB;
+  size_t initial_commit_area_size, second_commit_area_size;
+
+  for (int i = 0; i < 100; i++) {
+    initial_commit_area_size = Pseudorandom();
+    second_commit_area_size = Pseudorandom();
+
+    // With CodeRange.
+    CodeRange* code_range = new CodeRange(isolate);
+    const int code_range_size = 32 * MB;
+    if (!code_range->SetUp(code_range_size)) return;
+
+    VerifyMemoryChunk(isolate,
+                      heap,
+                      code_range,
+                      reserve_area_size,
+                      initial_commit_area_size,
+                      second_commit_area_size,
+                      EXECUTABLE);
+
+    VerifyMemoryChunk(isolate,
+                      heap,
+                      code_range,
+                      reserve_area_size,
+                      initial_commit_area_size,
+                      second_commit_area_size,
+                      NOT_EXECUTABLE);
+    delete code_range;
+
+    // Without CodeRange.
+    code_range = NULL;
+    VerifyMemoryChunk(isolate,
+                      heap,
+                      code_range,
+                      reserve_area_size,
+                      initial_commit_area_size,
+                      second_commit_area_size,
+                      EXECUTABLE);
+
+    VerifyMemoryChunk(isolate,
+                      heap,
+                      code_range,
+                      reserve_area_size,
+                      initial_commit_area_size,
+                      second_commit_area_size,
+                      NOT_EXECUTABLE);
+  }
+}
+
+
 TEST(MemoryAllocator) {
   OS::SetUp();
   Isolate* isolate = Isolate::Current();