Reserve code range block for evacuation.

If we run out of code range, the GC cannot compact the code space because it
cannot allocate a new page for evacuation. This causes code space
fragmentation and can lead to OOM crashes.

BUG=chromium:430118
LOG=Y

Review URL: https://codereview.chromium.org/742733002

Cr-Commit-Position: refs/heads/master@{#25441}
ulan 2014-11-20 06:52:23 -08:00 committed by Commit bot
parent b11825fe94
commit d703e87531
4 changed files with 78 additions and 23 deletions
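In outline, the change keeps one code-range page in reserve at all times except during evacuation: the block is re-reserved at the end of every mark-compact (SweepSpaces) and handed back to the allocator just before the collector allocates an evacuation page (CreateEmergencyMemory). Below is a minimal standalone sketch of that lifecycle; ToyCodeRange and its page counting are invented for illustration and are not V8's CodeRange.

#include <cassert>
#include <cstddef>

// Toy model of the idea, not V8 code: a bounded "code range" that keeps one
// page in reserve so compaction can always allocate an evacuation page, even
// when the range is otherwise exhausted.
class ToyCodeRange {
 public:
  explicit ToyCodeRange(size_t pages) : free_pages_(pages), emergency_(0) {
    ReserveEmergencyBlock();  // mirrors the reservation done in SetUp()
  }

  // Ordinary code-page allocation; may legitimately fail when the range is full.
  bool AllocatePage() {
    if (free_pages_ == 0) return false;
    --free_pages_;
    return true;
  }

  // Called just before the compactor allocates an evacuation page.
  void ReleaseEmergencyBlock() {
    free_pages_ += emergency_;
    emergency_ = 0;
  }

  // Called right after GC, once evacuated pages have been returned.
  void ReserveEmergencyBlock() {
    if (emergency_ == 0 && free_pages_ > 0) {
      --free_pages_;
      emergency_ = 1;
    }
  }

 private:
  size_t free_pages_;
  size_t emergency_;
};

int main() {
  ToyCodeRange range(4);
  while (range.AllocatePage()) {
    // Exhaust the range; the emergency page stays held back.
  }

  // Compaction path: release the reserve, grab the evacuation page, and
  // re-reserve once the GC cycle is done.
  range.ReleaseEmergencyBlock();
  assert(range.AllocatePage());  // succeeds even though the range was "full"
  range.ReserveEmergencyBlock();
  return 0;
}

In the actual patch the "page" is a FreeBlock of MemoryAllocator::CodePageAreaSize() bytes carved from CodeRange's allocation list, as the diffs below show.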


@@ -4179,6 +4179,10 @@ void MarkCompactCollector::SweepSpaces() {
   // Deallocate evacuated candidate pages.
   ReleaseEvacuationCandidates();
+  CodeRange* code_range = heap()->isolate()->code_range();
+  if (code_range != NULL && code_range->valid()) {
+    code_range->ReserveEmergencyBlock();
+  }
 
   if (FLAG_print_cumulative_gc_stat) {
     heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() -


@@ -93,7 +93,8 @@ CodeRange::CodeRange(Isolate* isolate)
       code_range_(NULL),
       free_list_(0),
       allocation_list_(0),
-      current_allocation_block_index_(0) {}
+      current_allocation_block_index_(0),
+      emergency_block_() {}
 
 
 bool CodeRange::SetUp(size_t requested) {
@@ -144,6 +145,7 @@ bool CodeRange::SetUp(size_t requested) {
   current_allocation_block_index_ = 0;
   LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
+  ReserveEmergencyBlock();
   return true;
 }
@@ -202,35 +204,20 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size,
                                      const size_t commit_size,
                                      size_t* allocated) {
   DCHECK(commit_size <= requested_size);
-  DCHECK(allocation_list_.length() == 0 ||
-         current_allocation_block_index_ < allocation_list_.length());
-  if (allocation_list_.length() == 0 ||
-      requested_size > allocation_list_[current_allocation_block_index_].size) {
-    // Find an allocation block large enough.
-    if (!GetNextAllocationBlock(requested_size)) return NULL;
-  }
-  // Commit the requested memory at the start of the current allocation block.
-  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
-  FreeBlock current = allocation_list_[current_allocation_block_index_];
-  if (aligned_requested >= (current.size - Page::kPageSize)) {
-    // Don't leave a small free block, useless for a large object or chunk.
-    *allocated = current.size;
-  } else {
-    *allocated = aligned_requested;
+  FreeBlock current;
+  if (!ReserveBlock(requested_size, &current)) {
+    *allocated = 0;
+    return NULL;
   }
+  *allocated = current.size;
   DCHECK(*allocated <= current.size);
   DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
   if (!isolate_->memory_allocator()->CommitExecutableMemory(
           code_range_, current.start, commit_size, *allocated)) {
     *allocated = 0;
+    ReleaseBlock(&current);
     return NULL;
   }
-  allocation_list_[current_allocation_block_index_].start += *allocated;
-  allocation_list_[current_allocation_block_index_].size -= *allocated;
-  if (*allocated == current.size) {
-    // This block is used up, get the next one.
-    GetNextAllocationBlock(0);
-  }
   return current.start;
 }
@@ -260,6 +247,49 @@ void CodeRange::TearDown() {
 }
 
 
+bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
+  DCHECK(allocation_list_.length() == 0 ||
+         current_allocation_block_index_ < allocation_list_.length());
+  if (allocation_list_.length() == 0 ||
+      requested_size > allocation_list_[current_allocation_block_index_].size) {
+    // Find an allocation block large enough.
+    if (!GetNextAllocationBlock(requested_size)) return false;
+  }
+  // Commit the requested memory at the start of the current allocation block.
+  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
+  *block = allocation_list_[current_allocation_block_index_];
+  // Don't leave a small free block, useless for a large object or chunk.
+  if (aligned_requested < (block->size - Page::kPageSize)) {
+    block->size = aligned_requested;
+  }
+  DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
+  allocation_list_[current_allocation_block_index_].start += block->size;
+  allocation_list_[current_allocation_block_index_].size -= block->size;
+  return true;
+}
+
+
+void CodeRange::ReleaseBlock(const FreeBlock* block) { free_list_.Add(*block); }
+
+
+void CodeRange::ReserveEmergencyBlock() {
+  const size_t requested_size = MemoryAllocator::CodePageAreaSize();
+  if (emergency_block_.size == 0) {
+    ReserveBlock(requested_size, &emergency_block_);
+  } else {
+    DCHECK(emergency_block_.size >= requested_size);
+  }
+}
+
+
+void CodeRange::ReleaseEmergencyBlock() {
+  if (emergency_block_.size != 0) {
+    ReleaseBlock(&emergency_block_);
+    emergency_block_.size = 0;
+  }
+}
+
+
 // -----------------------------------------------------------------------------
 // MemoryAllocator
 //
@@ -1106,6 +1136,14 @@ void PagedSpace::ReleasePage(Page* page) {
 void PagedSpace::CreateEmergencyMemory() {
+  if (identity() == CODE_SPACE) {
+    // Make the emergency block available to the allocator.
+    CodeRange* code_range = heap()->isolate()->code_range();
+    if (code_range != NULL && code_range->valid()) {
+      code_range->ReleaseEmergencyBlock();
+    }
+    DCHECK(MemoryAllocator::CodePageAreaSize() == AreaSize());
+  }
   emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk(
       AreaSize(), AreaSize(), executable(), this);
 }
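
The refactoring extracts the old carving policy of AllocateRawMemory() into ReserveBlock(): round the request up to the chunk alignment and, if the tail left in the current free block would be smaller than a page, hand out the whole block. The following is a standalone sketch of just that policy; the Carve() helper and the constants are stand-ins, not V8's MemoryChunk::kAlignment and Page::kPageSize.

#include <cassert>
#include <cstddef>

// Stand-in sizes; the real values come from MemoryChunk::kAlignment and
// Page::kPageSize.
const size_t kAlignment = 4 * 1024;
const size_t kPageSize = 1024 * 1024;

struct FreeBlock {
  size_t start;
  size_t size;
};

size_t RoundUp(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

// Same policy as ReserveBlock(): don't leave a tail smaller than one page,
// because it would be useless for a large object or a chunk.
FreeBlock Carve(const FreeBlock& current, size_t requested) {
  size_t aligned = RoundUp(requested, kAlignment);
  FreeBlock block = current;
  if (aligned < current.size - kPageSize) {
    block.size = aligned;  // enough remains for at least one more page
  }
  return block;  // otherwise take the whole block
}

int main() {
  FreeBlock big = {0, 8 * kPageSize};
  assert(Carve(big, 3 * kAlignment).size == 3 * kAlignment);

  // A request that would leave less than a page behind takes everything.
  FreeBlock snug = {0, kPageSize + kAlignment};
  assert(Carve(snug, kPageSize).size == snug.size);
  return 0;
}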


@@ -900,6 +900,9 @@ class CodeRange {
   bool UncommitRawMemory(Address start, size_t length);
   void FreeRawMemory(Address buf, size_t length);
 
+  void ReserveEmergencyBlock();
+  void ReleaseEmergencyBlock();
+
  private:
   Isolate* isolate_;
@@ -908,6 +911,7 @@ class CodeRange {
   // Plain old data class, just a struct plus a constructor.
   class FreeBlock {
    public:
+    FreeBlock() : start(0), size(0) {}
     FreeBlock(Address start_arg, size_t size_arg)
         : start(start_arg), size(size_arg) {
       DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
@@ -932,6 +936,12 @@ class CodeRange {
   List<FreeBlock> allocation_list_;
   int current_allocation_block_index_;
 
+  // Emergency block guarantees that we can always allocate a page for
+  // evacuation candidates when code space is compacted. Emergency block is
+  // reserved immediately after GC and is released immediately before
+  // allocating a page for evacuation.
+  FreeBlock emergency_block_;
+
   // Finds a block on the allocation list that contains at least the
   // requested amount of memory. If none is found, sorts and merges
   // the existing free memory blocks, and searches again.
@@ -940,6 +950,8 @@ class CodeRange {
   // Compares the start addresses of two free blocks.
   static int CompareFreeBlockAddress(const FreeBlock* left,
                                      const FreeBlock* right);
+  bool ReserveBlock(const size_t requested_size, FreeBlock* block);
+  void ReleaseBlock(const FreeBlock* block);
 
   DISALLOW_COPY_AND_ASSIGN(CodeRange);
 };


@@ -216,7 +216,8 @@ TEST(Regress3540) {
   if (!code_range->SetUp(
           code_range_size +
           RoundUp(v8::base::OS::CommitPageSize() * kReservedCodeRangePages,
-                  MemoryChunk::kAlignment))) {
+                  MemoryChunk::kAlignment) +
+              v8::internal::MemoryAllocator::CodePageAreaSize())) {
     return;
   }
   Address address;
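
Since SetUp() now carves out the emergency block immediately, Regress3540 has to request one extra code page area or its carefully sized range would come up short. A tiny sketch of that accounting with invented sizes (the real test combines CommitPageSize(), kReservedCodeRangePages and MemoryAllocator::CodePageAreaSize()):

#include <cassert>
#include <cstddef>

int main() {
  // Invented sizes standing in for the quantities the test combines.
  const size_t kCodePageAreaSize = 1024 * 1024;
  const size_t wanted_usable = 3 * kCodePageAreaSize;  // what the test plans to fill

  // SetUp() now reserves one code page area out of whatever it is given, so a
  // request sized only for the usable part falls one page area short...
  size_t old_request = wanted_usable;
  assert(old_request - kCodePageAreaSize < wanted_usable);

  // ...while padding the request by CodePageAreaSize() restores the old
  // usable size the test relies on.
  size_t new_request = wanted_usable + kCodePageAreaSize;
  assert(new_request - kCodePageAreaSize == wanted_usable);
  return 0;
}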