Add ability to disable inline bump-pointer allocation.

R=ulan@chromium.org, yurys@chromium.org
TEST=cctest/test-heap/DisableInlineAllocation

Review URL: https://codereview.chromium.org/69953023

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@17752 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
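Background for the change: V8's generated code allocates by bumping a top pointer inside a linear allocation area bounded by a limit; only when top plus the request size would cross the limit does it fall back to a runtime call. Keeping the limit pinned at top therefore forces every allocation through the runtime, which is the trick this patch uses to disable inline allocation without recompiling code. A minimal standalone sketch of that idea (illustrative C++ only, not V8 code):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Toy linear allocation area: the fast path bumps `top`; the slow path
// stands in for the runtime allocator V8 calls when the fast path fails.
struct LinearArea {
  uintptr_t top;
  uintptr_t limit;

  void* SlowAllocateRaw(size_t size) {
    std::printf("slow path for %zu bytes\n", size);
    return nullptr;  // stand-in for a runtime call
  }

  void* AllocateRaw(size_t size) {
    if (top + size <= limit) {  // inline bump-pointer fast path
      void* result = reinterpret_cast<void*>(top);
      top += size;
      return result;
    }
    return SlowAllocateRaw(size);
  }

  // "Disabling" inline allocation is just collapsing the area: with
  // limit == top, the fast-path check can never succeed.
  void DisableInlineAllocation() { limit = top; }
};

int main() {
  unsigned char backing[256];
  LinearArea area = {reinterpret_cast<uintptr_t>(backing),
                     reinterpret_cast<uintptr_t>(backing) + sizeof(backing)};
  area.AllocateRaw(32);            // taken inline
  area.DisableInlineAllocation();
  area.AllocateRaw(32);            // always routed to the slow path now
}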
parent eef8694a7e
commit 9f4591c368
src/heap-profiler.cc

@@ -158,6 +158,7 @@ void HeapProfiler::SetRetainedObjectInfo(UniqueId id,

 void HeapProfiler::StartHeapAllocationsRecording() {
   StartHeapObjectsTracking();
+  heap()->DisableInlineAllocation();
   is_tracking_allocations_ = true;
   DropCompiledCode();
   snapshots_->UpdateHeapObjectsMap();
@@ -166,6 +167,7 @@ void HeapProfiler::StartHeapAllocationsRecording() {

 void HeapProfiler::StopHeapAllocationsRecording() {
   StopHeapObjectsTracking();
+  heap()->EnableInlineAllocation();
   is_tracking_allocations_ = false;
   DropCompiledCode();
 }

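The profiler brackets allocation recording with the new Disable/Enable pair: with inline allocation off, every object is created through the runtime, where the allocation-tracking hooks can observe it. This replaces the earlier approach of checking is_tracking_allocations() inside MacroAssembler::Allocate (removed further down), exactly as the TODO in that code suggested.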
src/heap.cc (27 changed lines)
@@ -114,6 +114,7 @@ Heap::Heap()
       amount_of_external_allocated_memory_(0),
       amount_of_external_allocated_memory_at_last_global_gc_(0),
       old_gen_exhausted_(false),
+      inline_allocation_disabled_(false),
       store_buffer_rebuilder_(store_buffer()),
       hidden_string_(NULL),
       gc_safe_size_of_old_object_(NULL),
@@ -6580,6 +6581,32 @@ intptr_t Heap::PromotedExternalMemorySize() {
 }


+void Heap::EnableInlineAllocation() {
+  ASSERT(inline_allocation_disabled_);
+  inline_allocation_disabled_ = false;
+
+  // Update inline allocation limit for new space.
+  new_space()->UpdateInlineAllocationLimit(0);
+}
+
+
+void Heap::DisableInlineAllocation() {
+  ASSERT(!inline_allocation_disabled_);
+  inline_allocation_disabled_ = true;
+
+  // Update inline allocation limit for new space.
+  new_space()->UpdateInlineAllocationLimit(0);
+
+  // Update inline allocation limit for old spaces.
+  PagedSpaces spaces(this);
+  for (PagedSpace* space = spaces.next();
+       space != NULL;
+       space = spaces.next()) {
+    space->EmptyAllocationInfo();
+  }
+}
+
+
 V8_DECLARE_ONCE(initialize_gc_once);

 static void InitializeGCOnce() {

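Note the asymmetry: DisableInlineAllocation has to actively empty the linear allocation area of every paged space (EmptyAllocationInfo returns the unused area to the free list), whereas EnableInlineAllocation only touches new space. Old spaces need no explicit re-enabling because FreeList::Allocate (below) sets up a fresh linear area on the next allocation once inline_allocation_disabled_ is false again.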
src/heap.h (11 changed lines)
@@ -1547,6 +1547,13 @@ class Heap {
     return Min(limit, halfway_to_the_max);
   }

+  // Indicates whether inline bump-pointer allocation has been disabled.
+  bool inline_allocation_disabled() { return inline_allocation_disabled_; }
+
+  // Switch whether inline bump-pointer allocation should be used.
+  void EnableInlineAllocation();
+  void DisableInlineAllocation();
+
   // Implements the corresponding V8 API function.
   bool IdleNotification(int hint);

@@ -1993,6 +2000,10 @@ class Heap {
   // last GC.
   bool old_gen_exhausted_;

+  // Indicates that inline bump-pointer allocation has been globally disabled
+  // for all spaces. This is used to disable allocations in generated code.
+  bool inline_allocation_disabled_;
+
   // Weak list heads, threaded through the objects.
   // List heads are initialized lazily and contain the undefined_value at start.
   Object* native_contexts_list_;

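The ASSERTs above make the new API a strict, non-reentrant toggle: Disable may only be called while enabled, and vice versa. A hypothetical scoped guard (not part of this patch, shown only to illustrate the balanced-use contract) would look like:

// Hypothetical helper; the patch itself calls the two methods directly
// from HeapProfiler.
class DisableInlineAllocationScope {
 public:
  explicit DisableInlineAllocationScope(Heap* heap) : heap_(heap) {
    heap_->DisableInlineAllocation();  // ASSERTs allocation was enabled
  }
  ~DisableInlineAllocationScope() {
    heap_->EnableInlineAllocation();   // ASSERTs allocation was disabled
  }
 private:
  Heap* heap_;
};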
src/spaces.cc

@@ -1350,7 +1350,6 @@ void NewSpace::Shrink() {
       }
     }
   }
-  allocation_info_.set_limit(to_space_.page_high());
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }

@@ -1359,14 +1358,7 @@ void NewSpace::UpdateAllocationInfo() {
   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
   allocation_info_.set_top(to_space_.page_low());
-  allocation_info_.set_limit(to_space_.page_high());
-
-  // Lower limit during incremental marking.
-  if (heap()->incremental_marking()->IsMarking() &&
-      inline_allocation_limit_step() != 0) {
-    Address new_limit =
-        allocation_info_.top() + inline_allocation_limit_step();
-    allocation_info_.set_limit(Min(new_limit, allocation_info_.limit()));
-  }
+  UpdateInlineAllocationLimit(0);
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }

@@ -1383,6 +1375,26 @@ void NewSpace::ResetAllocationInfo() {
 }


+void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
+  if (heap()->inline_allocation_disabled()) {
+    // Lowest limit when linear allocation was disabled.
+    Address high = to_space_.page_high();
+    Address new_top = allocation_info_.top() + size_in_bytes;
+    allocation_info_.set_limit(Min(new_top, high));
+  } else if (inline_allocation_limit_step() == 0) {
+    // Normal limit is the end of the current page.
+    allocation_info_.set_limit(to_space_.page_high());
+  } else {
+    // Lower limit during incremental marking.
+    Address high = to_space_.page_high();
+    Address new_top = allocation_info_.top() + size_in_bytes;
+    Address new_limit = new_top + inline_allocation_limit_step_;
+    allocation_info_.set_limit(Min(new_limit, high));
+  }
+  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+
 bool NewSpace::AddFreshPage() {
   Address top = allocation_info_.top();
   if (NewSpacePage::IsAtStart(top)) {

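Concretely, UpdateInlineAllocationLimit picks one of three limits. A standalone arithmetic sketch with made-up values (top = 0x1000, page end = 0x4000, request = 0x20, marking step = 0x100):

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  uintptr_t top = 0x1000, high = 0x4000;  // current top / end of page
  uintptr_t size = 0x20, step = 0x100;    // request size / marking step

  // Case 1 (inline allocation disabled): the limit barely admits the current
  // request, so the very next allocation falls back to the slow path again.
  std::printf("disabled: 0x%lx\n",
              static_cast<unsigned long>(std::min(top + size, high)));  // 0x1020

  // Case 2 (no limit step): bump allocation may use the whole page.
  std::printf("normal:   0x%lx\n", static_cast<unsigned long>(high));   // 0x4000

  // Case 3 (incremental marking): the limit is pulled down so allocation
  // yields to a marking step after roughly `step` more bytes.
  std::printf("marking:  0x%lx\n",
              static_cast<unsigned long>(std::min(top + size + step, high)));  // 0x1120
}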
@@ -1417,18 +1429,16 @@ bool NewSpace::AddFreshPage() {

 MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
   Address old_top = allocation_info_.top();
-  Address new_top = old_top + size_in_bytes;
   Address high = to_space_.page_high();
   if (allocation_info_.limit() < high) {
-    // Incremental marking has lowered the limit to get a
-    // chance to do a step.
-    Address new_limit = Min(
-        allocation_info_.limit() + inline_allocation_limit_step_,
-        high);
-    allocation_info_.set_limit(new_limit);
+    // Either the limit has been lowered because linear allocation was disabled
+    // or because incremental marking wants to get a chance to do a step. Set
+    // the new limit accordingly.
+    Address new_top = old_top + size_in_bytes;
     int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
     heap()->incremental_marking()->Step(
         bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
+    UpdateInlineAllocationLimit(size_in_bytes);
     top_on_previous_step_ = new_top;
     return AllocateRaw(size_in_bytes);
   } else if (AddFreshPage()) {

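A limit below the page end now means one of two things, and SlowAllocateRaw no longer needs to distinguish them: it performs a marking step (effectively a no-op when marking is inactive), recomputes the limit via UpdateInlineAllocationLimit(size_in_bytes), and retries. In the disabled case the recomputed limit admits exactly the pending request, so the retried AllocateRaw succeeds once and the area is immediately exhausted again.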
@@ -2374,7 +2384,7 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   int new_node_size = 0;
   FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
   if (new_node == NULL) {
-    owner_->SetTop(NULL, NULL);
+    owner_->SetTopAndLimit(NULL, NULL);
     return NULL;
   }

@@ -2399,26 +2409,31 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   // a little of this again immediately - see below.
   owner_->Allocate(new_node_size);

-  if (bytes_left > kThreshold &&
-      owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
-      FLAG_incremental_marking_steps) {
+  if (owner_->heap()->inline_allocation_disabled()) {
+    // Keep the linear allocation area empty if requested to do so, just
+    // return area back to the free list instead.
+    owner_->Free(new_node->address() + size_in_bytes, bytes_left);
+    ASSERT(owner_->top() == NULL && owner_->limit() == NULL);
+  } else if (bytes_left > kThreshold &&
+             owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
+             FLAG_incremental_marking_steps) {
     int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
     // We don't want to give too large linear areas to the allocator while
     // incremental marking is going on, because we won't check again whether
     // we want to do another increment until the linear area is used up.
     owner_->Free(new_node->address() + size_in_bytes + linear_size,
                  new_node_size - size_in_bytes - linear_size);
-    owner_->SetTop(new_node->address() + size_in_bytes,
-                   new_node->address() + size_in_bytes + linear_size);
+    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
+                           new_node->address() + size_in_bytes + linear_size);
   } else if (bytes_left > 0) {
     // Normally we give the rest of the node to the allocator as its new
     // linear allocation area.
-    owner_->SetTop(new_node->address() + size_in_bytes,
-                   new_node->address() + new_node_size);
+    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
+                           new_node->address() + new_node_size);
   } else {
     // TODO(gc) Try not freeing linear allocation region when bytes_left
     // are zero.
-    owner_->SetTop(NULL, NULL);
+    owner_->SetTopAndLimit(NULL, NULL);
   }

   return new_node;

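This is the paged-space half of the feature: with inline allocation disabled, the leftover tail of each free-list node goes straight back to the free list and top/limit stay NULL, so the owner never accumulates a linear allocation area and every subsequent allocation is a fresh free-list search.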
@@ -2507,11 +2522,7 @@ intptr_t FreeList::SumFreeLists() {
 void PagedSpace::PrepareForMarkCompact() {
   // We don't have a linear allocation area while sweeping. It will be restored
   // on the first allocation after the sweep.
-  // Mark the old linear allocation area with a free space map so it can be
-  // skipped when scanning the heap.
-  int old_linear_size = static_cast<int>(limit() - top());
-  Free(top(), old_linear_size);
-  SetTop(NULL, NULL);
+  EmptyAllocationInfo();

   // Stop lazy sweeping and clear marking bits for unswept pages.
   if (first_unswept_page_ != NULL) {

src/spaces.h (21 changed lines)
@@ -1783,7 +1783,7 @@ class PagedSpace : public Space {
   }

   // Set space allocation info.
-  void SetTop(Address top, Address limit) {
+  void SetTopAndLimit(Address top, Address limit) {
     ASSERT(top == limit ||
            Page::FromAddress(top) == Page::FromAddress(limit - 1));
     MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
@@ -1791,6 +1791,15 @@ class PagedSpace : public Space {
     allocation_info_.set_limit(limit);
   }

+  // Empty space allocation info, returning unused area to free list.
+  void EmptyAllocationInfo() {
+    // Mark the old linear allocation area with a free space map so it can be
+    // skipped when scanning the heap.
+    int old_linear_size = static_cast<int>(limit() - top());
+    Free(top(), old_linear_size);
+    SetTopAndLimit(NULL, NULL);
+  }
+
   void Allocate(int bytes) {
     accounting_stats_.AllocateBytes(bytes);
   }
@@ -2478,16 +2487,10 @@ class NewSpace : public Space {
   // Reset the allocation pointer to the beginning of the active semispace.
   void ResetAllocationInfo();

+  void UpdateInlineAllocationLimit(int size_in_bytes);
   void LowerInlineAllocationLimit(intptr_t step) {
     inline_allocation_limit_step_ = step;
-    if (step == 0) {
-      allocation_info_.set_limit(to_space_.page_high());
-    } else {
-      Address new_limit = Min(
-          allocation_info_.top() + inline_allocation_limit_step_,
-          allocation_info_.limit());
-      allocation_info_.set_limit(new_limit);
-    }
+    UpdateInlineAllocationLimit(0);
     top_on_previous_step_ = allocation_info_.top();
   }

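Routing LowerInlineAllocationLimit through UpdateInlineAllocationLimit means every recomputation of the new-space limit now consults inline_allocation_disabled(), so toggling the incremental-marking step can no longer silently re-open the linear allocation area while allocation is disabled.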
src/x64/macro-assembler-x64.cc

@@ -4081,10 +4081,7 @@ void MacroAssembler::Allocate(int object_size,
                               AllocationFlags flags) {
   ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
   ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
-  if (!FLAG_inline_new ||
-      // TODO(mstarzinger): Implement more efficiently by keeping the
-      // bump-pointer allocation area empty instead of recompiling code.
-      isolate()->heap_profiler()->is_tracking_allocations()) {
+  if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
       movl(result, Immediate(0x7091));
@@ -4164,10 +4161,7 @@ void MacroAssembler::Allocate(Register object_size,
                               Label* gc_required,
                               AllocationFlags flags) {
   ASSERT((flags & SIZE_IN_WORDS) == 0);
-  if (!FLAG_inline_new ||
-      // TODO(mstarzinger): Implement more efficiently by keeping the
-      // bump-pointer allocation area empty instead of recompiling code.
-      isolate()->heap_profiler()->is_tracking_allocations()) {
+  if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
       movl(result, Immediate(0x7091));

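With the heap-side mechanism in place, generated code no longer needs a dedicated is_tracking_allocations() check: the TODO these hunks delete is resolved exactly as it proposed. When allocation is disabled, the inline allocation attempt emitted by MacroAssembler::Allocate simply never finds room below the limit and branches to gc_required, that is, into the runtime.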
test/cctest/cctest.h

@@ -332,6 +332,7 @@ static inline int FlagDependentPortOffset() {
 static inline void SimulateFullSpace(v8::internal::NewSpace* space) {
   int new_linear_size = static_cast<int>(
       *space->allocation_limit_address() - *space->allocation_top_address());
+  if (new_linear_size == 0) return;
   v8::internal::MaybeObject* maybe = space->AllocateRaw(new_linear_size);
   v8::internal::FreeListNode* node = v8::internal::FreeListNode::cast(maybe);
   node->set_size(space->heap(), new_linear_size);
@@ -340,9 +341,7 @@ static inline void SimulateFullSpace(v8::internal::NewSpace* space) {

 // Helper function that simulates a full old-space in the heap.
 static inline void SimulateFullSpace(v8::internal::PagedSpace* space) {
-  int old_linear_size = static_cast<int>(space->limit() - space->top());
-  space->Free(space->top(), old_linear_size);
-  space->SetTop(space->limit(), space->limit());
+  space->EmptyAllocationInfo();
   space->ResetFreeList();
   space->ClearStats();
 }

test/cctest/test-heap.cc

@@ -3516,3 +3516,36 @@ TEST(IncrementalMarkingStepMakesBigProgressWithLargeObjects) {
   marking->Step(100 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
   ASSERT(marking->IsComplete());
 }
+
+
+TEST(DisableInlineAllocation) {
+  i::FLAG_allow_natives_syntax = true;
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+  CompileRun("function test() {"
+             "  var x = [];"
+             "  for (var i = 0; i < 10; i++) {"
+             "    x[i] = [ {}, [1,2,3], [1,x,3] ];"
+             "  }"
+             "}"
+             "function run() {"
+             "  %OptimizeFunctionOnNextCall(test);"
+             "  test();"
+             "  %DeoptimizeFunction(test);"
+             "}");
+
+  // Warm-up with inline allocation enabled.
+  CompileRun("test(); test(); run();");
+
+  // Run test with inline allocation disabled.
+  CcTest::heap()->DisableInlineAllocation();
+  CompileRun("run()");
+
+  // Run test with inline allocation disabled and pretenuring.
+  CcTest::heap()->SetNewSpaceHighPromotionModeActive(true);
+  CompileRun("run()");
+
+  // Run test with inline allocation re-enabled.
+  CcTest::heap()->EnableInlineAllocation();
+  CompileRun("run()");
+}

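The test drives the same optimized code (run() forces optimization of test(), then deoptimizes it) through four heap configurations: inline allocation on, off, off with pretenuring (SetNewSpaceHighPromotionModeActive steers new-space allocations toward old space), and on again. Passing in all four shows that optimized code keeps working when its inline allocations are forced through the runtime.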