Attempt to collect more garbage before panicking with out of memory.

Currently weak handles retain an object for another GC round (oftentimes
a major GC round).  Instrumenting Chromium shows that navigation leaves
many global objects which are only collected on the next pass.  Let's
attempt to collect more garbage when approaching an OOM condition.

This is a better version of the rolled-out r5455: it now correctly
rebuilds object groups between additional GCs.

Review URL: http://codereview.chromium.org/4295004

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@5761 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
antonm@chromium.org 2010-11-03 13:00:28 +00:00
parent ad605a1bcc
commit 20938fc53c
5 changed files with 65 additions and 11 deletions
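
The core idea of the change, as described in the commit message: a full GC runs the
weak handle callbacks, but whatever those callbacks release only becomes collectible
on a later full GC, so near OOM it pays to run several major GCs back to back until a
pass reports no further progress.  A minimal standalone sketch of that loop follows
(not the V8 code itself; CollectOnePass() is a hypothetical stand-in for one major GC
pass that returns true when a weak callback fired and another pass might reclaim more):

// Hypothetical sketch of the bounded retry loop this change introduces.
bool CollectOnePass();  // placeholder: one major GC; true = another pass may help

void CollectUntilNoProgress() {
  // Weak callbacks can run arbitrary code and create new garbage, so the
  // loop is capped rather than run to a fixed point.
  const int kMaxNumberOfAttempts = 7;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; ++attempt) {
    if (!CollectOnePass()) break;  // no callback fired: nothing more to gain
  }
}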

@@ -372,13 +372,14 @@ void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback f) {
int post_gc_processing_count = 0;
void GlobalHandles::PostGarbageCollectionProcessing() {
bool GlobalHandles::PostGarbageCollectionProcessing() {
// Process weak global handle callbacks. This must be done after the
// GC is completely done, because the callbacks may invoke arbitrary
// API functions.
// At the same time deallocate all DESTROYED nodes.
ASSERT(Heap::gc_state() == Heap::NOT_IN_GC);
const int initial_post_gc_processing_count = ++post_gc_processing_count;
bool next_gc_could_collect_more = false;
Node** p = &head_;
while (*p != NULL) {
if ((*p)->PostGarbageCollectionProcessing()) {
@@ -399,6 +400,7 @@ void GlobalHandles::PostGarbageCollectionProcessing() {
}
node->set_next_free(first_deallocated());
set_first_deallocated(node);
next_gc_could_collect_more = true;
} else {
p = (*p)->next_addr();
}
@@ -407,6 +409,8 @@ void GlobalHandles::PostGarbageCollectionProcessing() {
if (first_deallocated()) {
first_deallocated()->set_next(head());
}
return next_gc_could_collect_more;
}
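
The return value added above reports whether any weak callback actually ran.  Why that
matters is easiest to see from the embedder side: a typical weak callback just frees
the native peer and disposes the persistent handle, so the JS object it was pinning can
only be reclaimed by a subsequent collection.  A rough sketch using the classic
Persistent/MakeWeak embedder API of that era (NativeWrapper and AttachWeakWrapper are
made-up names for illustration):

#include <v8.h>

struct NativeWrapper { /* hypothetical native peer of a JS object */ };

// Runs during GlobalHandles::PostGarbageCollectionProcessing(); the object
// it stops pinning only becomes reclaimable on a later GC.
static void OnWeak(v8::Persistent<v8::Value> handle, void* parameter) {
  delete static_cast<NativeWrapper*>(parameter);  // free the native side
  handle.Dispose();                               // handle no longer pins the object
}

static void AttachWeakWrapper(v8::Handle<v8::Object> obj) {
  v8::Persistent<v8::Object> wrapper = v8::Persistent<v8::Object>::New(obj);
  wrapper.MakeWeak(new NativeWrapper, OnWeak);
}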

@@ -96,7 +96,8 @@ class GlobalHandles : public AllStatic {
static bool IsWeak(Object** location);
// Process pending weak handles.
static void PostGarbageCollectionProcessing();
// Returns true if next major GC is likely to collect more garbage.
static bool PostGarbageCollectionProcessing();
// Iterates over all strong handles.
static void IterateStrongRoots(ObjectVisitor* v);

@@ -330,6 +330,11 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
}
bool Heap::CollectGarbage(AllocationSpace space) {
return CollectGarbage(space, SelectGarbageCollector(space));
}
MaybeObject* Heap::PrepareForCompare(String* str) {
// Always flatten small strings and force flattening of long strings
// after we have accumulated a certain amount we failed to flatten.
@@ -413,7 +418,7 @@ void Heap::SetLastScriptId(Object* last_script_id) {
} \
if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
Counters::gc_last_resort_from_handles.Increment(); \
Heap::CollectAllGarbage(false); \
Heap::CollectAllAvailableGarbage(); \
{ \
AlwaysAllocateScope __scope__; \
__maybe_object__ = FUNCTION_CALL; \
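
The macro hunk above is the trigger point: this is V8's allocate-then-retry path, and
its last-resort branch now calls Heap::CollectAllAvailableGarbage() before the final
allocation attempt under AlwaysAllocateScope.  Unrolled into ordinary code, the
sequence looks roughly like this (placeholder names, not the actual macro expansion):

#include <cstddef>

// Placeholders standing in for the pieces the CALL_AND_RETRY macro stitches
// together; none of these names exist in V8.
void* TryAllocate(std::size_t size);
void CollectGarbageForSpace();
void CollectAllAvailable();
void ReportFatalOutOfMemory();

void* AllocateOrGiveUp(std::size_t size) {
  if (void* result = TryAllocate(size)) return result;   // fast path
  CollectGarbageForSpace();                   // one ordinary GC, then retry
  if (void* result = TryAllocate(size)) return result;
  CollectAllAvailable();                      // new: squeeze out weakly held garbage
  if (void* result = TryAllocate(size)) return result;   // final attempt
  ReportFatalOutOfMemory();                   // only now give up
  return NULL;
}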

@@ -429,7 +429,31 @@ void Heap::CollectAllGarbage(bool force_compaction) {
}
void Heap::CollectGarbage(AllocationSpace space) {
void Heap::CollectAllAvailableGarbage() {
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
MarkCompactCollector::SetForceCompaction(true);
// Major GC would invoke weak handle callbacks on weakly reachable
// handles, but won't collect weakly reachable objects until next
// major GC. Therefore if we collect aggressively and weak handle callback
// has been invoked, we rerun major GC to release objects which become
// garbage.
// Note: as weak callbacks can execute arbitrary code, we cannot
// hope that eventually there will be no weak callbacks invocations.
// Therefore stop recollecting after several attempts.
const int kMaxNumberOfAttempts = 7;
for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
break;
}
}
MarkCompactCollector::SetForceCompaction(false);
}
bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
// The VM is in the GC state until exiting this function.
VMState state(GC);
@@ -442,13 +466,14 @@ void Heap::CollectGarbage(AllocationSpace space) {
allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif
bool next_gc_likely_to_collect_more = false;
{ GCTracer tracer;
GarbageCollectionPrologue();
// The GC count was incremented in the prologue. Tell the tracer about
// it.
tracer.set_gc_count(gc_count_);
GarbageCollector collector = SelectGarbageCollector(space);
// Tell the tracer which collector we've selected.
tracer.set_collector(collector);
@@ -456,7 +481,8 @@ void Heap::CollectGarbage(AllocationSpace space) {
? &Counters::gc_scavenger
: &Counters::gc_compactor;
rate->Start();
PerformGarbageCollection(collector, &tracer);
next_gc_likely_to_collect_more =
PerformGarbageCollection(collector, &tracer);
rate->Stop();
GarbageCollectionEpilogue();
@@ -467,6 +493,8 @@ void Heap::CollectGarbage(AllocationSpace space) {
if (FLAG_log_gc) HeapProfiler::WriteSample();
if (CpuProfiler::is_profiling()) CpuProfiler::ProcessMovedFunctions();
#endif
return next_gc_likely_to_collect_more;
}
@@ -653,8 +681,10 @@ void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
survival_rate_ = survival_rate;
}
void Heap::PerformGarbageCollection(GarbageCollector collector,
bool Heap::PerformGarbageCollection(GarbageCollector collector,
GCTracer* tracer) {
bool next_gc_likely_to_collect_more = false;
if (collector != SCAVENGER) {
PROFILE(CodeMovingGCEvent());
}
@@ -720,7 +750,8 @@ void Heap::PerformGarbageCollection(GarbageCollector collector,
if (collector == MARK_COMPACTOR) {
DisableAssertNoAllocation allow_allocation;
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
GlobalHandles::PostGarbageCollectionProcessing();
next_gc_likely_to_collect_more =
GlobalHandles::PostGarbageCollectionProcessing();
}
// Update relocatables.
@@ -747,6 +778,8 @@ void Heap::PerformGarbageCollection(GarbageCollector collector,
global_gc_epilogue_callback_();
}
VerifySymbolTable();
return next_gc_likely_to_collect_more;
}

@@ -705,13 +705,22 @@ class Heap : public AllStatic {
static void GarbageCollectionEpilogue();
// Performs garbage collection operation.
// Returns whether required_space bytes are available after the collection.
static void CollectGarbage(AllocationSpace space);
// Returns whether there is a chance that another major GC could
// collect more garbage.
static bool CollectGarbage(AllocationSpace space, GarbageCollector collector);
// Performs garbage collection operation.
// Returns whether there is a chance that another major GC could
// collect more garbage.
inline static bool CollectGarbage(AllocationSpace space);
// Performs a full garbage collection. Force compaction if the
// parameter is true.
static void CollectAllGarbage(bool force_compaction);
// Last hope GC, should try to squeeze as much as possible.
static void CollectAllAvailableGarbage();
// Notify the heap that a context has been disposed.
static int NotifyContextDisposed() { return ++contexts_disposed_; }
@@ -1246,7 +1255,9 @@
static GarbageCollector SelectGarbageCollector(AllocationSpace space);
// Performs garbage collection
static void PerformGarbageCollection(GarbageCollector collector,
// Returns whether there is a chance another major GC could
// collect more garbage.
static bool PerformGarbageCollection(GarbageCollector collector,
GCTracer* tracer);
// Allocate an uninitialized object in map space. The behavior is identical