Revert r5455 from bleeding_edge: attempt to collect more garbage
before panicking with out of memory.

TBR=antonm@chromium.org
Review URL: http://codereview.chromium.org/4034002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@5681 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Author: ager@chromium.org
Date:   2010-10-21 06:16:52 +00:00
Commit: 598de609a2 (parent 9bcdac5fef)

5 changed files with 39 additions and 85 deletions

src/global-handles.cc

@@ -372,14 +372,13 @@ void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback f) {
 int post_gc_processing_count = 0;
 
-bool GlobalHandles::PostGarbageCollectionProcessing() {
+void GlobalHandles::PostGarbageCollectionProcessing() {
   // Process weak global handle callbacks. This must be done after the
   // GC is completely done, because the callbacks may invoke arbitrary
   // API functions.
   // At the same time deallocate all DESTROYED nodes.
   ASSERT(Heap::gc_state() == Heap::NOT_IN_GC);
   const int initial_post_gc_processing_count = ++post_gc_processing_count;
-  bool weak_callback_invoked = false;
   Node** p = &head_;
   while (*p != NULL) {
     if ((*p)->PostGarbageCollectionProcessing()) {
@@ -390,7 +389,6 @@ bool GlobalHandles::PostGarbageCollectionProcessing() {
         // restart the processing).
         break;
       }
-      weak_callback_invoked = true;
     }
     if ((*p)->state_ == Node::DESTROYED) {
       // Delete the link.
@@ -409,7 +407,6 @@ bool GlobalHandles::PostGarbageCollectionProcessing() {
   if (first_deallocated()) {
     first_deallocated()->set_next(head());
   }
-  return weak_callback_invoked;
 }
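
For context: the hunks above drop the bool result that r5455 threaded
through this function. Below is a minimal standalone sketch, not V8 code,
of the contract that return value carried: the caller reruns a full
collection while weak callbacks keep firing, capped so arbitrary callback
code cannot loop forever. Every name in the sketch is an invented stand-in.

    #include <cstdio>

    static int pending_callbacks = 3;  // pretend three weak callbacks are queued

    // Stand-in for the r5455 shape of
    // GlobalHandles::PostGarbageCollectionProcessing(): returns true if any
    // weak callback was invoked during this round.
    static bool PostGarbageCollectionProcessing() {
      if (pending_callbacks == 0) return false;
      --pending_callbacks;  // a callback runs and may free objects
      return true;
    }

    // Stand-in for a full mark-compact collection.
    static void MarkCompact() { std::printf("mark-compact\n"); }

    int main() {
      const int kMaxNumberOfAttempts = 7;  // same cap the reverted code used
      for (int attempt = 0; attempt < kMaxNumberOfAttempts; ++attempt) {
        if (!PostGarbageCollectionProcessing()) break;  // quiescent: stop
        MarkCompact();  // callbacks may have freed objects; collect again
      }
      return 0;
    }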

src/global-handles.h

@@ -95,9 +95,8 @@ class GlobalHandles : public AllStatic {
   // Tells whether global handle is weak.
   static bool IsWeak(Object** location);
 
-  // Process pending weak handles. Returns true if any weak handle
-  // callback has been invoked.
-  static bool PostGarbageCollectionProcessing();
+  // Process pending weak handles.
+  static void PostGarbageCollectionProcessing();
 
   // Iterates over all strong handles.
   static void IterateStrongRoots(ObjectVisitor* v);

src/heap-inl.h

@@ -35,16 +35,6 @@
 namespace v8 {
 namespace internal {
 
-void Heap::UpdateOldSpaceLimits() {
-  intptr_t old_gen_size = PromotedSpaceSize();
-  old_gen_promotion_limit_ =
-      old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
-  old_gen_allocation_limit_ =
-      old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
-  old_gen_exhausted_ = false;
-}
-
 int Heap::MaxObjectSizeInPagedSpace() {
   return Page::kMaxHeapObjectSize;
 }
@@ -421,7 +411,7 @@ void Heap::SetLastScriptId(Object* last_script_id) {
   } \
   if (!__object__->IsRetryAfterGC()) RETURN_EMPTY; \
   Counters::gc_last_resort_from_handles.Increment(); \
-  Heap::CollectAllAvailableGarbage(); \
+  Heap::CollectAllGarbage(false); \
   { \
     AlwaysAllocateScope __scope__; \
     __object__ = FUNCTION_CALL; \
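
These macro lines are V8's last-resort allocation path; after the revert the
last resort is a plain CollectAllGarbage(false) instead of
CollectAllAvailableGarbage(). A self-contained sketch of the same retry
protocol, with stub types standing in for the real heap:

    #include <cstdlib>

    // Stub heap, standing in for the real Heap class.
    struct Heap {
      static void* TryAllocate(std::size_t n) { return std::malloc(n); }
      static void CollectGarbageInSpace() {}  // cf. CollectGarbage(space)
      static void CollectAllGarbage() {}      // cf. CollectAllGarbage(false)
    };

    // Same shape as CALL_AND_RETRY: allocate, GC the failing space and
    // retry, then one full collection as the last resort before reporting
    // out of memory.
    void* AllocateWithRetry(std::size_t n) {
      void* p = Heap::TryAllocate(n);
      if (p != NULL) return p;
      Heap::CollectGarbageInSpace();  // first retry after a space-local GC
      p = Heap::TryAllocate(n);
      if (p != NULL) return p;
      Heap::CollectAllGarbage();      // last resort: full collection
      return Heap::TryAllocate(n);    // NULL here means out of memory
    }

    int main() {
      void* p = AllocateWithRetry(64);
      std::free(p);
      return 0;
    }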

src/heap.cc

@@ -56,6 +56,7 @@ String* Heap::hidden_symbol_;
 Object* Heap::roots_[Heap::kRootListLength];
 Object* Heap::global_contexts_list_;
+
 NewSpace Heap::new_space_;
 OldSpace* Heap::old_pointer_space_ = NULL;
 OldSpace* Heap::old_data_space_ = NULL;
@@ -64,6 +65,9 @@ MapSpace* Heap::map_space_ = NULL;
 CellSpace* Heap::cell_space_ = NULL;
 LargeObjectSpace* Heap::lo_space_ = NULL;
 
+static const intptr_t kMinimumPromotionLimit = 2 * MB;
+static const intptr_t kMinimumAllocationLimit = 8 * MB;
+
 intptr_t Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
 intptr_t Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;
@@ -415,25 +419,17 @@ void Heap::GarbageCollectionEpilogue() {
 }
 
-void Heap::CollectAllGarbage(bool force_compaction,
-                             CollectionPolicy collectionPolicy) {
+void Heap::CollectAllGarbage(bool force_compaction) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
   MarkCompactCollector::SetForceCompaction(force_compaction);
-  CollectGarbage(OLD_POINTER_SPACE, collectionPolicy);
+  CollectGarbage(OLD_POINTER_SPACE);
   MarkCompactCollector::SetForceCompaction(false);
 }
 
-void Heap::CollectAllAvailableGarbage() {
-  CompilationCache::Clear();
-  CollectAllGarbage(true, AGGRESSIVE);
-}
-
-void Heap::CollectGarbage(AllocationSpace space,
-                          CollectionPolicy collectionPolicy) {
+void Heap::CollectGarbage(AllocationSpace space) {
   // The VM is in the GC state until exiting this function.
   VMState state(GC);
@@ -460,7 +456,7 @@ void Heap::CollectGarbage(AllocationSpace space,
       ? &Counters::gc_scavenger
       : &Counters::gc_compactor;
   rate->Start();
-  PerformGarbageCollection(collector, &tracer, collectionPolicy);
+  PerformGarbageCollection(collector, &tracer);
   rate->Stop();
 
   GarbageCollectionEpilogue();
@@ -476,7 +472,7 @@ void Heap::CollectGarbage(AllocationSpace space,
 
 void Heap::PerformScavenge() {
   GCTracer tracer;
-  PerformGarbageCollection(SCAVENGER, &tracer, NORMAL);
+  PerformGarbageCollection(SCAVENGER, &tracer);
 }
@@ -661,8 +657,7 @@ void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
 }
 
 void Heap::PerformGarbageCollection(GarbageCollector collector,
-                                    GCTracer* tracer,
-                                    CollectionPolicy collectionPolicy) {
+                                    GCTracer* tracer) {
   if (collector != SCAVENGER) {
     PROFILE(CodeMovingGCEvent());
   }
@@ -696,45 +691,25 @@ void Heap::PerformGarbageCollection(GarbageCollector collector,
     UpdateSurvivalRateTrend(start_new_space_size);
 
-    UpdateOldSpaceLimits();
+    intptr_t old_gen_size = PromotedSpaceSize();
+    old_gen_promotion_limit_ =
+        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
+    old_gen_allocation_limit_ =
+        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
 
-    // Major GC would invoke weak handle callbacks on weakly reachable
-    // handles, but won't collect weakly reachable objects until next
-    // major GC. Therefore if we collect aggressively and weak handle callback
-    // has been invoked, we rerun major GC to release objects which become
-    // garbage.
-    if (collectionPolicy == AGGRESSIVE) {
-      // Note: as weak callbacks can execute arbitrary code, we cannot
-      // hope that eventually there will be no weak callbacks invocations.
-      // Therefore stop recollecting after several attempts.
-      const int kMaxNumberOfAttempts = 7;
-      for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
-        { DisableAssertNoAllocation allow_allocation;
-          GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
-          if (!GlobalHandles::PostGarbageCollectionProcessing()) break;
-        }
-        MarkCompact(tracer);
-        // Weak handle callbacks can allocate data, so keep limits correct.
-        UpdateOldSpaceLimits();
-      }
-    } else {
-      if (high_survival_rate_during_scavenges &&
-          IsStableOrIncreasingSurvivalTrend()) {
-        // Stable high survival rates of young objects both during partial and
-        // full collection indicate that mutator is either building or modifying
-        // a structure with a long lifetime.
-        // In this case we aggressively raise old generation memory limits to
-        // postpone subsequent mark-sweep collection and thus trade memory
-        // space for the mutation speed.
-        old_gen_promotion_limit_ *= 2;
-        old_gen_allocation_limit_ *= 2;
-      }
+    if (high_survival_rate_during_scavenges &&
+        IsStableOrIncreasingSurvivalTrend()) {
+      // Stable high survival rates of young objects both during partial and
+      // full collection indicate that mutator is either building or modifying
+      // a structure with a long lifetime.
+      // In this case we aggressively raise old generation memory limits to
+      // postpone subsequent mark-sweep collection and thus trade memory
+      // space for the mutation speed.
+      old_gen_promotion_limit_ *= 2;
+      old_gen_allocation_limit_ *= 2;
+    }
 
-      { DisableAssertNoAllocation allow_allocation;
-        GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
-        GlobalHandles::PostGarbageCollectionProcessing();
-      }
-    }
 
     old_gen_exhausted_ = false;
   } else {
     tracer_ = tracer;
     Scavenge();
@@ -745,6 +720,12 @@ void Heap::PerformGarbageCollection(GarbageCollector collector,
   Counters::objs_since_last_young.Set(0);
 
+  if (collector == MARK_COMPACTOR) {
+    DisableAssertNoAllocation allow_allocation;
+    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
+    GlobalHandles::PostGarbageCollectionProcessing();
+  }
+
   // Update relocatables.
   Relocatable::PostGarbageCollectionProcessing();
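
To make the restored limit arithmetic concrete: with 30 MB of promoted
data, the promotion limit becomes 30 + max(2, 30/3) = 40 MB and the
allocation limit 30 + max(8, 30/2) = 45 MB. A compilable toy calculation,
with illustrative values only (nothing below is V8 code):

    #include <algorithm>
    #include <cstdio>

    int main() {
      const long long MB = 1024 * 1024;
      const long long kMinimumPromotionLimit = 2 * MB;
      const long long kMinimumAllocationLimit = 8 * MB;

      // Pretend PromotedSpaceSize() returned 30 MB.
      long long old_gen_size = 30 * MB;
      long long promotion_limit =
          old_gen_size + std::max(kMinimumPromotionLimit, old_gen_size / 3);
      long long allocation_limit =
          old_gen_size + std::max(kMinimumAllocationLimit, old_gen_size / 2);

      std::printf("promotion limit:  %lld MB\n", promotion_limit / MB);   // 40
      std::printf("allocation limit: %lld MB\n", allocation_limit / MB);  // 45
      return 0;
    }

With stable high survival rates these limits are then doubled, postponing
the next mark-sweep at the cost of memory.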

src/heap.h

@@ -693,20 +693,13 @@ class Heap : public AllStatic {
   static void GarbageCollectionPrologue();
   static void GarbageCollectionEpilogue();
 
-  enum CollectionPolicy { NORMAL, AGGRESSIVE };
-
   // Performs garbage collection operation.
   // Returns whether required_space bytes are available after the collection.
-  static void CollectGarbage(AllocationSpace space,
-                             CollectionPolicy collectionPolicy = NORMAL);
+  static void CollectGarbage(AllocationSpace space);
 
   // Performs a full garbage collection. Force compaction if the
   // parameter is true.
-  static void CollectAllGarbage(bool force_compaction,
-                                CollectionPolicy collectionPolicy = NORMAL);
-
-  // Last hope GC, should try to squeeze as much as possible.
-  static void CollectAllAvailableGarbage();
+  static void CollectAllGarbage(bool force_compaction);
 
   // Notify the heap that a context has been disposed.
   static int NotifyContextDisposed() { return ++contexts_disposed_; }
@@ -1242,13 +1235,7 @@ class Heap : public AllStatic {
   // Performs garbage collection
   static void PerformGarbageCollection(GarbageCollector collector,
-                                       GCTracer* tracer,
-                                       CollectionPolicy collectionPolicy);
-
-  static const intptr_t kMinimumPromotionLimit = 2 * MB;
-  static const intptr_t kMinimumAllocationLimit = 8 * MB;
-
-  inline static void UpdateOldSpaceLimits();
+                                       GCTracer* tracer);
 
   // Allocate an uninitialized object in map space. The behavior is identical
   // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
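
After the revert, the public collection API reduces to the plain entry
points declared above. A hedged usage sketch against a stub Heap (not the
real class) showing the surviving call shapes:

    // Stub Heap, not the real class; only the call shapes matter here.
    struct Heap {
      enum AllocationSpace { OLD_POINTER_SPACE };
      static void PerformScavenge() {}                // young generation only
      static void CollectGarbage(AllocationSpace) {}  // full GC via one space
      static void CollectAllGarbage(bool) {}          // optional forced compaction
    };

    int main() {
      Heap::PerformScavenge();
      Heap::CollectGarbage(Heap::OLD_POINTER_SPACE);
      Heap::CollectAllGarbage(true);  // the AGGRESSIVE policy knob is gone
      return 0;
    }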