From 6941a4f389894be24fa5a9bf2ce9293d99c99eb4 Mon Sep 17 00:00:00 2001
From: "whesse@chromium.org"
Date: Mon, 27 Oct 2008 11:55:31 +0000
Subject: [PATCH] Collects unused maps that are only kept alive by map
 transitions. If a map has descendants in the map transition tree that are
 alive, it is kept. A map is collected only if neither it nor any of its
 transition descendants is the map of a live object. This happens in
 mark-sweep and mark-compact garbage collections.

Review URL: http://codereview.chromium.org/8099

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@601 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
 src/flag-definitions.h |   2 +
 src/mark-compact.cc    | 177 +++++++++++++++++++++++++++++++++++++----
 src/mark-compact.h     |  18 +++++
 src/objects-inl.h      |   4 +-
 src/objects.cc         |  65 ++++++++++++++-
 src/objects.h          |  19 ++++-
 src/property.h         |   5 +-
 src/runtime.cc         |  11 ++-
 8 files changed, 279 insertions(+), 22 deletions(-)

diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index c4de404116..357ff3ee16 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -134,6 +134,8 @@ DEFINE_bool(gc_global, false, "always perform global GCs")
 DEFINE_int(gc_interval, -1, "garbage collect after <n> allocations")
 DEFINE_bool(trace_gc, false,
             "print one trace line following each garbage collection")
+DEFINE_bool(collect_maps, true,
+            "garbage collect maps from which no objects can be reached")
 
 // ic.cc
 DEFINE_bool(use_ic, true, "use inline caching")

diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 8daf28d160..41e6f5066f 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -78,6 +78,8 @@ void MarkCompactCollector::CollectGarbage(GCTracer* tracer) {
 
   MarkLiveObjects();
 
+  if (FLAG_collect_maps) ClearNonLiveTransitions();
+
   SweepLargeObjectSpace();
 
   if (compacting_collection_) {
@@ -135,6 +137,7 @@ void MarkCompactCollector::Prepare() {
   }
 
   if (FLAG_never_compact) compacting_collection_ = false;
+  if (FLAG_collect_maps) CreateBackPointers();
 
 #ifdef DEBUG
   if (compacting_collection_) {
@@ -322,9 +325,13 @@ class MarkingVisitor : public ObjectVisitor {
 
   // Visit an unmarked object.
   void VisitUnmarkedObject(HeapObject* obj) {
-    ASSERT(Heap::Contains(obj));
 #ifdef DEBUG
+    ASSERT(Heap::Contains(obj));
     MarkCompactCollector::UpdateLiveObjectCount(obj);
+    ASSERT(!obj->IsMarked());
+    // ASSERT(!obj->IsMap());  // Some maps are processed here.
+    // Their map transitions will be followed.  If we do a test
+    // here to treat maps separately, will there be a performance impact?
 #endif
     Map* map = obj->map();
     obj->SetMark();
@@ -425,14 +432,99 @@ void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
   ASSERT(!object->IsMarked());
 
   if (object->IsJSGlobalObject()) Counters::global_objects.Increment();
 
-  if (FLAG_cleanup_caches_in_maps_at_gc && object->IsMap()) {
-    Map::cast(object)->ClearCodeCache();
-  }
-
-  object->SetMark();
   tracer_->increment_marked_count();
   ASSERT(Heap::Contains(object));
-  marking_stack.Push(object);
+  if (object->IsMap()) {
+    if (FLAG_cleanup_caches_in_maps_at_gc) {
+      Map::cast(object)->ClearCodeCache();
+    }
+    object->SetMark();
+    if (FLAG_collect_maps) {
+      MarkMapContents(reinterpret_cast<Map*>(object));
+    } else {
+      marking_stack.Push(object);
+    }
+  } else {
+    object->SetMark();
+    marking_stack.Push(object);
+  }
+}
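
Note (illustration, not part of the patch): the IsMap branch above marks a
map without pushing it on the marking stack when collect_maps is enabled,
so map transitions stop acting as strong references. The resulting liveness
rule, as a minimal sketch with hypothetical helpers HasLiveInstances() and
TransitionTargets():

  // A map survives the collection iff it, or some map reachable from it
  // through map transitions, is still the map of a live object.
  static bool MapIsLive(Map* map) {
    if (HasLiveInstances(map)) return true;  // hypothetical helper
    // TransitionTargets(map): hypothetical list of the map's
    // MAP_TRANSITION targets from its descriptor array.
    List<Map*> targets = TransitionTargets(map);
    for (int i = 0; i < targets.length(); i++) {
      if (MapIsLive(targets[i])) return true;
    }
    return false;
  }

The patch computes this rule without recursion: marking traverses the
descriptor arrays of live maps, and ClearNonLiveTransitions() later walks
back-pointer chains to find dead transitions.
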
+
+
+void MarkCompactCollector::MarkMapContents(Map* map) {
+  MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(
+      HeapObject::RawField(map, Map::kInstanceDescriptorsOffset)));
+
+  // Mark the Object* fields of the Map.
+  // Since the descriptor array has been marked already, it is fine
+  // that one of these fields contains a pointer to it.
+  MarkingVisitor visitor;  // Has no state or contents.
+  visitor.VisitPointers(&HeapObject::RawField(map, Map::kPrototypeOffset),
+                        &HeapObject::RawField(map, Map::kSize));
+}
+
+
+void MarkCompactCollector::MarkDescriptorArray(
+    DescriptorArray* descriptors) {
+  if (descriptors->IsMarked()) return;
+  // Empty descriptor array is marked as a root before any maps are marked.
+  ASSERT(descriptors != Heap::empty_descriptor_array());
+
+  tracer_->increment_marked_count();
+#ifdef DEBUG
+  UpdateLiveObjectCount(descriptors);
+#endif
+  descriptors->SetMark();
+
+  FixedArray* contents = reinterpret_cast<FixedArray*>(
+      descriptors->get(DescriptorArray::kContentArrayIndex));
+  ASSERT(contents->IsHeapObject());
+  ASSERT(!contents->IsMarked());
+  ASSERT(contents->IsFixedArray());
+  ASSERT(contents->length() >= 2);
+  tracer_->increment_marked_count();
+#ifdef DEBUG
+  UpdateLiveObjectCount(contents);
+#endif
+  contents->SetMark();
+  // Contents contains (value, details) pairs.  If the details say
+  // that the type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
+  // or NULL_DESCRIPTOR, we don't mark the value as live.  Only for
+  // type MAP_TRANSITION is the value an Object* (a Map*).
+  for (int i = 0; i < contents->length(); i += 2) {
+    // If the pair (value, details) at index i, i+1 is not
+    // a transition or null descriptor, mark the value.
+    PropertyDetails details(Smi::cast(contents->get(i + 1)));
+    if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
+      HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
+      if (object->IsHeapObject() && !object->IsMarked()) {
+        tracer_->increment_marked_count();
+#ifdef DEBUG
+        UpdateLiveObjectCount(object);
+#endif
+        object->SetMark();
+        marking_stack.Push(object);
+      }
+    }
+  }
+  // The DescriptorArray descriptors contains a pointer to its contents
+  // array, but the contents array is already marked.
+  marking_stack.Push(descriptors);
+}
+
+
+void MarkCompactCollector::CreateBackPointers() {
+  HeapObjectIterator iterator(Heap::map_space());
+  while (iterator.has_next()) {
+    Object* next_object = iterator.next();
+    if (next_object->IsMap()) {  // Could also be ByteArray on free list.
+      Map* map = Map::cast(next_object);
+      if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
+          map->instance_type() <= LAST_JS_OBJECT_TYPE) {
+        map->CreateBackPointers();
+      }
+    }
+  }
 }
 
 
@@ -675,6 +767,13 @@ void MarkCompactCollector::MarkLiveObjects() {
 }
 
 
+static int CountMarkedCallback(HeapObject* obj) {
+  MapWord map_word = obj->map_word();
+  map_word.ClearMark();
+  return obj->SizeFromMap(map_word.ToMap());
+}
+
+
 #ifdef DEBUG
 void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
   live_bytes_ += obj->Size();
@@ -697,13 +796,6 @@ void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
 }
 
 
-static int CountMarkedCallback(HeapObject* obj) {
-  MapWord map_word = obj->map_word();
-  map_word.ClearMark();
-  return obj->SizeFromMap(map_word.ToMap());
-}
-
-
 void MarkCompactCollector::VerifyHeapAfterMarkingPhase() {
   Heap::new_space()->Verify();
   Heap::old_pointer_space()->Verify();
@@ -755,6 +847,63 @@ void MarkCompactCollector::SweepLargeObjectSpace() {
   Heap::lo_space()->FreeUnmarkedObjects();
 }
 
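
Note (illustration, not part of the patch): everything below runs during the
marking phase, when an object's map word carries the mark bit and is not a
valid map pointer until that bit is cleared. A rough sketch of the idea,
with an illustrative layout that is not V8's actual MapWord encoding, showing
why CountMarkedCallback() above and SafeIsMap() below clear the mark first:

  // The mark lives in the map word, so a marked object's "map pointer"
  // must have the mark bit cleared before it can be dereferenced.
  class MapWordSketch {
   public:
    bool IsMarked() const { return (value_ & kMarkBit) != 0; }
    void ClearMark() { value_ &= ~kMarkBit; }
    Map* ToMap() const { return reinterpret_cast<Map*>(value_); }
   private:
    static const uintptr_t kMarkBit = 1;  // illustrative bit position
    uintptr_t value_;
  };
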
+// Safe to use during marking phase only.
+bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
+  MapWord metamap = object->map_word();
+  metamap.ClearMark();
+  return metamap.ToMap()->instance_type() == MAP_TYPE;
+}
+
+
+void MarkCompactCollector::ClearNonLiveTransitions() {
+  HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback);
+  // Iterate over the map space, setting map transitions that go from
+  // a marked map to an unmarked map to null transitions.  At the same time,
+  // set all the prototype fields of maps back to their original value,
+  // dropping the back pointers temporarily stored in the prototype field.
+  // Setting the prototype field requires following the linked list of
+  // back pointers, reversing them all at once.  This allows us to find
+  // those maps with map transitions that need to be nulled, and only
+  // scan the descriptor arrays of those maps, not all maps.
+  // All of these actions are carried out only on maps of JSObjects
+  // and related subtypes.
+  while (map_iterator.has_next()) {
+    Map* map = reinterpret_cast<Map*>(map_iterator.next());
+    if (!map->IsMarked() && map->IsByteArray()) continue;
+
+    ASSERT(SafeIsMap(map));
+    // Only JSObject and subtypes have map transitions and back pointers.
+    if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
+    if (map->instance_type() > LAST_JS_OBJECT_TYPE) continue;
+    // Follow the chain of back pointers to find the prototype.
+    Map* current = map;
+    while (SafeIsMap(current)) {
+      current = reinterpret_cast<Map*>(current->prototype());
+      ASSERT(current->IsHeapObject());
+    }
+    Object* real_prototype = current;
+
+    // Follow back pointers, setting them to prototype,
+    // clearing map transitions when necessary.
+    current = map;
+    bool on_dead_path = !current->IsMarked();
+    Object* next;
+    while (SafeIsMap(current)) {
+      next = current->prototype();
+      // There should never be a dead map above a live map.
+      ASSERT(on_dead_path || current->IsMarked());
+
+      // A live map above a dead map indicates a dead transition.
+      // This test will always be false on the first iteration.
+      if (on_dead_path && current->IsMarked()) {
+        on_dead_path = false;
+        current->ClearNonLiveTransitions(real_prototype);
+      }
+      HeapObject::RawField(current, Map::kPrototypeOffset) =
+          real_prototype;
+      current = reinterpret_cast<Map*>(next);
+    }
+  }
+}
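
Note (illustration, not part of the patch): the first inner loop above
relies on the invariant that back pointers are always maps while the real
prototype never is, so the walk terminates at the shared prototype. A
minimal standalone sketch of that walk, as a hypothetical helper the patch
inlines:

  // Recover the real prototype from any map in a transition tree whose
  // prototype fields currently hold back pointers.
  static Object* FindRealPrototype(Map* map) {
    Map* current = map;
    // Keep following back pointers until we step off the last map;
    // the first non-map reached is the shared, real prototype.
    while (MarkCompactCollector::SafeIsMap(current)) {
      current = reinterpret_cast<Map*>(current->prototype());
    }
    return current;
  }

The second loop then walks the same chain again, and because a dead map can
never sit below a live one, the single live-after-dead boundary is exactly
where transitions must be cleared.
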
 
 // -------------------------------------------------------------------------
 // Phase 2: Encode forwarding addresses.
diff --git a/src/mark-compact.h b/src/mark-compact.h
index f3b4e2ab0e..2dcf4334ab 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -155,6 +155,17 @@ class MarkCompactCollector : public AllStatic {
     if (!obj->IsMarked()) MarkUnmarkedObject(obj);
   }
 
+  // Creates back pointers for all map transitions, stores them in
+  // the prototype field.  The original prototype pointers are restored
+  // in ClearNonLiveTransitions().  All JSObject maps
+  // connected by map transitions have the same prototype object, which
+  // is why we can use this field temporarily for back pointers.
+  static void CreateBackPointers();
+
+  // Mark a Map and its DescriptorArray together, skipping transitions.
+  static void MarkMapContents(Map* map);
+  static void MarkDescriptorArray(DescriptorArray* descriptors);
+
   // Mark the heap roots and all objects reachable from them.
   static void ProcessRoots(RootMarkingVisitor* visitor);
 
@@ -194,6 +205,13 @@ class MarkCompactCollector : public AllStatic {
   // compacting or not, because the large object space is never compacted.
   static void SweepLargeObjectSpace();
 
+  // Test whether a (possibly marked) object is a Map.
+  static inline bool SafeIsMap(HeapObject* object);
+
+  // Map transitions from a live map to a dead map must be killed.
+  // We replace them with a null descriptor, with the same key.
+  static void ClearNonLiveTransitions();
+
   // --------------------------------------------------------------------------
   // Phase 2: functions related to computing and encoding forwarding pointers
   //    before: live objects' map pointers are marked as '00'

diff --git a/src/objects-inl.h b/src/objects-inl.h
index 33bca561fa..e51d192799 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -552,8 +552,8 @@ Object* Object::GetProperty(String* key, PropertyAttributes* attributes) {
   (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
 
 
-Object* HeapObject::GetHeapObjectField(HeapObject* obj, int index) {
-  return READ_FIELD(obj, HeapObject::kHeaderSize + kPointerSize * index);
+Object*& HeapObject::RawField(HeapObject* obj, int byte_offset) {
+  return READ_FIELD(obj, byte_offset);
 }
 

diff --git a/src/objects.cc b/src/objects.cc
index 7cfa5a0df8..7ffc819a1d 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -333,7 +333,7 @@ Object* Object::GetProperty(Object* receiver,
     // Check if we're allowed to read from the current object. Note
     // that even though we may not actually end up loading the named
     // property from the current object, we still check that we have
-    // access to the it.
+    // access to it.
    JSObject* checked = JSObject::cast(current);
     if (!Top::MayNamedAccess(checked, name, v8::ACCESS_GET)) {
       return checked->GetPropertyWithFailedAccessCheck(receiver,
@@ -4050,6 +4050,67 @@ void String::PrintOn(FILE* file) {
 }
 
 
+void Map::CreateBackPointers() {
+  DescriptorArray* descriptors = instance_descriptors();
+  for (DescriptorReader r(descriptors); !r.eos(); r.advance()) {
+    if (r.type() == MAP_TRANSITION) {
+      // Get target.
+      Map* target = Map::cast(r.GetValue());
+#ifdef DEBUG
+      // Verify target.
+      Object* source_prototype = prototype();
+      Object* target_prototype = target->prototype();
+      ASSERT(source_prototype->IsJSObject() ||
+             source_prototype->IsMap() ||
+             source_prototype->IsNull());
+      ASSERT(target_prototype->IsJSObject() ||
+             target_prototype->IsNull());
+      ASSERT(source_prototype->IsMap() ||
+             source_prototype == target_prototype);
+#endif
+      // Point target back to source.  set_prototype() will not let us set
+      // the prototype to a map, as we do here.
+      RawField(target, kPrototypeOffset) = this;
+    }
+  }
+}
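
Note (illustration, not part of the patch): CreateBackPointers() above
overwrites each transition target's prototype field with a pointer to the
source map, which is only sound because every map in a transition tree
shares one prototype. A hypothetical debug checker, using the same
DescriptorReader API, stating the invariant that holds afterwards:

  // After Map::CreateBackPointers(), every MAP_TRANSITION target's
  // prototype field points back at the map that owns the transition.
  static void CheckBackPointers(Map* source) {
    DescriptorArray* descriptors = source->instance_descriptors();
    for (DescriptorReader r(descriptors); !r.eos(); r.advance()) {
      if (r.type() == MAP_TRANSITION) {
        Map* target = Map::cast(r.GetValue());
        ASSERT(target->prototype() == source);  // back pointer installed
      }
    }
  }
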
+
+
+void Map::ClearNonLiveTransitions(Object* real_prototype) {
+  // Live DescriptorArray objects will be marked, so we must use
+  // low-level accessors to get and modify their data.
+  DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
+      RawField(this, Map::kInstanceDescriptorsOffset));
+  if (d == Heap::empty_descriptor_array()) return;
+  Smi* NullDescriptorDetails =
+      PropertyDetails(NONE, NULL_DESCRIPTOR).AsSmi();
+  FixedArray* contents = reinterpret_cast<FixedArray*>(
+      d->get(DescriptorArray::kContentArrayIndex));
+  ASSERT(contents->length() >= 2);
+  for (int i = 0; i < contents->length(); i += 2) {
+    // If the pair (value, details) is a map transition,
+    // check if the target is live.  If not, null the descriptor.
+    // Also drop the back pointer for that map transition, so that this
+    // map is not reached again by following a back pointer from a
+    // non-live object.
+    PropertyDetails details(Smi::cast(contents->get(i + 1)));
+    if (details.type() == MAP_TRANSITION) {
+      Map* target = reinterpret_cast<Map*>(contents->get(i));
+      ASSERT(target->IsHeapObject());
+      if (!target->IsMarked()) {
+        ASSERT(target->IsMap());
+        contents->set(i + 1, NullDescriptorDetails, SKIP_WRITE_BARRIER);
+        contents->set(i, Heap::null_value(), SKIP_WRITE_BARRIER);
+        ASSERT(target->prototype() == this ||
+               target->prototype() == real_prototype);
+        // The getter prototype() is read-only and set_prototype() has
+        // side effects, so modify the field directly.
+        RawField(target, Map::kPrototypeOffset) = real_prototype;
+      }
+    }
+  }
+}
+
+
 void Map::MapIterateBody(ObjectVisitor* v) {
   // Assumes all Object* members are contiguously allocated!
   IteratePointers(v, kPrototypeOffset, kCodeCacheOffset + kPointerSize);
@@ -6728,4 +6789,6 @@ int BreakPointInfo::GetBreakPointCount() {
 }
 
 
+const int FunctionTemplateInfo::kSize;
+const int ObjectTemplateInfo::kSize;
 } }  // namespace v8::internal

diff --git a/src/objects.h b/src/objects.h
index a3a43abb21..daa0a0615f 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1041,7 +1041,11 @@ class HeapObject: public Object {
   // object is overflowed (ie, partially restore the map pointer).
   inline void ClearOverflow();
 
-  static inline Object* GetHeapObjectField(HeapObject* obj, int index);
+  // Returns the field at the given byte offset in obj, as a read/write
+  // Object* reference.  Does no checking, and is safe to use during GC,
+  // while maps are invalid.  Does not update remembered sets, so should
+  // only be assigned to during marking GC.
+  static inline Object*& RawField(HeapObject* obj, int offset);
 
   // Casting.
   static inline HeapObject* cast(Object* obj);
@@ -2421,6 +2425,17 @@ class Map: public HeapObject {
   // Removes a code object from the code cache at the given index.
   void RemoveFromCodeCache(int index);
 
+  // For every transition in this map, makes the transition's
+  // target's prototype pointer point back to this map.
+  // This is undone in MarkCompactCollector::ClearNonLiveTransitions().
+  void CreateBackPointers();
+
+  // Set all map transitions from this map to dead maps to null.
+  // Also, restore the original prototype on the targets of these
+  // transitions, so that we do not process this map again while
+  // following back pointers.
+  void ClearNonLiveTransitions(Object* real_prototype);
+
   // Dispatched behavior.
   void MapIterateBody(ObjectVisitor* v);
 #ifdef DEBUG
@@ -2805,7 +2820,7 @@ class GlobalObject: public JSObject {
   // [builtins]: the object holding the runtime routines written in JS.
   DECL_ACCESSORS(builtins, JSBuiltinsObject)
 
-  // [global context]: the global context corresponding to this global objet.
+  // [global context]: the global context corresponding to this global object.
   DECL_ACCESSORS(global_context, Context)
 
   // [global receiver]: the global receiver object of the context
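
Note (illustration, not part of the patch): RawField() is the low-level
replacement for GetHeapObjectField(); it takes a byte offset rather than a
field index and returns an assignable Object*& reference. A usage sketch,
with illustrative variables some_map and new_prototype:

  // Read a field without touching the (possibly marked) map word.
  Object* proto = HeapObject::RawField(some_map, Map::kPrototypeOffset);
  // Write a field.  This bypasses the write barrier, which is why the
  // comment above restricts assignment to the marking phase of GC.
  HeapObject::RawField(some_map, Map::kPrototypeOffset) = new_prototype;
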
diff --git a/src/property.h b/src/property.h
index 1c9b0d2952..dc72bb3ad8 100644
--- a/src/property.h
+++ b/src/property.h
@@ -99,7 +99,10 @@ class Descriptor BASE_EMBEDDED {
   friend class DescriptorArray;
 };
 
-
+// A pointer from a map to the new map that is created by adding a named
+// property.  Map transitions are central to V8's property-lookup speed.
+// The two maps should always have the same prototype, since
+// Map::CreateBackPointers() depends on this.
 class MapTransitionDescriptor: public Descriptor {
  public:
   MapTransitionDescriptor(String* key, Map* map, PropertyAttributes attributes)

diff --git a/src/runtime.cc b/src/runtime.cc
index dcd2b01822..048ebb72bf 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -320,9 +320,16 @@ static Object* Runtime_IsTemplate(Arguments args) {
 static Object* Runtime_GetTemplateField(Arguments args) {
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(HeapObject, templ, args[0]);
-  RUNTIME_ASSERT(templ->IsStruct());
   CONVERT_CHECKED(Smi, field, args[1]);
-  return HeapObject::GetHeapObjectField(templ, field->value());
+  int index = field->value();
+  int offset = index * kPointerSize + HeapObject::kHeaderSize;
+  InstanceType type = templ->map()->instance_type();
+  RUNTIME_ASSERT(type == FUNCTION_TEMPLATE_INFO_TYPE ||
+                 type == OBJECT_TEMPLATE_INFO_TYPE);
+  RUNTIME_ASSERT(offset > 0);
+  RUNTIME_ASSERT(offset < ((type == FUNCTION_TEMPLATE_INFO_TYPE) ?
+                 FunctionTemplateInfo::kSize : ObjectTemplateInfo::kSize));
+  return HeapObject::RawField(templ, offset);
 }
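
Note on the runtime.cc change (illustration, not part of the patch): the
field index coming from JavaScript is now converted to a byte offset and
bounds-checked against the concrete template type before RawField() reads
it. A worked example, with sizes that are illustrative for a 32-bit build
where kPointerSize == 4 and HeapObject::kHeaderSize == 4:

  int index = 2;  // value of the Smi argument
  int offset = index * kPointerSize + HeapObject::kHeaderSize;  // == 12
  // The read is accepted only if 0 < offset < FunctionTemplateInfo::kSize
  // (or ObjectTemplateInfo::kSize), so a hostile index can no longer
  // address memory past the end of the template object.
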