Allow partial scanning of large arrays in order to avoid mark stack overflow.

This is a reland of r12609 (https://chromiumcodereview.appspot.com/10959011),
but this time VisitPointers has been fixed (it used to assume that the first
slot was on the first page of a large object).

Review URL: https://chromiumcodereview.appspot.com/10996018

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@12619 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent e8ffc2bebd
commit aba09dcf2e
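The core idea of the patch, as a standalone sketch in plain C++ (not V8 code; the names HugeArray, ScanChunk and kChunk are invented for illustration): a huge array is scanned at most a fixed number of slots per step, the resume index is remembered alongside the array, and the array is revisited until it has been scanned completely, so a single large object can no longer flood the marking deque.

// Minimal, self-contained sketch of chunked scanning with saved progress.
#include <algorithm>
#include <cstdio>
#include <vector>

namespace {

const int kChunk = 4;  // V8 uses 32 * 1024 entries per step; tiny here.

struct HugeArray {
  std::vector<int> slots;
  int progress = 0;                // First slot not yet scanned, mirroring
  bool partially_scanned = false;  // the per-chunk progress flag in the patch.
};

// Scans at most kChunk slots and returns true when the array is finished.
bool ScanChunk(HugeArray* array) {
  int from = array->partially_scanned ? array->progress : 0;
  int to = std::min<int>(from + kChunk, static_cast<int>(array->slots.size()));
  for (int i = from; i < to; i++) {
    // A real collector would mark the object this slot points to here.
    std::printf("visit slot %d\n", i);
  }
  if (to == static_cast<int>(array->slots.size())) {
    array->partially_scanned = false;  // Completely scanned.
    return true;
  }
  array->partially_scanned = true;     // Postponed; resume at 'to' later.
  array->progress = to;
  return false;
}

}  // namespace

int main() {
  HugeArray array;
  array.slots.resize(10);
  // Drain loop: keep rescanning until the array reports completion, the
  // analogue of re-queuing partially scanned large arrays after the marking
  // deque empties.
  while (!ScanChunk(&array)) {
    std::printf("-- postponed at %d --\n", array.progress);
  }
  return 0;
}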
src/heap.cc (28 changed lines)
@@ -1359,11 +1359,12 @@ void Heap::UpdateNewSpaceReferencesInExternalStringTable(
   if (external_string_table_.new_space_strings_.is_empty()) return;
-  Object** start = &external_string_table_.new_space_strings_[0];
-  Object** end = start + external_string_table_.new_space_strings_.length();
-  Object** last = start;
+  Object** start_slot = &external_string_table_.new_space_strings_[0];
+  Object** end_slot =
+      start_slot + external_string_table_.new_space_strings_.length();
+  Object** last = start_slot;

-  for (Object** p = start; p < end; ++p) {
+  for (Object** p = start_slot; p < end_slot; ++p) {
     ASSERT(InFromSpace(*p));
     String* target = updater_func(this, p);
@@ -1381,8 +1382,8 @@ void Heap::UpdateNewSpaceReferencesInExternalStringTable(
     }
   }

-  ASSERT(last <= end);
-  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
+  ASSERT(last <= end_slot);
+  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start_slot));
 }
@@ -1391,9 +1392,10 @@ void Heap::UpdateReferencesInExternalStringTable(

   // Update old space string references.
   if (external_string_table_.old_space_strings_.length() > 0) {
-    Object** start = &external_string_table_.old_space_strings_[0];
-    Object** end = start + external_string_table_.old_space_strings_.length();
-    for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
+    Object** start_slot = &external_string_table_.old_space_strings_[0];
+    Object** end_slot =
+        start_slot + external_string_table_.old_space_strings_.length();
+    for (Object** p = start_slot; p < end_slot; ++p) *p = updater_func(this, p);
   }

   UpdateNewSpaceReferencesInExternalStringTable(updater_func);
@@ -6790,11 +6792,11 @@ void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
   // Scan the object body.
   if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
     // This is specialized to scan Context's properly.
-    Object** start = reinterpret_cast<Object**>(obj->address() +
-                                                Context::kHeaderSize);
-    Object** end = reinterpret_cast<Object**>(obj->address() +
+    Object** start_slot = reinterpret_cast<Object**>(obj->address() +
+                                                     Context::kHeaderSize);
+    Object** end_slot = reinterpret_cast<Object**>(obj->address() +
         Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
-    mark_visitor->VisitPointers(start, end);
+    mark_visitor->VisitPointers(start_slot, end_slot);
   } else {
     obj->IterateBody(map_p->instance_type(),
                      obj->SizeFromMap(map_p),
@@ -190,8 +190,11 @@ class IncrementalMarkingMarkingVisitor

   static void VisitJSWeakMap(Map* map, HeapObject* object) {
     Heap* heap = map->GetHeap();
+    Object** start_slot =
+        HeapObject::RawField(object, JSWeakMap::kPropertiesOffset);
     VisitPointers(heap,
-                  HeapObject::RawField(object, JSWeakMap::kPropertiesOffset),
+                  start_slot,
+                  start_slot,
                   HeapObject::RawField(object, JSWeakMap::kSize));
   }
@@ -206,15 +209,54 @@ class IncrementalMarkingMarkingVisitor
                    void>::Visit(map, object);
   }

+  static const int kScanningChunk = 32 * 1024;
+
+  static int VisitHugeArray(FixedArray* array) {
+    Heap* heap = array->GetHeap();
+    MemoryChunk* chunk = MemoryChunk::FromAddress(array->address());
+    Object** start_slot = array->data_start();
+    int length = array->length();
+
+    if (chunk->owner()->identity() != LO_SPACE) {
+      VisitPointers(heap, start_slot, start_slot, start_slot + length);
+      return length;
+    }
+
+    int from =
+        chunk->IsPartiallyScanned() ? chunk->PartiallyScannedProgress() : 0;
+    int to = Min(from + kScanningChunk, length);
+
+    VisitPointers(heap, start_slot, start_slot + from, start_slot + to);
+
+    if (to == length) {
+      // If it went from black to grey while it was waiting for the next bit to
+      // be scanned then we have to start the scan again.
+      MarkBit mark_bit = Marking::MarkBitFrom(array);
+      if (!Marking::IsBlack(mark_bit)) {
+        ASSERT(Marking::IsGrey(mark_bit));
+        chunk->SetPartiallyScannedProgress(0);
+      } else {
+        chunk->SetCompletelyScanned();
+      }
+    } else {
+      chunk->SetPartiallyScannedProgress(to);
+    }
+    return to - from;
+  }
+
   static inline void VisitJSFunction(Map* map, HeapObject* object) {
     Heap* heap = map->GetHeap();
     // Iterate over all fields in the body but take care in dealing with
     // the code entry and skip weak fields.
+    Object** start_slot =
+        HeapObject::RawField(object, JSFunction::kPropertiesOffset);
     VisitPointers(heap,
-                  HeapObject::RawField(object, JSFunction::kPropertiesOffset),
+                  start_slot,
+                  start_slot,
                   HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
     VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
     VisitPointers(heap,
+                  start_slot,
                   HeapObject::RawField(object,
                                        JSFunction::kCodeEntryOffset + kPointerSize),
                   HeapObject::RawField(object,
@@ -229,11 +271,14 @@ class IncrementalMarkingMarkingVisitor
     }
   }

-  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
+  INLINE(static void VisitPointers(Heap* heap,
+                                   Object** anchor,
+                                   Object** start,
+                                   Object** end)) {
     for (Object** p = start; p < end; p++) {
       Object* obj = *p;
       if (obj->NonFailureIsHeapObject()) {
-        heap->mark_compact_collector()->RecordSlot(start, p, obj);
+        heap->mark_compact_collector()->RecordSlot(anchor, p, obj);
         MarkObject(heap, obj);
       }
     }
@@ -635,7 +680,8 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
 #ifdef DEBUG
       MarkBit mark_bit = Marking::MarkBitFrom(obj);
       ASSERT(Marking::IsGrey(mark_bit) ||
-             (obj->IsFiller() && Marking::IsWhite(mark_bit)));
+             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
+             MemoryChunk::FromAddress(obj->address())->IsPartiallyScanned());
 #endif
     }
   }
@@ -658,36 +704,57 @@ void IncrementalMarking::Hurry() {
     // was stopped.
     Map* filler_map = heap_->one_pointer_filler_map();
     Map* native_context_map = heap_->native_context_map();
-    while (!marking_deque_.IsEmpty()) {
-      HeapObject* obj = marking_deque_.Pop();
+    do {
+      while (!marking_deque_.IsEmpty()) {
+        HeapObject* obj = marking_deque_.Pop();

-      // Explicitly skip one word fillers. Incremental markbit patterns are
-      // correct only for objects that occupy at least two words.
-      Map* map = obj->map();
-      if (map == filler_map) {
-        continue;
-      } else if (map == native_context_map) {
-        // Native contexts have weak fields.
-        IncrementalMarkingMarkingVisitor::VisitNativeContext(map, obj);
-      } else {
-        MarkBit map_mark_bit = Marking::MarkBitFrom(map);
-        if (Marking::IsWhite(map_mark_bit)) {
-          WhiteToGreyAndPush(map, map_mark_bit);
+        // Explicitly skip one word fillers. Incremental markbit patterns are
+        // correct only for objects that occupy at least two words.
+        Map* map = obj->map();
+        if (map == filler_map) {
+          continue;
+        } else if (map == native_context_map) {
+          // Native contexts have weak fields.
+          IncrementalMarkingMarkingVisitor::VisitNativeContext(map, obj);
+          ASSERT(!Marking::IsBlack(Marking::MarkBitFrom(obj)));
+          MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
+        } else if (map->instance_type() == FIXED_ARRAY_TYPE &&
+                   FixedArray::cast(obj)->length() >
+                       IncrementalMarkingMarkingVisitor::kScanningChunk) {
+          MarkBit map_mark_bit = Marking::MarkBitFrom(map);
+          if (Marking::IsWhite(map_mark_bit)) {
+            WhiteToGreyAndPush(map, map_mark_bit);
+          }
+          MarkBit mark_bit = Marking::MarkBitFrom(obj);
+          if (!Marking::IsBlack(mark_bit)) {
+            MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
+          } else {
+            ASSERT(
+                MemoryChunk::FromAddress(obj->address())->IsPartiallyScanned());
+          }
+          IncrementalMarkingMarkingVisitor::VisitHugeArray(
+              FixedArray::cast(obj));
+        } else {
+          MarkBit map_mark_bit = Marking::MarkBitFrom(map);
+          if (Marking::IsWhite(map_mark_bit)) {
+            WhiteToGreyAndPush(map, map_mark_bit);
+          }
+          IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
+          ASSERT(!Marking::IsBlack(Marking::MarkBitFrom(obj)));
+          MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
+        }
-        IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
       }

-      MarkBit mark_bit = Marking::MarkBitFrom(obj);
-      ASSERT(!Marking::IsBlack(mark_bit));
-      Marking::MarkBlack(mark_bit);
-      MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
-    }
-    state_ = COMPLETE;
-    if (FLAG_trace_incremental_marking) {
-      double end = OS::TimeCurrentMillis();
-      PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
-             static_cast<int>(end - start));
-    }
+        MarkBit mark_bit = Marking::MarkBitFrom(obj);
+        Marking::MarkBlack(mark_bit);
+      }
+      state_ = COMPLETE;
+      if (FLAG_trace_incremental_marking) {
+        double end = OS::TimeCurrentMillis();
+        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
+               static_cast<int>(end - start));
+      }
+      MarkCompactCollector::ProcessLargePostponedArrays(heap_, &marking_deque_);
+    } while (!marking_deque_.IsEmpty());
   }

   if (FLAG_cleanup_code_caches_at_gc) {
@@ -822,42 +889,71 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
   } else if (state_ == MARKING) {
     Map* filler_map = heap_->one_pointer_filler_map();
     Map* native_context_map = heap_->native_context_map();
-    while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
-      HeapObject* obj = marking_deque_.Pop();
+    while (true) {
+      while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
+        HeapObject* obj = marking_deque_.Pop();

-      // Explicitly skip one word fillers. Incremental markbit patterns are
-      // correct only for objects that occupy at least two words.
-      Map* map = obj->map();
-      if (map == filler_map) continue;
+        // Explicitly skip one word fillers. Incremental markbit patterns are
+        // correct only for objects that occupy at least two words.
+        Map* map = obj->map();
+        if (map == filler_map) continue;

-      int size = obj->SizeFromMap(map);
-      bytes_to_process -= size;
-      MarkBit map_mark_bit = Marking::MarkBitFrom(map);
-      if (Marking::IsWhite(map_mark_bit)) {
-        WhiteToGreyAndPush(map, map_mark_bit);
+        int size = obj->SizeFromMap(map);
+        MarkBit map_mark_bit = Marking::MarkBitFrom(map);
+        if (Marking::IsWhite(map_mark_bit)) {
+          WhiteToGreyAndPush(map, map_mark_bit);
        }

+        // TODO(gc) switch to static visitor instead of normal visitor.
+        if (map == native_context_map) {
+          // Native contexts have weak fields.
+          Context* ctx = Context::cast(obj);
+
+          // We will mark cache black with a separate pass
+          // when we finish marking.
+          MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
+
+          IncrementalMarkingMarkingVisitor::VisitNativeContext(map, ctx);
+          bytes_to_process -= size;
+          SLOW_ASSERT(Marking::IsGrey(Marking::MarkBitFrom(obj)));
+          MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
+        } else if (map->instance_type() == FIXED_ARRAY_TYPE &&
+                   FixedArray::cast(obj)->length() >
+                       IncrementalMarkingMarkingVisitor::kScanningChunk) {
+          SLOW_ASSERT(
+              Marking::IsGrey(Marking::MarkBitFrom(obj)) ||
+              MemoryChunk::FromAddress(obj->address())->IsPartiallyScanned());
+          bytes_to_process -=
+              IncrementalMarkingMarkingVisitor::VisitHugeArray(
+                  FixedArray::cast(obj));
+          MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
+          if (!Marking::IsBlack(obj_mark_bit)) {
+            MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
+          }
+        } else {
+          IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
+          bytes_to_process -= size;
+          SLOW_ASSERT(
+              Marking::IsGrey(Marking::MarkBitFrom(obj)) ||
+              (obj->IsFiller() && Marking::IsWhite(Marking::MarkBitFrom(obj))));
+          MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
+        }
+
+        MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
+        Marking::MarkBlack(obj_mark_bit);
+      }

-      // TODO(gc) switch to static visitor instead of normal visitor.
-      if (map == native_context_map) {
-        // Native contexts have weak fields.
-        Context* ctx = Context::cast(obj);
-
-        // We will mark cache black with a separate pass
-        // when we finish marking.
-        MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
-
-        IncrementalMarkingMarkingVisitor::VisitNativeContext(map, ctx);
-      } else {
-        IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
-      }
-
-      MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
-      SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
-                  (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
-      Marking::MarkBlack(obj_mark_bit);
-      MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
+      if (marking_deque_.IsEmpty()) {
+        MarkCompactCollector::ProcessLargePostponedArrays(heap_,
+                                                          &marking_deque_);
+        if (marking_deque_.IsEmpty()) {
+          MarkingComplete(action);
+          break;
+        }
+      } else {
+        ASSERT(bytes_to_process <= 0);
+        break;
+      }
     }
-    if (marking_deque_.IsEmpty()) MarkingComplete(action);
   }

   steps_count_++;
@@ -83,6 +83,9 @@ void MarkCompactCollector::RecordSlot(Object** anchor_slot,
                                       Object** slot,
                                       Object* object) {
   Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object));
+  // Ensure the anchor slot is on the first 'page' of a large object.
+  ASSERT(Page::FromAddress(reinterpret_cast<Address>(anchor_slot))->owner() !=
+         NULL);
   if (object_page->IsEvacuationCandidate() &&
       !ShouldSkipEvacuationSlotRecording(anchor_slot)) {
     if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
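Why an anchor slot has to be threaded through VisitPointers and into RecordSlot, per the commit message ("VisitPointers has been fixed (it used to assume that the first slot was on the first page of a large object)"): per-page metadata lives on the first page of a large object, while an interior slot of a multi-page array masks down to a different page. A toy, self-contained illustration of the addressing issue (the constants below are assumptions of this sketch, not values taken from the patch):

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kPageSize = 1 << 20;               // assumed 1 MB pages
  const uintptr_t kPageAlignmentMask = ~(kPageSize - 1);
  uintptr_t array_start = 5 * kPageSize;             // header page of the array
  uintptr_t interior_slot = array_start + 3 * kPageSize + 64;
  // The two addresses mask to different pages, so metadata lookups must go
  // through an anchor slot known to be on the header page.
  std::printf("header page:   %#lx\n",
              static_cast<unsigned long>(array_start & kPageAlignmentMask));
  std::printf("interior page: %#lx\n",
              static_cast<unsigned long>(interior_slot & kPageAlignmentMask));
  return 0;
}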
@@ -1053,16 +1053,43 @@ class MarkCompactMarkingVisitor
     MarkObjectByPointer(heap->mark_compact_collector(), p, p);
   }

-  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
+  INLINE(static void VisitPointers(Heap* heap,
+                                   Object** anchor,
+                                   Object** start,
+                                   Object** end)) {
     // Mark all objects pointed to in [start, end).
     const int kMinRangeForMarkingRecursion = 64;
     if (end - start >= kMinRangeForMarkingRecursion) {
-      if (VisitUnmarkedObjects(heap, start, end)) return;
+      if (VisitUnmarkedObjects(heap, anchor, start, end)) return;
       // We are close to a stack overflow, so just mark the objects.
     }
     MarkCompactCollector* collector = heap->mark_compact_collector();
     for (Object** p = start; p < end; p++) {
-      MarkObjectByPointer(collector, start, p);
+      MarkObjectByPointer(collector, anchor, p);
     }
   }

+  static void VisitHugeFixedArray(Heap* heap, FixedArray* array, int length);
+
+  // The deque is contiguous and we use new space, it is therefore contained in
+  // one page minus the header. It also has a size that is a power of two so
+  // it is half the size of a page. We want to scan a number of array entries
+  // that is less than the number of entries in the deque, so we divide by 2
+  // once more.
+  static const int kScanningChunk = Page::kPageSize / 4 / kPointerSize;
+
+  INLINE(static void VisitFixedArray(Map* map, HeapObject* object)) {
+    FixedArray* array = FixedArray::cast(object);
+    int length = array->length();
+    Heap* heap = map->GetHeap();
+
+    if (length < kScanningChunk ||
+        MemoryChunk::FromAddress(array->address())->owner()->identity() !=
+            LO_SPACE) {
+      Object** start_slot = array->data_start();
+      VisitPointers(heap, start_slot, start_slot, start_slot + length);
+    } else {
+      VisitHugeFixedArray(heap, array, length);
+    }
+  }
+
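For concreteness, a hedged reading of the constant above (assuming the common configuration of 1 MB pages and 8-byte pointers; the actual values come from Page::kPageSize and kPointerSize for the build in question):

// kScanningChunk = 1 MB / 4 / 8 = 32768 array entries per scanning step,
// i.e. roughly half of the ~65536 entries a half-page marking deque can hold,
// which is what the "divide by 2 once more" in the comment refers to.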
@@ -1112,21 +1139,22 @@ class MarkCompactMarkingVisitor
     IterateBody(map, obj);
   }

-  // Visit all unmarked objects pointed to by [start, end).
+  // Visit all unmarked objects pointed to by [start_slot, end_slot).
   // Returns false if the operation fails (lack of stack space).
   static inline bool VisitUnmarkedObjects(Heap* heap,
-                                          Object** start,
-                                          Object** end) {
+                                          Object** anchor_slot,
+                                          Object** start_slot,
+                                          Object** end_slot) {
     // Return false is we are close to the stack limit.
     StackLimitCheck check(heap->isolate());
     if (check.HasOverflowed()) return false;

     MarkCompactCollector* collector = heap->mark_compact_collector();
     // Visit the unmarked objects.
-    for (Object** p = start; p < end; p++) {
+    for (Object** p = start_slot; p < end_slot; p++) {
       Object* o = *p;
       if (!o->IsHeapObject()) continue;
-      collector->RecordSlot(start, p, o);
+      collector->RecordSlot(anchor_slot, p, o);
       HeapObject* obj = HeapObject::cast(o);
       MarkBit mark = Marking::MarkBitFrom(obj);
       if (mark.Get()) continue;
@@ -1447,9 +1475,11 @@ class MarkCompactMarkingVisitor
                                           bool flush_code_candidate) {
     Heap* heap = map->GetHeap();

-    VisitPointers(heap,
-                  HeapObject::RawField(object, JSFunction::kPropertiesOffset),
-                  HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
+    Object** start_slot =
+        HeapObject::RawField(object, JSFunction::kPropertiesOffset);
+    Object** end_slot =
+        HeapObject::RawField(object, JSFunction::kCodeEntryOffset);
+    VisitPointers(heap, start_slot, start_slot, end_slot);

     if (!flush_code_candidate) {
       VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
@@ -1473,11 +1503,12 @@ class MarkCompactMarkingVisitor
       }
     }

-    VisitPointers(
-        heap,
+    start_slot =
         HeapObject::RawField(object,
-                             JSFunction::kCodeEntryOffset + kPointerSize),
-        HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset));
+                             JSFunction::kCodeEntryOffset + kPointerSize);
+    end_slot =
+        HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset);
+    VisitPointers(heap, start_slot, start_slot, end_slot);
   }

@@ -1493,17 +1524,40 @@ class MarkCompactMarkingVisitor
                                  SharedFunctionInfo::kCodeOffset));
     }

-    VisitPointers(
-        heap,
+    Object** start_slot =
         HeapObject::RawField(object,
-                             SharedFunctionInfo::kOptimizedCodeMapOffset),
-        HeapObject::RawField(object, SharedFunctionInfo::kSize));
+                             SharedFunctionInfo::kOptimizedCodeMapOffset);
+    Object** end_slot =
+        HeapObject::RawField(object, SharedFunctionInfo::kSize);
+
+    VisitPointers(heap, start_slot, start_slot, end_slot);
   }

   static VisitorDispatchTable<Callback> non_count_table_;
 };


+void MarkCompactMarkingVisitor::VisitHugeFixedArray(Heap* heap,
+                                                    FixedArray* array,
+                                                    int length) {
+  MemoryChunk* chunk = MemoryChunk::FromAddress(array->address());
+
+  ASSERT(chunk->owner()->identity() == LO_SPACE);
+
+  Object** start_slot = array->data_start();
+  int from =
+      chunk->IsPartiallyScanned() ? chunk->PartiallyScannedProgress() : 0;
+  int to = Min(from + kScanningChunk, length);
+  VisitPointers(heap, start_slot, start_slot + from, start_slot + to);
+
+  if (to == length) {
+    chunk->SetCompletelyScanned();
+  } else {
+    chunk->SetPartiallyScannedProgress(to);
+  }
+}
+
+
 void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
     FixedArrayBase* fixed_array,
     FixedArraySubInstanceType fast_type,
@@ -1645,6 +1699,9 @@ void MarkCompactMarkingVisitor::Initialize() {
   table_.Register(kVisitJSRegExp,
                   &VisitRegExpAndFlushCode);

+  table_.Register(kVisitFixedArray,
+                  &VisitFixedArray);
+
   if (FLAG_track_gc_object_stats) {
     // Copy the visitor table to make call-through possible.
     non_count_table_.CopyFrom(&table_);
@@ -1668,8 +1725,9 @@ class MarkingVisitor : public ObjectVisitor {
     MarkCompactMarkingVisitor::VisitPointer(heap_, p);
   }

-  void VisitPointers(Object** start, Object** end) {
-    MarkCompactMarkingVisitor::VisitPointers(heap_, start, end);
+  void VisitPointers(Object** start_slot, Object** end_slot) {
+    MarkCompactMarkingVisitor::VisitPointers(
+        heap_, start_slot, start_slot, end_slot);
   }

  private:
@@ -1696,8 +1754,8 @@ class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
   explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
       : collector_(collector) {}

-  void VisitPointers(Object** start, Object** end) {
-    for (Object** p = start; p < end; p++) VisitPointer(p);
+  void VisitPointers(Object** start_slot, Object** end_slot) {
+    for (Object** p = start_slot; p < end_slot; p++) VisitPointer(p);
   }

   void VisitPointer(Object** slot) {
@@ -1808,8 +1866,8 @@ class RootMarkingVisitor : public ObjectVisitor {
     MarkObjectByPointer(p);
   }

-  void VisitPointers(Object** start, Object** end) {
-    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
+  void VisitPointers(Object** start_slot, Object** end_slot) {
+    for (Object** p = start_slot; p < end_slot; p++) MarkObjectByPointer(p);
   }

  private:
@@ -1845,9 +1903,9 @@ class SymbolTableCleaner : public ObjectVisitor {
   explicit SymbolTableCleaner(Heap* heap)
     : heap_(heap), pointers_removed_(0) { }

-  virtual void VisitPointers(Object** start, Object** end) {
-    // Visit all HeapObject pointers in [start, end).
-    for (Object** p = start; p < end; p++) {
+  virtual void VisitPointers(Object** start_slot, Object** end_slot) {
+    // Visit all HeapObject pointers in [start_slot, end_slot).
+    for (Object** p = start_slot; p < end_slot; p++) {
       Object* o = *p;
       if (o->IsHeapObject() &&
           !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
@@ -2128,6 +2186,7 @@ void MarkCompactCollector::EmptyMarkingDeque() {

       MarkCompactMarkingVisitor::IterateBody(map, object);
     }
+    ProcessLargePostponedArrays(heap(), &marking_deque_);

     // Process encountered weak maps, mark objects only reachable by those
     // weak maps and repeat until fix-point is reached.
@@ -2136,12 +2195,29 @@ void MarkCompactCollector::EmptyMarkingDeque() {
 }


+void MarkCompactCollector::ProcessLargePostponedArrays(Heap* heap,
+                                                       MarkingDeque* deque) {
+  ASSERT(deque->IsEmpty());
+  LargeObjectIterator it(heap->lo_space());
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+    if (!obj->IsFixedArray()) continue;
+    MemoryChunk* p = MemoryChunk::FromAddress(obj->address());
+    if (p->IsPartiallyScanned()) {
+      deque->PushBlack(obj);
+    }
+  }
+}
+
+
 // Sweep the heap for overflowed objects, clear their overflow bits, and
 // push them on the marking stack. Stop early if the marking stack fills
 // before sweeping completes. If sweeping completes, there are no remaining
 // overflowed objects in the heap so the overflow flag on the markings stack
 // is cleared.
 void MarkCompactCollector::RefillMarkingDeque() {
   if (FLAG_trace_gc) {
     PrintPID("Marking queue overflowed\n");
   }
   ASSERT(marking_deque_.overflowed());

   SemiSpaceIterator new_it(heap()->new_space());
@@ -2632,8 +2708,8 @@ class PointersUpdatingVisitor: public ObjectVisitor {
     UpdatePointer(p);
   }

-  void VisitPointers(Object** start, Object** end) {
-    for (Object** p = start; p < end; p++) UpdatePointer(p);
+  void VisitPointers(Object** start_slot, Object** end_slot) {
+    for (Object** p = start_slot; p < end_slot; p++) UpdatePointer(p);
   }

   void VisitEmbeddedPointer(RelocInfo* rinfo) {
@@ -240,6 +240,35 @@ class MarkingDeque {
   int mask() { return mask_; }
   void set_top(int top) { top_ = top; }

+  int space_left() {
+    // If we already overflowed we may as well just say there is lots of
+    // space left.
+    if (overflowed_) return mask_ + 1;
+    if (IsEmpty()) return mask_ + 1;
+    if (IsFull()) return 0;
+    return (bottom_ - top_) & mask_;
+  }
+
+#ifdef DEBUG
+  const char* Status() {
+    if (overflowed_) return "Overflowed";
+    if (IsEmpty()) return "Empty";
+    if (IsFull()) return "Full";
+    int oct = (((top_ - bottom_) & mask_) * 8) / (mask_ + 1);
+    switch (oct) {
+      case 0: return "Almost empty";
+      case 1: return "1/8 full";
+      case 2: return "2/8 full";
+      case 3: return "3/8 full";
+      case 4: return "4/8 full";
+      case 5: return "5/8 full";
+      case 6: return "6/8 full";
+      case 7: return "7/8 full";
+    }
+    return "??";
+  }
+#endif
+
  private:
   HeapObject** array_;
   // array_[(top - 1) & mask_] is the top element in the deque. The Deque is
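A quick worked example of the modular ring-buffer arithmetic in space_left():

// With mask_ = 7 (an 8-entry deque), top_ = 5 and bottom_ = 2, the deque
// currently holds (top_ - bottom_) & mask_ = 3 entries, so
// space_left() returns (bottom_ - top_) & mask_ = (-3) & 7 = 5 free slots.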
@@ -566,6 +595,10 @@ class MarkCompactCollector {

   bool is_compacting() const { return compacting_; }

+  // Find the large objects that are not completely scanned, but have been
+  // postponed to later.
+  static void ProcessLargePostponedArrays(Heap* heap, MarkingDeque* deque);
+
  private:
   MarkCompactCollector();
   ~MarkCompactCollector();
@@ -262,9 +262,11 @@ void StaticMarkingVisitor<StaticVisitor>::VisitMap(
       map_object->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
     MarkMapContents(heap, map_object);
   } else {
-    StaticVisitor::VisitPointers(heap,
-        HeapObject::RawField(object, Map::kPointerFieldsBeginOffset),
-        HeapObject::RawField(object, Map::kPointerFieldsEndOffset));
+    Object** start_slot =
+        HeapObject::RawField(object, Map::kPointerFieldsBeginOffset);
+    Object** end_slot =
+        HeapObject::RawField(object, Map::kPointerFieldsEndOffset);
+    StaticVisitor::VisitPointers(heap, start_slot, start_slot, end_slot);
   }
 }

@@ -286,9 +288,12 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSRegExp(
     Map* map, HeapObject* object) {
   int last_property_offset =
       JSRegExp::kSize + kPointerSize * map->inobject_properties();
-  StaticVisitor::VisitPointers(map->GetHeap(),
-      HeapObject::RawField(object, JSRegExp::kPropertiesOffset),
-      HeapObject::RawField(object, last_property_offset));
+  Object** start_slot =
+      HeapObject::RawField(object, JSRegExp::kPropertiesOffset);
+  Object** end_slot =
+      HeapObject::RawField(object, last_property_offset);
+  StaticVisitor::VisitPointers(
+      map->GetHeap(), start_slot, start_slot, end_slot);
 }

@@ -315,9 +320,11 @@ void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(
   // Mark the pointer fields of the Map. Since the transitions array has
   // been marked already, it is fine that one of these fields contains a
   // pointer to it.
-  StaticVisitor::VisitPointers(heap,
-      HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
-      HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
+  Object** start_slot =
+      HeapObject::RawField(map, Map::kPointerFieldsBeginOffset);
+  Object** end_slot =
+      HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
+  StaticVisitor::VisitPointers(heap, start_slot, start_slot, end_slot);
 }

@@ -213,7 +213,7 @@ class BodyVisitorBase : public AllStatic {
                                                            start_offset);
     Object** end_slot = reinterpret_cast<Object**>(object->address() +
                                                    end_offset);
-    StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+    StaticVisitor::VisitPointers(heap, start_slot, start_slot, end_slot);
   }
 };

@@ -283,21 +283,26 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
     return table_.GetVisitor(map)(map, obj);
   }

-  static inline void VisitPointers(Heap* heap, Object** start, Object** end) {
+  static inline void VisitPointers(
+      Heap* heap, Object** anchor, Object** start, Object** end) {
     for (Object** p = start; p < end; p++) StaticVisitor::VisitPointer(heap, p);
   }

  private:
   static inline int VisitJSFunction(Map* map, HeapObject* object) {
     Heap* heap = map->GetHeap();
+    Object** start_slot =
+        HeapObject::RawField(object, JSFunction::kPropertiesOffset);
     VisitPointers(heap,
-                  HeapObject::RawField(object, JSFunction::kPropertiesOffset),
+                  start_slot,
+                  start_slot,
                   HeapObject::RawField(object, JSFunction::kCodeEntryOffset));

     // Don't visit code entry. We are using this visitor only during scavenges.

     VisitPointers(
         heap,
+        start_slot,
         HeapObject::RawField(object,
                              JSFunction::kCodeEntryOffset + kPointerSize),
         HeapObject::RawField(object,
@@ -2679,12 +2679,10 @@ MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,

   HeapObject* object = page->GetObject();

-#ifdef DEBUG
-  // Make the object consistent so the heap can be vefified in OldSpaceStep.
+  // Make the object consistent so the large object space can be traversed.
   reinterpret_cast<Object**>(object->address())[0] =
       heap()->fixed_array_map();
   reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
-#endif

   heap()->incremental_marking()->OldSpaceStep(object_size);
   return object;
src/spaces.h (37 changed lines)
@@ -400,6 +400,15 @@ class MemoryChunk {
     WAS_SWEPT_PRECISELY,
     WAS_SWEPT_CONSERVATIVELY,

+    // Used for large objects only. Indicates that the object has been
+    // partially scanned by the incremental mark-sweep GC. Objects that have
+    // been partially scanned are marked black so that the write barrier
+    // triggers for them, and they are counted as live bytes. If the mutator
+    // writes to them they may be turned grey and subtracted from the live byte
+    // list. They move back to the marking deque either by an iteration over
+    // the large object space or in the write barrier.
+    IS_PARTIALLY_SCANNED,
+
     // Last flag, keep at bottom.
     NUM_MEMORY_CHUNK_FLAGS
   };
@@ -420,6 +429,25 @@ class MemoryChunk {
                                (1 << IN_FROM_SPACE) |
                                (1 << IN_TO_SPACE);

+  static const int kIsPartiallyScannedMask = 1 << IS_PARTIALLY_SCANNED;
+
+  void SetPartiallyScannedProgress(int progress) {
+    SetFlag(IS_PARTIALLY_SCANNED);
+    partially_scanned_progress_ = progress;
+  }
+
+  bool IsPartiallyScanned() {
+    return IsFlagSet(IS_PARTIALLY_SCANNED);
+  }
+
+  void SetCompletelyScanned() {
+    ClearFlag(IS_PARTIALLY_SCANNED);
+  }
+
+  int PartiallyScannedProgress() {
+    ASSERT(IsPartiallyScanned());
+    return partially_scanned_progress_;
+  }
+
   void SetFlag(int flag) {
     flags_ |= static_cast<uintptr_t>(1) << flag;
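Condensed from the VisitHugeArray changes earlier in this commit, the accessors above are consumed in the following resume pattern:

//   int from = chunk->IsPartiallyScanned() ? chunk->PartiallyScannedProgress()
//                                          : 0;
//   int to = Min(from + kScanningChunk, length);
//   ... scan slots [from, to) ...
//   if (to == length) chunk->SetCompletelyScanned();
//   else chunk->SetPartiallyScannedProgress(to);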
@@ -506,8 +534,14 @@ class MemoryChunk {

   static const size_t kWriteBarrierCounterOffset =
       kSlotsBufferOffset + kPointerSize + kPointerSize;
+  static const size_t kPartiallyScannedProgress =
+      kWriteBarrierCounterOffset + kPointerSize;

-  static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize;
+  // Actually the partially_scanned_progress_ member is only an int, but on
+  // 64 bit the size of MemoryChunk gets rounded up to a 64 bit size so we
+  // have to have the header start kPointerSize after the
+  // partially_scanned_progress_ member.
+  static const size_t kHeaderSize = kPartiallyScannedProgress + kPointerSize;

   static const int kBodyOffset =
       CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize));
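Spelling out the offsets above for a 64-bit build (kPointerSize == 8; kSlotsBufferOffset itself is defined elsewhere in this header):

//   kWriteBarrierCounterOffset = kSlotsBufferOffset + 16
//   kPartiallyScannedProgress  = kWriteBarrierCounterOffset + 8
//   kHeaderSize                = kPartiallyScannedProgress + 8
// so the int partially_scanned_progress_ effectively occupies a full
// pointer-sized slot, which is what the comment about rounding refers to.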
@@ -644,6 +678,7 @@ class MemoryChunk {
   SlotsBuffer* slots_buffer_;
   SkipList* skip_list_;
   intptr_t write_barrier_counter_;
+  int partially_scanned_progress_;

   static MemoryChunk* Initialize(Heap* heap,
                                  Address base,