Remove conservative sweeping.

BUG=
R=jarin@chromium.org

Review URL: https://codereview.chromium.org/479113004

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@23283 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

This commit is contained in:
parent 53fdf75be1
commit 65c9c2a2dd
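
In brief, the hunks below collapse the split precise/conservative sweeping paths into a single sweeping path. The rename map below is reconstructed from the diff itself as an orientation aid; it is not part of the original commit message:

    // Old name / flag                                         New name / flag
    // SweepPrecisely() and SweepConservatively()           -> Sweep()
    // Page::WasSweptPrecisely()/WasSweptConservatively()   -> Page::WasSwept() / SetWasSwept() / ClearWasSwept()
    // MemoryChunk flags WAS_SWEPT_PRECISELY/_CONSERVATIVELY -> WAS_SWEPT
    // SweeperType *_CONSERVATIVE, *_PRECISE, PRECISE        -> PARALLEL_SWEEPING, CONCURRENT_SWEEPING, SEQUENTIAL_SWEEPING
    // Heap::kSweepPreciselyMask                             -> removed; kMakeHeapIterableMask == kAbortIncrementalMarkingMask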
@@ -526,7 +526,6 @@ DEFINE_BOOL(trace_incremental_marking, false,
             "trace progress of the incremental marking")
 DEFINE_BOOL(track_gc_object_stats, false,
             "track object counts and memory usage")
-DEFINE_BOOL(always_precise_sweeping, true, "always sweep precisely")
 DEFINE_BOOL(parallel_sweeping, false, "enable parallel sweeping")
 DEFINE_BOOL(concurrent_sweeping, true, "enable concurrent sweeping")
 DEFINE_INT(sweeper_threads, 0,
@@ -2580,15 +2580,6 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {

 #ifdef VERIFY_HEAP
   Heap* debug_heap = heap_;
-  CHECK(debug_heap->old_data_space()->swept_precisely());
-  CHECK(debug_heap->old_pointer_space()->swept_precisely());
-  CHECK(debug_heap->code_space()->swept_precisely());
-  CHECK(debug_heap->cell_space()->swept_precisely());
-  CHECK(debug_heap->property_cell_space()->swept_precisely());
-  CHECK(debug_heap->map_space()->swept_precisely());
-#endif
-
-#ifdef VERIFY_HEAP
   debug_heap->Verify();
 #endif

@@ -1273,14 +1273,10 @@ static void VerifyNonPointerSpacePointers(Heap* heap) {
        object = code_it.Next())
     object->Iterate(&v);

-  // The old data space was normally swept conservatively so that the iterator
-  // doesn't work, so we normally skip the next bit.
-  if (heap->old_data_space()->swept_precisely()) {
   HeapObjectIterator data_it(heap->old_data_space());
   for (HeapObject* object = data_it.Next(); object != NULL;
        object = data_it.Next())
     object->Iterate(&v);
-  }
 }
 #endif  // VERIFY_HEAP

@@ -4242,9 +4238,7 @@ AllocationResult Heap::AllocateStruct(InstanceType type) {
 bool Heap::IsHeapIterable() {
   // TODO(hpayer): This function is not correct. Allocation folding in old
   // space breaks the iterability.
-  return (old_pointer_space()->swept_precisely() &&
-          old_data_space()->swept_precisely() &&
-          new_space_top_after_last_gc_ == new_space()->top());
+  return new_space_top_after_last_gc_ == new_space()->top();
 }


@@ -715,14 +715,11 @@ class Heap {
       const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

   static const int kNoGCFlags = 0;
-  static const int kSweepPreciselyMask = 1;
-  static const int kReduceMemoryFootprintMask = 2;
-  static const int kAbortIncrementalMarkingMask = 4;
+  static const int kReduceMemoryFootprintMask = 1;
+  static const int kAbortIncrementalMarkingMask = 2;

-  // Making the heap iterable requires us to sweep precisely and abort any
-  // incremental marking as well.
-  static const int kMakeHeapIterableMask =
-      kSweepPreciselyMask | kAbortIncrementalMarkingMask;
+  // Making the heap iterable requires us to abort incremental marking.
+  static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;

   // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
   // non-zero, then the slower precise sweeper is used, which leaves the heap
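
With kSweepPreciselyMask gone, code that needs an iterable heap only has to request that incremental marking be aborted. A minimal illustrative sketch under that assumption (the call site below is hypothetical and not part of this patch; CollectAllGarbage is an existing Heap entry point):

    // Hypothetical call site: kMakeHeapIterableMask now expands to
    // kAbortIncrementalMarkingMask only; no separate precise-sweep bit exists.
    heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "make heap iterable");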
@@ -23,7 +23,6 @@ MarkBit Marking::MarkBitFrom(Address addr) {


 void MarkCompactCollector::SetFlags(int flags) {
-  sweep_precisely_ = ((flags & Heap::kSweepPreciselyMask) != 0);
   reduce_memory_footprint_ = ((flags & Heap::kReduceMemoryFootprintMask) != 0);
   abort_incremental_marking_ =
       ((flags & Heap::kAbortIncrementalMarkingMask) != 0);
@@ -41,7 +41,6 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
 #ifdef DEBUG
       state_(IDLE),
 #endif
-      sweep_precisely_(false),
       reduce_memory_footprint_(false),
       abort_incremental_marking_(false),
       marking_parity_(ODD_MARKING_PARITY),
@@ -200,7 +199,6 @@ static void VerifyEvacuation(NewSpace* space) {


 static void VerifyEvacuation(Heap* heap, PagedSpace* space) {
-  if (!space->swept_precisely()) return;
   if (FLAG_use_allocation_folding &&
       (space == heap->old_pointer_space() || space == heap->old_data_space())) {
     return;
@@ -3126,7 +3124,7 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
   AlwaysAllocateScope always_allocate(isolate());
   PagedSpace* space = static_cast<PagedSpace*>(p->owner());
   DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
-  p->MarkSweptPrecisely();
+  p->SetWasSwept();

   int offsets[16];

@@ -3290,10 +3288,7 @@ static intptr_t Free(PagedSpace* space, FreeList* free_list, Address start,
 }


-// Sweep a space precisely. After this has been done the space can
-// be iterated precisely, hitting only the live objects. Code space
-// is always swept precisely because we want to be able to iterate
-// over it. Map space is swept precisely, because it is not compacted.
+// Sweeps a page. After sweeping the page can be iterated.
 // Slots in live objects pointing into evacuation candidates are updated
 // if requested.
 // Returns the size of the biggest continuous freed memory chunk in bytes.
@@ -3301,8 +3296,8 @@ template <SweepingMode sweeping_mode,
           MarkCompactCollector::SweepingParallelism parallelism,
           SkipListRebuildingMode skip_list_mode,
           FreeSpaceTreatmentMode free_space_mode>
-static int SweepPrecisely(PagedSpace* space, FreeList* free_list, Page* p,
+static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
                  ObjectVisitor* v) {
   DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
   DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
             space->identity() == CODE_SPACE);
@@ -3384,7 +3379,7 @@ static int SweepPrecisely(PagedSpace* space, FreeList* free_list, Page* p,
     // sweeping by the main thread.
     p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
   } else {
-    p->MarkSweptPrecisely();
+    p->SetWasSwept();
   }
   return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
 }
@@ -3621,22 +3616,24 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {

       switch (space->identity()) {
         case OLD_DATA_SPACE:
-          SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
+          Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
+                IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
+                                                     &updating_visitor);
           break;
         case OLD_POINTER_SPACE:
-          SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
-                         IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
-              space, NULL, p, &updating_visitor);
+          Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
+                IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
+                                                     &updating_visitor);
           break;
         case CODE_SPACE:
           if (FLAG_zap_code_space) {
-            SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
-                           REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(
-                space, NULL, p, &updating_visitor);
+            Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
+                  REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, NULL, p,
+                                                     &updating_visitor);
           } else {
-            SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
-                           REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(
-                space, NULL, p, &updating_visitor);
+            Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
+                  REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
+                                                        &updating_visitor);
           }
           break;
         default:
@@ -4119,182 +4116,6 @@ static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
 }


-static inline Address DigestFreeStart(Address approximate_free_start,
-                                      uint32_t free_start_cell) {
-  DCHECK(free_start_cell != 0);
-
-  // No consecutive 1 bits.
-  DCHECK((free_start_cell & (free_start_cell << 1)) == 0);
-
-  int offsets[16];
-  uint32_t cell = free_start_cell;
-  int offset_of_last_live;
-  if ((cell & 0x80000000u) != 0) {
-    // This case would overflow below.
-    offset_of_last_live = 31;
-  } else {
-    // Remove all but one bit, the most significant. This is an optimization
-    // that may or may not be worthwhile.
-    cell |= cell >> 16;
-    cell |= cell >> 8;
-    cell |= cell >> 4;
-    cell |= cell >> 2;
-    cell |= cell >> 1;
-    cell = (cell + 1) >> 1;
-    int live_objects = MarkWordToObjectStarts(cell, offsets);
-    DCHECK(live_objects == 1);
-    offset_of_last_live = offsets[live_objects - 1];
-  }
-  Address last_live_start =
-      approximate_free_start + offset_of_last_live * kPointerSize;
-  HeapObject* last_live = HeapObject::FromAddress(last_live_start);
-  Address free_start = last_live_start + last_live->Size();
-  return free_start;
-}
-
-
-static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
-  DCHECK(cell != 0);
-
-  // No consecutive 1 bits.
-  DCHECK((cell & (cell << 1)) == 0);
-
-  int offsets[16];
-  if (cell == 0x80000000u) {  // Avoid overflow below.
-    return block_address + 31 * kPointerSize;
-  }
-  uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
-  DCHECK((first_set_bit & cell) == first_set_bit);
-  int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
-  DCHECK(live_objects == 1);
-  USE(live_objects);
-  return block_address + offsets[0] * kPointerSize;
-}
-
-
-// Force instantiation of templatized SweepConservatively method for
-// SWEEP_ON_MAIN_THREAD mode.
-template int MarkCompactCollector::SweepConservatively<
-    MarkCompactCollector::SWEEP_ON_MAIN_THREAD>(PagedSpace*, FreeList*, Page*);
-
-
-// Force instantiation of templatized SweepConservatively method for
-// SWEEP_IN_PARALLEL mode.
-template int MarkCompactCollector::SweepConservatively<
-    MarkCompactCollector::SWEEP_IN_PARALLEL>(PagedSpace*, FreeList*, Page*);
-
-
-// Sweeps a space conservatively. After this has been done the larger free
-// spaces have been put on the free list and the smaller ones have been
-// ignored and left untouched. A free space is always either ignored or put
-// on the free list, never split up into two parts. This is important
-// because it means that any FreeSpace maps left actually describe a region of
-// memory that can be ignored when scanning. Dead objects other than free
-// spaces will not contain the free space map.
-template <MarkCompactCollector::SweepingParallelism mode>
-int MarkCompactCollector::SweepConservatively(PagedSpace* space,
-                                              FreeList* free_list, Page* p) {
-  DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
-  DCHECK(
-      (mode == MarkCompactCollector::SWEEP_IN_PARALLEL && free_list != NULL) ||
-      (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD &&
-       free_list == NULL));
-
-  intptr_t freed_bytes = 0;
-  intptr_t max_freed_bytes = 0;
-  size_t size = 0;
-
-  // Skip over all the dead objects at the start of the page and mark them free.
-  Address cell_base = 0;
-  MarkBit::CellType* cell = NULL;
-  MarkBitCellIterator it(p);
-  for (; !it.Done(); it.Advance()) {
-    cell_base = it.CurrentCellBase();
-    cell = it.CurrentCell();
-    if (*cell != 0) break;
-  }
-
-  if (it.Done()) {
-    size = p->area_end() - p->area_start();
-    freed_bytes =
-        Free<mode>(space, free_list, p->area_start(), static_cast<int>(size));
-    max_freed_bytes = Max(freed_bytes, max_freed_bytes);
-    DCHECK_EQ(0, p->LiveBytes());
-    if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) {
-      // When concurrent sweeping is active, the page will be marked after
-      // sweeping by the main thread.
-      p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
-    } else {
-      p->MarkSweptConservatively();
-    }
-    return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
-  }
-
-  // Grow the size of the start-of-page free space a little to get up to the
-  // first live object.
-  Address free_end = StartOfLiveObject(cell_base, *cell);
-  // Free the first free space.
-  size = free_end - p->area_start();
-  freed_bytes =
-      Free<mode>(space, free_list, p->area_start(), static_cast<int>(size));
-  max_freed_bytes = Max(freed_bytes, max_freed_bytes);
-
-  // The start of the current free area is represented in undigested form by
-  // the address of the last 32-word section that contained a live object and
-  // the marking bitmap for that cell, which describes where the live object
-  // started. Unless we find a large free space in the bitmap we will not
-  // digest this pair into a real address. We start the iteration here at the
-  // first word in the marking bit map that indicates a live object.
-  Address free_start = cell_base;
-  MarkBit::CellType free_start_cell = *cell;
-
-  for (; !it.Done(); it.Advance()) {
-    cell_base = it.CurrentCellBase();
-    cell = it.CurrentCell();
-    if (*cell != 0) {
-      // We have a live object. Check approximately whether it is more than 32
-      // words since the last live object.
-      if (cell_base - free_start > 32 * kPointerSize) {
-        free_start = DigestFreeStart(free_start, free_start_cell);
-        if (cell_base - free_start > 32 * kPointerSize) {
-          // Now that we know the exact start of the free space it still looks
-          // like we have a large enough free space to be worth bothering with.
-          // so now we need to find the start of the first live object at the
-          // end of the free space.
-          free_end = StartOfLiveObject(cell_base, *cell);
-          freed_bytes = Free<mode>(space, free_list, free_start,
-                                   static_cast<int>(free_end - free_start));
-          max_freed_bytes = Max(freed_bytes, max_freed_bytes);
-        }
-      }
-      // Update our undigested record of where the current free area started.
-      free_start = cell_base;
-      free_start_cell = *cell;
-      // Clear marking bits for current cell.
-      *cell = 0;
-    }
-  }
-
-  // Handle the free space at the end of the page.
-  if (cell_base - free_start > 32 * kPointerSize) {
-    free_start = DigestFreeStart(free_start, free_start_cell);
-    freed_bytes = Free<mode>(space, free_list, free_start,
-                             static_cast<int>(p->area_end() - free_start));
-    max_freed_bytes = Max(freed_bytes, max_freed_bytes);
-  }
-
-  p->ResetLiveBytes();
-  if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) {
-    // When concurrent sweeping is active, the page will be marked after
-    // sweeping by the main thread.
-    p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
-  } else {
-    p->MarkSweptConservatively();
-  }
-  return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
-}
-
-
 int MarkCompactCollector::SweepInParallel(PagedSpace* space,
                                           int required_freed_bytes) {
   int max_freed = 0;
@@ -4321,14 +4142,8 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
                              ? free_list_old_pointer_space_.get()
                              : free_list_old_data_space_.get();
     FreeList private_free_list(space);
-    if (space->swept_precisely()) {
-      max_freed = SweepPrecisely<SWEEP_ONLY, SWEEP_IN_PARALLEL,
-                                 IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
-          space, &private_free_list, page, NULL);
-    } else {
-      max_freed = SweepConservatively<SWEEP_IN_PARALLEL>(
-          space, &private_free_list, page);
-    }
+    max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
+                      IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
     free_list->Concatenate(&private_free_list);
   }
   return max_freed;
@@ -4336,9 +4151,6 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {


 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
-  space->set_swept_precisely(sweeper == PRECISE ||
-                             sweeper == CONCURRENT_PRECISE ||
-                             sweeper == PARALLEL_PRECISE);
   space->ClearStats();

   // We defensively initialize end_of_unswept_pages_ here with the first page
@@ -4356,8 +4168,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
     DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);

     // Clear sweeping flags indicating that marking bits are still intact.
-    p->ClearSweptPrecisely();
-    p->ClearSweptConservatively();
+    p->ClearWasSwept();

     if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
         p->IsEvacuationCandidate()) {
@@ -4383,19 +4194,20 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
     }

     switch (sweeper) {
-      case CONCURRENT_CONSERVATIVE:
-      case PARALLEL_CONSERVATIVE: {
+      case CONCURRENT_SWEEPING:
+      case PARALLEL_SWEEPING:
         if (!parallel_sweeping_active) {
           if (FLAG_gc_verbose) {
-            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
+            PrintF("Sweeping 0x%" V8PRIxPTR ".\n",
                    reinterpret_cast<intptr_t>(p));
           }
-          SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
+          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+                IGNORE_FREE_SPACE>(space, NULL, p, NULL);
           pages_swept++;
           parallel_sweeping_active = true;
         } else {
           if (FLAG_gc_verbose) {
-            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
+            PrintF("Sweeping 0x%" V8PRIxPTR " in parallel.\n",
                    reinterpret_cast<intptr_t>(p));
           }
           p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING);
@@ -4403,42 +4215,19 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
         }
         space->set_end_of_unswept_pages(p);
         break;
-      }
-      case CONCURRENT_PRECISE:
-      case PARALLEL_PRECISE:
-        if (!parallel_sweeping_active) {
-          if (FLAG_gc_verbose) {
-            PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
-                   reinterpret_cast<intptr_t>(p));
-          }
-          SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
-                         IGNORE_FREE_SPACE>(space, NULL, p, NULL);
-          pages_swept++;
-          parallel_sweeping_active = true;
-        } else {
-          if (FLAG_gc_verbose) {
-            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
-                   reinterpret_cast<intptr_t>(p));
-          }
-          p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING);
-          space->IncreaseUnsweptFreeBytes(p);
-        }
-        space->set_end_of_unswept_pages(p);
-        break;
-      case PRECISE: {
+      case SEQUENTIAL_SWEEPING: {
         if (FLAG_gc_verbose) {
-          PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
-                 reinterpret_cast<intptr_t>(p));
+          PrintF("Sweeping 0x%" V8PRIxPTR ".\n", reinterpret_cast<intptr_t>(p));
         }
         if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
-          SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
                 ZAP_FREE_SPACE>(space, NULL, p, NULL);
         } else if (space->identity() == CODE_SPACE) {
-          SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
                 IGNORE_FREE_SPACE>(space, NULL, p, NULL);
         } else {
-          SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
                 IGNORE_FREE_SPACE>(space, NULL, p, NULL);
         }
         pages_swept++;
         break;
@@ -4458,17 +4247,14 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {


 static bool ShouldStartSweeperThreads(MarkCompactCollector::SweeperType type) {
-  return type == MarkCompactCollector::PARALLEL_CONSERVATIVE ||
-         type == MarkCompactCollector::CONCURRENT_CONSERVATIVE ||
-         type == MarkCompactCollector::PARALLEL_PRECISE ||
-         type == MarkCompactCollector::CONCURRENT_PRECISE;
+  return type == MarkCompactCollector::PARALLEL_SWEEPING ||
+         type == MarkCompactCollector::CONCURRENT_SWEEPING;
 }


 static bool ShouldWaitForSweeperThreads(
     MarkCompactCollector::SweeperType type) {
-  return type == MarkCompactCollector::PARALLEL_CONSERVATIVE ||
-         type == MarkCompactCollector::PARALLEL_PRECISE;
+  return type == MarkCompactCollector::PARALLEL_SWEEPING;
 }


@@ -4482,16 +4268,9 @@ void MarkCompactCollector::SweepSpaces() {
 #ifdef DEBUG
   state_ = SWEEP_SPACES;
 #endif
-  SweeperType how_to_sweep = CONCURRENT_CONSERVATIVE;
-  if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
-  if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
-  if (FLAG_always_precise_sweeping && FLAG_parallel_sweeping) {
-    how_to_sweep = PARALLEL_PRECISE;
-  }
-  if (FLAG_always_precise_sweeping && FLAG_concurrent_sweeping) {
-    how_to_sweep = CONCURRENT_PRECISE;
-  }
-  if (sweep_precisely_) how_to_sweep = PRECISE;
+  SweeperType how_to_sweep = CONCURRENT_SWEEPING;
+  if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_SWEEPING;
+  if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_SWEEPING;

   MoveEvacuationCandidatesToEndOfPagesList();

@@ -4522,14 +4301,14 @@ void MarkCompactCollector::SweepSpaces() {
   {
     GCTracer::Scope sweep_scope(heap()->tracer(),
                                 GCTracer::Scope::MC_SWEEP_CODE);
-    SweepSpace(heap()->code_space(), PRECISE);
+    SweepSpace(heap()->code_space(), SEQUENTIAL_SWEEPING);
   }

   {
     GCTracer::Scope sweep_scope(heap()->tracer(),
                                 GCTracer::Scope::MC_SWEEP_CELL);
-    SweepSpace(heap()->cell_space(), PRECISE);
-    SweepSpace(heap()->property_cell_space(), PRECISE);
+    SweepSpace(heap()->cell_space(), SEQUENTIAL_SWEEPING);
+    SweepSpace(heap()->property_cell_space(), SEQUENTIAL_SWEEPING);
   }

   EvacuateNewSpaceAndCandidates();
@@ -4540,7 +4319,7 @@ void MarkCompactCollector::SweepSpaces() {
   {
     GCTracer::Scope sweep_scope(heap()->tracer(),
                                 GCTracer::Scope::MC_SWEEP_MAP);
-    SweepSpace(heap()->map_space(), PRECISE);
+    SweepSpace(heap()->map_space(), SEQUENTIAL_SWEEPING);
   }

   // Deallocate unmarked objects and clear marked bits for marked objects.
@@ -4562,11 +4341,7 @@ void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
     Page* p = it.next();
     if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) {
       p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE);
-      if (space->swept_precisely()) {
-        p->MarkSweptPrecisely();
-      } else {
-        p->MarkSweptConservatively();
-      }
+      p->SetWasSwept();
     }
     DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
   }
@@ -544,11 +544,9 @@ class MarkCompactCollector {
   void EnableCodeFlushing(bool enable);

   enum SweeperType {
-    PARALLEL_CONSERVATIVE,
-    CONCURRENT_CONSERVATIVE,
-    PARALLEL_PRECISE,
-    CONCURRENT_PRECISE,
-    PRECISE
+    PARALLEL_SWEEPING,
+    CONCURRENT_SWEEPING,
+    SEQUENTIAL_SWEEPING
   };

   enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };
@@ -561,12 +559,6 @@ class MarkCompactCollector {
   void VerifyOmittedMapChecks();
 #endif

-  // Sweep a single page from the given space conservatively.
-  // Returns the size of the biggest continuous freed memory chunk in bytes.
-  template <SweepingParallelism type>
-  static int SweepConservatively(PagedSpace* space, FreeList* free_list,
-                                 Page* p);
-
   INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
     return Page::FromAddress(reinterpret_cast<Address>(anchor))
         ->ShouldSkipEvacuationSlotRecording();
@@ -693,10 +685,6 @@ class MarkCompactCollector {
   CollectorState state_;
 #endif

-  // Global flag that forces sweeping to be precise, so we can traverse the
-  // heap.
-  bool sweep_precisely_;
-
   bool reduce_memory_footprint_;

   bool abort_incremental_marking_;
@@ -47,18 +47,13 @@ HeapObjectIterator::HeapObjectIterator(Page* page,
          owner == page->heap()->code_space());
   Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
              page->area_end(), kOnePageOnly, size_func);
-  DCHECK(page->WasSweptPrecisely() ||
-         (static_cast<PagedSpace*>(owner)->swept_precisely() &&
-          page->SweepingCompleted()));
+  DCHECK(page->WasSwept() || page->SweepingCompleted());
 }


 void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
                                     HeapObjectIterator::PageMode mode,
                                     HeapObjectCallback size_f) {
-  // Check that we actually can iterate this space.
-  DCHECK(space->swept_precisely());
-
   space_ = space;
   cur_addr_ = cur;
   cur_end_ = end;
@@ -83,9 +78,7 @@ bool HeapObjectIterator::AdvanceToNextPage() {
   if (cur_page == space_->anchor()) return false;
   cur_addr_ = cur_page->area_start();
   cur_end_ = cur_page->area_end();
-  DCHECK(cur_page->WasSweptPrecisely() ||
-         (static_cast<PagedSpace*>(cur_page->owner())->swept_precisely() &&
-          cur_page->SweepingCompleted()));
+  DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
   return true;
 }

@@ -459,7 +452,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
   chunk->ResetLiveBytes();
   Bitmap::Clear(chunk);
   chunk->initialize_scan_on_scavenge(false);
-  chunk->SetFlag(WAS_SWEPT_PRECISELY);
+  chunk->SetFlag(WAS_SWEPT);

   DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
   DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
@@ -886,7 +879,6 @@ PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
                        Executability executable)
     : Space(heap, id, executable),
       free_list_(this),
-      swept_precisely_(true),
       unswept_free_bytes_(0),
       end_of_unswept_pages_(NULL),
       emergency_memory_(NULL) {
@@ -936,7 +928,7 @@ size_t PagedSpace::CommittedPhysicalMemory() {


 Object* PagedSpace::FindObject(Address addr) {
-  // Note: this function can only be called on precisely swept spaces.
+  // Note: this function can only be called on iterable spaces.
   DCHECK(!heap()->mark_compact_collector()->in_use());

   if (!Contains(addr)) return Smi::FromInt(0);  // Signaling not found.
@@ -1129,9 +1121,6 @@ void PagedSpace::Print() {}

 #ifdef VERIFY_HEAP
 void PagedSpace::Verify(ObjectVisitor* visitor) {
-  // We can only iterate over the pages if they were swept precisely.
-  if (!swept_precisely_) return;
-
   bool allocation_pointer_found_in_space =
       (allocation_info_.top() == allocation_info_.limit());
   PageIterator page_iterator(this);
@@ -1141,7 +1130,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
     if (page == Page::FromAllocationTop(allocation_info_.top())) {
       allocation_pointer_found_in_space = true;
     }
-    CHECK(page->WasSweptPrecisely());
+    CHECK(page->WasSwept());
     HeapObjectIterator it(page, NULL);
     Address end_of_previous_object = page->area_start();
     Address top = page->area_end();
@@ -2737,7 +2726,6 @@ void PagedSpace::ReportStatistics() {
          ", available: %" V8_PTR_PREFIX "d, %%%d\n",
          Capacity(), Waste(), Available(), pct);

-  if (!swept_precisely_) return;
   if (heap()->mark_compact_collector()->sweeping_in_progress()) {
     heap()->mark_compact_collector()->EnsureSweepingCompleted();
   }
@@ -373,12 +373,9 @@ class MemoryChunk {
     EVACUATION_CANDIDATE,
     RESCAN_ON_EVACUATION,

-    // Pages swept precisely can be iterated, hitting only the live objects.
-    // Whereas those swept conservatively cannot be iterated over. Both flags
-    // indicate that marking bits have been cleared by the sweeper, otherwise
-    // marking bits are still intact.
-    WAS_SWEPT_PRECISELY,
-    WAS_SWEPT_CONSERVATIVELY,
+    // WAS_SWEPT indicates that marking bits have been cleared by the sweeper,
+    // otherwise marking bits are still intact.
+    WAS_SWEPT,

     // Large objects can have a progress bar in their page header. These object
     // are scanned in increments and will be kept black while being scanned.
@@ -765,15 +762,9 @@ class Page : public MemoryChunk {

   void InitializeAsAnchor(PagedSpace* owner);

-  bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
-  bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
-  bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }
-
-  void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
-  void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }
-
-  void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
-  void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }
+  bool WasSwept() { return IsFlagSet(WAS_SWEPT); }
+  void SetWasSwept() { SetFlag(WAS_SWEPT); }
+  void ClearWasSwept() { ClearFlag(WAS_SWEPT); }

   void ResetFreeListStatistics();

@@ -1830,14 +1821,11 @@ class PagedSpace : public Space {
   static void ResetCodeStatistics(Isolate* isolate);
 #endif

-  bool swept_precisely() { return swept_precisely_; }
-  void set_swept_precisely(bool b) { swept_precisely_ = b; }
-
   // Evacuation candidates are swept by evacuator. Needs to return a valid
   // result before _and_ after evacuation has finished.
   static bool ShouldBeSweptBySweeperThreads(Page* p) {
     return !p->IsEvacuationCandidate() &&
-           !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSweptPrecisely();
+           !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSwept();
   }

   void IncrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ += by; }
@@ -1907,12 +1895,8 @@ class PagedSpace : public Space {
   // Normal allocation information.
   AllocationInfo allocation_info_;

-  // This space was swept precisely, hence it is iterable.
-  bool swept_precisely_;
-
   // The number of free bytes which could be reclaimed by advancing the
-  // concurrent sweeper threads. This is only an estimation because concurrent
-  // sweeping is done conservatively.
+  // concurrent sweeper threads.
   intptr_t unswept_free_bytes_;

   // The sweeper threads iterate over the list of pointer and data space pages
@@ -477,10 +477,8 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
       } else {
         Page* page = reinterpret_cast<Page*>(chunk);
         PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
-        Address start = page->area_start();
-        Address end = page->area_end();
         if (owner == heap_->map_space()) {
-          DCHECK(page->WasSweptPrecisely());
+          DCHECK(page->WasSwept());
           HeapObjectIterator iterator(page, NULL);
           for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
                heap_object = iterator.Next()) {
@@ -504,24 +502,17 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
             heap_->mark_compact_collector()->EnsureSweepingCompleted();
           }
         }
-        // TODO(hpayer): remove the special casing and merge map and pointer
-        // space handling as soon as we removed conservative sweeping.
         CHECK(page->owner() == heap_->old_pointer_space());
-        if (heap_->old_pointer_space()->swept_precisely()) {
-          HeapObjectIterator iterator(page, NULL);
-          for (HeapObject* heap_object = iterator.Next();
-               heap_object != NULL; heap_object = iterator.Next()) {
-            // We iterate over objects that contain new space pointers only.
-            if (heap_object->MayContainNewSpacePointers()) {
-              FindPointersToNewSpaceInRegion(
-                  heap_object->address() + HeapObject::kHeaderSize,
-                  heap_object->address() + heap_object->Size(),
-                  slot_callback, clear_maps);
-            }
+        HeapObjectIterator iterator(page, NULL);
+        for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
+             heap_object = iterator.Next()) {
+          // We iterate over objects that contain new space pointers only.
+          if (heap_object->MayContainNewSpacePointers()) {
+            FindPointersToNewSpaceInRegion(
+                heap_object->address() + HeapObject::kHeaderSize,
+                heap_object->address() + heap_object->Size(), slot_callback,
+                clear_maps);
           }
-        } else {
-          FindPointersToNewSpaceInRegion(start, end, slot_callback,
-                                         clear_maps);
         }
       }
     }