Wait for sweeper threads only if we have to, i.e. if we globally have to finish sweeping or if we have to allocate from a certain space.

BUG=
R=ulan@chromium.org

Review URL: https://codereview.chromium.org/361983003

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22156 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
hpayer@chromium.org 2014-07-02 12:33:12 +00:00
parent 09660b00eb
commit d4c07acd66
6 changed files with 39 additions and 29 deletions
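
In essence, the change makes the "is concurrent sweeping in progress" question space-aware: an allocating space only blocks on sweeper threads when that particular space is still being swept concurrently, while callers that care about global sweeping progress keep the old behaviour. A minimal sketch of the policy, using simplified stand-in types rather than the real V8 classes (the actual declarations appear in the diffs below):

// Simplified stand-ins; only the parts relevant to the new wait policy.
struct PagedSpace {
  bool is_swept_concurrently = false;  // true while sweeper threads own this space
};

struct Collector {
  bool sweeping_pending = false;

  // With a space: "are sweeper threads still working on *this* space?"
  // Without one (nullptr): "is any concurrent sweeping still pending?"
  bool IsConcurrentSweepingInProgress(PagedSpace* space = nullptr) const {
    return (space == nullptr || space->is_swept_concurrently) && sweeping_pending;
  }
};

// Allocation slow path: waiting for sweeper threads is only justified if
// they can still free memory in the space we are allocating from.
bool ShouldWaitBeforeRetrying(const Collector& collector, PagedSpace& space) {
  return collector.IsConcurrentSweepingInProgress(&space);
}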

View File

@@ -2588,13 +2588,12 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
#ifdef VERIFY_HEAP
Heap* debug_heap = heap_;
CHECK(!debug_heap->old_data_space()->was_swept_conservatively());
CHECK(!debug_heap->old_pointer_space()->was_swept_conservatively());
CHECK(!debug_heap->code_space()->was_swept_conservatively());
CHECK(!debug_heap->cell_space()->was_swept_conservatively());
CHECK(!debug_heap->property_cell_space()->
was_swept_conservatively());
CHECK(!debug_heap->map_space()->was_swept_conservatively());
CHECK(debug_heap->old_data_space()->is_iterable());
CHECK(debug_heap->old_pointer_space()->is_iterable());
CHECK(debug_heap->code_space()->is_iterable());
CHECK(debug_heap->cell_space()->is_iterable());
CHECK(debug_heap->property_cell_space()->is_iterable());
CHECK(debug_heap->map_space()->is_iterable());
#endif
#ifdef VERIFY_HEAP

View File

@@ -1297,7 +1297,7 @@ static void VerifyNonPointerSpacePointers(Heap* heap) {
// The old data space was normally swept conservatively so that the iterator
// doesn't work, so we normally skip the next bit.
if (!heap->old_data_space()->was_swept_conservatively()) {
if (heap->old_data_space()->is_iterable()) {
HeapObjectIterator data_it(heap->old_data_space());
for (HeapObject* object = data_it.Next();
object != NULL; object = data_it.Next())
@@ -4264,8 +4264,8 @@ STRUCT_LIST(MAKE_CASE)
bool Heap::IsHeapIterable() {
return (!old_pointer_space()->was_swept_conservatively() &&
!old_data_space()->was_swept_conservatively() &&
return (old_pointer_space()->is_iterable() &&
old_data_space()->is_iterable() &&
new_space_top_after_last_gc_ == new_space()->top());
}

View File

@@ -208,7 +208,7 @@ static void VerifyEvacuation(PagedSpace* space) {
// TODO(hpayer): Bring back VerifyEvacuation for parallel-concurrently
// swept pages.
if ((FLAG_concurrent_sweeping || FLAG_parallel_sweeping) &&
space->was_swept_conservatively()) return;
!space->is_iterable()) return;
PageIterator it(space);
while (it.has_next()) {
@@ -648,8 +648,9 @@ bool MarkCompactCollector::AreSweeperThreadsActivated() {
}
bool MarkCompactCollector::IsConcurrentSweepingInProgress() {
return sweeping_pending_;
bool MarkCompactCollector::IsConcurrentSweepingInProgress(PagedSpace* space) {
return (space == NULL || space->is_swept_concurrently()) &&
sweeping_pending_;
}
@@ -2045,7 +2046,7 @@ int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
static void DiscoverGreyObjectsInSpace(Heap* heap,
MarkingDeque* marking_deque,
PagedSpace* space) {
if (!space->was_swept_conservatively()) {
if (space->is_iterable()) {
HeapObjectIterator it(space);
DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
} else {
@@ -4079,9 +4080,8 @@ void MarkCompactCollector::SweepInParallel(PagedSpace* space) {
void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
sweeper == PARALLEL_CONSERVATIVE ||
sweeper == CONCURRENT_CONSERVATIVE);
space->set_is_iterable(sweeper == PRECISE);
space->set_is_swept_concurrently(sweeper == CONCURRENT_CONSERVATIVE);
space->ClearStats();
// We defensively initialize end_of_unswept_pages_ here with the first page
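
For reference, the mapping that the two setters above encode, written as a small sketch (the SweeperType enumerator names are taken from the diff; the helper struct and function are illustrative, not part of V8):

enum SweeperType {
  CONSERVATIVE,
  PARALLEL_CONSERVATIVE,
  CONCURRENT_CONSERVATIVE,
  PRECISE
};

struct SweepFlags { bool is_iterable; bool is_swept_concurrently; };

// Only a precise sweep leaves the space iterable; only the concurrent
// variant hands the space over to sweeper threads.
SweepFlags FlagsFor(SweeperType sweeper) {
  SweepFlags flags;
  flags.is_iterable = (sweeper == PRECISE);
  flags.is_swept_concurrently = (sweeper == CONCURRENT_CONSERVATIVE);
  return flags;
}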

View File

@@ -670,7 +670,10 @@ class MarkCompactCollector {
bool AreSweeperThreadsActivated();
bool IsConcurrentSweepingInProgress();
// If a paged space is passed in, this method checks if the given space is
// swept concurrently. Otherwise, this method checks if concurrent sweeping
// is in progress right now on any space.
bool IsConcurrentSweepingInProgress(PagedSpace* space = NULL);
void set_sequential_sweeping(bool sequential_sweeping) {
sequential_sweeping_ = sequential_sweeping;
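
A hypothetical call-site sketch of the two forms this declaration now allows; the helper function and its parameters are placeholders, and the snippet assumes V8's internal MarkCompactCollector and PagedSpace types from this header and spaces.h:

// Hypothetical helper, not part of the patch: shows both call forms.
void MaybeWaitForSweepers(MarkCompactCollector* collector, PagedSpace* space) {
  // Global form: is any concurrent sweeping still pending anywhere?
  if (collector->IsConcurrentSweepingInProgress()) {
    // e.g. finish sweeping before a pass that needs the whole heap iterable.
  }
  // Per-space form: are sweeper threads still working on |space|? Only then
  // is it worth blocking before retrying an allocation in that space.
  if (collector->IsConcurrentSweepingInProgress(space)) {
    collector->WaitUntilSweepingCompleted();
  }
}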

View File

@@ -67,7 +67,7 @@ void HeapObjectIterator::Initialize(PagedSpace* space,
HeapObjectIterator::PageMode mode,
HeapObjectCallback size_f) {
// Check that we actually can iterate this space.
ASSERT(!space->was_swept_conservatively());
ASSERT(space->is_iterable());
space_ = space;
cur_addr_ = cur;
@@ -935,7 +935,8 @@ PagedSpace::PagedSpace(Heap* heap,
Executability executable)
: Space(heap, id, executable),
free_list_(this),
was_swept_conservatively_(false),
is_iterable_(true),
is_swept_concurrently_(false),
unswept_free_bytes_(0),
end_of_unswept_pages_(NULL) {
if (id == CODE_SPACE) {
@@ -1157,7 +1158,7 @@ void PagedSpace::Print() { }
#ifdef VERIFY_HEAP
void PagedSpace::Verify(ObjectVisitor* visitor) {
// We can only iterate over the pages if they were swept precisely.
if (was_swept_conservatively_) return;
if (!is_iterable_) return;
bool allocation_pointer_found_in_space =
(allocation_info_.top() == allocation_info_.limit());
@@ -2546,8 +2547,8 @@ void PagedSpace::PrepareForMarkCompact() {
intptr_t PagedSpace::SizeOfObjects() {
ASSERT(heap()->mark_compact_collector()->IsConcurrentSweepingInProgress() ||
(unswept_free_bytes_ == 0));
ASSERT(heap()->mark_compact_collector()->
IsConcurrentSweepingInProgress(this) || (unswept_free_bytes_ == 0));
return Size() - unswept_free_bytes_ - (limit() - top());
}
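
A worked example with hypothetical numbers may help when reading the return expression above: SizeOfObjects() is the committed size minus the memory the (possibly still running) sweepers are estimated to reclaim, minus the unused tail of the current linear allocation area.

// Purely illustrative numbers:
//   Size()              = 1024 KB  (committed pages of this space)
//   unswept_free_bytes_ =  100 KB  (estimated free memory on unswept pages)
//   limit() - top()     =   20 KB  (unused tail of the linear allocation area)
// => SizeOfObjects()    = 1024 - 100 - 20 = 904 KB of (estimated) live objects
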
@@ -2582,7 +2583,7 @@ HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
MarkCompactCollector* collector = heap()->mark_compact_collector();
// If sweeper threads are still running, wait for them.
if (collector->IsConcurrentSweepingInProgress()) {
if (collector->IsConcurrentSweepingInProgress(this)) {
collector->WaitUntilSweepingCompleted();
// After waiting for the sweeper threads, there may be new free-list
@@ -2598,7 +2599,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
// If sweeper threads are active, try to re-fill the free-lists.
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->IsConcurrentSweepingInProgress()) {
if (collector->IsConcurrentSweepingInProgress(this)) {
collector->RefillFreeList(this);
// Retry the free list allocation.
@@ -2762,7 +2763,7 @@ void PagedSpace::ReportStatistics() {
", available: %" V8_PTR_PREFIX "d, %%%d\n",
Capacity(), Waste(), Available(), pct);
if (was_swept_conservatively_) return;
if (!is_iterable_) return;
ClearHistograms(heap()->isolate());
HeapObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())

View File

@@ -1902,8 +1902,11 @@ class PagedSpace : public Space {
static void ResetCodeStatistics(Isolate* isolate);
#endif
bool was_swept_conservatively() { return was_swept_conservatively_; }
void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; }
bool is_iterable() { return is_iterable_; }
void set_is_iterable(bool b) { is_iterable_ = b; }
bool is_swept_concurrently() { return is_swept_concurrently_; }
void set_is_swept_concurrently(bool b) { is_swept_concurrently_ = b; }
// Evacuation candidates are swept by evacuator. Needs to return a valid
// result before _and_ after evacuation has finished.
@@ -1986,7 +1989,11 @@ class PagedSpace : public Space {
// Normal allocation information.
AllocationInfo allocation_info_;
bool was_swept_conservatively_;
// This space was swept precisely, hence it is iterable.
bool is_iterable_;
// This space is currently swept by sweeper threads.
bool is_swept_concurrently_;
// The number of free bytes which could be reclaimed by advancing the
// concurrent sweeper threads. This is only an estimation because concurrent