Unlink evacuation candidates from list of pages before starting sweeper threads.

Removed FinalizeSweeping().

BUG=
Review URL: https://codereview.chromium.org/12499004

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@13886 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
This commit is contained in:
parent
38cc926a7e
commit
fffadaf9ba
@@ -585,13 +585,6 @@ bool MarkCompactCollector::IsConcurrentSweepingInProgress() {
 }
 
 
-void MarkCompactCollector::FinalizeSweeping() {
-  ASSERT(sweeping_pending_ == false);
-  ReleaseEvacuationCandidates();
-  heap()->FreeQueuedChunks();
-}
-
-
 void MarkCompactCollector::MarkInParallel() {
   for (int i = 0; i < FLAG_marking_threads; i++) {
     heap()->isolate()->marking_threads()[i]->StartMarking();
@@ -911,7 +904,6 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
   if (IsConcurrentSweepingInProgress()) {
     // Instead of waiting we could also abort the sweeper threads here.
     WaitUntilSweepingCompleted();
-    FinalizeSweeping();
   }
 
   // Clear marking bits if incremental marking is aborted.
@@ -2849,6 +2841,7 @@ void MarkCompactCollector::EvacuatePages() {
           slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
           page->ClearEvacuationCandidate();
           page->SetFlag(Page::RESCAN_ON_EVACUATION);
+          page->InsertAfter(static_cast<PagedSpace*>(page->owner())->anchor());
         }
         return;
       }
@@ -3309,6 +3302,18 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
 }
 
 
+void MarkCompactCollector::UnlinkEvacuationCandidates() {
+  int npages = evacuation_candidates_.length();
+  for (int i = 0; i < npages; i++) {
+    Page* p = evacuation_candidates_[i];
+    if (!p->IsEvacuationCandidate()) continue;
+    p->Unlink();
+    p->ClearSweptPrecisely();
+    p->ClearSweptConservatively();
+  }
+}
+
+
 void MarkCompactCollector::ReleaseEvacuationCandidates() {
   int npages = evacuation_candidates_.length();
   for (int i = 0; i < npages; i++) {
@@ -3319,10 +3324,11 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
     p->set_scan_on_scavenge(false);
     slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
     p->ResetLiveBytes();
-    space->ReleasePage(p);
+    space->ReleasePage(p, false);
   }
   evacuation_candidates_.Rewind(0);
   compacting_ = false;
+  heap()->FreeQueuedChunks();
 }
 
 
@@ -3794,7 +3800,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
         // Adjust unswept free bytes because releasing a page expects said
         // counter to be accurate for unswept pages.
         space->IncreaseUnsweptFreeBytes(p);
-        space->ReleasePage(p);
+        space->ReleasePage(p, true);
         continue;
       }
       unused_page_present = true;
@@ -3899,6 +3905,10 @@ void MarkCompactCollector::SweepSpaces() {
   SweepSpace(heap()->old_pointer_space(), how_to_sweep);
   SweepSpace(heap()->old_data_space(), how_to_sweep);
 
+  // Unlink evacuation candidates before sweeper threads access the list of
+  // pages to avoid race condition.
+  UnlinkEvacuationCandidates();
+
   if (how_to_sweep == PARALLEL_CONSERVATIVE ||
       how_to_sweep == CONCURRENT_CONSERVATIVE) {
     // TODO(hpayer): fix race with concurrent sweeper
@@ -3924,9 +3934,7 @@ void MarkCompactCollector::SweepSpaces() {
   // Deallocate unmarked objects and clear marked bits for marked objects.
   heap_->lo_space()->FreeUnmarkedObjects();
 
-  if (how_to_sweep != CONCURRENT_CONSERVATIVE) {
-    FinalizeSweeping();
-  }
+  ReleaseEvacuationCandidates();
 }
 
 
@@ -690,8 +690,6 @@ class MarkCompactCollector {
 
   bool IsConcurrentSweepingInProgress();
 
-  void FinalizeSweeping();
-
   void set_sequential_sweeping(bool sequential_sweeping) {
     sequential_sweeping_ = sequential_sweeping;
   }
@@ -713,6 +711,7 @@ class MarkCompactCollector {
   void RemoveDeadInvalidatedCode();
   void ProcessInvalidatedCode(ObjectVisitor* visitor);
 
+  void UnlinkEvacuationCandidates();
   void ReleaseEvacuationCandidates();
 
   void StartSweeperThreads();
@ -981,6 +981,7 @@ bool PagedSpace::CanExpand() {
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
bool PagedSpace::Expand() {
|
||||
if (!CanExpand()) return false;
|
||||
|
||||
@@ -1045,7 +1046,7 @@ int PagedSpace::CountTotalPages() {
 }
 
 
-void PagedSpace::ReleasePage(Page* page) {
+void PagedSpace::ReleasePage(Page* page, bool unlink) {
   ASSERT(page->LiveBytes() == 0);
   ASSERT(AreaSize() == page->area_size());
 
@@ -1069,7 +1070,9 @@ void PagedSpace::ReleasePage(Page* page) {
     allocation_info_.top = allocation_info_.limit = NULL;
   }
 
-  page->Unlink();
+  if (unlink) {
+    page->Unlink();
+  }
   if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
     heap()->isolate()->memory_allocator()->Free(page);
   } else {
@@ -2555,7 +2558,6 @@ bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
     if (collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) {
      if (!collector->sequential_sweeping()) {
        collector->WaitUntilSweepingCompleted();
-       collector->FinalizeSweeping();
        return true;
      }
    }
@@ -1697,7 +1697,7 @@ class PagedSpace : public Space {
   }
 
   // Releases an unused page and shrinks the space.
-  void ReleasePage(Page* page);
+  void ReleasePage(Page* page, bool unlink);
 
   // The dummy page that anchors the linked list of pages.
   Page* anchor() { return &anchor_; }
Loading…
Reference in New Issue
Block a user