Don't filter store buffer after sweeping.

Additionally, this CL moves a bit of code around to free up more memory
before compaction starts.

BUG=
Review URL: https://codereview.chromium.org/1305733003
Cr-Commit-Position: refs/heads/master@{#30275}
commit 267381d978
parent e31695f907
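For orientation, the invariant behind this change can be illustrated with a small, self-contained sketch (plain C++, not V8's actual classes; ToyChunk, ToySlot, and ToyStoreBuffer are made-up names, only the ABOUT_TO_BE_FREED idea comes from the diff): store-buffer entries that point into pages queued for freeing must be dropped before those pages are released. With this CL that filtering becomes its own step, FilterStoreBufferEntriesOnAboutToBeFreedPages(), run when evacuation candidates are released, so FreeQueuedChunks() only has to return the memory.

// Minimal sketch, not V8 code: models why store-buffer entries pointing into
// about-to-be-freed chunks must be filtered before the chunks are released.
#include <algorithm>
#include <cstdio>
#include <vector>

struct ToyChunk {
  bool about_to_be_freed = false;  // stands in for MemoryChunk::ABOUT_TO_BE_FREED
};

struct ToySlot {
  ToyChunk* chunk;  // the page the recorded slot lives on
};

struct ToyStoreBuffer {
  std::vector<ToySlot> slots;

  // Analogue of StoreBuffer::Filter(ABOUT_TO_BE_FREED): drop entries that
  // point into chunks flagged for release.
  void FilterAboutToBeFreed() {
    slots.erase(std::remove_if(slots.begin(), slots.end(),
                               [](const ToySlot& s) {
                                 return s.chunk->about_to_be_freed;
                               }),
                slots.end());
  }
};

int main() {
  ToyChunk live_chunk;
  ToyChunk doomed_chunk;
  ToyStoreBuffer store_buffer;
  store_buffer.slots = {{&live_chunk}, {&doomed_chunk}};

  // Step 1 (analogue of FilterStoreBufferEntriesOnAboutToBeFreedPages):
  // flag the queued chunk and purge its store-buffer entries.
  doomed_chunk.about_to_be_freed = true;
  store_buffer.FilterAboutToBeFreed();

  // Step 2 (analogue of FreeQueuedChunks): the memory can now simply be
  // released; no store-buffer entry still points into it.
  std::printf("slots left: %zu\n", store_buffer.slots.size());  // prints 1
  return 0;
}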
@@ -6663,7 +6663,7 @@ void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
 }
 
 
-void Heap::FreeQueuedChunks() {
+void Heap::FilterStoreBufferEntriesOnAboutToBeFreedPages() {
   if (chunks_queued_for_free_ == NULL) return;
   MemoryChunk* next;
   MemoryChunk* chunk;
@@ -6673,6 +6673,12 @@ void Heap::FreeQueuedChunks() {
   }
   isolate_->heap()->store_buffer()->Compact();
   isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
+}
+
+
+void Heap::FreeQueuedChunks() {
+  MemoryChunk* next;
+  MemoryChunk* chunk;
   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
     next = chunk->next_chunk();
     isolate_->memory_allocator()->Free(chunk);
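Read together, the two hunks above split the old Heap::FreeQueuedChunks() in two. A rough reconstruction of the resulting functions is sketched below; only the lines that appear in the hunks are certain, and the two lines marked "assumed" (setting the ABOUT_TO_BE_FREED flag in the first loop and resetting chunks_queued_for_free_ at the end) are filled in from context rather than taken from the diff.

// Approximate shape after this CL (reconstructed from the hunks above;
// lines marked "assumed" are not visible in the diff).
void Heap::FilterStoreBufferEntriesOnAboutToBeFreedPages() {
  if (chunks_queued_for_free_ == NULL) return;
  MemoryChunk* next;
  MemoryChunk* chunk;
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);  // assumed: flag set here
  }
  isolate_->heap()->store_buffer()->Compact();
  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
}


void Heap::FreeQueuedChunks() {
  MemoryChunk* next;
  MemoryChunk* chunk;
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    isolate_->memory_allocator()->Free(chunk);
  }
  chunks_queued_for_free_ = NULL;  // assumed: queue reset after freeing
}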
@@ -1429,6 +1429,7 @@ class Heap {
   inline bool OldGenerationAllocationLimitReached();
 
   void QueueMemoryChunkForFree(MemoryChunk* chunk);
+  void FilterStoreBufferEntriesOnAboutToBeFreedPages();
   void FreeQueuedChunks();
 
   int gc_count() const { return gc_count_; }
@@ -3760,6 +3760,7 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
   }
   evacuation_candidates_.Rewind(0);
   compacting_ = false;
+  heap()->FilterStoreBufferEntriesOnAboutToBeFreedPages();
   heap()->FreeQueuedChunks();
 }
 
@@ -4312,9 +4313,6 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
     PrintF("SweepSpace: %s (%d pages swept)\n",
            AllocationSpaceName(space->identity()), pages_swept);
   }
-
-  // Give pages that are queued to be freed back to the OS.
-  heap()->FreeQueuedChunks();
 }
 
 
@@ -4331,11 +4329,6 @@ void MarkCompactCollector::SweepSpaces() {
 
   MoveEvacuationCandidatesToEndOfPagesList();
 
-  // Noncompacting collections simply sweep the spaces to clear the mark
-  // bits and free the nonlive blocks (for old and map spaces). We sweep
-  // the map space last because freeing non-live maps overwrites them and
-  // the other spaces rely on possibly non-live maps to get the sizes for
-  // non-live objects.
   {
     {
       GCTracer::Scope sweep_scope(heap()->tracer(),
@@ -4358,12 +4351,19 @@ void MarkCompactCollector::SweepSpaces() {
     }
   }
 
-  EvacuateNewSpaceAndCandidates();
+  // Deallocate unmarked large objects.
+  heap_->lo_space()->FreeUnmarkedObjects();
+
+  // Give pages that are queued to be freed back to the OS. Invalid store
+  // buffer entries are already filtered out. We can just release the memory.
+  heap()->FreeQueuedChunks();
 
   heap()->FreeDeadArrayBuffers(false);
 
-  // Deallocate unmarked objects and clear marked bits for marked objects.
-  heap_->lo_space()->FreeUnmarkedObjects();
+  EvacuateNewSpaceAndCandidates();
+
+  // Clear the marking state of live large objects.
+  heap_->lo_space()->ClearMarkingStateOfLiveObjects();
 
   // Deallocate evacuated candidate pages.
   ReleaseEvacuationCandidates();
@@ -2933,19 +2933,27 @@ LargePage* LargeObjectSpace::FindPage(Address a) {
 }
 
 
+void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
+  LargePage* current = first_page_;
+  while (current != NULL) {
+    HeapObject* object = current->GetObject();
+    MarkBit mark_bit = Marking::MarkBitFrom(object);
+    DCHECK(Marking::IsBlackOrGrey(mark_bit));
+    Marking::BlackToWhite(mark_bit);
+    Page::FromAddress(object->address())->ResetProgressBar();
+    Page::FromAddress(object->address())->ResetLiveBytes();
+    current = current->next_page();
+  }
+}
+
+
 void LargeObjectSpace::FreeUnmarkedObjects() {
   LargePage* previous = NULL;
   LargePage* current = first_page_;
   while (current != NULL) {
     HeapObject* object = current->GetObject();
-    // Can this large page contain pointers to non-trivial objects. No other
-    // pointer object is this big.
-    bool is_pointer_object = object->IsFixedArray();
     MarkBit mark_bit = Marking::MarkBitFrom(object);
     if (Marking::IsBlackOrGrey(mark_bit)) {
-      Marking::BlackToWhite(mark_bit);
-      Page::FromAddress(object->address())->ResetProgressBar();
-      Page::FromAddress(object->address())->ResetLiveBytes();
       previous = current;
       current = current->next_page();
     } else {
@@ -2976,14 +2984,9 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
                           static_cast<uint32_t>(key));
       }
 
-      if (is_pointer_object) {
-        heap()->QueueMemoryChunkForFree(page);
-      } else {
-        heap()->isolate()->memory_allocator()->Free(page);
-      }
+      heap()->QueueMemoryChunkForFree(page);
     }
   }
-  heap()->FreeQueuedChunks();
 }
 
 
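Taken together with the SweepSpaces() hunk earlier, the two hunks above split large-object sweeping into separate passes: FreeUnmarkedObjects() now only queues dead large pages (it no longer clears the mark state of survivors and no longer drains the queue itself), while the new ClearMarkingStateOfLiveObjects() resets the mark bit, progress bar, and live-byte count of each surviving page later in the cycle. A rough outline of the resulting order, reconstructed only from the calls visible in the hunks (not an exhaustive account of SweepSpaces()):

// Sketch of the call order after this CL, as visible in the hunks above.
heap_->lo_space()->FreeUnmarkedObjects();             // dead large pages are queued
heap()->FreeQueuedChunks();                           // queued memory returned early
heap()->FreeDeadArrayBuffers(false);
EvacuateNewSpaceAndCandidates();
heap_->lo_space()->ClearMarkingStateOfLiveObjects();  // survivors' mark state reset
ReleaseEvacuationCandidates();                        // filters store buffer, then frees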
@@ -2744,6 +2744,9 @@ class LargeObjectSpace : public Space {
   // if such a page doesn't exist.
   LargePage* FindPage(Address a);
 
+  // Clears the marking state of live objects.
+  void ClearMarkingStateOfLiveObjects();
+
   // Frees unmarked objects.
   void FreeUnmarkedObjects();
 