New page-local store buffer.

This replaces the global remembered set with per-page remembered sets.

Each page in the old space, map space, and large object space keeps track of
the set of slots in the page pointing to the new space.

The data structure for storing slot sets is a two-level bitmap, which allows
us to remove the store buffer overflow and SCAN_ON_SCAVENGE logic.
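For orientation, here is a minimal sketch (not part of the patch) of the index arithmetic behind the two-level bitmap. The constants mirror the ones introduced in src/heap/slot-set.h below, assuming a 64-bit build where kPointerSizeLog2 is 3.

// Sketch only: mirrors SlotSet::SlotToIndices() from this patch. A page is
// split into buckets; each bucket is an array of 32 uint32_t cells, so one
// bucket covers 32 * 32 = 1024 slots and is allocated lazily on first use.
constexpr int kPointerSizeLog2 = 3;     // 64-bit build assumed
constexpr int kBitsPerCellLog2 = 5;     // 32 bits per cell
constexpr int kCellsPerBucket = 32;
constexpr int kBitsPerBucketLog2 = 10;  // kCellsPerBucketLog2 + kBitsPerCellLog2

void SlotToIndices(int slot_offset, int* bucket, int* cell, int* bit) {
  int slot = slot_offset >> kPointerSizeLog2;  // byte offset -> slot number
  *bucket = slot >> kBitsPerBucketLog2;        // which bucket bitmap
  *cell = (slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1);  // which cell
  *bit = slot & ((1 << kBitsPerCellLog2) - 1);  // which bit inside the cell
}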

Design doc: https://goo.gl/sMKCf7

BUG=chromium:578883
LOG=NO

Review URL: https://codereview.chromium.org/1608583002

Cr-Commit-Position: refs/heads/master@{#33806}
Author: ulan, 2016-02-08 00:51:02 -08:00 (committed by Commit bot)
Parent: df71183380
Commit: bb883395a8
24 changed files with 517 additions and 863 deletions


@ -1070,6 +1070,7 @@ source_set("v8_base") {
"src/heap/scavenger-inl.h",
"src/heap/scavenger.cc",
"src/heap/scavenger.h",
"src/heap/slot-set.h",
"src/heap/slots-buffer.cc",
"src/heap/slots-buffer.h",
"src/heap/spaces-inl.h",


@ -3991,11 +3991,8 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.scratch0(),
&dont_need_remembered_set);
__ CheckPageFlag(regs_.object(),
regs_.scratch0(),
1 << MemoryChunk::SCAN_ON_SCAVENGE,
ne,
&dont_need_remembered_set);
__ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
&dont_need_remembered_set);
// First notify the incremental marker if necessary, then update the
// remembered set.


@ -4091,8 +4091,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
__ Ldr(val, MemOperand(regs_.address()));
__ JumpIfNotInNewSpace(val, &dont_need_remembered_set);
__ CheckPageFlagSet(regs_.object(), val, 1 << MemoryChunk::SCAN_ON_SCAVENGE,
&dont_need_remembered_set);
__ JumpIfInNewSpace(regs_.object(), &dont_need_remembered_set);
// First notify the incremental marker if necessary, then update the
// remembered set.


@ -598,7 +598,6 @@ double AggregatedMemoryHistogram<Histogram>::Aggregate(double current_ms,
SC(pc_to_code, V8.PcToCode) \
SC(pc_to_code_cached, V8.PcToCodeCached) \
/* The store-buffer implementation of the write barrier. */ \
SC(store_buffer_compactions, V8.StoreBufferCompactions) \
SC(store_buffer_overflows, V8.StoreBufferOverflows)


@ -93,7 +93,6 @@ Heap::Heap()
contexts_disposed_(0),
number_of_disposed_maps_(0),
global_ic_age_(0),
scan_on_scavenge_pages_(0),
new_space_(this),
old_space_(NULL),
code_space_(NULL),
@ -113,7 +112,6 @@ Heap::Heap()
old_gen_exhausted_(false),
optimize_for_memory_usage_(false),
inline_allocation_disabled_(false),
store_buffer_rebuilder_(store_buffer()),
total_regexp_code_generated_(0),
tracer_(nullptr),
high_survival_rate_period_length_(0),
@ -453,8 +451,6 @@ void Heap::GarbageCollectionPrologue() {
ReportStatisticsBeforeGC();
#endif // DEBUG
store_buffer()->GCPrologue();
if (isolate()->concurrent_osr_enabled()) {
isolate()->optimizing_compile_dispatcher()->AgeBufferedOsrJobs();
}
@ -642,8 +638,6 @@ void Heap::DeoptMarkedAllocationSites() {
void Heap::GarbageCollectionEpilogue() {
store_buffer()->GCEpilogue();
// In release mode, we only zap the from space under heap verification.
if (Heap::ShouldZapGarbage()) {
ZapFromSpace();
@ -1554,12 +1548,6 @@ static bool IsUnmodifiedHeapObject(Object** p) {
}
void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
StoreBufferEvent event) {
heap->store_buffer_rebuilder_.Callback(page, event);
}
void PromotionQueue::Initialize() {
// The last to-space page may be used for promotion queue. On promotion
// conflict, we use the emergency stack.
@ -1692,8 +1680,6 @@ void Heap::Scavenge() {
// Copy objects reachable from the old generation.
GCTracer::Scope gc_scope(tracer(),
GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
StoreBufferRebuildScope scope(this, store_buffer(),
&ScavengeStoreBufferCallback);
store_buffer()->IteratePointersToNewSpace(&Scavenger::ScavengeObject);
}
@ -1948,8 +1934,6 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
// Promote and process all the to-be-promoted objects.
{
StoreBufferRebuildScope scope(this, store_buffer(),
&ScavengeStoreBufferCallback);
while (!promotion_queue()->is_empty()) {
HeapObject* target;
int size;
@ -4487,8 +4471,7 @@ void Heap::IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
if (InNewSpace(new_target)) {
SLOW_DCHECK(Heap::InToSpace(new_target));
SLOW_DCHECK(new_target->IsHeapObject());
store_buffer_.EnterDirectlyIntoStoreBuffer(
reinterpret_cast<Address>(slot));
store_buffer_.Mark(reinterpret_cast<Address>(slot));
}
SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_target));
} else if (record_slots &&
@ -6069,19 +6052,6 @@ void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
}
void Heap::FilterStoreBufferEntriesOnAboutToBeFreedPages() {
if (chunks_queued_for_free_ == NULL) return;
MemoryChunk* next;
MemoryChunk* chunk;
for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
next = chunk->next_chunk();
chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
}
store_buffer()->Compact();
store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
}
void Heap::FreeQueuedChunks() {
if (chunks_queued_for_free_ != NULL) {
if (FLAG_concurrent_sweeping) {


@ -670,20 +670,6 @@ class Heap {
// Notify the heap that a context has been disposed.
int NotifyContextDisposed(bool dependant_context);
inline void increment_scan_on_scavenge_pages() {
scan_on_scavenge_pages_++;
if (FLAG_gc_verbose) {
PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
}
}
inline void decrement_scan_on_scavenge_pages() {
scan_on_scavenge_pages_--;
if (FLAG_gc_verbose) {
PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
}
}
void set_native_contexts_list(Object* object) {
native_contexts_list_ = object;
}
@ -783,7 +769,6 @@ class Heap {
inline bool OldGenerationAllocationLimitReached();
void QueueMemoryChunkForFree(MemoryChunk* chunk);
void FilterStoreBufferEntriesOnAboutToBeFreedPages();
void FreeQueuedChunks(MemoryChunk* list_head);
void FreeQueuedChunks();
void WaitUntilUnmappingOfFreeChunksCompleted();
@ -1495,9 +1480,6 @@ class Heap {
static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
Heap* heap, Object** pointer);
static void ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
StoreBufferEvent event);
// Selects the proper allocation space based on the pretenuring decision.
static AllocationSpace SelectSpace(PretenureFlag pretenure) {
return (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
@ -2012,8 +1994,6 @@ class Heap {
int global_ic_age_;
int scan_on_scavenge_pages_;
NewSpace new_space_;
OldSpace* old_space_;
OldSpace* code_space_;
@ -2081,8 +2061,6 @@ class Heap {
Object* encountered_transition_arrays_;
StoreBufferRebuilder store_buffer_rebuilder_;
List<GCCallbackPair> gc_epilogue_callbacks_;
List<GCCallbackPair> gc_prologue_callbacks_;


@ -375,7 +375,6 @@ void IncrementalMarking::SetNewSpacePageFlags(MemoryChunk* chunk,
} else {
chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
}
chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
}


@ -2873,11 +2873,12 @@ void MarkCompactCollector::UpdateSlotsRecordedIn(SlotsBuffer* buffer) {
static void UpdatePointer(HeapObject** address, HeapObject* object) {
MapWord map_word = object->map_word();
// The store buffer can still contain stale pointers in dead large objects.
// Ignore these pointers here.
// Since we only filter invalid slots in old space, the store buffer can
// still contain stale pointers in large object and in map spaces. Ignore
// these pointers here.
DCHECK(map_word.IsForwardingAddress() ||
object->GetHeap()->lo_space()->FindPage(
reinterpret_cast<Address>(address)) != NULL);
!object->GetHeap()->old_space()->Contains(
reinterpret_cast<Address>(address)));
if (map_word.IsForwardingAddress()) {
// Update the corresponding slot.
*address = map_word.ToForwardingAddress();
@ -3327,7 +3328,6 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
// entries of such pages are filtered before rescanning.
DCHECK(p->IsEvacuationCandidate());
p->SetFlag(Page::COMPACTION_WAS_ABORTED);
p->set_scan_on_scavenge(true);
abandoned_pages++;
break;
case MemoryChunk::kCompactingFinalize:
@ -3566,6 +3566,10 @@ bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page,
page->markbits()->ClearRange(
page->AddressToMarkbitIndex(page->area_start()),
page->AddressToMarkbitIndex(object->address()));
if (page->old_to_new_slots() != nullptr) {
page->old_to_new_slots()->RemoveRange(
0, static_cast<int>(object->address() - page->address()));
}
RecomputeLiveBytes(page);
}
return false;
@ -3720,8 +3724,6 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
// Update roots.
heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
StoreBufferRebuildScope scope(heap_, heap_->store_buffer(),
&Heap::ScavengeStoreBufferCallback);
heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
}
@ -3808,14 +3810,12 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
if (!p->IsEvacuationCandidate()) continue;
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
space->Free(p->area_start(), p->area_size());
p->set_scan_on_scavenge(false);
p->ResetLiveBytes();
CHECK(p->SweepingDone());
space->ReleasePage(p, true);
}
evacuation_candidates_.Rewind(0);
compacting_ = false;
heap()->FilterStoreBufferEntriesOnAboutToBeFreedPages();
heap()->FreeQueuedChunks();
}

src/heap/slot-set.h (new file, 203 lines)

@ -0,0 +1,203 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_SLOT_SET_H
#define V8_SLOT_SET_H
#include "src/allocation.h"
#include "src/base/bits.h"
namespace v8 {
namespace internal {
// Data structure for maintaining a set of slots in a standard (non-large)
// page. The base address of the page must be set with SetPageStart before any
// operation.
// The data structure assumes that the slots are pointer size aligned and
// splits the valid slot offset range into kBuckets buckets.
// Each bucket is a bitmap with a bit corresponding to a single slot offset.
class SlotSet : public Malloced {
public:
enum CallbackResult { KEEP_SLOT, REMOVE_SLOT };
SlotSet() {
for (int i = 0; i < kBuckets; i++) {
bucket[i] = nullptr;
}
}
~SlotSet() {
for (int i = 0; i < kBuckets; i++) {
ReleaseBucket(i);
}
}
void SetPageStart(Address page_start) { page_start_ = page_start; }
// The slot offset specifies a slot at address page_start_ + slot_offset.
void Insert(int slot_offset) {
int bucket_index, cell_index, bit_index;
SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
if (bucket[bucket_index] == nullptr) {
bucket[bucket_index] = AllocateBucket();
}
bucket[bucket_index][cell_index] |= 1u << bit_index;
}
// The slot offset specifies a slot at address page_start_ + slot_offset.
void Remove(int slot_offset) {
int bucket_index, cell_index, bit_index;
SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
if (bucket[bucket_index] != nullptr) {
uint32_t cell = bucket[bucket_index][cell_index];
if (cell) {
uint32_t bit_mask = 1u << bit_index;
if (cell & bit_mask) {
bucket[bucket_index][cell_index] ^= bit_mask;
}
}
}
}
// The slot offsets specify a range of slots at addresses:
// [page_start_ + start_offset ... page_start_ + end_offset).
void RemoveRange(int start_offset, int end_offset) {
DCHECK_LE(start_offset, end_offset);
int start_bucket, start_cell, start_bit;
SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit);
int end_bucket, end_cell, end_bit;
SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit);
uint32_t start_mask = (1u << start_bit) - 1;
uint32_t end_mask = ~((1u << end_bit) - 1);
if (start_bucket == end_bucket && start_cell == end_cell) {
MaskCell(start_bucket, start_cell, start_mask | end_mask);
return;
}
MaskCell(start_bucket, start_cell, start_mask);
start_cell++;
if (bucket[start_bucket] != nullptr && start_bucket < end_bucket) {
while (start_cell < kCellsPerBucket) {
bucket[start_bucket][start_cell] = 0;
start_cell++;
}
}
while (start_bucket < end_bucket) {
delete[] bucket[start_bucket];
bucket[start_bucket] = nullptr;
start_bucket++;
}
if (start_bucket < kBuckets && bucket[start_bucket] != nullptr) {
while (start_cell < end_cell) {
bucket[start_bucket][start_cell] = 0;
start_cell++;
}
}
if (end_bucket < kBuckets) {
MaskCell(end_bucket, end_cell, end_mask);
}
}
// The slot offset specifies a slot at address page_start_ + slot_offset.
bool Lookup(int slot_offset) {
int bucket_index, cell_index, bit_index;
SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
if (bucket[bucket_index] != nullptr) {
uint32_t cell = bucket[bucket_index][cell_index];
return (cell & (1u << bit_index)) != 0;
}
return false;
}
// Iterate over all slots in the set and for each slot invoke the callback.
// If the callback returns REMOVE_SLOT then the slot is removed from the set.
//
// Sample usage:
// Iterate([](Address slot_address) {
// if (good(slot_address)) return KEEP_SLOT;
// else return REMOVE_SLOT;
// });
template <typename Callback>
void Iterate(Callback callback) {
for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
if (bucket[bucket_index] != nullptr) {
bool bucket_is_empty = true;
uint32_t* current_bucket = bucket[bucket_index];
int cell_offset = bucket_index * kBitsPerBucket;
for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
if (current_bucket[i]) {
uint32_t cell = current_bucket[i];
uint32_t old_cell = cell;
uint32_t new_cell = cell;
while (cell) {
int bit_offset = base::bits::CountTrailingZeros32(cell);
uint32_t bit_mask = 1u << bit_offset;
uint32_t slot = (cell_offset + bit_offset) << kPointerSizeLog2;
if (callback(page_start_ + slot) == KEEP_SLOT) {
bucket_is_empty = false;
} else {
new_cell ^= bit_mask;
}
cell ^= bit_mask;
}
if (old_cell != new_cell) {
current_bucket[i] = new_cell;
}
}
}
if (bucket_is_empty) {
ReleaseBucket(bucket_index);
}
}
}
}
private:
static const int kMaxSlots = (1 << kPageSizeBits) / kPointerSize;
static const int kCellsPerBucket = 32;
static const int kCellsPerBucketLog2 = 5;
static const int kBitsPerCell = 32;
static const int kBitsPerCellLog2 = 5;
static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
static const int kBuckets = kMaxSlots / kCellsPerBucket / kBitsPerCell;
uint32_t* AllocateBucket() {
uint32_t* result = NewArray<uint32_t>(kCellsPerBucket);
for (int i = 0; i < kCellsPerBucket; i++) {
result[i] = 0;
}
return result;
}
void ReleaseBucket(int bucket_index) {
DeleteArray<uint32_t>(bucket[bucket_index]);
bucket[bucket_index] = nullptr;
}
void MaskCell(int bucket_index, int cell_index, uint32_t mask) {
uint32_t* cells = bucket[bucket_index];
if (cells != nullptr && cells[cell_index] != 0) {
cells[cell_index] &= mask;
}
}
// Converts the slot offset into bucket/cell/bit index.
void SlotToIndices(int slot_offset, int* bucket_index, int* cell_index,
int* bit_index) {
DCHECK_EQ(slot_offset % kPointerSize, 0);
int slot = slot_offset >> kPointerSizeLog2;
DCHECK(slot >= 0 && slot <= kMaxSlots);
*bucket_index = slot >> kBitsPerBucketLog2;
*cell_index = (slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1);
*bit_index = slot & (kBitsPerCell - 1);
}
uint32_t* bucket[kBuckets];
Address page_start_;
};
} // namespace internal
} // namespace v8
#endif // V8_SLOT_SET_H
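
A quick usage sketch of the API above (not part of the CL; the function name is illustrative and the page start of 0 matches the unit tests added at the end of this commit). Offsets are byte offsets from the page start and must be pointer aligned; Iterate both visits and filters the recorded slots.

// Sketch only: exercises the SlotSet API declared above.
#include "src/heap/slot-set.h"

namespace v8 {
namespace internal {

void SlotSetUsageSketch() {
  SlotSet set;
  set.SetPageStart(0);                          // offsets below are relative to 0
  set.Insert(0 * kPointerSize);
  set.Insert(7 * kPointerSize);
  bool present = set.Lookup(7 * kPointerSize);  // true
  (void)present;

  // Iterate visits each recorded slot address; returning REMOVE_SLOT prunes
  // the entry, which is how stale old-to-new pointers are dropped.
  set.Iterate([](Address addr) {
    uintptr_t slot = reinterpret_cast<uintptr_t>(addr) / kPointerSize;
    return (slot % 2 == 0) ? SlotSet::KEEP_SLOT : SlotSet::REMOVE_SLOT;
  });

  // Remove all slots in the byte range [0, 8 * kPointerSize).
  set.RemoveRange(0, 8 * kPointerSize);
}

}  // namespace internal
}  // namespace v8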


@ -216,18 +216,6 @@ bool PagedSpace::Contains(Address addr) {
bool PagedSpace::Contains(HeapObject* o) { return Contains(o->address()); }
void MemoryChunk::set_scan_on_scavenge(bool scan) {
if (scan) {
if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
SetFlag(SCAN_ON_SCAVENGE);
} else {
if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
ClearFlag(SCAN_ON_SCAVENGE);
}
heap_->incremental_marking()->SetOldSpacePageFlags(this);
}
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
OffsetFrom(addr) & ~Page::kPageAlignmentMask);


@ -7,6 +7,7 @@
#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/full-codegen/full-codegen.h"
#include "src/heap/slot-set.h"
#include "src/heap/slots-buffer.h"
#include "src/macro-assembler.h"
#include "src/msan.h"
@ -428,7 +429,6 @@ NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start,
MemoryChunk* chunk =
MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
area_end, NOT_EXECUTABLE, semi_space);
chunk->initialize_scan_on_scavenge(true);
bool in_to_space = (semi_space->id() != kFromSpace);
chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
: MemoryChunk::IN_FROM_SPACE);
@ -464,14 +464,15 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->flags_ = 0;
chunk->set_owner(owner);
chunk->InitializeReservedMemory();
chunk->slots_buffer_ = NULL;
chunk->skip_list_ = NULL;
chunk->slots_buffer_ = nullptr;
chunk->old_to_new_slots_ = nullptr;
chunk->skip_list_ = nullptr;
chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
chunk->progress_bar_ = 0;
chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
chunk->parallel_compaction_state().SetValue(kCompactingDone);
chunk->mutex_ = NULL;
chunk->mutex_ = nullptr;
chunk->available_in_small_free_list_ = 0;
chunk->available_in_medium_free_list_ = 0;
chunk->available_in_large_free_list_ = 0;
@ -479,7 +480,6 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->non_available_small_blocks_ = 0;
chunk->ResetLiveBytes();
Bitmap::Clear(chunk);
chunk->initialize_scan_on_scavenge(false);
chunk->set_next_chunk(nullptr);
chunk->set_prev_chunk(nullptr);
@ -933,6 +933,23 @@ void MemoryChunk::ReleaseAllocatedMemory() {
delete slots_buffer_;
delete skip_list_;
delete mutex_;
ReleaseOldToNewSlots();
}
void MemoryChunk::AllocateOldToNewSlots() {
size_t pages = (size_ + Page::kPageSize - 1) / Page::kPageSize;
DCHECK(owner() == heap_->lo_space() || pages == 1);
DCHECK(pages > 0);
DCHECK(nullptr == old_to_new_slots_);
old_to_new_slots_ = new SlotSet[pages];
for (size_t i = 0; i < pages; i++) {
old_to_new_slots_[i].SetPageStart(address() + i * Page::kPageSize);
}
}
void MemoryChunk::ReleaseOldToNewSlots() {
delete[] old_to_new_slots_;
old_to_new_slots_ = nullptr;
}
@ -1187,11 +1204,6 @@ void PagedSpace::ReleasePage(Page* page, bool evict_free_list_items) {
DCHECK_EQ(AreaSize(), static_cast<int>(size));
}
if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
heap()->decrement_scan_on_scavenge_pages();
page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE);
}
DCHECK(!free_list_.ContainsPageFreeListItems(page));
if (Page::FromAllocationTop(allocation_info_.top()) == page) {
@ -1826,7 +1838,6 @@ void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
page->SetFlag(MemoryChunk::IN_FROM_SPACE);
page->ClearFlag(MemoryChunk::IN_TO_SPACE);
}
DCHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
}
@ -1900,7 +1911,6 @@ void SemiSpace::Verify() {
// TODO(gc): Check that the live_bytes_count_ field matches the
// black marking on the page (if we make it match in new-space).
}
CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
CHECK_EQ(page->prev_page()->next_page(), page);
page = page->next_page();
}


@ -31,6 +31,7 @@ class PagedSpace;
class SemiSpace;
class SkipList;
class SlotsBuffer;
class SlotSet;
class Space;
// -----------------------------------------------------------------------------
@ -296,10 +297,8 @@ class MemoryChunk {
public:
enum MemoryChunkFlags {
IS_EXECUTABLE,
ABOUT_TO_BE_FREED,
POINTERS_TO_HERE_ARE_INTERESTING,
POINTERS_FROM_HERE_ARE_INTERESTING,
SCAN_ON_SCAVENGE,
IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE.
IN_TO_SPACE, // All pages in new space has one of these two set.
NEW_SPACE_BELOW_AGE_MARK,
@ -398,6 +397,7 @@ class MemoryChunk {
static const size_t kWriteBarrierCounterOffset =
kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_;
+ kPointerSize // SlotSet* old_to_new_slots_;
+ kPointerSize; // SkipList* skip_list_;
static const size_t kMinHeaderSize =
@ -503,16 +503,6 @@ class MemoryChunk {
reservation_.TakeControl(reservation);
}
bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
void initialize_scan_on_scavenge(bool scan) {
if (scan) {
SetFlag(SCAN_ON_SCAVENGE);
} else {
ClearFlag(SCAN_ON_SCAVENGE);
}
}
inline void set_scan_on_scavenge(bool scan);
bool Contains(Address addr) {
return addr >= area_start() && addr < area_end();
}
@ -686,6 +676,11 @@ class MemoryChunk {
inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }
inline SlotSet* old_to_new_slots() { return old_to_new_slots_; }
void AllocateOldToNewSlots();
void ReleaseOldToNewSlots();
void MarkEvacuationCandidate() {
DCHECK(!IsFlagSet(NEVER_EVACUATE));
DCHECK(slots_buffer_ == NULL);
@ -733,6 +728,10 @@ class MemoryChunk {
// Count of bytes marked black on page.
int live_byte_count_;
SlotsBuffer* slots_buffer_;
// A single slot set for small pages (of size kPageSize) or an array of slot
// set for large pages. In the latter case the number of entries in the array
// is ceil(size() / kPageSize).
SlotSet* old_to_new_slots_;
SkipList* skip_list_;
intptr_t write_barrier_counter_;
// Assuming the initial allocation on a page is sequential,
@ -2233,8 +2232,7 @@ class NewSpacePage : public MemoryChunk {
// flipping semispaces.
static const intptr_t kCopyOnFlipFlagsMask =
(1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
(1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
(1 << MemoryChunk::SCAN_ON_SCAVENGE);
(1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
static const int kAreaSize = Page::kAllocatableMemory;


@ -12,37 +12,26 @@
namespace v8 {
namespace internal {
void StoreBuffer::Mark(Address addr) {
DCHECK(!heap_->code_space()->Contains(addr));
Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
*top++ = addr;
heap_->set_store_buffer_top(reinterpret_cast<Smi*>(top));
if ((reinterpret_cast<uintptr_t>(top) & kStoreBufferOverflowBit) != 0) {
DCHECK(top == limit_);
Compact();
uint32_t StoreBuffer::AddressToSlotSetAndOffset(Address addr, SlotSet** slots) {
MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
uintptr_t offset = addr - chunk->address();
if (offset < MemoryChunk::kHeaderSize || chunk->owner() == nullptr) {
chunk = heap_->lo_space()->FindPage(addr);
offset = addr - chunk->address();
}
if (chunk->old_to_new_slots() == nullptr) {
chunk->AllocateOldToNewSlots();
}
if (offset < Page::kPageSize) {
*slots = chunk->old_to_new_slots();
} else {
DCHECK(top < limit_);
*slots = &chunk->old_to_new_slots()[offset / Page::kPageSize];
offset = offset % Page::kPageSize;
}
return static_cast<uint32_t>(offset);
}
void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
if (store_buffer_rebuilding_enabled_) {
SLOW_DCHECK(!heap_->code_space()->Contains(addr) &&
!heap_->new_space()->Contains(addr));
Address* top = old_top_;
*top++ = addr;
old_top_ = top;
old_buffer_is_sorted_ = false;
old_buffer_is_filtered_ = false;
if (top >= old_limit_) {
DCHECK(callback_ != NULL);
(*callback_)(heap_, MemoryChunk::FromAnyPointerAddress(heap_, addr),
kStoreBufferFullEvent);
}
}
}
void LocalStoreBuffer::Record(Address addr) {
if (top_->is_full()) top_ = new Node(top_);
top_->buffer[top_->count++] = addr;
@ -58,6 +47,13 @@ void LocalStoreBuffer::Process(StoreBuffer* store_buffer) {
}
}
void StoreBuffer::Mark(Address addr) {
SlotSet* slots;
uint32_t offset;
offset = AddressToSlotSetAndOffset(addr, &slots);
slots->Insert(offset);
}
} // namespace internal
} // namespace v8
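
To make the large-page handling above concrete, a small worked sketch (not from the CL; the 1 MB page size is only for illustration) of how AddressToSlotSetAndOffset() selects one of the slot sets allocated by MemoryChunk::AllocateOldToNewSlots():

// Sketch only: a chunk of N bytes carries ceil(N / kPageSize) slot sets, one
// per kPageSize-sized piece; regular pages therefore have exactly one.
#include <cstddef>
#include <cstdint>

int main() {
  const size_t kPageSize = 1u << 20;                              // illustration
  const size_t chunk_size = 3 * kPageSize + 4096;                 // large-object chunk
  const size_t pages = (chunk_size + kPageSize - 1) / kPageSize;  // == 4 slot sets

  const uintptr_t offset = 2 * kPageSize + 64;                    // addr - chunk->address()
  const size_t slot_set_index = offset / kPageSize;               // == 2
  const uint32_t offset_in_page =
      static_cast<uint32_t>(offset % kPageSize);                  // == 64, fed to Insert()
  return (pages == 4 && slot_set_index == 2 && offset_in_page == 64) ? 0 : 1;
}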


@ -17,24 +17,7 @@ namespace v8 {
namespace internal {
StoreBuffer::StoreBuffer(Heap* heap)
: heap_(heap),
start_(NULL),
limit_(NULL),
old_start_(NULL),
old_limit_(NULL),
old_top_(NULL),
old_reserved_limit_(NULL),
old_buffer_is_sorted_(false),
old_buffer_is_filtered_(false),
during_gc_(false),
store_buffer_rebuilding_enabled_(false),
callback_(NULL),
may_move_store_buffer_entries_(true),
virtual_memory_(NULL),
hash_set_1_(NULL),
hash_set_2_(NULL),
hash_sets_are_empty_(true) {}
: heap_(heap), start_(nullptr), limit_(nullptr), virtual_memory_(nullptr) {}
void StoreBuffer::SetUp() {
// Allocate 3x the buffer size, so that we can start the new store buffer
@ -47,31 +30,6 @@ void StoreBuffer::SetUp() {
reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
limit_ = start_ + (kStoreBufferSize / kPointerSize);
// Reserve space for the larger old buffer.
old_virtual_memory_ =
new base::VirtualMemory(kOldStoreBufferLength * kPointerSize);
old_top_ = old_start_ =
reinterpret_cast<Address*>(old_virtual_memory_->address());
// Don't know the alignment requirements of the OS, but it is certainly not
// less than 0xfff.
CHECK((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
CHECK(kStoreBufferSize >= base::OS::CommitPageSize());
// Initial size of the old buffer is as big as the buffer for new pointers.
// This means even if we later fail to enlarge the old buffer due to OOM from
// the OS, we will still be able to empty the new pointer buffer into the old
// buffer.
int initial_length = static_cast<int>(kStoreBufferSize / kPointerSize);
CHECK(initial_length > 0);
CHECK(initial_length <= kOldStoreBufferLength);
old_limit_ = old_start_ + initial_length;
old_reserved_limit_ = old_start_ + kOldStoreBufferLength;
if (!old_virtual_memory_->Commit(reinterpret_cast<void*>(old_start_),
(old_limit_ - old_start_) * kPointerSize,
false)) {
V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
}
DCHECK(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
DCHECK(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
Address* vm_limit = reinterpret_cast<Address*>(
@ -90,195 +48,22 @@ void StoreBuffer::SetUp() {
V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
}
heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
hash_set_1_ = new uintptr_t[kHashSetLength];
hash_set_2_ = new uintptr_t[kHashSetLength];
hash_sets_are_empty_ = false;
ClearFilteringHashSets();
}
void StoreBuffer::TearDown() {
delete virtual_memory_;
delete old_virtual_memory_;
delete[] hash_set_1_;
delete[] hash_set_2_;
old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
start_ = limit_ = NULL;
heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
}
void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
isolate->heap()->store_buffer()->Compact();
isolate->heap()->store_buffer()->InsertEntriesFromBuffer();
isolate->counters()->store_buffer_overflows()->Increment();
}
bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
return old_limit_ - old_top_ >= space_needed;
}
void StoreBuffer::EnsureSpace(intptr_t space_needed) {
while (old_limit_ - old_top_ < space_needed &&
old_limit_ < old_reserved_limit_) {
size_t grow = old_limit_ - old_start_; // Double size.
if (old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
grow * kPointerSize, false)) {
old_limit_ += grow;
} else {
break;
}
}
if (SpaceAvailable(space_needed)) return;
if (old_buffer_is_filtered_) return;
DCHECK(may_move_store_buffer_entries_);
Compact();
old_buffer_is_filtered_ = true;
bool page_has_scan_on_scavenge_flag = false;
PointerChunkIterator it(heap_);
MemoryChunk* chunk;
while ((chunk = it.next()) != NULL) {
if (chunk->scan_on_scavenge()) {
page_has_scan_on_scavenge_flag = true;
break;
}
}
if (page_has_scan_on_scavenge_flag) {
Filter(MemoryChunk::SCAN_ON_SCAVENGE);
}
if (SpaceAvailable(space_needed)) return;
// Sample 1 entry in 97 and filter out the pages where we estimate that more
// than 1 in 8 pointers are to new space.
static const int kSampleFinenesses = 5;
static const struct Samples {
int prime_sample_step;
int threshold;
} samples[kSampleFinenesses] = {
{97, ((Page::kPageSize / kPointerSize) / 97) / 8},
{23, ((Page::kPageSize / kPointerSize) / 23) / 16},
{7, ((Page::kPageSize / kPointerSize) / 7) / 32},
{3, ((Page::kPageSize / kPointerSize) / 3) / 256},
{1, 0}};
for (int i = 0; i < kSampleFinenesses; i++) {
ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
// As a last resort we mark all pages as being exempt from the store buffer.
DCHECK(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
if (SpaceAvailable(space_needed)) return;
}
UNREACHABLE();
}
// Sample the store buffer to see if some pages are taking up a lot of space
// in the store buffer.
void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
HashMap store_buffer_counts(HashMap::PointersMatch, 16);
bool created_new_scan_on_scavenge_pages = false;
MemoryChunk* previous_chunk = NULL;
for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
Address addr = *p;
MemoryChunk* containing_chunk = NULL;
if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
containing_chunk = previous_chunk;
} else {
containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
}
HashMap::Entry* e = store_buffer_counts.LookupOrInsert(
containing_chunk,
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(containing_chunk) >>
kPageSizeBits));
intptr_t old_counter = bit_cast<intptr_t>(e->value);
if (old_counter >= threshold) {
containing_chunk->set_scan_on_scavenge(true);
created_new_scan_on_scavenge_pages = true;
}
(*bit_cast<intptr_t*>(&e->value))++;
previous_chunk = containing_chunk;
}
if (created_new_scan_on_scavenge_pages) {
Filter(MemoryChunk::SCAN_ON_SCAVENGE);
heap_->isolate()->CountUsage(
v8::Isolate::UseCounterFeature::kStoreBufferOverflow);
}
old_buffer_is_filtered_ = true;
}
void StoreBuffer::Filter(int flag) {
Address* new_top = old_start_;
MemoryChunk* previous_chunk = NULL;
for (Address* p = old_start_; p < old_top_; p++) {
Address addr = *p;
MemoryChunk* containing_chunk = NULL;
if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
containing_chunk = previous_chunk;
} else {
containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
previous_chunk = containing_chunk;
}
if (!containing_chunk->IsFlagSet(flag)) {
*new_top++ = addr;
}
}
old_top_ = new_top;
// Filtering hash sets are inconsistent with the store buffer after this
// operation.
ClearFilteringHashSets();
}
bool StoreBuffer::PrepareForIteration() {
Compact();
PointerChunkIterator it(heap_);
MemoryChunk* chunk;
bool page_has_scan_on_scavenge_flag = false;
while ((chunk = it.next()) != NULL) {
if (chunk->scan_on_scavenge()) {
page_has_scan_on_scavenge_flag = true;
break;
}
}
if (page_has_scan_on_scavenge_flag) {
Filter(MemoryChunk::SCAN_ON_SCAVENGE);
}
// Filtering hash sets are inconsistent with the store buffer after
// iteration.
ClearFilteringHashSets();
return page_has_scan_on_scavenge_flag;
}
void StoreBuffer::ClearFilteringHashSets() {
if (!hash_sets_are_empty_) {
memset(reinterpret_cast<void*>(hash_set_1_), 0,
sizeof(uintptr_t) * kHashSetLength);
memset(reinterpret_cast<void*>(hash_set_2_), 0,
sizeof(uintptr_t) * kHashSetLength);
hash_sets_are_empty_ = true;
}
}
void StoreBuffer::GCPrologue() {
ClearFilteringHashSets();
during_gc_ = true;
}
#ifdef VERIFY_HEAP
void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
LargeObjectIterator it(space);
@ -286,7 +71,6 @@ void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
if (object->IsFixedArray()) {
Address slot_address = object->address();
Address end = object->address() + object->Size();
while (slot_address < end) {
HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
// When we are not in GC the Heap::InNewSpace() predicate
@ -308,25 +92,38 @@ void StoreBuffer::Verify() {
#endif
}
void StoreBuffer::GCEpilogue() {
during_gc_ = false;
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
void StoreBuffer::InsertEntriesFromBuffer() {
Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
if (top == start_) return;
// There's no check of the limit in the loop below so we check here for
// the worst case (compaction doesn't eliminate any pointers).
DCHECK(top <= limit_);
heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
Page* last_page = nullptr;
SlotSet* last_slot_set = nullptr;
for (Address* current = start_; current < top; current++) {
DCHECK(!heap_->code_space()->Contains(*current));
Address addr = *current;
Page* page = Page::FromAddress(addr);
SlotSet* slot_set;
uint32_t offset;
if (page == last_page) {
slot_set = last_slot_set;
offset = static_cast<uint32_t>(addr - page->address());
} else {
offset = AddressToSlotSetAndOffset(addr, &slot_set);
last_page = page;
last_slot_set = slot_set;
}
slot_set->Insert(offset);
}
#endif
}
void StoreBuffer::ProcessOldToNewSlot(Address slot_address,
ObjectSlotCallback slot_callback) {
static SlotSet::CallbackResult ProcessOldToNewSlot(
Heap* heap, Address slot_address, ObjectSlotCallback slot_callback) {
Object** slot = reinterpret_cast<Object**>(slot_address);
Object* object = *slot;
// If the object is not in from space, it must be a duplicate store buffer
// entry and the slot was already updated.
if (heap_->InFromSpace(object)) {
if (heap->InFromSpace(object)) {
HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
DCHECK(heap_object->IsHeapObject());
slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
@ -335,289 +132,87 @@ void StoreBuffer::ProcessOldToNewSlot(Address slot_address,
// callback in to space, the object is still live.
// Unfortunately, we do not know about the slot. It could be in a
// just freed free space object.
if (heap_->InToSpace(object)) {
EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
if (heap->InToSpace(object)) {
return SlotSet::KEEP_SLOT;
}
} else {
DCHECK(!heap->InNewSpace(object));
}
return SlotSet::REMOVE_SLOT;
}
void StoreBuffer::FindPointersToNewSpaceInRegion(
Address start, Address end, ObjectSlotCallback slot_callback) {
for (Address slot_address = start; slot_address < end;
slot_address += kPointerSize) {
ProcessOldToNewSlot(slot_address, slot_callback);
}
void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
Heap* heap = heap_;
Iterate([heap, slot_callback](Address addr) {
return ProcessOldToNewSlot(heap, addr, slot_callback);
});
}
void StoreBuffer::IteratePointersInStoreBuffer(
ObjectSlotCallback slot_callback) {
Address* limit = old_top_;
old_top_ = old_start_;
{
DontMoveStoreBufferEntriesScope scope(this);
for (Address* current = old_start_; current < limit; current++) {
#ifdef DEBUG
Address* saved_top = old_top_;
#endif
ProcessOldToNewSlot(*current, slot_callback);
DCHECK(old_top_ == saved_top + 1 || old_top_ == saved_top);
template <typename Callback>
void StoreBuffer::Iterate(Callback callback) {
InsertEntriesFromBuffer();
PointerChunkIterator it(heap_);
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
if (chunk->old_to_new_slots() != nullptr) {
SlotSet* slots = chunk->old_to_new_slots();
size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
for (size_t page = 0; page < pages; page++) {
slots[page].Iterate(callback);
}
}
}
}
void StoreBuffer::ClearInvalidStoreBufferEntries() {
Compact();
Address* new_top = old_start_;
for (Address* current = old_start_; current < old_top_; current++) {
Address addr = *current;
Object** slot = reinterpret_cast<Object**>(addr);
Object* object = *slot;
if (heap_->InNewSpace(object) && object->IsHeapObject()) {
// If the target object is not black, the source slot must be part
// of a non-black (dead) object.
HeapObject* heap_object = HeapObject::cast(object);
if (Marking::IsBlack(Marking::MarkBitFrom(heap_object)) &&
heap_->mark_compact_collector()->IsSlotInLiveObject(addr)) {
*new_top++ = addr;
}
}
}
old_top_ = new_top;
ClearFilteringHashSets();
InsertEntriesFromBuffer();
// Don't scan on scavenge dead large objects.
LargeObjectIterator it(heap_->lo_space());
for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
if (chunk->scan_on_scavenge() &&
Marking::IsWhite(Marking::MarkBitFrom(object))) {
chunk->set_scan_on_scavenge(false);
Heap* heap = heap_;
PageIterator it(heap->old_space());
MemoryChunk* chunk;
while (it.has_next()) {
chunk = it.next();
if (chunk->old_to_new_slots() != nullptr) {
SlotSet* slots = chunk->old_to_new_slots();
size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
if (pages > 1) {
// Large pages were processed above.
continue;
}
slots->Iterate([heap](Address addr) {
Object** slot = reinterpret_cast<Object**>(addr);
Object* object = *slot;
if (heap->InNewSpace(object)) {
DCHECK(object->IsHeapObject());
// If the target object is not black, the source slot must be part
// of a non-black (dead) object.
HeapObject* heap_object = HeapObject::cast(object);
bool live = Marking::IsBlack(Marking::MarkBitFrom(heap_object)) &&
heap->mark_compact_collector()->IsSlotInLiveObject(addr);
return live ? SlotSet::KEEP_SLOT : SlotSet::REMOVE_SLOT;
}
return SlotSet::REMOVE_SLOT;
});
}
}
}
void StoreBuffer::VerifyValidStoreBufferEntries() {
for (Address* current = old_start_; current < old_top_; current++) {
Object** slot = reinterpret_cast<Object**>(*current);
Heap* heap = heap_;
Iterate([heap](Address addr) {
Object** slot = reinterpret_cast<Object**>(addr);
Object* object = *slot;
CHECK(object->IsHeapObject());
CHECK(heap_->InNewSpace(object));
heap_->mark_compact_collector()->VerifyIsSlotInLiveObject(
reinterpret_cast<Address>(slot), HeapObject::cast(object));
}
}
class FindPointersToNewSpaceVisitor final : public ObjectVisitor {
public:
FindPointersToNewSpaceVisitor(StoreBuffer* store_buffer,
ObjectSlotCallback callback)
: store_buffer_(store_buffer), callback_(callback) {}
V8_INLINE void VisitPointers(Object** start, Object** end) override {
store_buffer_->FindPointersToNewSpaceInRegion(
reinterpret_cast<Address>(start), reinterpret_cast<Address>(end),
callback_);
}
V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {}
private:
StoreBuffer* store_buffer_;
ObjectSlotCallback callback_;
};
void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
// We do not sort or remove duplicated entries from the store buffer because
// we expect that callback will rebuild the store buffer thus removing
// all duplicates and pointers to old space.
bool some_pages_to_scan = PrepareForIteration();
// TODO(gc): we want to skip slots on evacuation candidates
// but we can't simply figure that out from slot address
// because slot can belong to a large object.
IteratePointersInStoreBuffer(slot_callback);
// We are done scanning all the pointers that were in the store buffer, but
// there may be some pages marked scan_on_scavenge that have pointers to new
// space that are not in the store buffer. We must scan them now. As we
// scan, the surviving pointers to new space will be added to the store
// buffer. If there are still a lot of pointers to new space then we will
// keep the scan_on_scavenge flag on the page and discard the pointers that
// were added to the store buffer. If there are not many pointers to new
// space left on the page we will keep the pointers in the store buffer and
// remove the flag from the page.
if (some_pages_to_scan) {
if (callback_ != NULL) {
(*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
if (Page::FromAddress(addr)->owner() != nullptr &&
Page::FromAddress(addr)->owner()->identity() == OLD_SPACE) {
CHECK(object->IsHeapObject());
CHECK(heap->InNewSpace(object));
heap->mark_compact_collector()->VerifyIsSlotInLiveObject(
reinterpret_cast<Address>(slot), HeapObject::cast(object));
}
PointerChunkIterator it(heap_);
MemoryChunk* chunk;
FindPointersToNewSpaceVisitor visitor(this, slot_callback);
while ((chunk = it.next()) != NULL) {
if (chunk->scan_on_scavenge()) {
chunk->set_scan_on_scavenge(false);
if (callback_ != NULL) {
(*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
}
if (chunk->owner() == heap_->lo_space()) {
LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
HeapObject* array = large_page->GetObject();
DCHECK(array->IsFixedArray());
Address start = array->address();
Address end = start + array->Size();
FindPointersToNewSpaceInRegion(start, end, slot_callback);
} else {
Page* page = reinterpret_cast<Page*>(chunk);
PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
if (owner == heap_->map_space()) {
DCHECK(page->SweepingDone());
HeapObjectIterator iterator(page);
for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
heap_object = iterator.Next()) {
// We skip free space objects.
if (!heap_object->IsFiller()) {
DCHECK(heap_object->IsMap());
FindPointersToNewSpaceInRegion(
heap_object->address() + Map::kPointerFieldsBeginOffset,
heap_object->address() + Map::kPointerFieldsEndOffset,
slot_callback);
}
}
} else {
if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
// Aborted pages require iterating using mark bits because they
// don't have an iterable object layout before sweeping (which can
// only happen later). Note that we can never reach an
// aborted page through the scavenger.
DCHECK_EQ(heap_->gc_state(), Heap::MARK_COMPACT);
heap_->mark_compact_collector()->VisitLiveObjectsBody(page,
&visitor);
} else {
heap_->mark_compact_collector()
->SweepOrWaitUntilSweepingCompleted(page);
HeapObjectIterator iterator(page);
for (HeapObject* heap_object = iterator.Next();
heap_object != nullptr; heap_object = iterator.Next()) {
// We iterate over objects that contain new space pointers only.
heap_object->IterateBody(&visitor);
}
}
}
}
}
}
if (callback_ != NULL) {
(*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
}
}
}
void StoreBuffer::Compact() {
Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
if (top == start_) return;
// There's no check of the limit in the loop below so we check here for
// the worst case (compaction doesn't eliminate any pointers).
DCHECK(top <= limit_);
heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
EnsureSpace(top - start_);
DCHECK(may_move_store_buffer_entries_);
// Goes through the addresses in the store buffer attempting to remove
// duplicates. In the interest of speed this is a lossy operation. Some
// duplicates will remain. We have two hash sets with different hash
// functions to reduce the number of unnecessary clashes.
hash_sets_are_empty_ = false; // Hash sets are in use.
for (Address* current = start_; current < top; current++) {
DCHECK(!heap_->code_space()->Contains(*current));
uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
// Shift out the last bits including any tags.
int_addr >>= kPointerSizeLog2;
// The upper part of an address is basically random because of ASLR and OS
// non-determinism, so we use only the bits within a page for hashing to
// make v8's behavior (more) deterministic.
uintptr_t hash_addr =
int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
(kHashSetLength - 1));
if (hash_set_1_[hash1] == int_addr) continue;
uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2));
hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
hash2 &= (kHashSetLength - 1);
if (hash_set_2_[hash2] == int_addr) continue;
if (hash_set_1_[hash1] == 0) {
hash_set_1_[hash1] = int_addr;
} else if (hash_set_2_[hash2] == 0) {
hash_set_2_[hash2] = int_addr;
} else {
// Rather than slowing down we just throw away some entries. This will
// cause some duplicates to remain undetected.
hash_set_1_[hash1] = int_addr;
hash_set_2_[hash2] = 0;
}
old_buffer_is_sorted_ = false;
old_buffer_is_filtered_ = false;
*old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
DCHECK(old_top_ <= old_limit_);
}
heap_->isolate()->counters()->store_buffer_compactions()->Increment();
}
void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
if (event == kStoreBufferStartScanningPagesEvent) {
start_of_current_page_ = NULL;
current_page_ = NULL;
} else if (event == kStoreBufferScanningPageEvent) {
if (current_page_ != NULL) {
// If this page already overflowed the store buffer during this iteration.
if (current_page_->scan_on_scavenge()) {
// Then we should wipe out the entries that have been added for it.
store_buffer_->SetTop(start_of_current_page_);
} else if (store_buffer_->Top() - start_of_current_page_ >=
(store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
// Did we find too many pointers in the previous page? The heuristic is
// that no page can take more then 1/5 the remaining slots in the store
// buffer.
current_page_->set_scan_on_scavenge(true);
store_buffer_->SetTop(start_of_current_page_);
} else {
// In this case the page we scanned took a reasonable number of slots in
// the store buffer. It has now been rehabilitated and is no longer
// marked scan_on_scavenge.
DCHECK(!current_page_->scan_on_scavenge());
}
}
start_of_current_page_ = store_buffer_->Top();
current_page_ = page;
} else if (event == kStoreBufferFullEvent) {
// The current page overflowed the store buffer again. Wipe out its entries
// in the store buffer and mark it scan-on-scavenge again. This may happen
// several times while scanning.
if (current_page_ == NULL) {
// Store Buffer overflowed while scanning promoted objects. These are not
// in any particular page, though they are likely to be clustered by the
// allocation routines.
store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
} else {
// Store Buffer overflowed while scanning a particular old space page for
// pointers to new space.
DCHECK(current_page_ == page);
DCHECK(page != NULL);
current_page_->set_scan_on_scavenge(true);
DCHECK(start_of_current_page_ != store_buffer_->Top());
store_buffer_->SetTop(start_of_current_page_);
}
} else {
UNREACHABLE();
}
return SlotSet::KEEP_SLOT;
});
}
} // namespace internal


@ -9,6 +9,7 @@
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/globals.h"
#include "src/heap/slot-set.h"
namespace v8 {
namespace internal {
@ -24,67 +25,23 @@ typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
class StoreBuffer {
public:
explicit StoreBuffer(Heap* heap);
static void StoreBufferOverflow(Isolate* isolate);
void SetUp();
void TearDown();
// This is used to add addresses to the store buffer non-concurrently.
inline void Mark(Address addr);
// This is used by the heap traversal to enter the addresses into the store
// buffer that should still be in the store buffer after GC. It enters
// addresses directly into the old buffer because the GC starts by wiping the
// old buffer and thereafter only visits each cell once so there is no need
// to attempt to remove any dupes. During the first part of a GC we
// are using the store buffer to access the old spaces and at the same time
// we are rebuilding the store buffer using this function. There is, however
// no issue of overwriting the buffer we are iterating over, because this
// stage of the scavenge can only reduce the number of addresses in the store
// buffer (some objects are promoted so pointers to them do not need to be in
// the store buffer). The later parts of the GC scan the pages that are
// exempt from the store buffer and process the promotion queue. These steps
// can overflow this buffer. We check for this and on overflow we call the
// callback set up with the StoreBufferRebuildScope object.
inline void EnterDirectlyIntoStoreBuffer(Address addr);
// Iterates over all pointers that go from old space to new space. It will
// delete the store buffer as it starts so the callback should reenter
// surviving old-to-new pointers into the store buffer to rebuild it.
void IteratePointersToNewSpace(ObjectSlotCallback callback);
static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
static const int kStoreBufferSize = kStoreBufferOverflowBit;
static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
static const int kOldStoreBufferLength = kStoreBufferLength * 16;
static const int kHashSetLengthLog2 = 12;
static const int kHashSetLength = 1 << kHashSetLengthLog2;
void Compact();
// This is used to add addresses to the store buffer non-concurrently.
inline void Mark(Address addr);
void GCPrologue();
void GCEpilogue();
// Slots that do not point to the ToSpace after callback invocation will be
// removed from the set.
void IteratePointersToNewSpace(ObjectSlotCallback callback);
Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); }
Object*** Start() { return reinterpret_cast<Object***>(old_start_); }
Object*** Top() { return reinterpret_cast<Object***>(old_top_); }
void SetTop(Object*** top) {
DCHECK(top >= Start());
DCHECK(top <= Limit());
old_top_ = reinterpret_cast<Address*>(top);
}
bool old_buffer_is_sorted() { return old_buffer_is_sorted_; }
bool old_buffer_is_filtered() { return old_buffer_is_filtered_; }
void EnsureSpace(intptr_t space_needed);
void Verify();
bool PrepareForIteration();
void Filter(int flag);
// Eliminates all stale store buffer entries from the store buffer, i.e.,
// slots that are not part of live objects anymore. This method must be
// called after marking, when the whole transitive closure is known and
@ -95,124 +52,30 @@ class StoreBuffer {
private:
Heap* heap_;
// The store buffer is divided up into a new buffer that is constantly being
// filled by mutator activity and an old buffer that is filled with the data
// from the new buffer after compression.
// The start and the limit of the buffer that contains store slots
// added from the generated code.
Address* start_;
Address* limit_;
Address* old_start_;
Address* old_limit_;
Address* old_top_;
Address* old_reserved_limit_;
base::VirtualMemory* old_virtual_memory_;
bool old_buffer_is_sorted_;
bool old_buffer_is_filtered_;
bool during_gc_;
// The garbage collector iterates over many pointers to new space that are not
// handled by the store buffer. This flag indicates whether the pointers
// found by the callbacks should be added to the store buffer or not.
bool store_buffer_rebuilding_enabled_;
StoreBufferCallback callback_;
bool may_move_store_buffer_entries_;
base::VirtualMemory* virtual_memory_;
// Two hash sets used for filtering.
// If address is in the hash set then it is guaranteed to be in the
// old part of the store buffer.
uintptr_t* hash_set_1_;
uintptr_t* hash_set_2_;
bool hash_sets_are_empty_;
// Used for synchronization of concurrent store buffer access.
base::Mutex mutex_;
void ClearFilteringHashSets();
void InsertEntriesFromBuffer();
bool SpaceAvailable(intptr_t space_needed);
void ExemptPopularPages(int prime_sample_step, int threshold);
inline uint32_t AddressToSlotSetAndOffset(Address slot_address,
SlotSet** slots);
void ProcessOldToNewSlot(Address slot_address,
ObjectSlotCallback slot_callback);
void FindPointersToNewSpaceInRegion(Address start, Address end,
ObjectSlotCallback slot_callback);
void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback);
template <typename Callback>
void Iterate(Callback callback);
#ifdef VERIFY_HEAP
void VerifyPointers(LargeObjectSpace* space);
#endif
friend class DontMoveStoreBufferEntriesScope;
friend class FindPointersToNewSpaceVisitor;
friend class StoreBufferRebuildScope;
};
class StoreBufferRebuilder {
public:
explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
: store_buffer_(store_buffer) {}
void Callback(MemoryChunk* page, StoreBufferEvent event);
private:
StoreBuffer* store_buffer_;
// We record in this variable how full the store buffer was when we started
// iterating over the current page, finding pointers to new space. If the
// store buffer overflows again we can exempt the page from the store buffer
// by rewinding to this point instead of having to search the store buffer.
Object*** start_of_current_page_;
// The current page we are scanning in the store buffer iterator.
MemoryChunk* current_page_;
};
class StoreBufferRebuildScope {
public:
explicit StoreBufferRebuildScope(Heap* heap, StoreBuffer* store_buffer,
StoreBufferCallback callback)
: store_buffer_(store_buffer),
stored_state_(store_buffer->store_buffer_rebuilding_enabled_),
stored_callback_(store_buffer->callback_) {
store_buffer_->store_buffer_rebuilding_enabled_ = true;
store_buffer_->callback_ = callback;
(*callback)(heap, NULL, kStoreBufferStartScanningPagesEvent);
}
~StoreBufferRebuildScope() {
store_buffer_->callback_ = stored_callback_;
store_buffer_->store_buffer_rebuilding_enabled_ = stored_state_;
}
private:
StoreBuffer* store_buffer_;
bool stored_state_;
StoreBufferCallback stored_callback_;
};
class DontMoveStoreBufferEntriesScope {
public:
explicit DontMoveStoreBufferEntriesScope(StoreBuffer* store_buffer)
: store_buffer_(store_buffer),
stored_state_(store_buffer->may_move_store_buffer_entries_) {
store_buffer_->may_move_store_buffer_entries_ = false;
}
~DontMoveStoreBufferEntriesScope() {
store_buffer_->may_move_store_buffer_entries_ = stored_state_;
}
private:
StoreBuffer* store_buffer_;
bool stored_state_;
};
class LocalStoreBuffer BASE_EMBEDDED {
public:
LocalStoreBuffer() : top_(new Node(nullptr)) {}


@ -4047,11 +4047,8 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.scratch0(),
&dont_need_remembered_set);
__ CheckPageFlag(regs_.object(),
regs_.scratch0(),
1 << MemoryChunk::SCAN_ON_SCAVENGE,
not_zero,
&dont_need_remembered_set);
__ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
&dont_need_remembered_set);
// First notify the incremental marker if necessary, then update the
// remembered set.


@ -4189,11 +4189,8 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.scratch0(),
&dont_need_remembered_set);
__ CheckPageFlag(regs_.object(),
regs_.scratch0(),
1 << MemoryChunk::SCAN_ON_SCAVENGE,
ne,
&dont_need_remembered_set);
__ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
&dont_need_remembered_set);
// First notify the incremental marker if necessary, then update the
// remembered set.


@ -4204,11 +4204,8 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.scratch0(),
&dont_need_remembered_set);
__ CheckPageFlag(regs_.object(),
regs_.scratch0(),
1 << MemoryChunk::SCAN_ON_SCAVENGE,
ne,
&dont_need_remembered_set);
__ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
&dont_need_remembered_set);
// First notify the incremental marker if necessary, then update the
// remembered set.


@ -3954,11 +3954,8 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.scratch0(),
&dont_need_remembered_set);
__ CheckPageFlag(regs_.object(),
regs_.scratch0(),
1 << MemoryChunk::SCAN_ON_SCAVENGE,
not_zero,
&dont_need_remembered_set);
__ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
&dont_need_remembered_set);
// First notify the incremental marker if necessary, then update the
// remembered set.


@ -3728,11 +3728,8 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.scratch0(),
&dont_need_remembered_set);
__ CheckPageFlag(regs_.object(),
regs_.scratch0(),
1 << MemoryChunk::SCAN_ON_SCAVENGE,
not_zero,
&dont_need_remembered_set);
__ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
&dont_need_remembered_set);
// First notify the incremental marker if necessary, then update the
// remembered set.


@ -1363,65 +1363,6 @@ TEST(LayoutDescriptorSharing) {
}
TEST(StoreBufferScanOnScavenge) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
v8::HandleScope scope(CcTest::isolate());
Handle<FieldType> any_type = FieldType::Any(isolate);
Handle<Map> map = Map::Create(isolate, 10);
map = Map::CopyWithField(map, MakeName("prop", 0), any_type, NONE,
Representation::Double(),
INSERT_TRANSITION).ToHandleChecked();
// Create object in new space.
Handle<JSObject> obj = factory->NewJSObjectFromMap(map, NOT_TENURED);
Handle<HeapNumber> heap_number = factory->NewHeapNumber(42.5);
obj->WriteToField(0, *heap_number);
{
// Ensure the object is properly set up.
DescriptorArray* descriptors = map->instance_descriptors();
CHECK(descriptors->GetDetails(0).representation().IsDouble());
FieldIndex field_index = FieldIndex::ForDescriptor(*map, 0);
CHECK(field_index.is_inobject() && field_index.is_double());
CHECK_EQ(FLAG_unbox_double_fields, map->IsUnboxedDoubleField(field_index));
CHECK_EQ(42.5, GetDoubleFieldValue(*obj, field_index));
}
CHECK(isolate->heap()->new_space()->Contains(*obj));
// Trigger GCs so that the newly allocated object moves to old gen.
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
CHECK(isolate->heap()->old_space()->Contains(*obj));
// Create temp object in the new space.
Handle<JSArray> temp = factory->NewJSArray(0, FAST_ELEMENTS);
CHECK(isolate->heap()->new_space()->Contains(*temp));
// Construct a double value that looks like a pointer to the new space object
// and store it into the obj.
Address fake_object = reinterpret_cast<Address>(*temp) + kPointerSize;
double boom_value = bit_cast<double>(fake_object);
FieldIndex field_index = FieldIndex::ForDescriptor(obj->map(), 0);
Handle<HeapNumber> boom_number = factory->NewHeapNumber(boom_value, MUTABLE);
obj->FastPropertyAtPut(field_index, *boom_number);
// Enforce scan on scavenge for the obj's page.
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
chunk->set_scan_on_scavenge(true);
// Trigger GCs and force evacuation. Should not crash there.
CcTest::heap()->CollectAllGarbage();
CHECK_EQ(boom_value, GetDoubleFieldValue(*obj, field_index));
}
static void TestWriteBarrier(Handle<Map> map, Handle<Map> new_map,
int tagged_descriptor, int double_descriptor,
bool check_tagged_value = true) {


@ -0,0 +1,130 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <limits>
#include "src/globals.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
namespace internal {
TEST(SlotSet, InsertAndLookup1) {
SlotSet set;
set.SetPageStart(0);
for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
EXPECT_FALSE(set.Lookup(i));
}
for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
set.Insert(i);
}
for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
EXPECT_TRUE(set.Lookup(i));
}
}
TEST(SlotSet, InsertAndLookup2) {
SlotSet set;
set.SetPageStart(0);
for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
if (i % 7 == 0) {
set.Insert(i);
}
}
for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
if (i % 7 == 0) {
EXPECT_TRUE(set.Lookup(i));
} else {
EXPECT_FALSE(set.Lookup(i));
}
}
}
TEST(SlotSet, Iterate) {
SlotSet set;
set.SetPageStart(0);
for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
if (i % 7 == 0) {
set.Insert(i);
}
}
set.Iterate([](Address slot_address) {
uintptr_t intaddr = reinterpret_cast<uintptr_t>(slot_address);
if (intaddr % 3 == 0) {
return SlotSet::KEEP_SLOT;
} else {
return SlotSet::REMOVE_SLOT;
}
});
for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
if (i % 21 == 0) {
EXPECT_TRUE(set.Lookup(i));
} else {
EXPECT_FALSE(set.Lookup(i));
}
}
}
TEST(SlotSet, Remove) {
SlotSet set;
set.SetPageStart(0);
for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
if (i % 7 == 0) {
set.Insert(i);
}
}
for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
if (i % 3 != 0) {
set.Remove(i);
}
}
for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
if (i % 21 == 0) {
EXPECT_TRUE(set.Lookup(i));
} else {
EXPECT_FALSE(set.Lookup(i));
}
}
}
TEST(SlotSet, RemoveRange) {
SlotSet set;
set.SetPageStart(0);
for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
set.Insert(i);
}
set.RemoveRange(0, Page::kPageSize);
for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
EXPECT_FALSE(set.Lookup(i));
}
for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
set.Insert(i);
}
set.RemoveRange(10 * kPointerSize, 10 * kPointerSize);
EXPECT_TRUE(set.Lookup(9 * kPointerSize));
EXPECT_TRUE(set.Lookup(10 * kPointerSize));
EXPECT_TRUE(set.Lookup(11 * kPointerSize));
set.RemoveRange(10 * kPointerSize, 1000 * kPointerSize);
for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
if (10 * kPointerSize <= i && i < 1000 * kPointerSize) {
EXPECT_FALSE(set.Lookup(i));
} else {
EXPECT_TRUE(set.Lookup(i));
}
}
}
} // namespace internal
} // namespace v8


@ -108,6 +108,7 @@
'heap/memory-reducer-unittest.cc',
'heap/heap-unittest.cc',
'heap/scavenge-job-unittest.cc',
'heap/slot-set-unittest.cc',
'locked-queue-unittest.cc',
'run-all-unittests.cc',
'runtime/runtime-interpreter-unittest.cc',


@ -871,6 +871,7 @@
'../../src/heap/scavenger-inl.h',
'../../src/heap/scavenger.cc',
'../../src/heap/scavenger.h',
'../../src/heap/slot-set.h',
'../../src/heap/slots-buffer.cc',
'../../src/heap/slots-buffer.h',
'../../src/heap/spaces-inl.h',