Revert "[heap] Insert directly into RememberedSet and remove StoreBuffer"

This reverts commit 70e07cdb6e.

Reason for revert: Clusterfuzz found an issue; see chromium:1009019

Original change's description:
> [heap] Insert directly into RememberedSet and remove StoreBuffer
>
> This CL removes the StoreBuffer and inserts slots into the
> RememberedSet directly from within the RecordWrite builtin. It only
> calls into C code when either the SlotSet array or the bucket is not
> allocated. This avoids filling up the store buffer with duplicates or
> under a write-heavy workload, and then blocking the main thread on
> store buffer processing.
>
> Change-Id: I05b0b0938d822cdf0e8ef086ad4527d3229c05b2
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1815241
> Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
> Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#64002}

TBR=ulan@chromium.org,jkummerow@chromium.org,dinfuehr@chromium.org

Change-Id: I6f4cc1641965c83b05f3b3830b0f526b362beb49
Bug: chromium:1009019, chromium:1009196
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1829259
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64043}
Dominik Inführ 2019-09-29 19:35:36 +02:00, committed by Commit Bot
parent 95ec4803fd
commit 24d9e63e81
14 changed files with 664 additions and 171 deletions
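
For context, the mechanism the reverted CL added can be summarized with a
minimal standalone sketch (not V8 code; the names, constants, and array sizes
below are illustrative): the generated write barrier sets a bit directly in the
page's OLD_TO_NEW slot set and leaves generated code only when the slot-set
array or the bucket has not been allocated yet.

#include <cstddef>
#include <cstdint>

// Illustrative constants; V8 derives the real values from the page size and
// the tagged pointer size.
constexpr int kCellsPerBucket = 32;
constexpr int kBitsPerCell = 32;
constexpr int kBuckets = 8;

struct Bucket { uint32_t cells[kCellsPerBucket]; };

struct SlotSet {
  Bucket* buckets[kBuckets];  // entries stay null until lazily allocated
};

// Stand-in for the C++ fallback (Heap::InsertIntoRememberedSetFromCode in the
// reverted CL), which would allocate the missing bucket and record the slot.
void RecordSlotSlow(SlotSet* /*slot_set*/, size_t /*slot_index*/) {}

// Fast path; assumes slot_index < kBuckets * kCellsPerBucket * kBitsPerCell.
void RecordSlot(SlotSet* slot_set, size_t slot_index) {
  constexpr size_t kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
  Bucket* bucket =
      slot_set ? slot_set->buckets[slot_index / kBitsPerBucket] : nullptr;
  if (bucket == nullptr) {
    RecordSlotSlow(slot_set, slot_index);  // the only call out of the builtin
    return;
  }
  size_t bit = slot_index % kBitsPerBucket;
  bucket->cells[bit / kBitsPerCell] |= uint32_t{1} << (bit % kBitsPerCell);
}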

@ -2372,6 +2372,9 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/spaces-inl.h",
"src/heap/spaces.cc",
"src/heap/spaces.h",
"src/heap/store-buffer-inl.h",
"src/heap/store-buffer.cc",
"src/heap/store-buffer.h",
"src/heap/stress-marking-observer.cc",
"src/heap/stress-marking-observer.h",
"src/heap/stress-scavenge-observer.cc",

@ -273,24 +273,24 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
return TaggedEqual(remembered_set, SmiConstant(EMIT_REMEMBERED_SET));
}
void CallCFunction2WithCallerSavedRegistersMode(
MachineType return_type, MachineType arg0_type, MachineType arg1_type,
Node* function, Node* arg0, Node* arg1, Node* mode, Label* next) {
void CallCFunction1WithCallerSavedRegistersMode(MachineType return_type,
MachineType arg0_type,
Node* function, Node* arg0,
Node* mode, Label* next) {
Label dont_save_fp(this), save_fp(this);
Branch(ShouldSkipFPRegs(mode), &dont_save_fp, &save_fp);
BIND(&dont_save_fp);
{
CallCFunctionWithCallerSavedRegisters(
function, return_type, kDontSaveFPRegs,
std::make_pair(arg0_type, arg0), std::make_pair(arg1_type, arg1));
CallCFunctionWithCallerSavedRegisters(function, return_type,
kDontSaveFPRegs,
std::make_pair(arg0_type, arg0));
Goto(next);
}
BIND(&save_fp);
{
CallCFunctionWithCallerSavedRegisters(function, return_type, kSaveFPRegs,
std::make_pair(arg0_type, arg0),
std::make_pair(arg1_type, arg1));
std::make_pair(arg0_type, arg0));
Goto(next);
}
}
@ -319,93 +319,34 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
}
}
void InsertIntoRememberedSetAndGotoSlow(Node* isolate, TNode<IntPtrT> object,
TNode<IntPtrT> slot, Node* mode,
Label* next) {
TNode<IntPtrT> page = PageFromAddress(object);
TNode<ExternalReference> function =
ExternalConstant(ExternalReference::insert_remembered_set_function());
CallCFunction2WithCallerSavedRegistersMode(
MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(),
function, page, slot, mode, next);
}
void InsertToStoreBufferAndGoto(Node* isolate, Node* slot, Node* mode,
Label* next) {
TNode<ExternalReference> store_buffer_top_addr =
ExternalConstant(ExternalReference::store_buffer_top(this->isolate()));
Node* store_buffer_top =
Load(MachineType::Pointer(), store_buffer_top_addr);
StoreNoWriteBarrier(MachineType::PointerRepresentation(), store_buffer_top,
slot);
TNode<WordT> new_store_buffer_top =
IntPtrAdd(store_buffer_top, IntPtrConstant(kSystemPointerSize));
StoreNoWriteBarrier(MachineType::PointerRepresentation(),
store_buffer_top_addr, new_store_buffer_top);
void InsertIntoRememberedSetAndGoto(Node* isolate, TNode<IntPtrT> object,
TNode<IntPtrT> slot, Node* mode,
Label* next) {
Label slow_path(this);
TNode<IntPtrT> page = PageFromAddress(object);
TNode<WordT> test =
WordAnd(new_store_buffer_top,
IntPtrConstant(Heap::store_buffer_mask_constant()));
// Load address of SlotSet
TNode<IntPtrT> slot_set_array = LoadSlotSetArray(page, &slow_path);
TNode<IntPtrT> page_start_offset = IntPtrSub(slot, page);
TNode<IntPtrT> slot_set = SlotSetAddress(slot_set_array, page_start_offset);
Label overflow(this);
Branch(IntPtrEqual(test, IntPtrConstant(0)), &overflow, next);
// Calculate bucket_index, cell_index and bit_index
TNode<WordT> bucket_index, cell_offset, bit_index;
SlotIndices(page_start_offset, &bucket_index, &cell_offset, &bit_index);
// Update cell
TNode<IntPtrT> bucket = LoadBucket(slot_set, bucket_index, &slow_path);
SetBitInCell(bucket, cell_offset, bit_index);
Goto(next);
BIND(&slow_path);
InsertIntoRememberedSetAndGotoSlow(isolate, object, slot, mode, next);
}
TNode<IntPtrT> LoadSlotSetArray(TNode<IntPtrT> page, Label* slow_path) {
TNode<IntPtrT> slot_set_array = UncheckedCast<IntPtrT>(
Load(MachineType::Pointer(), page,
IntPtrConstant(MemoryChunk::kOldToNewSlotSetOffset)));
GotoIf(WordEqual(slot_set_array, IntPtrConstant(0)), slow_path);
return slot_set_array;
}
TNode<IntPtrT> SlotSetAddress(TNode<IntPtrT> slot_set_array,
TNode<IntPtrT> page_start_offset) {
TNode<WordT> slot_set_index =
UncheckedCast<IntPtrT>(WordShr(page_start_offset, kPageSizeBits));
TNode<IntPtrT> slot_set = UncheckedCast<IntPtrT>(
IntPtrAdd(slot_set_array,
IntPtrMul(slot_set_index, IntPtrConstant(sizeof(SlotSet)))));
return slot_set;
}
void SlotIndices(TNode<IntPtrT> page_start_offset, TNode<WordT>* bucket_index,
TNode<WordT>* cell_offset, TNode<WordT>* bit_index) {
TNode<WordT> offset =
WordAnd(page_start_offset, IntPtrConstant(MemoryChunk::kPageSize - 1));
TNode<WordT> slot_index = WordShr(offset, kTaggedSizeLog2);
*bucket_index = WordShr(slot_index, SlotSet::kBitsPerBucketLog2);
*cell_offset = WordAnd(WordShr(slot_index, SlotSet::kBitsPerCellLog2 -
SlotSet::kCellSizeBytesLog2),
IntPtrConstant((SlotSet::kCellsPerBucket - 1)
<< SlotSet::kCellSizeBytesLog2));
*bit_index = WordAnd(slot_index, IntPtrConstant(SlotSet::kBitsPerCell - 1));
}
TNode<IntPtrT> LoadBucket(TNode<IntPtrT> slot_set, TNode<WordT> bucket_index,
Label* slow_path) {
TNode<IntPtrT> bucket = UncheckedCast<IntPtrT>(
Load(MachineType::Pointer(), slot_set,
WordShl(bucket_index, kSystemPointerSizeLog2)));
GotoIf(WordEqual(bucket, IntPtrConstant(0)), slow_path);
return bucket;
}
void SetBitInCell(TNode<IntPtrT> bucket, TNode<WordT> cell_offset,
TNode<WordT> bit_index) {
TNode<IntPtrT> cell_address =
UncheckedCast<IntPtrT>(IntPtrAdd(bucket, cell_offset));
TNode<IntPtrT> old_cell_value =
ChangeInt32ToIntPtr(Load<Int32T>(cell_address));
TNode<IntPtrT> new_cell_value = UncheckedCast<IntPtrT>(
WordOr(old_cell_value, WordShl(IntPtrConstant(1), bit_index)));
StoreNoWriteBarrier(MachineRepresentation::kWord32, cell_address,
TruncateIntPtrToInt32(new_cell_value));
BIND(&overflow);
{
TNode<ExternalReference> function =
ExternalConstant(ExternalReference::store_buffer_overflow_function());
CallCFunction1WithCallerSavedRegistersMode(MachineType::Int32(),
MachineType::Pointer(),
function, isolate, mode, next);
}
}
};
@ -456,10 +397,7 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
TNode<ExternalReference> isolate_constant =
ExternalConstant(ExternalReference::isolate_address(isolate()));
Node* fp_mode = Parameter(Descriptor::kFPMode);
TNode<IntPtrT> object =
BitcastTaggedToWord(Parameter(Descriptor::kObject));
InsertIntoRememberedSetAndGoto(isolate_constant, object, slot, fp_mode,
&exit);
InsertToStoreBufferAndGoto(isolate_constant, slot, fp_mode, &exit);
}
BIND(&store_buffer_incremental_wb);
@ -467,10 +405,8 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
TNode<ExternalReference> isolate_constant =
ExternalConstant(ExternalReference::isolate_address(isolate()));
Node* fp_mode = Parameter(Descriptor::kFPMode);
TNode<IntPtrT> object =
BitcastTaggedToWord(Parameter(Descriptor::kObject));
InsertIntoRememberedSetAndGoto(isolate_constant, object, slot, fp_mode,
&incremental_wb);
InsertToStoreBufferAndGoto(isolate_constant, slot, fp_mode,
&incremental_wb);
}
}
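
The store-buffer fast path restored above relies on the buffer being allocated
with a power-of-two size and at least that alignment, so the builtin can detect
a full buffer with a single mask test on the bumped top pointer. A minimal
standalone sketch of that idea (illustrative names and sizes, not V8's):

#include <cstdint>

constexpr uintptr_t kBufferSize = uintptr_t{1} << 14;  // power of two
constexpr uintptr_t kBufferMask = kBufferSize - 1;

// Stand-in for the StoreBuffer::StoreBufferOverflow runtime call, which flips
// the buffers and schedules concurrent processing of the full one.
void Overflow() {}

// top_address points at the published buffer top; the backing store is assumed
// to be aligned to kBufferSize, as StoreBuffer::SetUp arranges in V8.
void RecordSlot(uintptr_t** top_address, uintptr_t slot) {
  uintptr_t* top = *top_address;
  *top++ = slot;  // store the slot, then bump the top pointer
  *top_address = top;
  if ((reinterpret_cast<uintptr_t>(top) & kBufferMask) == 0) {
    Overflow();  // the bumped top reached a buffer end
  }
}

This is why StoreBuffer::SetUp below asserts that kStoreBufferSize is a power
of two and allocates the buffers with at least that alignment.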

@ -217,8 +217,10 @@ struct IsValidExternalReferenceType<Result (Class::*)(Args...)> {
FUNCTION_REFERENCE(incremental_marking_record_write_function,
IncrementalMarking::RecordWriteFromCode)
FUNCTION_REFERENCE(insert_remembered_set_function,
Heap::InsertIntoRememberedSetFromCode)
ExternalReference ExternalReference::store_buffer_overflow_function() {
return ExternalReference(
Redirect(Heap::store_buffer_overflow_function_address()));
}
FUNCTION_REFERENCE(delete_handle_scope_extensions,
HandleScope::DeleteExtensions)
@ -340,6 +342,10 @@ ExternalReference ExternalReference::address_of_real_jslimit(Isolate* isolate) {
return ExternalReference(address);
}
ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
return ExternalReference(isolate->heap()->store_buffer_top_address());
}
ExternalReference ExternalReference::heap_is_marking_flag_address(
Isolate* isolate) {
return ExternalReference(isolate->heap()->IsMarkingFlagAddress());

@ -38,6 +38,7 @@ class StatsCounter;
V(allocation_sites_list_address, "Heap::allocation_sites_list_address()") \
V(address_of_jslimit, "StackGuard::address_of_jslimit()") \
V(address_of_real_jslimit, "StackGuard::address_of_real_jslimit()") \
V(store_buffer_top, "store_buffer_top") \
V(heap_is_marking_flag_address, "heap_is_marking_flag_address") \
V(new_space_allocation_top_address, "Heap::NewSpaceAllocationTopAddress()") \
V(new_space_allocation_limit_address, \
@ -142,7 +143,6 @@ class StatsCounter;
V(ieee754_tanh_function, "base::ieee754::tanh") \
V(incremental_marking_record_write_function, \
"IncrementalMarking::RecordWrite") \
V(insert_remembered_set_function, "Heap::InsertIntoRememberedSetFromCode") \
V(invalidate_prototype_chains_function, \
"JSObject::InvalidatePrototypeChains()") \
V(invoke_accessor_getter_callback, "InvokeAccessorGetterCallback") \
@ -170,6 +170,7 @@ class StatsCounter;
V(search_string_raw_two_one, "search_string_raw_two_one") \
V(search_string_raw_two_two, "search_string_raw_two_two") \
V(smi_lexicographic_compare_function, "smi_lexicographic_compare_function") \
V(store_buffer_overflow_function, "StoreBuffer::StoreBufferOverflow") \
V(try_internalize_string_function, "try_internalize_string_function") \
V(wasm_call_trap_callback_for_testing, \
"wasm::call_trap_callback_for_testing") \

@ -10,19 +10,12 @@
#include "src/base/atomic-utils.h"
#include "src/common/globals.h"
#include "src/heap/marking.h"
#include "src/heap/slot-set.h"
namespace v8 {
namespace internal {
class MemoryChunk;
enum RememberedSetType {
OLD_TO_NEW,
OLD_TO_OLD,
NUMBER_OF_REMEMBERED_SET_TYPES
};
class BasicMemoryChunk {
public:
enum Flag {
@ -177,11 +170,6 @@ class BasicMemoryChunk {
static const intptr_t kHeapOffset = kMarkBitmapOffset + kSystemPointerSize;
static const intptr_t kHeaderSentinelOffset =
kHeapOffset + kSystemPointerSize;
static const intptr_t kAreaStartOffset =
kHeaderSentinelOffset + kSystemPointerSize;
static const intptr_t kAreaEndOffset = kAreaStartOffset + kSystemPointerSize;
static const intptr_t kOldToNewSlotSetOffset =
kAreaEndOffset + kSystemPointerSize;
static const size_t kHeaderSize =
kSizeOffset + kSizetSize // size_t size
@ -190,8 +178,7 @@ class BasicMemoryChunk {
+ kSystemPointerSize // Heap* heap_
+ kSystemPointerSize // Address header_sentinel_
+ kSystemPointerSize // Address area_start_
+ kSystemPointerSize // Address area_end_
+ kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES; // SlotSet* array
+ kSystemPointerSize; // Address area_end_
protected:
// Overall size of the chunk, including the header and guards.
@ -217,11 +204,6 @@ class BasicMemoryChunk {
Address area_start_;
Address area_end_;
// A single slot set for small pages (of size kPageSize) or an array of slot
// set for large pages. In the latter case the number of entries in the array
// is ceil(size() / kPageSize).
SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
friend class BasicMemoryChunkValidator;
};
@ -239,8 +221,6 @@ class BasicMemoryChunkValidator {
offsetof(BasicMemoryChunk, heap_));
STATIC_ASSERT(BasicMemoryChunk::kHeaderSentinelOffset ==
offsetof(BasicMemoryChunk, header_sentinel_));
STATIC_ASSERT(BasicMemoryChunk::kOldToNewSlotSetOffset ==
offsetof(BasicMemoryChunk, slot_set_));
};
} // namespace internal

@ -48,6 +48,7 @@
#include "src/heap/remembered-set.h"
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/store-buffer.h"
#include "src/heap/stress-marking-observer.h"
#include "src/heap/stress-scavenge-observer.h"
#include "src/heap/sweeper.h"
@ -913,6 +914,23 @@ void Heap::RemoveAllocationObserversFromAllSpaces(
}
}
class Heap::SkipStoreBufferScope {
public:
explicit SkipStoreBufferScope(StoreBuffer* store_buffer)
: store_buffer_(store_buffer) {
store_buffer_->MoveAllEntriesToRememberedSet();
store_buffer_->SetMode(StoreBuffer::IN_GC);
}
~SkipStoreBufferScope() {
DCHECK(store_buffer_->Empty());
store_buffer_->SetMode(StoreBuffer::NOT_IN_GC);
}
private:
StoreBuffer* store_buffer_;
};
namespace {
inline bool MakePretenureDecision(
AllocationSite site, AllocationSite::PretenureDecision current_decision,
@ -1948,40 +1966,44 @@ bool Heap::PerformGarbageCollection(
size_t start_young_generation_size =
Heap::new_space()->Size() + new_lo_space()->SizeOfObjects();
switch (collector) {
case MARK_COMPACTOR:
UpdateOldGenerationAllocationCounter();
// Perform mark-sweep with optional compaction.
MarkCompact();
old_generation_size_configured_ = true;
// This should be updated before PostGarbageCollectionProcessing, which
// can cause another GC. Take into account the objects promoted during
// GC.
old_generation_allocation_counter_at_last_gc_ +=
static_cast<size_t>(promoted_objects_size_);
old_generation_size_at_last_gc_ = OldGenerationSizeOfObjects();
break;
case MINOR_MARK_COMPACTOR:
MinorMarkCompact();
break;
case SCAVENGER:
if ((fast_promotion_mode_ &&
CanExpandOldGeneration(new_space()->Size() +
new_lo_space()->Size()))) {
tracer()->NotifyYoungGenerationHandling(
YoungGenerationHandling::kFastPromotionDuringScavenge);
EvacuateYoungGeneration();
} else {
tracer()->NotifyYoungGenerationHandling(
YoungGenerationHandling::kRegularScavenge);
{
Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_.get());
Scavenge();
}
break;
switch (collector) {
case MARK_COMPACTOR:
UpdateOldGenerationAllocationCounter();
// Perform mark-sweep with optional compaction.
MarkCompact();
old_generation_size_configured_ = true;
// This should be updated before PostGarbageCollectionProcessing, which
// can cause another GC. Take into account the objects promoted during
// GC.
old_generation_allocation_counter_at_last_gc_ +=
static_cast<size_t>(promoted_objects_size_);
old_generation_size_at_last_gc_ = OldGenerationSizeOfObjects();
break;
case MINOR_MARK_COMPACTOR:
MinorMarkCompact();
break;
case SCAVENGER:
if ((fast_promotion_mode_ &&
CanExpandOldGeneration(new_space()->Size() +
new_lo_space()->Size()))) {
tracer()->NotifyYoungGenerationHandling(
YoungGenerationHandling::kFastPromotionDuringScavenge);
EvacuateYoungGeneration();
} else {
tracer()->NotifyYoungGenerationHandling(
YoungGenerationHandling::kRegularScavenge);
Scavenge();
}
break;
}
ProcessPretenuringFeedback();
}
ProcessPretenuringFeedback();
UpdateSurvivalStatistics(static_cast<int>(start_young_generation_size));
ConfigureInitialOldGenerationSize();
@ -4124,6 +4146,7 @@ void Heap::VerifyRememberedSetFor(HeapObject object) {
std::set<Address> old_to_new;
std::set<std::pair<SlotType, Address> > typed_old_to_new;
if (!InYoungGeneration(object)) {
store_buffer()->MoveAllEntriesToRememberedSet();
CollectSlots<OLD_TO_NEW>(chunk, start, end, &old_to_new, &typed_old_to_new);
OldToNewSlotVerifyingVisitor visitor(&old_to_new, &typed_old_to_new,
&this->ephemeron_remembered_set_);
@ -4986,6 +5009,8 @@ void Heap::SetUp() {
memory_allocator_.reset(
new MemoryAllocator(isolate_, MaxReserved(), code_range_size_));
store_buffer_.reset(new StoreBuffer(this));
mark_compact_collector_.reset(new MarkCompactCollector(this));
scavenger_collector_.reset(new ScavengerCollector(this));
@ -5055,6 +5080,8 @@ void Heap::SetUpSpaces() {
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
LOG(isolate_, IntPtrTEvent("heap-available", Available()));
store_buffer()->SetUp();
mark_compact_collector()->SetUp();
#ifdef ENABLE_MINOR_MC
if (minor_mark_compact_collector() != nullptr) {
@ -5286,6 +5313,8 @@ void Heap::TearDown() {
space_[i] = nullptr;
}
store_buffer()->TearDown();
memory_allocator()->TearDown();
StrongRootsList* next = nullptr;
@ -5295,6 +5324,7 @@ void Heap::TearDown() {
}
strong_roots_list_ = nullptr;
store_buffer_.reset();
memory_allocator_.reset();
}
@ -5505,6 +5535,24 @@ void Heap::CheckHandleCount() {
isolate_->handle_scope_implementer()->Iterate(&v);
}
Address* Heap::store_buffer_top_address() {
return store_buffer()->top_address();
}
// static
intptr_t Heap::store_buffer_mask_constant() {
return StoreBuffer::kStoreBufferMask;
}
// static
Address Heap::store_buffer_overflow_function_address() {
return FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow);
}
void Heap::MoveStoreBufferEntriesToRememberedSet() {
store_buffer()->MoveAllEntriesToRememberedSet();
}
void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) {
#ifndef V8_DISABLE_WRITE_BARRIERS
DCHECK(!IsLargeObject(object));
@ -5513,18 +5561,13 @@ void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) {
DCHECK_EQ(page->owner_identity(), OLD_SPACE);
if (!page->SweepingDone()) {
store_buffer()->MoveAllEntriesToRememberedSet();
RememberedSet<OLD_TO_NEW>::Remove(page, slot.address());
}
}
#endif
}
// static
int Heap::InsertIntoRememberedSetFromCode(MemoryChunk* chunk, Address slot) {
RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
return 0;
}
#ifdef DEBUG
void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
#ifndef V8_DISABLE_WRITE_BARRIERS
@ -5532,6 +5575,7 @@ void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
if (InYoungGeneration(object)) return;
Page* page = Page::FromAddress(slot.address());
DCHECK_EQ(page->owner_identity(), OLD_SPACE);
store_buffer()->MoveAllEntriesToRememberedSet();
// Slots are filtered with invalidated slots.
CHECK_IMPLIES(RememberedSet<OLD_TO_NEW>::Contains(page, slot.address()),
page->RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(object));
@ -5549,6 +5593,7 @@ void Heap::ClearRecordedSlotRange(Address start, Address end) {
DCHECK_EQ(page->owner_identity(), OLD_SPACE);
if (!page->SweepingDone()) {
store_buffer()->MoveAllEntriesToRememberedSet();
RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
SlotSet::KEEP_EMPTY_BUCKETS);
}
@ -6160,8 +6205,8 @@ void Heap::WriteBarrierForCodeSlow(Code code) {
void Heap::GenerationalBarrierSlow(HeapObject object, Address slot,
HeapObject value) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
Heap* heap = Heap::FromWritableHeapObject(object);
heap->store_buffer()->InsertEntry(slot);
}
void Heap::RecordEphemeronKeyWrite(EphemeronHashTable table, Address slot) {
@ -6203,6 +6248,7 @@ void Heap::WriteBarrierForRangeImpl(MemoryChunk* source_page, HeapObject object,
STATIC_ASSERT(!(kModeMask & kDoEvacuationSlotRecording) ||
(kModeMask & kDoMarking));
StoreBuffer* store_buffer = this->store_buffer();
IncrementalMarking* incremental_marking = this->incremental_marking();
MarkCompactCollector* collector = this->mark_compact_collector();
@ -6213,8 +6259,7 @@ void Heap::WriteBarrierForRangeImpl(MemoryChunk* source_page, HeapObject object,
if ((kModeMask & kDoGenerational) &&
Heap::InYoungGeneration(value_heap_object)) {
RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(source_page,
slot.address());
store_buffer->InsertEntry(slot.address());
}
if ((kModeMask & kDoMarking) &&

@ -80,6 +80,7 @@ class ScavengeJob;
class Scavenger;
class ScavengerCollector;
class Space;
class StoreBuffer;
class StressScavengeObserver;
class TimedHistogram;
class WeakObjectRetainer;
@ -853,9 +854,9 @@ class Heap {
static intptr_t store_buffer_mask_constant();
static Address store_buffer_overflow_function_address();
void MoveStoreBufferEntriesToRememberedSet();
void ClearRecordedSlot(HeapObject object, ObjectSlot slot);
void ClearRecordedSlotRange(Address start, Address end);
static int InsertIntoRememberedSetFromCode(MemoryChunk* chunk, Address slot);
#ifdef DEBUG
void VerifyClearedSlot(HeapObject object, ObjectSlot slot);
@ -1359,6 +1360,8 @@ class Heap {
inline int MaxNumberToStringCacheSize() const;
private:
class SkipStoreBufferScope;
using ExternalStringTableUpdaterCallback = String (*)(Heap* heap,
FullObjectSlot pointer);
@ -1471,6 +1474,8 @@ class Heap {
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
StoreBuffer* store_buffer() { return store_buffer_.get(); }
void set_current_gc_flags(int flags) { current_gc_flags_ = flags; }
inline bool ShouldReduceMemory() const {
@ -1982,6 +1987,7 @@ class Heap {
std::unique_ptr<ScavengerCollector> scavenger_collector_;
std::unique_ptr<ArrayBufferCollector> array_buffer_collector_;
std::unique_ptr<MemoryAllocator> memory_allocator_;
std::unique_ptr<StoreBuffer> store_buffer_;
std::unique_ptr<IncrementalMarking> incremental_marking_;
std::unique_ptr<ConcurrentMarking> concurrent_marking_;
std::unique_ptr<GCIdleTimeHandler> gc_idle_time_handler_;
@ -2103,6 +2109,7 @@ class Heap {
friend class Scavenger;
friend class ScavengerCollector;
friend class Space;
friend class StoreBuffer;
friend class Sweeper;
friend class heap::TestMemoryAllocatorScope;

@ -265,20 +265,17 @@ class SlotSet : public Malloced {
DCHECK_EQ(0u, to_be_freed_buckets_.size());
}
private:
using Bucket = uint32_t*;
static const int kMaxSlots = (1 << kPageSizeBits) / kTaggedSize;
static const int kCellsPerBucket = 32;
static const int kCellsPerBucketLog2 = 5;
static const int kCellSizeBytesLog2 = 2;
static const int kCellSizeBytes = 1 << kCellSizeBytesLog2;
static const int kBitsPerCell = 32;
static const int kBitsPerCellLog2 = 5;
static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
static const int kBuckets = kMaxSlots / kCellsPerBucket / kBitsPerCell;
private:
using Bucket = uint32_t*;
Bucket AllocateBucket() {
Bucket result = NewArray<uint32_t>(kCellsPerBucket);
for (int i = 0; i < kCellsPerBucket; i++) {

@ -23,6 +23,7 @@
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
#include "src/heap/store-buffer.h"
#include "src/heap/sweeper.h"
#include "src/init/v8.h"
#include "src/logging/counters.h"
@ -865,6 +866,7 @@ void Page::MoveOldToNewRememberedSetForSweeping() {
void Page::MergeOldToNewRememberedSets() {
if (sweeping_slot_set_ == nullptr) return;
DCHECK(heap()->store_buffer()->Empty());
RememberedSet<OLD_TO_NEW>::Iterate(
this,
@ -1664,6 +1666,12 @@ void PagedSpace::RefillFreeList() {
MarkCompactCollector* collector = heap()->mark_compact_collector();
size_t added = 0;
// Avoid races with concurrent store buffer processing when merging
// old-to-new remembered sets later.
if (!is_local()) {
heap()->MoveStoreBufferEntriesToRememberedSet();
}
{
Page* p = nullptr;
while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {

@ -130,6 +130,12 @@ enum FreeMode { kLinkCategory, kDoNotLinkCategory };
enum class SpaceAccountingMode { kSpaceAccounted, kSpaceUnaccounted };
enum RememberedSetType {
OLD_TO_NEW,
OLD_TO_OLD,
NUMBER_OF_REMEMBERED_SET_TYPES
};
// A free list category maintains a linked list of free memory blocks.
class FreeListCategory {
public:
@ -600,6 +606,7 @@ class MemoryChunk : public BasicMemoryChunk {
+ kSystemPointerSize // Address owner_
+ kSizetSize // size_t progress_bar_
+ kIntptrSize // intptr_t live_byte_count_
+ kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // SlotSet* array
+ kSystemPointerSize // SlotSet* sweeping_slot_set_
+ kSystemPointerSize *
NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
@ -915,6 +922,7 @@ class MemoryChunk : public BasicMemoryChunk {
// A single slot set for small pages (of size kPageSize) or an array of slot
// set for large pages. In the latter case the number of entries in the array
// is ceil(size() / kPageSize).
SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
SlotSet* sweeping_slot_set_;
TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
InvalidatedSlots* invalidated_slots_[NUMBER_OF_REMEMBERED_SET_TYPES];

src/heap/store-buffer-inl.h (new file, 26 lines)

@ -0,0 +1,26 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_STORE_BUFFER_INL_H_
#define V8_HEAP_STORE_BUFFER_INL_H_
#include "src/heap/store-buffer.h"
#include "src/heap/heap-inl.h"
namespace v8 {
namespace internal {
void StoreBuffer::InsertIntoStoreBuffer(Address slot) {
if (top_ + sizeof(Address) > limit_[current_]) {
StoreBufferOverflow(heap_->isolate());
}
*top_ = slot;
top_++;
}
} // namespace internal
} // namespace v8
#endif // V8_HEAP_STORE_BUFFER_INL_H_

src/heap/store-buffer.cc (new file, 174 lines)

@ -0,0 +1,174 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/store-buffer.h"
#include <algorithm>
#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/execution/isolate.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/store-buffer-inl.h"
#include "src/init/v8.h"
#include "src/logging/counters.h"
#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
StoreBuffer::StoreBuffer(Heap* heap)
: heap_(heap), top_(nullptr), current_(0), mode_(NOT_IN_GC) {
for (int i = 0; i < kStoreBuffers; i++) {
start_[i] = nullptr;
limit_[i] = nullptr;
lazy_top_[i] = nullptr;
}
task_running_ = false;
insertion_callback = &InsertDuringRuntime;
}
void StoreBuffer::SetUp() {
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
// Round up the requested size in order to fulfill the VirtualMemory's
// requirements on the requested size alignment. This may cause a bit of
// memory wastage if the actual CommitPageSize() will be bigger than the
// kMinExpectedOSPageSize value but this is a trade-off for keeping the
// store buffer overflow check in write barriers cheap.
const size_t requested_size = RoundUp(kStoreBufferSize * kStoreBuffers,
page_allocator->CommitPageSize());
// Allocate buffer memory aligned at least to kStoreBufferSize. This lets us
// use a bit test to detect the ends of the buffers.
STATIC_ASSERT(base::bits::IsPowerOfTwo(kStoreBufferSize));
const size_t alignment =
std::max<size_t>(kStoreBufferSize, page_allocator->AllocatePageSize());
void* hint = AlignedAddress(heap_->GetRandomMmapAddr(), alignment);
VirtualMemory reservation(page_allocator, requested_size, hint, alignment);
if (!reservation.IsReserved()) {
heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
}
Address start = reservation.address();
const size_t allocated_size = reservation.size();
start_[0] = reinterpret_cast<Address*>(start);
limit_[0] = start_[0] + (kStoreBufferSize / kSystemPointerSize);
start_[1] = limit_[0];
limit_[1] = start_[1] + (kStoreBufferSize / kSystemPointerSize);
// Sanity check the buffers.
Address* vm_limit = reinterpret_cast<Address*>(start + allocated_size);
USE(vm_limit);
for (int i = 0; i < kStoreBuffers; i++) {
DCHECK(reinterpret_cast<Address>(start_[i]) >= reservation.address());
DCHECK(reinterpret_cast<Address>(limit_[i]) >= reservation.address());
DCHECK(start_[i] <= vm_limit);
DCHECK(limit_[i] <= vm_limit);
DCHECK_EQ(0, reinterpret_cast<Address>(limit_[i]) & kStoreBufferMask);
}
// Set RW permissions only on the pages we use.
const size_t used_size = RoundUp(requested_size, CommitPageSize());
if (!reservation.SetPermissions(start, used_size,
PageAllocator::kReadWrite)) {
heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
}
current_ = 0;
top_ = start_[current_];
virtual_memory_ = std::move(reservation);
}
void StoreBuffer::TearDown() {
if (virtual_memory_.IsReserved()) virtual_memory_.Free();
top_ = nullptr;
for (int i = 0; i < kStoreBuffers; i++) {
start_[i] = nullptr;
limit_[i] = nullptr;
lazy_top_[i] = nullptr;
}
}
void StoreBuffer::InsertDuringRuntime(StoreBuffer* store_buffer, Address slot) {
DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
store_buffer->InsertIntoStoreBuffer(slot);
}
void StoreBuffer::InsertDuringGarbageCollection(StoreBuffer* store_buffer,
Address slot) {
DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
RememberedSet<OLD_TO_NEW>::Insert<AccessMode::ATOMIC>(Page::FromAddress(slot),
slot);
}
void StoreBuffer::SetMode(StoreBufferMode mode) {
mode_ = mode;
if (mode == NOT_IN_GC) {
insertion_callback = &InsertDuringRuntime;
} else {
insertion_callback = &InsertDuringGarbageCollection;
}
}
int StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
isolate->heap()->store_buffer()->FlipStoreBuffers();
isolate->counters()->store_buffer_overflows()->Increment();
// Called by RecordWriteCodeStubAssembler, which doesn't accept a void type
return 0;
}
void StoreBuffer::FlipStoreBuffers() {
base::MutexGuard guard(&mutex_);
int other = (current_ + 1) % kStoreBuffers;
MoveEntriesToRememberedSet(other);
lazy_top_[current_] = top_;
current_ = other;
top_ = start_[current_];
if (!task_running_ && FLAG_concurrent_store_buffer) {
task_running_ = true;
V8::GetCurrentPlatform()->CallOnWorkerThread(
std::make_unique<Task>(heap_->isolate(), this));
}
}
void StoreBuffer::MoveEntriesToRememberedSet(int index) {
if (!lazy_top_[index]) return;
DCHECK_GE(index, 0);
DCHECK_LT(index, kStoreBuffers);
Address last_inserted_addr = kNullAddress;
MemoryChunk* chunk = nullptr;
for (Address* current = start_[index]; current < lazy_top_[index];
current++) {
Address addr = *current;
if (chunk == nullptr ||
MemoryChunk::BaseAddress(addr) != chunk->address()) {
chunk = MemoryChunk::FromAnyPointerAddress(addr);
}
if (addr != last_inserted_addr) {
RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, addr);
last_inserted_addr = addr;
}
}
lazy_top_[index] = nullptr;
}
void StoreBuffer::MoveAllEntriesToRememberedSet() {
base::MutexGuard guard(&mutex_);
int other = (current_ + 1) % kStoreBuffers;
MoveEntriesToRememberedSet(other);
lazy_top_[current_] = top_;
MoveEntriesToRememberedSet(current_);
top_ = start_[current_];
}
void StoreBuffer::ConcurrentlyProcessStoreBuffer() {
base::MutexGuard guard(&mutex_);
int other = (current_ + 1) % kStoreBuffers;
MoveEntriesToRememberedSet(other);
task_running_ = false;
}
} // namespace internal
} // namespace v8
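
A compact standalone model of the two-buffer scheme above (illustrative, not V8
code): the main thread fills the current buffer, publishes its extent in a lazy
top on overflow, and the other buffer is drained under the mutex while
consecutive duplicate slots are skipped; in V8 the drain also runs on a
background CancelableTask.

#include <cstddef>
#include <cstdint>
#include <mutex>
#include <set>

class StoreBufferModel {
 public:
  // Main-thread only, mirroring StoreBuffer::InsertIntoStoreBuffer.
  void Insert(uintptr_t slot) {
    if (top_ == kCapacity) Flip();
    buffer_[current_][top_++] = slot;
  }

  // Runs under the mutex; in V8, ConcurrentlyProcessStoreBuffer does this on a
  // worker task.
  void DrainPending() {
    std::lock_guard<std::mutex> guard(mutex_);
    Drain(1 - current_);
  }

  const std::set<uintptr_t>& remembered_set() const { return remembered_set_; }

 private:
  static constexpr size_t kCapacity = 1024;

  void Flip() {
    std::lock_guard<std::mutex> guard(mutex_);
    Drain(1 - current_);         // the older buffer is processed first
    lazy_top_[current_] = top_;  // publish how much of this buffer is used
    current_ = 1 - current_;
    top_ = 0;
  }

  void Drain(int index) {
    uintptr_t last = 0;
    for (size_t i = 0; i < lazy_top_[index]; i++) {
      uintptr_t slot = buffer_[index][i];
      if (slot != last) remembered_set_.insert(slot);  // skip adjacent dups
      last = slot;
    }
    lazy_top_[index] = 0;
  }

  std::mutex mutex_;
  uintptr_t buffer_[2][kCapacity];
  size_t lazy_top_[2] = {0, 0};
  size_t top_ = 0;
  int current_ = 0;
  std::set<uintptr_t> remembered_set_;
};

Mirroring the ordering note in store-buffer.h, Flip() drains the older buffer
before publishing the newer one.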

src/heap/store-buffer.h (new file, 153 lines)

@ -0,0 +1,153 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_STORE_BUFFER_H_
#define V8_HEAP_STORE_BUFFER_H_
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/common/globals.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
#include "src/tasks/cancelable-task.h"
#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
// Intermediate buffer that accumulates old-to-new stores from the generated
// code. Moreover, it stores invalid old-to-new slots with two entries.
// The first is a tagged address of the start of the invalid range, the second
// one is the end address of the invalid range or null if there is just one slot
// that needs to be removed from the remembered set. On buffer overflow the
// slots are moved to the remembered set.
// Store buffer entries are always full pointers.
class StoreBuffer {
public:
enum StoreBufferMode { IN_GC, NOT_IN_GC };
static const int kStoreBuffers = 2;
static const int kStoreBufferSize =
Max(static_cast<int>(kMinExpectedOSPageSize / kStoreBuffers),
1 << (11 + kSystemPointerSizeLog2));
static const int kStoreBufferMask = kStoreBufferSize - 1;
V8_EXPORT_PRIVATE static int StoreBufferOverflow(Isolate* isolate);
static void InsertDuringGarbageCollection(StoreBuffer* store_buffer,
Address slot);
static void InsertDuringRuntime(StoreBuffer* store_buffer, Address slot);
explicit StoreBuffer(Heap* heap);
void SetUp();
void TearDown();
// Used to add entries from generated code.
inline Address* top_address() { return reinterpret_cast<Address*>(&top_); }
// Moves entries from a specific store buffer to the remembered set. This
// method takes a lock.
void MoveEntriesToRememberedSet(int index);
// This method ensures that all used store buffer entries are transferred to
// the remembered set.
void MoveAllEntriesToRememberedSet();
inline void InsertIntoStoreBuffer(Address slot);
void InsertEntry(Address slot) {
// Insertions coming from the GC are directly inserted into the remembered
// set. Insertions coming from the runtime are added to the store buffer to
// allow concurrent processing.
insertion_callback(this, slot);
}
void SetMode(StoreBufferMode mode);
// Used by the concurrent processing thread to transfer entries from the
// store buffer to the remembered set.
void ConcurrentlyProcessStoreBuffer();
bool Empty() {
for (int i = 0; i < kStoreBuffers; i++) {
if (lazy_top_[i]) {
return false;
}
}
return top_ == start_[current_];
}
Heap* heap() { return heap_; }
private:
// There are two store buffers. If one store buffer fills up, the main thread
// publishes the top pointer of the store buffer that needs processing in its
// global lazy_top_ field. After that it starts the concurrent processing
// thread. The concurrent processing thread uses the pointer in lazy_top_.
// It will grab the given mutex and transfer its entries to the remembered
// set. If the concurrent thread does not make progress, the main thread will
// perform the work.
// Important: there is an ordering constraint. The store buffer with the
// older entries has to be processed first.
class Task : public CancelableTask {
public:
Task(Isolate* isolate, StoreBuffer* store_buffer)
: CancelableTask(isolate),
store_buffer_(store_buffer),
tracer_(isolate->heap()->tracer()) {}
~Task() override = default;
private:
void RunInternal() override {
TRACE_BACKGROUND_GC(tracer_,
GCTracer::BackgroundScope::BACKGROUND_STORE_BUFFER);
store_buffer_->ConcurrentlyProcessStoreBuffer();
}
StoreBuffer* store_buffer_;
GCTracer* tracer_;
DISALLOW_COPY_AND_ASSIGN(Task);
};
StoreBufferMode mode() const { return mode_; }
void FlipStoreBuffers();
Heap* heap_;
Address* top_;
// The start and the limit of the buffer that contains store slots
// added from the generated code. We have two chunks of store buffers.
// Whenever one fills up, we notify a concurrent processing thread and
// use the other empty one in the meantime.
Address* start_[kStoreBuffers];
Address* limit_[kStoreBuffers];
// At most one lazy_top_ pointer is set at any time.
Address* lazy_top_[kStoreBuffers];
base::Mutex mutex_;
// We only want to have at most one concurrent processing task running.
bool task_running_;
// Points to the current buffer in use.
int current_;
// During GC, entries are directly added to the remembered set without
// going through the store buffer. This is signaled by a special
// IN_GC mode.
StoreBufferMode mode_;
VirtualMemory virtual_memory_;
// Callbacks are more efficient than reading out the gc state for every
// store buffer operation.
void (*insertion_callback)(StoreBuffer*, Address);
};
} // namespace internal
} // namespace v8
#endif // V8_HEAP_STORE_BUFFER_H_
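
The insertion_callback member declared above is a small design point worth
spelling out: SetMode installs one of two static functions when a GC starts or
ends, so InsertEntry is an indirect call rather than a branch on the GC state
at every recorded store. A tiny standalone model of that dispatch (illustrative
names, not V8 code):

#include <cstdint>

class InsertDispatch {
 public:
  enum Mode { IN_GC, NOT_IN_GC };

  InsertDispatch() { SetMode(NOT_IN_GC); }

  // Swapped once when the GC starts or ends, not checked on every store.
  void SetMode(Mode mode) {
    callback_ = (mode == NOT_IN_GC) ? &InsertDuringRuntime
                                    : &InsertDuringGarbageCollection;
  }

  void InsertEntry(uintptr_t slot) { callback_(this, slot); }

 private:
  static void InsertDuringRuntime(InsertDispatch*, uintptr_t) {
    // Runtime writes go into the store buffer for concurrent processing.
  }
  static void InsertDuringGarbageCollection(InsertDispatch*, uintptr_t) {
    // During GC, slots go straight into the OLD_TO_NEW remembered set.
  }

  void (*callback_)(InsertDispatch*, uintptr_t);
};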

@ -10,6 +10,7 @@
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/store-buffer.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"
@ -48,6 +49,154 @@ Page* HeapTester::AllocateByteArraysOnPage(
return page;
}
template <RememberedSetType direction>
static size_t GetRememberedSetSize(HeapObject obj) {
std::set<Address> slots;
RememberedSet<direction>::Iterate(
MemoryChunk::FromHeapObject(obj),
[&slots](MaybeObjectSlot slot) {
slots.insert(slot.address());
return KEEP_SLOT;
},
SlotSet::KEEP_EMPTY_BUCKETS);
return slots.size();
}
HEAP_TEST(StoreBuffer_CreateFromOldToYoung) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
heap::SealCurrentObjects(heap);
CHECK(heap->store_buffer()->Empty());
HandleScope scope(isolate);
const int n = 10;
Handle<FixedArray> old = factory->NewFixedArray(n, AllocationType::kOld);
// Fill the array with refs to both old and new targets.
{
const auto prev_top = *(heap->store_buffer_top_address());
HandleScope scope_inner(isolate);
intptr_t expected_slots_count = 0;
// Add refs from old to new.
for (int i = 0; i < n / 2; i++) {
Handle<Object> number = factory->NewHeapNumber(i);
old->set(i, *number);
expected_slots_count++;
}
// Add refs from old to old.
for (int i = n / 2; i < n; i++) {
Handle<Object> number = factory->NewHeapNumber<AllocationType::kOld>(i);
old->set(i, *number);
}
// All old-to-new refs, and only those, should have been captured.
const auto new_top = *(heap->store_buffer_top_address());
const intptr_t added_slots_count =
(new_top - prev_top) / kSystemPointerSize;
CHECK_EQ(expected_slots_count, added_slots_count);
}
// GC should flush the store buffer into remembered sets and retain the target
// young objects.
CHECK_EQ(0, GetRememberedSetSize<OLD_TO_NEW>(*old));
CcTest::CollectGarbage(i::NEW_SPACE);
CHECK(heap->store_buffer()->Empty());
CHECK_EQ(n / 2, GetRememberedSetSize<OLD_TO_NEW>(*old));
CHECK(Heap::InYoungGeneration(old->get(0)));
}
HEAP_TEST(StoreBuffer_Overflow) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
// Add enough refs from old to new to cause overflow of both buffer chunks.
const int n = 2 * StoreBuffer::kStoreBufferSize / kSystemPointerSize + 1;
HandleScope scope(isolate);
Handle<FixedArray> old = factory->NewFixedArray(n, AllocationType::kOld);
for (int i = 0; i < n; i++) {
Handle<Object> number = factory->NewHeapNumber(i);
old->set(i, *number);
}
// No test validations; the buffer-flipping code triggered by the overflow
// self-validates with asserts.
}
HEAP_TEST(StoreBuffer_NotUsedOnAgingObjectWithRefsToYounger) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
heap::SealCurrentObjects(heap);
CHECK(heap->store_buffer()->Empty());
const int n = 10;
HandleScope scope(isolate);
Handle<FixedArray> arr = factory->NewFixedArray(n);
// Transition the array into the older new tier.
CcTest::CollectGarbage(i::NEW_SPACE);
CHECK(Heap::InYoungGeneration(*arr));
// Fill the array with younger objects.
{
HandleScope scope_inner(isolate);
for (int i = 0; i < n; i++) {
Handle<Object> number = factory->NewHeapNumber(i);
arr->set(i, *number);
}
// The references aren't crossing generations yet so none should be tracked.
CHECK(heap->store_buffer()->Empty());
}
// Promote the array into old space; its elements are still in new space, so
// the old-to-new refs are inserted directly into the remembered sets during GC.
CcTest::CollectGarbage(i::NEW_SPACE);
CHECK(heap->InOldSpace(*arr));
CHECK(Heap::InYoungGeneration(arr->get(n / 2)));
CHECK(heap->store_buffer()->Empty());
CHECK_EQ(n, GetRememberedSetSize<OLD_TO_NEW>(*arr));
}
HEAP_TEST(RememberedSet_LargePage) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
heap::SealCurrentObjects(heap);
CHECK(heap->store_buffer()->Empty());
v8::HandleScope scope(CcTest::isolate());
// Allocate an object in Large space.
const int count = Max(FixedArray::kMaxRegularLength + 1, 128 * KB);
Handle<FixedArray> arr = factory->NewFixedArray(count, AllocationType::kOld);
CHECK(heap->lo_space()->Contains(*arr));
// Create OLD_TO_NEW references from the large object.
{
v8::HandleScope short_lived(CcTest::isolate());
Handle<Object> number = factory->NewHeapNumber(42);
arr->set(0, *number);
arr->set(count - 1, *number);
CHECK(!heap->store_buffer()->Empty());
}
// GC should flush the store buffer into the remembered set of the large page;
// it should also keep the young targets alive.
CcTest::CollectAllGarbage();
CHECK(heap->store_buffer()->Empty());
CHECK(Heap::InYoungGeneration(arr->get(0)));
CHECK(Heap::InYoungGeneration(arr->get(count - 1)));
CHECK_EQ(2, GetRememberedSetSize<OLD_TO_NEW>(*arr));
}
HEAP_TEST(InvalidatedSlotsNoInvalidatedRanges) {
CcTest::InitializeVM();
Heap* heap = CcTest::heap();