cppgc: Port backing store compaction.

This CL ports the existing backing store compaction algorithm from
blink. It does not attempt to improve on the existing algorithm.

Currently only unified heap uses the compaction implementation. It is
never triggered through standalone GCs.

The compaction implementation resides within cppgc's internal namespace.

Bug: v8:10990
Change-Id: I4aa781db1b711e7aafc34234c4fb142de84394d7
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2485228
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Anton Bikineev <bikineev@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70714}
Omer Katz 2020-10-22 16:35:20 +02:00 committed by Commit Bot
parent 46be10d188
commit 90ea9b35cb
28 changed files with 1051 additions and 27 deletions


@ -4385,6 +4385,10 @@ v8_source_set("cppgc_base") {
"include/cppgc/visitor.h",
"include/v8config.h",
"src/heap/cppgc/allocation.cc",
"src/heap/cppgc/compaction-worklists.cc",
"src/heap/cppgc/compaction-worklists.h",
"src/heap/cppgc/compactor.cc",
"src/heap/cppgc/compactor.h",
"src/heap/cppgc/concurrent-marker.cc",
"src/heap/cppgc/concurrent-marker.h",
"src/heap/cppgc/free-list.cc",


@ -14,8 +14,6 @@ struct CustomSpaceIndex {
size_t value;
};
enum class CustomSpaceCompactability { kNotCompactable, kCompactable };
/**
* Top-level base class for custom spaces. Users must inherit from CustomSpace
* below.
@ -66,6 +64,28 @@ struct SpaceTrait {
using Space = void;
};
namespace internal {
template <typename CustomSpace>
struct IsAllocatedOnCompactableSpaceImpl {
static constexpr bool value = CustomSpace::kSupportsCompaction;
};
template <>
struct IsAllocatedOnCompactableSpaceImpl<void> {
// Non-custom spaces are by default not compactable.
static constexpr bool value = false;
};
template <typename T>
struct IsAllocatedOnCompactableSpace {
public:
static constexpr bool value =
IsAllocatedOnCompactableSpaceImpl<typename SpaceTrait<T>::Space>::value;
};
} // namespace internal
} // namespace cppgc
#endif // INCLUDE_CPPGC_CUSTOM_SPACE_H_
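
For orientation, a minimal sketch (not part of the CL) of how the new trait resolves for a user type; MyCompactableSpace and MyGCed are hypothetical names, mirroring the pattern used by compactor-unittest.cc later in this CL.

#include "cppgc/custom-space.h"

// Illustrative only: a custom space that opts into compaction and a type
// mapped onto it via a SpaceTrait specialization.
class MyCompactableSpace : public cppgc::CustomSpace<MyCompactableSpace> {
 public:
  static constexpr size_t kSpaceIndex = 0;
  static constexpr bool kSupportsCompaction = true;
};

class MyGCed;  // Some GarbageCollected type (assumed).

namespace cppgc {
template <>
struct SpaceTrait<MyGCed> {
  using Space = MyCompactableSpace;
};
}  // namespace cppgc

static_assert(cppgc::internal::IsAllocatedOnCompactableSpace<MyGCed>::value,
              "MyGCed lives on a compactable custom space");
// For types without a SpaceTrait specialization, Space defaults to void and
// the trait resolves to false.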


@ -24,7 +24,7 @@ class MemberBase {
MemberBase() = default;
explicit MemberBase(void* value) : raw_(value) {}
void* const* GetRawSlot() const { return &raw_; }
void** GetRawSlot() const { return &raw_; }
void* GetRaw() const { return raw_; }
void SetRaw(void* value) { raw_ = value; }
@ -178,6 +178,10 @@ class BasicMember final : private MemberBase, private CheckingPolicy {
return result;
}
const T** GetSlotForTesting() const {
return reinterpret_cast<const T**>(const_cast<const void**>(GetRawSlot()));
}
private:
T* GetRawAtomic() const {
return static_cast<T*>(MemberBase::GetRawAtomic());


@ -5,6 +5,7 @@
#ifndef INCLUDE_CPPGC_VISITOR_H_
#define INCLUDE_CPPGC_VISITOR_H_
#include "cppgc/custom-space.h"
#include "cppgc/ephemeron-pair.h"
#include "cppgc/garbage-collected.h"
#include "cppgc/internal/logging.h"
@ -13,6 +14,7 @@
#include "cppgc/member.h"
#include "cppgc/source-location.h"
#include "cppgc/trace-trait.h"
#include "cppgc/type-traits.h"
namespace cppgc {
@ -26,7 +28,6 @@ class BasicPersistent;
class ConservativeTracingVisitor;
class VisitorBase;
class VisitorFactory;
} // namespace internal
using WeakCallback = void (*)(const LivenessBroker&, const void*);
@ -82,6 +83,8 @@ class V8_EXPORT Visitor {
static_assert(sizeof(T), "Pointee type must be fully defined.");
static_assert(internal::IsGarbageCollectedType<T>::value,
"T must be GarbageCollected or GarbageCollectedMixin type");
static_assert(!internal::IsAllocatedOnCompactableSpace<T>::value,
"Weak references to compactable objects are not allowed");
const T* value = weak_member.GetRawAtomic();
@ -176,6 +179,22 @@ class V8_EXPORT Visitor {
data);
}
/**
* Registers a slot containing a reference to an object allocated on a
* compactable space. Such references may be arbitrarily moved by the GC.
*
* \param slot location of reference to object that might be moved by the GC.
*/
template <typename T>
void RegisterMovableReference(const T** slot) {
static_assert(internal::IsAllocatedOnCompactableSpace<T>::value,
"Only references to objects allocated on compactable spaces "
"should be registered as movable slots.");
static_assert(!internal::IsGarbageCollectedMixinTypeV<T>,
"Mixin types do not support compaction.");
HandleMovableReference(reinterpret_cast<const void**>(slot));
}
/**
* Registers a weak callback that is invoked during garbage collection.
*
@ -214,6 +233,7 @@ class V8_EXPORT Visitor {
virtual void VisitWeakContainer(const void* self, TraceDescriptor strong_desc,
TraceDescriptor weak_desc,
WeakCallback callback, const void* data) {}
virtual void HandleMovableReference(const void**) {}
private:
template <typename T, void (T::*method)(const LivenessBroker&)>
@ -261,6 +281,8 @@ class V8_EXPORT Visitor {
static_assert(internal::IsGarbageCollectedType<PointeeType>::value,
"Persistent's pointee type must be GarbageCollected or "
"GarbageCollectedMixin");
static_assert(!internal::IsAllocatedOnCompactableSpace<PointeeType>::value,
"Weak references to compactable objects are not allowed");
VisitWeakRoot(p.Get(), TraceTrait<PointeeType>::GetTraceDescriptor(p.Get()),
&HandleWeak<WeakPersistent>, &p, loc);
}
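
A hedged sketch of the embedder-facing side of the new API, mirroring CompactableGCed/CompactableCustomSpace in compactor-unittest.cc below; NodeSpace and Node are illustrative names.

#include "cppgc/custom-space.h"
#include "cppgc/garbage-collected.h"
#include "cppgc/member.h"
#include "cppgc/visitor.h"

// Illustrative only: a type on a compactable custom space whose Trace method
// registers the slot of a Member so the compactor can rewrite it on move.
class NodeSpace : public cppgc::CustomSpace<NodeSpace> {
 public:
  static constexpr size_t kSpaceIndex = 0;
  static constexpr bool kSupportsCompaction = true;
};

class Node;

namespace cppgc {
template <>
struct SpaceTrait<Node> {  // Allocate Node on the compactable NodeSpace.
  using Space = NodeSpace;
};
}  // namespace cppgc

class Node final : public cppgc::GarbageCollected<Node> {
 public:
  void Trace(cppgc::Visitor* visitor) const {
    visitor->Trace(next_);
    // Record the slot holding |next_| so that compaction can rewrite it when
    // the pointee is moved. GetSlotForTesting() is the accessor added to
    // Member in this CL; embedders would expose their own slot accessor.
    visitor->RegisterMovableReference(next_.GetSlotForTesting());
  }

 private:
  cppgc::Member<Node> next_;
};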


@ -177,6 +177,12 @@ void CppHeap::TracePrologue(TraceFlags flags) {
UnifiedHeapMarker::MarkingConfig::CollectionType::kMajor,
cppgc::Heap::StackState::kNoHeapPointers,
UnifiedHeapMarker::MarkingConfig::MarkingType::kIncrementalAndConcurrent};
if ((flags == TraceFlags::kReduceMemory) || (flags == TraceFlags::kForced)) {
// Only enable compaction when in a memory reduction garbage collection as
// it may significantly increase the final garbage collection pause.
compactor_.InitializeIfShouldCompact(marking_config.marking_type,
marking_config.stack_state);
}
marker_ =
cppgc::internal::MarkerFactory::CreateAndStartMarking<UnifiedHeapMarker>(
*isolate_.heap(), AsBase(), platform_.get(), marking_config);
@ -194,7 +200,12 @@ bool CppHeap::AdvanceTracing(double deadline_in_ms) {
bool CppHeap::IsTracingDone() { return marking_done_; }
void CppHeap::EnterFinalPause(EmbedderStackState stack_state) {
marker_->EnterAtomicPause(cppgc::Heap::StackState::kNoHeapPointers);
marker_->EnterAtomicPause(stack_state);
if (compactor_.CancelIfShouldNotCompact(
UnifiedHeapMarker::MarkingConfig::MarkingType::kAtomic,
stack_state)) {
marker_->NotifyCompactionCancelled();
}
}
void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
@ -213,10 +224,15 @@ void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
UnifiedHeapMarkingVerifier verifier(*this);
verifier.Run(cppgc::Heap::StackState::kNoHeapPointers);
#endif
cppgc::internal::Sweeper::SweepingConfig::CompactableSpaceHandling
compactable_space_handling = compactor_.CompactSpacesIfEnabled();
{
NoGCScope no_gc(*this);
sweeper().Start(
cppgc::internal::Sweeper::Config::kIncrementalAndConcurrent);
const cppgc::internal::Sweeper::SweepingConfig sweeping_config{
cppgc::internal::Sweeper::SweepingConfig::SweepingType::
kIncrementalAndConcurrent,
compactable_space_handling};
sweeper().Start(sweeping_config);
}
}


@ -48,6 +48,12 @@ void UnifiedHeapMarkingVisitorBase::RegisterWeakCallback(WeakCallback callback,
marking_state_.RegisterWeakCallback(callback, object);
}
void UnifiedHeapMarkingVisitorBase::HandleMovableReference(const void** slot) {
auto* worklist = marking_state_.movable_slots_worklist();
if (!worklist) return;
worklist->Push(slot);
}
namespace {
void DeferredTraceJSMember(cppgc::Visitor* visitor, const void* ref) {
static_cast<JSVisitor*>(visitor)->Trace(


@ -48,6 +48,7 @@ class V8_EXPORT_PRIVATE UnifiedHeapMarkingVisitorBase : public JSVisitor {
TraceDescriptor weak_desc, WeakCallback callback,
const void* data) final;
void RegisterWeakCallback(WeakCallback, const void*) final;
void HandleMovableReference(const void**) final;
// JS handling.
void Visit(const internal::JSMemberBase& ref) final;


@ -0,0 +1,14 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/cppgc/compaction-worklists.h"
namespace cppgc {
namespace internal {
void CompactionWorklists::ClearForTesting() { movable_slots_worklist_.Clear(); }
} // namespace internal
} // namespace cppgc


@ -0,0 +1,35 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_CPPGC_COMPACTION_WORKLISTS_H_
#define V8_HEAP_CPPGC_COMPACTION_WORKLISTS_H_
#include <unordered_set>
#include "src/heap/base/worklist.h"
namespace cppgc {
namespace internal {
class CompactionWorklists {
public:
using MovableReference = const void*;
using MovableReferencesWorklist =
heap::base::Worklist<MovableReference*, 256 /* local entries */>;
MovableReferencesWorklist* movable_slots_worklist() {
return &movable_slots_worklist_;
}
void ClearForTesting();
private:
MovableReferencesWorklist movable_slots_worklist_;
};
} // namespace internal
} // namespace cppgc
#endif // V8_HEAP_CPPGC_COMPACTION_WORKLISTS_H_
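
This worklist is filled by the marking visitors' HandleMovableReference() overrides and drained by Compactor::CompactSpacesIfEnabled(). A minimal sketch of the push/publish/pop contract, independent of that plumbing; Example() is a hypothetical function.

#include "src/heap/cppgc/compaction-worklists.h"

// Sketch of the producer/consumer contract. The real producers are the
// marking visitors; the real consumer is the compactor at the atomic pause.
void Example(cppgc::internal::CompactionWorklists& worklists) {
  using Worklist =
      cppgc::internal::CompactionWorklists::MovableReferencesWorklist;

  // Producer: each marking thread owns a Local that buffers recorded slots.
  static const void* object = nullptr;
  Worklist::Local producer(worklists.movable_slots_worklist());
  producer.Push(&object);  // A slot that may need rewriting on move.
  producer.Publish();      // Make buffered entries globally visible.

  // Consumer: drain everything that was published.
  Worklist::Local consumer(worklists.movable_slots_worklist());
  const void** slot;
  while (consumer.Pop(&slot)) {
    // Each |slot| would be handed to MovableReferences::AddOrFilter().
  }
}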

src/heap/cppgc/compactor.cc (new file, 505 lines)

@ -0,0 +1,505 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/cppgc/compactor.h"
#include <map>
#include <numeric>
#include <unordered_map>
#include <unordered_set>
#include "include/cppgc/macros.h"
#include "src/heap/cppgc/compaction-worklists.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-space.h"
#include "src/heap/cppgc/raw-heap.h"
namespace cppgc {
namespace internal {
namespace {
// Freelist size threshold that must be exceeded before compaction
// should be considered.
static constexpr size_t kFreeListSizeThreshold = 512 * kKB;
// The real worker behind heap compaction, recording references to movable
// objects ("slots".) When the objects end up being compacted and moved,
// relocate() will adjust the slots to point to the new location of the
// object along with handling references for interior pointers.
//
// The MovableReferences object is created and maintained for the lifetime
// of one heap compaction-enhanced GC.
class MovableReferences final {
using MovableReference = CompactionWorklists::MovableReference;
public:
explicit MovableReferences(HeapBase& heap) : heap_(heap) {}
// Adds a slot for compaction. Filters slots in dead objects.
void AddOrFilter(MovableReference*);
// Relocates a backing store |from| -> |to|.
void Relocate(Address from, Address to);
// Relocates interior slots in a backing store that is moved |from| -> |to|.
void RelocateInteriorReferences(Address from, Address to, size_t size);
// Updates the collection of callbacks from the items pushed to the worklist by
// marking visitors.
void UpdateCallbacks();
private:
HeapBase& heap_;
// Map from movable reference (value) to its slot. Upon moving an object, the
// slot pointing to it requires updating. Each movable reference should
// currently have only a single slot registered for it.
std::unordered_map<MovableReference, MovableReference*> movable_references_;
// Map of interior slots to their final location. Needs to be an ordered map
// as it is used to walk through slots starting at a given memory address.
// Requires log(n) lookup to make the early bailout reasonably fast.
//
// - The initial value for a given key is nullptr.
// - Upon moving an object this value is adjusted accordingly.
std::map<MovableReference*, Address> interior_movable_references_;
#if DEBUG
// The following two collections are used to allow referring back from a slot
// to an already moved object.
std::unordered_set<const void*> moved_objects_;
std::unordered_map<MovableReference*, MovableReference>
interior_slot_to_object_;
#endif // DEBUG
};
void MovableReferences::AddOrFilter(MovableReference* slot) {
const BasePage* slot_page = BasePage::FromInnerAddress(&heap_, slot);
CHECK_NOT_NULL(slot_page);
const void* value = *slot;
if (!value) return;
// All slots and values are part of Oilpan's heap.
// - Slots may be contained within dead objects if e.g. the write barrier
// registered the slot while the backing itself has not been marked live in
// time. Slots in dead objects are filtered below.
// - Values may only be contained in or point to live objects.
const HeapObjectHeader& slot_header =
slot_page->ObjectHeaderFromInnerAddress(slot);
// Filter the slot since the object that contains the slot is dead.
if (!slot_header.IsMarked()) return;
const BasePage* value_page = BasePage::FromInnerAddress(&heap_, value);
CHECK_NOT_NULL(value_page);
// The following cases are not compacted and do not require recording:
// - Compactable object on large pages.
// - Compactable object on non-compactable spaces.
if (value_page->is_large() || !value_page->space()->is_compactable()) return;
// Slots must reside in and values must point to live objects at this
// point. |value| usually points to a separate object but can also point
// to an interior pointer within the same object storage, which is why the
// dynamic header lookup is required.
const HeapObjectHeader& value_header =
value_page->ObjectHeaderFromInnerAddress(value);
CHECK(value_header.IsMarked());
// Slots may have been recorded already but must point to the same value.
auto reference_it = movable_references_.find(value);
if (V8_UNLIKELY(reference_it != movable_references_.end())) {
CHECK_EQ(slot, reference_it->second);
return;
}
// Add regular movable reference.
movable_references_.emplace(value, slot);
// Check whether the slot itself resides on a page that is compacted.
if (V8_LIKELY(!slot_page->space()->is_compactable())) return;
CHECK_EQ(interior_movable_references_.end(),
interior_movable_references_.find(slot));
interior_movable_references_.emplace(slot, nullptr);
#if DEBUG
interior_slot_to_object_.emplace(slot, slot_header.Payload());
#endif // DEBUG
}
void MovableReferences::Relocate(Address from, Address to) {
#if DEBUG
moved_objects_.insert(from);
#endif // DEBUG
// Interior slots always need to be processed for moved objects.
// Consider an object A with slot A.x pointing to value B where A is
// allocated on a movable page itself. When B is finally moved, it needs to
// find the corresponding slot A.x. Object A may already have been moved and
// its memory freed, in which case accessing the stale slot would crash.
if (!interior_movable_references_.empty()) {
const HeapObjectHeader& header = HeapObjectHeader::FromPayload(to);
const size_t size = header.GetSize() - sizeof(HeapObjectHeader);
RelocateInteriorReferences(from, to, size);
}
auto it = movable_references_.find(from);
// This means that there is no corresponding slot for a live object.
// This may happen when a mutator changes the slot to point to a different
// object, e.g. because incremental marking marked an object as live that
// was later replaced.
if (it == movable_references_.end()) {
return;
}
// If the object is referenced by a slot that is contained on a compacted
// area itself, check whether it can be updated already.
MovableReference* slot = it->second;
auto interior_it = interior_movable_references_.find(slot);
if (interior_it != interior_movable_references_.end()) {
MovableReference* slot_location =
reinterpret_cast<MovableReference*>(interior_it->second);
if (!slot_location) {
interior_it->second = to;
#if DEBUG
// Check that the containing object has not been moved yet.
auto reverse_it = interior_slot_to_object_.find(slot);
DCHECK_NE(interior_slot_to_object_.end(), reverse_it);
DCHECK_EQ(moved_objects_.end(), moved_objects_.find(reverse_it->second));
#endif // DEBUG
} else {
slot = slot_location;
}
}
// Compaction is atomic, so the slot should not have been updated during
// compaction.
DCHECK_EQ(from, *slot);
// Update the slot to its new value.
*slot = to;
}
void MovableReferences::RelocateInteriorReferences(Address from, Address to,
size_t size) {
// |from| is a valid address for a slot.
auto interior_it = interior_movable_references_.lower_bound(
reinterpret_cast<MovableReference*>(from));
if (interior_it == interior_movable_references_.end()) return;
DCHECK_GE(reinterpret_cast<Address>(interior_it->first), from);
size_t offset = reinterpret_cast<Address>(interior_it->first) - from;
while (offset < size) {
if (!interior_it->second) {
// Update the interior reference value, so that when the object the slot
// points to is moved, it can reuse this value.
Address reference = to + offset;
interior_it->second = reference;
// If the |slot|'s content is pointing into the region [from, from +
// size) we are dealing with an interior pointer that does not point to
// a valid HeapObjectHeader. Such references need to be fixed up
// immediately.
Address& reference_contents = *reinterpret_cast<Address*>(refernece);
if (reference_contents > from && reference_contents < (from + size)) {
reference_contents = reference_contents - from + to;
}
}
interior_it++;
if (interior_it == interior_movable_references_.end()) return;
offset = reinterpret_cast<Address>(interior_it->first) - from;
}
}
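// Worked example (illustrative addresses, not from the CL): suppose an object
// moves from 0x1000 to 0x3000 with size 0x100 and an interior slot was
// registered at 0x1040. The slot now lives at to + (0x1040 - from) = 0x3040;
// that address is recorded above so a later Relocate() of the slot's pointee
// can update the slot at its new location. If the copied slot contents at
// 0x3040 still point into the old range [0x1000, 0x1100), e.g. 0x1080, they
// are rebased immediately to 0x1080 - from + to = 0x3080, since such
// self-referencing interior pointers cannot be resolved via a header lookup
// later.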
class CompactionState final {
CPPGC_STACK_ALLOCATED();
using Pages = std::vector<NormalPage*>;
public:
CompactionState(NormalPageSpace* space, MovableReferences& movable_references)
: space_(space), movable_references_(movable_references) {}
void AddPage(NormalPage* page) {
DCHECK_EQ(space_, page->space());
// Use the first page as the current compaction target; add subsequent pages
// onto the available pages chain.
if (!current_page_)
current_page_ = page;
else
available_pages_.push_back(page);
}
void RelocateObject(const NormalPage* page, const Address header,
size_t size) {
// Allocate and copy over the live object.
Address compact_frontier =
current_page_->PayloadStart() + used_bytes_in_current_page_;
if (compact_frontier + size > current_page_->PayloadEnd()) {
// Can't fit on current page. Add remaining onto the freelist and advance
// to next available page.
ReturnCurrentPageToSpace();
current_page_ = available_pages_.back();
available_pages_.pop_back();
used_bytes_in_current_page_ = 0;
compact_frontier = current_page_->PayloadStart();
}
if (V8_LIKELY(compact_frontier != header)) {
// Use a non-overlapping copy, if possible.
if (current_page_ == page)
memmove(compact_frontier, header, size);
else
memcpy(compact_frontier, header, size);
movable_references_.Relocate(header + sizeof(HeapObjectHeader),
compact_frontier + sizeof(HeapObjectHeader));
}
current_page_->object_start_bitmap().SetBit(compact_frontier);
used_bytes_in_current_page_ += size;
DCHECK_LE(used_bytes_in_current_page_, current_page_->PayloadSize());
}
void FinishCompactingSpace() {
// If the current page hasn't been allocated into, add it to the available
// list, for subsequent release below.
if (used_bytes_in_current_page_ == 0) {
available_pages_.push_back(current_page_);
} else {
ReturnCurrentPageToSpace();
}
// Return remaining available pages to the free page pool, decommitting
// them from the pagefile.
for (NormalPage* page : available_pages_) {
SET_MEMORY_INACCESSIBLE(page->PayloadStart(), page->PayloadSize());
NormalPage::Destroy(page);
}
}
void FinishCompactingPage(NormalPage* page) {
#if DEBUG || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
defined(MEMORY_SANITIZER)
// Zap the unused portion, until it is either compacted into or freed.
if (current_page_ != page) {
ZapMemory(page->PayloadStart(), page->PayloadSize());
} else {
ZapMemory(page->PayloadStart() + used_bytes_in_current_page_,
page->PayloadSize() - used_bytes_in_current_page_);
}
#endif
}
private:
void ReturnCurrentPageToSpace() {
DCHECK_EQ(space_, current_page_->space());
space_->AddPage(current_page_);
if (used_bytes_in_current_page_ != current_page_->PayloadSize()) {
// Put the remainder of the page onto the free list.
size_t freed_size =
current_page_->PayloadSize() - used_bytes_in_current_page_;
Address payload = current_page_->PayloadStart();
Address free_start = payload + used_bytes_in_current_page_;
SET_MEMORY_INACCESSIBLE(free_start, freed_size);
space_->free_list().Add({free_start, freed_size});
current_page_->object_start_bitmap().SetBit(free_start);
}
}
NormalPageSpace* space_;
MovableReferences& movable_references_;
// Page into which compacted objects will be written.
NormalPage* current_page_ = nullptr;
// Offset into |current_page_| to the next free address.
size_t used_bytes_in_current_page_ = 0;
// Additional pages in the current space that can be used as compaction
// targets. Pages that remain available at the end of compaction can be
// released.
Pages available_pages_;
};
void CompactPage(NormalPage* page, CompactionState& compaction_state) {
compaction_state.AddPage(page);
page->object_start_bitmap().Clear();
for (Address header_address = page->PayloadStart();
header_address < page->PayloadEnd();) {
HeapObjectHeader* header =
reinterpret_cast<HeapObjectHeader*>(header_address);
size_t size = header->GetSize();
DCHECK_GT(size, 0u);
DCHECK_LT(size, kPageSize);
if (header->IsFree()) {
// Unpoison the freelist entry so that we can compact into it as wanted.
ASAN_UNPOISON_MEMORY_REGION(header_address, size);
header_address += size;
continue;
}
if (!header->IsMarked()) {
// Compaction is currently launched only from AtomicPhaseEpilogue, so it's
// guaranteed to be on the mutator thread - no need to postpone
// finalization.
header->Finalize();
// As compaction is under way, leave the freed memory accessible
// while compacting the rest of the page. We just zap the payload
// to catch out other finalizers trying to access it.
#if DEBUG || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
defined(MEMORY_SANITIZER)
ZapMemory(header, size);
#endif
header_address += size;
continue;
}
// Object is marked.
#if !defined(CPPGC_YOUNG_GENERATION)
header->Unmark();
#endif
compaction_state.RelocateObject(page, header_address, size);
header_address += size;
}
compaction_state.FinishCompactingPage(page);
}
void CompactSpace(NormalPageSpace* space,
MovableReferences& movable_references) {
using Pages = NormalPageSpace::Pages;
DCHECK(space->is_compactable());
space->free_list().Clear();
// Compaction generally follows Jonker's algorithm for fast garbage
// compaction. Compaction is performed in-place, sliding objects down over
// unused holes for a smaller heap page footprint and improved locality. A
// "compaction pointer" is consequently kept, pointing to the next available
// address to move objects down to. It will belong to one of the already
// compacted pages for this space, but as compaction proceeds, it will not
// belong to the same page as the one being currently compacted.
//
// The compaction pointer is represented by the
// |(current_page_, used_bytes_in_current_page_)| pair, with
// |used_bytes_in_current_page_| being the offset into |current_page_|, making
// up the next available location. When the compaction of an arena page causes
// the compaction pointer to exhaust the current page it is compacting into,
// page compaction will advance the current page of the compaction
// pointer, as well as the allocation point.
//
// By construction, the page compaction can be performed without having
// to allocate any new pages. So to arrange for the page compaction's
// supply of freed, available pages, we chain them together after each
// has been "compacted from". The page compaction will then reuse those
// as needed, and once finished, the chained, available pages can be
// released back to the OS.
//
// To ease the passing of the compaction state when iterating over an
// arena's pages, package it up into a |CompactionState|.
Pages pages = space->RemoveAllPages();
if (pages.empty()) return;
CompactionState compaction_state(space, movable_references);
for (BasePage* page : pages) {
// Large objects do not belong to this arena.
CompactPage(NormalPage::From(page), compaction_state);
}
compaction_state.FinishCompactingSpace();
// Sweeping will verify the object start bitmap of the compacted space.
}
size_t UpdateHeapResidency(const std::vector<NormalPageSpace*>& spaces) {
return std::accumulate(spaces.cbegin(), spaces.cend(), 0u,
[](size_t acc, const NormalPageSpace* space) {
DCHECK(space->is_compactable());
if (!space->size()) return acc;
return acc + space->free_list().Size();
});
}
} // namespace
Compactor::Compactor(RawHeap& heap) : heap_(heap) {
for (auto& space : heap_) {
if (!space->is_compactable()) continue;
DCHECK_EQ(&heap, space->raw_heap());
compactable_spaces_.push_back(static_cast<NormalPageSpace*>(space.get()));
}
}
bool Compactor::ShouldCompact(
GarbageCollector::Config::MarkingType marking_type,
GarbageCollector::Config::StackState stack_state) {
if (compactable_spaces_.empty() ||
(marking_type == GarbageCollector::Config::MarkingType::kAtomic &&
stack_state ==
GarbageCollector::Config::StackState::kMayContainHeapPointers)) {
// The following check ensures that tests that want to test compaction are
// not interrupted by garbage collections that cannot use compaction.
DCHECK(!enable_for_next_gc_for_testing_);
return false;
}
if (enable_for_next_gc_for_testing_) {
return true;
}
size_t free_list_size = UpdateHeapResidency(compactable_spaces_);
return free_list_size > kFreeListSizeThreshold;
}
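// Rough illustration of the heuristic above (numbers made up): with two
// compactable custom spaces whose free lists hold 300 KiB and 400 KiB,
// UpdateHeapResidency() yields 700 KiB > kFreeListSizeThreshold (512 KiB), so
// compaction is considered; at 100 KiB + 200 KiB = 300 KiB it is skipped.
// Atomic GCs with kMayContainHeapPointers never compact, as conservatively
// found on-stack references cannot be relocated.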
void Compactor::InitializeIfShouldCompact(
GarbageCollector::Config::MarkingType marking_type,
GarbageCollector::Config::StackState stack_state) {
DCHECK(!is_enabled_);
if (!ShouldCompact(marking_type, stack_state)) return;
compaction_worklists_ = std::make_unique<CompactionWorklists>();
is_enabled_ = true;
enable_for_next_gc_for_testing_ = false;
}
bool Compactor::CancelIfShouldNotCompact(
GarbageCollector::Config::MarkingType marking_type,
GarbageCollector::Config::StackState stack_state) {
if (!is_enabled_ || ShouldCompact(marking_type, stack_state)) return false;
DCHECK_NOT_NULL(compaction_worklists_);
compaction_worklists_->movable_slots_worklist()->Clear();
compaction_worklists_.reset();
is_enabled_ = false;
return true;
}
Compactor::CompactableSpaceHandling Compactor::CompactSpacesIfEnabled() {
if (!is_enabled_) return CompactableSpaceHandling::kSweep;
MovableReferences movable_references(*heap_.heap());
CompactionWorklists::MovableReferencesWorklist::Local local(
compaction_worklists_->movable_slots_worklist());
CompactionWorklists::MovableReference* slot;
while (local.Pop(&slot)) {
movable_references.AddOrFilter(slot);
}
compaction_worklists_.reset();
for (NormalPageSpace* space : compactable_spaces_) {
CompactSpace(space, movable_references);
}
is_enabled_ = false;
return CompactableSpaceHandling::kIgnore;
}
} // namespace internal
} // namespace cppgc


@ -0,0 +1,56 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_CPPGC_COMPACTOR_H_
#define V8_HEAP_CPPGC_COMPACTOR_H_
#include "src/heap/cppgc/compaction-worklists.h"
#include "src/heap/cppgc/garbage-collector.h"
#include "src/heap/cppgc/raw-heap.h"
namespace cppgc {
namespace internal {
class V8_EXPORT_PRIVATE Compactor final {
using CompactableSpaceHandling =
Sweeper::SweepingConfig::CompactableSpaceHandling;
public:
explicit Compactor(RawHeap&);
~Compactor() { DCHECK(!is_enabled_); }
void InitializeIfShouldCompact(GarbageCollector::Config::MarkingType,
GarbageCollector::Config::StackState);
// Returns true if compaction was cancelled.
bool CancelIfShouldNotCompact(GarbageCollector::Config::MarkingType,
GarbageCollector::Config::StackState);
CompactableSpaceHandling CompactSpacesIfEnabled();
CompactionWorklists* compaction_worklists() {
return compaction_worklists_.get();
}
void EnableForNextGCForTesting() { enable_for_next_gc_for_testing_ = true; }
bool IsEnabledForTesting() const { return is_enabled_; }
private:
bool ShouldCompact(GarbageCollector::Config::MarkingType,
GarbageCollector::Config::StackState);
RawHeap& heap_;
// Compactor does not own the compactable spaces. The heap owns all spaces.
std::vector<NormalPageSpace*> compactable_spaces_;
std::unique_ptr<CompactionWorklists> compaction_worklists_;
bool is_enabled_ = false;
bool enable_for_next_gc_for_testing_ = false;
};
} // namespace internal
} // namespace cppgc
#endif // V8_HEAP_CPPGC_COMPACTOR_H_
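
For reference, a hedged sketch of how a GC cycle drives this interface, consolidating the call sites added elsewhere in this CL (cpp-heap.cc, heap.cc); RunGCCycle is a hypothetical function and marker setup/finalization is elided.

#include "src/heap/cppgc/compactor.h"
#include "src/heap/cppgc/garbage-collector.h"
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/sweeper.h"

// Sketch only; mirrors the call sequence this CL wires into the GC.
void RunGCCycle(cppgc::internal::HeapBase& heap,
                cppgc::internal::GarbageCollector::Config config) {
  using Sweeper = cppgc::internal::Sweeper;

  // 1. Before marking: decide whether this GC compacts. This allocates the
  //    compaction worklists that marking visitors feed.
  heap.compactor().InitializeIfShouldCompact(config.marking_type,
                                             config.stack_state);

  // ... marking runs; visitors record slots via HandleMovableReference() ...

  // 2. At the atomic pause: bail out if the final stack state rules
  //    compaction out (e.g. a conservative stack scan). The marker then drops
  //    its movable-slots worklist, see MarkerBase::NotifyCompactionCancelled().
  heap.compactor().CancelIfShouldNotCompact(
      cppgc::internal::GarbageCollector::Config::MarkingType::kAtomic,
      config.stack_state);

  // 3. After marking finishes: move objects and fix up all recorded slots.
  const Sweeper::SweepingConfig::CompactableSpaceHandling handling =
      heap.compactor().CompactSpacesIfEnabled();

  // 4. Sweeping skips the already-compacted spaces when |handling| is kIgnore.
  heap.sweeper().Start(
      {Sweeper::SweepingConfig::SweepingType::kIncrementalAndConcurrent,
       handling});
}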


@ -74,7 +74,8 @@ void ConcurrentMarkingTask::Run(JobDelegate* job_delegate) {
if (!HasWorkForConcurrentMarking(concurrent_marker_.marking_worklists()))
return;
ConcurrentMarkingState concurrent_marking_state(
concurrent_marker_.heap(), concurrent_marker_.marking_worklists());
concurrent_marker_.heap(), concurrent_marker_.marking_worklists(),
concurrent_marker_.heap().compactor().compaction_worklists());
std::unique_ptr<Visitor> concurrent_marking_visitor =
concurrent_marker_.CreateConcurrentMarkingVisitor(
concurrent_marking_state);
@ -186,6 +187,10 @@ void ConcurrentMarkerBase::JoinForTesting() {
concurrent_marking_handle_->Join();
}
bool ConcurrentMarkerBase::IsActive() const {
return concurrent_marking_handle_ && concurrent_marking_handle_->IsRunning();
}
ConcurrentMarkerBase::~ConcurrentMarkerBase() {
CHECK_IMPLIES(concurrent_marking_handle_,
!concurrent_marking_handle_->IsValid());


@ -30,6 +30,8 @@ class V8_EXPORT_PRIVATE ConcurrentMarkerBase {
bool NotifyIncrementalMutatorStepCompleted();
bool IsActive() const;
HeapBase& heap() const { return heap_; }
MarkingWorklists& marking_worklists() const { return marking_worklists_; }
IncrementalMarkingSchedule& incremental_marking_schedule() const {


@ -19,7 +19,7 @@ class GarbageCollector {
using CollectionType = Marker::MarkingConfig::CollectionType;
using StackState = cppgc::Heap::StackState;
using MarkingType = Marker::MarkingConfig::MarkingType;
using SweepingType = Sweeper::Config;
using SweepingType = Sweeper::SweepingConfig::SweepingType;
static constexpr Config ConservativeAtomicConfig() {
return {CollectionType::kMajor, StackState::kMayContainHeapPointers,


@ -70,6 +70,7 @@ HeapBase::HeapBase(
stack_(std::make_unique<heap::base::Stack>(
v8::base::Stack::GetStackStart())),
prefinalizer_handler_(std::make_unique<PreFinalizerHandler>()),
compactor_(raw_heap_),
object_allocator_(&raw_heap_, page_backend_.get(),
stats_collector_.get()),
sweeper_(&raw_heap_, platform_.get(), stats_collector_.get()),


@ -12,6 +12,7 @@
#include "include/cppgc/internal/persistent-node.h"
#include "include/cppgc/macros.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/compactor.h"
#include "src/heap/cppgc/marker.h"
#include "src/heap/cppgc/object-allocator.h"
#include "src/heap/cppgc/raw-heap.h"
@ -97,6 +98,8 @@ class V8_EXPORT_PRIVATE HeapBase {
MarkerBase* marker() const { return marker_.get(); }
Compactor& compactor() { return compactor_; }
ObjectAllocator& object_allocator() { return object_allocator_; }
Sweeper& sweeper() { return sweeper_; }
@ -154,6 +157,7 @@ class V8_EXPORT_PRIVATE HeapBase {
std::unique_ptr<PreFinalizerHandler> prefinalizer_handler_;
std::unique_ptr<MarkerBase> marker_;
Compactor compactor_;
ObjectAllocator object_allocator_;
Sweeper sweeper_;


@ -168,7 +168,10 @@ void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
#endif
{
NoGCScope no_gc(*this);
sweeper_.Start(config_.sweeping_type);
const Sweeper::SweepingConfig sweeping_config{
config_.sweeping_type,
Sweeper::SweepingConfig::CompactableSpaceHandling::kSweep};
sweeper_.Start(sweeping_config);
}
gc_in_progress_ = false;
}


@ -158,7 +158,8 @@ MarkerBase::MarkerBase(Key, HeapBase& heap, cppgc::Platform* platform,
config_(config),
platform_(platform),
foreground_task_runner_(platform_->GetForegroundTaskRunner()),
mutator_marking_state_(heap, marking_worklists_) {}
mutator_marking_state_(heap, marking_worklists_,
heap.compactor().compaction_worklists()) {}
MarkerBase::~MarkerBase() {
// The fixed point iteration may have found not-fully-constructed objects.
@ -435,6 +436,8 @@ void MarkerBase::MarkNotFullyConstructedObjects() {
void MarkerBase::ClearAllWorklistsForTesting() {
marking_worklists_.ClearForTesting();
auto* compaction_worklists = heap_.compactor().compaction_worklists();
if (compaction_worklists) compaction_worklists->ClearForTesting();
}
void MarkerBase::DisableIncrementalMarkingForTesting() {
@ -445,6 +448,13 @@ void MarkerBase::WaitForConcurrentMarkingForTesting() {
concurrent_marker_->JoinForTesting();
}
void MarkerBase::NotifyCompactionCancelled() {
// Compaction cannot be cancelled while concurrent marking is active.
DCHECK_EQ(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
DCHECK_IMPLIES(concurrent_marker_, !concurrent_marker_->IsActive());
mutator_marking_state_.NotifyCompactionCancelled();
}
Marker::Marker(Key key, HeapBase& heap, cppgc::Platform* platform,
MarkingConfig config)
: MarkerBase(key, heap, platform, config),


@ -124,6 +124,8 @@ class V8_EXPORT_PRIVATE MarkerBase {
void WaitForConcurrentMarkingForTesting();
void NotifyCompactionCancelled();
protected:
static constexpr v8::base::TimeDelta kMaximumIncrementalStepDuration =
v8::base::TimeDelta::FromMilliseconds(2);


@ -6,6 +6,7 @@
#define V8_HEAP_CPPGC_MARKING_STATE_H_
#include "include/cppgc/trace-trait.h"
#include "src/heap/cppgc/compaction-worklists.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
@ -18,7 +19,8 @@ namespace internal {
// C++ marking implementation.
class MarkingStateBase {
public:
inline MarkingStateBase(HeapBase& heap, MarkingWorklists&);
inline MarkingStateBase(HeapBase& heap, MarkingWorklists&,
CompactionWorklists*);
MarkingStateBase(const MarkingStateBase&) = delete;
MarkingStateBase& operator=(const MarkingStateBase&) = delete;
@ -53,6 +55,7 @@ class MarkingStateBase {
concurrent_marking_bailout_worklist_.Publish();
discovered_ephemeron_pairs_worklist_.Publish();
ephemeron_pairs_for_processing_worklist_.Publish();
if (IsCompactionEnabled()) movable_slots_worklist_->Publish();
}
MarkingWorklists::MarkingWorklist::Local& marking_worklist() {
@ -88,6 +91,17 @@ class MarkingStateBase {
return weak_containers_worklist_;
}
CompactionWorklists::MovableReferencesWorklist::Local*
movable_slots_worklist() {
return movable_slots_worklist_.get();
}
void NotifyCompactionCancelled() {
DCHECK(IsCompactionEnabled());
movable_slots_worklist_->Clear();
movable_slots_worklist_.reset();
}
protected:
inline void MarkAndPush(HeapObjectHeader&, TraceDescriptor);
@ -95,6 +109,10 @@ class MarkingStateBase {
inline void RegisterWeakContainer(HeapObjectHeader&);
inline bool IsCompactionEnabled() const {
return movable_slots_worklist_.get();
}
#ifdef DEBUG
HeapBase& heap_;
#endif // DEBUG
@ -113,12 +131,17 @@ class MarkingStateBase {
MarkingWorklists::EphemeronPairsWorklist::Local
ephemeron_pairs_for_processing_worklist_;
MarkingWorklists::WeakContainersWorklist& weak_containers_worklist_;
// Existence of the worklist (|movable_slots_worklist_| != nullptr) denotes
// that compaction is currently enabled and slots must be recorded.
std::unique_ptr<CompactionWorklists::MovableReferencesWorklist::Local>
movable_slots_worklist_;
size_t marked_bytes_ = 0;
};
MarkingStateBase::MarkingStateBase(HeapBase& heap,
MarkingWorklists& marking_worklists)
MarkingWorklists& marking_worklists,
CompactionWorklists* compaction_worklists)
:
#ifdef DEBUG
heap_(heap),
@ -137,6 +160,11 @@ MarkingStateBase::MarkingStateBase(HeapBase& heap,
ephemeron_pairs_for_processing_worklist_(
marking_worklists.ephemeron_pairs_for_processing_worklist()),
weak_containers_worklist_(*marking_worklists.weak_containers_worklist()) {
if (compaction_worklists) {
movable_slots_worklist_ =
std::make_unique<CompactionWorklists::MovableReferencesWorklist::Local>(
compaction_worklists->movable_slots_worklist());
}
}
void MarkingStateBase::MarkAndPush(const void* object, TraceDescriptor desc) {
@ -260,8 +288,9 @@ void MarkingStateBase::AccountMarkedBytes(size_t marked_bytes) {
class MutatorMarkingState : public MarkingStateBase {
public:
MutatorMarkingState(HeapBase& heap, MarkingWorklists& marking_worklists)
: MarkingStateBase(heap, marking_worklists) {}
MutatorMarkingState(HeapBase& heap, MarkingWorklists& marking_worklists,
CompactionWorklists* compaction_worklists)
: MarkingStateBase(heap, marking_worklists, compaction_worklists) {}
inline bool MarkNoPush(HeapObjectHeader& header) {
return MutatorMarkingState::MarkingStateBase::MarkNoPush(header);
@ -327,8 +356,9 @@ bool MutatorMarkingState::IsMarkedWeakContainer(HeapObjectHeader& header) {
class ConcurrentMarkingState : public MarkingStateBase {
public:
ConcurrentMarkingState(HeapBase& heap, MarkingWorklists& marking_worklists)
: MarkingStateBase(heap, marking_worklists) {}
ConcurrentMarkingState(HeapBase& heap, MarkingWorklists& marking_worklists,
CompactionWorklists* compaction_worklists)
: MarkingStateBase(heap, marking_worklists, compaction_worklists) {}
~ConcurrentMarkingState() { DCHECK_EQ(last_marked_bytes_, marked_bytes_); }


@ -43,6 +43,12 @@ void MarkingVisitorBase::RegisterWeakCallback(WeakCallback callback,
marking_state_.RegisterWeakCallback(callback, object);
}
void MarkingVisitorBase::HandleMovableReference(const void** slot) {
auto* worklist = marking_state_.movable_slots_worklist();
if (!worklist) return;
worklist->Push(slot);
}
ConservativeMarkingVisitor::ConservativeMarkingVisitor(
HeapBase& heap, MutatorMarkingState& marking_state, cppgc::Visitor& visitor)
: ConservativeTracingVisitor(heap, *heap.page_backend(), visitor),


@ -33,6 +33,7 @@ class V8_EXPORT_PRIVATE MarkingVisitorBase : public VisitorBase {
TraceDescriptor weak_desc, WeakCallback callback,
const void* data) final;
void RegisterWeakCallback(WeakCallback, const void*) final;
void HandleMovableReference(const void**) final;
MarkingStateBase& marking_state_;
};


@ -446,10 +446,19 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
// - moves all Heap pages to local Sweeper's state (SpaceStates).
class PrepareForSweepVisitor final
: public HeapVisitor<PrepareForSweepVisitor> {
using CompactableSpaceHandling =
Sweeper::SweepingConfig::CompactableSpaceHandling;
public:
explicit PrepareForSweepVisitor(SpaceStates* states) : states_(states) {}
PrepareForSweepVisitor(SpaceStates* states,
CompactableSpaceHandling compactable_space_handling)
: states_(states),
compactable_space_handling_(compactable_space_handling) {}
bool VisitNormalPageSpace(NormalPageSpace* space) {
if ((compactable_space_handling_ == CompactableSpaceHandling::kIgnore) &&
space->is_compactable())
return true;
DCHECK(!space->linear_allocation_buffer().size());
space->free_list().Clear();
ExtractPages(space);
@ -469,6 +478,7 @@ class PrepareForSweepVisitor final
}
SpaceStates* states_;
CompactableSpaceHandling compactable_space_handling_;
};
} // namespace
@ -485,17 +495,20 @@ class Sweeper::SweeperImpl final {
~SweeperImpl() { CancelSweepers(); }
void Start(Config config) {
void Start(SweepingConfig config) {
is_in_progress_ = true;
#if DEBUG
// Verify bitmap for all spaces regardless of |compactable_space_handling|.
ObjectStartBitmapVerifier().Verify(heap_);
#endif
PrepareForSweepVisitor(&space_states_).Traverse(heap_);
PrepareForSweepVisitor(&space_states_, config.compactable_space_handling)
.Traverse(heap_);
if (config == Config::kAtomic) {
if (config.sweeping_type == SweepingConfig::SweepingType::kAtomic) {
Finish();
} else {
DCHECK_EQ(Config::kIncrementalAndConcurrent, config);
DCHECK_EQ(SweepingConfig::SweepingType::kIncrementalAndConcurrent,
config.sweeping_type);
ScheduleIncrementalSweeping();
ScheduleConcurrentSweeping();
}
@ -620,7 +633,7 @@ Sweeper::Sweeper(RawHeap* heap, cppgc::Platform* platform,
Sweeper::~Sweeper() = default;
void Sweeper::Start(Config config) { impl_->Start(config); }
void Sweeper::Start(SweepingConfig config) { impl_->Start(config); }
void Sweeper::FinishIfRunning() { impl_->FinishIfRunning(); }
void Sweeper::WaitForConcurrentSweepingForTesting() {
impl_->WaitForConcurrentSweepingForTesting();


@ -21,7 +21,14 @@ class ConcurrentSweeperTest;
class V8_EXPORT_PRIVATE Sweeper final {
public:
enum class Config { kAtomic, kIncrementalAndConcurrent };
struct SweepingConfig {
enum class SweepingType : uint8_t { kAtomic, kIncrementalAndConcurrent };
enum class CompactableSpaceHandling { kSweep, kIgnore };
SweepingType sweeping_type = SweepingType::kIncrementalAndConcurrent;
CompactableSpaceHandling compactable_space_handling =
CompactableSpaceHandling::kSweep;
};
Sweeper(RawHeap*, cppgc::Platform*, StatsCollector*);
~Sweeper();
@ -30,7 +37,7 @@ class V8_EXPORT_PRIVATE Sweeper final {
Sweeper& operator=(const Sweeper&) = delete;
// Sweeper::Start assumes the heap holds no linear allocation buffers.
void Start(Config);
void Start(SweepingConfig);
void FinishIfRunning();
private:


@ -80,6 +80,7 @@ v8_source_set("cppgc_unittests_sources") {
testonly = true
sources = [
"heap/cppgc/compactor-unittest.cc",
"heap/cppgc/concurrent-marking-unittest.cc",
"heap/cppgc/concurrent-sweeper-unittest.cc",
"heap/cppgc/cross-thread-persistent-unittest.cc",


@ -0,0 +1,250 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/cppgc/compactor.h"
#include "include/cppgc/allocation.h"
#include "include/cppgc/custom-space.h"
#include "include/cppgc/persistent.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/marker.h"
#include "src/heap/cppgc/stats-collector.h"
#include "test/unittests/heap/cppgc/tests.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace cppgc {
class CompactableCustomSpace : public CustomSpace<CompactableCustomSpace> {
public:
static constexpr size_t kSpaceIndex = 0;
static constexpr bool kSupportsCompaction = true;
};
namespace internal {
namespace {
struct CompactableGCed : public GarbageCollected<CompactableGCed> {
public:
~CompactableGCed() { ++g_destructor_callcount; }
void Trace(Visitor* visitor) const {
visitor->Trace(other);
visitor->RegisterMovableReference(other.GetSlotForTesting());
}
static size_t g_destructor_callcount;
Member<CompactableGCed> other;
size_t id = 0;
};
// static
size_t CompactableGCed::g_destructor_callcount = 0;
template <int kNumObjects>
struct CompactableHolder
: public GarbageCollected<CompactableHolder<kNumObjects>> {
public:
explicit CompactableHolder(cppgc::AllocationHandle& allocation_handle) {
for (int i = 0; i < kNumObjects; ++i)
objects[i] = MakeGarbageCollected<CompactableGCed>(allocation_handle);
}
void Trace(Visitor* visitor) const {
for (int i = 0; i < kNumObjects; ++i) {
visitor->Trace(objects[i]);
visitor->RegisterMovableReference(objects[i].GetSlotForTesting());
}
}
Member<CompactableGCed> objects[kNumObjects];
};
class CompactorTest : public testing::TestWithPlatform {
public:
CompactorTest() {
Heap::HeapOptions options;
options.custom_spaces.emplace_back(
std::make_unique<CompactableCustomSpace>());
heap_ = Heap::Create(platform_, std::move(options));
}
void StartCompaction() {
compactor().EnableForNextGCForTesting();
compactor().InitializeIfShouldCompact(
GarbageCollector::Config::MarkingType::kIncremental,
GarbageCollector::Config::StackState::kNoHeapPointers);
EXPECT_TRUE(compactor().IsEnabledForTesting());
}
void CancelCompaction() {
bool cancelled = compactor().CancelIfShouldNotCompact(
GarbageCollector::Config::MarkingType::kAtomic,
GarbageCollector::Config::StackState::kMayContainHeapPointers);
EXPECT_TRUE(cancelled);
}
void FinishCompaction() { compactor().CompactSpacesIfEnabled(); }
void StartGC() {
CompactableGCed::g_destructor_callcount = 0u;
StartCompaction();
heap()->StartIncrementalGarbageCollection(
GarbageCollector::Config::PreciseIncrementalConfig());
}
void EndGC() {
heap()->marker()->FinishMarking(
GarbageCollector::Config::StackState::kNoHeapPointers);
FinishCompaction();
// Sweeping also verifies the object start bitmap.
const Sweeper::SweepingConfig sweeping_config{
Sweeper::SweepingConfig::SweepingType::kAtomic,
Sweeper::SweepingConfig::CompactableSpaceHandling::kIgnore};
heap()->sweeper().Start(sweeping_config);
}
Heap* heap() { return Heap::From(heap_.get()); }
cppgc::AllocationHandle& GetAllocationHandle() {
return heap_->GetAllocationHandle();
}
Compactor& compactor() { return heap()->compactor(); }
private:
std::unique_ptr<cppgc::Heap> heap_;
};
} // namespace
} // namespace internal
template <>
struct SpaceTrait<internal::CompactableGCed> {
using Space = CompactableCustomSpace;
};
namespace internal {
TEST_F(CompactorTest, NothingToCompact) {
StartCompaction();
FinishCompaction();
}
TEST_F(CompactorTest, CancelledNothingToCompact) {
StartCompaction();
CancelCompaction();
}
TEST_F(CompactorTest, NonEmptySpaceAllLive) {
static constexpr int kNumObjects = 10;
Persistent<CompactableHolder<kNumObjects>> holder =
MakeGarbageCollected<CompactableHolder<kNumObjects>>(
GetAllocationHandle(), GetAllocationHandle());
CompactableGCed* references[kNumObjects] = {nullptr};
for (int i = 0; i < kNumObjects; ++i) {
references[i] = holder->objects[i];
}
StartGC();
EndGC();
EXPECT_EQ(0u, CompactableGCed::g_destructor_callcount);
for (int i = 0; i < kNumObjects; ++i) {
EXPECT_EQ(holder->objects[i], references[i]);
}
}
TEST_F(CompactorTest, NonEmptySpaceAllDead) {
static constexpr int kNumObjects = 10;
Persistent<CompactableHolder<kNumObjects>> holder =
MakeGarbageCollected<CompactableHolder<kNumObjects>>(
GetAllocationHandle(), GetAllocationHandle());
CompactableGCed::g_destructor_callcount = 0u;
StartGC();
for (int i = 0; i < kNumObjects; ++i) {
holder->objects[i] = nullptr;
}
EndGC();
EXPECT_EQ(10u, CompactableGCed::g_destructor_callcount);
}
TEST_F(CompactorTest, NonEmptySpaceHalfLive) {
static constexpr int kNumObjects = 10;
Persistent<CompactableHolder<kNumObjects>> holder =
MakeGarbageCollected<CompactableHolder<kNumObjects>>(
GetAllocationHandle(), GetAllocationHandle());
CompactableGCed* references[kNumObjects] = {nullptr};
for (int i = 0; i < kNumObjects; ++i) {
references[i] = holder->objects[i];
}
StartGC();
for (int i = 0; i < kNumObjects; i += 2) {
holder->objects[i] = nullptr;
}
EndGC();
// Half of the objects were destroyed.
EXPECT_EQ(5u, CompactableGCed::g_destructor_callcount);
// Remaining objects are compacted.
for (int i = 1; i < kNumObjects; i += 2) {
EXPECT_EQ(holder->objects[i], references[i / 2]);
}
}
TEST_F(CompactorTest, CompactAcrossPages) {
Persistent<CompactableHolder<1>> holder =
MakeGarbageCollected<CompactableHolder<1>>(GetAllocationHandle(),
GetAllocationHandle());
CompactableGCed* reference = holder->objects[0];
static constexpr size_t kObjectsPerPage =
kPageSize / (sizeof(CompactableGCed) + sizeof(HeapObjectHeader));
for (size_t i = 0; i < kObjectsPerPage; ++i) {
holder->objects[0] =
MakeGarbageCollected<CompactableGCed>(GetAllocationHandle());
}
// Last allocated object should be on a new page.
EXPECT_NE(reference, holder->objects[0]);
EXPECT_NE(BasePage::FromInnerAddress(heap(), reference),
BasePage::FromInnerAddress(heap(), holder->objects[0].Get()));
StartGC();
EndGC();
// All objects except the last allocated one were destroyed.
EXPECT_EQ(kObjectsPerPage, CompactableGCed::g_destructor_callcount);
EXPECT_EQ(reference, holder->objects[0]);
}
TEST_F(CompactorTest, InteriorSlotToPreviousObject) {
static constexpr int kNumObjects = 3;
Persistent<CompactableHolder<kNumObjects>> holder =
MakeGarbageCollected<CompactableHolder<kNumObjects>>(
GetAllocationHandle(), GetAllocationHandle());
CompactableGCed* references[kNumObjects] = {nullptr};
for (int i = 0; i < kNumObjects; ++i) {
references[i] = holder->objects[i];
}
holder->objects[2]->other = holder->objects[1];
holder->objects[1] = nullptr;
holder->objects[0] = nullptr;
StartGC();
EndGC();
EXPECT_EQ(1u, CompactableGCed::g_destructor_callcount);
EXPECT_EQ(references[1], holder->objects[2]);
EXPECT_EQ(references[0], holder->objects[2]->other);
}
TEST_F(CompactorTest, InteriorSlotToNextObject) {
static constexpr int kNumObjects = 3;
Persistent<CompactableHolder<kNumObjects>> holder =
MakeGarbageCollected<CompactableHolder<kNumObjects>>(
GetAllocationHandle(), GetAllocationHandle());
CompactableGCed* references[kNumObjects] = {nullptr};
for (int i = 0; i < kNumObjects; ++i) {
references[i] = holder->objects[i];
}
holder->objects[1]->other = holder->objects[2];
holder->objects[2] = nullptr;
holder->objects[0] = nullptr;
StartGC();
EndGC();
EXPECT_EQ(1u, CompactableGCed::g_destructor_callcount);
EXPECT_EQ(references[0], holder->objects[1]);
EXPECT_EQ(references[1], holder->objects[1]->other);
}
} // namespace internal
} // namespace cppgc


@ -75,7 +75,10 @@ class ConcurrentSweeperTest : public testing::TestWithHeap {
heap->stats_collector()->NotifyMarkingStarted();
heap->stats_collector()->NotifyMarkingCompleted(0);
Sweeper& sweeper = heap->sweeper();
sweeper.Start(Sweeper::Config::kIncrementalAndConcurrent);
const Sweeper::SweepingConfig sweeping_config{
Sweeper::SweepingConfig::SweepingType::kIncrementalAndConcurrent,
Sweeper::SweepingConfig::CompactableSpaceHandling::kSweep};
sweeper.Start(sweeping_config);
}
void WaitForConcurrentSweeping() {


@ -48,7 +48,10 @@ class SweeperTest : public testing::TestWithHeap {
// methods are called in the right order.
heap->stats_collector()->NotifyMarkingStarted();
heap->stats_collector()->NotifyMarkingCompleted(0);
sweeper.Start(Sweeper::Config::kAtomic);
const Sweeper::SweepingConfig sweeping_config{
Sweeper::SweepingConfig::SweepingType::kAtomic,
Sweeper::SweepingConfig::CompactableSpaceHandling::kSweep};
sweeper.Start(sweeping_config);
sweeper.FinishIfRunning();
}