[heap] Introduce OffThreadSpace
Add a new PagedSpace called OffThreadSpace. This space will be used for off-thread allocation -- it never marks or sweeps, and always expands into fresh pages. Once allocation completes, this space's pages can be merged into the old space.

The space is similar to the CompactionSpace, and merging for both is identical, so we introduce a new LocalSpace base class that both extend. They differ in their interaction with the sweeper and in how new pages are acquired.

This patch adds the new space and uses it in a few unittests. Future work will use it in the main source code.

Bug: chromium:1011762
Change-Id: Ia008cc95c6e1ef1d1e7ae305fa80fbfc1ff4be2d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1873690
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64509}
Commit fa13871a7d (parent b1c1a81faf)
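For context, the intended allocate-then-merge flow can be pieced together from the unittests added in this CL. The following is an illustrative sketch only, not part of the change itself: it condenses the test code below, assumes V8-internal headers and namespaces, a valid Heap*, and that the merge step runs on the main thread. The hypothetical helper AllocateOffThreadAndMerge exists only for this sketch.

// Illustrative sketch, condensed from the OffThreadSpace unittests in this CL.
// Assumes V8-internal headers/namespaces and a valid Heap*.
void AllocateOffThreadAndMerge(Heap* heap) {
  // On the background thread: allocate into a private OffThreadSpace. The
  // space never marks or sweeps and always expands into fresh pages.
  OffThreadSpace off_thread_space(heap);
  HeapObject object =
      off_thread_space.AllocateRawUnaligned(kMaxRegularHeapObjectSize)
          .ToObjectChecked();
  heap->CreateFillerObjectAt(object.address(), kMaxRegularHeapObjectSize,
                             ClearRecordedSlots::kNo);

  // Later, on the main thread: hand the fully linear pages over to the old
  // space. MergeLocalSpace is the same path that CompactionSpace uses.
  heap->old_space()->MergeLocalSpace(&off_thread_space);
}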
@@ -757,11 +757,15 @@ enum MinimumCapacity {
 enum GarbageCollector { SCAVENGER, MARK_COMPACTOR, MINOR_MARK_COMPACTOR };
 
-enum class CompactionSpaceKind {
+enum class LocalSpaceKind {
   kNone,
-  kScavenge,
-  kMarkCompact,
-  kMinorMarkCompact,
+  kOffThreadSpace,
+  kCompactionSpaceForScavenge,
+  kCompactionSpaceForMarkCompact,
+  kCompactionSpaceForMinorMarkCompact,
+
+  kFirstCompactionSpace = kCompactionSpaceForScavenge,
+  kLastCompactionSpace = kCompactionSpaceForMinorMarkCompact,
 };
 
 enum Executability { NOT_EXECUTABLE, EXECUTABLE };
@@ -19,18 +19,17 @@ class LocalAllocator {
   static const int kLabSize = 32 * KB;
   static const int kMaxLabObjectSize = 8 * KB;
 
-  explicit LocalAllocator(Heap* heap, CompactionSpaceKind compaction_space_kind)
+  explicit LocalAllocator(Heap* heap, LocalSpaceKind local_space_kind)
       : heap_(heap),
         new_space_(heap->new_space()),
-        compaction_spaces_(heap, compaction_space_kind),
+        compaction_spaces_(heap, local_space_kind),
         new_space_lab_(LocalAllocationBuffer::InvalidBuffer()),
         lab_allocation_will_fail_(false) {}
 
   // Needs to be called from the main thread to finalize this LocalAllocator.
   void Finalize() {
-    heap_->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
-    heap_->code_space()->MergeCompactionSpace(
-        compaction_spaces_.Get(CODE_SPACE));
+    heap_->old_space()->MergeLocalSpace(compaction_spaces_.Get(OLD_SPACE));
+    heap_->code_space()->MergeLocalSpace(compaction_spaces_.Get(CODE_SPACE));
     // Give back remaining LAB space if this LocalAllocator's new space LAB
     // sits right next to new space allocation top.
     const LinearAllocationArea info = new_space_lab_.Close();
@@ -468,7 +468,6 @@ void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
   evacuation_candidates_.push_back(p);
 }
 
-
 static void TraceFragmentation(PagedSpace* space) {
   int number_of_pages = space->CountTotalPages();
   intptr_t reserved = (number_of_pages * space->AreaSize());
@@ -538,7 +537,6 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
   }
 }
 
-
 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
   for (Page* p : PageRange(space->first_allocatable_address(), space->top())) {
     CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
@@ -786,7 +784,6 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   }
 }
 
-
 void MarkCompactCollector::AbortCompaction() {
   if (compacting_) {
     RememberedSet<OLD_TO_OLD>::ClearAll(heap());
@@ -799,7 +796,6 @@ void MarkCompactCollector::AbortCompaction() {
   DCHECK(evacuation_candidates_.empty());
 }
 
-
 void MarkCompactCollector::Prepare() {
   was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
 
@@ -1024,9 +1020,7 @@ class InternalizedStringTableCleaner : public ObjectVisitor {
     UNREACHABLE();
   }
 
-  int PointersRemoved() {
-    return pointers_removed_;
-  }
+  int PointersRemoved() { return pointers_removed_; }
 
  private:
   Heap* heap_;
@@ -2865,7 +2859,7 @@ class FullEvacuator : public Evacuator {
       : Evacuator(collector->heap(), &record_visitor_, &local_allocator_,
                   FLAG_always_promote_young_mc),
         record_visitor_(collector, &ephemeron_remembered_set_),
-        local_allocator_(heap_, CompactionSpaceKind::kMarkCompact),
+        local_allocator_(heap_, LocalSpaceKind::kCompactionSpaceForMarkCompact),
         collector_(collector) {}
 
   GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
@@ -4922,7 +4916,8 @@ class YoungGenerationEvacuator : public Evacuator {
       : Evacuator(collector->heap(), &record_visitor_, &local_allocator_,
                   false),
         record_visitor_(collector->heap()->mark_compact_collector()),
-        local_allocator_(heap_, CompactionSpaceKind::kMinorMarkCompact),
+        local_allocator_(heap_,
+                         LocalSpaceKind::kCompactionSpaceForMinorMarkCompact),
         collector_(collector) {}
 
   GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
@@ -402,7 +402,7 @@ Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
       local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
       copied_size_(0),
       promoted_size_(0),
-      allocator_(heap, CompactionSpaceKind::kScavenge),
+      allocator_(heap, LocalSpaceKind::kCompactionSpaceForScavenge),
       is_logging_(is_logging),
       is_incremental_marking_(heap->incremental_marking()->IsMarking()),
       is_compacting_(heap->incremental_marking()->IsCompacting()) {}
@@ -480,7 +480,7 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
   AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
 #endif
   HeapObject heap_obj;
-  if (!result.IsRetry() && result.To(&heap_obj) && !is_compaction_space()) {
+  if (!result.IsRetry() && result.To(&heap_obj) && !is_local_space()) {
     AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
                    heap_obj.address(), size_in_bytes);
     StartNextInlineAllocationStep();
@@ -1640,10 +1640,10 @@ intptr_t Space::GetNextInlineAllocationStepSize() {
 
 PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                        Executability executable, FreeList* free_list,
-                       CompactionSpaceKind compaction_space_kind)
+                       LocalSpaceKind local_space_kind)
     : SpaceWithLinearArea(heap, space, free_list),
       executable_(executable),
-      compaction_space_kind_(compaction_space_kind) {
+      local_space_kind_(local_space_kind) {
   area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
   accounting_stats_.Clear();
 }
@@ -1664,6 +1664,8 @@ void PagedSpace::RefillFreeList() {
       identity() != MAP_SPACE && identity() != RO_SPACE) {
     return;
   }
+  DCHECK_NE(local_space_kind(), LocalSpaceKind::kOffThreadSpace);
+  DCHECK_IMPLIES(is_local_space(), is_compaction_space());
   DCHECK(!IsDetached());
   MarkCompactCollector* collector = heap()->mark_compact_collector();
   size_t added = 0;
@@ -1682,7 +1684,7 @@ void PagedSpace::RefillFreeList() {
       // Also merge old-to-new remembered sets if not scavenging because of
       // data races: One thread might iterate remembered set, while another
       // thread merges them.
-      if (compaction_space_kind() != CompactionSpaceKind::kScavenge) {
+      if (local_space_kind() != LocalSpaceKind::kCompactionSpaceForScavenge) {
        p->MergeOldToNewRememberedSets();
      }
 
@@ -1708,7 +1710,13 @@ void PagedSpace::RefillFreeList() {
   }
 }
 
-void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
+void OffThreadSpace::RefillFreeList() {
+  // We should never try to refill the free list in off-thread space, because
+  // we know it will always be fully linear.
+  UNREACHABLE();
+}
+
+void PagedSpace::MergeLocalSpace(LocalSpace* other) {
   base::MutexGuard guard(mutex());
 
   DCHECK(identity() == other->identity());
@@ -1742,7 +1750,6 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
   DCHECK_EQ(0u, other->Capacity());
 }
 
-
 size_t PagedSpace::CommittedPhysicalMemory() {
   if (!base::OS::HasLazyCommits()) return CommittedMemory();
   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
@@ -2049,7 +2056,7 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(
   // if it is big enough.
   FreeLinearAllocationArea();
 
-  if (!is_compaction_space()) {
+  if (!is_local_space()) {
     heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
         heap()->GCFlagsForIncrementalMarking(),
         kGCCallbackScheduleIdleGarbageCollection);
@@ -3718,6 +3725,12 @@ bool CompactionSpace::SweepAndRetryAllocation(int size_in_bytes,
   return false;
 }
 
+bool OffThreadSpace::SweepAndRetryAllocation(int size_in_bytes,
+                                             AllocationOrigin origin) {
+  // Sweeping is not supported in the off-thread space.
+  UNREACHABLE();
+}
+
 bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
                                                 AllocationOrigin origin) {
   VMState<GC> state(heap()->isolate());
@@ -3731,8 +3744,26 @@ bool CompactionSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
   return RawSlowRefillLinearAllocationArea(size_in_bytes, origin);
 }
 
+bool OffThreadSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
+                                                    AllocationOrigin origin) {
+  if (RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin))
+    return true;
+
+  if (Expand()) {
+    DCHECK((CountTotalPages() > 1) ||
+           (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
+    return RefillLinearAllocationAreaFromFreeList(
+        static_cast<size_t>(size_in_bytes), origin);
+  }
+
+  return false;
+}
+
 bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes,
                                                    AllocationOrigin origin) {
+  // Non-compaction local spaces are not supported.
+  DCHECK_IMPLIES(is_local_space(), is_compaction_space());
+
   // Allocation in this space has failed.
   DCHECK_GE(size_in_bytes, 0);
   const int kMaxPagesToSweep = 1;
@@ -48,6 +48,7 @@ class FreeList;
 class Isolate;
 class LinearAllocationArea;
 class LocalArrayBufferTracker;
+class LocalSpace;
 class MemoryAllocator;
 class MemoryChunk;
 class MemoryChunkLayout;
@@ -600,7 +601,7 @@ class MemoryChunk : public BasicMemoryChunk {
       + kSystemPointerSize  // Address owner_
       + kSizetSize          // size_t progress_bar_
       + kIntptrSize         // intptr_t live_byte_count_
-      + kSystemPointerSize  // SlotSet* sweeping_slot_set_
+      + kSystemPointerSize  // SlotSet* sweeping_slot_set_
       + kSystemPointerSize *
             NUMBER_OF_REMEMBERED_SET_TYPES  // TypedSlotSet* array
       + kSystemPointerSize *
@@ -793,7 +794,6 @@ class MemoryChunk : public BasicMemoryChunk {
     return this->address() + (index << kTaggedSizeLog2);
   }
 
-
   bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
 
   void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
@@ -1692,7 +1692,6 @@ class LinearAllocationArea {
   Address limit_;
 };
 
-
 // An abstraction of the accounting statistics of a page-structured space.
 //
 // The stats are only set by functions that ensure they stay balanced. These
@@ -1774,7 +1773,6 @@ class AllocationStats {
 #endif
 };
 
-
 // The free list is organized in categories as follows:
 // kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
 //   allocation, when categories >= small do not have entries anymore.
@@ -2285,10 +2283,9 @@ class V8_EXPORT_PRIVATE PagedSpace
   static const size_t kCompactionMemoryWanted = 500 * KB;
 
   // Creates a space with an id.
-  PagedSpace(
-      Heap* heap, AllocationSpace id, Executability executable,
-      FreeList* free_list,
-      CompactionSpaceKind compaction_space_kind = CompactionSpaceKind::kNone);
+  PagedSpace(Heap* heap, AllocationSpace id, Executability executable,
+             FreeList* free_list,
+             LocalSpaceKind local_space_kind = LocalSpaceKind::kNone);
 
   ~PagedSpace() override { TearDown(); }
 
@@ -2464,15 +2461,18 @@ class V8_EXPORT_PRIVATE PagedSpace
   // Return size of allocatable area on a page in this space.
   inline int AreaSize() { return static_cast<int>(area_size_); }
 
+  bool is_local_space() { return local_space_kind_ != LocalSpaceKind::kNone; }
+
   bool is_compaction_space() {
-    return compaction_space_kind_ != CompactionSpaceKind::kNone;
+    return IsInRange(local_space_kind_, LocalSpaceKind::kFirstCompactionSpace,
+                     LocalSpaceKind::kLastCompactionSpace);
   }
 
-  CompactionSpaceKind compaction_space_kind() { return compaction_space_kind_; }
+  LocalSpaceKind local_space_kind() { return local_space_kind_; }
 
   // Merges {other} into the current space. Note that this modifies {other},
   // e.g., removes its bump pointer area and resets statistics.
-  void MergeCompactionSpace(CompactionSpace* other);
+  void MergeLocalSpace(LocalSpace* other);
 
   // Refills the free list from the corresponding free list filled by the
   // sweeper.
@@ -2509,7 +2509,7 @@ class V8_EXPORT_PRIVATE PagedSpace
   void DecreaseLimit(Address new_limit);
   void UpdateInlineAllocationLimit(size_t min_size) override;
   bool SupportsInlineAllocation() override {
-    return identity() == OLD_SPACE && !is_compaction_space();
+    return identity() == OLD_SPACE && !is_local_space();
   }
 
  protected:
@@ -2566,7 +2566,7 @@ class V8_EXPORT_PRIVATE PagedSpace
 
   Executability executable_;
 
-  CompactionSpaceKind compaction_space_kind_;
+  LocalSpaceKind local_space_kind_;
 
   size_t area_size_;
 
@@ -2690,15 +2690,11 @@ class SemiSpace : public Space {
   // If we don't have these here then SemiSpace will be abstract. However
   // they should never be called:
 
-  size_t Size() override {
-    UNREACHABLE();
-  }
+  size_t Size() override { UNREACHABLE(); }
 
   size_t SizeOfObjects() override { return Size(); }
 
-  size_t Available() override {
-    UNREACHABLE();
-  }
+  size_t Available() override { UNREACHABLE(); }
 
   Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
   Page* last_page() { return reinterpret_cast<Page*>(Space::last_page()); }
@@ -3039,21 +3035,34 @@ class V8_EXPORT_PRIVATE PauseAllocationObserversScope {
 };
 
 // -----------------------------------------------------------------------------
-// Compaction space that is used temporarily during compaction.
+// Base class for compaction space and off-thread space.
 
-class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
+class V8_EXPORT_PRIVATE LocalSpace : public PagedSpace {
  public:
-  CompactionSpace(Heap* heap, AllocationSpace id, Executability executable,
-                  CompactionSpaceKind compaction_space_kind)
+  LocalSpace(Heap* heap, AllocationSpace id, Executability executable,
+             LocalSpaceKind local_space_kind)
       : PagedSpace(heap, id, executable, FreeList::CreateFreeList(),
-                   compaction_space_kind) {
-    DCHECK_NE(compaction_space_kind, CompactionSpaceKind::kNone);
+                   local_space_kind) {
+    DCHECK_NE(local_space_kind, LocalSpaceKind::kNone);
   }
 
  protected:
   // The space is temporary and not included in any snapshots.
   bool snapshotable() override { return false; }
+};
 
+// -----------------------------------------------------------------------------
+// Compaction space that is used temporarily during compaction.
+
+class V8_EXPORT_PRIVATE CompactionSpace : public LocalSpace {
+ public:
+  CompactionSpace(Heap* heap, AllocationSpace id, Executability executable,
+                  LocalSpaceKind local_space_kind)
+      : LocalSpace(heap, id, executable, local_space_kind) {
+    DCHECK(is_compaction_space());
+  }
+
+ protected:
   V8_WARN_UNUSED_RESULT bool SweepAndRetryAllocation(
       int size_in_bytes, AllocationOrigin origin) override;
 
@@ -3065,11 +3074,11 @@ class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
 class CompactionSpaceCollection : public Malloced {
  public:
   explicit CompactionSpaceCollection(Heap* heap,
-                                     CompactionSpaceKind compaction_space_kind)
+                                     LocalSpaceKind local_space_kind)
       : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE,
-                   compaction_space_kind),
+                   local_space_kind),
         code_space_(heap, CODE_SPACE, Executability::EXECUTABLE,
-                    compaction_space_kind) {}
+                    local_space_kind) {}
 
   CompactionSpace* Get(AllocationSpace space) {
     switch (space) {
@@ -3123,7 +3132,6 @@ class CodeSpace : public PagedSpace {
                 (info).top() <= (space).page_high() && \
                 (info).limit() <= (space).page_high())
 
-
 // -----------------------------------------------------------------------------
 // Old space for all map objects
 
@@ -3148,6 +3156,25 @@ class MapSpace : public PagedSpace {
 #endif
 };
 
+// -----------------------------------------------------------------------------
+// Off-thread space that is used for folded allocation on a different thread.
+
+class V8_EXPORT_PRIVATE OffThreadSpace : public LocalSpace {
+ public:
+  explicit OffThreadSpace(Heap* heap)
+      : LocalSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
+                   LocalSpaceKind::kOffThreadSpace) {}
+
+ protected:
+  V8_WARN_UNUSED_RESULT bool SweepAndRetryAllocation(
+      int size_in_bytes, AllocationOrigin origin) override;
+
+  V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
+      int size_in_bytes, AllocationOrigin origin) override;
+
+  void RefillFreeList() override;
+};
+
 // -----------------------------------------------------------------------------
 // Read Only space for all Immortal Immovable and Immutable objects
 
@@ -2,6 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "src/heap/spaces.h"
 
+#include <memory>
+
 #include "src/execution/isolate.h"
 #include "src/heap/heap-inl.h"
 #include "src/heap/heap-write-barrier-inl.h"
@@ -18,8 +20,9 @@ TEST_F(SpacesTest, CompactionSpaceMerge) {
   OldSpace* old_space = heap->old_space();
   EXPECT_TRUE(old_space != nullptr);
 
-  CompactionSpace* compaction_space = new CompactionSpace(
-      heap, OLD_SPACE, NOT_EXECUTABLE, CompactionSpaceKind::kMarkCompact);
+  CompactionSpace* compaction_space =
+      new CompactionSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
+                          LocalSpaceKind::kCompactionSpaceForMarkCompact);
   EXPECT_TRUE(compaction_space != nullptr);
 
   for (Page* p : *old_space) {
@@ -45,13 +48,93 @@ TEST_F(SpacesTest, CompactionSpaceMerge) {
   int pages_in_old_space = old_space->CountTotalPages();
   int pages_in_compaction_space = compaction_space->CountTotalPages();
   EXPECT_EQ(kExpectedPages, pages_in_compaction_space);
-  old_space->MergeCompactionSpace(compaction_space);
+  old_space->MergeLocalSpace(compaction_space);
   EXPECT_EQ(pages_in_old_space + pages_in_compaction_space,
             old_space->CountTotalPages());
 
   delete compaction_space;
 }
 
+class OffThreadAllocationThread final : public base::Thread {
+ public:
+  explicit OffThreadAllocationThread(Heap* heap)
+      : Thread(Options("OffThreadAllocationThread")), heap_(heap) {}
+  void Run() override {
+    off_thread_space_ = std::make_unique<OffThreadSpace>(heap_);
+    EXPECT_TRUE(off_thread_space_ != nullptr);
+
+    // Cannot loop until "Available()" since we initially have 0 bytes available
+    // and would thus neither grow, nor be able to allocate an object.
+    const int kNumObjects = 10;
+    const int kNumObjectsPerPage =
+        off_thread_space_->AreaSize() / kMaxRegularHeapObjectSize;
+    const int kExpectedPages =
+        (kNumObjects + kNumObjectsPerPage - 1) / kNumObjectsPerPage;
+    for (int i = 0; i < kNumObjects; i++) {
+      HeapObject object =
+          off_thread_space_->AllocateRawUnaligned(kMaxRegularHeapObjectSize)
+              .ToObjectChecked();
+      heap_->CreateFillerObjectAt(object.address(), kMaxRegularHeapObjectSize,
+                                  ClearRecordedSlots::kNo);
+    }
+    int pages_in_off_thread_space = off_thread_space_->CountTotalPages();
+    EXPECT_EQ(kExpectedPages, pages_in_off_thread_space);
+  }
+
+  OffThreadSpace* space() { return off_thread_space_.get(); }
+
+ private:
+  Heap* heap_;
+  std::unique_ptr<OffThreadSpace> off_thread_space_;
+};
+
+TEST_F(SpacesTest, OffThreadSpaceAllocate) {
+  Heap* heap = i_isolate()->heap();
+
+  static const int kNumThreads = 10;
+  std::unique_ptr<OffThreadAllocationThread> threads[10];
+  for (int i = 0; i < kNumThreads; ++i) {
+    threads[i] = std::make_unique<OffThreadAllocationThread>(heap);
+  }
+  for (int i = 0; i < kNumThreads; ++i) {
+    CHECK(threads[i]->Start());
+  }
+  for (int i = 0; i < kNumThreads; ++i) {
+    threads[i]->Join();
+  }
+}
+
+TEST_F(SpacesTest, OffThreadSpaceMerge) {
+  Heap* heap = i_isolate()->heap();
+  OldSpace* old_space = heap->old_space();
+  EXPECT_TRUE(old_space != nullptr);
+
+  static const int kNumThreads = 10;
+  std::unique_ptr<OffThreadAllocationThread> threads[10];
+  for (int i = 0; i < kNumThreads; ++i) {
+    threads[i] = std::make_unique<OffThreadAllocationThread>(heap);
+  }
+  for (int i = 0; i < kNumThreads; ++i) {
+    CHECK(threads[i]->Start());
+  }
+  for (int i = 0; i < kNumThreads; ++i) {
+    threads[i]->Join();
+  }
+
+  int pages_in_old_space = old_space->CountTotalPages();
+
+  int expected_merged_pages = 0;
+  for (int i = 0; i < kNumThreads; ++i) {
+    int pages_in_off_thread_space = threads[i]->space()->CountTotalPages();
+
+    old_space->MergeLocalSpace(threads[i]->space());
+    expected_merged_pages += pages_in_off_thread_space;
+  }
+
+  EXPECT_EQ(pages_in_old_space + expected_merged_pages,
+            old_space->CountTotalPages());
+}
+
 TEST_F(SpacesTest, WriteBarrierFromHeapObject) {
   constexpr Address address1 = Page::kPageSize;
   HeapObject object1 = HeapObject::unchecked_cast(Object(address1));