cppgc: Replace worklist implementation with new worklist

This CL migrates cppgc to use Ulan's new worklist implementation.

Since there is no central segments array anymore, we cannot rely on
getting the same view (now renamed to Local) given the same task id.
To avoid creating many short lived segments (e.g. for write barriers)
marking state now holds local views for all worklists and provides
access to them.

Bug: chromium:1056170
Change-Id: Id19fe1196b79ed251810e91074046998dc2a9177
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2390771
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69767}
This commit is contained in:
Omer Katz 2020-09-09 10:50:34 +02:00 committed by Commit Bot
parent 80b1d7ff24
commit b7b3abe83a
12 changed files with 573 additions and 641 deletions

View File

@ -4337,6 +4337,7 @@ v8_source_set("cppgc_base") {
"src/heap/cppgc/logging.cc",
"src/heap/cppgc/marker.cc",
"src/heap/cppgc/marker.h",
"src/heap/cppgc/marking-state.cc",
"src/heap/cppgc/marking-state.h",
"src/heap/cppgc/marking-verifier.cc",
"src/heap/cppgc/marking-verifier.h",

View File

@ -86,13 +86,13 @@ void ResetRememberedSet(HeapBase& heap) {
static constexpr size_t kDefaultDeadlineCheckInterval = 150u;
template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
typename Worklist, typename Callback, typename Predicate>
bool DrainWorklistWithDeadline(Predicate should_yield, Worklist* worklist,
Callback callback, int task_id) {
typename WorklistLocal, typename Callback, typename Predicate>
bool DrainWorklistWithDeadline(Predicate should_yield,
WorklistLocal& worklist_local,
Callback callback) {
size_t processed_callback_count = 0;
typename Worklist::View view(worklist, task_id);
typename Worklist::EntryType item;
while (view.Pop(&item)) {
typename WorklistLocal::ItemType item;
while (worklist_local.Pop(&item)) {
callback(item);
if (processed_callback_count-- == 0) {
if (should_yield()) {
@ -105,18 +105,18 @@ bool DrainWorklistWithDeadline(Predicate should_yield, Worklist* worklist,
}
template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
typename Worklist, typename Callback>
typename WorklistLocal, typename Callback>
bool DrainWorklistWithBytesAndTimeDeadline(MarkingState& marking_state,
size_t marked_bytes_deadline,
v8::base::TimeTicks time_deadline,
Worklist* worklist,
Callback callback, int task_id) {
WorklistLocal& worklist_local,
Callback callback) {
return DrainWorklistWithDeadline(
[&marking_state, marked_bytes_deadline, time_deadline]() {
return (marked_bytes_deadline <= marking_state.marked_bytes()) ||
(time_deadline <= v8::base::TimeTicks::Now());
},
worklist, callback, task_id);
worklist_local, callback);
}
void TraceMarkedObject(Visitor* visitor, const HeapObjectHeader* header) {
@ -168,11 +168,7 @@ MarkerBase::MarkerBase(Key, HeapBase& heap, cppgc::Platform* platform,
config_(config),
platform_(platform),
foreground_task_runner_(platform_->GetForegroundTaskRunner()),
mutator_marking_state_(
heap, marking_worklists_.marking_worklist(),
marking_worklists_.not_fully_constructed_worklist(),
marking_worklists_.weak_callback_worklist(),
MarkingWorklists::kMutatorThreadId) {}
mutator_marking_state_(heap, marking_worklists_) {}
MarkerBase::~MarkerBase() {
// The fixed point iteration may have found not-fully-constructed objects.
@ -182,10 +178,9 @@ MarkerBase::~MarkerBase() {
#if DEBUG
DCHECK_NE(MarkingConfig::StackState::kNoHeapPointers, config_.stack_state);
HeapObjectHeader* header;
MarkingWorklists::NotFullyConstructedWorklist::View view(
marking_worklists_.not_fully_constructed_worklist(),
MarkingWorklists::kMutatorThreadId);
while (view.Pop(&header)) {
MarkingWorklists::NotFullyConstructedWorklist::Local& local =
mutator_marking_state_.not_fully_constructed_worklist();
while (local.Pop(&header)) {
DCHECK(header->IsMarked());
}
#else
@ -218,7 +213,7 @@ void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
// VisitRoots also resets the LABs.
VisitRoots(config_.stack_state);
if (config_.stack_state == MarkingConfig::StackState::kNoHeapPointers) {
marking_worklists_.FlushNotFullyConstructedObjects();
mutator_marking_state_.FlushNotFullyConstructedObjects();
} else {
MarkNotFullyConstructedObjects();
}
@ -237,6 +232,7 @@ void MarkerBase::FinishMarking(MarkingConfig::StackState stack_state) {
EnterAtomicPause(stack_state);
ProcessWorklistsWithDeadline(std::numeric_limits<size_t>::max(),
v8::base::TimeDelta::Max());
mutator_marking_state_.Publish();
LeaveAtomicPause();
is_marking_started_ = false;
}
@ -247,10 +243,9 @@ void MarkerBase::ProcessWeakness() {
// Call weak callbacks on objects that may now be pointing to dead objects.
MarkingWorklists::WeakCallbackItem item;
LivenessBroker broker = LivenessBrokerFactory::Create();
MarkingWorklists::WeakCallbackWorklist::View view(
marking_worklists_.weak_callback_worklist(),
MarkingWorklists::kMutatorThreadId);
while (view.Pop(&item)) {
MarkingWorklists::WeakCallbackWorklist::Local& local =
mutator_marking_state_.weak_callback_worklist();
while (local.Pop(&item)) {
item.callback(broker, item.parameter);
}
// Weak callbacks should not add any new objects for marking.
@ -285,7 +280,7 @@ bool MarkerBase::IncrementalMarkingStepForTesting(
bool MarkerBase::IncrementalMarkingStep(MarkingConfig::StackState stack_state) {
if (stack_state == MarkingConfig::StackState::kNoHeapPointers) {
marking_worklists_.FlushNotFullyConstructedObjects();
mutator_marking_state_.FlushNotFullyConstructedObjects();
}
config_.stack_state = stack_state;
@ -315,6 +310,7 @@ bool MarkerBase::AdvanceMarkingWithDeadline(v8::base::TimeDelta max_duration) {
DCHECK_NE(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
ScheduleIncrementalMarkingTask();
}
mutator_marking_state_.Publish();
return is_done;
}
@ -329,18 +325,17 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
// callbacks.
if (!DrainWorklistWithBytesAndTimeDeadline(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
marking_worklists_.previously_not_fully_constructed_worklist(),
mutator_marking_state_.previously_not_fully_constructed_worklist(),
[this](HeapObjectHeader* header) {
TraceMarkedObject(&visitor(), header);
mutator_marking_state_.AccountMarkedBytes(*header);
},
MarkingWorklists::kMutatorThreadId)) {
})) {
return false;
}
if (!DrainWorklistWithBytesAndTimeDeadline(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
marking_worklists_.marking_worklist(),
mutator_marking_state_.marking_worklist(),
[this](const MarkingWorklists::MarkingItem& item) {
const HeapObjectHeader& header =
HeapObjectHeader::FromPayload(item.base_object_payload);
@ -350,32 +345,28 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
header.IsMarked<HeapObjectHeader::AccessMode::kNonAtomic>());
item.callback(&visitor(), item.base_object_payload);
mutator_marking_state_.AccountMarkedBytes(header);
},
MarkingWorklists::kMutatorThreadId)) {
})) {
return false;
}
if (!DrainWorklistWithBytesAndTimeDeadline(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
marking_worklists_.write_barrier_worklist(),
mutator_marking_state_.write_barrier_worklist(),
[this](HeapObjectHeader* header) {
TraceMarkedObject(&visitor(), header);
mutator_marking_state_.AccountMarkedBytes(*header);
},
MarkingWorklists::kMutatorThreadId)) {
})) {
return false;
}
} while (!marking_worklists_.marking_worklist()->IsLocalViewEmpty(
MarkingWorklists::kMutatorThreadId));
} while (!mutator_marking_state_.marking_worklist().IsLocalAndGlobalEmpty());
return true;
}
void MarkerBase::MarkNotFullyConstructedObjects() {
HeapObjectHeader* header;
MarkingWorklists::NotFullyConstructedWorklist::View view(
marking_worklists_.not_fully_constructed_worklist(),
MarkingWorklists::kMutatorThreadId);
while (view.Pop(&header)) {
MarkingWorklists::NotFullyConstructedWorklist::Local& local =
mutator_marking_state_.not_fully_constructed_worklist();
while (local.Pop(&header)) {
DCHECK(header);
DCHECK(header->IsMarked<HeapObjectHeader::AccessMode::kNonAtomic>());
// TraceConservativelyIfNeeded will either push to a worklist

View File

@ -200,18 +200,11 @@ class V8_EXPORT_PRIVATE Marker final : public MarkerBase {
};
void MarkerBase::WriteBarrierForInConstructionObject(HeapObjectHeader& header) {
MarkingWorklists::NotFullyConstructedWorklist::View
not_fully_constructed_worklist(
marking_worklists_.not_fully_constructed_worklist(),
MarkingWorklists::kMutatorThreadId);
not_fully_constructed_worklist.Push(&header);
mutator_marking_state_.not_fully_constructed_worklist().Push(&header);
}
void MarkerBase::WriteBarrierForObject(HeapObjectHeader& header) {
MarkingWorklists::WriteBarrierWorklist::View write_barrier_worklist(
marking_worklists_.write_barrier_worklist(),
MarkingWorklists::kMutatorThreadId);
write_barrier_worklist.Push(&header);
mutator_marking_state_.write_barrier_worklist().Push(&header);
}
} // namespace internal

View File

@ -0,0 +1,20 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/cppgc/marking-state.h"
namespace cppgc {
namespace internal {
// Moves all entries collected in the not-fully-constructed worklist over to
// the previously-not-fully-constructed worklist so they can be processed in
// a later marking step.
void MarkingState::FlushNotFullyConstructedObjects() {
// Publish the thread-local segments first so the global view below sees
// every entry pushed through this MarkingState.
not_fully_constructed_worklist().Publish();
if (!not_fully_constructed_worklist_.IsGlobalEmpty()) {
// Merge steals all global segments from the source worklist.
previously_not_fully_constructed_worklist_.Merge(
&not_fully_constructed_worklist_);
}
// After Publish + Merge the source worklist must be drained.
DCHECK(not_fully_constructed_worklist_.IsGlobalEmpty());
}
} // namespace internal
} // namespace cppgc

View File

@ -18,9 +18,7 @@ namespace internal {
// C++ marking implementation.
class MarkingState {
public:
inline MarkingState(HeapBase& heap, MarkingWorklists::MarkingWorklist*,
MarkingWorklists::NotFullyConstructedWorklist*,
MarkingWorklists::WeakCallbackWorklist*, int);
inline MarkingState(HeapBase& heap, MarkingWorklists&);
MarkingState(const MarkingState&) = delete;
MarkingState& operator=(const MarkingState&) = delete;
@ -44,31 +42,64 @@ class MarkingState {
inline void AccountMarkedBytes(const HeapObjectHeader&);
size_t marked_bytes() const { return marked_bytes_; }
void Publish() {
marking_worklist_.Publish();
not_fully_constructed_worklist_.Publish();
previously_not_fully_constructed_worklist_.Publish();
weak_callback_worklist_.Publish();
write_barrier_worklist_.Publish();
}
// Moves objects in not_fully_constructed_worklist_ to
// previously_not_fully_constructed_worklist_.
void FlushNotFullyConstructedObjects();
MarkingWorklists::MarkingWorklist::Local& marking_worklist() {
return marking_worklist_;
}
MarkingWorklists::NotFullyConstructedWorklist::Local&
not_fully_constructed_worklist() {
return not_fully_constructed_worklist_;
}
MarkingWorklists::NotFullyConstructedWorklist::Local&
previously_not_fully_constructed_worklist() {
return previously_not_fully_constructed_worklist_;
}
MarkingWorklists::WeakCallbackWorklist::Local& weak_callback_worklist() {
return weak_callback_worklist_;
}
MarkingWorklists::WriteBarrierWorklist::Local& write_barrier_worklist() {
return write_barrier_worklist_;
}
private:
#ifdef DEBUG
HeapBase& heap_;
#endif // DEBUG
MarkingWorklists::MarkingWorklist::View marking_worklist_;
MarkingWorklists::NotFullyConstructedWorklist::View
MarkingWorklists::MarkingWorklist::Local marking_worklist_;
MarkingWorklists::NotFullyConstructedWorklist::Local
not_fully_constructed_worklist_;
MarkingWorklists::WeakCallbackWorklist::View weak_callback_worklist_;
MarkingWorklists::NotFullyConstructedWorklist::Local
previously_not_fully_constructed_worklist_;
MarkingWorklists::WeakCallbackWorklist::Local weak_callback_worklist_;
MarkingWorklists::WriteBarrierWorklist::Local write_barrier_worklist_;
size_t marked_bytes_ = 0;
};
MarkingState::MarkingState(
HeapBase& heap, MarkingWorklists::MarkingWorklist* marking_worklist,
MarkingWorklists::NotFullyConstructedWorklist*
not_fully_constructed_worklist,
MarkingWorklists::WeakCallbackWorklist* weak_callback_worklist, int task_id)
MarkingState::MarkingState(HeapBase& heap, MarkingWorklists& marking_worklists)
:
#ifdef DEBUG
heap_(heap),
#endif // DEBUG
marking_worklist_(marking_worklist, task_id),
not_fully_constructed_worklist_(not_fully_constructed_worklist, task_id),
weak_callback_worklist_(weak_callback_worklist, task_id) {
marking_worklist_(marking_worklists.marking_worklist()),
not_fully_constructed_worklist_(
marking_worklists.not_fully_constructed_worklist()),
previously_not_fully_constructed_worklist_(
marking_worklists.previously_not_fully_constructed_worklist()),
weak_callback_worklist_(marking_worklists.weak_callback_worklist()),
write_barrier_worklist_(marking_worklists.write_barrier_worklist()) {
}
void MarkingState::MarkAndPush(const void* object, TraceDescriptor desc) {

View File

@ -23,7 +23,7 @@ class V8_EXPORT_PRIVATE MarkingVisitor : public VisitorBase {
MarkingVisitor(HeapBase&, MarkingState&);
~MarkingVisitor() override = default;
private:
protected:
void Visit(const void*, TraceDescriptor) final;
void VisitWeak(const void*, TraceDescriptor, WeakCallback, const void*) final;
void VisitRoot(const void*, TraceDescriptor) final;

View File

@ -18,15 +18,5 @@ void MarkingWorklists::ClearForTesting() {
weak_callback_worklist_.Clear();
}
void MarkingWorklists::FlushNotFullyConstructedObjects() {
if (!not_fully_constructed_worklist_.IsLocalViewEmpty(kMutatorThreadId)) {
not_fully_constructed_worklist_.FlushToGlobal(kMutatorThreadId);
previously_not_fully_constructed_worklist_.MergeGlobalPool(
&not_fully_constructed_worklist_);
}
DCHECK(not_fully_constructed_worklist_.IsLocalViewEmpty(
MarkingWorklists::kMutatorThreadId));
}
} // namespace internal
} // namespace cppgc

View File

@ -14,9 +14,6 @@ namespace internal {
class HeapObjectHeader;
class MarkingWorklists {
static constexpr int kNumConcurrentMarkers = 0;
static constexpr int kNumMarkers = 1 + kNumConcurrentMarkers;
public:
static constexpr int kMutatorThreadId = 0;
@ -28,14 +25,13 @@ class MarkingWorklists {
// Segment size of 512 entries necessary to avoid throughput regressions.
// Since the work list is currently a temporary object this is not a problem.
using MarkingWorklist =
Worklist<MarkingItem, 512 /* local entries */, kNumMarkers>;
using MarkingWorklist = Worklist<MarkingItem, 512 /* local entries */>;
using NotFullyConstructedWorklist =
Worklist<HeapObjectHeader*, 16 /* local entries */, kNumMarkers>;
Worklist<HeapObjectHeader*, 16 /* local entries */>;
using WeakCallbackWorklist =
Worklist<WeakCallbackItem, 64 /* local entries */, kNumMarkers>;
Worklist<WeakCallbackItem, 64 /* local entries */>;
using WriteBarrierWorklist =
Worklist<HeapObjectHeader*, 64 /*local entries */, kNumMarkers>;
Worklist<HeapObjectHeader*, 64 /*local entries */>;
MarkingWorklist* marking_worklist() { return &marking_worklist_; }
NotFullyConstructedWorklist* not_fully_constructed_worklist() {
@ -51,10 +47,6 @@ class MarkingWorklists {
return &weak_callback_worklist_;
}
// Moves objects in not_fully_constructed_worklist_ to
// previously_not_fully_constructed_worklist_.
void FlushNotFullyConstructedObjects();
void ClearForTesting();
private:

View File

@ -16,457 +16,395 @@
namespace cppgc {
namespace internal {
// A concurrent worklist based on segments. Each tasks gets private
// push and pop segments. Empty pop segments are swapped with their
// corresponding push segments. Full push segments are published to a global
// pool of segments and replaced with empty segments.
//
// Work stealing is best effort, i.e., there is no way to inform other tasks
// of the need of items.
template <typename EntryType_, int SEGMENT_SIZE, int max_num_tasks = 8>
// A global marking worklist that is similar to the existing Worklist
// but does not reserve space or keep track of the local segments.
// Eventually this will replace Worklist after all its current uses
// are migrated.
template <typename EntryType, int SegmentSize>
class Worklist {
using WorklistType = Worklist<EntryType_, SEGMENT_SIZE, max_num_tasks>;
public:
using EntryType = EntryType_;
static constexpr int kMaxNumTasks = max_num_tasks;
static constexpr size_t kSegmentCapacity = SEGMENT_SIZE;
static const int kSegmentSize = SegmentSize;
class Segment;
class Local;
class View {
public:
View(WorklistType* worklist, int task_id)
: worklist_(worklist), task_id_(task_id) {}
Worklist() = default;
~Worklist() { CHECK(IsEmpty()); }
// Pushes an entry onto the worklist.
bool Push(EntryType entry) { return worklist_->Push(task_id_, entry); }
void Push(Segment* segment);
bool Pop(Segment** segment);
// Pops an entry from the worklist.
bool Pop(EntryType* entry) { return worklist_->Pop(task_id_, entry); }
// Returns true if the list of segments is empty.
bool IsEmpty();
// Returns the number of segments in the list.
size_t Size();
// Returns true if the local portion of the worklist is empty.
bool IsLocalEmpty() const { return worklist_->IsLocalEmpty(task_id_); }
// Moves the segments of the given marking worklist into this
// marking worklist.
void Merge(Worklist<EntryType, SegmentSize>* other);
// Returns true if the worklist is empty. Can only be used from the main
// thread without concurrent access.
bool IsEmpty() const { return worklist_->IsEmpty(); }
bool IsGlobalPoolEmpty() const { return worklist_->IsGlobalPoolEmpty(); }
// Returns true if the local portion and the global pool are empty (i.e.
// whether the current view cannot pop anymore).
bool IsLocalViewEmpty() const {
return worklist_->IsLocalViewEmpty(task_id_);
}
void FlushToGlobal() { worklist_->FlushToGlobal(task_id_); }
void* operator new(size_t, void* location) = delete;
void* operator new(size_t) = delete;
private:
WorklistType* const worklist_;
const int task_id_;
};
Worklist() : Worklist(kMaxNumTasks) {}
explicit Worklist(int num_tasks) : num_tasks_(num_tasks) {
DCHECK_LE(num_tasks_, kMaxNumTasks);
for (int i = 0; i < num_tasks_; i++) {
private_push_segment(i) = NewSegment();
private_pop_segment(i) = NewSegment();
}
}
~Worklist() {
CHECK(IsEmpty());
for (int i = 0; i < num_tasks_; i++) {
DCHECK_NOT_NULL(private_push_segment(i));
DCHECK_NOT_NULL(private_pop_segment(i));
delete private_push_segment(i);
delete private_pop_segment(i);
}
}
// Swaps content with the given worklist. Local buffers need to
// be empty, not thread safe.
void Swap(Worklist<EntryType, SEGMENT_SIZE>& other) {
CHECK(AreLocalsEmpty());
CHECK(other.AreLocalsEmpty());
global_pool_.Swap(other.global_pool_);
}
bool Push(int task_id, EntryType entry) {
DCHECK_LT(task_id, num_tasks_);
DCHECK_NOT_NULL(private_push_segment(task_id));
if (!private_push_segment(task_id)->Push(entry)) {
PublishPushSegmentToGlobal(task_id);
bool success = private_push_segment(task_id)->Push(entry);
USE(success);
DCHECK(success);
}
return true;
}
bool Pop(int task_id, EntryType* entry) {
DCHECK_LT(task_id, num_tasks_);
DCHECK_NOT_NULL(private_pop_segment(task_id));
if (!private_pop_segment(task_id)->Pop(entry)) {
if (!private_push_segment(task_id)->IsEmpty()) {
Segment* tmp = private_pop_segment(task_id);
private_pop_segment(task_id) = private_push_segment(task_id);
private_push_segment(task_id) = tmp;
} else if (!StealPopSegmentFromGlobal(task_id)) {
return false;
}
bool success = private_pop_segment(task_id)->Pop(entry);
USE(success);
DCHECK(success);
}
return true;
}
size_t LocalPushSegmentSize(int task_id) const {
return private_push_segment(task_id)->Size();
}
bool IsLocalEmpty(int task_id) const {
return private_pop_segment(task_id)->IsEmpty() &&
private_push_segment(task_id)->IsEmpty();
}
bool IsGlobalPoolEmpty() const { return global_pool_.IsEmpty(); }
bool IsEmpty() const {
if (!AreLocalsEmpty()) return false;
return IsGlobalPoolEmpty();
}
bool AreLocalsEmpty() const {
for (int i = 0; i < num_tasks_; i++) {
if (!IsLocalEmpty(i)) return false;
}
return true;
}
bool IsLocalViewEmpty(int task_id) const {
return IsLocalEmpty(task_id) && IsGlobalPoolEmpty();
}
size_t LocalSize(int task_id) const {
return private_pop_segment(task_id)->Size() +
private_push_segment(task_id)->Size();
}
// Thread-safe but may return an outdated result.
size_t GlobalPoolSize() const { return global_pool_.Size(); }
// Clears all segments. Frees the global segment pool.
//
// Assumes that no other tasks are running.
void Clear() {
for (int i = 0; i < num_tasks_; i++) {
private_pop_segment(i)->Clear();
private_push_segment(i)->Clear();
}
global_pool_.Clear();
}
// Calls the specified callback on each element of the deques and replaces
// the element with the result of the callback.
// The signature of the callback is
// bool Callback(EntryType old, EntryType* new).
// If the callback returns |false| then the element is removed from the
// worklist. Otherwise the |new| entry is updated.
//
// Assumes that no other tasks are running.
// These functions are not thread-safe. They should be called only
// if all local marking worklists that use the current worklist have
// been published and are empty.
void Clear();
template <typename Callback>
void Update(Callback callback) {
for (int i = 0; i < num_tasks_; i++) {
private_pop_segment(i)->Update(callback);
private_push_segment(i)->Update(callback);
}
global_pool_.Update(callback);
}
// Calls the specified callback on each element of the deques.
// The signature of the callback is:
// void Callback(EntryType entry).
//
// Assumes that no other tasks are running.
void Update(Callback callback);
template <typename Callback>
void Iterate(Callback callback) {
for (int i = 0; i < num_tasks_; i++) {
private_pop_segment(i)->Iterate(callback);
private_push_segment(i)->Iterate(callback);
}
global_pool_.Iterate(callback);
}
template <typename Callback>
void IterateGlobalPool(Callback callback) {
global_pool_.Iterate(callback);
}
void FlushToGlobal(int task_id) {
PublishPushSegmentToGlobal(task_id);
PublishPopSegmentToGlobal(task_id);
}
void MergeGlobalPool(Worklist* other) {
global_pool_.Merge(&other->global_pool_);
}
void Iterate(Callback callback);
private:
FRIEND_TEST(CppgcWorkListTest, SegmentCreate);
FRIEND_TEST(CppgcWorkListTest, SegmentPush);
FRIEND_TEST(CppgcWorkListTest, SegmentPushPop);
FRIEND_TEST(CppgcWorkListTest, SegmentIsEmpty);
FRIEND_TEST(CppgcWorkListTest, SegmentIsFull);
FRIEND_TEST(CppgcWorkListTest, SegmentClear);
FRIEND_TEST(CppgcWorkListTest, SegmentFullPushFails);
FRIEND_TEST(CppgcWorkListTest, SegmentEmptyPopFails);
FRIEND_TEST(CppgcWorkListTest, SegmentUpdateFalse);
FRIEND_TEST(CppgcWorkListTest, SegmentUpdate);
class Segment {
public:
static const size_t kCapacity = kSegmentCapacity;
Segment() : index_(0) {}
bool Push(EntryType entry) {
if (IsFull()) return false;
entries_[index_++] = entry;
return true;
}
bool Pop(EntryType* entry) {
if (IsEmpty()) return false;
*entry = entries_[--index_];
return true;
}
size_t Size() const { return index_; }
bool IsEmpty() const { return index_ == 0; }
bool IsFull() const { return index_ == kCapacity; }
void Clear() { index_ = 0; }
template <typename Callback>
void Update(Callback callback) {
size_t new_index = 0;
for (size_t i = 0; i < index_; i++) {
if (callback(entries_[i], &entries_[new_index])) {
new_index++;
}
}
index_ = new_index;
}
template <typename Callback>
void Iterate(Callback callback) const {
for (size_t i = 0; i < index_; i++) {
callback(entries_[i]);
}
}
Segment* next() const { return next_; }
void set_next(Segment* segment) { next_ = segment; }
private:
Segment* next_;
size_t index_;
EntryType entries_[kCapacity];
};
struct PrivateSegmentHolder {
Segment* private_push_segment;
Segment* private_pop_segment;
char cache_line_padding[64];
};
class GlobalPool {
public:
GlobalPool() : top_(nullptr) {}
// Swaps contents, not thread safe.
void Swap(GlobalPool& other) {
Segment* temp = top_;
set_top(other.top_);
other.set_top(temp);
size_t other_size = other.size_.exchange(
size_.load(std::memory_order_relaxed), std::memory_order_relaxed);
size_.store(other_size, std::memory_order_relaxed);
}
V8_INLINE void Push(Segment* segment) {
v8::base::MutexGuard guard(&lock_);
segment->set_next(top_);
set_top(segment);
size_.fetch_add(1, std::memory_order_relaxed);
}
V8_INLINE bool Pop(Segment** segment) {
v8::base::MutexGuard guard(&lock_);
if (top_) {
DCHECK_LT(0U, size_);
size_.fetch_sub(1, std::memory_order_relaxed);
*segment = top_;
set_top(top_->next());
return true;
}
return false;
}
V8_INLINE bool IsEmpty() const {
return v8::base::AsAtomicPtr(&top_)->load(std::memory_order_relaxed) ==
nullptr;
}
V8_INLINE size_t Size() const {
// It is safe to read |size_| without a lock since this variable is
// atomic, keeping in mind that threads may not immediately see the new
// value when it is updated.
return size_.load(std::memory_order_relaxed);
}
void Clear() {
v8::base::MutexGuard guard(&lock_);
size_.store(0, std::memory_order_relaxed);
Segment* current = top_;
while (current) {
Segment* tmp = current;
current = current->next();
delete tmp;
}
set_top(nullptr);
}
// See Worklist::Update.
template <typename Callback>
void Update(Callback callback) {
v8::base::MutexGuard guard(&lock_);
Segment* prev = nullptr;
Segment* current = top_;
while (current) {
current->Update(callback);
if (current->IsEmpty()) {
DCHECK_LT(0U, size_);
size_.fetch_sub(1, std::memory_order_relaxed);
if (!prev) {
top_ = current->next();
} else {
prev->set_next(current->next());
}
Segment* tmp = current;
current = current->next();
delete tmp;
} else {
prev = current;
current = current->next();
}
}
}
// See Worklist::Iterate.
template <typename Callback>
void Iterate(Callback callback) {
v8::base::MutexGuard guard(&lock_);
for (Segment* current = top_; current; current = current->next()) {
current->Iterate(callback);
}
}
void Merge(GlobalPool* other) {
Segment* top = nullptr;
size_t other_size = 0;
{
v8::base::MutexGuard guard(&other->lock_);
if (!other->top_) return;
top = other->top_;
other_size = other->size_.load(std::memory_order_relaxed);
other->size_.store(0, std::memory_order_relaxed);
other->set_top(nullptr);
}
// It's safe to iterate through these segments because the top was
// extracted from |other|.
Segment* end = top;
while (end->next()) end = end->next();
{
v8::base::MutexGuard guard(&lock_);
size_.fetch_add(other_size, std::memory_order_relaxed);
end->set_next(top_);
set_top(top);
}
}
void* operator new(size_t, void* location) = delete;
void* operator new(size_t) = delete;
private:
void set_top(Segment* segment) {
v8::base::AsAtomicPtr(&top_)->store(segment, std::memory_order_relaxed);
}
v8::base::Mutex lock_;
Segment* top_;
std::atomic<size_t> size_{0};
};
V8_INLINE Segment*& private_push_segment(int task_id) {
return private_segments_[task_id].private_push_segment;
void set_top(Segment* segment) {
v8::base::AsAtomicPtr(&top_)->store(segment, std::memory_order_relaxed);
}
V8_INLINE Segment* const& private_push_segment(int task_id) const {
return private_segments_[task_id].private_push_segment;
}
v8::base::Mutex lock_;
Segment* top_ = nullptr;
std::atomic<size_t> size_{0};
};
V8_INLINE Segment*& private_pop_segment(int task_id) {
return private_segments_[task_id].private_pop_segment;
}
// Pushes |segment| onto the global segment list (LIFO). The list is
// protected by |lock_|; |size_| is additionally kept in an atomic so it can
// be read without the lock (see Size()).
template <typename EntryType, int SegmentSize>
void Worklist<EntryType, SegmentSize>::Push(Segment* segment) {
v8::base::MutexGuard guard(&lock_);
segment->set_next(top_);
set_top(segment);
size_.fetch_add(1, std::memory_order_relaxed);
}
V8_INLINE Segment* const& private_pop_segment(int task_id) const {
return private_segments_[task_id].private_pop_segment;
// Pops the most recently pushed segment into |*segment|. Returns false if
// the global list is empty; |*segment| is left untouched in that case.
template <typename EntryType, int SegmentSize>
bool Worklist<EntryType, SegmentSize>::Pop(Segment** segment) {
v8::base::MutexGuard guard(&lock_);
if (top_ != nullptr) {
DCHECK_LT(0U, size_);
size_.fetch_sub(1, std::memory_order_relaxed);
*segment = top_;
set_top(top_->next());
return true;
}
return false;
}
V8_INLINE void PublishPushSegmentToGlobal(int task_id) {
if (!private_push_segment(task_id)->IsEmpty()) {
global_pool_.Push(private_push_segment(task_id));
private_push_segment(task_id) = NewSegment();
// Lock-free emptiness check: reads |top_| through an atomic view with
// relaxed ordering, so the result may be momentarily stale under
// concurrent pushes/pops.
template <typename EntryType, int SegmentSize>
bool Worklist<EntryType, SegmentSize>::IsEmpty() {
return v8::base::AsAtomicPtr(&top_)->load(std::memory_order_relaxed) ==
nullptr;
}
// Returns the number of segments currently in the global list.
template <typename EntryType, int SegmentSize>
size_t Worklist<EntryType, SegmentSize>::Size() {
// It is safe to read |size_| without a lock since this variable is
// atomic, keeping in mind that threads may not immediately see the new
// value when it is updated.
return size_.load(std::memory_order_relaxed);
}
// Deletes every segment in the global list and resets the size to zero.
// Takes |lock_|, but callers are expected to ensure no Local is still
// publishing into this worklist (see the comment at the declaration site).
template <typename EntryType, int SegmentSize>
void Worklist<EntryType, SegmentSize>::Clear() {
v8::base::MutexGuard guard(&lock_);
size_.store(0, std::memory_order_relaxed);
Segment* current = top_;
while (current != nullptr) {
Segment* tmp = current;
current = current->next();
delete tmp;
}
set_top(nullptr);
}
// Applies |callback| to every entry of every segment (via Segment::Update,
// which drops entries for which the callback returns false). Segments that
// become empty are unlinked and deleted; |size_| is adjusted once at the
// end with the total number of deleted segments.
template <typename EntryType, int SegmentSize>
template <typename Callback>
void Worklist<EntryType, SegmentSize>::Update(Callback callback) {
v8::base::MutexGuard guard(&lock_);
Segment* prev = nullptr;
Segment* current = top_;
size_t num_deleted = 0;
while (current != nullptr) {
current->Update(callback);
if (current->IsEmpty()) {
DCHECK_LT(0U, size_);
++num_deleted;
// Unlink |current| from the singly-linked list.
if (prev == nullptr) {
top_ = current->next();
} else {
prev->set_next(current->next());
}
Segment* tmp = current;
current = current->next();
delete tmp;
} else {
prev = current;
current = current->next();
}
}
size_.fetch_sub(num_deleted, std::memory_order_relaxed);
}
V8_INLINE void PublishPopSegmentToGlobal(int task_id) {
if (!private_pop_segment(task_id)->IsEmpty()) {
global_pool_.Push(private_pop_segment(task_id));
private_pop_segment(task_id) = NewSegment();
}
// Invokes |callback| on every entry of every segment while holding |lock_|.
// Unlike Update(), this does not modify the worklist.
template <typename EntryType, int SegmentSize>
template <typename Callback>
void Worklist<EntryType, SegmentSize>::Iterate(Callback callback) {
v8::base::MutexGuard guard(&lock_);
for (Segment* current = top_; current != nullptr; current = current->next()) {
current->Iterate(callback);
}
}
template <typename EntryType, int SegmentSize>
void Worklist<EntryType, SegmentSize>::Merge(
Worklist<EntryType, SegmentSize>* other) {
Segment* top = nullptr;
size_t other_size = 0;
{
v8::base::MutexGuard guard(&other->lock_);
if (!other->top_) return;
top = other->top_;
other_size = other->size_.load(std::memory_order_relaxed);
other->size_.store(0, std::memory_order_relaxed);
other->set_top(nullptr);
}
V8_INLINE bool StealPopSegmentFromGlobal(int task_id) {
if (global_pool_.IsEmpty()) return false;
Segment* new_segment = nullptr;
if (global_pool_.Pop(&new_segment)) {
delete private_pop_segment(task_id);
private_pop_segment(task_id) = new_segment;
return true;
}
return false;
}
// It's safe to iterate through these segments because the top was
// extracted from |other|.
Segment* end = top;
while (end->next()) end = end->next();
V8_INLINE Segment* NewSegment() {
{
v8::base::MutexGuard guard(&lock_);
size_.fetch_add(other_size, std::memory_order_relaxed);
end->set_next(top_);
set_top(top);
}
}
// A fixed-capacity stack of entries. Segments are chained through |next_|
// to form the worklist's global list and are the unit of work transfer
// between Locals and the global Worklist.
template <typename EntryType, int SegmentSize>
class Worklist<EntryType, SegmentSize>::Segment {
public:
static const size_t kSize = SegmentSize;
Segment() = default;
// Push/Pop return false when the segment is full/empty, respectively.
bool Push(EntryType entry);
bool Pop(EntryType* entry);
size_t Size() const { return index_; }
bool IsEmpty() const { return index_ == 0; }
bool IsFull() const { return index_ == kSize; }
void Clear() { index_ = 0; }
// See Worklist::Update / Worklist::Iterate for the callback contracts.
template <typename Callback>
void Update(Callback callback);
template <typename Callback>
void Iterate(Callback callback) const;
// Intrusive list linkage used by the global worklist.
Segment* next() const { return next_; }
void set_next(Segment* segment) { next_ = segment; }
private:
Segment* next_ = nullptr;
size_t index_ = 0;  // Number of occupied slots in |entries_|.
EntryType entries_[kSize];
};
// Appends |entry|; returns false (without modifying the segment) when full.
template <typename EntryType, int SegmentSize>
bool Worklist<EntryType, SegmentSize>::Segment::Push(EntryType entry) {
if (IsFull()) return false;
entries_[index_++] = entry;
return true;
}
// Removes the most recently pushed entry into |*entry|; returns false
// (leaving |*entry| untouched) when the segment is empty.
template <typename EntryType, int SegmentSize>
bool Worklist<EntryType, SegmentSize>::Segment::Pop(EntryType* entry) {
if (IsEmpty()) return false;
*entry = entries_[--index_];
return true;
}
// In-place filter/map: |callback(old, &new)| may rewrite each entry; a
// false return drops the entry. Surviving entries are compacted to the
// front and |index_| is shrunk accordingly.
template <typename EntryType, int SegmentSize>
template <typename Callback>
void Worklist<EntryType, SegmentSize>::Segment::Update(Callback callback) {
size_t new_index = 0;
for (size_t i = 0; i < index_; i++) {
if (callback(entries_[i], &entries_[new_index])) {
new_index++;
}
}
index_ = new_index;
}
// Calls |callback| on each stored entry, oldest first; the segment is not
// modified.
template <typename EntryType, int SegmentSize>
template <typename Callback>
void Worklist<EntryType, SegmentSize>::Segment::Iterate(
    Callback callback) const {
  const EntryType* const end = entries_ + index_;
  for (const EntryType* it = entries_; it != end; ++it) {
    callback(*it);
  }
}
// A thread-local view of the marking worklist. A Local buffers entries in
// two private segments (one for pushing, one for popping) and only touches
// the shared Worklist when a segment fills up, drains, or is explicitly
// published.
template <typename EntryType, int SegmentSize>
class Worklist<EntryType, SegmentSize>::Local {
 public:
  using ItemType = EntryType;

  // A default-constructed Local is unusable until it is move-assigned from
  // a Local that was constructed against a Worklist.
  Local() = default;
  explicit Local(Worklist<EntryType, SegmentSize>* worklist);
  ~Local();

  Local(Local&&) V8_NOEXCEPT;
  Local& operator=(Local&&) V8_NOEXCEPT;

  // Disable copying since having multiple copies of the same
  // local marking worklist is unsafe.
  Local(const Local&) = delete;
  Local& operator=(const Local& other) = delete;

  void Push(EntryType entry);
  bool Pop(EntryType* entry);

  bool IsLocalAndGlobalEmpty() const;
  bool IsLocalEmpty() const;
  bool IsGlobalEmpty() const;

  // Moves locally buffered segments to the shared worklist, making their
  // entries visible to other Locals.
  void Publish();
  // Publishes |other| and merges its backing worklist into this one's.
  void Merge(Worklist<EntryType, SegmentSize>::Local* other);

  // Number of entries currently buffered in the push segment.
  size_t PushSegmentSize() const { return push_segment_->Size(); }

 private:
  void PublishPushSegment();
  void PublishPopSegment();
  bool StealPopSegment();

  Segment* NewSegment() const {
    // Bottleneck for filtering in crash dumps.
    return new Segment();
  }

  Worklist<EntryType, SegmentSize>* worklist_ = nullptr;
  Segment* push_segment_ = nullptr;
  Segment* pop_segment_ = nullptr;
};
// Creates a Local view over |worklist| with fresh (empty) push and pop
// segments.
template <typename EntryType, int SegmentSize>
Worklist<EntryType, SegmentSize>::Local::Local(
    Worklist<EntryType, SegmentSize>* worklist)
    : worklist_(worklist) {
  push_segment_ = NewSegment();
  pop_segment_ = NewSegment();
}
// Destroys the local view. Both segments must already be empty (drained or
// published); destroying a Local with unpublished entries would silently
// lose work, hence the CHECKs.
template <typename EntryType, int SegmentSize>
Worklist<EntryType, SegmentSize>::Local::~Local() {
// Segments may be null when this Local was moved from.
CHECK_IMPLIES(push_segment_, push_segment_->IsEmpty());
CHECK_IMPLIES(pop_segment_, pop_segment_->IsEmpty());
delete push_segment_;
delete pop_segment_;
}
// Move constructor: transfers the worklist pointer and segment ownership,
// leaving |other| in the empty (moved-from) state.
template <typename EntryType, int SegmentSize>
Worklist<EntryType, SegmentSize>::Local::Local(
    Worklist<EntryType, SegmentSize>::Local&& other) V8_NOEXCEPT
    : worklist_(other.worklist_),
      push_segment_(other.push_segment_),
      pop_segment_(other.pop_segment_) {
  other.worklist_ = nullptr;
  other.push_segment_ = nullptr;
  other.pop_segment_ = nullptr;
}
// Move assignment. Only valid on a default-constructed (or moved-from)
// Local, as enforced by the DCHECKs: assigning over live segments would
// leak them.
template <typename EntryType, int SegmentSize>
typename Worklist<EntryType, SegmentSize>::Local&
Worklist<EntryType, SegmentSize>::Local::operator=(
    Worklist<EntryType, SegmentSize>::Local&& other) V8_NOEXCEPT {
  if (this == &other) return *this;
  DCHECK_NULL(worklist_);
  DCHECK_NULL(push_segment_);
  DCHECK_NULL(pop_segment_);
  worklist_ = other.worklist_;
  push_segment_ = other.push_segment_;
  pop_segment_ = other.pop_segment_;
  other.worklist_ = nullptr;
  other.push_segment_ = nullptr;
  other.pop_segment_ = nullptr;
  return *this;
}
// Pushes |entry| onto the local push segment. When the segment is full it
// is published to the shared worklist and the push is retried on the fresh
// replacement segment, which is guaranteed to succeed.
template <typename EntryType, int SegmentSize>
void Worklist<EntryType, SegmentSize>::Local::Push(EntryType entry) {
  const bool fits = push_segment_->Push(entry);
  if (V8_UNLIKELY(!fits)) {
    PublishPushSegment();
    const bool success = push_segment_->Push(entry);
    USE(success);
    DCHECK(success);
  }
}
// Pops an entry into |*entry|. Sources are tried in order: the local pop
// segment, then the local push segment (swapped in), then a segment stolen
// from the shared worklist. Returns false only when all three are
// exhausted.
template <typename EntryType, int SegmentSize>
bool Worklist<EntryType, SegmentSize>::Local::Pop(EntryType* entry) {
  if (pop_segment_->Pop(entry)) return true;
  if (push_segment_->IsEmpty()) {
    // Nothing buffered locally; try to steal from the shared worklist.
    if (!StealPopSegment()) return false;
  } else {
    std::swap(push_segment_, pop_segment_);
  }
  const bool success = pop_segment_->Pop(entry);
  USE(success);
  DCHECK(success);
  return true;
}
// Returns true iff neither the local segments nor the shared worklist hold
// any entries.
template <typename EntryType, int SegmentSize>
bool Worklist<EntryType, SegmentSize>::Local::IsLocalAndGlobalEmpty() const {
  if (!IsLocalEmpty()) return false;
  return IsGlobalEmpty();
}
// Returns true iff both thread-local segments are empty. Entries already
// published to the shared worklist are not considered.
template <typename EntryType, int SegmentSize>
bool Worklist<EntryType, SegmentSize>::Local::IsLocalEmpty() const {
  return pop_segment_->IsEmpty() && push_segment_->IsEmpty();
}
// Returns true iff the shared worklist has no published segments. Local,
// unpublished entries are not considered.
template <typename EntryType, int SegmentSize>
bool Worklist<EntryType, SegmentSize>::Local::IsGlobalEmpty() const {
return worklist_->IsEmpty();
}
// Moves any non-empty local segments to the shared worklist so their
// entries become visible to (and stealable by) other Locals.
template <typename EntryType, int SegmentSize>
void Worklist<EntryType, SegmentSize>::Local::Publish() {
  if (!push_segment_->IsEmpty()) PublishPushSegment();
  if (!pop_segment_->IsEmpty()) PublishPopSegment();
}
// Merges all entries reachable through |other| into this Local's backing
// worklist. |other|'s local segments are published first so no buffered
// entries are left behind.
template <typename EntryType, int SegmentSize>
void Worklist<EntryType, SegmentSize>::Local::Merge(
Worklist<EntryType, SegmentSize>::Local* other) {
other->Publish();
worklist_->Merge(other->worklist_);
}
// Hands the current push segment over to the shared worklist and replaces
// it with a fresh empty segment.
template <typename EntryType, int SegmentSize>
void Worklist<EntryType, SegmentSize>::Local::PublishPushSegment() {
  Segment* const published = push_segment_;
  worklist_->Push(published);
  push_segment_ = NewSegment();
}
// Hands the current pop segment over to the shared worklist and replaces
// it with a fresh empty segment.
template <typename EntryType, int SegmentSize>
void Worklist<EntryType, SegmentSize>::Local::PublishPopSegment() {
  Segment* const published = pop_segment_;
  worklist_->Push(published);
  pop_segment_ = NewSegment();
}
// Attempts to replace the exhausted pop segment with one popped from the
// shared worklist. Returns true on success; false when the worklist is (or
// becomes) empty.
template <typename EntryType, int SegmentSize>
bool Worklist<EntryType, SegmentSize>::Local::StealPopSegment() {
  if (worklist_->IsEmpty()) return false;
  Segment* stolen = nullptr;
  if (!worklist_->Pop(&stolen)) return false;
  delete pop_segment_;
  pop_segment_ = stolen;
  return true;
}
} // namespace internal
} // namespace cppgc

View File

@ -48,6 +48,7 @@ class TestMarkingVisitor : public MarkingVisitor {
public:
explicit TestMarkingVisitor(Marker* marker)
: MarkingVisitor(marker->heap(), marker->MarkingStateForTesting()) {}
~TestMarkingVisitor() { marking_state_.Publish(); }
};
} // namespace

View File

@ -3,7 +3,6 @@
// found in the LICENSE file.
#include "src/heap/cppgc/worklist.h"
#include "test/unittests/heap/cppgc/tests.h"
namespace cppgc {
@ -48,7 +47,7 @@ TEST(CppgcWorkListTest, SegmentIsEmpty) {
TEST(CppgcWorkListTest, SegmentIsFull) {
TestWorklist::Segment segment;
EXPECT_FALSE(segment.IsFull());
for (size_t i = 0; i < TestWorklist::Segment::kCapacity; i++) {
for (size_t i = 0; i < TestWorklist::Segment::kSize; i++) {
EXPECT_TRUE(segment.Push(nullptr));
}
EXPECT_TRUE(segment.IsFull());
@ -60,7 +59,7 @@ TEST(CppgcWorkListTest, SegmentClear) {
EXPECT_FALSE(segment.IsEmpty());
segment.Clear();
EXPECT_TRUE(segment.IsEmpty());
for (size_t i = 0; i < TestWorklist::Segment::kCapacity; i++) {
for (size_t i = 0; i < TestWorklist::Segment::kSize; i++) {
EXPECT_TRUE(segment.Push(nullptr));
}
}
@ -68,7 +67,7 @@ TEST(CppgcWorkListTest, SegmentClear) {
TEST(CppgcWorkListTest, SegmentFullPushFails) {
TestWorklist::Segment segment;
EXPECT_FALSE(segment.IsFull());
for (size_t i = 0; i < TestWorklist::Segment::kCapacity; i++) {
for (size_t i = 0; i < TestWorklist::Segment::kSize; i++) {
EXPECT_TRUE(segment.Push(nullptr));
}
EXPECT_TRUE(segment.IsFull());
@ -109,87 +108,71 @@ TEST(CppgcWorkListTest, SegmentUpdate) {
TEST(CppgcWorkListTest, CreateEmpty) {
TestWorklist worklist;
TestWorklist::View worklist_view(&worklist, 0);
EXPECT_TRUE(worklist_view.IsLocalEmpty());
TestWorklist::Local worklist_local(&worklist);
EXPECT_TRUE(worklist_local.IsLocalEmpty());
EXPECT_TRUE(worklist.IsEmpty());
}
TEST(CppgcWorkListTest, LocalPushPop) {
TestWorklist worklist;
TestWorklist::View worklist_view(&worklist, 0);
TestWorklist::Local worklist_local(&worklist);
SomeObject dummy;
SomeObject* retrieved = nullptr;
EXPECT_TRUE(worklist_view.Push(&dummy));
EXPECT_FALSE(worklist_view.IsLocalEmpty());
EXPECT_TRUE(worklist_view.Pop(&retrieved));
worklist_local.Push(&dummy);
EXPECT_FALSE(worklist_local.IsLocalEmpty());
EXPECT_TRUE(worklist_local.Pop(&retrieved));
EXPECT_EQ(&dummy, retrieved);
}
TEST(CppgcWorkListTest, LocalIsBasedOnId) {
TestWorklist worklist;
// Use the same id.
TestWorklist::View worklist_view1(&worklist, 0);
TestWorklist::View worklist_view2(&worklist, 0);
SomeObject dummy;
SomeObject* retrieved = nullptr;
EXPECT_TRUE(worklist_view1.Push(&dummy));
EXPECT_FALSE(worklist_view1.IsLocalEmpty());
EXPECT_FALSE(worklist_view2.IsLocalEmpty());
EXPECT_TRUE(worklist_view2.Pop(&retrieved));
EXPECT_EQ(&dummy, retrieved);
EXPECT_TRUE(worklist_view1.IsLocalEmpty());
EXPECT_TRUE(worklist_view2.IsLocalEmpty());
}
TEST(CppgcWorkListTest, LocalPushStaysPrivate) {
TestWorklist worklist;
TestWorklist::View worklist_view1(&worklist, 0);
TestWorklist::View worklist_view2(&worklist, 1);
TestWorklist::Local worklist_view1(&worklist);
TestWorklist::Local worklist_view2(&worklist);
SomeObject dummy;
SomeObject* retrieved = nullptr;
EXPECT_TRUE(worklist.IsEmpty());
EXPECT_EQ(0U, worklist.GlobalPoolSize());
EXPECT_TRUE(worklist_view1.Push(&dummy));
EXPECT_FALSE(worklist.IsEmpty());
EXPECT_EQ(0U, worklist.GlobalPoolSize());
EXPECT_EQ(0U, worklist.Size());
worklist_view1.Push(&dummy);
EXPECT_EQ(0U, worklist.Size());
EXPECT_FALSE(worklist_view2.Pop(&retrieved));
EXPECT_EQ(nullptr, retrieved);
EXPECT_TRUE(worklist_view1.Pop(&retrieved));
EXPECT_EQ(&dummy, retrieved);
EXPECT_TRUE(worklist.IsEmpty());
EXPECT_EQ(0U, worklist.GlobalPoolSize());
EXPECT_EQ(0U, worklist.Size());
}
TEST(CppgcWorkListTest, GlobalUpdateNull) {
TestWorklist worklist;
TestWorklist::View worklist_view(&worklist, 0);
TestWorklist::Local worklist_local(&worklist);
SomeObject* object;
object = reinterpret_cast<SomeObject*>(&object);
for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
EXPECT_TRUE(worklist_view.Push(object));
for (size_t i = 0; i < TestWorklist::kSegmentSize; i++) {
worklist_local.Push(object);
}
EXPECT_TRUE(worklist_view.Push(object));
worklist_local.Push(object);
worklist_local.Publish();
worklist.Update([](SomeObject* object, SomeObject** out) { return false; });
EXPECT_TRUE(worklist.IsEmpty());
EXPECT_EQ(0U, worklist.GlobalPoolSize());
EXPECT_EQ(0U, worklist.Size());
}
TEST(CppgcWorkListTest, GlobalUpdate) {
TestWorklist worklist;
TestWorklist::View worklist_view(&worklist, 0);
TestWorklist::Local worklist_local(&worklist);
SomeObject* objectA = nullptr;
objectA = reinterpret_cast<SomeObject*>(&objectA);
SomeObject* objectB = nullptr;
objectB = reinterpret_cast<SomeObject*>(&objectB);
SomeObject* objectC = nullptr;
objectC = reinterpret_cast<SomeObject*>(&objectC);
for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
EXPECT_TRUE(worklist_view.Push(objectA));
for (size_t i = 0; i < TestWorklist::kSegmentSize; i++) {
worklist_local.Push(objectA);
}
for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
EXPECT_TRUE(worklist_view.Push(objectB));
for (size_t i = 0; i < TestWorklist::kSegmentSize; i++) {
worklist_local.Push(objectB);
}
EXPECT_TRUE(worklist_view.Push(objectA));
worklist_local.Push(objectA);
worklist_local.Publish();
worklist.Update([objectA, objectC](SomeObject* object, SomeObject** out) {
if (object != objectA) {
*out = objectC;
@ -197,146 +180,144 @@ TEST(CppgcWorkListTest, GlobalUpdate) {
}
return false;
});
for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
for (size_t i = 0; i < TestWorklist::kSegmentSize; i++) {
SomeObject* object;
EXPECT_TRUE(worklist_view.Pop(&object));
EXPECT_TRUE(worklist_local.Pop(&object));
EXPECT_EQ(object, objectC);
}
}
TEST(CppgcWorkListTest, FlushToGlobalPushSegment) {
TestWorklist worklist;
TestWorklist::View worklist_view0(&worklist, 0);
TestWorklist::View worklist_view1(&worklist, 1);
TestWorklist::Local worklist_local0(&worklist);
TestWorklist::Local worklist_local1(&worklist);
SomeObject* object = nullptr;
SomeObject* objectA = nullptr;
objectA = reinterpret_cast<SomeObject*>(&objectA);
EXPECT_TRUE(worklist_view0.Push(objectA));
worklist.FlushToGlobal(0);
EXPECT_EQ(1U, worklist.GlobalPoolSize());
EXPECT_TRUE(worklist_view1.Pop(&object));
worklist_local0.Push(objectA);
worklist_local0.Publish();
EXPECT_EQ(1U, worklist.Size());
EXPECT_TRUE(worklist_local1.Pop(&object));
}
TEST(CppgcWorkListTest, FlushToGlobalPopSegment) {
TestWorklist worklist;
TestWorklist::View worklist_view0(&worklist, 0);
TestWorklist::View worklist_view1(&worklist, 1);
TestWorklist::Local worklist_local0(&worklist);
TestWorklist::Local worklist_local1(&worklist);
SomeObject* object = nullptr;
SomeObject* objectA = nullptr;
objectA = reinterpret_cast<SomeObject*>(&objectA);
EXPECT_TRUE(worklist_view0.Push(objectA));
EXPECT_TRUE(worklist_view0.Push(objectA));
EXPECT_TRUE(worklist_view0.Pop(&object));
worklist.FlushToGlobal(0);
EXPECT_EQ(1U, worklist.GlobalPoolSize());
EXPECT_TRUE(worklist_view1.Pop(&object));
worklist_local0.Push(objectA);
worklist_local0.Push(objectA);
worklist_local0.Pop(&object);
worklist_local0.Publish();
EXPECT_EQ(1U, worklist.Size());
EXPECT_TRUE(worklist_local1.Pop(&object));
}
TEST(CppgcWorkListTest, Clear) {
TestWorklist worklist;
TestWorklist::View worklist_view(&worklist, 0);
TestWorklist::Local worklist_local(&worklist);
SomeObject* object;
object = reinterpret_cast<SomeObject*>(&object);
for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
EXPECT_TRUE(worklist_view.Push(object));
}
EXPECT_TRUE(worklist_view.Push(object));
EXPECT_EQ(1U, worklist.GlobalPoolSize());
worklist_local.Push(object);
worklist_local.Publish();
EXPECT_EQ(1U, worklist.Size());
worklist.Clear();
EXPECT_TRUE(worklist.IsEmpty());
EXPECT_EQ(0U, worklist.GlobalPoolSize());
EXPECT_EQ(0U, worklist.Size());
}
TEST(CppgcWorkListTest, SingleSegmentSteal) {
TestWorklist worklist;
TestWorklist::View worklist_view1(&worklist, 0);
TestWorklist::View worklist_view2(&worklist, 1);
TestWorklist::Local worklist_local1(&worklist);
TestWorklist::Local worklist_local2(&worklist);
SomeObject dummy;
for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
EXPECT_TRUE(worklist_view1.Push(&dummy));
for (size_t i = 0; i < TestWorklist::kSegmentSize; i++) {
worklist_local1.Push(&dummy);
}
SomeObject* retrieved = nullptr;
// One more push/pop to publish the full segment.
EXPECT_TRUE(worklist_view1.Push(nullptr));
EXPECT_TRUE(worklist_view1.Pop(&retrieved));
worklist_local1.Push(nullptr);
EXPECT_TRUE(worklist_local1.Pop(&retrieved));
EXPECT_EQ(nullptr, retrieved);
EXPECT_EQ(1U, worklist.GlobalPoolSize());
EXPECT_EQ(1U, worklist.Size());
// Stealing.
for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
EXPECT_TRUE(worklist_view2.Pop(&retrieved));
for (size_t i = 0; i < TestWorklist::kSegmentSize; i++) {
EXPECT_TRUE(worklist_local2.Pop(&retrieved));
EXPECT_EQ(&dummy, retrieved);
EXPECT_FALSE(worklist_view1.Pop(&retrieved));
EXPECT_FALSE(worklist_local1.Pop(&retrieved));
}
EXPECT_TRUE(worklist.IsEmpty());
EXPECT_EQ(0U, worklist.GlobalPoolSize());
EXPECT_EQ(0U, worklist.Size());
}
TEST(CppgcWorkListTest, MultipleSegmentsStolen) {
TestWorklist worklist;
TestWorklist::View worklist_view1(&worklist, 0);
TestWorklist::View worklist_view2(&worklist, 1);
TestWorklist::View worklist_view3(&worklist, 2);
TestWorklist::Local worklist_local1(&worklist);
TestWorklist::Local worklist_local2(&worklist);
TestWorklist::Local worklist_local3(&worklist);
SomeObject dummy1;
SomeObject dummy2;
for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
EXPECT_TRUE(worklist_view1.Push(&dummy1));
for (size_t i = 0; i < TestWorklist::kSegmentSize; i++) {
worklist_local1.Push(&dummy1);
}
for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
EXPECT_TRUE(worklist_view1.Push(&dummy2));
for (size_t i = 0; i < TestWorklist::kSegmentSize; i++) {
worklist_local1.Push(&dummy2);
}
SomeObject* retrieved = nullptr;
SomeObject dummy3;
// One more push/pop to publish the full segment.
EXPECT_TRUE(worklist_view1.Push(&dummy3));
EXPECT_TRUE(worklist_view1.Pop(&retrieved));
worklist_local1.Push(&dummy3);
EXPECT_TRUE(worklist_local1.Pop(&retrieved));
EXPECT_EQ(&dummy3, retrieved);
EXPECT_EQ(2U, worklist.GlobalPoolSize());
EXPECT_EQ(2U, worklist.Size());
// Stealing.
EXPECT_TRUE(worklist_view2.Pop(&retrieved));
EXPECT_TRUE(worklist_local2.Pop(&retrieved));
SomeObject* const expect_bag2 = retrieved;
EXPECT_TRUE(worklist_view3.Pop(&retrieved));
EXPECT_TRUE(worklist_local3.Pop(&retrieved));
SomeObject* const expect_bag3 = retrieved;
EXPECT_EQ(0U, worklist.GlobalPoolSize());
EXPECT_EQ(0U, worklist.Size());
EXPECT_NE(expect_bag2, expect_bag3);
EXPECT_TRUE(expect_bag2 == &dummy1 || expect_bag2 == &dummy2);
EXPECT_TRUE(expect_bag3 == &dummy1 || expect_bag3 == &dummy2);
for (size_t i = 1; i < TestWorklist::kSegmentCapacity; i++) {
EXPECT_TRUE(worklist_view2.Pop(&retrieved));
for (size_t i = 1; i < TestWorklist::kSegmentSize; i++) {
EXPECT_TRUE(worklist_local2.Pop(&retrieved));
EXPECT_EQ(expect_bag2, retrieved);
EXPECT_FALSE(worklist_view1.Pop(&retrieved));
EXPECT_FALSE(worklist_local1.Pop(&retrieved));
}
for (size_t i = 1; i < TestWorklist::kSegmentCapacity; i++) {
EXPECT_TRUE(worklist_view3.Pop(&retrieved));
for (size_t i = 1; i < TestWorklist::kSegmentSize; i++) {
EXPECT_TRUE(worklist_local3.Pop(&retrieved));
EXPECT_EQ(expect_bag3, retrieved);
EXPECT_FALSE(worklist_view1.Pop(&retrieved));
EXPECT_FALSE(worklist_local1.Pop(&retrieved));
}
EXPECT_TRUE(worklist.IsEmpty());
}
TEST(CppgcWorkListTest, MergeGlobalPool) {
TestWorklist worklist1;
TestWorklist::View worklist_view1(&worklist1, 0);
TestWorklist::Local worklist_local1(&worklist1);
SomeObject dummy;
for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
EXPECT_TRUE(worklist_view1.Push(&dummy));
for (size_t i = 0; i < TestWorklist::kSegmentSize; i++) {
worklist_local1.Push(&dummy);
}
SomeObject* retrieved = nullptr;
// One more push/pop to publish the full segment.
EXPECT_TRUE(worklist_view1.Push(nullptr));
EXPECT_TRUE(worklist_view1.Pop(&retrieved));
worklist_local1.Push(nullptr);
EXPECT_TRUE(worklist_local1.Pop(&retrieved));
EXPECT_EQ(nullptr, retrieved);
EXPECT_EQ(1U, worklist1.GlobalPoolSize());
EXPECT_EQ(1U, worklist1.Size());
// Merging global pool into a new Worklist.
TestWorklist worklist2;
TestWorklist::View worklist_view2(&worklist2, 0);
EXPECT_EQ(0U, worklist2.GlobalPoolSize());
worklist2.MergeGlobalPool(&worklist1);
EXPECT_EQ(1U, worklist2.GlobalPoolSize());
TestWorklist::Local worklist_local2(&worklist2);
EXPECT_EQ(0U, worklist2.Size());
worklist2.Merge(&worklist1);
EXPECT_EQ(1U, worklist2.Size());
EXPECT_FALSE(worklist2.IsEmpty());
for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
EXPECT_TRUE(worklist_view2.Pop(&retrieved));
for (size_t i = 0; i < TestWorklist::kSegmentSize; i++) {
EXPECT_TRUE(worklist_local2.Pop(&retrieved));
EXPECT_EQ(&dummy, retrieved);
EXPECT_FALSE(worklist_view1.Pop(&retrieved));
EXPECT_FALSE(worklist_local1.Pop(&retrieved));
}
EXPECT_TRUE(worklist1.IsEmpty());
EXPECT_TRUE(worklist2.IsEmpty());

View File

@ -43,15 +43,12 @@ class ExpectWriteBarrierFires final : private IncrementalMarkingScope {
ExpectWriteBarrierFires(MarkerBase* marker,
std::initializer_list<void*> objects)
: IncrementalMarkingScope(marker),
marking_worklist_(
marker->MarkingWorklistsForTesting().marking_worklist(),
MarkingWorklists::kMutatorThreadId),
marking_worklist_(marker->MarkingStateForTesting().marking_worklist()),
write_barrier_worklist_(
marker->MarkingWorklistsForTesting().write_barrier_worklist(),
MarkingWorklists::kMutatorThreadId),
marker->MarkingStateForTesting().write_barrier_worklist()),
objects_(objects) {
EXPECT_TRUE(marking_worklist_.IsGlobalPoolEmpty());
EXPECT_TRUE(write_barrier_worklist_.IsGlobalPoolEmpty());
EXPECT_TRUE(marking_worklist_.IsGlobalEmpty());
EXPECT_TRUE(write_barrier_worklist_.IsGlobalEmpty());
for (void* object : objects) {
headers_.push_back(&HeapObjectHeader::FromPayload(object));
EXPECT_FALSE(headers_.back()->IsMarked());
@ -79,13 +76,13 @@ class ExpectWriteBarrierFires final : private IncrementalMarkingScope {
EXPECT_TRUE(header->IsMarked());
header->Unmark();
}
EXPECT_TRUE(marking_worklist_.IsGlobalPoolEmpty());
EXPECT_TRUE(write_barrier_worklist_.IsGlobalPoolEmpty());
EXPECT_TRUE(marking_worklist_.IsGlobalEmpty());
EXPECT_TRUE(write_barrier_worklist_.IsGlobalEmpty());
}
private:
MarkingWorklists::MarkingWorklist::View marking_worklist_;
MarkingWorklists::WriteBarrierWorklist::View write_barrier_worklist_;
MarkingWorklists::MarkingWorklist::Local& marking_worklist_;
MarkingWorklists::WriteBarrierWorklist::Local& write_barrier_worklist_;
std::vector<void*> objects_;
std::vector<HeapObjectHeader*> headers_;
};
@ -95,14 +92,11 @@ class ExpectNoWriteBarrierFires final : private IncrementalMarkingScope {
ExpectNoWriteBarrierFires(MarkerBase* marker,
std::initializer_list<void*> objects)
: IncrementalMarkingScope(marker),
marking_worklist_(
marker->MarkingWorklistsForTesting().marking_worklist(),
MarkingWorklists::kMutatorThreadId),
marking_worklist_(marker->MarkingStateForTesting().marking_worklist()),
write_barrier_worklist_(
marker->MarkingWorklistsForTesting().write_barrier_worklist(),
MarkingWorklists::kMutatorThreadId) {
EXPECT_TRUE(marking_worklist_.IsGlobalPoolEmpty());
EXPECT_TRUE(write_barrier_worklist_.IsGlobalPoolEmpty());
marker->MarkingStateForTesting().write_barrier_worklist()) {
EXPECT_TRUE(marking_worklist_.IsGlobalEmpty());
EXPECT_TRUE(write_barrier_worklist_.IsGlobalEmpty());
for (void* object : objects) {
auto* header = &HeapObjectHeader::FromPayload(object);
headers_.emplace_back(header, header->IsMarked());
@ -110,16 +104,16 @@ class ExpectNoWriteBarrierFires final : private IncrementalMarkingScope {
}
~ExpectNoWriteBarrierFires() {
EXPECT_TRUE(marking_worklist_.IsGlobalPoolEmpty());
EXPECT_TRUE(write_barrier_worklist_.IsGlobalPoolEmpty());
EXPECT_TRUE(marking_worklist_.IsGlobalEmpty());
EXPECT_TRUE(write_barrier_worklist_.IsGlobalEmpty());
for (const auto& pair : headers_) {
EXPECT_EQ(pair.second, pair.first->IsMarked());
}
}
private:
MarkingWorklists::MarkingWorklist::View marking_worklist_;
MarkingWorklists::WriteBarrierWorklist::View write_barrier_worklist_;
MarkingWorklists::MarkingWorklist::Local& marking_worklist_;
MarkingWorklists::WriteBarrierWorklist::Local& write_barrier_worklist_;
std::vector<std::pair<HeapObjectHeader*, bool /* was marked */>> headers_;
};