cppgc: Rework GC info creation

Previously, GCInfoTrait relied on the non-trivial constructor of a
function-local static object to register a new GCInfo object. The
generated code is required to be thread-safe, which the compiler
achieves by emitting guard variables around the static's initialization.
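
A rough sketch of the old pattern (RegisterSomewhere, RegisteredIndex, and
OldStyleTrait are simplified, hypothetical stand-ins for the removed
RegisteredGCInfoIndex machinery, not the actual cppgc API):

  // Hypothetical stand-ins, not the real cppgc types.
  #include <atomic>
  #include <cstdint>
  #include <iostream>

  using GCInfoIndex = uint16_t;

  // Hypothetical registry back end that hands out consecutive indices.
  GCInfoIndex RegisterSomewhere() {
    static std::atomic<GCInfoIndex> next_index{1};
    return next_index.fetch_add(1, std::memory_order_relaxed);
  }

  // Wrapper whose non-trivial constructor performs the registration.
  class RegisteredIndex {
   public:
    RegisteredIndex() : index_(RegisterSomewhere()) {}
    GCInfoIndex GetIndex() const { return index_; }

   private:
    const GCInfoIndex index_;
  };

  template <typename T>
  struct OldStyleTrait {
    static GCInfoIndex Index() {
      // Function-local static with a non-trivial constructor: the compiler
      // must make the first-time initialization thread-safe, so it emits a
      // guard variable plus guard acquire/release calls, once per type T.
      static const RegisteredIndex registered_index;
      return registered_index.GetIndex();
    }
  };

  int main() {
    std::cout << OldStyleTrait<int>::Index() << "\n";  // registers on first use
    std::cout << OldStyleTrait<int>::Index() << "\n";  // already initialized
  }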

The new version is similar to Blink's in that it relies on zero
initialization of a trivially constructible atomic.

Compared to the guard variables that are emitted per GCInfo registration,
the atomic creates less bloat (~20 bytes/type) and also results in a
better fast path.
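
A minimal sketch of the new scheme; EnsureIndexSlowPath is an assumed,
simplified stand-in for EnsureGCInfoIndex (the real slow path takes a lock
and re-checks the index, see the gc-info-table.cc hunk below):

  #include <atomic>
  #include <cstdint>
  #include <iostream>

  using GCInfoIndex = uint16_t;

  // Simplified slow path: registers once and publishes the index with release
  // ordering. The real implementation locks and re-checks registered_index so
  // that racing threads agree on a single index.
  GCInfoIndex EnsureIndexSlowPath(std::atomic<GCInfoIndex>& registered_index) {
    static std::atomic<GCInfoIndex> next_index{1};
    const GCInfoIndex new_index =
        next_index.fetch_add(1, std::memory_order_relaxed);
    registered_index.store(new_index, std::memory_order_release);
    return new_index;
  }

  template <typename T>
  struct NewStyleTrait {
    static GCInfoIndex Index() {
      // Trivially constructible atomic: zero-initialized as part of static
      // storage, so no guard variable is emitted. The fast path is a single
      // acquire load.
      static std::atomic<GCInfoIndex> registered_index;
      const GCInfoIndex index =
          registered_index.load(std::memory_order_acquire);
      return index ? index : EnsureIndexSlowPath(registered_index);
    }
  };

  int main() {
    std::cout << NewStyleTrait<int>::Index() << "\n";  // slow path, registers
    std::cout << NewStyleTrait<int>::Index() << "\n";  // acquire load only
  }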

Minimal example: https://godbolt.org/z/qrdTf8

Bug: chromium:1056170
Change-Id: I95efbbf035b655d0440c9477f5391e310e2b71fa
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2764750
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#73463}
Author: Michael Lippautz, 2021-03-17 11:16:16 +01:00 (committed by Commit Bot)
parent 8ea1fd79b3
commit d76064df4f
5 changed files with 78 additions and 52 deletions

include/cppgc/internal/gc-info.h

@@ -5,7 +5,8 @@
 #ifndef INCLUDE_CPPGC_INTERNAL_GC_INFO_H_
 #define INCLUDE_CPPGC_INTERNAL_GC_INFO_H_
 
-#include <stdint.h>
+#include <atomic>
+#include <cstdint>
 
 #include "cppgc/internal/finalizer-trait.h"
 #include "cppgc/internal/name-trait.h"
@@ -17,16 +18,11 @@ namespace internal {
 
 using GCInfoIndex = uint16_t;
 
-class V8_EXPORT RegisteredGCInfoIndex final {
- public:
-  RegisteredGCInfoIndex(FinalizationCallback finalization_callback,
-                        TraceCallback trace_callback,
-                        NameCallback name_callback, bool has_v_table);
-  GCInfoIndex GetIndex() const { return index_; }
-
- private:
-  const GCInfoIndex index_;
-};
+// Acquires a new GC info object and returns the index. In addition, also
+// updates `registered_index` atomically.
+V8_EXPORT GCInfoIndex
+EnsureGCInfoIndex(std::atomic<GCInfoIndex>& registered_index,
+                  FinalizationCallback, TraceCallback, NameCallback, bool);
 
 // Fold types based on finalizer behavior. Note that finalizer characteristics
 // align with trace behavior, i.e., destructors are virtual when trace methods
@@ -59,13 +55,17 @@ struct GCInfoFolding {
 // Trait determines how the garbage collector treats objects wrt. to traversing,
 // finalization, and naming.
 template <typename T>
-struct GCInfoTrait {
+struct GCInfoTrait final {
   static GCInfoIndex Index() {
     static_assert(sizeof(T), "T must be fully defined");
-    static const RegisteredGCInfoIndex registered_index(
-        FinalizerTrait<T>::kCallback, TraceTrait<T>::Trace,
-        NameTrait<T>::GetName, std::is_polymorphic<T>::value);
-    return registered_index.GetIndex();
+    static std::atomic<GCInfoIndex>
+        registered_index;  // Uses zero initialization.
+    const GCInfoIndex index = registered_index.load(std::memory_order_acquire);
+    return index ? index
+                 : EnsureGCInfoIndex(
+                       registered_index, FinalizerTrait<T>::kCallback,
+                       TraceTrait<T>::Trace, NameTrait<T>::GetName,
+                       std::is_polymorphic<T>::value);
   }
 };

src/heap/cppgc/gc-info-table.cc

@@ -125,11 +125,19 @@ void GCInfoTable::CheckMemoryIsZeroed(uintptr_t* base, size_t len) {
 #endif  // DEBUG
 }
 
-GCInfoIndex GCInfoTable::RegisterNewGCInfo(const GCInfo& info) {
+GCInfoIndex GCInfoTable::RegisterNewGCInfo(
+    std::atomic<GCInfoIndex>& registered_index, const GCInfo& info) {
   // Ensuring a new index involves current index adjustment as well as
   // potentially resizing the table. For simplicity we use a lock.
   v8::base::MutexGuard guard(&table_mutex_);
 
+  // Check the registered index again after taking the lock as some other
+  // thread may have registered the info at the same time.
+  GCInfoIndex index = registered_index.load(std::memory_order_relaxed);
+  if (index) {
+    return index;
+  }
+
   if (current_index_ == limit_) {
     Resize();
   }
@@ -137,6 +145,7 @@ GCInfoIndex GCInfoTable::RegisterNewGCInfo(const GCInfo& info) {
   GCInfoIndex new_index = current_index_++;
   CHECK_LT(new_index, GCInfoTable::kMaxIndex);
   table_[new_index] = info;
+  registered_index.store(new_index, std::memory_order_release);
   return new_index;
 }

src/heap/cppgc/gc-info-table.h

@@ -54,7 +54,7 @@ class V8_EXPORT GCInfoTable final {
   GCInfoTable(const GCInfoTable&) = delete;
   GCInfoTable& operator=(const GCInfoTable&) = delete;
 
-  GCInfoIndex RegisterNewGCInfo(const GCInfo& info);
+  GCInfoIndex RegisterNewGCInfo(std::atomic<uint16_t>&, const GCInfo& info);
 
   const GCInfo& GCInfoFromIndex(GCInfoIndex index) const {
     DCHECK_GE(index, kMinIndex);

src/heap/cppgc/gc-info.cc

@@ -9,12 +9,14 @@
 namespace cppgc {
 namespace internal {
 
-RegisteredGCInfoIndex::RegisteredGCInfoIndex(
-    FinalizationCallback finalization_callback, TraceCallback trace_callback,
-    NameCallback name_callback, bool has_v_table)
-    : index_(GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
-          {finalization_callback, trace_callback, name_callback,
-           has_v_table})) {}
+GCInfoIndex EnsureGCInfoIndex(std::atomic<GCInfoIndex>& registered_index,
+                              FinalizationCallback finalization_callback,
+                              TraceCallback trace_callback,
+                              NameCallback name_callback, bool has_v_table) {
+  return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+      registered_index,
+      {finalization_callback, trace_callback, name_callback, has_v_table});
+}
 
 }  // namespace internal
 }  // namespace cppgc

test/unittests/heap/cppgc/gc-info-unittest.cc

@@ -20,56 +20,72 @@ namespace {
 
 constexpr GCInfo GetEmptyGCInfo() { return {nullptr, nullptr, nullptr, false}; }
 
+class GCInfoTableTest : public ::testing::Test {
+ public:
+  GCInfoIndex RegisterNewGCInfoForTesting(const GCInfo& info) {
+    // Unused registered index will result in registering a new index.
+    std::atomic<GCInfoIndex> registered_index{0};
+    return table().RegisterNewGCInfo(registered_index, info);
+  }
+
+  void SetUp() override {
+    table_ = std::make_unique<GCInfoTable>(&page_allocator_);
+  }
+
+  void TearDown() override { table_.reset(); }
+
+  GCInfoTable& table() { return *table_; }
+  const GCInfoTable& table() const { return *table_; }
+
+ private:
+  v8::base::PageAllocator page_allocator_;
+  std::unique_ptr<GCInfoTable> table_;
+};
+
+using GCInfoTableDeathTest = GCInfoTableTest;
+
 }  // namespace
 
-TEST(GCInfoTableTest, InitialEmpty) {
-  v8::base::PageAllocator page_allocator;
-  GCInfoTable table(&page_allocator);
-  EXPECT_EQ(GCInfoTable::kMinIndex, table.NumberOfGCInfos());
+TEST_F(GCInfoTableTest, InitialEmpty) {
+  EXPECT_EQ(GCInfoTable::kMinIndex, table().NumberOfGCInfos());
 }
 
-TEST(GCInfoTableTest, ResizeToMaxIndex) {
-  v8::base::PageAllocator page_allocator;
-  GCInfoTable table(&page_allocator);
+TEST_F(GCInfoTableTest, ResizeToMaxIndex) {
   GCInfo info = GetEmptyGCInfo();
   for (GCInfoIndex i = GCInfoTable::kMinIndex; i < GCInfoTable::kMaxIndex;
        i++) {
-    GCInfoIndex index = table.RegisterNewGCInfo(info);
+    GCInfoIndex index = RegisterNewGCInfoForTesting(info);
     EXPECT_EQ(i, index);
   }
 }
 
-TEST(GCInfoTableDeathTest, MoreThanMaxIndexInfos) {
-  v8::base::PageAllocator page_allocator;
-  GCInfoTable table(&page_allocator);
+TEST_F(GCInfoTableDeathTest, MoreThanMaxIndexInfos) {
   GCInfo info = GetEmptyGCInfo();
   // Create GCInfoTable::kMaxIndex entries.
   for (GCInfoIndex i = GCInfoTable::kMinIndex; i < GCInfoTable::kMaxIndex;
       i++) {
-    table.RegisterNewGCInfo(info);
+    RegisterNewGCInfoForTesting(info);
   }
-  EXPECT_DEATH_IF_SUPPORTED(table.RegisterNewGCInfo(info), "");
+  EXPECT_DEATH_IF_SUPPORTED(RegisterNewGCInfoForTesting(info), "");
 }
 
-TEST(GCInfoTableDeathTest, OldTableAreaIsReadOnly) {
-  v8::base::PageAllocator page_allocator;
-  GCInfoTable table(&page_allocator);
+TEST_F(GCInfoTableDeathTest, OldTableAreaIsReadOnly) {
   GCInfo info = GetEmptyGCInfo();
   // Use up all slots until limit.
-  GCInfoIndex limit = table.LimitForTesting();
+  GCInfoIndex limit = table().LimitForTesting();
   // Bail out if initial limit is already the maximum because of large committed
   // pages. In this case, nothing can be comitted as read-only.
   if (limit == GCInfoTable::kMaxIndex) {
     return;
   }
   for (GCInfoIndex i = GCInfoTable::kMinIndex; i < limit; i++) {
-    table.RegisterNewGCInfo(info);
+    RegisterNewGCInfoForTesting(info);
   }
-  EXPECT_EQ(limit, table.LimitForTesting());
-  table.RegisterNewGCInfo(info);
-  EXPECT_NE(limit, table.LimitForTesting());
+  EXPECT_EQ(limit, table().LimitForTesting());
+  RegisterNewGCInfoForTesting(info);
+  EXPECT_NE(limit, table().LimitForTesting());
   // Old area is now read-only.
-  auto& first_slot = table.TableSlotForTesting(GCInfoTable::kMinIndex);
+  auto& first_slot = table().TableSlotForTesting(GCInfoTable::kMinIndex);
   EXPECT_DEATH_IF_SUPPORTED(first_slot.finalize = nullptr, "");
 }
@@ -85,8 +101,9 @@ class ThreadRegisteringGCInfoObjects final : public v8::base::Thread {
 
   void Run() final {
     GCInfo info = GetEmptyGCInfo();
+    std::atomic<GCInfoIndex> registered_index;
     for (GCInfoIndex i = 0; i < num_registrations_; i++) {
-      table_->RegisterNewGCInfo(info);
+      table_->RegisterNewGCInfo(registered_index, info);
     }
   }
@@ -97,7 +114,7 @@ class ThreadRegisteringGCInfoObjects final : public v8::base::Thread {
 
 }  // namespace
 
-TEST(GCInfoTableTest, MultiThreadedResizeToMaxIndex) {
+TEST_F(GCInfoTableTest, MultiThreadedResizeToMaxIndex) {
   constexpr size_t num_threads = 4;
   constexpr size_t main_thread_initialized = 2;
   constexpr size_t gc_infos_to_register =
@@ -107,17 +124,15 @@ TEST(GCInfoTableTest, MultiThreadedResizeToMaxIndex) {
                 "must sum up to kMaxIndex");
   constexpr size_t gc_infos_per_thread = gc_infos_to_register / num_threads;
 
-  v8::base::PageAllocator page_allocator;
-  GCInfoTable table(&page_allocator);
   GCInfo info = GetEmptyGCInfo();
   for (size_t i = 0; i < main_thread_initialized; i++) {
-    table.RegisterNewGCInfo(info);
+    RegisterNewGCInfoForTesting(info);
   }
 
   v8::base::Thread* threads[num_threads];
   for (size_t i = 0; i < num_threads; i++) {
     threads[i] =
-        new ThreadRegisteringGCInfoObjects(&table, gc_infos_per_thread);
+        new ThreadRegisteringGCInfoObjects(&table(), gc_infos_per_thread);
   }
 
   for (size_t i = 0; i < num_threads; i++) {
     CHECK(threads[i]->Start());