cppgc: Add HeapObjectHeader

This adds HeapObjectHeader, a meta object that is put in front of
every managed object. HeapObjectHeader provides accessors for:
1. GCInfoIndex
2. In construction bit
3. Size
4. Mark bit

Meta information is distributed across two uint16_t fields, grouped as
(1., 2.) and (3., 4.). This is convenient because the non-bit accessors
(GCInfoIndex, size) are constant during marking, so each half pairs one
immutable value with one mutable bit.

For the object layout, see heap-object-header.h.

Note: The current implementation does not bypass ASAN poisoning and
assumes an unpoisoned header whenever performing an access.
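
For orientation, a minimal usage sketch of the accessors listed above, as
exercised by the unit tests in this patch (AllocateRaw and the index value
are hypothetical; the accessor names are the ones introduced here):

  void* block = AllocateRaw(sizeof(HeapObjectHeader) + 16);  // hypothetical allocator
  auto* header = new (block) HeapObjectHeader(
      sizeof(HeapObjectHeader) + 16, /*gc_info_index=*/1);
  void* payload = header->Payload();       // object storage starts here
  DCHECK(header->IsInConstruction());      // (2.), set until construction ends
  MarkObjectAsFullyConstructed(payload);   // clears the in-construction state
  if (header->TryMarkAtomic()) { /* first visit during marking (4.) */ }
  header->Unmark();                        // sweeping resets the mark bit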

Bug: chromium:1056170
Change-Id: I753f15467ed5c2b22b47e64d3aa5a3c1baddf8e1
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2116031
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#66861}
10 changed files with 583 additions and 0 deletions

BUILD.gn

@@ -3927,6 +3927,8 @@ v8_source_set("cppgc_base") {
visibility = [ ":*" ]
sources = [
"include/cppgc/allocation.h",
"include/cppgc/api-constants.h",
"include/cppgc/finalizer-trait.h",
"include/cppgc/gc-info.h",
"include/cppgc/platform.h",
@@ -3934,6 +3936,9 @@ v8_source_set("cppgc_base") {
"src/heap/cppgc/gc-info-table.cc",
"src/heap/cppgc/gc-info-table.h",
"src/heap/cppgc/gc-info.cc",
"src/heap/cppgc/heap-object-header-inl.h",
"src/heap/cppgc/heap-object-header.cc",
"src/heap/cppgc/heap-object-header.h",
]
configs = [ ":internal_config" ]

include/cppgc/allocation.h

@@ -0,0 +1,33 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef INCLUDE_CPPGC_ALLOCATION_H_
#define INCLUDE_CPPGC_ALLOCATION_H_
#include <stdint.h>
#include <atomic>
#include "include/cppgc/api-constants.h"
namespace cppgc {
namespace internal {
// Marks an object as being fully constructed, resulting in precise handling
// by the garbage collector.
inline void MarkObjectAsFullyConstructed(const void* payload) {
// See api_constants for an explanation of the constants.
std::atomic<uint16_t>* atomic_mutable_bitfield =
reinterpret_cast<std::atomic<uint16_t>*>(
const_cast<uint16_t*>(reinterpret_cast<const uint16_t*>(
reinterpret_cast<const uint8_t*>(payload) -
api_constants::kFullyConstructedBitFieldOffsetFromPayload)));
uint16_t value = atomic_mutable_bitfield->load(std::memory_order_relaxed);
value = value | api_constants::kFullyConstructedBitMask;
atomic_mutable_bitfield->store(value, std::memory_order_release);
}
} // namespace internal
} // namespace cppgc
#endif // INCLUDE_CPPGC_ALLOCATION_H_
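
The release store above pairs with the acquire load in
HeapObjectHeader::IsInConstruction() (heap-object-header-inl.h below): payload
writes performed before MarkObjectAsFullyConstructed() become visible to any
thread that observes the bit as set. A condensed model of that protocol, with
stand-in names that are not part of the patch (the unit test
ConstructionBitProtectsNonAtomicWrites below exercises the real thing under
TSAN):

  std::atomic<uint16_t> bitfield{0};
  int payload_value = 0;  // plain, non-atomic payload data

  void Publish() {        // mutator thread
    payload_value = 42;   // non-atomic initialization
    bitfield.store(1, std::memory_order_release);
  }

  void Observe() {        // GC thread
    while (!(bitfield.load(std::memory_order_acquire) & 1)) {
    }
    // payload_value == 42 is guaranteed visible here; no data race.
  }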

include/cppgc/api-constants.h

@@ -0,0 +1,32 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef INCLUDE_CPPGC_API_CONSTANTS_H_
#define INCLUDE_CPPGC_API_CONSTANTS_H_
#include <stddef.h>
#include <stdint.h>
#include "include/v8config.h"
namespace cppgc {
namespace internal {
// Internal constants to avoid exposing internal types on the API surface.
// DO NOT USE THESE CONSTANTS FROM USER CODE!
namespace api_constants {
// Offset from the payload of the uint16_t bitfield containing the
// in-construction bit. This is subtracted from the payload pointer to get
// to the right bitfield.
static constexpr size_t kFullyConstructedBitFieldOffsetFromPayload =
2 * sizeof(uint16_t);
// Mask for in-construction bit.
static constexpr size_t kFullyConstructedBitMask = size_t{1};
} // namespace api_constants
} // namespace internal
} // namespace cppgc
#endif // INCLUDE_CPPGC_API_CONSTANTS_H_
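
Why 2 * sizeof(uint16_t): the header ends with the two encoded halves, in the
order encoded_high_ then encoded_low_ (see heap-object-header.h below), so
counting back from the payload:

  payload - 2 * sizeof(uint16_t)  ->  &encoded_high_  (holds the in-construction bit)
  payload - 1 * sizeof(uint16_t)  ->  &encoded_low_   (holds the mark bit)

CheckApiConstants() in heap-object-header.cc statically asserts this
correspondence.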

src/base/atomic-utils.h

@@ -203,6 +203,16 @@ inline void CheckedDecrement(std::atomic<T>* number, T amount) {
USE(old);
}
template <typename T>
V8_INLINE std::atomic<T>* AsAtomicPtr(T* t) {
return reinterpret_cast<std::atomic<T>*>(t);
}
template <typename T>
V8_INLINE const std::atomic<T>* AsAtomicPtr(const T* t) {
return reinterpret_cast<const std::atomic<T>*>(t);
}
} // namespace base
} // namespace v8
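
These helpers let a field that one thread normally accesses non-atomically be
reinterpreted as std::atomic<T> during concurrent phases. The cast relies on
std::atomic<T> being layout-compatible with T, which the C++ standard does not
guarantee but which holds on the toolchains V8 targets. A usage sketch (the
variable is hypothetical):

  uint16_t flags = 0;  // owned and normally accessed by one thread

  // Concurrent phase: all cross-thread accesses go through the atomic view.
  v8::base::AsAtomicPtr(&flags)->store(1, std::memory_order_relaxed);
  uint16_t seen = v8::base::AsAtomicPtr(&flags)->load(std::memory_order_relaxed);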

src/heap/cppgc/globals.h

@@ -0,0 +1,32 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_CPPGC_GLOBALS_H_
#define V8_HEAP_CPPGC_GLOBALS_H_
#include <stddef.h>
#include <stdint.h>
namespace cppgc {
namespace internal {
using Address = uint8_t*;
using ConstAddress = const uint8_t*;
// See 6.7.6 (http://eel.is/c++draft/basic.align) for alignment restrictions. We
// do not fully support all alignment restrictions (following
// alignof(std::max_align_t)) but limit it to alignof(double).
//
// This means that any scalar type with stricter alignment requirements (in
// practice: long double) cannot be used unrestricted in garbage-collected
// objects.
//
// Note: We use the same allocation granularity on 32-bit and 64-bit systems.
constexpr size_t kAllocationGranularity = 8;
constexpr size_t kAllocationMask = kAllocationGranularity - 1;
} // namespace internal
} // namespace cppgc
#endif // V8_HEAP_CPPGC_GLOBALS_H_
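
Sizes are brought to this granularity with the usual round-up-mask expression;
the unit test below uses it verbatim when sizing its test object (sketch, not
part of the patch):

  constexpr size_t RoundUp(size_t size) {
    return (size + kAllocationMask) & ~kAllocationMask;  // e.g. 13 -> 16, 24 -> 24
  }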

src/heap/cppgc/heap-object-header-inl.h

@@ -0,0 +1,144 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_INL_H_
#define V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_INL_H_
#include "include/cppgc/gc-info.h"
#include "src/base/atomic-utils.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/gc-info-table.h"
#include "src/heap/cppgc/heap-object-header.h"
namespace cppgc {
namespace internal {
// static
HeapObjectHeader& HeapObjectHeader::FromPayload(void* payload) {
return *reinterpret_cast<HeapObjectHeader*>(static_cast<Address>(payload) -
sizeof(HeapObjectHeader));
}
// static
const HeapObjectHeader& HeapObjectHeader::FromPayload(const void* payload) {
return *reinterpret_cast<const HeapObjectHeader*>(
static_cast<ConstAddress>(payload) - sizeof(HeapObjectHeader));
}
HeapObjectHeader::HeapObjectHeader(size_t size, GCInfoIndex gc_info_index) {
#if defined(V8_TARGET_ARCH_64_BIT)
USE(padding_);
#endif // defined(V8_TARGET_ARCH_64_BIT)
DCHECK_LT(gc_info_index, GCInfoTable::kMaxIndex);
DCHECK_EQ(0u, size & kAllocationMask);
DCHECK_GE(kMaxSize, size);
encoded_high_ = GCInfoIndexField::encode(gc_info_index);
encoded_low_ = EncodeSize(size);
DCHECK(IsInConstruction());
#ifdef DEBUG
CheckApiConstants();
#endif // DEBUG
}
Address HeapObjectHeader::Payload() const {
return reinterpret_cast<Address>(const_cast<HeapObjectHeader*>(this)) +
sizeof(HeapObjectHeader);
}
template <HeapObjectHeader::AccessMode mode>
GCInfoIndex HeapObjectHeader::GetGCInfoIndex() const {
const uint16_t encoded =
LoadEncoded<mode, EncodedHalf::kHigh, std::memory_order_acquire>();
return GCInfoIndexField::decode(encoded);
}
template <HeapObjectHeader::AccessMode mode>
size_t HeapObjectHeader::GetSize() const {
// Size is immutable after construction while either marking or sweeping
// is running, so a relaxed load (if mode == kAtomic) is enough.
uint16_t encoded_low_value =
LoadEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>();
const size_t size = DecodeSize(encoded_low_value);
return size;
}
void HeapObjectHeader::SetSize(size_t size) {
DCHECK(!IsMarked());
encoded_low_ |= EncodeSize(size);
}
template <HeapObjectHeader::AccessMode mode>
bool HeapObjectHeader::IsLargeObject() const {
return GetSize<mode>() == kLargeObjectSizeInHeader;
}
template <HeapObjectHeader::AccessMode mode>
bool HeapObjectHeader::IsInConstruction() const {
const uint16_t encoded =
LoadEncoded<mode, EncodedHalf::kHigh, std::memory_order_acquire>();
return !FullyConstructedField::decode(encoded);
}
template <HeapObjectHeader::AccessMode mode>
bool HeapObjectHeader::IsMarked() const {
const uint16_t encoded =
LoadEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>();
return MarkBitField::decode(encoded);
}
template <HeapObjectHeader::AccessMode mode>
void HeapObjectHeader::Unmark() {
DCHECK(IsMarked<mode>());
StoreEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>(
MarkBitField::encode(false), MarkBitField::kMask);
}
bool HeapObjectHeader::TryMarkAtomic() {
auto* atomic_encoded = v8::base::AsAtomicPtr(&encoded_low_);
uint16_t old_value = atomic_encoded->load(std::memory_order_relaxed);
const uint16_t new_value = old_value | MarkBitField::encode(true);
if (new_value == old_value) {
return false;
}
return atomic_encoded->compare_exchange_strong(old_value, new_value,
std::memory_order_relaxed);
}
template <HeapObjectHeader::AccessMode mode, HeapObjectHeader::EncodedHalf part,
std::memory_order memory_order>
uint16_t HeapObjectHeader::LoadEncoded() const {
const uint16_t& half =
part == EncodedHalf::kLow ? encoded_low_ : encoded_high_;
if (mode == AccessMode::kNonAtomic) return half;
return v8::base::AsAtomicPtr(&half)->load(memory_order);
}
template <HeapObjectHeader::AccessMode mode, HeapObjectHeader::EncodedHalf part,
std::memory_order memory_order>
void HeapObjectHeader::StoreEncoded(uint16_t bits, uint16_t mask) {
// Caveat: Not all changes to HeapObjectHeader's bitfields go through
// StoreEncoded. The following have their own implementations and need to be
// kept in sync:
// - HeapObjectHeader::TryMarkAtomic
// - MarkObjectAsFullyConstructed (API)
DCHECK_EQ(0u, bits & ~mask);
uint16_t& half = part == EncodedHalf::kLow ? encoded_low_ : encoded_high_;
if (mode == AccessMode::kNonAtomic) {
half = (half & ~mask) | bits;
return;
}
// We don't perform a CAS loop here, assuming that none of the info sharing
// the same encoded half is mutated at the same time.
auto* atomic_encoded = v8::base::AsAtomicPtr(&half);
uint16_t value = atomic_encoded->load(std::memory_order_relaxed);
value = (value & ~mask) | bits;
atomic_encoded->store(value, memory_order);
}
} // namespace internal
} // namespace cppgc
#endif // V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_INL_H_
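
The AccessMode template parameter lets hot single-threaded paths skip atomic
operations entirely while concurrent phases opt in per call site. A usage
sketch (the payload pointer is hypothetical):

  HeapObjectHeader& header = HeapObjectHeader::FromPayload(payload);
  header.GetSize();                                         // mutator fast path
  header.GetSize<HeapObjectHeader::AccessMode::kAtomic>();  // concurrent marking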

src/heap/cppgc/heap-object-header.cc

@@ -0,0 +1,20 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/cppgc/heap-object-header.h"
#include "src/base/macros.h"
namespace cppgc {
namespace internal {
void HeapObjectHeader::CheckApiConstants() {
STATIC_ASSERT(api_constants::kFullyConstructedBitMask ==
FullyConstructedField::kMask);
STATIC_ASSERT(api_constants::kFullyConstructedBitFieldOffsetFromPayload ==
(sizeof(encoded_high_) + sizeof(encoded_low_)));
}
} // namespace internal
} // namespace cppgc

src/heap/cppgc/heap-object-header.h

@@ -0,0 +1,125 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_H_
#define V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_H_
#include <stdint.h>
#include <atomic>
#include "include/cppgc/api-constants.h"
#include "include/cppgc/gc-info.h"
#include "src/base/bit-field.h"
#include "src/heap/cppgc/globals.h"
namespace cppgc {
namespace internal {
// HeapObjectHeader contains meta data per object and is prepended to each
// object.
//
// +-----------------+------+------------------------------------------+
// | name | bits | |
// +-----------------+------+------------------------------------------+
// | padding | 32 | Only present on 64-bit platform. |
// +-----------------+------+------------------------------------------+
// | GCInfoIndex | 14 | |
// | unused | 1 | |
// | in construction | 1 | In construction encoded as |false|. |
// +-----------------+------+------------------------------------------+
// | size | 14 | 17 bits because allocations are aligned. |
// | unused | 1 | |
// | mark bit | 1 | |
// +-----------------+------+------------------------------------------+
//
// Notes:
// - See |GCInfoTable| for constraints on GCInfoIndex.
// - |size| for regular objects is encoded with 14 bits but can actually
// represent sizes up to |kBlinkPageSize| (2^17) because allocations are
// always 8 byte aligned (see kAllocationGranularity).
// - |size| for large objects is encoded as 0. The size of a large object is
// stored in |LargeObjectPage::PayloadSize()|.
// - |mark bit| and |in construction| bits are located in separate 16-bit halves
// to allow potentially accessing them non-atomically.
class HeapObjectHeader final {
public:
enum class AccessMode : uint8_t { kNonAtomic, kAtomic };
static constexpr size_t kSizeLog2 = 17;
static constexpr size_t kMaxSize = (size_t{1} << kSizeLog2) - 1;
inline static HeapObjectHeader& FromPayload(void* address);
inline static const HeapObjectHeader& FromPayload(const void* address);
inline HeapObjectHeader(size_t size, GCInfoIndex gc_info_index);
// The payload starts directly after the HeapObjectHeader.
inline Address Payload() const;
template <AccessMode mode = AccessMode::kNonAtomic>
inline GCInfoIndex GetGCInfoIndex() const;
template <AccessMode mode = AccessMode::kNonAtomic>
inline size_t GetSize() const;
inline void SetSize(size_t size);
template <AccessMode mode = AccessMode::kNonAtomic>
inline bool IsLargeObject() const;
template <AccessMode = AccessMode::kNonAtomic>
bool IsInConstruction() const;
// Use MarkObjectAsFullyConstructed() to mark an object as being constructed.
template <AccessMode = AccessMode::kNonAtomic>
bool IsMarked() const;
template <AccessMode = AccessMode::kNonAtomic>
void Unmark();
inline bool TryMarkAtomic();
private:
enum class EncodedHalf : uint8_t { kLow, kHigh };
static constexpr uint16_t kLargeObjectSizeInHeader = 0;
// Used in |encoded_high_|.
using FullyConstructedField = v8::base::BitField16<bool, 0, 1>;
using UnusedField1 = FullyConstructedField::Next<bool, 1>;
using GCInfoIndexField = UnusedField1::Next<GCInfoIndex, 14>;
// Used in |encoded_low_|.
using MarkBitField = v8::base::BitField16<bool, 0, 1>;
using UnusedField2 = MarkBitField::Next<bool, 1>;
using SizeField = void; // Use EncodeSize/DecodeSize instead.
static constexpr size_t DecodeSize(uint16_t encoded) {
// Essentially, gets optimized to << 1.
using SizeField = UnusedField2::Next<size_t, 14>;
return SizeField::decode(encoded) * kAllocationGranularity;
}
static constexpr uint16_t EncodeSize(size_t size) {
// Essentially, gets optimized to >> 1.
using SizeField = UnusedField2::Next<size_t, 14>;
return SizeField::encode(size) / kAllocationGranularity;
}
V8_EXPORT_PRIVATE void CheckApiConstants();
template <AccessMode, EncodedHalf part,
std::memory_order memory_order = std::memory_order_seq_cst>
inline uint16_t LoadEncoded() const;
template <AccessMode mode, EncodedHalf part,
std::memory_order memory_order = std::memory_order_seq_cst>
inline void StoreEncoded(uint16_t bits, uint16_t mask);
#if defined(V8_TARGET_ARCH_64_BIT)
uint32_t padding_ = 0;
#endif // defined(V8_TARGET_ARCH_64_BIT)
uint16_t encoded_high_;
uint16_t encoded_low_;
};
} // namespace internal
} // namespace cppgc
#endif // V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_H_
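
Two arithmetic notes on the layout above (sketched checks, not part of the
patch). SizeField occupies bits 2-15 of encoded_low_, above the mark and
unused bits, so for size = 184:

  EncodeSize(184) = SizeField::encode(184 / 8) = 23 << 2 = 92   // == 184 >> 1
  DecodeSize(92)  = SizeField::decode(92) * 8  = 23 * 8  = 184  // == 92 << 1

And on 64-bit the header occupies exactly one allocation granule, so
prepending it keeps payloads 8-byte aligned:

  #if defined(V8_TARGET_ARCH_64_BIT)
  static_assert(sizeof(HeapObjectHeader) == kAllocationGranularity,
                "4 bytes padding + 2x uint16_t == kAllocationGranularity");
  #endif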

test/unittests/BUILD.gn

@@ -46,6 +46,7 @@ v8_source_set("cppgc_unittests_sources") {
sources = [
"heap/cppgc/finalizer-trait_unittest.cc",
"heap/cppgc/gc-info_unittest.cc",
"heap/cppgc/heap-object-header_unittest.cc",
]
configs = [

test/unittests/heap/cppgc/heap-object-header_unittest.cc

@@ -0,0 +1,181 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/cppgc/heap-object-header.h"
#include <atomic>
#include <memory>
#include "include/cppgc/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header-inl.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace cppgc {
namespace internal {
TEST(HeapObjectHeaderTest, Constructor) {
constexpr GCInfoIndex kGCInfoIndex = 17;
constexpr size_t kSize = kAllocationGranularity;
HeapObjectHeader header(kSize, kGCInfoIndex);
EXPECT_EQ(kSize, header.GetSize());
EXPECT_EQ(kGCInfoIndex, header.GetGCInfoIndex());
EXPECT_TRUE(header.IsInConstruction());
EXPECT_FALSE(header.IsMarked());
}
TEST(HeapObjectHeaderTest, Payload) {
constexpr GCInfoIndex kGCInfoIndex = 17;
constexpr size_t kSize = kAllocationGranularity;
HeapObjectHeader header(kSize, kGCInfoIndex);
EXPECT_EQ(reinterpret_cast<ConstAddress>(&header) + sizeof(HeapObjectHeader),
header.Payload());
}
TEST(HeapObjectHeaderTest, GetGCInfoIndex) {
constexpr GCInfoIndex kGCInfoIndex = 17;
constexpr size_t kSize = kAllocationGranularity;
HeapObjectHeader header(kSize, kGCInfoIndex);
EXPECT_EQ(kGCInfoIndex, header.GetGCInfoIndex());
EXPECT_EQ(kGCInfoIndex,
header.GetGCInfoIndex<HeapObjectHeader::AccessMode::kAtomic>());
}
TEST(HeapObjectHeaderTest, GetSize) {
constexpr GCInfoIndex kGCInfoIndex = 17;
constexpr size_t kSize = kAllocationGranularity * 23;
HeapObjectHeader header(kSize, kGCInfoIndex);
EXPECT_EQ(kSize, header.GetSize());
EXPECT_EQ(kSize, header.GetSize<HeapObjectHeader::AccessMode::kAtomic>());
}
TEST(HeapObjectHeaderTest, IsLargeObject) {
constexpr GCInfoIndex kGCInfoIndex = 17;
constexpr size_t kSize = kAllocationGranularity * 23;
HeapObjectHeader header(kSize, kGCInfoIndex);
EXPECT_EQ(false, header.IsLargeObject());
EXPECT_EQ(false,
header.IsLargeObject<HeapObjectHeader::AccessMode::kAtomic>());
HeapObjectHeader large_header(0, kGCInfoIndex + 1);
EXPECT_EQ(true, large_header.IsLargeObject());
EXPECT_EQ(
true,
large_header.IsLargeObject<HeapObjectHeader::AccessMode::kAtomic>());
}
TEST(HeapObjectHeaderTest, MarkObjectAsFullyConstructed) {
constexpr GCInfoIndex kGCInfoIndex = 17;
constexpr size_t kSize = kAllocationGranularity;
HeapObjectHeader header(kSize, kGCInfoIndex);
EXPECT_TRUE(header.IsInConstruction());
MarkObjectAsFullyConstructed(&header + 1);
EXPECT_FALSE(header.IsInConstruction());
// Size shares the same bitfield and should be unaffected by
// MarkObjectAsFullyConstructed.
EXPECT_EQ(kSize, header.GetSize());
}
TEST(HeapObjectHeaderTest, TryMark) {
constexpr GCInfoIndex kGCInfoIndex = 17;
constexpr size_t kSize = kAllocationGranularity * 7;
HeapObjectHeader header(kSize, kGCInfoIndex);
EXPECT_FALSE(header.IsMarked());
EXPECT_TRUE(header.TryMarkAtomic());
// GCInfoIndex shares the same bitfield and should be unaffected by
// TryMarkAtomic.
EXPECT_EQ(kGCInfoIndex, header.GetGCInfoIndex());
EXPECT_FALSE(header.TryMarkAtomic());
// GCInfoIndex shares the same bitfield and should be unaffected by
// TryMarkAtomic.
EXPECT_EQ(kGCInfoIndex, header.GetGCInfoIndex());
EXPECT_TRUE(header.IsMarked());
}
TEST(HeapObjectHeaderTest, Unmark) {
constexpr GCInfoIndex kGCInfoIndex = 17;
constexpr size_t kSize = kAllocationGranularity * 7;
HeapObjectHeader header(kSize, kGCInfoIndex);
EXPECT_FALSE(header.IsMarked());
EXPECT_TRUE(header.TryMarkAtomic());
EXPECT_EQ(kGCInfoIndex, header.GetGCInfoIndex());
EXPECT_TRUE(header.IsMarked());
header.Unmark();
// GCInfoIndex shares the same bitfield and should be unaffected by Unmark.
EXPECT_EQ(kGCInfoIndex, header.GetGCInfoIndex());
EXPECT_FALSE(header.IsMarked());
HeapObjectHeader header2(kSize, kGCInfoIndex);
EXPECT_FALSE(header2.IsMarked());
EXPECT_TRUE(header2.TryMarkAtomic());
EXPECT_TRUE(header2.IsMarked());
header2.Unmark<HeapObjectHeader::AccessMode::kAtomic>();
// GCInfoIndex shares the same bitfield and should be unaffected by Unmark.
EXPECT_EQ(kGCInfoIndex, header2.GetGCInfoIndex());
EXPECT_FALSE(header2.IsMarked());
}
namespace {
struct Payload {
volatile size_t value{5};
};
class ConcurrentGCThread final : public v8::base::Thread {
public:
explicit ConcurrentGCThread(HeapObjectHeader* header, Payload* payload)
: v8::base::Thread(Options("Thread accessing object.")),
header_(header),
payload_(payload) {}
void Run() final {
while (header_->IsInConstruction<HeapObjectHeader::AccessMode::kAtomic>()) {
}
v8::base::AsAtomicPtr(const_cast<size_t*>(&payload_->value))
->load(std::memory_order_relaxed);
}
private:
HeapObjectHeader* header_;
Payload* payload_;
};
} // namespace
TEST(HeapObjectHeaderTest, ConstructionBitProtectsNonAtomicWrites) {
// Object publishing: Test checks that non-atomic stores in the payload can be
// guarded using MarkObjectAsFullyConstructed/IsInConstruction. The test
// relies on TSAN to find data races.
constexpr size_t kSize =
(sizeof(HeapObjectHeader) + sizeof(Payload) + kAllocationMask) &
~kAllocationMask;
typename std::aligned_storage<kSize, kAllocationGranularity>::type data;
HeapObjectHeader* header = new (&data) HeapObjectHeader(kSize, 1);
ConcurrentGCThread gc_thread(header,
reinterpret_cast<Payload*>(header->Payload()));
CHECK(gc_thread.Start());
new (header->Payload()) Payload();
MarkObjectAsFullyConstructed(header->Payload());
gc_thread.Join();
}
#ifdef DEBUG
TEST(HeapObjectHeaderDeathTest, ConstructorTooLargeSize) {
constexpr GCInfoIndex kGCInfoIndex = 17;
constexpr size_t kSize = HeapObjectHeader::kMaxSize + 1;
EXPECT_DEATH(HeapObjectHeader header(kSize, kGCInfoIndex), "");
}
TEST(HeapObjectHeaderDeathTest, ConstructorTooLargeGCInfoIndex) {
constexpr GCInfoIndex kGCInfoIndex = GCInfoTable::kMaxIndex + 1;
constexpr size_t kSize = kAllocationGranularity;
EXPECT_DEATH(HeapObjectHeader header(kSize, kGCInfoIndex), "");
}
#endif // DEBUG
} // namespace internal
} // namespace cppgc