cppgc: Move caged heap into a separate class

This also introduces CagedHeapLocalData.

Bug: chromium:1029379
Change-Id: Ice04fe5ad7daa02f17ad107e78e53bdd32479737
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2246560
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Anton Bikineev <bikineev@chromium.org>
Cr-Commit-Position: refs/heads/master@{#68358}
Anton Bikineev 2020-06-15 20:57:06 +02:00 committed by Commit Bot
parent 30c60f38d1
commit 3a929dfa1c
10 changed files with 217 additions and 56 deletions

BUILD.gn

@@ -4186,6 +4186,14 @@ v8_source_set("cppgc_base") {
"src/heap/cppgc/write-barrier.cc",
]
if (cppgc_enable_caged_heap) {
sources += [
"include/cppgc/internal/caged-heap-local-data.h",
"src/heap/cppgc/caged-heap.cc",
"src/heap/cppgc/caged-heap.h",
]
}
if (is_clang || !is_win) {
if (target_cpu == "x64") {
sources += [ "src/heap/cppgc/asm/x64/push_registers_asm.cc" ]

include/cppgc/internal/api-constants.h

@@ -17,6 +17,11 @@ namespace internal {
// Internal constants to avoid exposing internal types on the API surface.
namespace api_constants {
constexpr size_t kKB = 1024;
constexpr size_t kMB = kKB * 1024;
constexpr size_t kGB = kMB * 1024;
// Offset of the uint16_t bitfield from the payload containing the
// in-construction bit. This is subtracted from the payload pointer to get
// to the right bitfield.
@@ -29,6 +34,11 @@ static constexpr size_t kPageSize = size_t{1} << 17;
static constexpr size_t kLargeObjectSizeThreshold = kPageSize / 2;
#if defined(CPPGC_CAGED_HEAP)
constexpr size_t kCagedHeapReservationSize = static_cast<size_t>(4) * kGB;
constexpr size_t kCagedHeapReservationAlignment = kCagedHeapReservationSize;
#endif
} // namespace api_constants
} // namespace internal
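
Worth noting: the reservation alignment above is defined to equal the reservation size. That invariant is what makes the caged write barrier (next file) cheap, since a 4 GB cage aligned to its own size lets any pointer into the cage be mapped back to the cage base with a single mask, with no heap lookup. A minimal sketch of the invariant, with illustrative copies of the constants (assumes a 64-bit size_t):

#include <cstddef>
#include <cstdint>

// Illustrative copies of the header's constants; assumes 64-bit size_t.
constexpr size_t kGB = size_t{1024} * 1024 * 1024;
constexpr size_t kCagedHeapReservationSize = 4 * kGB;
constexpr size_t kCagedHeapReservationAlignment = kCagedHeapReservationSize;

// The single-mask base computation requires a power-of-two alignment.
static_assert((kCagedHeapReservationAlignment &
               (kCagedHeapReservationAlignment - 1)) == 0,
              "cage alignment must be a power of two");

// Any address inside the cage yields the cage base in one AND.
constexpr uintptr_t CageBase(uintptr_t address_in_cage) {
  return address_in_cage &
         ~static_cast<uintptr_t>(kCagedHeapReservationAlignment - 1);
}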

include/cppgc/internal/caged-heap-local-data.h

@@ -0,0 +1,18 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef INCLUDE_CPPGC_INTERNAL_CAGED_HEAP_LOCAL_DATA_H_
#define INCLUDE_CPPGC_INTERNAL_CAGED_HEAP_LOCAL_DATA_H_
namespace cppgc {
namespace internal {
struct CagedHeapLocalData final {
bool is_marking_in_progress = false;
};
} // namespace internal
} // namespace cppgc
#endif // INCLUDE_CPPGC_INTERNAL_CAGED_HEAP_LOCAL_DATA_H_

include/cppgc/internal/write-barrier.h

@@ -5,27 +5,47 @@
#ifndef INCLUDE_CPPGC_INTERNAL_WRITE_BARRIER_H_
#define INCLUDE_CPPGC_INTERNAL_WRITE_BARRIER_H_
#include "cppgc/internal/api-constants.h"
#include "cppgc/internal/process-heap.h"
#include "v8config.h" // NOLINT(build/include_directory)
#if defined(CPPGC_CAGED_HEAP)
#include "cppgc/internal/caged-heap-local-data.h"
#endif
namespace cppgc {
namespace internal {
class BasePage;
class Heap;
class V8_EXPORT WriteBarrier final {
public:
static V8_INLINE void MarkingBarrier(const void* slot, const void* value) {
#if defined(CPPGC_CAGED_HEAP)
const uintptr_t start =
reinterpret_cast<uintptr_t>(value) &
~(api_constants::kCagedHeapReservationAlignment - 1);
const uintptr_t slot_offset = reinterpret_cast<uintptr_t>(slot) - start;
if (slot_offset > api_constants::kCagedHeapReservationSize) {
// Check if slot is on stack or value is sentinel or nullptr.
return;
}
CagedHeapLocalData* local_data =
reinterpret_cast<CagedHeapLocalData*>(start);
if (V8_LIKELY(!local_data->is_marking_in_progress)) return;
MarkingBarrierSlow(value);
#else
if (V8_LIKELY(!ProcessHeap::IsAnyIncrementalOrConcurrentMarking())) return;
MarkingBarrierSlow(slot, value);
MarkingBarrierSlowWithSentinelCheck(value);
#endif // CPPGC_CAGED_HEAP
}
private:
WriteBarrier() = delete;
static void MarkingBarrierSlow(const void* slot, const void* value);
static void MarkingBarrierSlow(const void* value);
static void MarkingBarrierSlowWithSentinelCheck(const void* value);
};
} // namespace internal
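
To see how the single unsigned comparison above filters both out-of-cage slots and null/sentinel values, it helps to run the arithmetic on concrete addresses. A hedged sketch (all addresses are made up; it assumes the usual 64-bit layout where nothing below 4 GB is used for the heap or stack):

#include <cstdint>
#include <cstdio>

constexpr uintptr_t kCageSize = uintptr_t{4} << 30;  // 4 GB, alignment == size

// Mirrors the fast-path filter: true means the barrier goes on to consult
// CagedHeapLocalData, false means it bails out immediately.
bool PassesCageFilter(uintptr_t slot, uintptr_t value) {
  const uintptr_t start = value & ~(kCageSize - 1);
  const uintptr_t slot_offset = slot - start;  // unsigned: wraps if slot < start
  return slot_offset <= kCageSize;
}

int main() {
  const uintptr_t cage_base = uintptr_t{1} << 40;  // hypothetical 4GB-aligned cage
  const uintptr_t heap_slot = cage_base + 0x1000;
  const uintptr_t heap_value = cage_base + 0x2000;
  const uintptr_t stack_slot = 0x7ffd'0000'0000;

  printf("%d\n", PassesCageFilter(heap_slot, heap_value));   // 1: both in cage
  printf("%d\n", PassesCageFilter(stack_slot, heap_value));  // 0: slot on stack
  // For a nullptr value, start is 0 and slot_offset is the raw slot address,
  // which exceeds 4 GB on such a layout, so the barrier is skipped as well.
  printf("%d\n", PassesCageFilter(heap_slot, 0));            // 0: null value
}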

src/heap/cppgc/caged-heap.cc

@@ -0,0 +1,70 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if !defined(CPPGC_CAGED_HEAP)
#error "Must be compiled with caged heap enabled"
#endif
#include "src/heap/cppgc/caged-heap.h"
#include "include/cppgc/internal/caged-heap-local-data.h"
#include "src/base/bounded-page-allocator.h"
#include "src/heap/cppgc/globals.h"
namespace cppgc {
namespace internal {
namespace {
VirtualMemory ReserveCagedHeap(PageAllocator* platform_allocator) {
DCHECK_EQ(0u,
kCagedHeapReservationSize % platform_allocator->AllocatePageSize());
static constexpr size_t kAllocationTries = 4;
for (size_t i = 0; i < kAllocationTries; ++i) {
void* hint = reinterpret_cast<void*>(RoundDown(
reinterpret_cast<uintptr_t>(platform_allocator->GetRandomMmapAddr()),
kCagedHeapReservationAlignment));
VirtualMemory memory(platform_allocator, kCagedHeapReservationSize,
kCagedHeapReservationAlignment, hint);
if (memory.IsReserved()) return memory;
}
FATAL("Fatal process out of memory: Failed to reserve memory for caged heap");
UNREACHABLE();
}
std::unique_ptr<CagedHeap::AllocatorType> CreateBoundedAllocator(
v8::PageAllocator* platform_allocator, void* caged_heap_start) {
DCHECK(caged_heap_start);
auto start =
reinterpret_cast<CagedHeap::AllocatorType::Address>(caged_heap_start);
return std::make_unique<CagedHeap::AllocatorType>(
platform_allocator, start, kCagedHeapReservationSize, kPageSize);
}
} // namespace
CagedHeap::CagedHeap(PageAllocator* platform_allocator)
: reserved_area_(ReserveCagedHeap(platform_allocator)) {
void* caged_heap_start = reserved_area_.address();
CHECK(platform_allocator->SetPermissions(
reserved_area_.address(),
RoundUp(sizeof(CagedHeapLocalData), platform_allocator->CommitPageSize()),
PageAllocator::kReadWrite));
new (reserved_area_.address()) CagedHeapLocalData;
caged_heap_start = reinterpret_cast<void*>(
RoundUp(reinterpret_cast<uintptr_t>(caged_heap_start) +
sizeof(CagedHeapLocalData),
kPageSize));
bounded_allocator_ =
CreateBoundedAllocator(platform_allocator, caged_heap_start);
}
} // namespace internal
} // namespace cppgc
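
The constructor above commits only the front of the reservation, placement-news CagedHeapLocalData there, and hands everything from the next kPageSize boundary onward to the BoundedPageAllocator. A small sketch of the resulting layout arithmetic (the base address is hypothetical; kPageSize is the 128 KiB constant from api-constants.h):

#include <cstdint>
#include <cstdio>

constexpr uintptr_t kPageSize = uintptr_t{1} << 17;  // 128 KiB, as in api-constants.h

int main() {
  const uintptr_t cage_base = uintptr_t{1} << 40;  // hypothetical 4GB-aligned base
  const uintptr_t local_data_size = sizeof(bool);  // CagedHeapLocalData is one bool

  // Local data sits at the very start of the cage; the allocatable heap
  // begins at the next kPageSize boundary past it.
  const uintptr_t heap_start =
      (cage_base + local_data_size + kPageSize - 1) & ~(kPageSize - 1);

  printf("local data: %#llx\n", static_cast<unsigned long long>(cage_base));
  printf("heap start: %#llx\n", static_cast<unsigned long long>(heap_start));
  // heap_start == cage_base + 0x20000: one 128 KiB page is set aside up front.
}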

src/heap/cppgc/caged-heap.h

@@ -0,0 +1,47 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_CPPGC_CAGED_HEAP_H_
#define V8_HEAP_CPPGC_CAGED_HEAP_H_
#include <memory>
#include "include/cppgc/platform.h"
#include "src/base/bounded-page-allocator.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/virtual-memory.h"
namespace cppgc {
namespace internal {
struct CagedHeapLocalData;
class CagedHeap final {
public:
using AllocatorType = v8::base::BoundedPageAllocator;
explicit CagedHeap(PageAllocator* platform_allocator);
CagedHeap(const CagedHeap&) = delete;
CagedHeap& operator=(const CagedHeap&) = delete;
AllocatorType& allocator() { return *bounded_allocator_; }
const AllocatorType& allocator() const { return *bounded_allocator_; }
CagedHeapLocalData& local_data() {
return *static_cast<CagedHeapLocalData*>(reserved_area_.address());
}
const CagedHeapLocalData& local_data() const {
return *static_cast<CagedHeapLocalData*>(reserved_area_.address());
}
private:
VirtualMemory reserved_area_;
std::unique_ptr<AllocatorType> bounded_allocator_;
};
} // namespace internal
} // namespace cppgc
#endif // V8_HEAP_CPPGC_CAGED_HEAP_H_

src/heap/cppgc/heap-base.cc

@@ -50,38 +50,6 @@ class ObjectSizeCounter : private HeapVisitor<ObjectSizeCounter> {
size_t accumulated_size_ = 0;
};
#if defined(CPPGC_CAGED_HEAP)
VirtualMemory ReserveCagedHeap(v8::PageAllocator* platform_allocator) {
DCHECK_EQ(0u,
kCagedHeapReservationSize % platform_allocator->AllocatePageSize());
static constexpr size_t kAllocationTries = 4;
for (size_t i = 0; i < kAllocationTries; ++i) {
void* hint = reinterpret_cast<void*>(RoundDown(
reinterpret_cast<uintptr_t>(platform_allocator->GetRandomMmapAddr()),
kCagedHeapReservationAlignment));
VirtualMemory memory(platform_allocator, kCagedHeapReservationSize,
kCagedHeapReservationAlignment, hint);
if (memory.IsReserved()) return memory;
}
FATAL("Fatal process out of memory: Failed to reserve memory for caged heap");
UNREACHABLE();
}
std::unique_ptr<v8::base::BoundedPageAllocator> CreateBoundedAllocator(
v8::PageAllocator* platform_allocator, void* caged_heap_start) {
DCHECK(caged_heap_start);
auto start = reinterpret_cast<v8::base::BoundedPageAllocator::Address>(
caged_heap_start);
return std::make_unique<v8::base::BoundedPageAllocator>(
platform_allocator, start, kCagedHeapReservationSize, kPageSize);
}
#endif
} // namespace
HeapBase::HeapBase(std::shared_ptr<cppgc::Platform> platform,
@@ -89,10 +57,8 @@ HeapBase::HeapBase(std::shared_ptr<cppgc::Platform> platform,
: raw_heap_(this, custom_spaces),
platform_(std::move(platform)),
#if defined(CPPGC_CAGED_HEAP)
reserved_area_(ReserveCagedHeap(platform_->GetPageAllocator())),
bounded_allocator_(CreateBoundedAllocator(platform_->GetPageAllocator(),
reserved_area_.address())),
page_backend_(std::make_unique<PageBackend>(bounded_allocator_.get())),
caged_heap_(platform_->GetPageAllocator()),
page_backend_(std::make_unique<PageBackend>(&caged_heap_.allocator())),
#else
page_backend_(
std::make_unique<PageBackend>(platform_->GetPageAllocator())),

src/heap/cppgc/heap-base.h

@@ -13,14 +13,9 @@
#include "src/heap/cppgc/object-allocator.h"
#include "src/heap/cppgc/raw-heap.h"
#include "src/heap/cppgc/sweeper.h"
#include "src/heap/cppgc/virtual-memory.h"
#if defined(CPPGC_CAGED_HEAP)
namespace v8 {
namespace base {
class BoundedPageAllocator;
}
} // namespace v8
#include "src/heap/cppgc/caged-heap.h"
#endif
namespace cppgc {
@@ -78,6 +73,11 @@ class V8_EXPORT_PRIVATE HeapBase {
return stats_collector_.get();
}
#if defined(CPPGC_CAGED_HEAP)
CagedHeap& caged_heap() { return caged_heap_; }
const CagedHeap& caged_heap() const { return caged_heap_; }
#endif
Stack* stack() { return stack_.get(); }
PreFinalizerHandler* prefinalizer_handler() {
@@ -111,10 +111,7 @@ class V8_EXPORT_PRIVATE HeapBase {
RawHeap raw_heap_;
std::shared_ptr<cppgc::Platform> platform_;
#if defined(CPPGC_CAGED_HEAP)
// The order is important: page_backend_ must be destroyed before
// reserved_area_ is freed.
VirtualMemory reserved_area_;
std::unique_ptr<v8::base::BoundedPageAllocator> bounded_allocator_;
CagedHeap caged_heap_;
#endif
std::unique_ptr<PageBackend> page_backend_;
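
The ordering comment deleted above ("page_backend_ must be destroyed before reserved_area_ is freed") is not lost in the refactoring: CagedHeap preserves the same guarantee by declaring reserved_area_ before bounded_allocator_, because C++ destroys non-static members in reverse declaration order. A self-contained toy illustrating that ordering (all types here are hypothetical stand-ins):

#include <cstdio>
#include <memory>

struct Reservation {
  ~Reservation() { puts("reservation freed (last)"); }
};
struct Allocator {
  ~Allocator() { puts("allocator destroyed (first)"); }
};

// Same member order as CagedHeap: the reservation is declared first,
// so it is destroyed last -- after everything allocated inside it.
struct CagedHeapLike {
  Reservation reserved_area_;
  std::unique_ptr<Allocator> allocator_ = std::make_unique<Allocator>();
};

int main() { CagedHeapLike heap; }  // allocator dies first, reservation last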

src/heap/cppgc/marker.cc

@@ -12,25 +12,35 @@
#include "src/heap/cppgc/marking-visitor.h"
#include "src/heap/cppgc/stats-collector.h"
#if defined(CPPGC_CAGED_HEAP)
#include "include/cppgc/internal/caged-heap-local-data.h"
#endif
namespace cppgc {
namespace internal {
namespace {
void EnterIncrementalMarkingIfNeeded(Marker::MarkingConfig config) {
void EnterIncrementalMarkingIfNeeded(Marker::MarkingConfig config, Heap* heap) {
if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
config.marking_type ==
Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
ProcessHeap::EnterIncrementalOrConcurrentMarking();
}
#if defined(CPPGC_CAGED_HEAP)
heap->caged_heap().local_data().is_marking_in_progress = true;
#endif
}
void ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config) {
void ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config, Heap* heap) {
if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
config.marking_type ==
Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
ProcessHeap::ExitIncrementalOrConcurrentMarking();
}
#if defined(CPPGC_CAGED_HEAP)
heap->caged_heap().local_data().is_marking_in_progress = false;
#endif
}
template <typename Worklist, typename Callback>
@@ -86,11 +96,11 @@ void Marker::StartMarking(MarkingConfig config) {
config_ = config;
VisitRoots();
EnterIncrementalMarkingIfNeeded(config);
EnterIncrementalMarkingIfNeeded(config, heap());
}
void Marker::FinishMarking(MarkingConfig config) {
ExitIncrementalMarkingIfNeeded(config_);
ExitIncrementalMarkingIfNeeded(config_, heap());
config_ = config;
// Reset LABs before trying to conservatively mark in-construction objects.
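
With this change the marker toggles two flags on entry and exit: the process-global one used by the non-caged barrier, and the per-cage is_marking_in_progress bit that the caged barrier reads. A minimal sketch of that handshake (stand-in types; the real code reaches the bit via heap->caged_heap().local_data()):

#include <cassert>

// Stand-in for CagedHeapLocalData at the cage base.
struct LocalData {
  bool is_marking_in_progress = false;
};

void StartMarking(LocalData& data) { data.is_marking_in_progress = true; }
void FinishMarking(LocalData& data) { data.is_marking_in_progress = false; }

// The barrier's hot path reduces to a single flag load.
bool BarrierTakesSlowPath(const LocalData& data) {
  return data.is_marking_in_progress;
}

int main() {
  LocalData data;
  assert(!BarrierTakesSlowPath(data));  // outside marking: barrier is a no-op
  StartMarking(data);
  assert(BarrierTakesSlowPath(data));   // during marking: values get marked
  FinishMarking(data);
  assert(!BarrierTakesSlowPath(data));
}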

src/heap/cppgc/write-barrier.cc

@@ -5,6 +5,7 @@
#include "include/cppgc/internal/write-barrier.h"
#include "include/cppgc/internal/pointer-policies.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header-inl.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page-inl.h"
@@ -12,12 +13,22 @@
#include "src/heap/cppgc/marker.h"
#include "src/heap/cppgc/marking-visitor.h"
#if defined(CPPGC_CAGED_HEAP)
#include "include/cppgc/internal/caged-heap-local-data.h"
#endif
namespace cppgc {
namespace internal {
namespace {
void MarkValue(const BasePage* page, Marker* marker, const void* value) {
#if defined(CPPGC_CAGED_HEAP)
DCHECK(reinterpret_cast<CagedHeapLocalData*>(
reinterpret_cast<uintptr_t>(value) &
~(kCagedHeapReservationAlignment - 1))
->is_marking_in_progress);
#endif
auto& header =
const_cast<HeapObjectHeader&>(page->ObjectHeaderFromInnerAddress(value));
if (!header.TryMarkAtomic()) return;
@@ -41,9 +52,13 @@ void MarkValue(const BasePage* page, Marker* marker, const void* value) {
} // namespace
void WriteBarrier::MarkingBarrierSlow(const void*, const void* value) {
void WriteBarrier::MarkingBarrierSlowWithSentinelCheck(const void* value) {
if (!value || value == kSentinelPointer) return;
MarkingBarrierSlow(value);
}
void WriteBarrier::MarkingBarrierSlow(const void* value) {
const BasePage* page = BasePage::FromPayload(value);
const auto* heap = page->heap();