Reland "cppgc: Add page memory allocation backend"
This is a port of the corresponding backend in src/components/gc that was added there recently.
Differences:
- Added back bucketing to the page pool, as that guarantees that
arenas used for specific types do not have their pages used by other
arenas.
- Replaced base::flat_map with std::map. This may cause performance
  regressions when using PageMemoryRegionTree in hot paths. A
  vector-like representation may be used to fix such a regression.

This reverts commit a056cea51e.
Bug: chromium:1056170
Change-Id: Iffb8b0d91c8cca1815d7a1cda9486e7716aea75f
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2144060
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Anton Bikineev <bikineev@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67117}
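The bucketing mentioned above keys pooled page slots by an arena-specific bucket index, so a slot freed by one arena is only ever handed back to the same bucket. A simplified sketch of the idea (hypothetical types; the real NormalPageMemoryPool in this change stores NormalPageMemoryRegion*/writeable-base pairs):

#include <cstddef>
#include <vector>

// Simplified stand-in for NormalPageMemoryPool: one free list per bucket,
// where a bucket corresponds to an arena/space index.
template <typename Slot, size_t kNumBuckets = 16>
class BucketedPool {
 public:
  void Add(size_t bucket, Slot slot) { pool_[bucket].push_back(slot); }

  // Returns a default-constructed Slot when the bucket is empty; slots
  // pooled for one bucket are never handed out for another bucket.
  Slot Take(size_t bucket) {
    if (pool_[bucket].empty()) return Slot{};
    Slot slot = pool_[bucket].back();
    pool_[bucket].pop_back();
    return slot;
  }

 private:
  std::vector<Slot> pool_[kNumBuckets];
};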
commit 67ea621133 (parent 2db93c0233)

BUILD.gn (+3 lines)
@@ -3975,6 +3975,9 @@ v8_source_set("cppgc_base") {
     "src/heap/cppgc/heap.cc",
     "src/heap/cppgc/heap.h",
     "src/heap/cppgc/liveness-broker.cc",
+    "src/heap/cppgc/page-memory-inl.h",
+    "src/heap/cppgc/page-memory.cc",
+    "src/heap/cppgc/page-memory.h",
     "src/heap/cppgc/platform.cc",
     "src/heap/cppgc/pointer-policies.cc",
     "src/heap/cppgc/sanitizers.h",
@@ -31,6 +31,10 @@ constexpr size_t kPageSize = 1 << kPageSizeLog2;
 constexpr size_t kPageOffsetMask = kPageSize - 1;
 constexpr size_t kPageBaseMask = ~kPageOffsetMask;

+// Guard pages are always put into memory. Whether they are actually protected
+// depends on the allocator provided to the garbage collector.
+constexpr size_t kGuardPageSize = 4096;
+
 constexpr size_t kLargeObjectSizeThreshold = kPageSize / 2;

 }  // namespace internal
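With these constants, each normal page slot reserves kPageSize bytes and exposes kPageSize - 2 * kGuardPageSize writeable bytes between the two guard pages. A minimal sketch of that layout arithmetic; note that kPageSizeLog2 = 17 is an assumption for illustration, as the hunk above does not show its value:

#include <cstddef>

// Illustration only: kPageSizeLog2 is defined elsewhere in globals.h; 17
// (i.e. 128 KiB pages) is an assumed value for this sketch.
constexpr size_t kPageSizeLog2 = 17;
constexpr size_t kPageSize = size_t{1} << kPageSizeLog2;
constexpr size_t kGuardPageSize = 4096;

// One normal page slot: [front guard | writeable payload | back guard].
constexpr size_t kWriteableSize = kPageSize - 2 * kGuardPageSize;

static_assert(kWriteableSize == 131072 - 2 * 4096,
              "payload is the page minus the two guard pages");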
src/heap/cppgc/page-memory-inl.h (new file, 57 lines)
@ -0,0 +1,57 @@
|
||||
// Copyright 2020 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef V8_HEAP_CPPGC_PAGE_MEMORY_INL_H_
|
||||
#define V8_HEAP_CPPGC_PAGE_MEMORY_INL_H_
|
||||
|
||||
#include "src/heap/cppgc/page-memory.h"
|
||||
|
||||
namespace cppgc {
|
||||
namespace internal {
|
||||
|
||||
// Returns true if the provided allocator supports committing at the required
|
||||
// granularity.
|
||||
inline bool SupportsCommittingGuardPages(PageAllocator* allocator) {
|
||||
return kGuardPageSize % allocator->CommitPageSize() == 0;
|
||||
}
|
||||
|
||||
Address NormalPageMemoryRegion::Lookup(Address address) const {
|
||||
size_t index = GetIndex(address);
|
||||
if (!page_memories_in_use_[index]) return nullptr;
|
||||
const MemoryRegion writeable_region = GetPageMemory(index).writeable_region();
|
||||
return writeable_region.Contains(address) ? writeable_region.base() : nullptr;
|
||||
}
|
||||
|
||||
Address LargePageMemoryRegion::Lookup(Address address) const {
|
||||
const MemoryRegion writeable_region = GetPageMemory().writeable_region();
|
||||
return writeable_region.Contains(address) ? writeable_region.base() : nullptr;
|
||||
}
|
||||
|
||||
Address PageMemoryRegion::Lookup(Address address) const {
|
||||
DCHECK(reserved_region().Contains(address));
|
||||
return is_large()
|
||||
? static_cast<const LargePageMemoryRegion*>(this)->Lookup(address)
|
||||
: static_cast<const NormalPageMemoryRegion*>(this)->Lookup(
|
||||
address);
|
||||
}
|
||||
|
||||
PageMemoryRegion* PageMemoryRegionTree::Lookup(Address address) const {
|
||||
auto it = set_.upper_bound(address);
|
||||
// This check also covers set_.size() > 0, since for empty vectors it is
|
||||
// guaranteed that begin() == end().
|
||||
if (it == set_.begin()) return nullptr;
|
||||
auto* result = std::next(it, -1)->second;
|
||||
if (address < result->reserved_region().end()) return result;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
Address PageBackend::Lookup(Address address) const {
|
||||
PageMemoryRegion* pmr = page_memory_region_tree_.Lookup(address);
|
||||
return pmr ? pmr->Lookup(address) : nullptr;
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
} // namespace cppgc
|
||||
|
||||
#endif // V8_HEAP_CPPGC_PAGE_MEMORY_INL_H_
|
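PageMemoryRegionTree::Lookup above uses the standard ordered-map predecessor pattern: upper_bound finds the first region whose base is strictly greater than the address, so the entry just before it (if any) is the only candidate whose range can contain the address. A standalone sketch of the same pattern, using a hypothetical Region type rather than the cppgc one:

#include <cstdint>
#include <iterator>
#include <map>

struct Region {    // hypothetical stand-in for PageMemoryRegion
  uintptr_t base;
  uintptr_t end;   // exclusive
};

const Region* Find(const std::map<uintptr_t, const Region*>& regions,
                   uintptr_t address) {
  // First entry with key > address; its predecessor has the largest
  // base <= address and is therefore the only possible match.
  auto it = regions.upper_bound(address);
  if (it == regions.begin()) return nullptr;  // also handles an empty map
  const Region* candidate = std::prev(it)->second;
  return address < candidate->end ? candidate : nullptr;
}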
src/heap/cppgc/page-memory.cc (new file, 211 lines)
@ -0,0 +1,211 @@
|
||||
// Copyright 2020 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "src/heap/cppgc/page-memory.h"
|
||||
|
||||
#include "src/base/macros.h"
|
||||
#include "src/heap/cppgc/page-memory-inl.h"
|
||||
|
||||
namespace cppgc {
|
||||
namespace internal {
|
||||
|
||||
namespace {
|
||||
|
||||
void Unprotect(PageAllocator* allocator, const PageMemory& page_memory) {
|
||||
if (SupportsCommittingGuardPages(allocator)) {
|
||||
CHECK(allocator->SetPermissions(page_memory.writeable_region().base(),
|
||||
page_memory.writeable_region().size(),
|
||||
PageAllocator::Permission::kReadWrite));
|
||||
} else {
|
||||
// No protection in case the allocator cannot commit at the required
|
||||
// granularity. Only protect if the allocator supports committing at that
|
||||
// granularity.
|
||||
//
|
||||
// The allocator needs to support committing the overall range.
|
||||
CHECK_EQ(0u,
|
||||
page_memory.overall_region().size() % allocator->CommitPageSize());
|
||||
CHECK(allocator->SetPermissions(page_memory.overall_region().base(),
|
||||
page_memory.overall_region().size(),
|
||||
PageAllocator::Permission::kReadWrite));
|
||||
}
|
||||
}
|
||||
|
||||
void Protect(PageAllocator* allocator, const PageMemory& page_memory) {
|
||||
if (SupportsCommittingGuardPages(allocator)) {
|
||||
// Swap the same region, providing the OS with a chance for fast lookup and
|
||||
// change.
|
||||
CHECK(allocator->SetPermissions(page_memory.writeable_region().base(),
|
||||
page_memory.writeable_region().size(),
|
||||
PageAllocator::Permission::kNoAccess));
|
||||
} else {
|
||||
// See Unprotect().
|
||||
CHECK_EQ(0u,
|
||||
page_memory.overall_region().size() % allocator->CommitPageSize());
|
||||
CHECK(allocator->SetPermissions(page_memory.overall_region().base(),
|
||||
page_memory.overall_region().size(),
|
||||
PageAllocator::Permission::kNoAccess));
|
||||
}
|
||||
}
|
||||
|
||||
MemoryRegion ReserveMemoryRegion(PageAllocator* allocator,
|
||||
size_t allocation_size) {
|
||||
void* region_memory =
|
||||
allocator->AllocatePages(nullptr, allocation_size, kPageSize,
|
||||
PageAllocator::Permission::kNoAccess);
|
||||
const MemoryRegion reserved_region(static_cast<Address>(region_memory),
|
||||
allocation_size);
|
||||
DCHECK_EQ(reserved_region.base() + allocation_size, reserved_region.end());
|
||||
return reserved_region;
|
||||
}
|
||||
|
||||
void FreeMemoryRegion(PageAllocator* allocator,
|
||||
const MemoryRegion& reserved_region) {
|
||||
allocator->FreePages(reserved_region.base(), reserved_region.size());
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
PageMemoryRegion::PageMemoryRegion(PageAllocator* allocator,
|
||||
MemoryRegion reserved_region, bool is_large)
|
||||
: allocator_(allocator),
|
||||
reserved_region_(reserved_region),
|
||||
is_large_(is_large) {}
|
||||
|
||||
PageMemoryRegion::~PageMemoryRegion() {
|
||||
FreeMemoryRegion(allocator_, reserved_region());
|
||||
}
|
||||
|
||||
// static
|
||||
constexpr size_t NormalPageMemoryRegion::kNumPageRegions;
|
||||
|
||||
NormalPageMemoryRegion::NormalPageMemoryRegion(PageAllocator* allocator)
|
||||
: PageMemoryRegion(allocator,
|
||||
ReserveMemoryRegion(
|
||||
allocator, RoundUp(kPageSize * kNumPageRegions,
|
||||
allocator->AllocatePageSize())),
|
||||
false) {
|
||||
#ifdef DEBUG
|
||||
for (size_t i = 0; i < kNumPageRegions; ++i) {
|
||||
DCHECK_EQ(false, page_memories_in_use_[i]);
|
||||
}
|
||||
#endif // DEBUG
|
||||
}
|
||||
|
||||
NormalPageMemoryRegion::~NormalPageMemoryRegion() = default;
|
||||
|
||||
void NormalPageMemoryRegion::Allocate(Address writeable_base) {
|
||||
const size_t index = GetIndex(writeable_base);
|
||||
ChangeUsed(index, true);
|
||||
Unprotect(allocator_, GetPageMemory(index));
|
||||
}
|
||||
|
||||
void NormalPageMemoryRegion::Free(Address writeable_base) {
|
||||
const size_t index = GetIndex(writeable_base);
|
||||
ChangeUsed(index, false);
|
||||
Protect(allocator_, GetPageMemory(index));
|
||||
}
|
||||
|
||||
void NormalPageMemoryRegion::UnprotectForTesting() {
|
||||
for (size_t i = 0; i < kNumPageRegions; ++i) {
|
||||
Unprotect(allocator_, GetPageMemory(i));
|
||||
}
|
||||
}
|
||||
|
||||
LargePageMemoryRegion::LargePageMemoryRegion(PageAllocator* allocator,
|
||||
size_t length)
|
||||
: PageMemoryRegion(allocator,
|
||||
ReserveMemoryRegion(
|
||||
allocator, RoundUp(length + 2 * kGuardPageSize,
|
||||
allocator->AllocatePageSize())),
|
||||
true) {}
|
||||
|
||||
LargePageMemoryRegion::~LargePageMemoryRegion() = default;
|
||||
|
||||
void LargePageMemoryRegion::UnprotectForTesting() {
|
||||
Unprotect(allocator_, GetPageMemory());
|
||||
}
|
||||
|
||||
PageMemoryRegionTree::PageMemoryRegionTree() = default;
|
||||
|
||||
PageMemoryRegionTree::~PageMemoryRegionTree() = default;
|
||||
|
||||
void PageMemoryRegionTree::Add(PageMemoryRegion* region) {
|
||||
DCHECK(region);
|
||||
auto result = set_.emplace(region->reserved_region().base(), region);
|
||||
USE(result);
|
||||
DCHECK(result.second);
|
||||
}
|
||||
|
||||
void PageMemoryRegionTree::Remove(PageMemoryRegion* region) {
|
||||
DCHECK(region);
|
||||
auto size = set_.erase(region->reserved_region().base());
|
||||
USE(size);
|
||||
DCHECK_EQ(1u, size);
|
||||
}
|
||||
|
||||
NormalPageMemoryPool::NormalPageMemoryPool() = default;
|
||||
|
||||
NormalPageMemoryPool::~NormalPageMemoryPool() = default;
|
||||
|
||||
void NormalPageMemoryPool::Add(size_t bucket, NormalPageMemoryRegion* pmr,
|
||||
Address writeable_base) {
|
||||
DCHECK_LT(bucket, kNumPoolBuckets);
|
||||
pool_[bucket].push_back(std::make_pair(pmr, writeable_base));
|
||||
}
|
||||
|
||||
std::pair<NormalPageMemoryRegion*, Address> NormalPageMemoryPool::Take(
|
||||
size_t bucket) {
|
||||
DCHECK_LT(bucket, kNumPoolBuckets);
|
||||
if (pool_[bucket].empty()) return {nullptr, nullptr};
|
||||
std::pair<NormalPageMemoryRegion*, Address> pair = pool_[bucket].back();
|
||||
pool_[bucket].pop_back();
|
||||
return pair;
|
||||
}
|
||||
|
||||
PageBackend::PageBackend(PageAllocator* allocator) : allocator_(allocator) {}
|
||||
|
||||
PageBackend::~PageBackend() = default;
|
||||
|
||||
Address PageBackend::AllocateNormalPageMemory(size_t bucket) {
|
||||
std::pair<NormalPageMemoryRegion*, Address> result = page_pool_.Take(bucket);
|
||||
if (!result.first) {
|
||||
auto pmr = std::make_unique<NormalPageMemoryRegion>(allocator_);
|
||||
for (size_t i = 0; i < NormalPageMemoryRegion::kNumPageRegions; ++i) {
|
||||
page_pool_.Add(bucket, pmr.get(),
|
||||
pmr->GetPageMemory(i).writeable_region().base());
|
||||
}
|
||||
page_memory_region_tree_.Add(pmr.get());
|
||||
normal_page_memory_regions_.push_back(std::move(pmr));
|
||||
return AllocateNormalPageMemory(bucket);
|
||||
}
|
||||
result.first->Allocate(result.second);
|
||||
return result.second;
|
||||
}
|
||||
|
||||
void PageBackend::FreeNormalPageMemory(size_t bucket, Address writeable_base) {
|
||||
auto* pmr = static_cast<NormalPageMemoryRegion*>(
|
||||
page_memory_region_tree_.Lookup(writeable_base));
|
||||
pmr->Free(writeable_base);
|
||||
page_pool_.Add(bucket, pmr, writeable_base);
|
||||
}
|
||||
|
||||
Address PageBackend::AllocateLargePageMemory(size_t size) {
|
||||
auto pmr = std::make_unique<LargePageMemoryRegion>(allocator_, size);
|
||||
const PageMemory pm = pmr->GetPageMemory();
|
||||
Unprotect(allocator_, pm);
|
||||
page_memory_region_tree_.Add(pmr.get());
|
||||
large_page_memory_regions_.insert({pmr.get(), std::move(pmr)});
|
||||
return pm.writeable_region().base();
|
||||
}
|
||||
|
||||
void PageBackend::FreeLargePageMemory(Address writeable_base) {
|
||||
PageMemoryRegion* pmr = page_memory_region_tree_.Lookup(writeable_base);
|
||||
page_memory_region_tree_.Remove(pmr);
|
||||
auto size = large_page_memory_regions_.erase(pmr);
|
||||
USE(size);
|
||||
DCHECK_EQ(1u, size);
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
} // namespace cppgc
|
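Taken together, normal-page allocation round-trips through the pool and the region tree: a pool miss reserves a whole NormalPageMemoryRegion, seeds the bucket, and retries. A minimal usage sketch (assuming a v8::base::PageAllocator as the platform allocator, as the unit tests below do):

#include "src/base/page-allocator.h"
#include "src/heap/cppgc/page-memory-inl.h"

void UsePageBackend() {
  v8::base::PageAllocator platform_allocator;
  cppgc::internal::PageBackend backend(&platform_allocator);

  // The first allocation for a bucket reserves a NormalPageMemoryRegion and
  // fills the pool; a free returns the slot to the same bucket, so the next
  // allocation for that bucket reuses it.
  constexpr size_t kBucket = 0;
  cppgc::internal::Address page = backend.AllocateNormalPageMemory(kBucket);
  backend.FreeNormalPageMemory(kBucket, page);

  // Large pages bypass the pool. Lookup resolves any interior pointer of a
  // live page back to its writeable base.
  cppgc::internal::Address large = backend.AllocateLargePageMemory(1024);
  cppgc::internal::Address base = backend.Lookup(large + 16);  // == large
  (void)base;
  backend.FreeLargePageMemory(large);
}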
src/heap/cppgc/page-memory.h (new file, 237 lines)
@ -0,0 +1,237 @@
|
||||
// Copyright 2020 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef V8_HEAP_CPPGC_PAGE_MEMORY_H_
|
||||
#define V8_HEAP_CPPGC_PAGE_MEMORY_H_
|
||||
|
||||
#include <array>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <unordered_map>
|
||||
#include <vector>
|
||||
|
||||
#include "include/cppgc/platform.h"
|
||||
#include "src/base/macros.h"
|
||||
#include "src/heap/cppgc/globals.h"
|
||||
|
||||
namespace cppgc {
|
||||
namespace internal {
|
||||
|
||||
class V8_EXPORT_PRIVATE MemoryRegion final {
|
||||
public:
|
||||
MemoryRegion() = default;
|
||||
MemoryRegion(Address base, size_t size) : base_(base), size_(size) {
|
||||
DCHECK(base);
|
||||
DCHECK_LT(0u, size);
|
||||
}
|
||||
|
||||
Address base() const { return base_; }
|
||||
size_t size() const { return size_; }
|
||||
Address end() const { return base_ + size_; }
|
||||
|
||||
bool Contains(Address addr) const {
|
||||
return (reinterpret_cast<uintptr_t>(addr) -
|
||||
reinterpret_cast<uintptr_t>(base_)) < size_;
|
||||
}
|
||||
|
||||
bool Contains(const MemoryRegion& other) const {
|
||||
return base_ <= other.base() && other.end() <= end();
|
||||
}
|
||||
|
||||
private:
|
||||
Address base_ = nullptr;
|
||||
size_t size_ = 0;
|
||||
};
|
||||
|
||||
// PageMemory provides the backing of a single normal or large page.
|
||||
class V8_EXPORT_PRIVATE PageMemory final {
|
||||
public:
|
||||
PageMemory(MemoryRegion overall, MemoryRegion writeable)
|
||||
: overall_(overall), writable_(writeable) {
|
||||
DCHECK(overall.Contains(writeable));
|
||||
}
|
||||
|
||||
const MemoryRegion writeable_region() const { return writable_; }
|
||||
const MemoryRegion overall_region() const { return overall_; }
|
||||
|
||||
private:
|
||||
MemoryRegion overall_;
|
||||
MemoryRegion writable_;
|
||||
};
|
||||
|
||||
class V8_EXPORT_PRIVATE PageMemoryRegion {
|
||||
public:
|
||||
virtual ~PageMemoryRegion();
|
||||
|
||||
const MemoryRegion reserved_region() const { return reserved_region_; }
|
||||
bool is_large() const { return is_large_; }
|
||||
|
||||
// Lookup writeable base for an |address| that's contained in
|
||||
// PageMemoryRegion. Filters out addresses that are contained in non-writeable
|
||||
// regions (e.g. guard pages).
|
||||
inline Address Lookup(Address address) const;
|
||||
|
||||
// Disallow copy/move.
|
||||
PageMemoryRegion(const PageMemoryRegion&) = delete;
|
||||
PageMemoryRegion& operator=(const PageMemoryRegion&) = delete;
|
||||
|
||||
virtual void UnprotectForTesting() = 0;
|
||||
|
||||
protected:
|
||||
PageMemoryRegion(PageAllocator*, MemoryRegion, bool);
|
||||
|
||||
PageAllocator* const allocator_;
|
||||
const MemoryRegion reserved_region_;
|
||||
const bool is_large_;
|
||||
};
|
||||
|
||||
// NormalPageMemoryRegion serves kNumPageRegions normal-sized PageMemory object.
|
||||
class V8_EXPORT_PRIVATE NormalPageMemoryRegion final : public PageMemoryRegion {
|
||||
public:
|
||||
static constexpr size_t kNumPageRegions = 10;
|
||||
|
||||
explicit NormalPageMemoryRegion(PageAllocator*);
|
||||
~NormalPageMemoryRegion() override;
|
||||
|
||||
const PageMemory GetPageMemory(size_t index) const {
|
||||
DCHECK_LT(index, kNumPageRegions);
|
||||
return PageMemory(
|
||||
MemoryRegion(reserved_region().base() + kPageSize * index, kPageSize),
|
||||
MemoryRegion(
|
||||
reserved_region().base() + kPageSize * index + kGuardPageSize,
|
||||
kPageSize - 2 * kGuardPageSize));
|
||||
}
|
||||
|
||||
// Allocates a normal page at |writeable_base| address. Changes page
|
||||
// protection.
|
||||
void Allocate(Address writeable_base);
|
||||
|
||||
// Frees a normal page at at |writeable_base| address. Changes page
|
||||
// protection.
|
||||
void Free(Address);
|
||||
|
||||
inline Address Lookup(Address) const;
|
||||
|
||||
void UnprotectForTesting() final;
|
||||
|
||||
private:
|
||||
void ChangeUsed(size_t index, bool value) {
|
||||
DCHECK_LT(index, kNumPageRegions);
|
||||
DCHECK_EQ(value, !page_memories_in_use_[index]);
|
||||
page_memories_in_use_[index] = value;
|
||||
}
|
||||
|
||||
size_t GetIndex(Address address) const {
|
||||
return static_cast<size_t>(address - reserved_region().base()) >>
|
||||
kPageSizeLog2;
|
||||
}
|
||||
|
||||
std::array<bool, kNumPageRegions> page_memories_in_use_ = {};
|
||||
};
|
||||
|
||||
// LargePageMemoryRegion serves a single large PageMemory object.
|
||||
class V8_EXPORT_PRIVATE LargePageMemoryRegion final : public PageMemoryRegion {
|
||||
public:
|
||||
LargePageMemoryRegion(PageAllocator*, size_t);
|
||||
~LargePageMemoryRegion() override;
|
||||
|
||||
const PageMemory GetPageMemory() const {
|
||||
return PageMemory(
|
||||
MemoryRegion(reserved_region().base(), reserved_region().size()),
|
||||
MemoryRegion(reserved_region().base() + kGuardPageSize,
|
||||
reserved_region().size() - 2 * kGuardPageSize));
|
||||
}
|
||||
|
||||
inline Address Lookup(Address) const;
|
||||
|
||||
void UnprotectForTesting() final;
|
||||
};
|
||||
|
||||
// A PageMemoryRegionTree is a binary search tree of PageMemoryRegions sorted
|
||||
// by reserved base addresses.
|
||||
//
|
||||
// The tree does not keep its elements alive but merely provides indexing
|
||||
// capabilities.
|
||||
class V8_EXPORT_PRIVATE PageMemoryRegionTree final {
|
||||
public:
|
||||
PageMemoryRegionTree();
|
||||
~PageMemoryRegionTree();
|
||||
|
||||
void Add(PageMemoryRegion*);
|
||||
void Remove(PageMemoryRegion*);
|
||||
|
||||
inline PageMemoryRegion* Lookup(Address) const;
|
||||
|
||||
private:
|
||||
std::map<Address, PageMemoryRegion*> set_;
|
||||
};
|
||||
|
||||
// A pool of PageMemory objects represented by the writeable base addresses.
|
||||
//
|
||||
// The pool does not keep its elements alive but merely provides pooling
|
||||
// capabilities.
|
||||
class V8_EXPORT_PRIVATE NormalPageMemoryPool final {
|
||||
public:
|
||||
static constexpr size_t kNumPoolBuckets = 16;
|
||||
|
||||
using Result = std::pair<NormalPageMemoryRegion*, Address>;
|
||||
|
||||
NormalPageMemoryPool();
|
||||
~NormalPageMemoryPool();
|
||||
|
||||
void Add(size_t, NormalPageMemoryRegion*, Address);
|
||||
Result Take(size_t);
|
||||
|
||||
private:
|
||||
std::vector<Result> pool_[kNumPoolBuckets];
|
||||
};
|
||||
|
||||
// A backend that is used for allocating and freeing normal and large pages.
|
||||
//
|
||||
// Internally maintaints a set of PageMemoryRegions. The backend keeps its used
|
||||
// regions alive.
|
||||
class V8_EXPORT_PRIVATE PageBackend final {
|
||||
public:
|
||||
explicit PageBackend(PageAllocator*);
|
||||
~PageBackend();
|
||||
|
||||
// Allocates a normal page from the backend.
|
||||
//
|
||||
// Returns the writeable base of the region.
|
||||
Address AllocateNormalPageMemory(size_t);
|
||||
|
||||
// Returns normal page memory back to the backend. Expects the
|
||||
// |writeable_base| returned by |AllocateNormalMemory()|.
|
||||
void FreeNormalPageMemory(size_t, Address writeable_base);
|
||||
|
||||
// Allocates a large page from the backend.
|
||||
//
|
||||
// Returns the writeable base of the region.
|
||||
Address AllocateLargePageMemory(size_t size);
|
||||
|
||||
// Returns large page memory back to the backend. Expects the |writeable_base|
|
||||
// returned by |AllocateLargePageMemory()|.
|
||||
void FreeLargePageMemory(Address writeable_base);
|
||||
|
||||
// Returns the writeable base if |address| is contained in a valid page
|
||||
// memory.
|
||||
inline Address Lookup(Address) const;
|
||||
|
||||
// Disallow copy/move.
|
||||
PageBackend(const PageBackend&) = delete;
|
||||
PageBackend& operator=(const PageBackend&) = delete;
|
||||
|
||||
private:
|
||||
PageAllocator* allocator_;
|
||||
NormalPageMemoryPool page_pool_;
|
||||
PageMemoryRegionTree page_memory_region_tree_;
|
||||
std::vector<std::unique_ptr<PageMemoryRegion>> normal_page_memory_regions_;
|
||||
std::unordered_map<PageMemoryRegion*, std::unique_ptr<PageMemoryRegion>>
|
||||
large_page_memory_regions_;
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
} // namespace cppgc
|
||||
|
||||
#endif // V8_HEAP_CPPGC_PAGE_MEMORY_H_
|
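MemoryRegion::Contains(Address) above relies on unsigned wraparound: if addr is below base_, the subtraction wraps around to a huge value that can never be smaller than size_, so a single comparison covers both bounds. A standalone sketch of the same trick:

#include <cstddef>
#include <cstdint>

// One-comparison range check: for addr < base the unsigned difference wraps
// around to a value >= size, so the single "< size" test rejects it.
bool InRange(uintptr_t addr, uintptr_t base, size_t size) {
  return addr - base < size;
}

// Example: InRange(base - 1, base, size) is false, because
// (base - 1) - base wraps to UINTPTR_MAX, which is never < size.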
@@ -51,6 +51,7 @@ v8_source_set("cppgc_unittests_sources") {
     "heap/cppgc/heap-page_unittest.cc",
     "heap/cppgc/heap_unittest.cc",
     "heap/cppgc/member_unittests.cc",
+    "heap/cppgc/page-memory_unittest.cc",
     "heap/cppgc/source-location_unittest.cc",
     "heap/cppgc/stack_unittest.cc",
     "heap/cppgc/tests.cc",
test/unittests/heap/cppgc/page-memory_unittest.cc (new file, 308 lines)
@ -0,0 +1,308 @@
|
||||
// Copyright 2020 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "src/heap/cppgc/page-memory.h"
|
||||
|
||||
#include "src/base/page-allocator.h"
|
||||
#include "src/heap/cppgc/page-memory-inl.h"
|
||||
#include "testing/gtest/include/gtest/gtest.h"
|
||||
|
||||
namespace cppgc {
|
||||
namespace internal {
|
||||
|
||||
TEST(MemoryRegionTest, Construct) {
|
||||
constexpr size_t kSize = 17;
|
||||
uint8_t dummy[kSize];
|
||||
const MemoryRegion region(dummy, kSize);
|
||||
EXPECT_EQ(dummy, region.base());
|
||||
EXPECT_EQ(kSize, region.size());
|
||||
EXPECT_EQ(dummy + kSize, region.end());
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
Address AtOffset(uint8_t* base, intptr_t offset) {
|
||||
return reinterpret_cast<Address>(reinterpret_cast<intptr_t>(base) + offset);
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
TEST(MemoryRegionTest, ContainsAddress) {
|
||||
constexpr size_t kSize = 7;
|
||||
uint8_t dummy[kSize];
|
||||
const MemoryRegion region(dummy, kSize);
|
||||
EXPECT_FALSE(region.Contains(AtOffset(dummy, -1)));
|
||||
EXPECT_TRUE(region.Contains(dummy));
|
||||
EXPECT_TRUE(region.Contains(dummy + kSize - 1));
|
||||
EXPECT_FALSE(region.Contains(AtOffset(dummy, kSize)));
|
||||
}
|
||||
|
||||
TEST(MemoryRegionTest, ContainsMemoryRegion) {
|
||||
constexpr size_t kSize = 7;
|
||||
uint8_t dummy[kSize];
|
||||
const MemoryRegion region(dummy, kSize);
|
||||
const MemoryRegion contained_region1(dummy, kSize - 1);
|
||||
EXPECT_TRUE(region.Contains(contained_region1));
|
||||
const MemoryRegion contained_region2(dummy + 1, kSize - 1);
|
||||
EXPECT_TRUE(region.Contains(contained_region2));
|
||||
const MemoryRegion not_contained_region1(AtOffset(dummy, -1), kSize);
|
||||
EXPECT_FALSE(region.Contains(not_contained_region1));
|
||||
const MemoryRegion not_contained_region2(AtOffset(dummy, kSize), 1);
|
||||
EXPECT_FALSE(region.Contains(not_contained_region2));
|
||||
}
|
||||
|
||||
TEST(PageMemoryTest, Construct) {
|
||||
constexpr size_t kOverallSize = 17;
|
||||
uint8_t dummy[kOverallSize];
|
||||
const MemoryRegion overall_region(dummy, kOverallSize);
|
||||
const MemoryRegion writeable_region(dummy + 1, kOverallSize - 2);
|
||||
const PageMemory page_memory(overall_region, writeable_region);
|
||||
EXPECT_EQ(dummy, page_memory.overall_region().base());
|
||||
EXPECT_EQ(dummy + kOverallSize, page_memory.overall_region().end());
|
||||
EXPECT_EQ(dummy + 1, page_memory.writeable_region().base());
|
||||
EXPECT_EQ(dummy + kOverallSize - 1, page_memory.writeable_region().end());
|
||||
}
|
||||
|
||||
#if DEBUG
|
||||
|
||||
TEST(PageMemoryDeathTest, ConstructNonContainedRegions) {
|
||||
constexpr size_t kOverallSize = 17;
|
||||
uint8_t dummy[kOverallSize];
|
||||
const MemoryRegion overall_region(dummy, kOverallSize);
|
||||
const MemoryRegion writeable_region(dummy + 1, kOverallSize);
|
||||
EXPECT_DEATH_IF_SUPPORTED(PageMemory(overall_region, writeable_region), "");
|
||||
}
|
||||
|
||||
#endif // DEBUG
|
||||
|
||||
TEST(PageMemoryRegionTest, NormalPageMemoryRegion) {
|
||||
v8::base::PageAllocator allocator;
|
||||
auto pmr = std::make_unique<NormalPageMemoryRegion>(&allocator);
|
||||
pmr->UnprotectForTesting();
|
||||
MemoryRegion prev_overall;
|
||||
for (size_t i = 0; i < NormalPageMemoryRegion::kNumPageRegions; ++i) {
|
||||
const PageMemory pm = pmr->GetPageMemory(i);
|
||||
// Previous PageMemory aligns with the current one.
|
||||
if (prev_overall.base()) {
|
||||
EXPECT_EQ(prev_overall.end(), pm.overall_region().base());
|
||||
}
|
||||
prev_overall =
|
||||
MemoryRegion(pm.overall_region().base(), pm.overall_region().size());
|
||||
// Writeable region is contained in overall region.
|
||||
EXPECT_TRUE(pm.overall_region().Contains(pm.writeable_region()));
|
||||
EXPECT_EQ(0u, pm.writeable_region().base()[0]);
|
||||
EXPECT_EQ(0u, pm.writeable_region().end()[-1]);
|
||||
// Front guard page.
|
||||
EXPECT_EQ(pm.writeable_region().base(),
|
||||
pm.overall_region().base() + kGuardPageSize);
|
||||
// Back guard page.
|
||||
EXPECT_EQ(pm.overall_region().end(),
|
||||
pm.writeable_region().end() + kGuardPageSize);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(PageMemoryRegionTest, LargePageMemoryRegion) {
|
||||
v8::base::PageAllocator allocator;
|
||||
auto pmr = std::make_unique<LargePageMemoryRegion>(&allocator, 1024);
|
||||
pmr->UnprotectForTesting();
|
||||
const PageMemory pm = pmr->GetPageMemory();
|
||||
EXPECT_LE(1024u, pm.writeable_region().size());
|
||||
EXPECT_EQ(0u, pm.writeable_region().base()[0]);
|
||||
EXPECT_EQ(0u, pm.writeable_region().end()[-1]);
|
||||
}
|
||||
|
||||
TEST(PageMemoryRegionTest, PlatformUsesGuardPages) {
|
||||
// This tests that the testing allocator actually uses protected guard
|
||||
// regions.
|
||||
v8::base::PageAllocator allocator;
|
||||
#ifdef V8_HOST_ARCH_PPC64
|
||||
EXPECT_FALSE(SupportsCommittingGuardPages(&allocator));
|
||||
#else // !V8_HOST_ARCH_PPC64
|
||||
EXPECT_TRUE(SupportsCommittingGuardPages(&allocator));
|
||||
#endif // !V8_HOST_ARCH_PPC64
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
V8_NOINLINE uint8_t access(volatile const uint8_t& u) { return u; }
|
||||
|
||||
} // namespace
|
||||
|
||||
TEST(PageMemoryRegionDeathTest, ReservationIsFreed) {
|
||||
// Full sequence as part of the death test macro as otherwise, the macro
|
||||
// may expand to statements that re-purpose the previously freed memory
|
||||
// and thus not crash.
|
||||
EXPECT_DEATH_IF_SUPPORTED(
|
||||
v8::base::PageAllocator allocator; Address base; {
|
||||
auto pmr = std::make_unique<LargePageMemoryRegion>(&allocator, 1024);
|
||||
base = pmr->reserved_region().base();
|
||||
} access(base[0]);
|
||||
, "");
|
||||
}
|
||||
|
||||
TEST(PageMemoryRegionDeathTest, FrontGuardPageAccessCrashes) {
|
||||
v8::base::PageAllocator allocator;
|
||||
auto pmr = std::make_unique<NormalPageMemoryRegion>(&allocator);
|
||||
if (SupportsCommittingGuardPages(&allocator)) {
|
||||
EXPECT_DEATH_IF_SUPPORTED(
|
||||
access(pmr->GetPageMemory(0).overall_region().base()[0]), "");
|
||||
}
|
||||
}
|
||||
|
||||
TEST(PageMemoryRegionDeathTest, BackGuardPageAccessCrashes) {
|
||||
v8::base::PageAllocator allocator;
|
||||
auto pmr = std::make_unique<NormalPageMemoryRegion>(&allocator);
|
||||
if (SupportsCommittingGuardPages(&allocator)) {
|
||||
EXPECT_DEATH_IF_SUPPORTED(
|
||||
access(pmr->GetPageMemory(0).writeable_region().end()[0]), "");
|
||||
}
|
||||
}
|
||||
|
||||
TEST(PageMemoryRegionTreeTest, AddNormalLookupRemove) {
|
||||
v8::base::PageAllocator allocator;
|
||||
auto pmr = std::make_unique<NormalPageMemoryRegion>(&allocator);
|
||||
PageMemoryRegionTree tree;
|
||||
tree.Add(pmr.get());
|
||||
ASSERT_EQ(pmr.get(), tree.Lookup(pmr->reserved_region().base()));
|
||||
ASSERT_EQ(pmr.get(), tree.Lookup(pmr->reserved_region().end() - 1));
|
||||
ASSERT_EQ(nullptr, tree.Lookup(pmr->reserved_region().base() - 1));
|
||||
ASSERT_EQ(nullptr, tree.Lookup(pmr->reserved_region().end()));
|
||||
tree.Remove(pmr.get());
|
||||
ASSERT_EQ(nullptr, tree.Lookup(pmr->reserved_region().base()));
|
||||
ASSERT_EQ(nullptr, tree.Lookup(pmr->reserved_region().end() - 1));
|
||||
}
|
||||
|
||||
TEST(PageMemoryRegionTreeTest, AddLargeLookupRemove) {
|
||||
v8::base::PageAllocator allocator;
|
||||
constexpr size_t kLargeSize = 5012;
|
||||
auto pmr = std::make_unique<LargePageMemoryRegion>(&allocator, kLargeSize);
|
||||
PageMemoryRegionTree tree;
|
||||
tree.Add(pmr.get());
|
||||
ASSERT_EQ(pmr.get(), tree.Lookup(pmr->reserved_region().base()));
|
||||
ASSERT_EQ(pmr.get(), tree.Lookup(pmr->reserved_region().end() - 1));
|
||||
ASSERT_EQ(nullptr, tree.Lookup(pmr->reserved_region().base() - 1));
|
||||
ASSERT_EQ(nullptr, tree.Lookup(pmr->reserved_region().end()));
|
||||
tree.Remove(pmr.get());
|
||||
ASSERT_EQ(nullptr, tree.Lookup(pmr->reserved_region().base()));
|
||||
ASSERT_EQ(nullptr, tree.Lookup(pmr->reserved_region().end() - 1));
|
||||
}
|
||||
|
||||
TEST(PageMemoryRegionTreeTest, AddLookupRemoveMultiple) {
|
||||
v8::base::PageAllocator allocator;
|
||||
auto pmr1 = std::make_unique<NormalPageMemoryRegion>(&allocator);
|
||||
constexpr size_t kLargeSize = 3127;
|
||||
auto pmr2 = std::make_unique<LargePageMemoryRegion>(&allocator, kLargeSize);
|
||||
PageMemoryRegionTree tree;
|
||||
tree.Add(pmr1.get());
|
||||
tree.Add(pmr2.get());
|
||||
ASSERT_EQ(pmr1.get(), tree.Lookup(pmr1->reserved_region().base()));
|
||||
ASSERT_EQ(pmr1.get(), tree.Lookup(pmr1->reserved_region().end() - 1));
|
||||
ASSERT_EQ(pmr2.get(), tree.Lookup(pmr2->reserved_region().base()));
|
||||
ASSERT_EQ(pmr2.get(), tree.Lookup(pmr2->reserved_region().end() - 1));
|
||||
tree.Remove(pmr1.get());
|
||||
ASSERT_EQ(pmr2.get(), tree.Lookup(pmr2->reserved_region().base()));
|
||||
ASSERT_EQ(pmr2.get(), tree.Lookup(pmr2->reserved_region().end() - 1));
|
||||
tree.Remove(pmr2.get());
|
||||
ASSERT_EQ(nullptr, tree.Lookup(pmr2->reserved_region().base()));
|
||||
ASSERT_EQ(nullptr, tree.Lookup(pmr2->reserved_region().end() - 1));
|
||||
}
|
||||
|
||||
TEST(NormalPageMemoryPool, ConstructorEmpty) {
|
||||
v8::base::PageAllocator allocator;
|
||||
NormalPageMemoryPool pool;
|
||||
constexpr size_t kBucket = 0;
|
||||
EXPECT_EQ(NormalPageMemoryPool::Result(nullptr, nullptr), pool.Take(kBucket));
|
||||
}
|
||||
|
||||
TEST(NormalPageMemoryPool, AddTakeSameBucket) {
|
||||
v8::base::PageAllocator allocator;
|
||||
auto pmr = std::make_unique<NormalPageMemoryRegion>(&allocator);
|
||||
const PageMemory pm = pmr->GetPageMemory(0);
|
||||
NormalPageMemoryPool pool;
|
||||
constexpr size_t kBucket = 0;
|
||||
pool.Add(kBucket, pmr.get(), pm.writeable_region().base());
|
||||
EXPECT_EQ(
|
||||
NormalPageMemoryPool::Result(pmr.get(), pm.writeable_region().base()),
|
||||
pool.Take(kBucket));
|
||||
}
|
||||
|
||||
TEST(NormalPageMemoryPool, AddTakeNotFoundDifferentBucket) {
|
||||
v8::base::PageAllocator allocator;
|
||||
auto pmr = std::make_unique<NormalPageMemoryRegion>(&allocator);
|
||||
const PageMemory pm = pmr->GetPageMemory(0);
|
||||
NormalPageMemoryPool pool;
|
||||
constexpr size_t kFirstBucket = 0;
|
||||
constexpr size_t kSecondBucket = 1;
|
||||
pool.Add(kFirstBucket, pmr.get(), pm.writeable_region().base());
|
||||
EXPECT_EQ(NormalPageMemoryPool::Result(nullptr, nullptr),
|
||||
pool.Take(kSecondBucket));
|
||||
EXPECT_EQ(
|
||||
NormalPageMemoryPool::Result(pmr.get(), pm.writeable_region().base()),
|
||||
pool.Take(kFirstBucket));
|
||||
}
|
||||
|
||||
TEST(PageBackendTest, AllocateNormalUsesPool) {
|
||||
v8::base::PageAllocator allocator;
|
||||
PageBackend backend(&allocator);
|
||||
constexpr size_t kBucket = 0;
|
||||
Address writeable_base1 = backend.AllocateNormalPageMemory(kBucket);
|
||||
EXPECT_NE(nullptr, writeable_base1);
|
||||
backend.FreeNormalPageMemory(kBucket, writeable_base1);
|
||||
Address writeable_base2 = backend.AllocateNormalPageMemory(kBucket);
|
||||
EXPECT_NE(nullptr, writeable_base2);
|
||||
EXPECT_EQ(writeable_base1, writeable_base2);
|
||||
}
|
||||
|
||||
TEST(PageBackendTest, AllocateLarge) {
|
||||
v8::base::PageAllocator allocator;
|
||||
PageBackend backend(&allocator);
|
||||
Address writeable_base1 = backend.AllocateLargePageMemory(13731);
|
||||
EXPECT_NE(nullptr, writeable_base1);
|
||||
Address writeable_base2 = backend.AllocateLargePageMemory(9478);
|
||||
EXPECT_NE(nullptr, writeable_base2);
|
||||
EXPECT_NE(writeable_base1, writeable_base2);
|
||||
backend.FreeLargePageMemory(writeable_base1);
|
||||
backend.FreeLargePageMemory(writeable_base2);
|
||||
}
|
||||
|
||||
TEST(PageBackendTest, LookupNormal) {
|
||||
v8::base::PageAllocator allocator;
|
||||
PageBackend backend(&allocator);
|
||||
constexpr size_t kBucket = 0;
|
||||
Address writeable_base = backend.AllocateNormalPageMemory(kBucket);
|
||||
EXPECT_EQ(nullptr, backend.Lookup(writeable_base - kGuardPageSize));
|
||||
EXPECT_EQ(nullptr, backend.Lookup(writeable_base - 1));
|
||||
EXPECT_EQ(writeable_base, backend.Lookup(writeable_base));
|
||||
EXPECT_EQ(writeable_base, backend.Lookup(writeable_base + kPageSize -
|
||||
2 * kGuardPageSize - 1));
|
||||
EXPECT_EQ(nullptr,
|
||||
backend.Lookup(writeable_base + kPageSize - 2 * kGuardPageSize));
|
||||
EXPECT_EQ(nullptr,
|
||||
backend.Lookup(writeable_base - kGuardPageSize + kPageSize - 1));
|
||||
}
|
||||
|
||||
TEST(PageBackendTest, LookupLarge) {
|
||||
v8::base::PageAllocator allocator;
|
||||
PageBackend backend(&allocator);
|
||||
constexpr size_t kSize = 7934;
|
||||
Address writeable_base = backend.AllocateLargePageMemory(kSize);
|
||||
EXPECT_EQ(nullptr, backend.Lookup(writeable_base - kGuardPageSize));
|
||||
EXPECT_EQ(nullptr, backend.Lookup(writeable_base - 1));
|
||||
EXPECT_EQ(writeable_base, backend.Lookup(writeable_base));
|
||||
EXPECT_EQ(writeable_base, backend.Lookup(writeable_base + kSize - 1));
|
||||
}
|
||||
|
||||
TEST(PageBackendDeathTest, DestructingBackendDestroysPageMemory) {
|
||||
v8::base::PageAllocator allocator;
|
||||
Address base;
|
||||
{
|
||||
PageBackend backend(&allocator);
|
||||
constexpr size_t kBucket = 0;
|
||||
base = backend.AllocateNormalPageMemory(kBucket);
|
||||
}
|
||||
EXPECT_DEATH_IF_SUPPORTED(access(base[0]), "");
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
} // namespace cppgc
|