[ptr-compr] Introduce RegionAllocator and respective unittests.
This is a naive implementation of a class that manages region allocation/deallocation inside a given range of addresses. This code will be used in follow-up CLs. Bug: v8:8096 Change-Id: I7bea7051a1525cc7f87ba34d67b85b274c5de18a Reviewed-on: https://chromium-review.googlesource.com/1127175 Reviewed-by: Michael Lippautz <mlippautz@chromium.org> Commit-Queue: Igor Sheludko <ishell@chromium.org> Cr-Commit-Position: refs/heads/master@{#55531}
This commit is contained in:
parent
c7b15fb7cd
commit
038ce6aa9c
2
BUILD.gn
2
BUILD.gn
@ -3015,6 +3015,8 @@ v8_component("v8_libbase") {
|
||||
"src/base/platform/semaphore.h",
|
||||
"src/base/platform/time.cc",
|
||||
"src/base/platform/time.h",
|
||||
"src/base/region-allocator.cc",
|
||||
"src/base/region-allocator.h",
|
||||
"src/base/ring-buffer.h",
|
||||
"src/base/safe_conversions.h",
|
||||
"src/base/safe_conversions_impl.h",
|
||||
|
@ -146,6 +146,14 @@ constexpr inline bool IsPowerOfTwo(T value) {
|
||||
V8_BASE_EXPORT uint32_t RoundUpToPowerOfTwo32(uint32_t value);
|
||||
// Same for 64 bit integers. |value| must be <= 2^63
|
||||
V8_BASE_EXPORT uint64_t RoundUpToPowerOfTwo64(uint64_t value);
|
||||
// Same for size_t integers.
|
||||
inline size_t RoundUpToPowerOfTwo(size_t value) {
|
||||
if (sizeof(size_t) == sizeof(uint64_t)) {
|
||||
return RoundUpToPowerOfTwo64(value);
|
||||
} else {
|
||||
return RoundUpToPowerOfTwo32(value);
|
||||
}
|
||||
}
|
||||
|
||||
// RoundDownToPowerOfTwo32(value) returns the greatest power of two which is
|
||||
// less than or equal to |value|. If you pass in a |value| that is already a
|
||||
|
@ -389,6 +389,11 @@ constexpr inline T RoundUp(T x) {
|
||||
return RoundDown<m, T>(static_cast<T>(x + m - 1));
|
||||
}
|
||||
|
||||
// Returns true iff |value| is an integral multiple of |alignment|.
// |alignment| must be a power of two; the test is a single mask-and-compare.
template <typename T, typename U>
inline bool IsAligned(T value, U alignment) {
  const U low_bits_mask = alignment - 1;
  return (value & low_bits_mask) == 0;
}
|
||||
|
||||
inline void* AlignedAddress(void* address, size_t alignment) {
|
||||
// The alignment must be a power of two.
|
||||
DCHECK_EQ(alignment & (alignment - 1), 0u);
|
||||
|
269
src/base/region-allocator.cc
Normal file
269
src/base/region-allocator.cc
Normal file
@ -0,0 +1,269 @@
|
||||
// Copyright 2018 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "src/base/region-allocator.h"
|
||||
#include "src/base/bits.h"
|
||||
#include "src/base/macros.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace base {
|
||||
|
||||
// If |free_size| < |region_size| * |kMaxLoadFactorForRandomization| stop trying
|
||||
// to randomize region allocation.
|
||||
constexpr double kMaxLoadFactorForRandomization = 0.40;
|
||||
|
||||
// Max number of attempts to allocate page at random address.
|
||||
constexpr int kMaxRandomizationAttempts = 3;
|
||||
|
||||
// Sets up an allocator managing the address range
// [memory_region_begin, memory_region_begin + memory_region_size) with
// allocation granularity |min_region_size|. The whole range starts out as a
// single free region.
RegionAllocator::RegionAllocator(Address memory_region_begin,
                                 size_t memory_region_size,
                                 size_t min_region_size)
    : whole_region_(memory_region_begin, memory_region_size, false),
      // Capacity of the managed range measured in minimum-sized regions.
      region_size_in_min_regions_(size() / min_region_size),
      // Randomized placement is only attempted while at least this many
      // bytes remain free (see AllocateRegion(rng, size)).
      max_load_for_randomization_(
          static_cast<size_t>(size() * kMaxLoadFactorForRandomization)),
      free_size_(0),
      min_region_size_(min_region_size) {
  DCHECK_LT(begin(), end());
  DCHECK(base::bits::IsPowerOfTwo(min_region_size_));
  // Both the total size and the base address must be granularity-aligned.
  CHECK(IsAligned(size(), min_region_size_));
  CHECK(IsAligned(begin(), min_region_size_));

  // Initial region: one free region covering the whole range.
  Region* region = new Region(whole_region_);

  all_regions_.insert(region);

  FreeListAddRegion(region);
}
|
||||
|
||||
RegionAllocator::~RegionAllocator() {
  // The allocator owns every Region node reachable from |all_regions_|;
  // |free_regions_| only aliases a subset of the same pointers, so deleting
  // through |all_regions_| releases each node exactly once.
  for (AllRegionsSet::iterator iter = all_regions_.begin();
       iter != all_regions_.end(); ++iter) {
    delete *iter;
  }
}
|
||||
|
||||
// Returns an iterator to the region containing |address|, or
// all_regions_.end() when the address lies outside the managed range.
RegionAllocator::AllRegionsSet::iterator RegionAllocator::FindRegion(
    Address address) {
  if (!whole_region_.contains(address)) return all_regions_.end();

  // Zero-sized probe whose end() equals the queried |address|.
  Region key(address, 0, false);
  AllRegionsSet::iterator iter = all_regions_.upper_bound(&key);
  // Regions in |all_regions_| are compared by end() values and key's end()
  // points exactly to the address we are querying, so the upper_bound will
  // find the region whose |end()| is greater than the requested address.
  // Regions tile the whole range without gaps, so this region must exist and
  // must contain |address|.
  DCHECK_NE(iter, all_regions_.end());
  DCHECK((*iter)->contains(address));
  return iter;
}
|
||||
|
||||
// Registers |region| as available and accounts for its bytes in free_size_.
void RegionAllocator::FreeListAddRegion(Region* region) {
  free_regions_.insert(region);
  free_size_ += region->size();
}
|
||||
|
||||
// Best-fit lookup: the free set is ordered by (size, begin), so lower_bound
// on a zero-address probe yields the smallest free region with size >= |size|.
RegionAllocator::Region* RegionAllocator::FreeListFindRegion(size_t size) {
  Region probe(0, size, false);
  auto candidate = free_regions_.lower_bound(&probe);
  if (candidate == free_regions_.end()) return nullptr;
  return *candidate;
}
|
||||
|
||||
// Unregisters |region| from the free set and deducts its bytes from
// free_size_. The region must currently be on the free list.
void RegionAllocator::FreeListRemoveRegion(Region* region) {
  auto entry = free_regions_.find(region);
  DCHECK_NE(entry, free_regions_.end());
  DCHECK_LE(region->size(), free_size_);
  free_regions_.erase(entry);
  free_size_ -= region->size();
}
|
||||
|
||||
// Splits |region| into a leading part of |new_size| bytes and a trailing
// remainder; returns the newly created trailing region. Free-list membership
// and |free_size_| accounting are kept consistent with |region|'s used bit.
RegionAllocator::Region* RegionAllocator::Split(Region* region,
                                                size_t new_size) {
  DCHECK(IsAligned(new_size, min_region_size_));
  DCHECK_NE(new_size, 0);
  DCHECK_GT(region->size(), new_size);

  const bool was_free = !region->is_used();

  // Create new region and put it to the lists after the |region|.
  Region* new_region =
      new Region(region->begin() + new_size, region->size() - new_size, false);
  if (was_free) {
    // Remove region from the free list before updating its size: the free
    // set is ordered by (size, begin), so the key must not change in place.
    FreeListRemoveRegion(region);
  }
  region->set_size(new_size);

  all_regions_.insert(new_region);

  // Re-register |region| only if it was free; unconditionally adding it here
  // would put a used region on the free list and corrupt |free_size_|.
  // The trailing |new_region| is always created free.
  if (was_free) {
    FreeListAddRegion(region);
  }
  FreeListAddRegion(new_region);
  return new_region;
}
|
||||
|
||||
// Coalesces two adjacent regions: grows |prev| to also cover |next|, then
// removes and deletes |next|. |next| must directly follow |prev|, and the
// caller must already have taken |next| off the free list.
void RegionAllocator::Merge(AllRegionsSet::iterator prev_iter,
                            AllRegionsSet::iterator next_iter) {
  Region* prev = *prev_iter;
  Region* next = *next_iter;
  DCHECK_EQ(prev->end(), next->begin());
  // NOTE: growing |prev| changes its ordering key (end()) while it is still
  // in the set. This relies on the new key being equal to |next|'s key and
  // |next| being erased by iterator (no key-based lookup happens in between).
  prev->set_size(prev->size() + next->size());

  all_regions_.erase(next_iter);  // prev_iter stays valid.

  // The |next| region must already not be in the free list.
  DCHECK_EQ(free_regions_.find(next), free_regions_.end());
  delete next;
}
|
||||
|
||||
// Best-fit allocation of |size| bytes (|min_region_size_|-aligned). Returns
// the region's start address, or kAllocationFailure when no free region is
// large enough.
RegionAllocator::Address RegionAllocator::AllocateRegion(size_t size) {
  DCHECK_NE(size, 0);
  DCHECK(IsAligned(size, min_region_size_));

  Region* candidate = FreeListFindRegion(size);
  if (candidate == nullptr) return kAllocationFailure;

  // Trim the candidate down to exactly |size| when it is larger; the
  // remainder stays on the free list.
  if (candidate->size() > size) {
    Split(candidate, size);
  }
  DCHECK(IsAligned(candidate->begin(), min_region_size_));
  DCHECK_EQ(candidate->size(), size);

  // Mark region as used.
  FreeListRemoveRegion(candidate);
  candidate->set_is_used(true);
  return candidate->begin();
}
|
||||
|
||||
// Same as AllocateRegion(size), but first makes up to
// kMaxRandomizationAttempts tries to place the region at a random
// |min_region_size_|-aligned offset, falling back to best-fit allocation.
// Randomization is skipped once the allocator is too full for it to be
// likely to succeed.
RegionAllocator::Address RegionAllocator::AllocateRegion(
    RandomNumberGenerator* rng, size_t size) {
  if (free_size() >= max_load_for_randomization_) {
    // There is enough free space for trying to randomize the address.
    size_t random = 0;

    for (int i = 0; i < kMaxRandomizationAttempts; i++) {
      rng->NextBytes(&random, sizeof(random));
      // Map the raw random bits onto a granularity-aligned offset in range.
      // NOTE(review): when |size| spans multiple min-regions, an offset near
      // the top of the range produces a request ending past end(); ensure
      // AllocateRegionAt rejects such requests gracefully in all build modes.
      size_t random_offset =
          min_region_size_ * (random % region_size_in_min_regions_);
      Address address = begin() + random_offset;
      if (AllocateRegionAt(address, size)) {
        return address;
      }
    }
    // Fall back to free list allocation.
  }
  return AllocateRegion(size);
}
|
||||
|
||||
// Allocates exactly [requested_address, requested_address + size) if that
// range is currently covered by a single free region. Both the address and
// the size must be |min_region_size_|-aligned. Returns false when any part
// of the range is used or out of bounds.
bool RegionAllocator::AllocateRegionAt(Address requested_address, size_t size) {
  DCHECK(IsAligned(requested_address, min_region_size_));
  DCHECK_NE(size, 0);
  DCHECK(IsAligned(size, min_region_size_));

  Address requested_end = requested_address + size;
  // Fail gracefully on out-of-bounds (or address-space-overflowing) requests
  // instead of DCHECK-ing: the randomized path in AllocateRegion(rng, size)
  // may legitimately pick an offset too close to end() for |size| to fit.
  if (requested_end < requested_address || requested_end > end()) {
    return false;
  }

  Region* region;
  {
    AllRegionsSet::iterator region_iter = FindRegion(requested_address);
    if (region_iter == all_regions_.end()) {
      return false;
    }
    region = *region_iter;
  }
  // The containing region must be free and must cover the whole request.
  if (region->is_used() || region->end() < requested_end) {
    return false;
  }
  // Found free region that includes the requested one.
  if (region->begin() != requested_address) {
    // Split the region at the |requested_address| boundary; the trailing
    // part (starting at |requested_address|) becomes the current region.
    size_t new_size = requested_address - region->begin();
    DCHECK(IsAligned(new_size, min_region_size_));
    region = Split(region, new_size);
  }
  if (region->end() != requested_end) {
    // Split the region at the |requested_end| boundary.
    Split(region, size);
  }
  DCHECK_EQ(region->begin(), requested_address);
  DCHECK_EQ(region->size(), size);

  // Mark region as used.
  FreeListRemoveRegion(region);
  region->set_is_used(true);
  return true;
}
|
||||
|
||||
// Releases the previously allocated region starting exactly at |address| and
// coalesces it with free neighbors. Returns the freed region's size, or 0
// when |address| does not name the start of a used region.
size_t RegionAllocator::FreeRegion(Address address) {
  AllRegionsSet::iterator region_iter = FindRegion(address);
  if (region_iter == all_regions_.end()) {
    return 0;
  }
  Region* region = *region_iter;
  // Only the exact start of a used region may be freed.
  if (region->begin() != address || !region->is_used()) {
    return 0;
  }

  // Remember the size before any merging grows this region.
  size_t size = region->size();
  // The region must not be in the free list.
  DCHECK_EQ(free_regions_.find(*region_iter), free_regions_.end());

  region->set_is_used(false);

  // Merge current region with the surrounding ones if they are free.
  if (region->end() != whole_region_.end()) {
    // There must be a range after the current one.
    AllRegionsSet::iterator next_iter = std::next(region_iter);
    DCHECK_NE(next_iter, all_regions_.end());
    if (!(*next_iter)->is_used()) {
      // |next| region object will be deleted during merge, remove it from
      // the free list.
      FreeListRemoveRegion(*next_iter);
      Merge(region_iter, next_iter);
    }
  }
  if (region->begin() != whole_region_.begin()) {
    // There must be a range before the current one.
    AllRegionsSet::iterator prev_iter = std::prev(region_iter);
    DCHECK_NE(prev_iter, all_regions_.end());
    if (!(*prev_iter)->is_used()) {
      // |prev| region's size will change, we'll have to re-insert it into
      // the proper place of the free list.
      FreeListRemoveRegion(*prev_iter);
      Merge(prev_iter, region_iter);
      // |prev| region becomes the current region (region_iter was erased
      // by Merge; prev_iter remains valid).
      region_iter = prev_iter;
      region = *region_iter;
    }
  }
  FreeListAddRegion(region);
  return size;
}
|
||||
|
||||
// Prints "[begin, end), size: N, used|free" with hex/0x-prefixed numbers,
// restoring the stream's previous formatting flags before returning.
void RegionAllocator::Region::Print(std::ostream& os) const {
  std::ios::fmtflags flags = os.flags(std::ios::hex | std::ios::showbase);
  os << "[" << begin() << ", " << end() << "), size: " << size();
  os << ", " << (is_used() ? "used" : "free");
  os.flags(flags);
}
|
||||
|
||||
// Dumps the allocator state (range, sizes, and both region sets) for
// debugging, in hex with 0x prefixes; restores the stream flags afterwards.
void RegionAllocator::Print(std::ostream& os) const {
  std::ios::fmtflags flags = os.flags(std::ios::hex | std::ios::showbase);
  os << "RegionAllocator: [" << begin() << ", " << end() << ")";
  os << "\nsize: " << size();
  os << "\nfree_size: " << free_size();
  os << "\nmin_region_size: " << min_region_size_;

  os << "\nall regions: ";
  for (const Region* region : all_regions_) {
    os << "\n ";
    region->Print(os);
  }

  os << "\nfree regions: ";
  for (const Region* region : free_regions_) {
    os << "\n ";
    region->Print(os);
  }
  os << "\n";
  os.flags(flags);
}
|
||||
|
||||
} // namespace base
|
||||
} // namespace v8
|
152
src/base/region-allocator.h
Normal file
152
src/base/region-allocator.h
Normal file
@ -0,0 +1,152 @@
|
||||
// Copyright 2018 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef V8_BASE_REGION_ALLOCATOR_H_
|
||||
#define V8_BASE_REGION_ALLOCATOR_H_
|
||||
|
||||
#include <set>
|
||||
|
||||
#include "src/base/utils/random-number-generator.h"
|
||||
#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
|
||||
|
||||
namespace v8 {
|
||||
namespace base {
|
||||
|
||||
// Helper class for managing used/free regions within [address, address+size)
// region. Minimum allocation unit is |min_region_size|.
// Requested allocation sizes must already be |min_region_size|-aligned: the
// implementation DCHECKs alignment rather than rounding the size up.
// The region allocation algorithm implements best-fit with coalescing strategy:
// it tries to find a smallest suitable free region upon allocation and tries
// to merge region with its neighbors upon freeing.
//
// This class does not perform any actual region reservation.
// Not thread-safe.
class V8_BASE_EXPORT RegionAllocator final {
 public:
  typedef uintptr_t Address;

  // Sentinel returned by AllocateRegion() when no suitable region exists.
  static constexpr Address kAllocationFailure = static_cast<Address>(-1);

  RegionAllocator(Address address, size_t size, size_t min_region_size);
  ~RegionAllocator();

  // Allocates region of |size| (must be |min_region_size|-aligned). Returns
  // the address of the region on success or kAllocationFailure.
  Address AllocateRegion(size_t size);
  // Same as above but tries to randomize the region displacement.
  Address AllocateRegion(RandomNumberGenerator* rng, size_t size);

  // Allocates region of |size| at |requested_address| if it's free. Both the
  // address and the size must be |min_region_size|-aligned. On success returns
  // true.
  // This kind of allocation is supposed to be used during setup phase to mark
  // certain regions as used or for randomizing regions displacement.
  bool AllocateRegionAt(Address requested_address, size_t size);

  // Frees region at given |address|, returns the size of the region.
  // The region must be previously allocated. Return 0 on failure.
  size_t FreeRegion(Address address);

  Address begin() const { return whole_region_.begin(); }
  Address end() const { return whole_region_.end(); }
  size_t size() const { return whole_region_.size(); }

  // Total size of not yet acquired regions.
  size_t free_size() const { return free_size_; }

  void Print(std::ostream& os) const;

 private:
  // A contiguous [address, address + size) sub-range with a used/free flag.
  class Region {
   public:
    Address begin() const { return address_; }
    Address end() const { return address_ + size_; }

    size_t size() const { return size_; }
    void set_size(size_t size) { size_ = size; }

    bool contains(Address address) const {
      STATIC_ASSERT(std::is_unsigned<Address>::value);
      // Single-compare range check; relies on unsigned wrap-around making
      // addresses below begin() compare large.
      return (address - begin()) < size();
    }

    bool is_used() const { return is_used_; }
    void set_is_used(bool used) { is_used_ = used; }

    Region(Address address, size_t size, bool is_used)
        : address_(address), size_(size), is_used_(is_used) {}

    void Print(std::ostream& os) const;

   private:
    Address address_;
    size_t size_;
    bool is_used_;
  };

  // The whole region.
  const Region whole_region_;

  // Number of |min_region_size_| in the whole region.
  const size_t region_size_in_min_regions_;

  // If the free size is less than this value - stop trying to randomize the
  // allocation addresses.
  const size_t max_load_for_randomization_;

  // Size of all free regions.
  size_t free_size_;

  // Minimum region size. Must be a power of 2.
  const size_t min_region_size_;

  struct AddressEndOrder {
    bool operator()(const Region* a, const Region* b) const {
      return a->end() < b->end();
    }
  };
  // All regions ordered by addresses.
  typedef std::set<Region*, AddressEndOrder> AllRegionsSet;
  AllRegionsSet all_regions_;

  struct SizeAddressOrder {
    bool operator()(const Region* a, const Region* b) const {
      if (a->size() != b->size()) return a->size() < b->size();
      return a->begin() < b->begin();
    }
  };
  // Free regions ordered by sizes and addresses.
  std::set<Region*, SizeAddressOrder> free_regions_;

  // Returns iterator to the region containing given address, or
  // all_regions_.end() when the address is out of range.
  AllRegionsSet::iterator FindRegion(Address address);

  // Adds given region to the set of free regions.
  void FreeListAddRegion(Region* region);

  // Finds best-fit free region for given size.
  Region* FreeListFindRegion(size_t size);

  // Removes given region from the set of free regions.
  void FreeListRemoveRegion(Region* region);

  // Splits given |region| into two: one of |new_size| size and a new one
  // having the rest. The new region is returned.
  Region* Split(Region* region, size_t new_size);

  // For two coalescing regions merges |next| to |prev| and deletes |next|.
  void Merge(AllRegionsSet::iterator prev_iter,
             AllRegionsSet::iterator next_iter);

  FRIEND_TEST(RegionAllocatorTest, AllocateRegionRandom);
  FRIEND_TEST(RegionAllocatorTest, Fragmentation);
  FRIEND_TEST(RegionAllocatorTest, FindRegion);

  DISALLOW_COPY_AND_ASSIGN(RegionAllocator);
};
|
||||
|
||||
} // namespace base
|
||||
} // namespace v8
|
||||
|
||||
#endif // V8_BASE_REGION_ALLOCATOR_H_
|
@ -72,6 +72,7 @@ v8_source_set("unittests_sources") {
|
||||
"base/platform/platform-unittest.cc",
|
||||
"base/platform/semaphore-unittest.cc",
|
||||
"base/platform/time-unittest.cc",
|
||||
"base/region-allocator-unittest.cc",
|
||||
"base/sys-info-unittest.cc",
|
||||
"base/template-utils-unittest.cc",
|
||||
"base/utils/random-number-generator-unittest.cc",
|
||||
|
326
test/unittests/base/region-allocator-unittest.cc
Normal file
326
test/unittests/base/region-allocator-unittest.cc
Normal file
@ -0,0 +1,326 @@
|
||||
// Copyright 2018 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "src/base/region-allocator.h"
|
||||
#include "test/unittests/test-utils.h"
|
||||
|
||||
#include "testing/gtest/include/gtest/gtest.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace base {
|
||||
|
||||
using Address = RegionAllocator::Address;
|
||||
using v8::internal::KB;
|
||||
using v8::internal::MB;
|
||||
|
||||
class RegionAllocatorTest : public ::testing::TestWithParam<int> {};
|
||||
|
||||
// Exercises fixed-address allocation: fill page by page, verify exhaustion,
// free/reallocate one page, then free everything and re-allocate in one shot.
TEST(RegionAllocatorTest, SimpleAllocateRegionAt) {
  const size_t kPageSize = 4 * KB;
  const size_t kPageCount = 16;
  const size_t kSize = kPageSize * kPageCount;
  const Address kBegin = static_cast<Address>(kPageSize * 153);
  const Address kEnd = kBegin + kSize;

  RegionAllocator ra(kBegin, kSize, kPageSize);

  // Allocate the whole region.
  for (Address address = kBegin; address < kEnd; address += kPageSize) {
    CHECK_EQ(ra.free_size(), kEnd - address);
    CHECK(ra.AllocateRegionAt(address, kPageSize));
  }

  // No free regions left, the allocation should fail.
  CHECK_EQ(ra.free_size(), 0);
  CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);

  // Free one region and then the allocation should succeed.
  CHECK_EQ(ra.FreeRegion(kBegin), kPageSize);
  CHECK_EQ(ra.free_size(), kPageSize);
  CHECK(ra.AllocateRegionAt(kBegin, kPageSize));

  // Free all the pages.
  for (Address address = kBegin; address < kEnd; address += kPageSize) {
    CHECK_EQ(ra.FreeRegion(address), kPageSize);
  }

  // Check that the whole region is free and can be fully allocated.
  CHECK_EQ(ra.free_size(), kSize);
  CHECK_EQ(ra.AllocateRegion(kSize), kBegin);
}
|
||||
|
||||
// Best-fit allocation without randomization must hand out pages in ascending
// address order and must immediately reuse a just-freed page.
TEST(RegionAllocatorTest, SimpleAllocateRegion) {
  const size_t kPageSize = 4 * KB;
  const size_t kPageCount = 16;
  const size_t kSize = kPageSize * kPageCount;
  const Address kBegin = static_cast<Address>(kPageSize * 153);
  const Address kEnd = kBegin + kSize;

  RegionAllocator ra(kBegin, kSize, kPageSize);

  // Allocate the whole region.
  for (size_t i = 0; i < kPageCount; i++) {
    CHECK_EQ(ra.free_size(), kSize - kPageSize * i);
    Address address = ra.AllocateRegion(kPageSize);
    CHECK_NE(address, RegionAllocator::kAllocationFailure);
    CHECK_EQ(address, kBegin + kPageSize * i);
  }

  // No free regions left, the allocation should fail.
  CHECK_EQ(ra.free_size(), 0);
  CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);

  // Try to free one page and ensure that we are able to allocate it again.
  for (Address address = kBegin; address < kEnd; address += kPageSize) {
    CHECK_EQ(ra.FreeRegion(address), kPageSize);
    CHECK_EQ(ra.AllocateRegion(kPageSize), address);
  }
  CHECK_EQ(ra.free_size(), 0);
}
|
||||
|
||||
// Randomized allocation must return valid, aligned, non-duplicate pages and
// must demonstrably place pages out of order while the allocator is below
// the randomization load limit.
TEST_P(RegionAllocatorTest, AllocateRegionRandom) {
  const size_t kPageSize = 8 * KB;
  const size_t kPageCountLog = 16;
  const size_t kPageCount = (size_t{1} << kPageCountLog);
  const size_t kSize = kPageSize * kPageCount;
  const Address kBegin = static_cast<Address>(153 * MB);
  const Address kEnd = kBegin + kSize;

  base::RandomNumberGenerator rng(GetParam());
  RegionAllocator ra(kBegin, kSize, kPageSize);

  std::set<Address> allocated_pages;
  // Number of allocations for which randomized placement is still guaranteed
  // to be attempted (uses the allocator's private load limit via FRIEND_TEST).
  const size_t kRandomizationLimit = ra.max_load_for_randomization_ / kPageSize;
  CHECK_LT(kRandomizationLimit, kPageCount);

  Address last_address = kBegin;
  bool saw_randomized_pages = false;

  for (size_t i = 0; i < kPageCount; i++) {
    Address address = ra.AllocateRegion(&rng, kPageSize);
    CHECK_NE(address, RegionAllocator::kAllocationFailure);
    CHECK(IsAligned(address, kPageSize));
    CHECK_LE(kBegin, address);
    CHECK_LT(address, kEnd);
    CHECK_EQ(allocated_pages.find(address), allocated_pages.end());
    allocated_pages.insert(address);

    // A page placed before its predecessor proves out-of-order placement.
    saw_randomized_pages |= (address < last_address);
    last_address = address;

    if (i == kRandomizationLimit) {
      // We must have observed allocation randomization by this point.
      // The rest of the allocations may still be randomized depending on
      // the free ranges distribution, however it is not guaranteed.
      CHECK(saw_randomized_pages);
    }
  }

  // No free regions left, the allocation should fail.
  CHECK_EQ(ra.free_size(), 0);
  CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);
}
|
||||
|
||||
// Allocates power-of-two-sized regions (1, 2, 4, ... pages) that exactly
// tile the range, then verifies each can be freed and reallocated in place.
TEST(RegionAllocatorTest, AllocateBigRegions) {
  const size_t kPageSize = 4 * KB;
  const size_t kPageCountLog = 10;
  // Sum of 2^0..2^(kPageCountLog-1) pages == 2^kPageCountLog - 1.
  const size_t kPageCount = (size_t{1} << kPageCountLog) - 1;
  const size_t kSize = kPageSize * kPageCount;
  const Address kBegin = static_cast<Address>(kPageSize * 153);

  RegionAllocator ra(kBegin, kSize, kPageSize);

  // Allocate the whole region.
  for (size_t i = 0; i < kPageCountLog; i++) {
    Address address = ra.AllocateRegion(kPageSize * (size_t{1} << i));
    CHECK_NE(address, RegionAllocator::kAllocationFailure);
    CHECK_EQ(address, kBegin + kPageSize * ((size_t{1} << i) - 1));
  }

  // No free regions left, the allocation should fail.
  CHECK_EQ(ra.free_size(), 0);
  CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);

  // Try to free one page and ensure that we are able to allocate it again.
  for (size_t i = 0; i < kPageCountLog; i++) {
    const size_t size = kPageSize * (size_t{1} << i);
    Address address = kBegin + kPageSize * ((size_t{1} << i) - 1);
    CHECK_EQ(ra.FreeRegion(address), size);
    CHECK_EQ(ra.AllocateRegion(size), address);
  }
  CHECK_EQ(ra.free_size(), 0);
}
|
||||
|
||||
// Freeing a region followed by its free right neighbor must coalesce so a
// doubled-size allocation succeeds at the original address.
// NOTE(review): "Coalecsing" in the test name is a typo for "Coalescing";
// renaming would change the gtest id, so it is left as-is here.
TEST(RegionAllocatorTest, MergeLeftToRightCoalecsingRegions) {
  const size_t kPageSize = 4 * KB;
  const size_t kPageCountLog = 10;
  const size_t kPageCount = (size_t{1} << kPageCountLog);
  const size_t kSize = kPageSize * kPageCount;
  const Address kBegin = static_cast<Address>(kPageSize * 153);

  RegionAllocator ra(kBegin, kSize, kPageSize);

  // Allocate the whole region using the following page size pattern:
  // |0|1|22|3333|...
  CHECK_EQ(ra.AllocateRegion(kPageSize), kBegin);
  for (size_t i = 0; i < kPageCountLog; i++) {
    Address address = ra.AllocateRegion(kPageSize * (size_t{1} << i));
    CHECK_NE(address, RegionAllocator::kAllocationFailure);
    CHECK_EQ(address, kBegin + kPageSize * (size_t{1} << i));
  }

  // No free regions left, the allocation should fail.
  CHECK_EQ(ra.free_size(), 0);
  CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);

  // Try to free two coalescing regions and ensure the new page of bigger size
  // can be allocated.
  size_t current_size = kPageSize;
  for (size_t i = 0; i < kPageCountLog; i++) {
    CHECK_EQ(ra.FreeRegion(kBegin), current_size);
    CHECK_EQ(ra.FreeRegion(kBegin + current_size), current_size);
    current_size += current_size;
    CHECK_EQ(ra.AllocateRegion(current_size), kBegin);
  }
  CHECK_EQ(ra.free_size(), 0);
}
|
||||
|
||||
// Freeing a region whose LEFT neighbor is already free must merge into the
// neighbor (right-to-left direction of the coalescing code in FreeRegion).
TEST_P(RegionAllocatorTest, MergeRightToLeftCoalecsingRegions) {
  // NOTE(review): |rng| appears unused in this test body — confirm whether
  // the TEST_P parameter is needed here at all.
  base::RandomNumberGenerator rng(GetParam());
  const size_t kPageSize = 4 * KB;
  const size_t kPageCountLog = 10;
  const size_t kPageCount = (size_t{1} << kPageCountLog);
  const size_t kSize = kPageSize * kPageCount;
  const Address kBegin = static_cast<Address>(kPageSize * 153);

  RegionAllocator ra(kBegin, kSize, kPageSize);

  // Allocate the whole region.
  for (size_t i = 0; i < kPageCount; i++) {
    Address address = ra.AllocateRegion(kPageSize);
    CHECK_NE(address, RegionAllocator::kAllocationFailure);
    CHECK_EQ(address, kBegin + kPageSize * i);
  }

  // No free regions left, the allocation should fail.
  CHECK_EQ(ra.free_size(), 0);
  CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);

  // Free pages with even indices left-to-right.
  for (size_t i = 0; i < kPageCount; i += 2) {
    Address address = kBegin + kPageSize * i;
    CHECK_EQ(ra.FreeRegion(address), kPageSize);
  }

  // Free pages with odd indices right-to-left.
  for (size_t i = 1; i < kPageCount; i += 2) {
    Address address = kBegin + kPageSize * (kPageCount - i);
    CHECK_EQ(ra.FreeRegion(address), kPageSize);
    // Now we should be able to allocate a double-sized page.
    CHECK_EQ(ra.AllocateRegion(kPageSize * 2), address - kPageSize);
    // .. but there's a window for only one such page.
    CHECK_EQ(ra.AllocateRegion(kPageSize * 2),
             RegionAllocator::kAllocationFailure);
  }

  // Free all the double-sized pages.
  for (size_t i = 0; i < kPageCount; i += 2) {
    Address address = kBegin + kPageSize * i;
    CHECK_EQ(ra.FreeRegion(address), kPageSize * 2);
  }

  // Check that the whole region is free and can be fully allocated.
  CHECK_EQ(ra.free_size(), kSize);
  CHECK_EQ(ra.AllocateRegion(kSize), kBegin);
}
|
||||
|
||||
// Frees pages in a checkerboard-then-fill order and checks the total region
// count after each step (uses the private all_regions_ via FRIEND_TEST) to
// verify coalescing collapses the map back down to a single region.
TEST(RegionAllocatorTest, Fragmentation) {
  const size_t kPageSize = 64 * KB;
  const size_t kPageCount = 9;
  const size_t kSize = kPageSize * kPageCount;
  const Address kBegin = static_cast<Address>(kPageSize * 153);

  RegionAllocator ra(kBegin, kSize, kPageSize);

  // Allocate the whole region.
  for (size_t i = 0; i < kPageCount; i++) {
    Address address = ra.AllocateRegion(kPageSize);
    CHECK_NE(address, RegionAllocator::kAllocationFailure);
    CHECK_EQ(address, kBegin + kPageSize * i);
  }

  // No free regions left, the allocation should fail.
  CHECK_EQ(ra.free_size(), 0);
  CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);

  // Free pages in the following order and check the freed size.
  // ('x' marks a used page, '.' a free page after the step.)
  struct {
    size_t page_index_to_free;
    size_t expected_page_count;
  } testcase[] = {  // .........
      {0, 9},       // x........
      {2, 9},       // x.x......
      {4, 9},       // x.x.x....
      {6, 9},       // x.x.x.x..
      {8, 9},       // x.x.x.x.x
      {1, 7},       // xxx.x.x.x
      {7, 5},       // xxx.x.xxx
      {3, 3},       // xxxxx.xxx
      {5, 1}};      // xxxxxxxxx
  CHECK_EQ(kPageCount, arraysize(testcase));

  CHECK_EQ(ra.all_regions_.size(), kPageCount);
  for (size_t i = 0; i < kPageCount; i++) {
    Address address = kBegin + kPageSize * testcase[i].page_index_to_free;
    CHECK_EQ(ra.FreeRegion(address), kPageSize);
    CHECK_EQ(ra.all_regions_.size(), testcase[i].expected_page_count);
  }

  // Check that the whole region is free and can be fully allocated.
  CHECK_EQ(ra.free_size(), kSize);
  CHECK_EQ(ra.AllocateRegion(kSize), kBegin);
}
|
||||
|
||||
// Verifies the private FindRegion (via FRIEND_TEST): out-of-range addresses
// yield the end iterator; in-range addresses resolve to the page containing
// them, including unaligned interior addresses.
TEST(RegionAllocatorTest, FindRegion) {
  const size_t kPageSize = 4 * KB;
  const size_t kPageCount = 16;
  const size_t kSize = kPageSize * kPageCount;
  const Address kBegin = static_cast<Address>(kPageSize * 153);
  const Address kEnd = kBegin + kSize;

  RegionAllocator ra(kBegin, kSize, kPageSize);

  // Allocate the whole region.
  for (Address address = kBegin; address < kEnd; address += kPageSize) {
    CHECK_EQ(ra.free_size(), kEnd - address);
    CHECK(ra.AllocateRegionAt(address, kPageSize));
  }

  // No free regions left, the allocation should fail.
  CHECK_EQ(ra.free_size(), 0);
  CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);

  // The out-of region requests must return end iterator.
  CHECK_EQ(ra.FindRegion(kBegin - 1), ra.all_regions_.end());
  CHECK_EQ(ra.FindRegion(kBegin - kPageSize), ra.all_regions_.end());
  CHECK_EQ(ra.FindRegion(kBegin / 2), ra.all_regions_.end());
  CHECK_EQ(ra.FindRegion(kEnd), ra.all_regions_.end());
  CHECK_EQ(ra.FindRegion(kEnd + kPageSize), ra.all_regions_.end());
  CHECK_EQ(ra.FindRegion(kEnd * 2), ra.all_regions_.end());

  // Probe at quarter-page granularity: each address must resolve to the
  // page-aligned region that contains it.
  for (Address address = kBegin; address < kEnd; address += kPageSize / 4) {
    RegionAllocator::AllRegionsSet::iterator region_iter =
        ra.FindRegion(address);
    CHECK_NE(region_iter, ra.all_regions_.end());
    RegionAllocator::Region* region = *region_iter;
    Address region_start = RoundDown(address, kPageSize);
    CHECK_EQ(region->begin(), region_start);
    CHECK_LE(region->begin(), address);
    CHECK_LT(address, region->end());
  }
}
|
||||
|
||||
} // namespace base
|
||||
} // namespace v8
|
Loading…
Reference in New Issue
Block a user