cppgc: young-gen: Reuse SlotSet.
The CL slightly generalizes SlotSet by parameterizing it with the slot size.
SlotSet is abstracted into BasicSlotSet, which is moved to heap::base::. The
V8-GC-related parts stay in slot-set.h.

Bug: chromium:1029379
Change-Id: I093332b77682d2b31e61a91d4b0110fa95b5c908
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3695595
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Anton Bikineev <bikineev@chromium.org>
Cr-Commit-Position: refs/heads/main@{#83553}
parent 524e681b5d
commit fec788a0f9
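
In essence, the refactoring has the following shape (a simplified sketch of the class layout introduced by the CL, not literal code from it):

    // heap::base: the slot-size-agnostic core. One bit per slot, grouped
    // into buckets of 32 cells x 32 bits (1024 slots per bucket).
    template <size_t SlotGranularity>
    class BasicSlotSet {
      // Allocate, Delete, Insert, Remove, RemoveRange, Lookup, Iterate, ...
    };

    // v8::internal (slot-set.h): the V8-GC-specific view, fixed to
    // tagged-size slots; it only layers MaybeObjectSlot callbacks and V8
    // constants such as kBucketsRegularPage on top.
    class SlotSet final : public ::heap::base::BasicSlotSet<kTaggedSize> {
      // ...
    };

This lets cppgc's young generation instantiate BasicSlotSet with its own slot granularity instead of duplicating the bitmap machinery.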
BUILD.bazel
@@ -3132,6 +3132,7 @@ filegroup(
     srcs = [
         "src/heap/base/active-system-pages.cc",
         "src/heap/base/active-system-pages.h",
+        "src/heap/base/basic-slot-set.h",
         "src/heap/base/stack.cc",
         "src/heap/base/stack.h",
         "src/heap/base/worklist.cc",

BUILD.gn
@@ -5710,6 +5710,7 @@ v8_source_set("v8_bigint") {
 v8_header_set("v8_heap_base_headers") {
   sources = [
     "src/heap/base/active-system-pages.h",
+    "src/heap/base/basic-slot-set.h",
     "src/heap/base/stack.h",
     "src/heap/base/worklist.h",
   ]

src/heap/base/basic-slot-set.h (new file, 464 lines)
@@ -0,0 +1,464 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_BASE_BASIC_SLOT_SET_H_
#define V8_HEAP_BASE_BASIC_SLOT_SET_H_

#include <cstddef>
#include <memory>

#include "src/base/atomic-utils.h"
#include "src/base/bits.h"
#include "src/base/platform/memory.h"

namespace heap {
namespace base {

enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

// Data structure for maintaining a set of slots in a standard (non-large)
// page.
// The data structure assumes that the slots are pointer size aligned and
// splits the valid slot offset range into buckets.
// Each bucket is a bitmap with a bit corresponding to a single slot offset.
template <size_t SlotGranularity>
class BasicSlotSet {
  static constexpr auto kSystemPointerSize = sizeof(void*);

 public:
  using Address = uintptr_t;

  enum AccessMode : uint8_t {
    ATOMIC,
    NON_ATOMIC,
  };

  enum EmptyBucketMode {
    FREE_EMPTY_BUCKETS,  // An empty bucket will be deallocated immediately.
    KEEP_EMPTY_BUCKETS   // An empty bucket will be kept.
  };

  BasicSlotSet() = delete;

  static BasicSlotSet* Allocate(size_t buckets) {
    //  BasicSlotSet* slot_set --+
    //                           |
    //                           v
    //    +-----------------+-------------------------+
    //    | initial buckets |      buckets array      |
    //    +-----------------+-------------------------+
    //       pointer-sized     pointer-sized * buckets
    //
    //
    // The BasicSlotSet pointer points to the beginning of the buckets array
    // for faster access in the write barrier. The number of buckets is needed
    // for calculating the size of this data structure.
    size_t buckets_size = buckets * sizeof(Bucket*);
    size_t size = kInitialBucketsSize + buckets_size;
    void* allocation = v8::base::AlignedAlloc(size, kSystemPointerSize);
    CHECK(allocation);
    BasicSlotSet* slot_set = reinterpret_cast<BasicSlotSet*>(
        reinterpret_cast<uint8_t*>(allocation) + kInitialBucketsSize);
    DCHECK(
        IsAligned(reinterpret_cast<uintptr_t>(slot_set), kSystemPointerSize));
#ifdef DEBUG
    *slot_set->initial_buckets() = buckets;
#endif
    for (size_t i = 0; i < buckets; i++) {
      *slot_set->bucket(i) = nullptr;
    }
    return slot_set;
  }

  static void Delete(BasicSlotSet* slot_set, size_t buckets) {
    if (slot_set == nullptr) return;

    for (size_t i = 0; i < buckets; i++) {
      slot_set->ReleaseBucket(i);
    }

#ifdef DEBUG
    size_t initial_buckets = *slot_set->initial_buckets();

    for (size_t i = buckets; i < initial_buckets; i++) {
      DCHECK_NULL(*slot_set->bucket(i));
    }
#endif

    v8::base::AlignedFree(reinterpret_cast<uint8_t*>(slot_set) -
                          kInitialBucketsSize);
  }

  constexpr static size_t BucketsForSize(size_t size) {
    return (size + (SlotGranularity * kBitsPerBucket) - 1) /
           (SlotGranularity * kBitsPerBucket);
  }

  // Converts the slot offset into bucket index.
  constexpr static size_t BucketForSlot(size_t slot_offset) {
    DCHECK(IsAligned(slot_offset, SlotGranularity));
    return slot_offset / (SlotGranularity * kBitsPerBucket);
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  // AccessMode defines whether there can be concurrent access on the buckets
  // or not.
  template <AccessMode access_mode>
  void Insert(size_t slot_offset) {
    size_t bucket_index;
    int cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket* bucket = LoadBucket<access_mode>(bucket_index);
    if (bucket == nullptr) {
      bucket = new Bucket;
      if (!SwapInNewBucket<access_mode>(bucket_index, bucket)) {
        delete bucket;
        bucket = LoadBucket<access_mode>(bucket_index);
      }
    }
    // Check that monotonicity is preserved, i.e., once a bucket is set we do
    // not free it concurrently.
    DCHECK(bucket != nullptr);
    DCHECK_EQ(bucket->cells(), LoadBucket<access_mode>(bucket_index)->cells());
    uint32_t mask = 1u << bit_index;
    if ((bucket->template LoadCell<access_mode>(cell_index) & mask) == 0) {
      bucket->template SetCellBits<access_mode>(cell_index, mask);
    }
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  // Returns true if the set contains the slot.
  bool Contains(size_t slot_offset) {
    size_t bucket_index;
    int cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket* bucket = LoadBucket(bucket_index);
    if (bucket == nullptr) return false;
    return (bucket->LoadCell(cell_index) & (1u << bit_index)) != 0;
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  void Remove(size_t slot_offset) {
    size_t bucket_index;
    int cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket* bucket = LoadBucket(bucket_index);
    if (bucket != nullptr) {
      uint32_t cell = bucket->LoadCell(cell_index);
      uint32_t bit_mask = 1u << bit_index;
      if (cell & bit_mask) {
        bucket->ClearCellBits(cell_index, bit_mask);
      }
    }
  }

  // The slot offsets specify a range of slots at addresses:
  // [page_start_ + start_offset ... page_start_ + end_offset).
  void RemoveRange(size_t start_offset, size_t end_offset, size_t buckets,
                   EmptyBucketMode mode) {
    CHECK_LE(end_offset, buckets * kBitsPerBucket * SlotGranularity);
    DCHECK_LE(start_offset, end_offset);
    size_t start_bucket;
    int start_cell, start_bit;
    SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit);
    size_t end_bucket;
    int end_cell, end_bit;
    SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit);
    uint32_t start_mask = (1u << start_bit) - 1;
    uint32_t end_mask = ~((1u << end_bit) - 1);
    Bucket* bucket;
    if (start_bucket == end_bucket && start_cell == end_cell) {
      bucket = LoadBucket(start_bucket);
      if (bucket != nullptr) {
        bucket->ClearCellBits(start_cell, ~(start_mask | end_mask));
      }
      return;
    }
    size_t current_bucket = start_bucket;
    int current_cell = start_cell;
    bucket = LoadBucket(current_bucket);
    if (bucket != nullptr) {
      bucket->ClearCellBits(current_cell, ~start_mask);
    }
    current_cell++;
    if (current_bucket < end_bucket) {
      if (bucket != nullptr) {
        ClearBucket(bucket, current_cell, kCellsPerBucket);
      }
      // The rest of the current bucket is cleared.
      // Move on to the next bucket.
      current_bucket++;
      current_cell = 0;
    }
    DCHECK(current_bucket == end_bucket ||
           (current_bucket < end_bucket && current_cell == 0));
    while (current_bucket < end_bucket) {
      if (mode == FREE_EMPTY_BUCKETS) {
        ReleaseBucket(current_bucket);
      } else {
        DCHECK(mode == KEEP_EMPTY_BUCKETS);
        bucket = LoadBucket(current_bucket);
        if (bucket != nullptr) {
          ClearBucket(bucket, 0, kCellsPerBucket);
        }
      }
      current_bucket++;
    }
    // All buckets between start_bucket and end_bucket are cleared.
    DCHECK(current_bucket == end_bucket);
    if (current_bucket == buckets) return;
    bucket = LoadBucket(current_bucket);
    DCHECK(current_cell <= end_cell);
    if (bucket == nullptr) return;
    while (current_cell < end_cell) {
      bucket->StoreCell(current_cell, 0);
      current_cell++;
    }
    // All cells between start_cell and end_cell are cleared.
    DCHECK(current_bucket == end_bucket && current_cell == end_cell);
    bucket->ClearCellBits(end_cell, ~end_mask);
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  bool Lookup(size_t slot_offset) {
    size_t bucket_index;
    int cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket* bucket = LoadBucket(bucket_index);
    if (bucket == nullptr) return false;
    return (bucket->LoadCell(cell_index) & (1u << bit_index)) != 0;
  }

  // Iterate over all slots in the set and for each slot invoke the callback.
  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
  // Returns the new number of slots.
  //
  // Iteration can be performed concurrently with other operations that use
  // atomic access mode such as insertion and removal. However there is no
  // guarantee about ordering and linearizability.
  //
  // Sample usage:
  // Iterate([](Address slot) {
  //    if (good(slot)) return KEEP_SLOT;
  //    else return REMOVE_SLOT;
  // });
  //
  // Releases memory for empty buckets with FREE_EMPTY_BUCKETS.
  template <typename Callback>
  size_t Iterate(Address chunk_start, size_t start_bucket, size_t end_bucket,
                 Callback callback, EmptyBucketMode mode) {
    return Iterate(chunk_start, start_bucket, end_bucket, callback,
                   [this, mode](size_t bucket_index) {
                     if (mode == EmptyBucketMode::FREE_EMPTY_BUCKETS) {
                       ReleaseBucket(bucket_index);
                     }
                   });
  }

  bool FreeEmptyBuckets(size_t buckets) {
    bool empty = true;
    for (size_t bucket_index = 0; bucket_index < buckets; bucket_index++) {
      if (!FreeBucketIfEmpty(bucket_index)) {
        empty = false;
      }
    }

    return empty;
  }

  static const int kCellsPerBucket = 32;
  static const int kCellsPerBucketLog2 = 5;
  static const int kCellSizeBytesLog2 = 2;
  static const int kCellSizeBytes = 1 << kCellSizeBytesLog2;
  static const int kBitsPerCell = 32;
  static const int kBitsPerCellLog2 = 5;
  static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
  static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;

  class Bucket final {
    uint32_t cells_[kCellsPerBucket];

   public:
    Bucket() {
      for (int i = 0; i < kCellsPerBucket; i++) {
        cells_[i] = 0;
      }
    }

    uint32_t* cells() { return cells_; }
    uint32_t* cell(int cell_index) { return cells() + cell_index; }

    template <AccessMode access_mode = AccessMode::ATOMIC>
    uint32_t LoadCell(int cell_index) {
      DCHECK_LT(cell_index, kCellsPerBucket);
      if (access_mode == AccessMode::ATOMIC)
        return v8::base::AsAtomic32::Acquire_Load(cells() + cell_index);
      return *(cells() + cell_index);
    }

    template <AccessMode access_mode = AccessMode::ATOMIC>
    void SetCellBits(int cell_index, uint32_t mask) {
      if (access_mode == AccessMode::ATOMIC) {
        v8::base::AsAtomic32::SetBits(cell(cell_index), mask, mask);
      } else {
        uint32_t* c = cell(cell_index);
        *c = (*c & ~mask) | mask;
      }
    }

    void ClearCellBits(int cell_index, uint32_t mask) {
      v8::base::AsAtomic32::SetBits(cell(cell_index), 0u, mask);
    }

    void StoreCell(int cell_index, uint32_t value) {
      v8::base::AsAtomic32::Release_Store(cell(cell_index), value);
    }

    bool IsEmpty() {
      for (int i = 0; i < kCellsPerBucket; i++) {
        if (cells_[i] != 0) {
          return false;
        }
      }
      return true;
    }
  };

 protected:
  template <typename Callback, typename EmptyBucketCallback>
  size_t Iterate(Address chunk_start, size_t start_bucket, size_t end_bucket,
                 Callback callback, EmptyBucketCallback empty_bucket_callback) {
    size_t new_count = 0;
    for (size_t bucket_index = start_bucket; bucket_index < end_bucket;
         bucket_index++) {
      Bucket* bucket = LoadBucket(bucket_index);
      if (bucket != nullptr) {
        size_t in_bucket_count = 0;
        size_t cell_offset = bucket_index << kBitsPerBucketLog2;
        for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
          uint32_t cell = bucket->LoadCell(i);
          if (cell) {
            uint32_t old_cell = cell;
            uint32_t mask = 0;
            while (cell) {
              int bit_offset = v8::base::bits::CountTrailingZeros(cell);
              uint32_t bit_mask = 1u << bit_offset;
              Address slot = (cell_offset + bit_offset) * SlotGranularity;
              if (callback(chunk_start + slot) == KEEP_SLOT) {
                ++in_bucket_count;
              } else {
                mask |= bit_mask;
              }
              cell ^= bit_mask;
            }
            uint32_t new_cell = old_cell & ~mask;
            if (old_cell != new_cell) {
              bucket->ClearCellBits(i, mask);
            }
          }
        }
        if (in_bucket_count == 0) {
          empty_bucket_callback(bucket_index);
        }
        new_count += in_bucket_count;
      }
    }
    return new_count;
  }

  bool FreeBucketIfEmpty(size_t bucket_index) {
    Bucket* bucket = LoadBucket<AccessMode::NON_ATOMIC>(bucket_index);
    if (bucket != nullptr) {
      if (bucket->IsEmpty()) {
        ReleaseBucket<AccessMode::NON_ATOMIC>(bucket_index);
      } else {
        return false;
      }
    }

    return true;
  }

  void ClearBucket(Bucket* bucket, int start_cell, int end_cell) {
    DCHECK_GE(start_cell, 0);
    DCHECK_LE(end_cell, kCellsPerBucket);
    int current_cell = start_cell;
    while (current_cell < kCellsPerBucket) {
      bucket->StoreCell(current_cell, 0);
      current_cell++;
    }
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  void ReleaseBucket(size_t bucket_index) {
    Bucket* bucket = LoadBucket<access_mode>(bucket_index);
    StoreBucket<access_mode>(bucket_index, nullptr);
    delete bucket;
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  Bucket* LoadBucket(Bucket** bucket) {
    if (access_mode == AccessMode::ATOMIC)
      return v8::base::AsAtomicPointer::Acquire_Load(bucket);
    return *bucket;
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  Bucket* LoadBucket(size_t bucket_index) {
    return LoadBucket(bucket(bucket_index));
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  void StoreBucket(Bucket** bucket, Bucket* value) {
    if (access_mode == AccessMode::ATOMIC) {
      v8::base::AsAtomicPointer::Release_Store(bucket, value);
    } else {
      *bucket = value;
    }
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  void StoreBucket(size_t bucket_index, Bucket* value) {
    StoreBucket(bucket(bucket_index), value);
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  bool SwapInNewBucket(size_t bucket_index, Bucket* value) {
    Bucket** b = bucket(bucket_index);
    if (access_mode == AccessMode::ATOMIC) {
      return v8::base::AsAtomicPointer::Release_CompareAndSwap(
                 b, nullptr, value) == nullptr;
    } else {
      DCHECK_NULL(*b);
      *b = value;
      return true;
    }
  }

  // Converts the slot offset into bucket/cell/bit index.
  static void SlotToIndices(size_t slot_offset, size_t* bucket_index,
                            int* cell_index, int* bit_index) {
    DCHECK(IsAligned(slot_offset, SlotGranularity));
    size_t slot = slot_offset / SlotGranularity;
    *bucket_index = slot >> kBitsPerBucketLog2;
    *cell_index =
        static_cast<int>((slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1));
    *bit_index = static_cast<int>(slot & (kBitsPerCell - 1));
  }

  Bucket** buckets() { return reinterpret_cast<Bucket**>(this); }
  Bucket** bucket(size_t bucket_index) { return buckets() + bucket_index; }

#ifdef DEBUG
  size_t* initial_buckets() { return reinterpret_cast<size_t*>(this) - 1; }
  static const int kInitialBucketsSize = sizeof(size_t);
#else
  static const int kInitialBucketsSize = 0;
#endif
};

}  // namespace base
}  // namespace heap

#endif  // V8_HEAP_BASE_BASIC_SLOT_SET_H_
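
For intuition, each bucket covers kBitsPerBucket = 32 * 32 = 1024 slots, i.e. 1024 * SlotGranularity bytes of a page. A worked example of the index arithmetic (assuming 8-byte slots, SlotGranularity == 8; the concrete numbers are illustrative):

    // BucketsForSize rounds up to whole buckets: a 128 KiB page needs
    // 131072 / (8 * 1024) = 16 buckets.
    //
    // SlotToIndices(8264, ...) computes:
    //   slot         = 8264 / 8         = 1033
    //   bucket_index = 1033 >> 10       = 1   // kBitsPerBucketLog2 == 10
    //   cell_index   = (1033 >> 5) & 31 = 0
    //   bit_index    = 1033 & 31        = 9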
src/heap/remembered-set.h
@@ -31,7 +31,10 @@ class RememberedSetOperations {
   static void Insert(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) {
     DCHECK(chunk->Contains(slot_addr));
     uintptr_t offset = slot_addr - chunk->address();
-    slot_set->Insert<access_mode>(offset);
+    slot_set->Insert<access_mode == v8::internal::AccessMode::ATOMIC
+                         ? v8::internal::SlotSet::AccessMode::ATOMIC
+                         : v8::internal::SlotSet::AccessMode::NON_ATOMIC>(
+        offset);
   }

   template <typename Callback>
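
The longer spelling is needed because v8::internal::AccessMode and the slot set's nested BasicSlotSet::AccessMode are now two distinct enums. A hypothetical helper (not part of this CL) would make the mapping explicit:

    // Illustrative only: the CL performs this conversion inline at the call
    // site instead of introducing a helper like this.
    constexpr SlotSet::AccessMode ToSlotSetAccessMode(
        v8::internal::AccessMode access_mode) {
      return access_mode == v8::internal::AccessMode::ATOMIC
                 ? SlotSet::AccessMode::ATOMIC
                 : SlotSet::AccessMode::NON_ATOMIC;
    }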
src/heap/slot-set.h
@@ -10,9 +10,8 @@
 #include <stack>
 #include <vector>

-#include "src/base/atomic-utils.h"
 #include "src/base/bit-field.h"
-#include "src/base/bits.h"
+#include "src/heap/base/basic-slot-set.h"
 #include "src/objects/compressed-slots.h"
 #include "src/objects/slots.h"
 #include "src/utils/allocation.h"
@@ -22,7 +21,9 @@
 namespace v8 {
 namespace internal {

-enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };
+using ::heap::base::KEEP_SLOT;
+using ::heap::base::REMOVE_SLOT;
+using ::heap::base::SlotCallbackResult;

 // Possibly empty buckets (buckets that do not contain any slots) are discovered
 // by the scavenger. Buckets might become non-empty when promoting objects later
@@ -126,256 +127,46 @@ class PossiblyEmptyBuckets {
 static_assert(std::is_standard_layout<PossiblyEmptyBuckets>::value);
 static_assert(sizeof(PossiblyEmptyBuckets) == kSystemPointerSize);

-// Data structure for maintaining a set of slots in a standard (non-large)
-// page.
-// The data structure assumes that the slots are pointer size aligned and
-// splits the valid slot offset range into buckets.
-// Each bucket is a bitmap with a bit corresponding to a single slot offset.
-class SlotSet {
- public:
-  enum EmptyBucketMode {
-    FREE_EMPTY_BUCKETS,  // An empty bucket will be deallocated immediately.
-    KEEP_EMPTY_BUCKETS   // An empty bucket will be kept.
-  };
+class SlotSet final : public ::heap::base::BasicSlotSet<kTaggedSize> {
+  using BasicSlotSet = ::heap::base::BasicSlotSet<kTaggedSize>;

-  SlotSet() = delete;
+ public:
+  static const int kBucketsRegularPage =
+      (1 << kPageSizeBits) / kTaggedSize / kCellsPerBucket / kBitsPerCell;

   static SlotSet* Allocate(size_t buckets) {
-    //  SlotSet* slot_set --+
-    //                      |
-    //                      v
-    //    +-----------------+-------------------------+
-    //    | initial buckets |      buckets array      |
-    //    +-----------------+-------------------------+
-    //       pointer-sized     pointer-sized * buckets
-    //
-    //
-    // The SlotSet pointer points to the beginning of the buckets array for
-    // faster access in the write barrier. The number of buckets is needed for
-    // calculating the size of this data structure.
-    size_t buckets_size = buckets * sizeof(Bucket*);
-    size_t size = kInitialBucketsSize + buckets_size;
-    void* allocation = AlignedAllocWithRetry(size, kSystemPointerSize);
-    SlotSet* slot_set = reinterpret_cast<SlotSet*>(
-        reinterpret_cast<uint8_t*>(allocation) + kInitialBucketsSize);
-    DCHECK(
-        IsAligned(reinterpret_cast<uintptr_t>(slot_set), kSystemPointerSize));
-#ifdef DEBUG
-    *slot_set->initial_buckets() = buckets;
-#endif
-    for (size_t i = 0; i < buckets; i++) {
-      *slot_set->bucket(i) = nullptr;
-    }
-    return slot_set;
+    return static_cast<SlotSet*>(BasicSlotSet::Allocate(buckets));
   }

-  static void Delete(SlotSet* slot_set, size_t buckets) {
-    if (slot_set == nullptr) return;
-
-    for (size_t i = 0; i < buckets; i++) {
-      slot_set->ReleaseBucket(i);
-    }
-
-#ifdef DEBUG
-    size_t initial_buckets = *slot_set->initial_buckets();
-
-    for (size_t i = buckets; i < initial_buckets; i++) {
-      DCHECK_NULL(*slot_set->bucket(i));
-    }
-#endif
-
-    AlignedFree(reinterpret_cast<uint8_t*>(slot_set) - kInitialBucketsSize);
-  }
-
-  static size_t BucketsForSize(size_t size) {
-    return (size + (kTaggedSize * kBitsPerBucket) - 1) >>
-           (kTaggedSizeLog2 + kBitsPerBucketLog2);
-  }
-
-  // Converts the slot offset into bucket index.
-  static size_t BucketForSlot(size_t slot_offset) {
-    DCHECK(IsAligned(slot_offset, kTaggedSize));
-    return slot_offset >> (kTaggedSizeLog2 + kBitsPerBucketLog2);
-  }
-
-  // The slot offset specifies a slot at address page_start_ + slot_offset.
-  // AccessMode defines whether there can be concurrent access on the buckets
-  // or not.
-  template <AccessMode access_mode>
-  void Insert(size_t slot_offset) {
-    size_t bucket_index;
-    int cell_index, bit_index;
-    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
-    Bucket* bucket = LoadBucket<access_mode>(bucket_index);
-    if (bucket == nullptr) {
-      bucket = new Bucket;
-      if (!SwapInNewBucket<access_mode>(bucket_index, bucket)) {
-        delete bucket;
-        bucket = LoadBucket<access_mode>(bucket_index);
-      }
-    }
-    // Check that monotonicity is preserved, i.e., once a bucket is set we do
-    // not free it concurrently.
-    DCHECK(bucket != nullptr);
-    DCHECK_EQ(bucket->cells(), LoadBucket<access_mode>(bucket_index)->cells());
-    uint32_t mask = 1u << bit_index;
-    if ((bucket->LoadCell<access_mode>(cell_index) & mask) == 0) {
-      bucket->SetCellBits<access_mode>(cell_index, mask);
-    }
-  }
-
-  // The slot offset specifies a slot at address page_start_ + slot_offset.
-  // Returns true if the set contains the slot.
-  bool Contains(size_t slot_offset) {
-    size_t bucket_index;
-    int cell_index, bit_index;
-    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
-    Bucket* bucket = LoadBucket(bucket_index);
-    if (bucket == nullptr) return false;
-    return (bucket->LoadCell(cell_index) & (1u << bit_index)) != 0;
-  }
-
-  // The slot offset specifies a slot at address page_start_ + slot_offset.
-  void Remove(size_t slot_offset) {
-    size_t bucket_index;
-    int cell_index, bit_index;
-    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
-    Bucket* bucket = LoadBucket(bucket_index);
-    if (bucket != nullptr) {
-      uint32_t cell = bucket->LoadCell(cell_index);
-      uint32_t bit_mask = 1u << bit_index;
-      if (cell & bit_mask) {
-        bucket->ClearCellBits(cell_index, bit_mask);
-      }
-    }
-  }
-
-  // The slot offsets specify a range of slots at addresses:
-  // [page_start_ + start_offset ... page_start_ + end_offset).
-  void RemoveRange(size_t start_offset, size_t end_offset, size_t buckets,
-                   EmptyBucketMode mode) {
-    CHECK_LE(end_offset, buckets * kBitsPerBucket * kTaggedSize);
-    DCHECK_LE(start_offset, end_offset);
-    size_t start_bucket;
-    int start_cell, start_bit;
-    SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit);
-    size_t end_bucket;
-    int end_cell, end_bit;
-    SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit);
-    uint32_t start_mask = (1u << start_bit) - 1;
-    uint32_t end_mask = ~((1u << end_bit) - 1);
-    Bucket* bucket;
-    if (start_bucket == end_bucket && start_cell == end_cell) {
-      bucket = LoadBucket(start_bucket);
-      if (bucket != nullptr) {
-        bucket->ClearCellBits(start_cell, ~(start_mask | end_mask));
-      }
-      return;
-    }
-    size_t current_bucket = start_bucket;
-    int current_cell = start_cell;
-    bucket = LoadBucket(current_bucket);
-    if (bucket != nullptr) {
-      bucket->ClearCellBits(current_cell, ~start_mask);
-    }
-    current_cell++;
-    if (current_bucket < end_bucket) {
-      if (bucket != nullptr) {
-        ClearBucket(bucket, current_cell, kCellsPerBucket);
-      }
-      // The rest of the current bucket is cleared.
-      // Move on to the next bucket.
-      current_bucket++;
-      current_cell = 0;
-    }
-    DCHECK(current_bucket == end_bucket ||
-           (current_bucket < end_bucket && current_cell == 0));
-    while (current_bucket < end_bucket) {
-      if (mode == FREE_EMPTY_BUCKETS) {
-        ReleaseBucket(current_bucket);
-      } else {
-        DCHECK(mode == KEEP_EMPTY_BUCKETS);
-        bucket = LoadBucket(current_bucket);
-        if (bucket != nullptr) {
-          ClearBucket(bucket, 0, kCellsPerBucket);
-        }
-      }
-      current_bucket++;
-    }
-    // All buckets between start_bucket and end_bucket are cleared.
-    DCHECK(current_bucket == end_bucket);
-    if (current_bucket == buckets) return;
-    bucket = LoadBucket(current_bucket);
-    DCHECK(current_cell <= end_cell);
-    if (bucket == nullptr) return;
-    while (current_cell < end_cell) {
-      bucket->StoreCell(current_cell, 0);
-      current_cell++;
-    }
-    // All cells between start_cell and end_cell are cleared.
-    DCHECK(current_bucket == end_bucket && current_cell == end_cell);
-    bucket->ClearCellBits(end_cell, ~end_mask);
-  }
-
-  // The slot offset specifies a slot at address page_start_ + slot_offset.
-  bool Lookup(size_t slot_offset) {
-    size_t bucket_index;
-    int cell_index, bit_index;
-    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
-    Bucket* bucket = LoadBucket(bucket_index);
-    if (bucket == nullptr) return false;
-    return (bucket->LoadCell(cell_index) & (1u << bit_index)) != 0;
-  }
-
-  // Iterate over all slots in the set and for each slot invoke the callback.
-  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
-  // Returns the new number of slots.
-  //
-  // Iteration can be performed concurrently with other operations that use
-  // atomic access mode such as insertion and removal. However there is no
-  // guarantee about ordering and linearizability.
-  //
-  // Sample usage:
-  // Iterate([](MaybeObjectSlot slot) {
-  //    if (good(slot)) return KEEP_SLOT;
-  //    else return REMOVE_SLOT;
-  // });
-  //
-  // Releases memory for empty buckets with FREE_EMPTY_BUCKETS.
+  // Similar to BasicSlotSet::Iterate() but Callback takes the parameter of
+  // type MaybeObjectSlot.
   template <typename Callback>
   size_t Iterate(Address chunk_start, size_t start_bucket, size_t end_bucket,
                  Callback callback, EmptyBucketMode mode) {
-    return Iterate(chunk_start, start_bucket, end_bucket, callback,
-                   [this, mode](size_t bucket_index) {
-                     if (mode == EmptyBucketMode::FREE_EMPTY_BUCKETS) {
-                       ReleaseBucket(bucket_index);
-                     }
-                   });
+    return BasicSlotSet::Iterate(
+        chunk_start, start_bucket, end_bucket,
+        [&callback](Address slot) { return callback(MaybeObjectSlot(slot)); },
+        [this, mode](size_t bucket_index) {
+          if (mode == EmptyBucketMode::FREE_EMPTY_BUCKETS) {
+            ReleaseBucket(bucket_index);
+          }
+        });
   }

-  // Similar to Iterate but marks potentially empty buckets internally. Stores
-  // true in empty_bucket_found in case a potentially empty bucket was found.
-  // Assumes that the possibly empty-array was already cleared by
-  // CheckPossiblyEmptyBuckets.
+  // Similar to SlotSet::Iterate() but marks potentially empty buckets
+  // internally. Stores true in empty_bucket_found in case a potentially empty
+  // bucket was found. Assumes that the possibly empty-array was already cleared
+  // by CheckPossiblyEmptyBuckets.
   template <typename Callback>
   size_t IterateAndTrackEmptyBuckets(
       Address chunk_start, size_t start_bucket, size_t end_bucket,
       Callback callback, PossiblyEmptyBuckets* possibly_empty_buckets) {
-    return Iterate(chunk_start, start_bucket, end_bucket, callback,
-                   [possibly_empty_buckets, end_bucket](size_t bucket_index) {
-                     possibly_empty_buckets->Insert(bucket_index, end_bucket);
-                   });
-  }
-
-  bool FreeEmptyBuckets(size_t buckets) {
-    bool empty = true;
-    for (size_t bucket_index = 0; bucket_index < buckets; bucket_index++) {
-      if (!FreeBucketIfEmpty(bucket_index)) {
-        empty = false;
-      }
-    }
-
-    return empty;
+    return BasicSlotSet::Iterate(
+        chunk_start, start_bucket, end_bucket,
+        [&callback](Address slot) { return callback(MaybeObjectSlot(slot)); },
+        [possibly_empty_buckets, end_bucket](size_t bucket_index) {
+          possibly_empty_buckets->Insert(bucket_index, end_bucket);
+        });
   }

   // Check whether possibly empty buckets are really empty. Empty buckets are
@@ -406,198 +197,6 @@ class SlotSet {

     return empty;
   }

-  static const int kCellsPerBucket = 32;
-  static const int kCellsPerBucketLog2 = 5;
-  static const int kCellSizeBytesLog2 = 2;
-  static const int kCellSizeBytes = 1 << kCellSizeBytesLog2;
-  static const int kBitsPerCell = 32;
-  static const int kBitsPerCellLog2 = 5;
-  static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
-  static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
-  static const int kBucketsRegularPage =
-      (1 << kPageSizeBits) / kTaggedSize / kCellsPerBucket / kBitsPerCell;
-
-  class Bucket : public Malloced {
-    uint32_t cells_[kCellsPerBucket];
-
-   public:
-    Bucket() {
-      for (int i = 0; i < kCellsPerBucket; i++) {
-        cells_[i] = 0;
-      }
-    }
-
-    uint32_t* cells() { return cells_; }
-    uint32_t* cell(int cell_index) { return cells() + cell_index; }
-
-    template <AccessMode access_mode = AccessMode::ATOMIC>
-    uint32_t LoadCell(int cell_index) {
-      DCHECK_LT(cell_index, kCellsPerBucket);
-      if (access_mode == AccessMode::ATOMIC)
-        return base::AsAtomic32::Acquire_Load(cells() + cell_index);
-      return *(cells() + cell_index);
-    }
-
-    template <AccessMode access_mode = AccessMode::ATOMIC>
-    void SetCellBits(int cell_index, uint32_t mask) {
-      if (access_mode == AccessMode::ATOMIC) {
-        base::AsAtomic32::SetBits(cell(cell_index), mask, mask);
-      } else {
-        uint32_t* c = cell(cell_index);
-        *c = (*c & ~mask) | mask;
-      }
-    }
-
-    void ClearCellBits(int cell_index, uint32_t mask) {
-      base::AsAtomic32::SetBits(cell(cell_index), 0u, mask);
-    }
-
-    void StoreCell(int cell_index, uint32_t value) {
-      base::AsAtomic32::Release_Store(cell(cell_index), value);
-    }
-
-    bool IsEmpty() {
-      for (int i = 0; i < kCellsPerBucket; i++) {
-        if (cells_[i] != 0) {
-          return false;
-        }
-      }
-      return true;
-    }
-  };
-
- private:
-  template <typename Callback, typename EmptyBucketCallback>
-  size_t Iterate(Address chunk_start, size_t start_bucket, size_t end_bucket,
-                 Callback callback, EmptyBucketCallback empty_bucket_callback) {
-    size_t new_count = 0;
-    for (size_t bucket_index = start_bucket; bucket_index < end_bucket;
-         bucket_index++) {
-      Bucket* bucket = LoadBucket(bucket_index);
-      if (bucket != nullptr) {
-        size_t in_bucket_count = 0;
-        size_t cell_offset = bucket_index << kBitsPerBucketLog2;
-        for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
-          uint32_t cell = bucket->LoadCell(i);
-          if (cell) {
-            uint32_t old_cell = cell;
-            uint32_t mask = 0;
-            while (cell) {
-              int bit_offset = base::bits::CountTrailingZeros(cell);
-              uint32_t bit_mask = 1u << bit_offset;
-              Address slot = (cell_offset + bit_offset) << kTaggedSizeLog2;
-              if (callback(MaybeObjectSlot(chunk_start + slot)) == KEEP_SLOT) {
-                ++in_bucket_count;
-              } else {
-                mask |= bit_mask;
-              }
-              cell ^= bit_mask;
-            }
-            uint32_t new_cell = old_cell & ~mask;
-            if (old_cell != new_cell) {
-              bucket->ClearCellBits(i, mask);
-            }
-          }
-        }
-        if (in_bucket_count == 0) {
-          empty_bucket_callback(bucket_index);
-        }
-        new_count += in_bucket_count;
-      }
-    }
-    return new_count;
-  }
-
-  bool FreeBucketIfEmpty(size_t bucket_index) {
-    Bucket* bucket = LoadBucket<AccessMode::NON_ATOMIC>(bucket_index);
-    if (bucket != nullptr) {
-      if (bucket->IsEmpty()) {
-        ReleaseBucket<AccessMode::NON_ATOMIC>(bucket_index);
-      } else {
-        return false;
-      }
-    }
-
-    return true;
-  }
-
-  void ClearBucket(Bucket* bucket, int start_cell, int end_cell) {
-    DCHECK_GE(start_cell, 0);
-    DCHECK_LE(end_cell, kCellsPerBucket);
-    int current_cell = start_cell;
-    while (current_cell < kCellsPerBucket) {
-      bucket->StoreCell(current_cell, 0);
-      current_cell++;
-    }
-  }
-
-  template <AccessMode access_mode = AccessMode::ATOMIC>
-  void ReleaseBucket(size_t bucket_index) {
-    Bucket* bucket = LoadBucket<access_mode>(bucket_index);
-    StoreBucket<access_mode>(bucket_index, nullptr);
-    delete bucket;
-  }
-
-  template <AccessMode access_mode = AccessMode::ATOMIC>
-  Bucket* LoadBucket(Bucket** bucket) {
-    if (access_mode == AccessMode::ATOMIC)
-      return base::AsAtomicPointer::Acquire_Load(bucket);
-    return *bucket;
-  }
-
-  template <AccessMode access_mode = AccessMode::ATOMIC>
-  Bucket* LoadBucket(size_t bucket_index) {
-    return LoadBucket(bucket(bucket_index));
-  }
-
-  template <AccessMode access_mode = AccessMode::ATOMIC>
-  void StoreBucket(Bucket** bucket, Bucket* value) {
-    if (access_mode == AccessMode::ATOMIC) {
-      base::AsAtomicPointer::Release_Store(bucket, value);
-    } else {
-      *bucket = value;
-    }
-  }
-
-  template <AccessMode access_mode = AccessMode::ATOMIC>
-  void StoreBucket(size_t bucket_index, Bucket* value) {
-    StoreBucket(bucket(bucket_index), value);
-  }
-
-  template <AccessMode access_mode = AccessMode::ATOMIC>
-  bool SwapInNewBucket(size_t bucket_index, Bucket* value) {
-    Bucket** b = bucket(bucket_index);
-    if (access_mode == AccessMode::ATOMIC) {
-      return base::AsAtomicPointer::Release_CompareAndSwap(b, nullptr, value) ==
-             nullptr;
-    } else {
-      DCHECK_NULL(*b);
-      *b = value;
-      return true;
-    }
-  }
-
-  // Converts the slot offset into bucket/cell/bit index.
-  static void SlotToIndices(size_t slot_offset, size_t* bucket_index,
-                            int* cell_index, int* bit_index) {
-    DCHECK(IsAligned(slot_offset, kTaggedSize));
-    size_t slot = slot_offset >> kTaggedSizeLog2;
-    *bucket_index = slot >> kBitsPerBucketLog2;
-    *cell_index =
-        static_cast<int>((slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1));
-    *bit_index = static_cast<int>(slot & (kBitsPerCell - 1));
-  }
-
-  Bucket** buckets() { return reinterpret_cast<Bucket**>(this); }
-  Bucket** bucket(size_t bucket_index) { return buckets() + bucket_index; }
-
-#ifdef DEBUG
-  size_t* initial_buckets() { return reinterpret_cast<size_t*>(this) - 1; }
-  static const int kInitialBucketsSize = sizeof(size_t);
-#else
-  static const int kInitialBucketsSize = 0;
-#endif
 };

 static_assert(std::is_standard_layout<SlotSet>::value);
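
After this change SlotSet only adds the V8-specific pieces on top of BasicSlotSet; in particular, Iterate() adapts the raw-address callback of the base class to MaybeObjectSlot. A minimal usage sketch (the predicate StillInteresting() is illustrative, not from this CL):

    // Iterate the remembered slots of a page, keeping a slot only if the
    // illustrative predicate holds; empty buckets are freed along the way.
    size_t remaining = slot_set->Iterate(
        page_start, 0, SlotSet::kBucketsRegularPage,
        [](MaybeObjectSlot slot) {
          return StillInteresting(slot) ? KEEP_SLOT : REMOVE_SLOT;
        },
        SlotSet::FREE_EMPTY_BUCKETS);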
test/unittests/BUILD.gn
@@ -59,6 +59,7 @@ v8_source_set("v8_heap_base_unittests_sources") {

   sources = [
     "heap/base/active-system-pages-unittest.cc",
+    "heap/base/basic-slot-set-unittest.cc",
     "heap/base/worklist-unittest.cc",
   ]
test/unittests/heap/base/basic-slot-set-unittest.cc (new file, 198 lines)
@@ -0,0 +1,198 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/base/basic-slot-set.h"

#include <limits>
#include <map>

#include "testing/gtest/include/gtest/gtest.h"

namespace heap {
namespace base {

static constexpr size_t kTestGranularity = sizeof(void*);
using TestSlotSet = ::heap::base::BasicSlotSet<kTestGranularity>;
static constexpr size_t kTestPageSize = 1 << 17;
static constexpr size_t kBucketsTestPage =
    TestSlotSet::BucketsForSize(kTestPageSize);

TEST(BasicSlotSet, InsertAndLookup1) {
  TestSlotSet* set = TestSlotSet::Allocate(kBucketsTestPage);
  for (size_t i = 0; i < kTestPageSize; i += kTestGranularity) {
    EXPECT_FALSE(set->Lookup(i));
  }
  for (size_t i = 0; i < kTestPageSize; i += kTestGranularity) {
    set->Insert<TestSlotSet::AccessMode::ATOMIC>(i);
  }
  for (size_t i = 0; i < kTestPageSize; i += kTestGranularity) {
    EXPECT_TRUE(set->Lookup(i));
  }
  TestSlotSet::Delete(set, kBucketsTestPage);
}

TEST(BasicSlotSet, InsertAndLookup2) {
  TestSlotSet* set = TestSlotSet::Allocate(kBucketsTestPage);
  for (size_t i = 0; i < kTestPageSize; i += kTestGranularity) {
    if (i % 7 == 0) {
      set->Insert<TestSlotSet::AccessMode::ATOMIC>(i);
    }
  }
  for (size_t i = 0; i < kTestPageSize; i += kTestGranularity) {
    if (i % 7 == 0) {
      EXPECT_TRUE(set->Lookup(i));
    } else {
      EXPECT_FALSE(set->Lookup(i));
    }
  }
  TestSlotSet::Delete(set, kBucketsTestPage);
}

TEST(BasicSlotSet, Iterate) {
  TestSlotSet* set = TestSlotSet::Allocate(kBucketsTestPage);

  for (size_t i = 0; i < kTestPageSize; i += kTestGranularity) {
    if (i % 7 == 0) {
      set->Insert<TestSlotSet::AccessMode::ATOMIC>(i);
    }
  }

  set->Iterate(
      0, 0, kBucketsTestPage,
      [](uintptr_t slot) {
        if (slot % 3 == 0) {
          return KEEP_SLOT;
        } else {
          return REMOVE_SLOT;
        }
      },
      TestSlotSet::KEEP_EMPTY_BUCKETS);

  for (size_t i = 0; i < kTestPageSize; i += kTestGranularity) {
    if (i % 21 == 0) {
      EXPECT_TRUE(set->Lookup(i));
    } else {
      EXPECT_FALSE(set->Lookup(i));
    }
  }

  TestSlotSet::Delete(set, kBucketsTestPage);
}

TEST(BasicSlotSet, IterateFromHalfway) {
  TestSlotSet* set = TestSlotSet::Allocate(kBucketsTestPage);

  for (size_t i = 0; i < kTestPageSize; i += kTestGranularity) {
    if (i % 7 == 0) {
      set->Insert<TestSlotSet::AccessMode::ATOMIC>(i);
    }
  }

  set->Iterate(
      0, kBucketsTestPage / 2, kBucketsTestPage,
      [](uintptr_t slot) {
        if (slot % 3 == 0) {
          return KEEP_SLOT;
        } else {
          return REMOVE_SLOT;
        }
      },
      TestSlotSet::KEEP_EMPTY_BUCKETS);

  for (size_t i = 0; i < kTestPageSize; i += kTestGranularity) {
    if (i < kTestPageSize / 2 && i % 7 == 0) {
      EXPECT_TRUE(set->Lookup(i));
    } else if (i >= kTestPageSize / 2 && i % 21 == 0) {
      EXPECT_TRUE(set->Lookup(i));
    } else {
      EXPECT_FALSE(set->Lookup(i));
    }
  }

  TestSlotSet::Delete(set, kBucketsTestPage);
}

TEST(BasicSlotSet, Remove) {
  TestSlotSet* set = TestSlotSet::Allocate(kBucketsTestPage);

  for (size_t i = 0; i < kTestPageSize; i += kTestGranularity) {
    if (i % 7 == 0) {
      set->Insert<TestSlotSet::AccessMode::ATOMIC>(i);
    }
  }

  for (size_t i = 0; i < kTestPageSize; i += kTestGranularity) {
    if (i % 3 != 0) {
      set->Remove(i);
    }
  }

  for (size_t i = 0; i < kTestPageSize; i += kTestGranularity) {
    if (i % 21 == 0) {
      EXPECT_TRUE(set->Lookup(i));
    } else {
      EXPECT_FALSE(set->Lookup(i));
    }
  }

  TestSlotSet::Delete(set, kBucketsTestPage);
}

namespace {
void CheckRemoveRangeOn(uint32_t start, uint32_t end) {
  TestSlotSet* set = TestSlotSet::Allocate(kBucketsTestPage);
  uint32_t first = start == 0 ? 0 : start - kTestGranularity;
  uint32_t last = end == kTestPageSize ? end - kTestGranularity : end;
  for (const auto mode :
       {TestSlotSet::FREE_EMPTY_BUCKETS, TestSlotSet::KEEP_EMPTY_BUCKETS}) {
    for (uint32_t i = first; i <= last; i += kTestGranularity) {
      set->Insert<TestSlotSet::AccessMode::ATOMIC>(i);
    }
    set->RemoveRange(start, end, kBucketsTestPage, mode);
    if (first != start) {
      EXPECT_TRUE(set->Lookup(first));
    }
    if (last == end) {
      EXPECT_TRUE(set->Lookup(last));
    }
    for (size_t i = start; i < end; i += kTestGranularity) {
      EXPECT_FALSE(set->Lookup(i));
    }
  }
  TestSlotSet::Delete(set, kBucketsTestPage);
}
}  // namespace

TEST(BasicSlotSet, RemoveRange) {
  CheckRemoveRangeOn(0, kTestPageSize);
  CheckRemoveRangeOn(1 * kTestGranularity, 1023 * kTestGranularity);
  for (uint32_t start = 0; start <= 32; start++) {
    CheckRemoveRangeOn(start * kTestGranularity,
                       (start + 1) * kTestGranularity);
    CheckRemoveRangeOn(start * kTestGranularity,
                       (start + 2) * kTestGranularity);
    const uint32_t kEnds[] = {32, 64, 100, 128, 1024, 1500, 2048};
    for (size_t i = 0; i < sizeof(kEnds) / sizeof(uint32_t); i++) {
      for (int k = -3; k <= 3; k++) {
        uint32_t end = (kEnds[i] + k);
        if (start < end) {
          CheckRemoveRangeOn(start * kTestGranularity, end * kTestGranularity);
        }
      }
    }
  }
  TestSlotSet* set = TestSlotSet::Allocate(kBucketsTestPage);
  for (const auto mode :
       {TestSlotSet::FREE_EMPTY_BUCKETS, TestSlotSet::KEEP_EMPTY_BUCKETS}) {
    set->Insert<TestSlotSet::AccessMode::ATOMIC>(kTestPageSize / 2);
    set->RemoveRange(0, kTestPageSize, kBucketsTestPage, mode);
    for (uint32_t i = 0; i < kTestPageSize; i += kTestGranularity) {
      EXPECT_FALSE(set->Lookup(i));
    }
  }
  TestSlotSet::Delete(set, kBucketsTestPage);
}

}  // namespace base
}  // namespace heap
test/unittests/heap/slot-set-unittest.cc
@@ -2,11 +2,12 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

+#include "src/heap/slot-set.h"
+
 #include <limits>
 #include <map>

 #include "src/common/globals.h"
-#include "src/heap/slot-set.h"
 #include "src/heap/spaces.h"
 #include "src/objects/slots.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -44,128 +45,6 @@ TEST(SlotSet, BucketsForSize) {
             SlotSet::BucketsForSize(Page::kPageSize * 2));
 }

-TEST(SlotSet, InsertAndLookup1) {
-  SlotSet* set = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
-  for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
-    EXPECT_FALSE(set->Lookup(i));
-  }
-  for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
-    set->Insert<AccessMode::ATOMIC>(i);
-  }
-  for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
-    EXPECT_TRUE(set->Lookup(i));
-  }
-  SlotSet::Delete(set, SlotSet::kBucketsRegularPage);
-}
-
-TEST(SlotSet, InsertAndLookup2) {
-  SlotSet* set = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
-  for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
-    if (i % 7 == 0) {
-      set->Insert<AccessMode::ATOMIC>(i);
-    }
-  }
-  for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
-    if (i % 7 == 0) {
-      EXPECT_TRUE(set->Lookup(i));
-    } else {
-      EXPECT_FALSE(set->Lookup(i));
-    }
-  }
-  SlotSet::Delete(set, SlotSet::kBucketsRegularPage);
-}
-
-TEST(SlotSet, Iterate) {
-  SlotSet* set = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
-
-  for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
-    if (i % 7 == 0) {
-      set->Insert<AccessMode::ATOMIC>(i);
-    }
-  }
-
-  set->Iterate(
-      kNullAddress, 0, SlotSet::kBucketsRegularPage,
-      [](MaybeObjectSlot slot) {
-        if (slot.address() % 3 == 0) {
-          return KEEP_SLOT;
-        } else {
-          return REMOVE_SLOT;
-        }
-      },
-      SlotSet::KEEP_EMPTY_BUCKETS);
-
-  for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
-    if (i % 21 == 0) {
-      EXPECT_TRUE(set->Lookup(i));
-    } else {
-      EXPECT_FALSE(set->Lookup(i));
-    }
-  }
-
-  SlotSet::Delete(set, SlotSet::kBucketsRegularPage);
-}
-
-TEST(SlotSet, IterateFromHalfway) {
-  SlotSet* set = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
-
-  for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
-    if (i % 7 == 0) {
-      set->Insert<AccessMode::ATOMIC>(i);
-    }
-  }
-
-  set->Iterate(
-      kNullAddress, SlotSet::kBucketsRegularPage / 2,
-      SlotSet::kBucketsRegularPage,
-      [](MaybeObjectSlot slot) {
-        if (slot.address() % 3 == 0) {
-          return KEEP_SLOT;
-        } else {
-          return REMOVE_SLOT;
-        }
-      },
-      SlotSet::KEEP_EMPTY_BUCKETS);
-
-  for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
-    if (i < Page::kPageSize / 2 && i % 7 == 0) {
-      EXPECT_TRUE(set->Lookup(i));
-    } else if (i >= Page::kPageSize / 2 && i % 21 == 0) {
-      EXPECT_TRUE(set->Lookup(i));
-    } else {
-      EXPECT_FALSE(set->Lookup(i));
-    }
-  }
-
-  SlotSet::Delete(set, SlotSet::kBucketsRegularPage);
-}
-
-TEST(SlotSet, Remove) {
-  SlotSet* set = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
-
-  for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
-    if (i % 7 == 0) {
-      set->Insert<AccessMode::ATOMIC>(i);
-    }
-  }
-
-  for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
-    if (i % 3 != 0) {
-      set->Remove(i);
-    }
-  }
-
-  for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
-    if (i % 21 == 0) {
-      EXPECT_TRUE(set->Lookup(i));
-    } else {
-      EXPECT_FALSE(set->Lookup(i));
-    }
-  }
-
-  SlotSet::Delete(set, SlotSet::kBucketsRegularPage);
-}
-
 TEST(PossiblyEmptyBuckets, ContainsAndInsert) {
   static const int kBuckets = 100;
   PossiblyEmptyBuckets possibly_empty_buckets;
@@ -180,57 +59,6 @@ TEST(PossiblyEmptyBuckets, ContainsAndInsert) {
   EXPECT_TRUE(possibly_empty_buckets.Contains(last + 1));
 }

-void CheckRemoveRangeOn(uint32_t start, uint32_t end) {
-  SlotSet* set = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
-  uint32_t first = start == 0 ? 0 : start - kTaggedSize;
-  uint32_t last = end == Page::kPageSize ? end - kTaggedSize : end;
-  for (const auto mode :
-       {SlotSet::FREE_EMPTY_BUCKETS, SlotSet::KEEP_EMPTY_BUCKETS}) {
-    for (uint32_t i = first; i <= last; i += kTaggedSize) {
-      set->Insert<AccessMode::ATOMIC>(i);
-    }
-    set->RemoveRange(start, end, SlotSet::kBucketsRegularPage, mode);
-    if (first != start) {
-      EXPECT_TRUE(set->Lookup(first));
-    }
-    if (last == end) {
-      EXPECT_TRUE(set->Lookup(last));
-    }
-    for (uint32_t i = start; i < end; i += kTaggedSize) {
-      EXPECT_FALSE(set->Lookup(i));
-    }
-  }
-  SlotSet::Delete(set, SlotSet::kBucketsRegularPage);
-}
-
-TEST(SlotSet, RemoveRange) {
-  CheckRemoveRangeOn(0, Page::kPageSize);
-  CheckRemoveRangeOn(1 * kTaggedSize, 1023 * kTaggedSize);
-  for (uint32_t start = 0; start <= 32; start++) {
-    CheckRemoveRangeOn(start * kTaggedSize, (start + 1) * kTaggedSize);
-    CheckRemoveRangeOn(start * kTaggedSize, (start + 2) * kTaggedSize);
-    const uint32_t kEnds[] = {32, 64, 100, 128, 1024, 1500, 2048};
-    for (size_t i = 0; i < sizeof(kEnds) / sizeof(uint32_t); i++) {
-      for (int k = -3; k <= 3; k++) {
-        uint32_t end = (kEnds[i] + k);
-        if (start < end) {
-          CheckRemoveRangeOn(start * kTaggedSize, end * kTaggedSize);
-        }
-      }
-    }
-  }
-  SlotSet* set = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
-  for (const auto mode :
-       {SlotSet::FREE_EMPTY_BUCKETS, SlotSet::KEEP_EMPTY_BUCKETS}) {
-    set->Insert<AccessMode::ATOMIC>(Page::kPageSize / 2);
-    set->RemoveRange(0, Page::kPageSize, SlotSet::kBucketsRegularPage, mode);
-    for (uint32_t i = 0; i < Page::kPageSize; i += kTaggedSize) {
-      EXPECT_FALSE(set->Lookup(i));
-    }
-  }
-  SlotSet::Delete(set, SlotSet::kBucketsRegularPage);
-}
-
 TEST(TypedSlotSet, Iterate) {
   TypedSlotSet set(0);
   // These two constants must be static as a workaround