[sandbox] Move ExternalPointerTable entry logic into new Entry class
This CL introduces a new ExternalPointerTable::Entry class and moves all
low-level logic related to entry management into this class.

Bug: v8:10391
Change-Id: Ib7eb05da1d277cb665503e98b3f074520e572bad
Cq-Include-Trybots: luci.v8.try:v8_linux64_heap_sandbox_dbg_ng,v8_linux_arm64_sim_heap_sandbox_dbg_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3829485
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Commit-Queue: Samuel Groß <saelo@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82825}
parent 75391be247
commit d843cda769
@@ -422,18 +422,18 @@ constexpr uint64_t kAllExternalPointerTypeTags[] = {
    (HasMarkBit ? kExternalPointerMarkBit : 0))
 
 enum ExternalPointerTag : uint64_t {
   // Empty tag value. Mostly used as placeholder.
   kExternalPointerNullTag = MAKE_TAG(0, 0b00000000),
   // Tag to use for unsandboxed external pointers, which are still stored as
   // raw pointers on the heap.
   kUnsandboxedExternalPointerTag = MAKE_TAG(0, 0b00000000),
   // External pointer tag that will match any external pointer. Use with care!
   kAnyExternalPointerTag = MAKE_TAG(1, 0b11111111),
   // The free entry tag has all type bits set so every type check with a
   // different type fails. It also doesn't have the mark bit set as free
   // entries are (by definition) not alive.
   kExternalPointerFreeEntryTag = MAKE_TAG(0, 0b11111111),
   // Evacuation entries are used during external pointer table compaction.
-  kEvacuationEntryTag = MAKE_TAG(1, 0b11100111),
+  kExternalPointerEvacuationEntryTag = MAKE_TAG(1, 0b11100111),
 
   ALL_EXTERNAL_POINTER_TAGS(EXTERNAL_POINTER_TAG_ENUM)
 };
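For orientation, the MAKE_TAG scheme above packs a type tag and a mark bit into the unused top bits of a 64-bit word, leaving the low bits for the pointer payload. A minimal standalone sketch of the idea (the shift, mark-bit position, and tag values here are illustrative stand-ins, not V8's real constants):

#include <cassert>
#include <cstdint>

// Illustrative stand-ins for V8's tag layout.
constexpr int kTagShift = 48;
constexpr uint64_t kMarkBit = 1ULL << 62;

constexpr uint64_t MakeTag(bool has_mark_bit, uint64_t type_tag) {
  return (type_tag << kTagShift) | (has_mark_bit ? kMarkBit : 0);
}

int main() {
  constexpr uint64_t kFreeEntryTag = MakeTag(false, 0b11111111);
  uint64_t payload = 0x00007f0012345678;  // fits in the low 48 bits
  uint64_t entry = payload | kFreeEntryTag;
  // Untagging with the matching tag recovers the original payload.
  assert((entry & ~kFreeEntryTag) == payload);
  return 0;
}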
@@ -17,33 +17,34 @@ namespace internal {
 
 Address ExternalPointerTable::Get(ExternalPointerHandle handle,
                                   ExternalPointerTag tag) const {
-  uint32_t index = handle_to_index(handle);
-  Address entry = load_atomic(index);
-  DCHECK(!is_free(entry));
-
-  return entry & ~tag;
+  uint32_t index = HandleToIndex(handle);
+  Entry entry = RelaxedLoad(index);
+  DCHECK(entry.IsRegularEntry());
+  return entry.Untag(tag);
 }
 
 void ExternalPointerTable::Set(ExternalPointerHandle handle, Address value,
                                ExternalPointerTag tag) {
   DCHECK_NE(kNullExternalPointerHandle, handle);
   DCHECK_EQ(0, value & kExternalPointerTagMask);
-  DCHECK(is_marked(tag));
-
-  uint32_t index = handle_to_index(handle);
-  store_atomic(index, value | tag);
+  DCHECK(tag & kExternalPointerMarkBit);
+
+  uint32_t index = HandleToIndex(handle);
+  Entry entry = Entry::MakeRegularEntry(value, tag);
+  RelaxedStore(index, entry);
 }
 
 Address ExternalPointerTable::Exchange(ExternalPointerHandle handle,
                                        Address value, ExternalPointerTag tag) {
   DCHECK_NE(kNullExternalPointerHandle, handle);
   DCHECK_EQ(0, value & kExternalPointerTagMask);
-  DCHECK(is_marked(tag));
-
-  uint32_t index = handle_to_index(handle);
-  Address entry = exchange_atomic(index, value | tag);
-  DCHECK(!is_free(entry));
-  return entry & ~tag;
+  DCHECK(tag & kExternalPointerMarkBit);
+
+  uint32_t index = HandleToIndex(handle);
+  Entry new_entry = Entry::MakeRegularEntry(value, tag);
+  Entry old_entry = RelaxedExchange(index, new_entry);
+  DCHECK(old_entry.IsRegularEntry());
+  return old_entry.Untag(tag);
 }
 
 ExternalPointerHandle ExternalPointerTable::AllocateAndInitializeEntry(
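Note how the new Entry::Untag mirrors the old `entry & ~tag` logic: the type check works by corruption rather than by branching. If the caller's tag matches, clearing the tag bits yields the original pointer; if it does not, some tag bits survive and the result is not a dereferenceable canonical address. A small hedged illustration with made-up tag values:

#include <cstdint>
#include <cstdio>

constexpr uint64_t kTagA = 0xa5ULL << 48;  // hypothetical tag values
constexpr uint64_t kTagB = 0x5aULL << 48;

int main() {
  uint64_t payload = 0x00007f0012345678;
  uint64_t entry = payload | kTagA;
  uint64_t with_right_tag = entry & ~kTagA;  // == payload
  uint64_t with_wrong_tag = entry & ~kTagB;  // leftover tag bits stay set
  printf("right tag recovers payload: %d\n", with_right_tag == payload);
  printf("wrong tag recovers payload: %d\n", with_wrong_tag == payload);
  return 0;
}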
@@ -78,17 +79,19 @@ ExternalPointerHandle ExternalPointerTable::AllocateAndInitializeEntry(
     DCHECK_LT(freelist_head, capacity());
     index = freelist_head;
 
-    Address entry = load_atomic(index);
-    uint32_t new_freelist_head = extract_next_entry_from_freelist_entry(entry);
+    Entry entry = RelaxedLoad(index);
+    DCHECK(entry.IsFreelistEntry());
+    uint32_t new_freelist_head = entry.ExtractNextFreelistEntry();
 
     uint32_t old_val = base::Relaxed_CompareAndSwap(
         &freelist_head_, freelist_head, new_freelist_head);
     success = old_val == freelist_head;
   }
 
-  store_atomic(index, initial_value | tag);
+  Entry entry = Entry::MakeRegularEntry(initial_value, tag);
+  RelaxedStore(index, entry);
 
-  return index_to_handle(index);
+  return IndexToHandle(index);
 }
 
 ExternalPointerHandle ExternalPointerTable::AllocateEvacuationEntry(
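The allocation path above is a lock-free freelist pop: read the head, decode the next index out of the head entry, then try to CAS the head forward, retrying if another thread raced ahead. A self-contained sketch of the same pattern using std::atomic (table layout and names invented for illustration; the real code also has to reason about ABA and concurrent table growth, which this omits):

#include <atomic>
#include <cstdint>
#include <vector>

struct FreelistTable {
  // Each entry stores the index of the next free entry in its low 24 bits.
  std::vector<uint64_t> entries;
  std::atomic<uint32_t> freelist_head{0};

  // Returns a free index, or 0 if the freelist is empty (index 0 reserved).
  uint32_t Allocate() {
    uint32_t index = 0;
    bool success = false;
    while (!success) {
      index = freelist_head.load(std::memory_order_relaxed);
      if (index == 0) return 0;
      uint32_t next = static_cast<uint32_t>(entries[index]) & 0x00ffffff;
      // If another thread popped first, the CAS fails and we retry.
      success = freelist_head.compare_exchange_weak(
          index, next, std::memory_order_relaxed);
    }
    return index;
  }
};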
@@ -111,26 +114,26 @@ ExternalPointerHandle ExternalPointerTable::AllocateEvacuationEntry(
 
     if (index >= start_of_evacuation_area) return kNullExternalPointerHandle;
 
-    Address entry = load_atomic(index);
-    uint32_t new_freelist_head = extract_next_entry_from_freelist_entry(entry);
+    Entry entry = RelaxedLoad(index);
+    DCHECK(entry.IsFreelistEntry());
+    uint32_t new_freelist_head = entry.ExtractNextFreelistEntry();
 
     uint32_t old_val = base::Relaxed_CompareAndSwap(
         &freelist_head_, freelist_head, new_freelist_head);
     success = old_val == freelist_head;
   }
 
-  return index_to_handle(index);
+  return IndexToHandle(index);
 }
 
 uint32_t ExternalPointerTable::FreelistSize() {
-  Address entry = 0;
-  while (!is_free(entry)) {
+  Entry entry;
+  do {
     uint32_t freelist_head = base::Relaxed_Load(&freelist_head_);
-    if (!freelist_head) {
-      return 0;
-    }
-    entry = load_atomic(freelist_head);
-  }
-  uint32_t freelist_size = extract_freelist_size_from_freelist_entry(entry);
+    if (!freelist_head) return 0;
+    entry = RelaxedLoad(freelist_head);
+  } while (!entry.IsFreelistEntry());
+  uint32_t freelist_size = entry.ExtractFreelistSize();
   DCHECK_LE(freelist_size, capacity());
   return freelist_size;
 }
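The do/while retry in the new FreelistSize is needed because, between the relaxed load of freelist_head_ and the load of that entry, a racing allocation may pop the entry and a mutator may overwrite it with a regular entry; IsFreelistEntry() detects this and the loop retries. The size itself comes out of the 24/24-bit packing used by freelist entries (documented at MakeFreelistEntry later in this CL): the low 24 bits hold the next free index, the next 24 bits hold the list length. A quick worked example of that encoding (the tag constant is a placeholder value):

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kFreeEntryTag = 0xffULL << 56;  // placeholder tag bits
  uint32_t next_free = 42;    // index of the next free entry
  uint32_t list_size = 1000;  // number of entries on the freelist

  uint64_t entry = (uint64_t{list_size} << 24) | next_free | kFreeEntryTag;

  // Mirrors Entry::ExtractNextFreelistEntry and Entry::ExtractFreelistSize.
  assert((static_cast<uint32_t>(entry) & 0x00ffffff) == next_free);
  assert((static_cast<uint32_t>(entry >> 24) & 0x00ffffff) == list_size);
  return 0;
}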
@@ -140,7 +143,7 @@ void ExternalPointerTable::Mark(ExternalPointerHandle handle,
   static_assert(sizeof(base::Atomic64) == sizeof(Address));
   DCHECK_EQ(handle, *reinterpret_cast<ExternalPointerHandle*>(handle_location));
 
-  uint32_t index = handle_to_index(handle);
+  uint32_t index = HandleToIndex(handle);
 
   // Check if the entry should be evacuated for table compaction.
   // The current value of the start of the evacuation area is cached in a local
@@ -153,11 +156,11 @@ void ExternalPointerTable::Mark(ExternalPointerHandle handle,
     ExternalPointerHandle new_handle =
         AllocateEvacuationEntry(current_start_of_evacuation_area);
     if (new_handle) {
-      DCHECK_LT(handle_to_index(new_handle), current_start_of_evacuation_area);
-      uint32_t index = handle_to_index(new_handle);
+      DCHECK_LT(HandleToIndex(new_handle), current_start_of_evacuation_area);
+      uint32_t index = HandleToIndex(new_handle);
       // No need for an atomic store as the entry will only be accessed during
       // sweeping.
-      store(index, make_evacuation_entry(handle_location));
+      Store(index, Entry::MakeEvacuationEntry(handle_location));
 #ifdef DEBUG
       // Mark the handle as visited in debug builds to detect double
       // initialization of external pointer fields.
@@ -181,18 +184,19 @@ void ExternalPointerTable::Mark(ExternalPointerHandle handle,
   // Even if the entry is marked for evacuation, it still needs to be marked as
   // alive as it may be visited during sweeping before being evacuated.
 
-  base::Atomic64 old_val = load_atomic(index);
-  base::Atomic64 new_val = set_mark_bit(old_val);
-  DCHECK(!is_free(old_val));
+  Entry old_entry = RelaxedLoad(index);
+  DCHECK(old_entry.IsRegularEntry());
+
+  Entry new_entry = old_entry;
+  new_entry.SetMarkBit();
 
   // We don't need to perform the CAS in a loop: if the new value is not equal
   // to the old value, then the mutator must've just written a new value into
   // the entry. This in turn must've set the marking bit already (see
   // ExternalPointerTable::Set), so we don't need to do it again.
-  base::Atomic64* ptr = reinterpret_cast<base::Atomic64*>(entry_address(index));
-  base::Atomic64 val = base::Relaxed_CompareAndSwap(ptr, old_val, new_val);
-  DCHECK((val == old_val) || is_marked(val));
-  USE(val);
+  Entry entry = RelaxedCompareAndSwap(index, old_entry, new_entry);
+  DCHECK((entry == old_entry) || entry.IsMarked());
+  USE(entry);
 }
 
 bool ExternalPointerTable::IsCompacting() {
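The single-shot CAS above encodes an invariant worth spelling out: Set() always stores entries with the mark bit already set (that is what DCHECK(tag & kExternalPointerMarkBit) enforces), so if the marker loses the race, the value that beat it is necessarily marked and nothing is lost. A minimal sketch of that reasoning (bit positions invented):

#include <atomic>
#include <cstdint>

constexpr uint64_t kMarkBit = 1ULL << 62;  // illustrative position

// Mutator write path: values are always stored pre-marked, mirroring
// the DCHECK on the tag in ExternalPointerTable::Set.
void MutatorSet(std::atomic<uint64_t>& slot, uint64_t tagged_value) {
  slot.store(tagged_value | kMarkBit, std::memory_order_relaxed);
}

// Marker path: one CAS attempt suffices; no retry loop is needed.
void GcMark(std::atomic<uint64_t>& slot) {
  uint64_t old_val = slot.load(std::memory_order_relaxed);
  uint64_t expected = old_val;
  if (!slot.compare_exchange_strong(expected, old_val | kMarkBit,
                                    std::memory_order_relaxed)) {
    // CAS failed, so the mutator just wrote a new value. That value was
    // stored pre-marked, hence (expected & kMarkBit) != 0 here.
  }
}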
@@ -66,7 +66,7 @@ void ExternalPointerTable::Init(Isolate* isolate) {
   // Set up the special null entry. This entry must contain nullptr so that
   // empty EmbedderDataSlots represent nullptr.
   static_assert(kNullExternalPointerHandle == 0);
-  store(kNullExternalPointerHandle, kNullAddress);
+  Store(0, Entry::MakeNullEntry());
 }
 
 void ExternalPointerTable::TearDown() {
@@ -145,24 +145,25 @@ uint32_t ExternalPointerTable::SweepAndCompact(Isolate* isolate) {
 
   // Skip the special null entry. This also guarantees that the first block
   // will never be decommitted.
   // The null entry may have been marked as alive (if any live object was
   // referencing it), which is fine, the entry will just keep the bit set.
   DCHECK_GE(capacity(), 1);
   uint32_t table_end = last_in_use_block + kEntriesPerBlock;
   DCHECK(IsAligned(table_end, kEntriesPerBlock));
   for (uint32_t i = table_end - 1; i > 0; i--) {
     // No other threads are active during sweep, so there is no need to use
     // atomic operations here.
-    Address entry = load(i);
-    if (is_evacuation_entry(entry)) {
+    Entry entry = Load(i);
+    if (entry.IsEvacuationEntry()) {
       // Resolve the evacuation entry: take the pointer to the handle from the
       // evacuation entry, copy the entry to its new location, and finally
       // update the handle to point to the new entry.
-      Address evacuation_entry = load(i);
       ExternalPointerHandle* handle_location =
           reinterpret_cast<ExternalPointerHandle*>(
-              extract_handle_location_from_evacuation_entry(evacuation_entry));
+              entry.ExtractHandleLocation());
 
       ExternalPointerHandle old_handle = *handle_location;
-      ExternalPointerHandle new_handle = index_to_handle(i);
+      ExternalPointerHandle new_handle = IndexToHandle(i);
 
       // For the compaction algorithm to work optimally, double initialization
       // of entries is forbidden, see below. This DCHECK can detect double
@@ -178,11 +179,12 @@ uint32_t ExternalPointerTable::SweepAndCompact(Isolate* isolate) {
       // external pointer slot is re-initialized, in which case the old_handle
      // may now also point before the evacuation area. For that reason,
       // re-initialization of external pointer slots is forbidden.
-      DCHECK_GE(handle_to_index(old_handle), first_block_of_evacuation_area);
-      DCHECK_LT(handle_to_index(new_handle), first_block_of_evacuation_area);
+      DCHECK_GE(HandleToIndex(old_handle), first_block_of_evacuation_area);
+      DCHECK_LT(HandleToIndex(new_handle), first_block_of_evacuation_area);
 
-      Address entry_to_evacuate = load(handle_to_index(old_handle));
-      store(i, clear_mark_bit(entry_to_evacuate));
+      Entry entry_to_evacuate = Load(HandleToIndex(old_handle));
+      entry_to_evacuate.ClearMarkBit();
+      Store(i, entry_to_evacuate);
       *handle_location = new_handle;
 
 #ifdef DEBUG
@@ -191,8 +193,9 @@ uint32_t ExternalPointerTable::SweepAndCompact(Isolate* isolate) {
       // barriers, so we'd like to avoid them. See the compaction algorithm
       // explanation in external-pointer-table.h for more details.
       constexpr Address kClobberedEntryMarker = static_cast<Address>(-1);
-      DCHECK_NE(entry_to_evacuate, kClobberedEntryMarker);
-      store(handle_to_index(old_handle), kClobberedEntryMarker);
+      const Entry kClobberedEntry = Entry::Decode(kClobberedEntryMarker);
+      DCHECK_NE(entry_to_evacuate, kClobberedEntry);
+      Store(HandleToIndex(old_handle), kClobberedEntry);
 #endif  // DEBUG
 
       // While we know that the old entry is now free, we don't add it to (the
@@ -201,14 +204,15 @@ uint32_t ExternalPointerTable::SweepAndCompact(Isolate* isolate) {
       // that the blocks out of which entries are evacuated will all be
       // decommitted anyway after this loop, which is usually the case unless
       // compaction was already aborted during marking.
-    } else if (!is_marked(entry)) {
+    } else if (!entry.IsMarked()) {
       current_freelist_size++;
-      Address entry =
-          make_freelist_entry(current_freelist_head, current_freelist_size);
-      store(i, entry);
+      Entry entry = Entry::MakeFreelistEntry(current_freelist_head,
+                                             current_freelist_size);
+      Store(i, entry);
       current_freelist_head = i;
     } else {
-      store(i, clear_mark_bit(entry));
+      entry.ClearMarkBit();
+      Store(i, entry);
     }
 
     if (last_in_use_block == i) {
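Because the sweep walks the table from the highest index down to 1, each dead entry it threads onto the freelist ends up in front of all higher-indexed ones, so the rebuilt list hands out low indices first and keeps the table dense. A condensed sketch of just this rebuild step (entry encoding replaced by a plain struct for readability):

#include <cstdint>
#include <vector>

struct Slot {
  bool marked = false;
  uint32_t next_free = 0;  // next freelist index; 0 terminates the list
  uint32_t list_size = 0;  // length of the freelist starting here
};

// Walks backwards (entry 0 is reserved) and links every unmarked slot
// into a freelist whose head is the lowest free index.
uint32_t RebuildFreelist(std::vector<Slot>& table) {
  uint32_t head = 0;
  uint32_t size = 0;
  for (uint32_t i = static_cast<uint32_t>(table.size()) - 1; i > 0; i--) {
    if (!table[i].marked) {
      table[i].next_free = head;
      table[i].list_size = ++size;
      head = i;
    } else {
      table[i].marked = false;  // reset the mark bit for the next GC cycle
    }
  }
  return head;
}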
@@ -311,9 +315,9 @@ uint32_t ExternalPointerTable::Grow(Isolate* isolate) {
   uint32_t current_freelist_size = 1;
   for (uint32_t i = start; i < last; i++) {
     uint32_t next_entry = i + 1;
-    store(i, make_freelist_entry(next_entry, current_freelist_size++));
+    Store(i, Entry::MakeFreelistEntry(next_entry, current_freelist_size++));
   }
-  store(last, make_freelist_entry(0, current_freelist_size));
+  Store(last, Entry::MakeFreelistEntry(0, current_freelist_size));
 
   // This must be a release store to prevent the preceding stores to the
   // freelist from being reordered past this store. See
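The release-store requirement in that comment is the usual publish pattern: a consumer that observes the new freelist head must also observe the plain stores that initialized the entries behind it. A sketch with std::atomic (array size and names invented):

#include <atomic>
#include <cstdint>

constexpr uint32_t kN = 8;
uint64_t entries[kN];                   // plain (non-atomic) entry storage
std::atomic<uint32_t> freelist_head{0};

void GrowAndPublish() {
  // Initialize the new freelist entries with plain stores...
  for (uint32_t i = 1; i < kN - 1; i++) entries[i] = i + 1;
  entries[kN - 1] = 0;
  // ...then publish. The release store keeps the initializing stores above
  // from being reordered past the head update.
  freelist_head.store(1, std::memory_order_release);
}

void Consume() {
  // The matching acquire load guarantees the entry contents are visible.
  uint32_t head = freelist_head.load(std::memory_order_acquire);
  if (head != 0) {
    uint64_t next = entries[head];
    (void)next;
  }
}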
@@ -113,9 +113,9 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
   // The freelist entries encode the freelist size and the next entry on the
   // list, so this routine fetches the first entry on the freelist and returns
   // the size encoded in it.
-  // As entries may be allocated from background threads while this method
-  // executes, its result should only be treated as an approximation of the
-  // real size.
+  // As table entries can be allocated from other threads, the freelist size
+  // may have changed by the time this method returns. As such, the returned
+  // value should only be treated as an approximation.
   inline uint32_t FreelistSize();
 
   // Marks the specified entry as alive.
@@ -193,6 +193,9 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
   // Required for Isolate::CheckIsolateLayout().
   friend class Isolate;
 
+  //
+  // ExternalPointerTable constants.
+  //
   // An external pointer table grows and shrinks in blocks of this size. This
   // is also the initial size of the table.
 #if V8_TARGET_ARCH_PPC64
@@ -232,25 +235,15 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
   static constexpr uint32_t kVisitedHandleMarker = 0x1;
   static_assert(kExternalPointerIndexShift >= 1);
 
-  // Outcome of external pointer table compaction to use for the
-  // ExternalPointerTableCompactionOutcome histogram.
-  enum class TableCompactionOutcome {
-    // Table compaction was successful.
-    kSuccess = 0,
-    // Table compaction was partially successful: marking finished successfully,
-    // but not all blocks that we wanted to free could be freed because some new
-    // entries had already been allocated in them again.
-    kPartialSuccess = 1,
-    // Table compaction was aborted during marking because the freelist grew too
-    // short.
-    kAbortedDuringMarking = 2,
-  };
-
+  //
+  // Internal methods.
+  //
   // Returns true if this external pointer table has been initialized.
   bool is_initialized() { return buffer_ != kNullAddress; }
 
   // Table capacity accessors.
   // The capacity is expressed in number of entries.
   //
   // The capacity of the table may increase during entry allocation (if the
   // table is grown) and may decrease during sweeping (if blocks at the end are
   // free). As the former may happen concurrently, the capacity can only be
@@ -297,7 +290,22 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
   // Stop compacting at the end of sweeping.
   void StopCompacting();
 
-  inline uint32_t handle_to_index(ExternalPointerHandle handle) const {
+  // Outcome of external pointer table compaction to use for the
+  // ExternalPointerTableCompactionOutcome histogram.
+  enum class TableCompactionOutcome {
+    // Table compaction was successful.
+    kSuccess = 0,
+    // Table compaction was partially successful: marking finished successfully,
+    // but not all blocks that we wanted to free could be freed because some new
+    // entries had already been allocated in them again.
+    kPartialSuccess = 1,
+    // Table compaction was aborted during marking because the freelist grew too
+    // short.
+    kAbortedDuringMarking = 2,
+  };
+
+  // Handle <-> Table index conversion.
+  inline uint32_t HandleToIndex(ExternalPointerHandle handle) const {
     uint32_t index = handle >> kExternalPointerIndexShift;
     DCHECK_EQ(handle & ~kVisitedHandleMarker,
               index << kExternalPointerIndexShift);
@@ -305,7 +313,7 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
     return index;
   }
 
-  inline ExternalPointerHandle index_to_handle(uint32_t index) const {
+  inline ExternalPointerHandle IndexToHandle(uint32_t index) const {
     ExternalPointerHandle handle = index << kExternalPointerIndexShift;
     DCHECK_EQ(index, handle >> kExternalPointerIndexShift);
     return handle;
@@ -317,6 +325,133 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
   }
 #endif  // DEBUG
 
+  //
+  // Entries of an ExternalPointerTable.
+  //
+  class Entry {
+   public:
+    // Construct a null entry.
+    Entry() : value_(kNullAddress) {}
+
+    // Returns the payload of this entry. If the provided tag does not match
+    // the tag of the entry, the returned pointer cannot be dereferenced as
+    // some of its top bits will be set.
+    Address Untag(ExternalPointerTag tag) { return value_ & ~tag; }
+
+    // Return the payload of this entry without performing a tag check. The
+    // caller must ensure that the pointer is not used in an unsafe way.
+    Address UncheckedUntag() { return value_ & ~kExternalPointerTagMask; }
+
+    // Returns true if this entry is tagged with the given tag.
+    bool HasTag(ExternalPointerTag tag) const {
+      return (value_ & kExternalPointerTagMask) == tag;
+    }
+
+    // Check, set, and clear the marking bit of this entry.
+    bool IsMarked() const { return (value_ & kExternalPointerMarkBit) != 0; }
+    void SetMarkBit() { value_ |= kExternalPointerMarkBit; }
+    void ClearMarkBit() { value_ &= ~kExternalPointerMarkBit; }
+
+    // Returns true if this entry is part of the freelist, in which case
+    // ExtractNextFreelistEntry and ExtractFreelistSize may be used.
+    bool IsFreelistEntry() const {
+      return HasTag(kExternalPointerFreeEntryTag);
+    }
+
+    // Returns true if this is an evacuation entry, in which case
+    // ExtractHandleLocation may be used.
+    bool IsEvacuationEntry() const {
+      return HasTag(kExternalPointerEvacuationEntryTag);
+    }
+
+    // Returns true if this is neither a freelist- nor an evacuation entry.
+    bool IsRegularEntry() const {
+      return !IsFreelistEntry() && !IsEvacuationEntry();
+    }
+
+    // Extract the index of the next entry on the freelist. Must only be
+    // called if this is a freelist entry. See also MakeFreelistEntry.
+    uint32_t ExtractNextFreelistEntry() const {
+      DCHECK(IsFreelistEntry());
+      return static_cast<uint32_t>(value_) & 0x00ffffff;
+    }
+
+    // Extract the size of the freelist following this entry. Must only be
+    // called if this is a freelist entry. See also MakeFreelistEntry.
+    uint32_t ExtractFreelistSize() const {
+      DCHECK(IsFreelistEntry());
+      return static_cast<uint32_t>(value_ >> 24) & 0x00ffffff;
+    }
+
+    // An evacuation entry contains the address of the Handle to a (regular)
+    // entry that is to be evacuated (into this entry). This method extracts
+    // that address. Must only be called if this is an evacuation entry.
+    Address ExtractHandleLocation() {
+      DCHECK(IsEvacuationEntry());
+      return Untag(kExternalPointerEvacuationEntryTag);
+    }
+
+    // Constructs an entry containing all zeroes.
+    static Entry MakeNullEntry() { return Entry(kNullAddress); }
+
+    // Constructs a regular entry by tagging the pointer with the tag.
+    static Entry MakeRegularEntry(Address value, ExternalPointerTag tag) {
+      DCHECK_NE(tag, kExternalPointerFreeEntryTag);
+      DCHECK_NE(tag, kExternalPointerEvacuationEntryTag);
+      return Entry(value | tag);
+    }
+
+    // Constructs a freelist entry given the current freelist head and size.
+    static Entry MakeFreelistEntry(uint32_t current_freelist_head,
+                                   uint32_t current_freelist_size) {
+      // The next freelist entry is stored in the lower 24 bits of the entry.
+      // The freelist size is stored in the next 24 bits. If we ever need
+      // larger tables, and therefore larger indices to encode the next free
+      // entry, we can make the freelist size an approximation and drop some
+      // of the bottom bits of the value when encoding it.
+      // We could also keep the freelist size as an additional uint32_t
+      // member, but encoding it in this way saves one atomic
+      // compare-exchange on every entry allocation.
+      static_assert(kMaxExternalPointers <= (1ULL << 24));
+      static_assert(kExternalPointerFreeEntryTag >= (1ULL << 48));
+      DCHECK_LT(current_freelist_head, kMaxExternalPointers);
+      DCHECK_LT(current_freelist_size, kMaxExternalPointers);
+
+      Address value = current_freelist_size;
+      value <<= 24;
+      value |= current_freelist_head;
+      value |= kExternalPointerFreeEntryTag;
+      return Entry(value);
+    }
+
+    // Constructs an evacuation entry containing the given handle location.
+    static Entry MakeEvacuationEntry(Address handle_location) {
+      return Entry(handle_location | kExternalPointerEvacuationEntryTag);
+    }
+
+    // Encodes this entry into a pointer-sized word for storing it in the
+    // external pointer table.
+    Address Encode() const { return value_; }
+
+    // Decodes a previously encoded entry.
+    static Entry Decode(Address value) { return Entry(value); }
+
+    bool operator==(Entry other) const { return value_ == other.value_; }
+    bool operator!=(Entry other) const { return value_ != other.value_; }
+
+   private:
+    explicit Entry(Address value) : value_(value) {}
+
+    // The raw content of an entry. The top bits will contain the tag and
+    // marking bit, the lower bits contain the pointer payload. Refer to the
+    // ExternalPointerTag and kExternalPointerMarkBit definitions to see
+    // which bits are used for what purpose.
+    Address value_;
+  };
+
+  //
+  // Low-level entry accessors.
+  //
   // Computes the address of the specified entry.
   inline Address entry_address(uint32_t index) const {
     return buffer_ + index * sizeof(Address);
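To see how the pieces of the new Entry class compose, here is a hedged, standalone approximation of its round trips (Address, the tag constants, and the DCHECKs are stubbed with illustrative values; this is not the V8 class itself):

#include <cassert>
#include <cstdint>

using Address = uint64_t;

// Simplified tag layout for illustration; V8's real constants differ.
constexpr Address kTagMask      = 0xffffULL << 48;
constexpr Address kFreeEntryTag = 0x7fffULL << 48;
constexpr Address kRegularTag   = 0x8005ULL << 48;

class Entry {
 public:
  static Entry MakeRegularEntry(Address value, Address tag) {
    return Entry(value | tag);
  }
  static Entry MakeFreelistEntry(uint32_t next, uint32_t size) {
    return Entry((Address{size} << 24) | next | kFreeEntryTag);
  }
  bool HasTag(Address tag) const { return (value_ & kTagMask) == tag; }
  bool IsFreelistEntry() const { return HasTag(kFreeEntryTag); }
  Address Untag(Address tag) const { return value_ & ~tag; }
  uint32_t ExtractNextFreelistEntry() const {
    return static_cast<uint32_t>(value_) & 0x00ffffff;
  }
  uint32_t ExtractFreelistSize() const {
    return static_cast<uint32_t>(value_ >> 24) & 0x00ffffff;
  }

 private:
  explicit Entry(Address value) : value_(value) {}
  Address value_;
};

int main() {
  Address payload = 0x00007f0012345678;
  Entry regular = Entry::MakeRegularEntry(payload, kRegularTag);
  assert(!regular.IsFreelistEntry());
  assert(regular.Untag(kRegularTag) == payload);

  Entry free_entry = Entry::MakeFreelistEntry(/*next=*/7, /*size=*/3);
  assert(free_entry.IsFreelistEntry());
  assert(free_entry.ExtractNextFreelistEntry() == 7);
  assert(free_entry.ExtractFreelistSize() == 3);
  return 0;
}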
@@ -327,111 +462,69 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
   // index. This is necessary because LSan is unable to scan the pointers in
   // the main table due to the pointer tagging scheme (the values don't "look
   // like" pointers). So instead it can scan the pointers in the shadow table.
-  inline void lsan_record_ptr(uint32_t index, Address value) {
+  // This only works because the payload part of an external pointer is only
+  // modified on one thread (but e.g. the marking bit may be modified from
+  // background threads). Otherwise this would always be racy as the Store
+  // methods below are no longer atomic.
+  inline void RecordEntryForLSan(uint32_t index, Entry entry) {
 #if defined(LEAK_SANITIZER)
-    base::Memory<Address>(entry_address(index) +
-                          kExternalPointerTableReservationSize) =
-        value & ~kExternalPointerTagMask;
+    auto addr = entry_address(index) + kExternalPointerTableReservationSize;
+    base::Memory<Address>(addr) = entry.UncheckedUntag();
 #endif  // LEAK_SANITIZER
   }
 
-  // Loads the value at the given index. This method is non-atomic, only use it
+  // Loads the entry at the given index. This method is non-atomic, only use it
   // when no other threads can currently access the table.
-  inline Address load(uint32_t index) const {
-    return base::Memory<Address>(entry_address(index));
+  Entry Load(uint32_t index) const {
+    auto raw_value = base::Memory<Address>(entry_address(index));
+    return Entry::Decode(raw_value);
   }
 
-  // Stores the provided value at the given index. This method is non-atomic,
+  // Stores the provided entry at the given index. This method is non-atomic,
   // only use it when no other threads can currently access the table.
-  inline void store(uint32_t index, Address value) {
-    lsan_record_ptr(index, value);
-    base::Memory<Address>(entry_address(index)) = value;
+  void Store(uint32_t index, Entry entry) {
+    RecordEntryForLSan(index, entry);
+    base::Memory<Address>(entry_address(index)) = entry.Encode();
   }
 
-  // Atomically loads the value at the given index.
-  inline Address load_atomic(uint32_t index) const {
+  // Atomically loads the entry at the given index.
+  Entry RelaxedLoad(uint32_t index) const {
     auto addr = reinterpret_cast<base::Atomic64*>(entry_address(index));
-    return base::Relaxed_Load(addr);
+    auto raw_value = base::Relaxed_Load(addr);
+    return Entry::Decode(raw_value);
   }
 
-  // Atomically stores the provided value at the given index.
-  inline void store_atomic(uint32_t index, Address value) {
-    lsan_record_ptr(index, value);
+  // Atomically stores the provided entry at the given index.
+  void RelaxedStore(uint32_t index, Entry entry) {
+    RecordEntryForLSan(index, entry);
     auto addr = reinterpret_cast<base::Atomic64*>(entry_address(index));
-    base::Relaxed_Store(addr, value);
+    base::Relaxed_Store(addr, entry.Encode());
   }
 
-  // Atomically exchanges the value at the given index with the provided value.
-  inline Address exchange_atomic(uint32_t index, Address value) {
-    lsan_record_ptr(index, value);
+  Entry RelaxedCompareAndSwap(uint32_t index, Entry old_entry,
+                              Entry new_entry) {
+    // This is not calling RecordEntryForLSan as that would be racy. This is
+    // ok however because this method is only used to set the marking bit.
     auto addr = reinterpret_cast<base::Atomic64*>(entry_address(index));
-    return static_cast<Address>(base::Relaxed_AtomicExchange(addr, value));
+    auto raw_value = base::Relaxed_CompareAndSwap(addr, old_entry.Encode(),
+                                                  new_entry.Encode());
+    return Entry::Decode(raw_value);
  }
 
-  static bool is_marked(Address entry) {
-    return (entry & kExternalPointerMarkBit) == kExternalPointerMarkBit;
-  }
-
-  static Address set_mark_bit(Address entry) {
-    return entry | kExternalPointerMarkBit;
-  }
-
-  static Address clear_mark_bit(Address entry) {
-    return entry & ~kExternalPointerMarkBit;
-  }
-
-  static bool is_free(Address entry) {
-    return (entry & kExternalPointerFreeEntryTag) ==
-           kExternalPointerFreeEntryTag;
-  }
-
-  static uint32_t extract_next_entry_from_freelist_entry(Address entry) {
-    // See make_freelist_entry below.
-    return static_cast<uint32_t>(entry) & 0x00ffffff;
-  }
-
-  static uint32_t extract_freelist_size_from_freelist_entry(Address entry) {
-    // See make_freelist_entry below.
-    return static_cast<uint32_t>(entry >> 24) & 0x00ffffff;
-  }
-
-  static Address make_freelist_entry(uint32_t current_freelist_head,
-                                     uint32_t current_freelist_size) {
-    // The next freelist entry is stored in the lower 24 bits of the entry. The
-    // freelist size is stored in the next 24 bits. If we ever need larger
-    // tables, and therefore larger indices to encode the next free entry, we
-    // can make the freelist size an approximation and drop some of the bottom
-    // bits of the value when encoding it.
-    // We could also keep the freelist size as an additional uint32_t member,
-    // but encoding it in this way saves one atomic compare-exchange on every
-    // entry allocation.
-    static_assert(kMaxExternalPointers <= (1ULL << 24));
-    static_assert(kExternalPointerFreeEntryTag >= (1ULL << 48));
-    DCHECK_LT(current_freelist_head, kMaxExternalPointers);
-    DCHECK_LT(current_freelist_size, kMaxExternalPointers);
-
-    Address entry = current_freelist_size;
-    entry <<= 24;
-    entry |= current_freelist_head;
-    entry |= kExternalPointerFreeEntryTag;
-    return entry;
-  }
-
-  static bool is_evacuation_entry(Address entry) {
-    return (entry & kExternalPointerTagMask) == kEvacuationEntryTag;
-  }
-
-  static Address extract_handle_location_from_evacuation_entry(Address entry) {
-    return entry & ~kEvacuationEntryTag;
-  }
-
-  static Address make_evacuation_entry(Address handle_location) {
-    return handle_location | kEvacuationEntryTag;
-  }
-
-  // The buffer backing this table. This is const after initialization. Should
-  // only be accessed using the load_x() and store_x() methods, which take care
-  // of atomicity if necessary.
+  // Atomically exchanges the entry at the given index with the provided entry.
+  Entry RelaxedExchange(uint32_t index, Entry entry) {
+    RecordEntryForLSan(index, entry);
+    auto addr = reinterpret_cast<base::Atomic64*>(entry_address(index));
+    auto raw_value = base::Relaxed_AtomicExchange(addr, entry.Encode());
+    return Entry::Decode(raw_value);
+  }
+
+  //
+  // ExternalPointerTable fields.
+  //
+  // The buffer backing this table. Essentially an array of Entry instances.
+  // This is const after initialization. Should only be accessed using the
+  // Load() and Store() methods, which take care of atomicity if necessary.
   Address buffer_ = kNullAddress;
 
   // The current capacity of this table, which is the number of usable entries.
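A closing note on the LSan shadow table used by RecordEntryForLSan: tagged entries do not look like heap pointers to the leak scanner, so an untagged copy of each pointer is mirrored at a fixed offset from the main table. A simplified illustration of the idea (offset and mask are invented; V8 keys this on LEAK_SANITIZER):

#include <cstddef>
#include <cstdint>

constexpr uint64_t kTagMask = 0xffffULL << 48;  // illustrative mask
constexpr size_t kShadowOffsetBytes = 1 << 20;  // hypothetical offset

// Mirrors the untagged payload into a shadow slot the leak scanner can
// recognize, since the tagged value in the main slot does not resemble
// a heap pointer.
void RecordEntryForLSanSketch(uint64_t* table, uint32_t index,
                              uint64_t entry) {
  uint64_t* shadow = table + kShadowOffsetBytes / sizeof(uint64_t);
  shadow[index] = entry & ~kTagMask;
}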