[sandbox] Implement GC for the external pointer table
The external pointer table is now managed by the GC, which marks entries that
are alive during major GC, then sweeps the table afterwards to free all dead
entries and build a free list from them. For now, only major GCs are
supported; Scavenger GCs do not interact with the external pointer table.

In more detail, garbage collection of the external pointer table works as
follows:

1. The external pointer table now reserves a large region of virtual address
   space for its backing buffer and is then never reallocated, only grown in
   place until the maximum size is reached.
2. When the GC's marking visitor marks a HeapObject with an external pointer
   as alive, it also marks the corresponding external pointer table entry as
   alive. This can happen on a background thread.
3. For that, it uses the MSB of each entry in the table to indicate whether
   the entry has been marked or not. This works because the MSB is always
   cleared during the AND-based type check performed when accessing an
   external pointer.
4. After marking, the external pointer table is swept while the mutator is
   stopped. This builds an inline, singly-linked freelist of all newly-dead
   and previously-free entries.
5. When allocating an entry from the table, the first entry on the freelist
   is used. If the freelist is empty, the table grows, populating the
   freelist with the new entries.
6. Every newly-allocated entry is marked as alive, and every store to an
   existing entry also automatically marks that entry as alive (by also
   setting the MSB). This simplifies the design of the table GC with regard
   to concurrency (see ExternalPointerTable::Mark).

Bug: v8:10391
Change-Id: I8877fdf5576af3761bde65298951bb09e601bd14
Cq-Include-Trybots: luci.v8.try:v8_linux64_heap_sandbox_dbg_ng,v8_linux_arm64_sim_heap_sandbox_dbg_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3359625
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Maya Lekova <mslekova@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Commit-Queue: Samuel Groß <saelo@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78708}
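The interplay of points 3 and 6 — stores set the mark bit, the type check
clears it — can be shown in isolation. The following is a minimal standalone
sketch of the scheme, not code from this change; the tag value and function
names are made up:

#include <cassert>
#include <cstdint>

// The MSB doubles as the GC mark bit and is part of every (non-free) tag, so
// a store marks the entry and the AND-based type check clears the mark again.
constexpr uint64_t kMarkBit = uint64_t{1} << 63;
constexpr uint64_t kFooTag = kMarkBit | (uint64_t{0x00ff} << 48);

uint64_t MakeEntry(uint64_t pointer) {
  return pointer | kFooTag;  // type-tags the entry and sets the mark bit
}

uint64_t LoadEntry(uint64_t entry) {
  // One AND performs the type check and strips the mark bit: a mismatched
  // tag leaves top bits set, yielding a non-canonical, inaccessible pointer.
  return entry & ~kFooTag;
}

int main() {
  uint64_t entry = MakeEntry(0x00007fff12345678);
  assert(entry & kMarkBit);                        // marked alive by the store
  assert(LoadEntry(entry) == 0x00007fff12345678);  // tag and mark bit stripped
  return 0;
}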
parent dc97b45058
commit 4a3e41c5ca
BUILD.bazel

@@ -1144,7 +1144,6 @@ filegroup(
         "src/common/high-allocation-throughput-scope.h",
         "src/common/message-template.h",
         "src/common/ptr-compr-inl.h",
-        "src/common/ptr-compr.h",
         "src/compiler-dispatcher/lazy-compile-dispatcher.cc",
         "src/compiler-dispatcher/lazy-compile-dispatcher.h",
         "src/compiler-dispatcher/optimizing-compile-dispatcher.cc",
@@ -1923,6 +1922,7 @@ filegroup(
         "src/sandbox/external-pointer-inl.h",
         "src/sandbox/external-pointer.h",
         "src/sandbox/external-pointer-table.cc",
+        "src/sandbox/external-pointer-table-inl.h",
         "src/sandbox/external-pointer-table.h",
         "src/sandbox/sandbox.cc",
         "src/sandbox/sandbox.h",
BUILD.gn
@@ -2717,7 +2717,6 @@ v8_header_set("v8_internal_headers") {
     "src/common/high-allocation-throughput-scope.h",
     "src/common/message-template.h",
     "src/common/ptr-compr-inl.h",
-    "src/common/ptr-compr.h",
     "src/compiler-dispatcher/lazy-compile-dispatcher.h",
     "src/compiler-dispatcher/optimizing-compile-dispatcher.h",
     "src/compiler/access-builder.h",
@@ -3337,6 +3336,7 @@ v8_header_set("v8_internal_headers") {
     "src/runtime/runtime-utils.h",
     "src/runtime/runtime.h",
     "src/sandbox/external-pointer-inl.h",
+    "src/sandbox/external-pointer-table-inl.h",
     "src/sandbox/external-pointer-table.h",
     "src/sandbox/external-pointer.h",
     "src/sandbox/sandbox.h",
include/v8-internal.h

@@ -29,6 +29,13 @@ class Isolate;
 typedef uintptr_t Address;
 static const Address kNullAddress = 0;
 
+constexpr int KB = 1024;
+constexpr int MB = KB * 1024;
+constexpr int GB = MB * 1024;
+#ifdef V8_TARGET_ARCH_X64
+constexpr size_t TB = size_t{GB} * 1024;
+#endif
+
 /**
  * Configuration of tagging scheme.
  */
@@ -109,6 +116,11 @@ struct SmiTagging<8> {
 };
 
 #ifdef V8_COMPRESS_POINTERS
+// See v8:7703 or src/common/ptr-compr-inl.h for details about pointer
+// compression.
+constexpr size_t kPtrComprCageReservationSize = size_t{1} << 32;
+constexpr size_t kPtrComprCageBaseAlignment = size_t{1} << 32;
+
 static_assert(
     kApiSystemPointerSize == kApiInt64Size,
     "Pointer compression can be enabled only for 64-bit architectures");
@@ -121,36 +133,6 @@ constexpr bool PointerCompressionIsEnabled() {
   return kApiTaggedSize != kApiSystemPointerSize;
 }
 
-constexpr bool SandboxedExternalPointersAreEnabled() {
-#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
-  return true;
-#else
-  return false;
-#endif
-}
-
-using ExternalPointer_t = Address;
-
-// If sandboxed external pointers are enabled, these tag values will be ORed
-// with the external pointers in the external pointer table to prevent use of
-// pointers of the wrong type. When a pointer is loaded, it is ANDed with the
-// inverse of the expected type's tag. The tags are constructed in a way that
-// guarantees that a failed type check will result in one or more of the top
-// bits of the pointer to be set, rendering the pointer inaccessible. This
-// construction allows performing the type check and removing GC marking bits
-// from the pointer at the same time.
-enum ExternalPointerTag : uint64_t {
-  kExternalPointerNullTag =         0x0000000000000000,
-  kExternalStringResourceTag =      0x00ff000000000000,  // 0b000000011111111
-  kExternalStringResourceDataTag =  0x017f000000000000,  // 0b000000101111111
-  kForeignForeignAddressTag =       0x01bf000000000000,  // 0b000000110111111
-  kNativeContextMicrotaskQueueTag = 0x01df000000000000,  // 0b000000111011111
-  kEmbedderDataSlotPayloadTag =     0x01ef000000000000,  // 0b000000111101111
-  kCodeEntryPointTag =              0x01f7000000000000,  // 0b000000111110111
-};
-
-constexpr uint64_t kExternalPointerTagMask = 0xffff000000000000;
-
 #ifdef V8_31BIT_SMIS_ON_64BIT_ARCH
 using PlatformSmiTagging = SmiTagging<kApiInt32Size>;
 #else
@@ -171,6 +153,148 @@ V8_INLINE static constexpr internal::Address IntToSmi(int value) {
          kSmiTag;
 }
 
+/*
+ * Sandbox related types, constants, and functions.
+ */
+constexpr bool SandboxIsEnabled() {
+#ifdef V8_SANDBOX
+  return true;
+#else
+  return false;
+#endif
+}
+
+constexpr bool SandboxedExternalPointersAreEnabled() {
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+  return true;
+#else
+  return false;
+#endif
+}
+
+// SandboxedPointers are guaranteed to point into the sandbox. This is achieved
+// for example by storing them as offset rather than as raw pointers.
+using SandboxedPointer_t = Address;
+
+// ExternalPointers point to objects located outside the sandbox. When sandboxed
+// external pointers are enabled, these are stored in an external pointer table
+// and referenced from HeapObjects through indices.
+using ExternalPointer_t = Address;
+
+#ifdef V8_SANDBOX_IS_AVAILABLE
+
+// Size of the sandbox, excluding the guard regions surrounding it.
+constexpr size_t kSandboxSizeLog2 = 40;  // 1 TB
+constexpr size_t kSandboxSize = 1ULL << kSandboxSizeLog2;
+
+// Required alignment of the sandbox. For simplicity, we require the
+// size of the guard regions to be a multiple of this, so that this specifies
+// the alignment of the sandbox including and excluding surrounding guard
+// regions. The alignment requirement is due to the pointer compression cage
+// being located at the start of the sandbox.
+constexpr size_t kSandboxAlignment = kPtrComprCageBaseAlignment;
+
+// Sandboxed pointers are stored inside the heap as offset from the sandbox
+// base shifted to the left. This way, it is guaranteed that the offset is
+// smaller than the sandbox size after shifting it to the right again. This
+// constant specifies the shift amount.
+constexpr uint64_t kSandboxedPointerShift = 64 - kSandboxSizeLog2;
+
+// Size of the guard regions surrounding the sandbox. This assumes a worst-case
+// scenario of a 32-bit unsigned index used to access an array of 64-bit
+// values.
+constexpr size_t kSandboxGuardRegionSize = 32ULL * GB;
+
+static_assert((kSandboxGuardRegionSize % kSandboxAlignment) == 0,
+              "The size of the guard regions around the sandbox must be a "
+              "multiple of its required alignment.");
+
+// Minimum size of the sandbox, excluding the guard regions surrounding it. If
+// the virtual memory reservation for the sandbox fails, its size is currently
+// halved until either the reservation succeeds or the minimum size is reached.
+// A minimum of 32GB allows the 4GB pointer compression region as well as the
+// ArrayBuffer partition and two 10GB WASM memory cages to fit into the
+// sandbox. 32GB should also be the minimum possible size of the userspace
+// address space as there are some machine configurations with only 36 virtual
+// address bits.
+constexpr size_t kSandboxMinimumSize = 32ULL * GB;
+
+static_assert(kSandboxMinimumSize <= kSandboxSize,
+              "The minimal size of the sandbox must be smaller or equal to the "
+              "regular size.");
+
+// On OSes where reserving virtual memory is too expensive to reserve the
+// entire address space backing the sandbox, notably Windows pre 8.1, we create
+// a partially reserved sandbox that doesn't actually reserve most of the
+// memory, and so doesn't have the desired security properties as unrelated
+// memory allocations could end up inside of it, but which still ensures that
+// objects that should be located inside the sandbox are allocated within
+// kSandboxSize bytes from the start of the sandbox. The minimum size of the
+// region that is actually reserved for such a sandbox is specified by this
+// constant and should be big enough to contain the pointer compression cage as
+// well as the ArrayBuffer partition.
+constexpr size_t kSandboxMinimumReservationSize = 8ULL * GB;
+
+static_assert(kSandboxMinimumSize > kPtrComprCageReservationSize,
+              "The sandbox must be larger than the pointer compression cage "
+              "contained within it.");
+static_assert(kSandboxMinimumReservationSize > kPtrComprCageReservationSize,
+              "The minimum reservation size for a sandbox must be larger than "
+              "the pointer compression cage contained within it.");
+
+// For now, even if the sandbox is enabled, we still allow backing stores to be
+// allocated outside of it as fallback. This will simplify the initial rollout.
+// However, if sandboxed pointers are also enabled, we must always place
+// backing stores inside the sandbox as they will be referenced through them.
+#ifdef V8_SANDBOXED_POINTERS
+constexpr bool kAllowBackingStoresOutsideSandbox = false;
+#else
+constexpr bool kAllowBackingStoresOutsideSandbox = true;
+#endif  // V8_SANDBOXED_POINTERS
+
+#endif  // V8_SANDBOX_IS_AVAILABLE
+
+// If sandboxed external pointers are enabled, these tag values will be ORed
+// with the external pointers in the external pointer table to prevent use of
+// pointers of the wrong type. When a pointer is loaded, it is ANDed with the
+// inverse of the expected type's tag. The tags are constructed in a way that
+// guarantees that a failed type check will result in one or more of the top
+// bits of the pointer to be set, rendering the pointer inaccessible. Besides
+// the type tag bits (48 through 62), the tags also have the GC mark bit (63)
+// set, so that the mark bit is automatically set when a pointer is written
+// into the external pointer table (in which case it is clearly alive) and is
+// cleared when the pointer is loaded. The exception to this is the free entry
+// tag, which doesn't have the mark bit set, as the entry is not alive. This
+// construction allows performing the type check and removing GC marking bits
+// (the MSB) from the pointer at the same time.
+// Note: this scheme assumes a 48-bit address space and will likely break if
+// more virtual address bits are used.
+// clang-format off
+enum ExternalPointerTag : uint64_t {
+  kExternalPointerNullTag =         0b0000000000000000ULL << 48,
+  kExternalPointerFreeEntryTag =    0b0111111110000000ULL << 48,
+  kExternalStringResourceTag =      0b1000000011111111ULL << 48,
+  kExternalStringResourceDataTag =  0b1000000101111111ULL << 48,
+  kForeignForeignAddressTag =       0b1000000110111111ULL << 48,
+  kNativeContextMicrotaskQueueTag = 0b1000000111011111ULL << 48,
+  kEmbedderDataSlotPayloadTag =     0b1000000111101111ULL << 48,
+  kCodeEntryPointTag =              0b1000000111110111ULL << 48,
+};
+// clang-format on
+
+constexpr uint64_t kExternalPointerTagMask = 0xffff000000000000;
+
+// The size of the virtual memory reservation for an external pointer table.
+// This determines the maximum number of entries in a table. Using a maximum
+// size allows omitting bounds checks on table accesses if the indices are
+// guaranteed (e.g. through shifting) to be below the maximum index. This
+// value must be a power of two.
+static const size_t kExternalPointerTableReservationSize = 128 * MB;
+
+// The maximum number of entries in an external pointer table.
+static const size_t kMaxSandboxedExternalPointers =
+    kExternalPointerTableReservationSize / kApiSystemPointerSize;
+
 // Converts encoded external pointer to address.
 V8_EXPORT Address DecodeExternalPointerImpl(const Isolate* isolate,
                                             ExternalPointer_t pointer,
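The guarantee behind kSandboxedPointerShift above is plain unsigned arithmetic
and can be checked directly; an illustrative standalone sketch, not part of
the change:

#include <cassert>
#include <cstdint>

int main() {
  constexpr uint64_t kSandboxSizeLog2 = 40;
  constexpr uint64_t kShift = 64 - kSandboxSizeLog2;  // 24

  uint64_t offset = 0x123456789aULL;   // some offset below 2^40
  uint64_t stored = offset << kShift;  // on-heap representation

  // A logical right shift of any 64-bit value by 24 yields a result below
  // 2^40, so even a corrupted stored value decodes to an in-sandbox offset.
  uint64_t loaded = stored >> kShift;
  assert(loaded == offset);
  assert(loaded < (uint64_t{1} << kSandboxSizeLog2));
  return 0;
}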
@@ -250,10 +374,10 @@ class Internals {
       kIsolateLongTaskStatsCounterOffset + kApiSizetSize;
 
   static const int kExternalPointerTableBufferOffset = 0;
-  static const int kExternalPointerTableLengthOffset =
-      kExternalPointerTableBufferOffset + kApiSystemPointerSize;
   static const int kExternalPointerTableCapacityOffset =
-      kExternalPointerTableLengthOffset + kApiInt32Size;
+      kExternalPointerTableBufferOffset + kApiSystemPointerSize;
+  static const int kExternalPointerTableFreelistHeadOffset =
+      kExternalPointerTableCapacityOffset + kApiInt32Size;
 
   static const int kUndefinedValueRootIndex = 4;
   static const int kTheHoleValueRootIndex = 5;
@@ -467,10 +591,6 @@ class Internals {
   }
 
 #ifdef V8_COMPRESS_POINTERS
-  // See v8:7703 or src/ptr-compr.* for details about pointer compression.
-  static constexpr size_t kPtrComprCageReservationSize = size_t{1} << 32;
-  static constexpr size_t kPtrComprCageBaseAlignment = size_t{1} << 32;
-
   V8_INLINE static internal::Address GetPtrComprCageBaseFromOnHeapAddress(
       internal::Address addr) {
     return addr & -static_cast<intptr_t>(kPtrComprCageBaseAlignment);
@@ -486,98 +606,6 @@ class Internals {
 #endif  // V8_COMPRESS_POINTERS
 };
 
-constexpr bool SandboxIsEnabled() {
-#ifdef V8_SANDBOX
-  return true;
-#else
-  return false;
-#endif
-}
-
-// SandboxedPointers are guaranteed to point into the sandbox. This is achieved
-// for example by storing them as offset rather than as raw pointers.
-using SandboxedPointer_t = Address;
-
-#ifdef V8_SANDBOX_IS_AVAILABLE
-
-#define GB (1ULL << 30)
-#define TB (1ULL << 40)
-
-// Size of the sandbox, excluding the guard regions surrounding it.
-constexpr size_t kSandboxSizeLog2 = 40;  // 1 TB
-constexpr size_t kSandboxSize = 1ULL << kSandboxSizeLog2;
-
-// Required alignment of the sandbox. For simplicity, we require the
-// size of the guard regions to be a multiple of this, so that this specifies
-// the alignment of the sandbox including and excluding surrounding guard
-// regions. The alignment requirement is due to the pointer compression cage
-// being located at the start of the sandbox.
-constexpr size_t kSandboxAlignment = Internals::kPtrComprCageBaseAlignment;
-
-// Sandboxed pointers are stored inside the heap as offset from the sandbox
-// base shifted to the left. This way, it is guaranteed that the offset is
-// smaller than the sandbox size after shifting it to the right again. This
-// constant specifies the shift amount.
-constexpr uint64_t kSandboxedPointerShift = 64 - kSandboxSizeLog2;
-
-// Size of the guard regions surrounding the sandbox. This assumes a worst-case
-// scenario of a 32-bit unsigned index used to access an array of 64-bit
-// values.
-constexpr size_t kSandboxGuardRegionSize = 32ULL * GB;
-
-static_assert((kSandboxGuardRegionSize % kSandboxAlignment) == 0,
-              "The size of the guard regions around the sandbox must be a "
-              "multiple of its required alignment.");
-
-// Minimum size of the sandbox, excluding the guard regions surrounding it. If
-// the virtual memory reservation for the sandbox fails, its size is currently
-// halved until either the reservation succeeds or the minimum size is reached.
-// A minimum of 32GB allows the 4GB pointer compression region as well as the
-// ArrayBuffer partition and two 10GB WASM memory cages to fit into the
-// sandbox. 32GB should also be the minimum possible size of the userspace
-// address space as there are some machine configurations with only 36 virtual
-// address bits.
-constexpr size_t kSandboxMinimumSize = 32ULL * GB;
-
-static_assert(kSandboxMinimumSize <= kSandboxSize,
-              "The minimal size of the sandbox must be smaller or equal to the "
-              "regular size.");
-
-// On OSes where reserving virtual memory is too expensive to reserve the
-// entire address space backing the sandbox, notably Windows pre 8.1, we create
-// a partially reserved sandbox that doesn't actually reserve most of the
-// memory, and so doesn't have the desired security properties as unrelated
-// memory allocations could end up inside of it, but which still ensures that
-// objects that should be located inside the sandbox are allocated within
-// kSandboxSize bytes from the start of the sandbox. The minimum size of the
-// region that is actually reserved for such a sandbox is specified by this
-// constant and should be big enough to contain the pointer compression cage as
-// well as the ArrayBuffer partition.
-constexpr size_t kSandboxMinimumReservationSize = 8ULL * GB;
-
-static_assert(kSandboxMinimumSize > Internals::kPtrComprCageReservationSize,
-              "The sandbox must be larger than the pointer compression cage "
-              "contained within it.");
-static_assert(kSandboxMinimumReservationSize >
-                  Internals::kPtrComprCageReservationSize,
-              "The minimum reservation size for a sandbox must be larger than "
-              "the pointer compression cage contained within it.");
-
-// For now, even if the sandbox is enabled, we still allow backing stores to be
-// allocated outside of it as fallback. This will simplify the initial rollout.
-// However, if sandboxed pointers are also enabled, we must always place
-// backing stores inside the sandbox as they will be referenced through them.
-#ifdef V8_SANDBOXED_POINTERS
-constexpr bool kAllowBackingStoresOutsideSandbox = false;
-#else
-constexpr bool kAllowBackingStoresOutsideSandbox = true;
-#endif  // V8_SANDBOXED_POINTERS
-
-#undef GB
-#undef TB
-
-#endif  // V8_SANDBOX_IS_AVAILABLE
-
 // Only perform cast check for types derived from v8::Data since
 // other types do not implement the Cast method.
 template <bool PerformCheck>
include/v8config.h

@@ -585,6 +585,10 @@ V8 shared library set USING_V8_SHARED.
 #define V8_SANDBOX_IS_AVAILABLE
 #endif
 
+#if defined(V8_SANDBOX) && !defined(V8_SANDBOX_IS_AVAILABLE)
+#error Inconsistent configuration: sandbox is enabled but not available
+#endif
+
 // From C++17 onwards, static constexpr member variables are defined to be
 // "inline", and adding a separate definition for them can trigger deprecation
 // warnings. For C++14 and below, however, these definitions are required.
src/codegen/code-stub-assembler.cc

@@ -1599,39 +1599,65 @@ TNode<Uint32T> CodeStubAssembler::ChangeExternalPointerToUint32(
 void CodeStubAssembler::InitializeExternalPointerField(TNode<HeapObject> object,
                                                        TNode<IntPtrT> offset) {
 #ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+  TVARIABLE(Uint32T, index);
+
   TNode<ExternalReference> external_pointer_table_address = ExternalConstant(
       ExternalReference::external_pointer_table_address(isolate()));
-  TNode<Uint32T> table_length = UncheckedCast<Uint32T>(
-      Load(MachineType::Uint32(), external_pointer_table_address,
-           UintPtrConstant(Internals::kExternalPointerTableLengthOffset)));
-  TNode<Uint32T> table_capacity = UncheckedCast<Uint32T>(
-      Load(MachineType::Uint32(), external_pointer_table_address,
-           UintPtrConstant(Internals::kExternalPointerTableCapacityOffset)));
+  TNode<RawPtrT> table = UncheckedCast<RawPtrT>(
+      Load(MachineType::Pointer(), external_pointer_table_address,
+           UintPtrConstant(Internals::kExternalPointerTableBufferOffset)));
+  // Note: if external pointer table entries are ever allocated from a
+  // background thread, this logic must become atomic, for example by doing an
+  // atomic load of the current freelist head, then writing back the new
+  // freelist head in a CAS loop.
+  TNode<Uint32T> freelist_head = UncheckedCast<Uint32T>(Load(
+      MachineType::Uint32(), external_pointer_table_address,
+      UintPtrConstant(Internals::kExternalPointerTableFreelistHeadOffset)));
 
-  Label grow_table(this, Label::kDeferred), finish(this);
+  Label take_from_freelist(this), call_runtime(this, Label::kDeferred),
+      done(this);
+  TNode<BoolT> compare = Word32Equal(freelist_head, Uint32Constant(0));
+  Branch(compare, &call_runtime, &take_from_freelist);
 
-  TNode<BoolT> compare = Uint32LessThan(table_length, table_capacity);
-  Branch(compare, &finish, &grow_table);
-
-  BIND(&grow_table);
+  BIND(&take_from_freelist);
   {
-    TNode<ExternalReference> table_grow_function = ExternalConstant(
-        ExternalReference::external_pointer_table_grow_table_function());
-    CallCFunction(
-        table_grow_function, MachineType::Pointer(),
-        std::make_pair(MachineType::Pointer(), external_pointer_table_address));
-    Goto(&finish);
+    index = freelist_head;
+
+    // The next freelist entry is stored in the lower 32 bits of the entry.
+    TNode<IntPtrT> entry_offset = ElementOffsetFromIndex(
+        ChangeUint32ToWord(index.value()), SYSTEM_POINTER_ELEMENTS, 0);
+    TNode<UintPtrT> entry = UncheckedCast<UintPtrT>(
+        Load(MachineType::Pointer(), table, entry_offset));
+    TNode<Uint32T> next_freelist_elem = Unsigned(TruncateWordToInt32(entry));
+    StoreNoWriteBarrier(
+        MachineRepresentation::kWord32, external_pointer_table_address,
+        UintPtrConstant(Internals::kExternalPointerTableFreelistHeadOffset),
+        next_freelist_elem);
+
+    Goto(&done);
   }
-  BIND(&finish);
 
-  TNode<Uint32T> new_table_length = Uint32Add(table_length, Uint32Constant(1));
-  StoreNoWriteBarrier(
-      MachineRepresentation::kWord32, external_pointer_table_address,
-      UintPtrConstant(Internals::kExternalPointerTableLengthOffset),
-      new_table_length);
+  BIND(&call_runtime);
+  {
+    TNode<ExternalReference> table_allocate_function = ExternalConstant(
+        ExternalReference::external_pointer_table_allocate_entry());
+    index = UncheckedCast<Uint32T>(
+        CallCFunction(table_allocate_function, MachineType::Uint32(),
+                      std::make_pair(MachineType::Pointer(),
+                                     external_pointer_table_address)));
+
+    Goto(&done);
+  }
+  BIND(&done);
 
-  TNode<Uint32T> index = table_length;
-  TNode<ExternalPointerT> encoded = ChangeUint32ToExternalPointer(index);
+  // Currently, we assume that the caller will immediately initialize the entry
+  // through StoreExternalPointerToObject after allocating it. That way, we
+  // avoid initializing the entry twice (once with nullptr, then again with the
+  // real value). TODO(saelo) initialize the entry with zero here and switch
+  // callers to a version that initializes the entry with a given pointer.
+
+  TNode<ExternalPointerT> encoded =
+      ChangeUint32ToExternalPointer(index.value());
   StoreObjectFieldNoWriteBarrier<ExternalPointerT>(object, offset, encoded);
 #endif
 }
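The fast path of this CSA code is an ordinary freelist pop. The following
standalone sketch models it (illustration only — the real table stores full
tagged entries and grows committed virtual memory rather than a std::vector):

#include <cassert>
#include <cstdint>
#include <vector>

// Each free entry stores the index of the next free entry in its low 32 bits;
// freelist_head == 0 means "empty" because entry 0 is the reserved null entry
// and never appears on the freelist.
struct Table {
  std::vector<uint64_t> buffer{0};  // entry 0: reserved null entry
  uint32_t freelist_head = 0;

  void Grow(size_t count) {
    // New entries are pushed onto the freelist as they are created (point 5
    // of the commit message).
    for (size_t i = 0; i < count; i++) {
      buffer.push_back(freelist_head);
      freelist_head = static_cast<uint32_t>(buffer.size() - 1);
    }
  }

  uint32_t Allocate() {
    if (freelist_head == 0) Grow(4);  // slow path: grow, refilling the list
    uint32_t index = freelist_head;
    freelist_head = static_cast<uint32_t>(buffer[index]);  // pop the head
    return index;
  }
};

int main() {
  Table table;
  uint32_t a = table.Allocate();
  uint32_t b = table.Allocate();
  assert(a != 0 && b != 0 && a != b);
  return 0;
}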
src/codegen/external-reference.cc

@@ -1376,8 +1376,8 @@ FUNCTION_REFERENCE(
     JSFinalizationRegistry::RemoveCellFromUnregisterTokenMap)
 
 #ifdef V8_SANDBOXED_EXTERNAL_POINTERS
-FUNCTION_REFERENCE(external_pointer_table_grow_table_function,
-                   ExternalPointerTable::GrowTable)
+FUNCTION_REFERENCE(external_pointer_table_allocate_entry,
+                   ExternalPointerTable::AllocateEntry)
 #endif
 
 bool operator==(ExternalReference lhs, ExternalReference rhs) {
src/codegen/external-reference.h

@@ -322,8 +322,8 @@ class StatsCounter;
 
 #ifdef V8_SANDBOXED_EXTERNAL_POINTERS
 #define EXTERNAL_REFERENCE_LIST_SANDBOXED_EXTERNAL_POINTERS(V) \
-  V(external_pointer_table_grow_table_function,                \
-    "ExternalPointerTable::GrowTable")
+  V(external_pointer_table_allocate_entry,                     \
+    "ExternalPointerTable::AllocateEntry")
 #else
 #define EXTERNAL_REFERENCE_LIST_SANDBOXED_EXTERNAL_POINTERS(V)
 #endif  // V8_SANDBOXED_EXTERNAL_POINTERS
src/common/globals.h

@@ -30,11 +30,6 @@ class RecursiveMutex;
 
 namespace internal {
 
-constexpr int KB = 1024;
-constexpr int MB = KB * 1024;
-constexpr int GB = MB * 1024;
-constexpr int64_t TB = static_cast<int64_t>(GB) * 1024;
-
 // Determine whether we are running in a simulated environment.
 // Setting USE_SIMULATOR explicitly from the build script will force
 // the use of a simulated environment.
src/common/ptr-compr-inl.h

@@ -6,7 +6,6 @@
 #define V8_COMMON_PTR_COMPR_INL_H_
 
 #include "include/v8-internal.h"
-#include "src/common/ptr-compr.h"
 #include "src/execution/isolate.h"
 #include "src/execution/local-isolate-inl.h"
 
@@ -68,11 +67,6 @@ V8_INLINE Address DecompressTaggedAny(TOnHeapAddress on_heap_addr,
   return DecompressTaggedPointer(on_heap_addr, raw_value);
 }
 
-STATIC_ASSERT(kPtrComprCageReservationSize ==
-              Internals::kPtrComprCageReservationSize);
-STATIC_ASSERT(kPtrComprCageBaseAlignment ==
-              Internals::kPtrComprCageBaseAlignment);
-
 #else
 
 V8_INLINE Tagged_t CompressTagged(Address tagged) { UNREACHABLE(); }
src/common/ptr-compr.h (deleted)

@@ -1,24 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMMON_PTR_COMPR_H_
-#define V8_COMMON_PTR_COMPR_H_
-
-#include "src/common/globals.h"
-
-#ifdef V8_COMPRESS_POINTERS
-
-namespace v8 {
-namespace internal {
-
-// See v8:7703 for details about how pointer compression works.
-constexpr size_t kPtrComprCageReservationSize = size_t{4} * GB;
-constexpr size_t kPtrComprCageBaseAlignment = size_t{4} * GB;
-
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_COMPRESS_POINTERS
-
-#endif  // V8_COMMON_PTR_COMPR_H_
src/compiler/memory-lowering.cc

@@ -430,7 +430,7 @@ Node* MemoryLowering::DecodeExternalPointer(
   Node* table = __ Load(MachineType::Pointer(), external_pointer_table_address,
                         Internals::kExternalPointerTableBufferOffset);
   // TODO(v8:10391, saelo): bounds check if table is not caged
-  Node* offset = __ Int32Mul(index, __ Int32Constant(8));
+  Node* offset = __ Int32Mul(index, __ Int32Constant(sizeof(Address)));
   Node* decoded_ptr =
       __ Load(MachineType::Pointer(), table, __ ChangeUint32ToUint64(offset));
   if (external_pointer_tag != 0) {
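As a cross-check of the scaling used here against the table constants
declared in v8-internal.h above (an illustrative computation, not code from
the change):

#include <cstdint>

constexpr uint64_t MB = 1024 * 1024;
constexpr uint64_t kReservationSize = 128 * MB;    // table reservation size
constexpr uint64_t kEntrySize = sizeof(uint64_t);  // one Address per entry

// 2^24 entries: every valid index fits in 24 bits, which is what allows
// bounds checks to be omitted for indices that are guaranteed (e.g. through
// shifting) to stay below the maximum.
static_assert(kReservationSize / kEntrySize == uint64_t{1} << 24,
              "the table holds at most 2^24 entries");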
src/execution/isolate-data.h

@@ -52,14 +52,14 @@ class Isolate;
   /* Linear allocation areas for the heap's new and old space */             \
   V(kNewAllocationInfo, LinearAllocationArea::kSize, new_allocation_info)    \
   V(kOldAllocationInfo, LinearAllocationArea::kSize, old_allocation_info)    \
-  ISOLATE_DATA_FIELDS_HEAP_SANDBOX(V)                                        \
+  ISOLATE_DATA_FIELDS_SANDBOXED_EXTERNAL_POINTERS(V)                         \
   V(kStackIsIterableOffset, kUInt8Size, stack_is_iterable)
 
 #ifdef V8_SANDBOXED_EXTERNAL_POINTERS
-#define ISOLATE_DATA_FIELDS_HEAP_SANDBOX(V) \
-  V(kExternalPointerTableOffset, kSystemPointerSize * 3, external_pointer_table)
+#define ISOLATE_DATA_FIELDS_SANDBOXED_EXTERNAL_POINTERS(V) \
+  V(kExternalPointerTableOffset, kSystemPointerSize * 2, external_pointer_table)
 #else
-#define ISOLATE_DATA_FIELDS_HEAP_SANDBOX(V)
+#define ISOLATE_DATA_FIELDS_SANDBOXED_EXTERNAL_POINTERS(V)
 #endif  // V8_SANDBOXED_EXTERNAL_POINTERS
 
 // This class contains a collection of data accessible from both C++ runtime
@@ -250,7 +250,7 @@ void IsolateData::AssertPredictableLayout() {
   STATIC_ASSERT(sizeof(IsolateData) == IsolateData::kSize);
 }
 
-#undef ISOLATE_DATA_FIELDS_HEAP_SANDBOX
+#undef ISOLATE_DATA_FIELDS_SANDBOXED_EXTERNAL_POINTERS
 #undef ISOLATE_DATA_FIELDS
 
 }  // namespace internal
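The shrink from kSystemPointerSize * 3 to * 2 reflects the table's new inline
state: the 4-byte length field is gone, leaving an 8-byte buffer pointer plus
4-byte capacity and freelist-head fields. A sketch of the arithmetic, assuming
a 64-bit target (the struct below is a hypothetical mirror, not the real
class):

#include <cstdint>

// Hypothetical mirror of v8::internal::ExternalPointerTable's inline state on
// a 64-bit target, used here only for the size arithmetic.
struct TableLayout {
  uintptr_t buffer;        // kExternalPointerTableBufferOffset = 0
  uint32_t capacity;       // kExternalPointerTableCapacityOffset = 8
  uint32_t freelist_head;  // kExternalPointerTableFreelistHeadOffset = 12
};
static_assert(sizeof(TableLayout) == 2 * sizeof(uintptr_t),
              "the table state fits in kSystemPointerSize * 2");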
src/execution/isolate.cc

@@ -3274,10 +3274,10 @@ void Isolate::CheckIsolateLayout() {
 #ifdef V8_SANDBOXED_EXTERNAL_POINTERS
   CHECK_EQ(static_cast<int>(OFFSET_OF(ExternalPointerTable, buffer_)),
            Internals::kExternalPointerTableBufferOffset);
-  CHECK_EQ(static_cast<int>(OFFSET_OF(ExternalPointerTable, length_)),
-           Internals::kExternalPointerTableLengthOffset);
   CHECK_EQ(static_cast<int>(OFFSET_OF(ExternalPointerTable, capacity_)),
            Internals::kExternalPointerTableCapacityOffset);
+  CHECK_EQ(static_cast<int>(OFFSET_OF(ExternalPointerTable, freelist_head_)),
+           Internals::kExternalPointerTableFreelistHeadOffset);
 #endif
 }
 
@@ -3434,6 +3434,10 @@ void Isolate::Deinit() {
 
   ClearSerializerData();
 
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+  external_pointer_table().TearDown();
+#endif  // V8_SANDBOXED_EXTERNAL_POINTERS
+
   {
     base::MutexGuard lock_guard(&thread_data_table_mutex_);
     thread_data_table_.RemoveAllThreads();
@@ -3953,6 +3957,10 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
 
   isolate_data_.external_reference_table()->Init(this);
 
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+  external_pointer_table().Init(this);
+#endif  // V8_SANDBOXED_EXTERNAL_POINTERS
+
 #if V8_ENABLE_WEBASSEMBLY
   wasm::GetWasmEngine()->AddIsolate(this);
 #endif  // V8_ENABLE_WEBASSEMBLY
src/heap/mark-compact.cc

@@ -2434,6 +2434,14 @@ void MarkCompactCollector::ClearNonLiveReferences() {
 
   MarkDependentCodeForDeoptimization();
 
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+  {
+    TRACE_GC(heap()->tracer(),
+             GCTracer::Scope::MC_SWEEP_EXTERNAL_POINTER_TABLE);
+    isolate()->external_pointer_table().Sweep(isolate());
+  }
+#endif  // V8_SANDBOXED_EXTERNAL_POINTERS
+
   DCHECK(weak_objects_.transition_arrays.IsEmpty());
   DCHECK(weak_objects_.weak_references.IsEmpty());
   DCHECK(weak_objects_.weak_objects_in_code.IsEmpty());
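The Sweep() call above is implemented in external-pointer-table.cc and not
fully shown in this diff. Conceptually it is a single pass over the table, as
in the following simplified model (not the actual implementation) of point 4
of the commit message:

#include <cstdint>
#include <vector>

constexpr uint64_t kMarkBit = uint64_t{1} << 63;
// Stand-in for kExternalPointerFreeEntryTag, which has the mark bit clear.
constexpr uint64_t kFreeEntryTag = uint64_t{0x7f80} << 48;

// One backwards pass: marked entries are unmarked for the next GC cycle;
// dead and previously-free entries (mark bit clear) are pushed onto a fresh
// freelist whose next-index links live in the low 32 bits of each entry.
// Entry 0 is the reserved null entry and is skipped.
uint32_t Sweep(std::vector<uint64_t>& table) {
  if (table.empty()) return 0;
  uint32_t freelist_head = 0;
  for (uint32_t i = static_cast<uint32_t>(table.size()) - 1; i > 0; i--) {
    if (table[i] & kMarkBit) {
      table[i] &= ~kMarkBit;
    } else {
      table[i] = kFreeEntryTag | freelist_head;
      freelist_head = i;
    }
  }
  return freelist_head;
}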
src/heap/mark-compact.h

@@ -625,6 +625,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   // Free unmarked ArrayBufferExtensions.
   void SweepArrayBufferExtensions();
 
+  // Free unmarked entries in the ExternalPointerTable.
+  void SweepExternalPointerTable();
+
   void MarkLiveObjects() override;
 
   // Marks the object grey and adds it to the marking work list.
src/heap/marking-visitor.h

@@ -140,7 +140,13 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
         code_flush_mode_(code_flush_mode),
         is_embedder_tracing_enabled_(is_embedder_tracing_enabled),
         should_keep_ages_unchanged_(should_keep_ages_unchanged),
-        is_shared_heap_(heap->IsShared()) {}
+        is_shared_heap_(heap->IsShared())
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+        ,
+        external_pointer_table_(&heap->isolate()->external_pointer_table())
+#endif  // V8_SANDBOXED_EXTERNAL_POINTERS
+  {
+  }
 
   V8_INLINE int VisitBytecodeArray(Map map, BytecodeArray object);
   V8_INLINE int VisitDescriptorArray(Map map, DescriptorArray object);
@@ -190,6 +196,26 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
     // reconstructed after GC.
   }
 
+  V8_INLINE void VisitExternalPointer(HeapObject host,
+                                      ExternalPointer_t ptr) final {
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+    external_pointer_table_->Mark(static_cast<uint32_t>(ptr));
+#endif  // V8_SANDBOXED_EXTERNAL_POINTERS
+  }
+
+  V8_INLINE void VisitEmbedderDataSlot(HeapObject host,
+                                       EmbedderDataSlot slot) final {
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+    // When sandboxed external pointers are enabled, EmbedderDataSlots may
+    // contain an external pointer, which must be marked as alive.
+    uint32_t maybe_index = base::Relaxed_Load(reinterpret_cast<base::Atomic32*>(
+        slot.address() + EmbedderDataSlot::kRawPayloadOffset));
+    if (external_pointer_table_->IsValidIndex(maybe_index)) {
+      external_pointer_table_->Mark(maybe_index);
+    }
+#endif  // V8_SANDBOXED_EXTERNAL_POINTERS
+  }
+
  protected:
   ConcreteVisitor* concrete_visitor() {
     return static_cast<ConcreteVisitor*>(this);
@@ -240,6 +266,9 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
   const bool is_embedder_tracing_enabled_;
   const bool should_keep_ages_unchanged_;
   const bool is_shared_heap_;
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+  ExternalPointerTable* const external_pointer_table_;
+#endif  // V8_SANDBOXED_EXTERNAL_POINTERS
 };
 
 }  // namespace internal
src/heap/objects-visiting-inl.h

@@ -133,6 +133,19 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitDataObject(
   if (visitor->ShouldVisitMapPointer()) {
     visitor->VisitMapPointer(object);
   }
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+  // The following types have external pointers, which must be visited.
+  // TODO(v8:10391) Consider adding custom visitor IDs for these.
+  if (object.IsExternalOneByteString()) {
+    ExternalOneByteString::BodyDescriptor::IterateBody(map, object, size,
+                                                       visitor);
+  } else if (object.IsExternalTwoByteString()) {
+    ExternalTwoByteString::BodyDescriptor::IterateBody(map, object, size,
+                                                       visitor);
+  } else if (object.IsForeign()) {
+    Foreign::BodyDescriptor::IterateBody(map, object, size, visitor);
+  }
+#endif  // V8_SANDBOXED_EXTERNAL_POINTERS
   return static_cast<ResultType>(size);
 }
src/init/bootstrapper.cc

@@ -6173,14 +6173,6 @@ Genesis::Genesis(
     }
   }
 
-  // TODO(v8:10391): The reason is that the NativeContext::microtask_queue
-  // serialization is not actually supported, and therefore the field is
-  // serialized as raw data instead of being serialized as ExternalReference.
-  // As a result, when sandboxed external pointers are enabled, the external
-  // pointer entry is not allocated for microtask queue field during
-  // deserialization, so we allocate it manually here.
-  native_context()->AllocateExternalPointerEntries(isolate);
-
   native_context()->set_microtask_queue(
       isolate, microtask_queue ? static_cast<MicrotaskQueue*>(microtask_queue)
                                : isolate->default_microtask_queue());
@@ -521,6 +521,7 @@
   F(MC_CLEAR_WEAK_COLLECTIONS)          \
   F(MC_CLEAR_WEAK_LISTS)                \
   F(MC_CLEAR_WEAK_REFERENCES)           \
+  F(MC_SWEEP_EXTERNAL_POINTER_TABLE)    \
   F(MC_COMPLETE_SWEEP_ARRAY_BUFFERS)    \
   F(MC_COMPLETE_SWEEPING)               \
   F(MC_EVACUATE_CANDIDATES)             \
src/init/isolate-allocator.cc

@@ -5,7 +5,6 @@
 #include "src/init/isolate-allocator.h"
 
 #include "src/base/bounded-page-allocator.h"
-#include "src/common/ptr-compr.h"
 #include "src/execution/isolate.h"
 #include "src/heap/code-range.h"
 #include "src/sandbox/sandbox.h"
@@ -110,7 +110,11 @@ namespace internal {
   HR(caged_memory_allocation_outcome, V8.CagedMemoryAllocationOutcome, 0, 2,  \
      3)                                                                       \
   /* number of times a cache event is triggered for a wasm module */          \
-  HR(wasm_cache_count, V8.WasmCacheCount, 0, 100, 101)
+  HR(wasm_cache_count, V8.WasmCacheCount, 0, 100, 101)                        \
+  /* Number of in-use external pointers in the external pointer table */      \
+  /* Counted after sweeping the table at the end of mark-compact GC */        \
+  HR(sandboxed_external_pointers_count, V8.SandboxedExternalPointersCount, 0, \
+     kMaxSandboxedExternalPointers, 101)
 
 #define NESTED_TIMED_HISTOGRAM_LIST(HT)                                       \
   /* Timer histograms, not thread safe: HT(name, caption, max, unit) */       \
src/objects/code-inl.h

@@ -935,7 +935,7 @@ void CodeDataContainer::set_code_cage_base(Address code_cage_base) {
 
 void CodeDataContainer::AllocateExternalPointerEntries(Isolate* isolate) {
   CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
-  InitExternalPointerField(kCodeEntryPointOffset, isolate);
+  InitExternalPointerField(kCodeEntryPointOffset, isolate, kCodeEntryPointTag);
 }
 
 Code CodeDataContainer::code() const {
src/objects/contexts-inl.h

@@ -277,7 +277,8 @@ DEF_GETTER(NativeContext, microtask_queue, MicrotaskQueue*) {
 }
 
 void NativeContext::AllocateExternalPointerEntries(Isolate* isolate) {
-  InitExternalPointerField(kMicrotaskQueueOffset, isolate);
+  InitExternalPointerField(kMicrotaskQueueOffset, isolate,
+                           kNativeContextMicrotaskQueueTag);
 }
 
 void NativeContext::set_microtask_queue(Isolate* isolate,
src/objects/embedder-data-slot-inl.h

@@ -34,7 +34,7 @@ void EmbedderDataSlot::AllocateExternalPointerEntry(Isolate* isolate) {
 #ifdef V8_SANDBOXED_EXTERNAL_POINTERS
   // TODO(v8:10391, saelo): Use InitExternalPointerField() once
   // ExternalPointer_t is 4-bytes.
-  uint32_t index = isolate->external_pointer_table().allocate();
+  uint32_t index = isolate->external_pointer_table().Allocate();
   // Object slots don't support storing raw values, so we just "reinterpret
   // cast" the index value to Object.
   Object index_as_object(index);
@@ -77,8 +77,7 @@ void EmbedderDataSlot::store_tagged(JSObject object, int embedder_field_index,
       .Relaxed_Store(value);
   WRITE_BARRIER(object, slot_offset + kTaggedPayloadOffset, value);
 #ifdef V8_COMPRESS_POINTERS
-  // See gc_safe_store() for the reasons behind two stores and why the second is
-  // only done if !V8_SANDBOXED_EXTERNAL_POINTERS_BOOL
+  // See gc_safe_store() for the reasons behind two stores.
   ObjectSlot(FIELD_ADDR(object, slot_offset + kRawPayloadOffset))
       .Relaxed_Store(Smi::zero());
 #endif
@@ -93,8 +92,8 @@ bool EmbedderDataSlot::ToAlignedPointer(Isolate* isolate,
   Address raw_value;
 #ifdef V8_SANDBOXED_EXTERNAL_POINTERS
   uint32_t index = base::Memory<uint32_t>(address() + kRawPayloadOffset);
-  raw_value = isolate->external_pointer_table().get(index) &
-              ~kEmbedderDataSlotPayloadTag;
+  raw_value =
+      isolate->external_pointer_table().Get(index, kEmbedderDataSlotPayloadTag);
 #else
   if (COMPRESS_POINTERS_BOOL) {
     // TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size
@@ -115,9 +114,9 @@ bool EmbedderDataSlot::ToAlignedPointerSafe(Isolate* isolate,
 #ifdef V8_SANDBOXED_EXTERNAL_POINTERS
   uint32_t index = base::Memory<uint32_t>(address() + kRawPayloadOffset);
   Address raw_value;
-  if (isolate->external_pointer_table().is_valid_index(index)) {
-    raw_value = isolate->external_pointer_table().get(index) &
-                ~kEmbedderDataSlotPayloadTag;
+  if (isolate->external_pointer_table().IsValidIndex(index)) {
+    raw_value = isolate->external_pointer_table().Get(
+        index, kEmbedderDataSlotPayloadTag);
     *out_pointer = reinterpret_cast<void*>(raw_value);
     return true;
   }
@@ -132,14 +131,16 @@ bool EmbedderDataSlot::store_aligned_pointer(Isolate* isolate, void* ptr) {
   if (!HAS_SMI_TAG(value)) return false;
 #ifdef V8_SANDBOXED_EXTERNAL_POINTERS
   if (V8_SANDBOXED_EXTERNAL_POINTERS_BOOL) {
+    DCHECK_EQ(0, value & kExternalPointerTagMask);
     AllocateExternalPointerEntry(isolate);
    // Raw payload contains the table index. Object slots don't support loading
    // of raw values, so we just "reinterpret cast" Object value to index.
     Object index_as_object =
         ObjectSlot(address() + kRawPayloadOffset).Relaxed_Load();
     uint32_t index = static_cast<uint32_t>(index_as_object.ptr());
-    isolate->external_pointer_table().set(index,
-                                          value | kEmbedderDataSlotPayloadTag);
+    // This also marks the entry as alive until the next GC.
+    isolate->external_pointer_table().Set(index, value,
+                                          kEmbedderDataSlotPayloadTag);
     return true;
   }
 #endif
src/objects/foreign-inl.h

@@ -34,7 +34,8 @@ DEF_GETTER(Foreign, foreign_address, Address) {
 }
 
 void Foreign::AllocateExternalPointerEntries(Isolate* isolate) {
-  InitExternalPointerField(kForeignAddressOffset, isolate);
+  InitExternalPointerField(kForeignAddressOffset, isolate,
+                           kForeignForeignAddressTag);
 }
 
 void Foreign::set_foreign_address(Isolate* isolate, Address value) {
src/objects/heap-object.h

@@ -161,6 +161,7 @@ class HeapObject : public Object {
   inline ObjectSlot RawField(int byte_offset) const;
   inline MaybeObjectSlot RawMaybeWeakField(int byte_offset) const;
   inline CodeObjectSlot RawCodeField(int byte_offset) const;
+  inline ExternalPointer_t RawExternalPointerField(int byte_offset) const;
 
   DECL_CAST(HeapObject)
src/objects/objects-body-descriptors-inl.h

@@ -14,6 +14,7 @@
 #include "src/objects/call-site-info.h"
 #include "src/objects/cell.h"
 #include "src/objects/data-handler.h"
+#include "src/objects/embedder-data-array-inl.h"
 #include "src/objects/fixed-array.h"
 #include "src/objects/foreign-inl.h"
 #include "src/objects/free-space-inl.h"
@@ -106,6 +107,11 @@ void BodyDescriptorBase::IterateJSObjectBodyImpl(Map map, HeapObject obj,
   STATIC_ASSERT(kEmbedderDataSlotSize == kTaggedSize);
 #endif
   IteratePointers(obj, start_offset, end_offset, v);
+
+  JSObject js_obj = JSObject::cast(obj);
+  for (int i = 0; i < js_obj.GetEmbedderFieldCount(); i++) {
+    v->VisitEmbedderDataSlot(obj, EmbedderDataSlot(js_obj, i));
+  }
 }
 
 template <typename ObjectVisitor>
@@ -635,6 +641,8 @@ class Foreign::BodyDescriptor final : public BodyDescriptorBase {
     v->VisitExternalReference(
         Foreign::cast(obj), reinterpret_cast<Address*>(
                                 obj.RawField(kForeignAddressOffset).address()));
+    v->VisitExternalPointer(obj,
+                            obj.RawExternalPointerField(kForeignAddressOffset));
   }
 
   static inline int SizeOf(Map map, HeapObject object) { return kSize; }
@@ -783,7 +791,14 @@ class ExternalOneByteString::BodyDescriptor final : public BodyDescriptorBase {
 
   template <typename ObjectVisitor>
   static inline void IterateBody(Map map, HeapObject obj, int object_size,
-                                 ObjectVisitor* v) {}
+                                 ObjectVisitor* v) {
+    ExternalString string = ExternalString::cast(obj);
+    v->VisitExternalPointer(obj,
+                            string.RawExternalPointerField(kResourceOffset));
+    if (string.is_uncached()) return;
+    v->VisitExternalPointer(
+        obj, string.RawExternalPointerField(kResourceDataOffset));
+  }
 
   static inline int SizeOf(Map map, HeapObject object) { return kSize; }
 };
@@ -794,7 +809,14 @@ class ExternalTwoByteString::BodyDescriptor final : public BodyDescriptorBase {
 
   template <typename ObjectVisitor>
   static inline void IterateBody(Map map, HeapObject obj, int object_size,
-                                 ObjectVisitor* v) {}
+                                 ObjectVisitor* v) {
+    ExternalString string = ExternalString::cast(obj);
+    v->VisitExternalPointer(obj,
+                            string.RawExternalPointerField(kResourceOffset));
+    if (string.is_uncached()) return;
+    v->VisitExternalPointer(
+        obj, string.RawExternalPointerField(kResourceDataOffset));
+  }
 
   static inline int SizeOf(Map map, HeapObject object) { return kSize; }
 };
@@ -916,6 +938,8 @@ class NativeContext::BodyDescriptor final : public BodyDescriptorBase {
                           NativeContext::kEndOfStrongFieldsOffset, v);
     IterateCustomWeakPointers(obj, NativeContext::kStartOfWeakFieldsOffset,
                               NativeContext::kEndOfWeakFieldsOffset, v);
+    v->VisitExternalPointer(obj,
+                            obj.RawExternalPointerField(kMicrotaskQueueOffset));
   }
 
   static inline int SizeOf(Map map, HeapObject object) {
@@ -941,6 +965,7 @@ class CodeDataContainer::BodyDescriptor final : public BodyDescriptorBase {
 
     if (V8_EXTERNAL_CODE_SPACE_BOOL) {
       v->VisitCodePointer(obj, obj.RawCodeField(kCodeOffset));
+      v->VisitExternalPointer(obj, obj.RawExternalPointerField(kCodeOffset));
     }
   }
 
@@ -984,6 +1009,13 @@ class EmbedderDataArray::BodyDescriptor final : public BodyDescriptorBase {
     STATIC_ASSERT(kEmbedderDataSlotSize == kTaggedSize);
     IteratePointers(obj, EmbedderDataArray::kHeaderSize, object_size, v);
 #endif
+
+    EmbedderDataArray array = EmbedderDataArray::cast(obj);
+    EmbedderDataSlot start(array, 0);
+    EmbedderDataSlot end(array, array.length());
+    for (EmbedderDataSlot slot = start; slot < end; ++slot) {
+      v->VisitEmbedderDataSlot(obj, slot);
+    }
   }
 
   static inline int SizeOf(Map map, HeapObject object) {
src/objects/objects-inl.h

@@ -661,8 +661,9 @@ void Object::WriteSandboxedPointerField(size_t offset, Isolate* isolate,
                             PtrComprCageBase(isolate), value);
 }
 
-void Object::InitExternalPointerField(size_t offset, Isolate* isolate) {
-  i::InitExternalPointerField(field_address(offset), isolate);
+void Object::InitExternalPointerField(size_t offset, Isolate* isolate,
+                                      ExternalPointerTag tag) {
+  i::InitExternalPointerField(field_address(offset), isolate, tag);
 }
 
 void Object::InitExternalPointerField(size_t offset, Isolate* isolate,
@@ -692,6 +693,10 @@ CodeObjectSlot HeapObject::RawCodeField(int byte_offset) const {
   return CodeObjectSlot(field_address(byte_offset));
 }
 
+ExternalPointer_t HeapObject::RawExternalPointerField(int byte_offset) const {
+  return ReadRawExternalPointerField(field_address(byte_offset));
+}
+
 MapWord MapWord::FromMap(const Map map) {
   DCHECK(map.is_null() || !MapWord::IsPacked(map.ptr()));
 #ifdef V8_MAP_PACKING
src/objects/objects.h

@@ -723,7 +723,8 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
   //
   // ExternalPointer_t field accessors.
   //
-  inline void InitExternalPointerField(size_t offset, Isolate* isolate);
+  inline void InitExternalPointerField(size_t offset, Isolate* isolate,
+                                       ExternalPointerTag tag);
   inline void InitExternalPointerField(size_t offset, Isolate* isolate,
                                        Address value, ExternalPointerTag tag);
   inline Address ReadExternalPointerField(size_t offset, Isolate* isolate,
src/objects/string-inl.h

@@ -1059,9 +1059,11 @@ bool ExternalString::is_uncached() const {
 }
 
 void ExternalString::AllocateExternalPointerEntries(Isolate* isolate) {
-  InitExternalPointerField(kResourceOffset, isolate);
+  InitExternalPointerField(kResourceOffset, isolate,
+                           kExternalStringResourceTag);
   if (is_uncached()) return;
-  InitExternalPointerField(kResourceDataOffset, isolate);
+  InitExternalPointerField(kResourceDataOffset, isolate,
+                           kExternalStringResourceDataTag);
 }
 
 DEF_GETTER(ExternalString, resource_as_address, Address) {
src/objects/visitors.h

@@ -180,6 +180,12 @@ class ObjectVisitor {
 
   // Visits the object's map pointer, decoding as necessary
   virtual void VisitMapPointer(HeapObject host) { UNREACHABLE(); }
+
+  // Visits an external pointer.
+  virtual void VisitExternalPointer(HeapObject host, ExternalPointer_t ptr) {}
+
+  // Visits an EmbedderDataSlot.
+  virtual void VisitEmbedderDataSlot(HeapObject host, EmbedderDataSlot slot) {}
 };
 
 // Helper version of ObjectVisitor that also takes care of caching base values
src/sandbox/external-pointer-inl.h

@@ -7,6 +7,7 @@
 
 #include "include/v8-internal.h"
 #include "src/execution/isolate.h"
+#include "src/sandbox/external-pointer-table-inl.h"
 #include "src/sandbox/external-pointer.h"
 
 namespace v8 {
@@ -18,32 +19,23 @@ V8_INLINE Address DecodeExternalPointer(const Isolate* isolate,
   STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
 #ifdef V8_SANDBOXED_EXTERNAL_POINTERS
   uint32_t index = static_cast<uint32_t>(encoded_pointer);
-  return isolate->external_pointer_table().get(index) & ~tag;
+  return isolate->external_pointer_table().Get(index, tag);
 #else
   return encoded_pointer;
 #endif
 }
 
-V8_INLINE void InitExternalPointerField(Address field_address,
-                                        Isolate* isolate) {
-#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
-  static_assert(kExternalPointerSize == kSystemPointerSize,
-                "Review the code below, once kExternalPointerSize is 4-byte "
-                "the address of the field will always be aligned");
-  ExternalPointer_t index = isolate->external_pointer_table().allocate();
-  base::WriteUnalignedValue<ExternalPointer_t>(field_address, index);
-#else
-  // Nothing to do.
-#endif  // V8_SANDBOXED_EXTERNAL_POINTERS
+V8_INLINE void InitExternalPointerField(Address field_address, Isolate* isolate,
+                                        ExternalPointerTag tag) {
+  InitExternalPointerField(field_address, isolate, kNullExternalPointer, tag);
 }
 
 V8_INLINE void InitExternalPointerField(Address field_address, Isolate* isolate,
                                         Address value, ExternalPointerTag tag) {
 #ifdef V8_SANDBOXED_EXTERNAL_POINTERS
   DCHECK_EQ(value & kExternalPointerTagMask, 0);
-  ExternalPointer_t index = isolate->external_pointer_table().allocate();
-  isolate->external_pointer_table().set(static_cast<uint32_t>(index),
-                                        value | tag);
+  ExternalPointer_t index = isolate->external_pointer_table().Allocate();
+  isolate->external_pointer_table().Set(static_cast<uint32_t>(index), value,
+                                        tag);
   static_assert(kExternalPointerSize == kSystemPointerSize,
                 "Review the code below, once kExternalPointerSize is 4-byte "
                 "the address of the field will always be aligned");
@@ -61,18 +53,24 @@ V8_INLINE void InitExternalPointerField(Address field_address, Isolate* isolate,
 #endif  // V8_SANDBOXED_EXTERNAL_POINTERS
 }
 
-V8_INLINE Address ReadExternalPointerField(Address field_address,
-                                           const Isolate* isolate,
-                                           ExternalPointerTag tag) {
+V8_INLINE ExternalPointer_t ReadRawExternalPointerField(Address field_address) {
   static_assert(kExternalPointerSize == kSystemPointerSize,
                 "Review the code below, once kExternalPointerSize is 4-byte "
                 "the address of the field will always be aligned");
   // Pointer compression causes types larger than kTaggedSize to be unaligned.
   constexpr bool v8_pointer_compression_unaligned =
       kExternalPointerSize > kTaggedSize;
-  ExternalPointer_t encoded_value;
   if (v8_pointer_compression_unaligned) {
-    encoded_value = base::ReadUnalignedValue<ExternalPointer_t>(field_address);
+    return base::ReadUnalignedValue<ExternalPointer_t>(field_address);
   } else {
-    encoded_value = base::Memory<ExternalPointer_t>(field_address);
+    return base::Memory<ExternalPointer_t>(field_address);
   }
 }
 
+V8_INLINE Address ReadExternalPointerField(Address field_address,
+                                           const Isolate* isolate,
+                                           ExternalPointerTag tag) {
+  ExternalPointer_t encoded_value = ReadRawExternalPointerField(field_address);
+  return DecodeExternalPointer(isolate, encoded_value, tag);
+}
+
@@ -83,12 +81,10 @@ V8_INLINE void WriteExternalPointerField(Address field_address,
   static_assert(kExternalPointerSize == kSystemPointerSize,
                 "Review the code below, once kExternalPointerSize is 4-byte "
                 "the address of the field will always be aligned");
-  DCHECK_EQ(value & kExternalPointerTagMask, 0);
-
   ExternalPointer_t index =
       base::ReadUnalignedValue<ExternalPointer_t>(field_address);
-  isolate->external_pointer_table().set(static_cast<uint32_t>(index),
-                                        value | tag);
+  isolate->external_pointer_table().Set(static_cast<uint32_t>(index), value,
+                                        tag);
 #else
   // Pointer compression causes types larger than kTaggedSize to be unaligned.
   constexpr bool v8_pointer_compression_unaligned =
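The unaligned case handled above arises because pointer compression lays
fields out on kTaggedSize = 4-byte boundaries, so an 8-byte ExternalPointer_t
field may sit at an address that is only 4-byte aligned. A standalone sketch
of what such an unaligned read boils down to:

#include <cassert>
#include <cstdint>
#include <cstring>

// Stand-in for base::ReadUnalignedValue: memcpy is the portable way to load
// a value from a possibly unaligned address.
uint64_t ReadUnaligned(const void* address) {
  uint64_t value;
  std::memcpy(&value, address, sizeof(value));
  return value;
}

int main() {
  alignas(8) unsigned char heap[16] = {};
  uint64_t field = 0x1122334455667788;
  std::memcpy(heap + 4, &field, sizeof(field));  // kTaggedSize = 4 alignment
  assert(ReadUnaligned(heap + 4) == field);      // safe despite misalignment
  return 0;
}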
src/sandbox/external-pointer-table-inl.h (new file, 122 lines)
@@ -0,0 +1,122 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_SANDBOX_EXTERNAL_POINTER_TABLE_INL_H_
#define V8_SANDBOX_EXTERNAL_POINTER_TABLE_INL_H_

#include "src/base/atomicops.h"
#include "src/sandbox/external-pointer-table.h"
#include "src/sandbox/external-pointer.h"
#include "src/utils/allocation.h"

#ifdef V8_SANDBOX_IS_AVAILABLE

namespace v8 {
namespace internal {

void ExternalPointerTable::Init(Isolate* isolate) {
  DCHECK(!is_initialized());

  VirtualAddressSpace* root_space = GetPlatformVirtualAddressSpace();
  DCHECK(IsAligned(kExternalPointerTableReservationSize,
                   root_space->allocation_granularity()));
  buffer_ = root_space->AllocatePages(
      VirtualAddressSpace::kNoHint, kExternalPointerTableReservationSize,
      root_space->allocation_granularity(), PagePermissions::kNoAccess);
  if (!buffer_) {
    V8::FatalProcessOutOfMemory(
        isolate,
        "Failed to reserve memory for ExternalPointerTable backing buffer");
  }

  // Allocate the initial block.
  Grow();

  // Set up the special null entry. This entry must currently contain nullptr
  // so that uninitialized EmbedderDataSlots work correctly. TODO(saelo) maybe
  // make entry non-null once EmbedderDataSlots are properly sandboxified.
  STATIC_ASSERT(kNullExternalPointer == 0);
  store(kNullExternalPointer, kNullAddress);
}
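Init() implements point 1 of the commit message: the maximum-size buffer is
reserved once with no access permissions, and Grow() then only ever commits
pages in place, so buffer_ never moves. The same pattern, sketched with plain
POSIX calls instead of V8's VirtualAddressSpace abstraction:

#include <sys/mman.h>

#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  constexpr size_t kReservationSize = size_t{128} * 1024 * 1024;
  constexpr size_t kBlockSize = 16 * 1024;

  // Reserve the maximum table size up front, inaccessible and uncommitted.
  void* buffer = mmap(nullptr, kReservationSize, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(buffer != MAP_FAILED);

  // "Grow" by committing the next block in place; the buffer address never
  // changes, so no pointer to the table ever needs updating after a grow.
  int rc = mprotect(buffer, kBlockSize, PROT_READ | PROT_WRITE);
  assert(rc == 0);
  static_cast<uint64_t*>(buffer)[0] = 0;  // e.g. the reserved null entry

  munmap(buffer, kReservationSize);
  return 0;
}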
|
||||
void ExternalPointerTable::TearDown() {
|
||||
DCHECK(is_initialized());
|
||||
|
||||
CHECK(GetPlatformVirtualAddressSpace()->FreePages(
|
||||
buffer_, kExternalPointerTableReservationSize));
|
||||
|
||||
buffer_ = kNullAddress;
|
||||
capacity_ = 0;
|
||||
freelist_head_ = 0;
|
||||
}
|
||||
|
||||
Address ExternalPointerTable::Get(uint32_t index,
|
||||
ExternalPointerTag tag) const {
|
||||
DCHECK_LT(index, capacity_);
|
||||
|
||||
Address entry = load_atomic(index);
|
||||
DCHECK(!is_free(entry));
|
||||
|
||||
return entry & ~tag;
|
||||
}
|
||||
|
||||
void ExternalPointerTable::Set(uint32_t index, Address value,
|
||||
ExternalPointerTag tag) {
|
||||
DCHECK_LT(index, capacity_);
|
||||
DCHECK_NE(kNullExternalPointer, index);
|
||||
DCHECK_EQ(0, value & kExternalPointerTagMask);
|
||||
DCHECK(is_marked(tag));
|
||||
|
||||
store_atomic(index, value | tag);
|
||||
}
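Get() and Set() are the two halves of the OR-on-store, AND-on-load tagging scheme. A small illustration of the mechanism with hypothetical tag values (the real kExternalPointer*Tag constants are defined elsewhere in the codebase; this is not part of the diff):

#include <cstdint>

constexpr uint64_t kFooTag = 0x00ff'0000'0000'0000;  // hypothetical tag
constexpr uint64_t kBarTag = 0x00f0'0000'0000'0000;  // hypothetical tag

uint64_t StoreWithTag(uint64_t pointer, uint64_t tag) {
  return pointer | tag;  // what store_atomic(index, value | tag) writes
}

uint64_t LoadWithTag(uint64_t entry, uint64_t tag) {
  return entry & ~tag;  // what Get() computes: entry & ~tag
}

// StoreWithTag(p, kFooTag) followed by LoadWithTag(entry, kFooTag) yields p
// again, but LoadWithTag(entry, kBarTag) leaves the 0x000f'... bits set, so
// dereferencing the result faults as a non-canonical address on typical
// 64-bit hardware. This is the type check described in the header below.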

bool ExternalPointerTable::IsValidIndex(uint32_t index) const {
return index < capacity_ && !is_free(load_atomic(index));
}

uint32_t ExternalPointerTable::Allocate() {
DCHECK(is_initialized());

if (!freelist_head_) {
// Freelist is empty so grow the table.
Grow();
}

// Note: if external pointer table entries are ever allocated from a
// background thread, this logic must become atomic, for example by doing an
// atomic load of the current freelist head, then writing back the new
// freelist head in a CAS loop.

DCHECK(freelist_head_);
uint32_t index = freelist_head_;
DCHECK_LT(index, capacity_);
// The next free element is stored in the lower 32 bits of the entry.
freelist_head_ = static_cast<uint32_t>(load_atomic(index));
return index;
}
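The note above describes what a thread-safe variant would have to do. One possible shape of that CAS loop, assuming freelist_head_ were changed to a std::atomic<uint32_t> (hypothetical; single-threaded allocation does not need this):

#include <atomic>

// Hypothetical concurrent variant of Allocate(); not part of this change.
uint32_t AllocateConcurrentSketch() {
  uint32_t head = freelist_head_.load(std::memory_order_acquire);
  while (head != 0) {
    // The entry at |head| stores the next free index in its low 32 bits.
    uint32_t next = static_cast<uint32_t>(load_atomic(head));
    // Publish the new head; on failure |head| is reloaded and we retry.
    // A production version would also have to consider the ABA problem.
    if (freelist_head_.compare_exchange_weak(head, next,
                                             std::memory_order_acq_rel)) {
      return head;
    }
  }
  // Freelist empty: the caller would have to grow the table and retry.
  return 0;
}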

void ExternalPointerTable::Mark(uint32_t index) {
DCHECK_LT(index, capacity_);
STATIC_ASSERT(sizeof(base::Atomic64) == sizeof(Address));

base::Atomic64 old_val = load_atomic(index);
DCHECK(!is_free(old_val));
base::Atomic64 new_val = set_mark_bit(old_val);

// We don't need to perform the CAS in a loop: if the CAS fails because the
// entry no longer contains the old value, then the mutator must've just
// written a new value into the entry. That write in turn must've set the
// marking bit already (see ExternalPointerTable::Set), so we don't need to
// do it again.
base::Atomic64* ptr = reinterpret_cast<base::Atomic64*>(entry_address(index));
base::Atomic64 val = base::Relaxed_CompareAndSwap(ptr, old_val, new_val);
DCHECK((val == old_val) || is_marked(val));
USE(val);
}
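For clarity, the interleaving the comment above argues about, written out step by step (illustrative, not part of the diff):

// Worked interleaving for the single-CAS argument:
//
//   GC thread                            Mutator thread
//   old_val = load_atomic(i)   reads A
//                                        Set(i, new_ptr, tag)  writes B;
//                                        B already carries the mark bit
//   CAS(i, A, marked(A))       fails: entry now holds B
//
// The CAS failure is benign: the only write that can race with Mark() is
// Set(), and Set() unconditionally stores a marked value, so the entry
// ends up marked either way. This is exactly what the final
// DCHECK((val == old_val) || is_marked(val)) asserts.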

}  // namespace internal
}  // namespace v8

#endif  // V8_SANDBOX_IS_AVAILABLE

#endif  // V8_SANDBOX_EXTERNAL_POINTER_TABLE_INL_H_

src/sandbox/external-pointer-table.cc
@ -4,21 +4,84 @@
#include "src/sandbox/external-pointer-table.h"

#include "src/base/platform/wrappers.h"
#include <algorithm>

#include "src/execution/isolate.h"
#include "src/logging/counters.h"
#include "src/sandbox/external-pointer-table-inl.h"

#ifdef V8_SANDBOX_IS_AVAILABLE

namespace v8 {
namespace internal {

void ExternalPointerTable::GrowTable(ExternalPointerTable* table) {
// TODO(v8:10391, saelo): overflow check here and in the multiplication below
uint32_t new_capacity = table->capacity_ + table->capacity_ / 2;
table->buffer_ = reinterpret_cast<Address*>(
base::Realloc(table->buffer_, new_capacity * sizeof(Address)));
CHECK(table->buffer_);
memset(&table->buffer_[table->capacity_], 0,
(new_capacity - table->capacity_) * sizeof(Address));
table->capacity_ = new_capacity;
// static
uint32_t ExternalPointerTable::AllocateEntry(ExternalPointerTable* table) {
return table->Allocate();
}

uint32_t ExternalPointerTable::Sweep(Isolate* isolate) {
// Sweep top to bottom and rebuild the freelist from newly dead and
// previously freed entries. This way, the freelist ends up sorted by index,
// which helps defragment the table. This method must run either on the
// mutator thread or while the mutator is stopped. Also clear marking bits on
// live entries.
// TODO(v8:10391, saelo) could also shrink the table using DecommitPages() if
// elements at the end are free. This might require some form of compaction.
uint32_t freelist_size = 0;
uint32_t current_freelist_head = 0;

// Skip the special null entry.
DCHECK_GE(capacity_, 1);
for (uint32_t i = capacity_ - 1; i > 0; i--) {
// No other threads are active during sweep, so there is no need to use
// atomic operations here.
Address entry = load(i);
if (!is_marked(entry)) {
store(i, make_freelist_entry(current_freelist_head));
current_freelist_head = i;
freelist_size++;
} else {
store(i, clear_mark_bit(entry));
}
}

freelist_head_ = current_freelist_head;

uint32_t num_active_entries = capacity_ - freelist_size;
isolate->counters()->sandboxed_external_pointers_count()->AddSample(
num_active_entries);
return num_active_entries;
}
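How Mark() and Sweep() fit into a major GC cycle, sketched end to end (the real call sites are in the marking visitor and heap code, which are not part of this hunk; LiveExternalPointerIndices() is a hypothetical helper standing in for the marking visitor's traversal):

// Editorial sketch of a major GC cycle driving the table, per the
// semantics of Mark() and Sweep() above.
void MajorGCCycleSketch(Isolate* isolate, ExternalPointerTable* table) {
  // Marking phase (possibly on background threads): every live HeapObject
  // holding an external pointer marks its table entry.
  for (uint32_t index : LiveExternalPointerIndices()) {  // hypothetical
    table->Mark(index);
  }
  // Atomic pause: rebuild the freelist from dead entries and clear the
  // mark bits on survivors.
  uint32_t live = table->Sweep(isolate);
  USE(live);
}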

void ExternalPointerTable::Grow() {
// Freelist should be empty.
DCHECK_EQ(0, freelist_head_);

// Grow the table by one block.
uint32_t old_capacity = capacity_;
uint32_t new_capacity = old_capacity + kEntriesPerBlock;
CHECK_LE(new_capacity, kMaxSandboxedExternalPointers);

// Failure likely means OOM. TODO(saelo) handle this.
VirtualAddressSpace* root_space = GetPlatformVirtualAddressSpace();
DCHECK(IsAligned(kBlockSize, root_space->page_size()));
CHECK(root_space->SetPagePermissions(buffer_ + old_capacity * sizeof(Address),
kBlockSize,
PagePermissions::kReadWrite));
capacity_ = new_capacity;

// Build freelist bottom to top, which might be more cache friendly.
uint32_t start = std::max<uint32_t>(old_capacity, 1);  // Skip entry zero
uint32_t last = new_capacity - 1;
for (uint32_t i = start; i < last; i++) {
store(i, make_freelist_entry(i + 1));
}
store(last, make_freelist_entry(0));
freelist_head_ = start;
}
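Worked numbers for Grow(), derived from the constants in the header below, assuming a 64-bit build (kSystemPointerSize == 8):

// kBlockSize       = 64 * KB     = 65536 bytes
// kEntriesPerBlock = 65536 / 8   = 8192 entries
//
// After the first Grow() (old_capacity == 0): start == 1, last == 8191,
// entries 1..8190 each chain to their successor, entry 8191 terminates
// the list, so freelist_head_ == 1 and the freelist reads
//   1 -> 2 -> ... -> 8191 -> 0.
// Entry 0 is skipped because it is the reserved null entry.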

}  // namespace internal
}  // namespace v8

#endif  // V8_SANDBOX_IS_AVAILABLE

src/sandbox/external-pointer-table.h
@ -5,70 +5,180 @@
#ifndef V8_SANDBOX_EXTERNAL_POINTER_TABLE_H_
#define V8_SANDBOX_EXTERNAL_POINTER_TABLE_H_

#include "src/base/platform/wrappers.h"
#include "src/sandbox/external-pointer.h"
#include "src/utils/utils.h"
#include "include/v8config.h"
#include "src/base/atomicops.h"
#include "src/base/memory.h"
#include "src/common/globals.h"

#ifdef V8_SANDBOX_IS_AVAILABLE

namespace v8 {
namespace internal {

class Isolate;

/**
 * A table storing pointers to objects outside the sandbox.
 *
 * An external pointer table provides the basic mechanisms to ensure
 * memory-safe access to objects located outside the sandbox, but referenced
 * from within it. When an external pointer table is used, objects located
 * inside the sandbox reference outside objects through indices into the table.
 *
 * Type safety can be ensured by using type-specific tags for the external
 * pointers. These tags will be ORed into the unused top bits of the pointer
 * when storing them and will be ANDed away when loading the pointer later
 * again. If a pointer of the wrong type is accessed, some of the top bits will
 * remain in place, rendering the pointer inaccessible.
 *
 * Temporal memory safety is achieved through garbage collection of the table,
 * which ensures that every entry is either an invalid pointer or a valid
 * pointer pointing to a live object.
 *
 * Spatial memory safety can, if necessary, be ensured by storing the size of a
 * referenced object together with the object itself outside the sandbox, and
 * referencing both through a single entry in the table.
 *
 * The garbage collection algorithm for the table works as follows:
 * - The top bit of every entry is reserved for the marking bit.
 * - Every store to an entry automatically sets the marking bit when ORing
 *   with the tag. This avoids the need for write barriers.
 * - Every load of an entry automatically removes the marking bit when ANDing
 *   with the inverted tag.
 * - When the GC marking visitor finds a live object with an external pointer,
 *   it marks the corresponding entry as alive through Mark(), which sets the
 *   marking bit using an atomic CAS operation.
 * - When marking is finished, Sweep() iterates over the table once while the
 *   mutator is stopped and builds a freelist from all dead entries while also
 *   removing the marking bit from any live entry.
 *
 * The freelist is a singly-linked list, using the lower 32 bits of each entry
 * to store the index of the next free entry. When the freelist is empty and a
 * new entry is allocated, the table grows in place and the freelist is
 * re-populated from the newly added entries.
 */
class V8_EXPORT_PRIVATE ExternalPointerTable {
public:
static const int kExternalPointerTableInitialCapacity = 1024;
ExternalPointerTable() = default;

ExternalPointerTable()
: buffer_(reinterpret_cast<Address*>(base::Calloc(
kExternalPointerTableInitialCapacity, sizeof(Address)))),
length_(1),
capacity_(kExternalPointerTableInitialCapacity),
freelist_head_(0) {
// Explicitly setup the invalid nullptr entry.
STATIC_ASSERT(kNullExternalPointer == 0);
buffer_[kNullExternalPointer] = kNullAddress;
}
// Initializes this external pointer table by reserving the backing memory
// and initializing the freelist.
inline void Init(Isolate* isolate);

~ExternalPointerTable() { base::Free(buffer_); }
// Resets this external pointer table and deletes all associated memory.
inline void TearDown();

Address get(uint32_t index) const {
CHECK_LT(index, length_);
return buffer_[index];
}
// Retrieves the entry at the given index.
inline Address Get(uint32_t index, ExternalPointerTag tag) const;

void set(uint32_t index, Address value) {
DCHECK_NE(kNullExternalPointer, index);
CHECK_LT(index, length_);
buffer_[index] = value;
}
// Sets the entry at the given index to the given value.
inline void Set(uint32_t index, Address value, ExternalPointerTag tag);

uint32_t allocate() {
uint32_t index = length_++;
if (index >= capacity_) {
GrowTable(this);
}
DCHECK_NE(kNullExternalPointer, index);
return index;
}
// Returns true if the entry exists and isn't free.
inline bool IsValidIndex(uint32_t index) const;

// Returns true if the entry exists in the table and therefore it can be read.
bool is_valid_index(uint32_t index) const {
// TODO(v8:10391, saelo): also check here if entry is free
return index < length_;
}
// Allocates a new entry in the external pointer table. The caller must
// initialize the entry afterwards through Set(). In particular, the caller is
// responsible for setting the mark bit of the new entry.
// TODO(saelo) this can fail, in which case we should probably do GC + retry.
inline uint32_t Allocate();

uint32_t size() const { return length_; }
// Runtime function called from CSA. Internally just calls Allocate().
static uint32_t AllocateEntry(ExternalPointerTable* table);

static void GrowTable(ExternalPointerTable* table);
// Marks the specified entry as alive.
// Called on the GC thread, so has to CAS to avoid races with the mutator.
inline void Mark(uint32_t index);

// Frees unmarked entries. Must be called on the mutator thread or while that
// thread is stopped. Returns the number of live entries after sweeping.
uint32_t Sweep(Isolate* isolate);

private:
// Required for Isolate::CheckIsolateLayout().
friend class Isolate;

Address* buffer_;
uint32_t length_;
uint32_t capacity_;
uint32_t freelist_head_;
// An external pointer table grows in blocks of this size. This is also the
// initial size of the table.
static const size_t kBlockSize = 64 * KB;
static const size_t kEntriesPerBlock = kBlockSize / kSystemPointerSize;

static const Address kExternalPointerMarkBit = 1ULL << 63;

// Returns true if this external pointer table has been initialized.
bool is_initialized() { return buffer_ != kNullAddress; }

// Extends the table and adds newly created entries to the freelist.
// TODO(saelo) this can fail and so should probably return bool.
void Grow();

// Computes the address of the specified entry.
inline Address entry_address(uint32_t index) const {
return buffer_ + index * sizeof(Address);
}

// Loads the value at the given index. This method is non-atomic, only use it
// when no other threads can currently access the table.
inline Address load(uint32_t index) const {
return base::Memory<Address>(entry_address(index));
}

// Stores the provided value at the given index. This method is non-atomic,
// only use it when no other threads can currently access the table.
inline void store(uint32_t index, Address value) {
base::Memory<Address>(entry_address(index)) = value;
}

// Atomically loads the value at the given index.
inline Address load_atomic(uint32_t index) const {
auto addr = reinterpret_cast<base::Atomic64*>(entry_address(index));
return base::Relaxed_Load(addr);
}

// Atomically stores the provided value at the given index.
inline void store_atomic(uint32_t index, Address value) {
auto addr = reinterpret_cast<base::Atomic64*>(entry_address(index));
base::Relaxed_Store(addr, value);
}

static bool is_marked(Address entry) {
return (entry & kExternalPointerMarkBit) == kExternalPointerMarkBit;
}

static Address set_mark_bit(Address entry) {
return entry | kExternalPointerMarkBit;
}

static Address clear_mark_bit(Address entry) {
return entry & ~kExternalPointerMarkBit;
}

static bool is_free(Address entry) {
return (entry & kExternalPointerFreeEntryTag) ==
kExternalPointerFreeEntryTag;
}

static Address make_freelist_entry(uint32_t current_freelist_head) {
// The next freelist entry is stored in the lower 32 bits of the entry.
Address entry = current_freelist_head;
return entry | kExternalPointerFreeEntryTag;
}

// The buffer backing this table. This is const after initialization. Should
// only be accessed using the load_x() and store_x() methods, which take care
// of atomicity if necessary.
Address buffer_ = kNullAddress;

// The current capacity of this table, which is the number of usable entries.
uint32_t capacity_ = 0;

// The index of the first entry on the freelist or zero if the list is empty.
uint32_t freelist_head_ = 0;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_SANDBOX_IS_AVAILABLE

#endif  // V8_SANDBOX_EXTERNAL_POINTER_TABLE_H_
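A summary sketch of the entry encodings the helpers in this class define. The free-entry tag value below is illustrative; the real kExternalPointerFreeEntryTag is defined with the other tags elsewhere in the codebase (this snippet is not part of the diff):

#include <cstdint>

constexpr uint64_t kIllustrativeFreeTag = 0x7fff'0000'0000'0000;
constexpr uint64_t kMarkBit = uint64_t{1} << 63;  // kExternalPointerMarkBit

uint64_t MakeFreelistEntrySketch(uint32_t next) {
  // The next free index occupies the low 32 bits, as in
  // make_freelist_entry(); Allocate() reads it back from there.
  return uint64_t{next} | kIllustrativeFreeTag;
}

bool IsFreeSketch(uint64_t entry) {
  // is_free() matches on the free-entry tag, so a freelist entry can never
  // be confused with a live tagged pointer.
  return (entry & kIllustrativeFreeTag) == kIllustrativeFreeTag;
}

// A marked live entry simply sets bit 63 on top of the tagged pointer;
// Sweep() clears it again: clear_mark_bit(set_mark_bit(e)) == e.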

src/sandbox/external-pointer.h
@ -18,11 +18,10 @@ V8_INLINE Address DecodeExternalPointer(const Isolate* isolate,

constexpr ExternalPointer_t kNullExternalPointer = 0;

// Creates uninitialized entry in external pointer table and writes the entry id
// to the field.
// When sandbox is not enabled, it's a no-op.
V8_INLINE void InitExternalPointerField(Address field_address,
Isolate* isolate);
// Creates zero-initialized entry in external pointer table and writes the entry
// id to the field. When sandbox is not enabled, it's a no-op.
V8_INLINE void InitExternalPointerField(Address field_address, Isolate* isolate,
ExternalPointerTag tag);

// Creates and initializes entry in external pointer table and writes the entry
// id to the field.
@ -31,6 +30,9 @@ V8_INLINE void InitExternalPointerField(Address field_address,
V8_INLINE void InitExternalPointerField(Address field_address, Isolate* isolate,
Address value, ExternalPointerTag tag);

// Reads and returns a raw external pointer value.
V8_INLINE ExternalPointer_t ReadRawExternalPointerField(Address field_address);

// Reads external pointer for the field, and decodes it if the sandbox is
// enabled.
V8_INLINE Address ReadExternalPointerField(Address field_address,
@ -7,6 +7,7 @@

#include "include/v8-internal.h"
#include "include/v8-platform.h"
#include "include/v8config.h"
#include "src/common/globals.h"
#include "testing/gtest/include/gtest/gtest_prod.h"  // nogncheck