[sandbox] Improve sandboxed pointer support
This CL removes the global IsValidBackingStorePointer function and turns the
DCHECKs that ensure that sandboxed pointers point into the sandbox, which
essentially cover the same condition, into CHECKs. This is mostly to
facilitate debugging during the initial rollout, and the CHECKs can later be
turned back into DCHECKs.

In addition, this CL adds a fallback to a partially-reserved sandbox when
sandboxed pointers are enabled and the regular initialization fails.

Bug: chromium:1218005
Change-Id: I75526f1a00ddb9095ae0e797dc9bb80a210f867b
Cq-Include-Trybots: luci.v8.try:v8_linux64_heap_sandbox_dbg_ng,v8_linux_arm64_sim_heap_sandbox_dbg_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3367617
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Commit-Queue: Samuel Groß <saelo@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78620}
parent 0b3b2cb33a
commit 549ee6f367
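For context on the main change: a DCHECK is compiled out of release builds,
while a CHECK remains and safely terminates the process when its condition
fails, so promoting these assertions makes an out-of-sandbox pointer crash
loudly during the rollout instead of being stored silently. A minimal sketch
of the semantics (illustrative stand-ins only; V8's real macros live in
src/base/logging.h and do more, e.g. print error messages):

    #include <cstdlib>

    // Illustrative stand-ins for V8's macros: CHECK is always active,
    // DCHECK only exists in debug builds.
    #define SKETCH_CHECK(cond)         \
      do {                             \
        if (!(cond)) std::abort();     \
      } while (false)
    #ifdef DEBUG
    #define SKETCH_DCHECK(cond) SKETCH_CHECK(cond)
    #else
    #define SKETCH_DCHECK(cond) ((void)0)
    #endif

Turning a DCHECK into a CHECK therefore changes a debug-only assertion into a
release-mode security check, at the cost of a compare-and-branch per store.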
@@ -1555,17 +1555,17 @@ void CodeStubAssembler::StoreSandboxedPointerToObject(TNode<HeapObject> object,
                                                       TNode<RawPtrT> pointer) {
 #ifdef V8_SANDBOXED_POINTERS
   TNode<SandboxedPtrT> sbx_ptr = ReinterpretCast<SandboxedPtrT>(pointer);
-#ifdef DEBUG
-  // Verify pointer points into the sandbox.
+
+  // Ensure pointer points into the sandbox.
   TNode<ExternalReference> sandbox_base_address =
       ExternalConstant(ExternalReference::sandbox_base_address());
   TNode<ExternalReference> sandbox_end_address =
       ExternalConstant(ExternalReference::sandbox_end_address());
   TNode<UintPtrT> sandbox_base = Load<UintPtrT>(sandbox_base_address);
   TNode<UintPtrT> sandbox_end = Load<UintPtrT>(sandbox_end_address);
-  CSA_DCHECK(this, UintPtrGreaterThanOrEqual(sbx_ptr, sandbox_base));
-  CSA_DCHECK(this, UintPtrLessThan(sbx_ptr, sandbox_end));
-#endif  // DEBUG
+  CSA_CHECK(this, UintPtrGreaterThanOrEqual(sbx_ptr, sandbox_base));
+  CSA_CHECK(this, UintPtrLessThan(sbx_ptr, sandbox_end));
+
   StoreObjectFieldNoWriteBarrier<SandboxedPtrT>(object, offset, sbx_ptr);
 #else
   StoreObjectFieldNoWriteBarrier<RawPtrT>(object, offset, pointer);
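CSA_DCHECK only emits its verification into the generated code in debug
builds, which is why the old code could wrap the whole block in #ifdef DEBUG;
CSA_CHECK emits it unconditionally, so the bounds loads above are now needed
in release builds as well. What the generated code effectively does on every
sandboxed-pointer store, expressed as plain C++ (a sketch of the emitted
logic, not code that exists in V8):

    #include <cstdint>
    #include <cstdlib>

    // sandbox_base_address/sandbox_end_address model the external
    // references loaded above; sbx_ptr is the pointer being stored.
    void CheckSandboxedStore(uintptr_t sbx_ptr,
                             const uintptr_t* sandbox_base_address,
                             const uintptr_t* sandbox_end_address) {
      uintptr_t sandbox_base = *sandbox_base_address;  // Load<UintPtrT>
      uintptr_t sandbox_end = *sandbox_end_address;    // Load<UintPtrT>
      // CSA_CHECK: crash safely if the pointer is outside the sandbox.
      if (!(sbx_ptr >= sandbox_base && sbx_ptr < sandbox_end)) std::abort();
    }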
@@ -83,24 +83,22 @@ void IsolateAllocator::InitializeOncePerProcess() {
   if (GetProcessWideSandbox()->is_disabled()) {
     CHECK(kAllowBackingStoresOutsideSandbox);
   } else {
-    auto cage = GetProcessWideSandbox();
-    CHECK(cage->is_initialized());
+    auto sandbox = GetProcessWideSandbox();
+    CHECK(sandbox->is_initialized());
     // The pointer compression cage must be placed at the start of the sandbox.
-    //
+
     // TODO(chromium:12180) this currently assumes that no other pages were
     // allocated through the cage's page allocator in the meantime. In the
     // future, the cage initialization will happen just before this function
     // runs, and so this will be guaranteed. Currently however, it is possible
     // that the embedder accidentally uses the cage's page allocator prior to
     // initializing V8, in which case this CHECK will likely fail.
-    void* hint = reinterpret_cast<void*>(cage->base());
-    void* base = cage->page_allocator()->AllocatePages(
-        hint, params.reservation_size, params.base_alignment,
-        PageAllocator::kNoAccess);
-    CHECK_EQ(base, hint);
-    existing_reservation =
-        base::AddressRegion(cage->base(), params.reservation_size);
-    params.page_allocator = cage->page_allocator();
+    Address base = sandbox->address_space()->AllocatePages(
+        sandbox->base(), params.reservation_size, params.base_alignment,
+        PagePermissions::kNoAccess);
+    CHECK_EQ(sandbox->base(), base);
+    existing_reservation = base::AddressRegion(base, params.reservation_size);
+    params.page_allocator = sandbox->page_allocator();
   }
 #endif
   if (!GetProcessWidePtrComprCage()->InitReservation(params,
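The CHECK_EQ above pins the pointer compression cage to the very start of the
sandbox, so the compression base and the sandbox base coincide and every
decompressed pointer automatically lands inside the sandbox. A self-contained
sketch of that layout invariant (addresses are hypothetical; this is not V8
API):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Hypothetical addresses, chosen for illustration only.
      const uintptr_t sandbox_base = 0x200000000000;
      const uintptr_t cage_base = sandbox_base;  // the CHECK_EQ invariant
      const uint32_t compressed = 0xdeadbeef;    // some compressed pointer
      uintptr_t addr = cage_base + compressed;   // decompression (simplified)
      assert(addr >= sandbox_base);  // hence always inside the sandbox region
      return 0;
    }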
@@ -328,7 +328,6 @@ std::unique_ptr<BackingStore> BackingStore::Allocate(
     }
   }
 
-  DCHECK(IsValidBackingStorePointer(buffer_start));
   auto result = new BackingStore(buffer_start,  // start
                                  byte_length,   // length
                                  byte_length,   // max length
@@ -455,8 +454,6 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
     return {};
   }
 
-  DCHECK(IsValidBackingStorePointer(allocation_base));
-
   // Get a pointer to the start of the buffer, skipping negative guard region
   // if necessary.
 #if V8_ENABLE_WEBASSEMBLY
@@ -755,7 +752,6 @@ BackingStore::ResizeOrGrowResult BackingStore::GrowInPlace(
 std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
     Isolate* isolate, void* allocation_base, size_t allocation_length,
     SharedFlag shared, bool free_on_destruct) {
-  DCHECK(IsValidBackingStorePointer(allocation_base));
   auto result = new BackingStore(allocation_base,    // start
                                  allocation_length,  // length
                                  allocation_length,  // max length
@@ -777,7 +773,6 @@ std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
     void* allocation_base, size_t allocation_length,
     v8::BackingStore::DeleterCallback deleter, void* deleter_data,
     SharedFlag shared) {
-  DCHECK(IsValidBackingStorePointer(allocation_base));
   bool is_empty_deleter = (deleter == v8::BackingStore::EmptyDeleter);
   auto result = new BackingStore(allocation_base,    // start
                                  allocation_length,  // length
@@ -41,7 +41,6 @@ DEF_GETTER(JSArrayBuffer, backing_store, void*) {
 }
 
 void JSArrayBuffer::set_backing_store(Isolate* isolate, void* value) {
-  DCHECK(IsValidBackingStorePointer(value));
   Address addr = reinterpret_cast<Address>(value);
   WriteSandboxedPointerField(kBackingStoreOffset, isolate, addr);
 }
@@ -255,7 +254,6 @@ DEF_GETTER(JSTypedArray, external_pointer, Address) {
 }
 
 void JSTypedArray::set_external_pointer(Isolate* isolate, Address value) {
-  DCHECK(IsValidBackingStorePointer(reinterpret_cast<void*>(value)));
   WriteSandboxedPointerField(kExternalPointerOffset, isolate, value);
 }
 
@@ -371,7 +369,6 @@ DEF_GETTER(JSDataView, data_pointer, void*) {
 }
 
 void JSDataView::set_data_pointer(Isolate* isolate, void* ptr) {
-  DCHECK(IsValidBackingStorePointer(ptr));
   Address value = reinterpret_cast<Address>(ptr);
   WriteSandboxedPointerField(kDataPointerOffset, isolate, value);
 }
@@ -76,7 +76,6 @@ void JSArrayBuffer::Attach(std::shared_ptr<BackingStore> backing_store) {
       !backing_store->is_wasm_memory() && !backing_store->is_resizable(),
       backing_store->byte_length() == backing_store->max_byte_length());
   DCHECK(!was_detached());
-  DCHECK(IsValidBackingStorePointer(backing_store->buffer_start()));
   Isolate* isolate = GetIsolate();
 
   if (backing_store->IsEmpty()) {
@@ -141,10 +141,20 @@ bool Sandbox::Initialize(v8::VirtualAddressSpace* vas) {
     return InitializeAsPartiallyReservedSandbox(vas, sandbox_size,
                                                 size_to_reserve);
   } else {
-    // TODO(saelo) if this fails, we could still fall back to creating a
-    // partially reserved sandbox. Decide if that would make sense.
     const bool use_guard_regions = true;
-    return Initialize(vas, sandbox_size, use_guard_regions);
+    bool success = Initialize(vas, sandbox_size, use_guard_regions);
+#ifdef V8_SANDBOXED_POINTERS
+    // If sandboxed pointers are enabled, we need the sandbox to be initialized,
+    // so fall back to creating a partially reserved sandbox.
+    if (!success) {
+      // Instead of going for the minimum reservation size directly, we could
+      // also first try a couple of larger reservation sizes if that is deemed
+      // sensible in the future.
+      success = InitializeAsPartiallyReservedSandbox(
+          vas, sandbox_size, kSandboxMinimumReservationSize);
+    }
+#endif  // V8_SANDBOXED_POINTERS
+    return success;
   }
 }
 
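As the comments above suggest, a partially-reserved sandbox reserves only a
prefix of the sandbox's virtual address range (kSandboxMinimumReservationSize
here) rather than the full range, trading weaker guarantees for a much
smaller reservation that is more likely to succeed. The resulting control
flow, condensed into a self-contained sketch (FullyReserve, PartiallyReserve,
and the size constant are hypothetical stand-ins for the two Initialize paths
above):

    #include <cstddef>

    // Hypothetical stand-ins; trivially defined so the sketch compiles.
    static bool FullyReserve(bool /*use_guard_regions*/) { return false; }
    static bool PartiallyReserve(size_t /*reservation_size*/) { return true; }
    constexpr size_t kMinimumReservationSketch = 1ull << 30;  // assumed value

    bool InitializeWithFallback() {
      // Preferred path: reserve the whole sandbox plus guard regions.
      if (FullyReserve(/*use_guard_regions=*/true)) return true;
      // Fallback: reserve only a minimum-size prefix so that sandboxed
      // pointers still have an initialized sandbox to point into.
      return PartiallyReserve(kMinimumReservationSketch);
    }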
@@ -179,16 +179,6 @@ class V8_EXPORT_PRIVATE Sandbox {
 V8_EXPORT_PRIVATE Sandbox* GetProcessWideSandbox();
 #endif
 
-V8_INLINE bool IsValidBackingStorePointer(void* ptr) {
-#ifdef V8_SANDBOX
-  Address addr = reinterpret_cast<Address>(ptr);
-  return kAllowBackingStoresOutsideSandbox || addr == kNullAddress ||
-         GetProcessWideSandbox()->Contains(addr);
-#else
-  return true;
-#endif
-}
-
 V8_INLINE void* EmptyBackingStoreBuffer() {
 #ifdef V8_SANDBOXED_POINTERS
   return reinterpret_cast<void*>(
@@ -31,7 +31,7 @@ V8_INLINE void WriteSandboxedPointerField(Address field_address,
                                           Address pointer) {
 #ifdef V8_SANDBOXED_POINTERS
   // The pointer must point into the sandbox.
-  DCHECK(GetProcessWideSandbox()->Contains(pointer));
+  CHECK(GetProcessWideSandbox()->Contains(pointer));
 
   Address offset = pointer - cage_base.address();
   SandboxedPointer_t sandboxed_pointer = offset << kSandboxedPointerShift;
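This is the one remaining validation point: every sandboxed-pointer write now
CHECKs containment in all build modes, which is what makes the removed
per-call-site IsValidBackingStorePointer DCHECKs redundant. The encoding
itself stores a shifted offset from the cage base, as the last two lines of
the hunk show; here is a self-contained sketch of the assumed encode/decode
pair (the shift value and the decode direction are assumptions for
illustration, not V8's actual constants):

    #include <cassert>
    #include <cstdint>

    constexpr int kSandboxedPointerShiftSketch = 24;  // illustrative value

    // Encode: offset from the sandbox/cage base, shifted left, mirroring
    // WriteSandboxedPointerField above.
    uint64_t Encode(uintptr_t pointer, uintptr_t base) {
      return static_cast<uint64_t>(pointer - base)
             << kSandboxedPointerShiftSketch;
    }

    // Decode (assumed inverse): the unsigned right shift bounds the offset,
    // so even a corrupted field yields an address at a bounded distance
    // from the sandbox base rather than an arbitrary pointer.
    uintptr_t Decode(uint64_t sandboxed, uintptr_t base) {
      return base +
             static_cast<uintptr_t>(sandboxed >> kSandboxedPointerShiftSketch);
    }

    int main() {
      uintptr_t base = 0x400000000000;  // hypothetical sandbox base
      uintptr_t ptr = base + 0x1000;
      assert(Decode(Encode(ptr, base), base) == ptr);  // round-trips
      return 0;
    }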