[platform] Introduce AddressSpaceReservation API

This low-level API implements virtual address space reservations on the
supported platforms. An AddressSpaceReservation provides functionality
similar to the global page management APIs in the OS class, but operates
inside a contiguous region of previously reserved virtual address space.
A reservation is backed by regular mmap mappings on POSIX, by placeholder
mappings on Windows, and by VMARs on Fuchsia.
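
For orientation, here is a minimal usage sketch of the new API (illustrative
only, not part of this change). It assumes a caller with friend access to
OS's private memory management API, such as a PageAllocator, uses CHECK from
src/base/logging.h, and on Windows assumes OS::EnsureWin32MemoryAPILoaded()
has already run. The whole reservation is allocated in one call so that, on
Windows, the single backing placeholder mapping is replaced in its entirety:

#include <cstring>

#include "src/base/logging.h"
#include "src/base/platform/platform.h"

namespace v8 {
namespace base {

void ReservationSketch() {
  if (!OS::CanReserveAddressSpace()) return;
  size_t size = 16 * OS::AllocatePageSize();
  // Reserve a page-aligned region with no initial access rights.
  Optional<AddressSpaceReservation> reservation =
      OS::CreateAddressSpaceReservation(nullptr, size, OS::AllocatePageSize(),
                                        OS::MemoryPermission::kNoAccess);
  if (!reservation) return;
  // Back the entire reserved region with accessible read-write pages.
  if (reservation->Allocate(reservation->base(), size,
                            OS::MemoryPermission::kReadWrite)) {
    memset(reservation->base(), 0, size);  // The memory is now usable.
    CHECK(reservation->Free(reservation->base(), size));
  }
  CHECK(OS::FreeAddressSpaceReservation(*reservation));
}

}  // namespace base
}  // namespace v8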

Bug: chromium:1218005
Change-Id: I99bc6bcbc26eb4aa3b54a31c671c9e06e92c471b
Cq-Include-Trybots: luci.v8.try:v8_linux64_heap_sandbox_dbg_ng,v8_linux_arm64_sim_heap_sandbox_dbg_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3270540
Commit-Queue: Samuel Groß <saelo@chromium.org>
Reviewed-by: Victor Gomes <victorgomes@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78130}
4 changed files with 545 additions and 83 deletions

src/base/platform/platform-fuchsia.cc

@@ -34,19 +34,35 @@ zx_vm_option_t GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
UNREACHABLE();
}
} // namespace
TimezoneCache* OS::CreateTimezoneCache() {
return new PosixDefaultTimezoneCache();
// Determine ZX_VM_ALIGN_X constant corresponding to the specified alignment.
// Returns 0 if there is none.
zx_vm_option_t GetAlignmentOptionFromAlignment(size_t alignment) {
// The alignment must be one of the ZX_VM_ALIGN_X constants.
// See zircon/system/public/zircon/types.h.
static_assert(
ZX_VM_ALIGN_1KB == (10 << ZX_VM_ALIGN_BASE),
"Fuchsia's ZX_VM_ALIGN_1KB constant doesn't match expected value");
static_assert(
ZX_VM_ALIGN_4GB == (32 << ZX_VM_ALIGN_BASE),
"Fuchsia's ZX_VM_ALIGN_4GB constant doesn't match expected value");
zx_vm_option_t alignment_log2 = 0;
for (int shift = 10; shift <= 32; shift++) {
if (alignment == (size_t{1} << shift)) {
alignment_log2 = shift;
break;
}
}
return alignment_log2 << ZX_VM_ALIGN_BASE;
}
// static
void* OS::Allocate(void* address, size_t size, size_t alignment,
OS::MemoryPermission access) {
size_t page_size = OS::AllocatePageSize();
void* AllocateInternal(const zx::vmar& vmar, size_t page_size,
size_t vmar_offset, bool vmar_offset_is_hint,
size_t size, size_t alignment,
OS::MemoryPermission access) {
DCHECK_EQ(0, size % page_size);
DCHECK_EQ(0, alignment % page_size);
address = AlignedAddress(address, alignment);
DCHECK_EQ(0, vmar_offset % alignment);
// Add the maximum misalignment so we are guaranteed an aligned base address.
size_t request_size = size + (alignment - page_size);
@@ -68,20 +84,19 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
zx_vm_option_t options = GetProtectionFromMemoryPermission(access);
uint64_t vmar_offset = 0;
if (address) {
vmar_offset = reinterpret_cast<uint64_t>(address);
if (vmar_offset != 0) {
options |= ZX_VM_SPECIFIC;
}
zx_vaddr_t reservation;
zx_status_t status = zx::vmar::root_self()->map(options, vmar_offset, vmo, 0,
request_size, &reservation);
if (status != ZX_OK && address != nullptr) {
// Retry without the hint, if we supplied one.
zx_status_t status =
vmar.map(options, vmar_offset, vmo, 0, request_size, &reservation);
if (status != ZX_OK && vmar_offset != 0 && vmar_offset_is_hint) {
// If a vmar_offset was specified and the allocation failed (for example,
// because the offset overlapped another mapping), retry without the
// vmar_offset if it was only meant to be a hint.
options &= ~(ZX_VM_SPECIFIC);
status = zx::vmar::root_self()->map(options, 0, vmo, 0, request_size,
&reservation);
status = vmar.map(options, 0, vmo, 0, request_size, &reservation);
}
if (status != ZX_OK) {
return nullptr;
@@ -95,8 +110,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
if (aligned_base != base) {
DCHECK_LT(base, aligned_base);
size_t prefix_size = static_cast<size_t>(aligned_base - base);
zx::vmar::root_self()->unmap(reinterpret_cast<uintptr_t>(base),
prefix_size);
vmar.unmap(reinterpret_cast<uintptr_t>(base), prefix_size);
request_size -= prefix_size;
}
@@ -105,8 +119,8 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
if (aligned_size != request_size) {
DCHECK_LT(aligned_size, request_size);
size_t suffix_size = request_size - aligned_size;
zx::vmar::root_self()->unmap(
reinterpret_cast<uintptr_t>(aligned_base + aligned_size), suffix_size);
vmar.unmap(reinterpret_cast<uintptr_t>(aligned_base + aligned_size),
suffix_size);
request_size -= suffix_size;
}
@@ -114,39 +128,106 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
return static_cast<void*>(aligned_base);
}
// static
bool OS::Free(void* address, const size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize());
DCHECK_EQ(0, size % AllocatePageSize());
return zx::vmar::root_self()->unmap(reinterpret_cast<uintptr_t>(address),
size) == ZX_OK;
bool FreeInternal(const zx::vmar& vmar, size_t page_size, void* address,
const size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % page_size);
DCHECK_EQ(0, size % page_size);
return vmar.unmap(reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
}
bool SetPermissionsInternal(const zx::vmar& vmar, size_t page_size,
void* address, size_t size,
OS::MemoryPermission access) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % page_size);
DCHECK_EQ(0, size % page_size);
uint32_t prot = GetProtectionFromMemoryPermission(access);
return vmar.protect(prot, reinterpret_cast<uintptr_t>(address), size) ==
ZX_OK;
}
bool DiscardSystemPagesInternal(const zx::vmar& vmar, size_t page_size,
void* address, size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % page_size);
DCHECK_EQ(0, size % page_size);
uint64_t address_int = reinterpret_cast<uint64_t>(address);
return vmar.op_range(ZX_VMO_OP_DECOMMIT, address_int, size, nullptr, 0) ==
ZX_OK;
}
zx_status_t CreateAddressSpaceReservationInternal(
const zx::vmar& vmar, size_t page_size, size_t vmar_offset,
bool vmar_offset_is_hint, size_t size, size_t alignment,
OS::MemoryPermission max_permission, zx::vmar* child,
zx_vaddr_t* child_addr) {
DCHECK_EQ(0, size % page_size);
DCHECK_GE(alignment, page_size);
DCHECK_EQ(0, alignment % page_size);
DCHECK_EQ(0, vmar_offset % alignment);
// TODO(v8) determine these based on max_permission.
zx_vm_option_t options = ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE |
ZX_VM_CAN_MAP_EXECUTE | ZX_VM_CAN_MAP_SPECIFIC;
zx_vm_option_t alignment_option = GetAlignmentOptionFromAlignment(alignment);
if (!alignment_option) {
return ZX_ERR_INVALID_ARGS;
}
options |= alignment_option;
if (vmar_offset != 0) {
options |= ZX_VM_SPECIFIC;
}
zx_status_t status =
vmar.allocate(options, vmar_offset, size, child, child_addr);
if (status != ZX_OK && vmar_offset != 0 && vmar_offset_is_hint) {
// If a vmar_offset was specified and the allocation failed (for example,
// because the offset overlapped another mapping), retry without the
// vmar_offset if it was only meant to be a hint.
options &= ~(ZX_VM_SPECIFIC);
status = vmar.allocate(options, 0, size, child, child_addr);
}
return status;
}
} // namespace
TimezoneCache* OS::CreateTimezoneCache() {
return new PosixDefaultTimezoneCache();
}
// static
bool OS::Release(void* address, size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
DCHECK_EQ(0, size % CommitPageSize());
return zx::vmar::root_self()->unmap(reinterpret_cast<uintptr_t>(address),
size) == ZX_OK;
void* OS::Allocate(void* address, size_t size, size_t alignment,
MemoryPermission access) {
constexpr bool vmar_offset_is_hint = true;
return AllocateInternal(*zx::vmar::root_self(), AllocatePageSize(),
reinterpret_cast<uint64_t>(address),
vmar_offset_is_hint, size, alignment, access);
}
// static
bool OS::Free(void* address, const size_t size) {
return FreeInternal(*zx::vmar::root_self(), AllocatePageSize(), address,
size);
}
// static
bool OS::Release(void* address, size_t size) { return Free(address, size); }
// static
bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
DCHECK_EQ(0, size % CommitPageSize());
uint32_t prot = GetProtectionFromMemoryPermission(access);
return zx::vmar::root_self()->protect(
prot, reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
return SetPermissionsInternal(*zx::vmar::root_self(), CommitPageSize(),
address, size, access);
}
// static
bool OS::DiscardSystemPages(void* address, size_t size) {
uint64_t address_int = reinterpret_cast<uint64_t>(address);
zx_status_t status = zx::vmar::root_self()->op_range(
ZX_VMO_OP_DECOMMIT, address_int, size, nullptr, 0);
return status == ZX_OK;
return DiscardSystemPagesInternal(*zx::vmar::root_self(), CommitPageSize(),
address, size);
}
// static
bool OS::DecommitPages(void* address, size_t size) {
// We rely on DiscardSystemPages decommitting the pages immediately (via
// ZX_VMO_OP_DECOMMIT) so that they are guaranteed to be zero-initialized
@@ -155,6 +236,33 @@ bool OS::DecommitPages(void* address, size_t size) {
DiscardSystemPages(address, size);
}
// static
bool OS::CanReserveAddressSpace() { return true; }
// static
Optional<AddressSpaceReservation> OS::CreateAddressSpaceReservation(
void* hint, size_t size, size_t alignment,
MemoryPermission max_permission) {
zx::vmar child;
zx_vaddr_t child_addr;
uint64_t vmar_offset = reinterpret_cast<uint64_t>(hint);
constexpr bool vmar_offset_is_hint = true;
zx_status_t status = CreateAddressSpaceReservationInternal(
*zx::vmar::root_self(), AllocatePageSize(), vmar_offset,
vmar_offset_is_hint, size, alignment, max_permission, &child,
&child_addr);
if (status != ZX_OK) return {};
return AddressSpaceReservation(reinterpret_cast<void*>(child_addr), size,
child.release());
}
// static
bool OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) {
// Destroy the vmar and release the handle.
zx::vmar vmar(reservation.vmar_);
return vmar.destroy() == ZX_OK;
}
// static
bool OS::HasLazyCommits() { return true; }
@@ -194,5 +302,74 @@ std::vector<OS::MemoryRange> OS::GetFreeMemoryRangesWithin(
return {};
}
Optional<AddressSpaceReservation> AddressSpaceReservation::CreateSubReservation(
void* address, size_t size, OS::MemoryPermission max_permission) {
DCHECK(Contains(address, size));
zx::vmar child;
zx_vaddr_t child_addr;
size_t vmar_offset = 0;
if (address != 0) {
vmar_offset =
reinterpret_cast<size_t>(address) - reinterpret_cast<size_t>(base());
}
constexpr bool vmar_offset_is_hint = false;
zx_status_t status = CreateAddressSpaceReservationInternal(
*zx::unowned_vmar(vmar_), OS::AllocatePageSize(), vmar_offset,
vmar_offset_is_hint, size, OS::AllocatePageSize(), max_permission, &child,
&child_addr);
if (status != ZX_OK) return {};
DCHECK_EQ(reinterpret_cast<void*>(child_addr), address);
return AddressSpaceReservation(reinterpret_cast<void*>(child_addr), size,
child.release());
}
bool AddressSpaceReservation::FreeSubReservation(
AddressSpaceReservation reservation) {
return OS::FreeAddressSpaceReservation(reservation);
}
bool AddressSpaceReservation::Allocate(void* address, size_t size,
OS::MemoryPermission access) {
DCHECK(Contains(address, size));
size_t vmar_offset = 0;
if (address != 0) {
vmar_offset =
reinterpret_cast<size_t>(address) - reinterpret_cast<size_t>(base());
}
constexpr bool vmar_offset_is_hint = false;
void* allocation = AllocateInternal(
*zx::unowned_vmar(vmar_), OS::AllocatePageSize(), vmar_offset,
vmar_offset_is_hint, size, OS::AllocatePageSize(), access);
DCHECK(!allocation || allocation == address);
return allocation != nullptr;
}
bool AddressSpaceReservation::Free(void* address, size_t size) {
DCHECK(Contains(address, size));
return FreeInternal(*zx::unowned_vmar(vmar_), OS::AllocatePageSize(), address,
size);
}
bool AddressSpaceReservation::SetPermissions(void* address, size_t size,
OS::MemoryPermission access) {
DCHECK(Contains(address, size));
return SetPermissionsInternal(*zx::unowned_vmar(vmar_), OS::CommitPageSize(),
address, size, access);
}
bool AddressSpaceReservation::DiscardSystemPages(void* address, size_t size) {
DCHECK(Contains(address, size));
return DiscardSystemPagesInternal(*zx::unowned_vmar(vmar_),
OS::CommitPageSize(), address, size);
}
bool AddressSpaceReservation::DecommitPages(void* address, size_t size) {
DCHECK(Contains(address, size));
// See comment in OS::DecommitPages.
return SetPermissions(address, size, OS::MemoryPermission::kNoAccess) &&
DiscardSystemPages(address, size);
}
} // namespace base
} // namespace v8

src/base/platform/platform-posix.cc

@@ -467,6 +467,7 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
return ret == 0;
}
// static
bool OS::DiscardSystemPages(void* address, size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
DCHECK_EQ(0, size % CommitPageSize());
@@ -495,6 +496,7 @@ bool OS::DiscardSystemPages(void* address, size_t size) {
return ret == 0;
}
// static
bool OS::DecommitPages(void* address, size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
DCHECK_EQ(0, size % CommitPageSize());
@@ -509,6 +511,30 @@ bool OS::DecommitPages(void* address, size_t size) {
return ptr == address;
}
// static
bool OS::CanReserveAddressSpace() { return true; }
// static
Optional<AddressSpaceReservation> OS::CreateAddressSpaceReservation(
void* hint, size_t size, size_t alignment,
MemoryPermission max_permission) {
// On POSIX, address space reservations are backed by private memory mappings.
MemoryPermission permission = MemoryPermission::kNoAccess;
if (max_permission == MemoryPermission::kReadWriteExecute) {
permission = MemoryPermission::kNoAccessWillJitLater;
}
void* reservation = Allocate(hint, size, alignment, permission);
if (!reservation) return {};
return AddressSpaceReservation(reservation, size);
}
// static
bool OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) {
return Free(reservation.base(), reservation.size());
}
// static
bool OS::HasLazyCommits() {
#if V8_OS_AIX || V8_OS_LINUX || V8_OS_MACOSX
@@ -823,6 +849,57 @@ void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
strncpy(dest, src, n);
}
// ----------------------------------------------------------------------------
// POSIX Address space reservation support.
//
#if !V8_OS_CYGWIN && !V8_OS_FUCHSIA
Optional<AddressSpaceReservation> AddressSpaceReservation::CreateSubReservation(
void* address, size_t size, OS::MemoryPermission max_permission) {
DCHECK(Contains(address, size));
DCHECK_EQ(0, size % OS::AllocatePageSize());
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % OS::AllocatePageSize());
return AddressSpaceReservation(address, size);
}
bool AddressSpaceReservation::FreeSubReservation(
AddressSpaceReservation reservation) {
// Nothing to do.
// Pages allocated inside the reservation must've already been freed.
return true;
}
bool AddressSpaceReservation::Allocate(void* address, size_t size,
OS::MemoryPermission access) {
// The region is already mmap'ed, so it just has to be made accessible now.
DCHECK(Contains(address, size));
return OS::SetPermissions(address, size, access);
}
bool AddressSpaceReservation::Free(void* address, size_t size) {
DCHECK(Contains(address, size));
return OS::DecommitPages(address, size);
}
bool AddressSpaceReservation::SetPermissions(void* address, size_t size,
OS::MemoryPermission access) {
DCHECK(Contains(address, size));
return OS::SetPermissions(address, size, access);
}
bool AddressSpaceReservation::DiscardSystemPages(void* address, size_t size) {
DCHECK(Contains(address, size));
return OS::DiscardSystemPages(address, size);
}
bool AddressSpaceReservation::DecommitPages(void* address, size_t size) {
DCHECK(Contains(address, size));
return OS::DecommitPages(address, size);
}
#endif // !V8_OS_CYGWIN && !V8_OS_FUCHSIA
// ----------------------------------------------------------------------------
// POSIX thread support.

src/base/platform/platform-win32.cc

@@ -722,6 +722,20 @@ void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
g_hard_abort = hard_abort;
}
typedef PVOID (*VirtualAlloc2_t)(HANDLE, PVOID, SIZE_T, ULONG, ULONG,
MEM_EXTENDED_PARAMETER*, ULONG);
VirtualAlloc2_t VirtualAlloc2;
void OS::EnsureWin32MemoryAPILoaded() {
static bool loaded = false;
if (!loaded) {
VirtualAlloc2 = (VirtualAlloc2_t)GetProcAddress(
GetModuleHandle(L"kernelbase.dll"), "VirtualAlloc2");
loaded = true;
}
}
// static
size_t OS::AllocatePageSize() {
static size_t allocate_alignment = 0;
@@ -801,6 +815,14 @@ DWORD GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
UNREACHABLE();
}
void* VirtualAllocWrapper(void* hint, size_t size, DWORD flags, DWORD protect) {
if (VirtualAlloc2) {
return VirtualAlloc2(nullptr, hint, size, flags, protect, NULL, 0);
} else {
return VirtualAlloc(hint, size, flags, protect);
}
}
uint8_t* RandomizedVirtualAlloc(size_t size, DWORD flags, DWORD protect,
void* hint) {
LPVOID base = nullptr;
@@ -816,16 +838,57 @@ uint8_t* RandomizedVirtualAlloc(size_t size, DWORD flags, DWORD protect,
if (use_aslr && protect != PAGE_READWRITE) {
// For executable or reserved pages try to randomize the allocation address.
base = VirtualAlloc(hint, size, flags, protect);
base = VirtualAllocWrapper(hint, size, flags, protect);
}
// On failure, let the OS find an address to use.
if (base == nullptr) {
base = VirtualAlloc(nullptr, size, flags, protect);
base = VirtualAllocWrapper(nullptr, size, flags, protect);
}
return reinterpret_cast<uint8_t*>(base);
}
void* AllocateInternal(void* hint, size_t size, size_t alignment,
size_t page_size, DWORD flags, DWORD protect) {
// First, try an exact size aligned allocation.
uint8_t* base = RandomizedVirtualAlloc(size, flags, protect, hint);
if (base == nullptr) return nullptr; // Can't allocate, we're OOM.
// If address is suitably aligned, we're done.
uint8_t* aligned_base = reinterpret_cast<uint8_t*>(
RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
if (base == aligned_base) return reinterpret_cast<void*>(base);
// Otherwise, free it and try a larger allocation.
CHECK(VirtualFree(base, 0, MEM_RELEASE));
// Clear the hint. It's unlikely we can allocate at this address.
hint = nullptr;
// Add the maximum misalignment so we are guaranteed an aligned base address
// in the allocated region.
size_t padded_size = size + (alignment - page_size);
const int kMaxAttempts = 3;
aligned_base = nullptr;
for (int i = 0; i < kMaxAttempts; ++i) {
base = RandomizedVirtualAlloc(padded_size, flags, protect, hint);
if (base == nullptr) return nullptr; // Can't allocate, we're OOM.
// Try to trim the allocation by freeing the padded allocation and then
// calling VirtualAlloc at the aligned base.
CHECK(VirtualFree(base, 0, MEM_RELEASE));
aligned_base = reinterpret_cast<uint8_t*>(
RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
base = reinterpret_cast<uint8_t*>(
VirtualAllocWrapper(aligned_base, size, flags, protect));
// We might not get the reduced allocation due to a race. In that case,
// base will be nullptr.
if (base != nullptr) break;
}
DCHECK_IMPLIES(base, base == aligned_base);
return reinterpret_cast<void*>(base);
}
} // namespace
// static
@@ -842,43 +905,7 @@ void* OS::Allocate(void* hint, size_t size, size_t alignment,
: MEM_RESERVE | MEM_COMMIT;
DWORD protect = GetProtectionFromMemoryPermission(access);
// First, try an exact size aligned allocation.
uint8_t* base = RandomizedVirtualAlloc(size, flags, protect, hint);
if (base == nullptr) return nullptr; // Can't allocate, we're OOM.
// If address is suitably aligned, we're done.
uint8_t* aligned_base = reinterpret_cast<uint8_t*>(
RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
if (base == aligned_base) return reinterpret_cast<void*>(base);
// Otherwise, free it and try a larger allocation.
CHECK(Free(base, size));
// Clear the hint. It's unlikely we can allocate at this address.
hint = nullptr;
// Add the maximum misalignment so we are guaranteed an aligned base address
// in the allocated region.
size_t padded_size = size + (alignment - page_size);
const int kMaxAttempts = 3;
aligned_base = nullptr;
for (int i = 0; i < kMaxAttempts; ++i) {
base = RandomizedVirtualAlloc(padded_size, flags, protect, hint);
if (base == nullptr) return nullptr; // Can't allocate, we're OOM.
// Try to trim the allocation by freeing the padded allocation and then
// calling VirtualAlloc at the aligned base.
CHECK(Free(base, padded_size));
aligned_base = reinterpret_cast<uint8_t*>(
RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
base = reinterpret_cast<uint8_t*>(
VirtualAlloc(aligned_base, size, flags, protect));
// We might not get the reduced allocation due to a race. In that case,
// base will be nullptr.
if (base != nullptr) break;
}
DCHECK_IMPLIES(base, base == aligned_base);
return reinterpret_cast<void*>(base);
return AllocateInternal(hint, size, alignment, page_size, flags, protect);
}
// static
@@ -904,7 +931,7 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
return VirtualFree(address, size, MEM_DECOMMIT) != 0;
}
DWORD protect = GetProtectionFromMemoryPermission(access);
return VirtualAlloc(address, size, MEM_COMMIT, protect) != nullptr;
return VirtualAllocWrapper(address, size, MEM_COMMIT, protect) != nullptr;
}
// static
@@ -929,7 +956,7 @@ bool OS::DiscardSystemPages(void* address, size_t size) {
}
// DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
// failure.
void* ptr = VirtualAlloc(address, size, MEM_RESET, PAGE_READWRITE);
void* ptr = VirtualAllocWrapper(address, size, MEM_RESET, PAGE_READWRITE);
CHECK(ptr);
return ptr;
}
@@ -949,6 +976,35 @@ bool OS::DecommitPages(void* address, size_t size) {
return VirtualFree(address, size, MEM_DECOMMIT) != 0;
}
// static
bool OS::CanReserveAddressSpace() { return VirtualAlloc2 != nullptr; }
// static
Optional<AddressSpaceReservation> OS::CreateAddressSpaceReservation(
void* hint, size_t size, size_t alignment,
MemoryPermission max_permission) {
CHECK(CanReserveAddressSpace());
size_t page_size = AllocatePageSize();
DCHECK_EQ(0, size % page_size);
DCHECK_EQ(0, alignment % page_size);
DCHECK_LE(page_size, alignment);
hint = AlignedAddress(hint, alignment);
// On Windows, address space reservations are backed by placeholder mappings.
void* reservation =
AllocateInternal(hint, size, alignment, page_size,
MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, PAGE_NOACCESS);
if (!reservation) return {};
return AddressSpaceReservation(reservation, size);
}
// static
bool OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) {
return OS::Free(reservation.base(), reservation.size());
}
// static
bool OS::HasLazyCommits() {
// TODO(alph): implement for the platform.
@@ -1068,6 +1124,64 @@ Win32MemoryMappedFile::~Win32MemoryMappedFile() {
CloseHandle(file_);
}
Optional<AddressSpaceReservation> AddressSpaceReservation::CreateSubReservation(
void* address, size_t size, OS::MemoryPermission max_permission) {
// Nothing to do; the sub-reservation must already have been split by now.
DCHECK(Contains(address, size));
DCHECK_EQ(0, size % OS::AllocatePageSize());
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % OS::AllocatePageSize());
return AddressSpaceReservation(address, size);
}
bool AddressSpaceReservation::FreeSubReservation(
AddressSpaceReservation reservation) {
// Nothing to do.
// Pages allocated inside the reservation must've already been freed.
return true;
}
bool AddressSpaceReservation::SplitPlaceholder(void* address, size_t size) {
DCHECK(Contains(address, size));
return VirtualFree(address, size, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
}
bool AddressSpaceReservation::MergePlaceholders(void* address, size_t size) {
DCHECK(Contains(address, size));
return VirtualFree(address, size, MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS);
}
bool AddressSpaceReservation::Allocate(void* address, size_t size,
OS::MemoryPermission access) {
DCHECK(Contains(address, size));
CHECK(VirtualAlloc2);
DWORD flags = (access == OS::MemoryPermission::kNoAccess)
? MEM_RESERVE | MEM_REPLACE_PLACEHOLDER
: MEM_RESERVE | MEM_COMMIT | MEM_REPLACE_PLACEHOLDER;
DWORD protect = GetProtectionFromMemoryPermission(access);
return VirtualAlloc2(nullptr, address, size, flags, protect, NULL, 0);
}
bool AddressSpaceReservation::Free(void* address, size_t size) {
DCHECK(Contains(address, size));
return VirtualFree(address, size, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
}
bool AddressSpaceReservation::SetPermissions(void* address, size_t size,
OS::MemoryPermission access) {
DCHECK(Contains(address, size));
return OS::SetPermissions(address, size, access);
}
bool AddressSpaceReservation::DiscardSystemPages(void* address, size_t size) {
DCHECK(Contains(address, size));
return OS::DiscardSystemPages(address, size);
}
bool AddressSpaceReservation::DecommitPages(void* address, size_t size) {
DCHECK(Contains(address, size));
return OS::DecommitPages(address, size);
}
// The following code loads functions defined in DbgHelp.h and TlHelp32.h
// dynamically. This is to avoid depending on dbghelp.dll and

src/base/platform/platform.h

@@ -29,6 +29,7 @@
#include "src/base/base-export.h"
#include "src/base/build_config.h"
#include "src/base/compiler-specific.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
@@ -36,6 +37,10 @@
#include "src/base/qnx-math.h"
#endif
#if V8_OS_FUCHSIA
#include <zircon/types.h>
#endif // V8_OS_FUCHSIA
#ifdef V8_USE_ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif // V8_USE_ADDRESS_SANITIZER
@@ -115,6 +120,7 @@ inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
#endif // V8_NO_FAST_TLS
class AddressSpaceReservation;
class PageAllocator;
class TimezoneCache;
@@ -132,6 +138,17 @@ class V8_BASE_EXPORT OS {
// - gc_fake_mmap: Name of the file for fake gc mmap used in ll_prof.
static void Initialize(bool hard_abort, const char* const gc_fake_mmap);
#if V8_OS_WIN
// On Windows, ensure the newer memory API is loaded if available. This
// includes functions like VirtualAlloc2 and MapViewOfFile3.
// TODO(chromium:1218005) this should probably happen as part of Initialize,
// but that is currently invoked too late, after the virtual memory cage
// is initialized. However, eventually the virtual memory cage initialization
// will happen as part of V8::Initialize, at which point this function can
// probably be merged into OS::Initialize.
static void EnsureWin32MemoryAPILoaded();
#endif
// Returns the accumulated user time for thread. This routine
// can be used for profiling. The implementation should
// strive for high-precision timer resolution, preferable
@@ -291,6 +308,7 @@ class V8_BASE_EXPORT OS {
private:
// These classes use the private memory management API below.
friend class AddressSpaceReservation;
friend class MemoryMappedFile;
friend class PosixMemoryMappedFile;
friend class v8::base::PageAllocator;
@@ -326,6 +344,15 @@ class V8_BASE_EXPORT OS {
V8_WARN_UNUSED_RESULT static bool DecommitPages(void* address, size_t size);
V8_WARN_UNUSED_RESULT static bool CanReserveAddressSpace();
V8_WARN_UNUSED_RESULT static Optional<AddressSpaceReservation>
CreateAddressSpaceReservation(void* hint, size_t size, size_t alignment,
MemoryPermission max_permission);
V8_WARN_UNUSED_RESULT static bool FreeAddressSpaceReservation(
AddressSpaceReservation reservation);
static const int msPerSecond = 1000;
#if V8_OS_POSIX
@@ -347,6 +374,73 @@ inline void EnsureConsoleOutput() {
#endif // (defined(_WIN32) || defined(_WIN64))
}
// ----------------------------------------------------------------------------
// AddressSpaceReservation
//
// This class provides the same memory management functions as OS but operates
// inside a previously reserved contiguous region of virtual address space.
class V8_BASE_EXPORT AddressSpaceReservation {
public:
using Address = uintptr_t;
void* base() const { return base_; }
size_t size() const { return size_; }
bool Contains(void* region_addr, size_t region_size) const {
Address base = reinterpret_cast<Address>(base_);
Address region_base = reinterpret_cast<Address>(region_addr);
return (region_base >= base) &&
((region_base + region_size) <= (base + size_));
}
V8_WARN_UNUSED_RESULT bool Allocate(void* address, size_t size,
OS::MemoryPermission access);
V8_WARN_UNUSED_RESULT bool Free(void* address, size_t size);
V8_WARN_UNUSED_RESULT bool SetPermissions(void* address, size_t size,
OS::MemoryPermission access);
V8_WARN_UNUSED_RESULT bool DiscardSystemPages(void* address, size_t size);
V8_WARN_UNUSED_RESULT bool DecommitPages(void* address, size_t size);
V8_WARN_UNUSED_RESULT Optional<AddressSpaceReservation> CreateSubReservation(
void* address, size_t size, OS::MemoryPermission max_permission);
V8_WARN_UNUSED_RESULT static bool FreeSubReservation(
AddressSpaceReservation reservation);
#if V8_OS_WIN
// On Windows, the placeholder mappings backing address space reservations
// need to be split and merged as page allocations can only replace an entire
// placeholder mapping, not parts of it. This must be done by the users of
// this API as it requires a RegionAllocator (or equivalent) to keep track of
// sub-regions and decide when to split and when to coalesce multiple free
// regions into a single one.
V8_WARN_UNUSED_RESULT bool SplitPlaceholder(void* address, size_t size);
V8_WARN_UNUSED_RESULT bool MergePlaceholders(void* address, size_t size);
#endif // V8_OS_WIN
private:
friend class OS;
#if V8_OS_FUCHSIA
AddressSpaceReservation(void* base, size_t size, zx_handle_t vmar)
: base_(base), size_(size), vmar_(vmar) {}
#else
AddressSpaceReservation(void* base, size_t size) : base_(base), size_(size) {}
#endif // V8_OS_FUCHSIA
void* base_ = nullptr;
size_t size_ = 0;
#if V8_OS_FUCHSIA
// On Fuchsia, address space reservations are backed by VMARs.
zx_handle_t vmar_ = ZX_HANDLE_INVALID;
#endif // V8_OS_FUCHSIA
};
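
To make the placeholder rules documented in the class above concrete, here is
a hedged, Windows-only sketch (AllocateFirstPage is illustrative and not part
of this change; it assumes the reservation spans more than one page):

// Carve one committed read-write page out of a larger Windows reservation.
// Allocate() can only replace an entire placeholder, so the reservation's
// single placeholder is first split at the page boundary. After a later
// Free() (which preserves the placeholder), MergePlaceholders() can
// coalesce the region back into one placeholder.
bool AllocateFirstPage(v8::base::AddressSpaceReservation* reservation) {
  size_t page_size = v8::base::OS::AllocatePageSize();
  return reservation->SplitPlaceholder(reservation->base(), page_size) &&
         reservation->Allocate(reservation->base(), page_size,
                               v8::base::OS::MemoryPermission::kReadWrite);
}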
// ----------------------------------------------------------------------------
// Thread
//