[Memory] Create memory management API in v8::internal.

- Creates a memory management API in v8::internal, which corresponds
  to the existing one in base::OS.
- Implements the new API in terms of the old one.
- Changes all usage of the base::OS API to the one in v8::internal. This
  includes all tests, except platform and OS tests.
- Makes OS:: methods private.
- Moves all LSAN calls into the v8::internal functions.

Bug: chromium:756050
Cq-Include-Trybots: master.tryserver.chromium.linux:linux_chromium_rel_ng
Change-Id: Iaa3f022e3e12fdebf937f3c76b6c6455014beb8a
Reviewed-on: https://chromium-review.googlesource.com/794856
Commit-Queue: Bill Budge <bbudge@chromium.org>
Reviewed-by: Eric Holk <eholk@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50139}
This commit is contained in:
Bill Budge 2017-12-15 09:59:57 -08:00 committed by Commit Bot
parent 93b3397e52
commit a449f09fad
32 changed files with 472 additions and 384 deletions

View File

@ -103,10 +103,101 @@ void AlignedFree(void *ptr) {
#endif
}
byte* AllocateSystemPage(void* address, size_t* allocated) {
size_t page_size = base::OS::AllocatePageSize();
void* result = base::OS::Allocate(address, page_size, page_size,
base::OS::MemoryPermission::kReadWrite);
// Verify at compile time that v8::internal::MemoryPermission stays
// numerically identical to base::OS::MemoryPermission: the implementation
// below converts between the two enums with a plain static_cast, which is
// only sound if every enumerator has the same underlying value.
#define STATIC_ASSERT_ENUM(a, b)                            \
  static_assert(static_cast<int>(a) == static_cast<int>(b), \
                "mismatching enum: " #a)
STATIC_ASSERT_ENUM(MemoryPermission::kNoAccess,
                   base::OS::MemoryPermission::kNoAccess);
STATIC_ASSERT_ENUM(MemoryPermission::kReadWrite,
                   base::OS::MemoryPermission::kReadWrite);
STATIC_ASSERT_ENUM(MemoryPermission::kReadWriteExecute,
                   base::OS::MemoryPermission::kReadWriteExecute);
STATIC_ASSERT_ENUM(MemoryPermission::kReadExecute,
                   base::OS::MemoryPermission::kReadExecute);
#undef STATIC_ASSERT_ENUM
// Default Memory Manager.
//
// Implements the v8::internal page-management API (see allocation.h) in
// terms of base::OS, and centralizes all LeakSanitizer root-region
// bookkeeping so individual callers no longer need LSAN #ifdefs.
// TODO(bbudge) Move this to libplatform.
class DefaultMemoryManager {
 public:
  static size_t AllocatePageSize() { return base::OS::AllocatePageSize(); }
  static size_t CommitPageSize() { return base::OS::CommitPageSize(); }
  static void* GetRandomMmapAddr() { return base::OS::GetRandomMmapAddr(); }

  // Allocates |size| bytes at |alignment| with the given permissions;
  // |address| is a hint. Returns nullptr on failure. On success the region
  // is registered as an LSan root so pointers stored in it keep their
  // referents alive in leak reports.
  static void* AllocatePages(void* address, size_t size, size_t alignment,
                             MemoryPermission access) {
    void* result =
        base::OS::Allocate(address, size, alignment,
                           static_cast<base::OS::MemoryPermission>(access));
#if defined(LEAK_SANITIZER)
    if (result != nullptr) {
      __lsan_register_root_region(result, size);
    }
#endif
    return result;
  }

  // Frees a region obtained from AllocatePages and, on success, removes its
  // LSan root registration.
  static bool FreePages(void* address, const size_t size) {
    bool result = base::OS::Free(address, size);
#if defined(LEAK_SANITIZER)
    if (result) {
      __lsan_unregister_root_region(address, size);
    }
#endif
    return result;
  }

  // Shrinks the region [address, address + size) to |new_size| bytes by
  // releasing the tail back to the OS. Requires new_size < size.
  static bool ReleasePages(void* address, size_t size, size_t new_size) {
    DCHECK_LT(new_size, size);
    bool result = base::OS::Release(reinterpret_cast<byte*>(address) + new_size,
                                    size - new_size);
#if defined(LEAK_SANITIZER)
    if (result) {
      // LSan has no "resize": drop the old root region covering the full
      // range and re-register one covering only the retained prefix.
      __lsan_unregister_root_region(address, size);
      __lsan_register_root_region(address, new_size);
    }
#endif
    return result;
  }

  // Changes the protection of [address, address + size) to |access|.
  static bool SetPermissions(void* address, size_t size,
                             MemoryPermission access) {
    return base::OS::SetPermissions(
        address, size, static_cast<base::OS::MemoryPermission>(access));
  }
};
size_t AllocatePageSize() { return DefaultMemoryManager::AllocatePageSize(); }
size_t CommitPageSize() { return DefaultMemoryManager::CommitPageSize(); }
// Generate a random address to be used for hinting allocation calls.
void* GetRandomMmapAddr() { return DefaultMemoryManager::GetRandomMmapAddr(); }
// Allocates |size| bytes aligned to |alignment| with |access| permissions;
// |address| is a hint. Forwards to the default memory manager, which also
// performs LSan root-region registration.
void* AllocatePages(void* address, size_t size, size_t alignment,
                    MemoryPermission access) {
  return DefaultMemoryManager::AllocatePages(address, size, alignment, access);
}
// Frees memory obtained from AllocatePages; returns true on success.
bool FreePages(void* address, const size_t size) {
  return DefaultMemoryManager::FreePages(address, size);
}
// Releases the tail of an allocated region, shrinking it from |size| to
// |new_size| bytes; returns true on success.
bool ReleasePages(void* address, size_t size, size_t new_size) {
  return DefaultMemoryManager::ReleasePages(address, size, new_size);
}
// Sets the protection of [address, address + size) to |access|; returns true
// on success.
bool SetPermissions(void* address, size_t size, MemoryPermission access) {
  return DefaultMemoryManager::SetPermissions(address, size, access);
}
// Convenience helper: allocates a single read/write system page. |address|
// is only a hint. On success, stores the page size in |allocated| and
// returns the base address; on failure, returns nullptr and leaves
// |allocated| untouched.
byte* AllocatePage(void* address, size_t* allocated) {
  const size_t kPageSize = AllocatePageSize();
  byte* page = static_cast<byte*>(AllocatePages(
      address, kPageSize, kPageSize, MemoryPermission::kReadWrite));
  if (page == nullptr) return nullptr;
  *allocated = kPageSize;
  return page;
}
@ -115,15 +206,12 @@ VirtualMemory::VirtualMemory() : address_(nullptr), size_(0) {}
VirtualMemory::VirtualMemory(size_t size, void* hint, size_t alignment)
: address_(nullptr), size_(0) {
size_t page_size = base::OS::AllocatePageSize();
size_t page_size = AllocatePageSize();
size_t alloc_size = RoundUp(size, page_size);
address_ = base::OS::Allocate(hint, alloc_size, alignment,
base::OS::MemoryPermission::kNoAccess);
address_ =
AllocatePages(hint, alloc_size, alignment, MemoryPermission::kNoAccess);
if (address_ != nullptr) {
size_ = alloc_size;
#if defined(LEAK_SANITIZER)
__lsan_register_root_region(address_, size_);
#endif
}
}
@ -139,9 +227,9 @@ void VirtualMemory::Reset() {
}
bool VirtualMemory::SetPermissions(void* address, size_t size,
base::OS::MemoryPermission access) {
MemoryPermission access) {
CHECK(InVM(address, size));
bool result = base::OS::SetPermissions(address, size, access);
bool result = v8::internal::SetPermissions(address, size, access);
DCHECK(result);
USE(result);
return result;
@ -149,8 +237,7 @@ bool VirtualMemory::SetPermissions(void* address, size_t size,
size_t VirtualMemory::Release(void* free_start) {
DCHECK(IsReserved());
DCHECK(IsAddressAligned(static_cast<Address>(free_start),
base::OS::CommitPageSize()));
DCHECK(IsAddressAligned(static_cast<Address>(free_start), CommitPageSize()));
// Notice: Order is important here. The VirtualMemory object might live
// inside the allocated region.
const size_t free_size = size_ - (reinterpret_cast<size_t>(free_start) -
@ -159,11 +246,7 @@ size_t VirtualMemory::Release(void* free_start) {
DCHECK_LT(address_, free_start);
DCHECK_LT(free_start, reinterpret_cast<void*>(
reinterpret_cast<size_t>(address_) + size_));
#if defined(LEAK_SANITIZER)
__lsan_unregister_root_region(address_, size_);
__lsan_register_root_region(address_, size_ - free_size);
#endif
CHECK(base::OS::Release(free_start, free_size));
CHECK(ReleasePages(address_, size_, size_ - free_size));
size_ -= free_size;
return free_size;
}
@ -176,10 +259,7 @@ void VirtualMemory::Free() {
size_t size = size_;
CHECK(InVM(address, size));
Reset();
#if defined(LEAK_SANITIZER)
__lsan_unregister_root_region(address, size);
#endif
CHECK(base::OS::Free(address, size));
CHECK(FreePages(address, size));
}
void VirtualMemory::TakeControl(VirtualMemory* from) {

View File

@ -76,10 +76,61 @@ class FreeStoreAllocationPolicy {
void* AlignedAlloc(size_t size, size_t alignment);
void AlignedFree(void *ptr);
// Allocates a single system memory page with read/write permissions. The
// address parameter is a hint. Returns the base address of the memory, or null
// on failure. Permissions can be changed on the base address.
byte* AllocateSystemPage(void* address, size_t* allocated);
// These must be in sync with the permissions in base/platform/platform.h.
enum class MemoryPermission {
  kNoAccess,          // Pages may not be read, written, or executed.
  kReadWrite,         // Pages may be read and written.
  // TODO(hpayer): Remove this flag. Memory should never be rwx.
  kReadWriteExecute,  // Pages may be read, written, and executed.
  kReadExecute        // Pages may be read and executed, but not written.
};
// Gets the page granularity for AllocatePages and FreePages. Addresses returned
// by AllocatePages and AllocatePage are aligned to this size.
V8_EXPORT_PRIVATE size_t AllocatePageSize();
// Gets the granularity at which the permissions and release calls can be made.
V8_EXPORT_PRIVATE size_t CommitPageSize();
// Generate a random address to be used for hinting allocation calls.
V8_EXPORT_PRIVATE void* GetRandomMmapAddr();
// Allocates memory. Permissions are set according to the access argument.
// |address| is a hint. |size| and |alignment| must be multiples of
// AllocatePageSize(). Returns the address of the allocated memory, with the
// specified size and alignment, or nullptr on failure.
V8_EXPORT_PRIVATE
V8_WARN_UNUSED_RESULT void* AllocatePages(void* address, size_t size,
size_t alignment,
MemoryPermission access);
// Frees memory allocated by a call to AllocatePages. |address| and |size| must
// be multiples of AllocatePageSize(). Returns true on success, otherwise false.
V8_EXPORT_PRIVATE
V8_WARN_UNUSED_RESULT bool FreePages(void* address, const size_t size);
// Releases memory that is no longer needed. The range specified by |address|
// and |size| must be an allocated memory region. |size| and |new_size| must be
// multiples of CommitPageSize(). Memory from |new_size| to |size| is released.
// Released memory is left in an undefined state, so it should not be accessed.
// Returns true on success, otherwise false.
V8_EXPORT_PRIVATE
V8_WARN_UNUSED_RESULT bool ReleasePages(void* address, size_t size,
size_t new_size);
// Sets permissions according to |access|. |address| and |size| must be
// multiples of CommitPageSize(). Setting permission to kNoAccess may
// cause the memory contents to be lost. Returns true on success, otherwise
// false.
V8_EXPORT_PRIVATE
V8_WARN_UNUSED_RESULT bool SetPermissions(void* address, size_t size,
MemoryPermission access);
// Convenience function that allocates a single system page with read and write
// permissions. |address| is a hint. Returns the base address of the memory and
// the page size via |allocated| on success. Returns nullptr on failure.
V8_EXPORT_PRIVATE
V8_WARN_UNUSED_RESULT byte* AllocatePage(void* address, size_t* allocated);
// Represents and controls an area of reserved memory.
class V8_EXPORT_PRIVATE VirtualMemory {
@ -90,8 +141,7 @@ class V8_EXPORT_PRIVATE VirtualMemory {
// Reserves virtual memory containing an area of the given size that is
// aligned per alignment. This may not be at the position returned by
// address().
VirtualMemory(size_t size, void* hint,
size_t alignment = base::OS::AllocatePageSize());
VirtualMemory(size_t size, void* hint, size_t alignment = AllocatePageSize());
// Construct a virtual memory by assigning it some already mapped address
// and size.
@ -130,8 +180,7 @@ class V8_EXPORT_PRIVATE VirtualMemory {
// Sets permissions according to the access argument. address and size must be
// multiples of CommitPageSize(). Returns true on success, otherwise false.
bool SetPermissions(void* address, size_t size,
base::OS::MemoryPermission access);
bool SetPermissions(void* address, size_t size, MemoryPermission access);
// Releases memory after |free_start|. Returns the number of bytes released.
size_t Release(void* free_start);

View File

@ -8,9 +8,6 @@
#ifdef V8_USE_ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif // V8_USE_ADDRESS_SANITIZER
#if defined(LEAK_SANITIZER)
#include <sanitizer/lsan_interface.h>
#endif // defined(LEAK_SANITIZER)
#include <cmath> // For isnan.
#include <limits>
#include <vector>
@ -508,16 +505,10 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
virtual void Free(void* data, size_t) { free(data); }
virtual void* Reserve(size_t length) {
size_t page_size = base::OS::AllocatePageSize();
size_t page_size = i::AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
void* address =
base::OS::Allocate(base::OS::GetRandomMmapAddr(), allocated, page_size,
base::OS::MemoryPermission::kNoAccess);
#if defined(LEAK_SANITIZER)
if (address != nullptr) {
__lsan_register_root_region(address, allocated);
}
#endif
void* address = i::AllocatePages(i::GetRandomMmapAddr(), allocated,
page_size, i::MemoryPermission::kNoAccess);
return address;
}
@ -528,9 +519,9 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
return Free(data, length);
}
case v8::ArrayBuffer::Allocator::AllocationMode::kReservation: {
size_t page_size = base::OS::AllocatePageSize();
size_t page_size = i::AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
CHECK(base::OS::Free(data, allocated));
CHECK(i::FreePages(data, allocated));
return;
}
}
@ -541,11 +532,11 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
v8::ArrayBuffer::Allocator::Protection protection) {
DCHECK(protection == v8::ArrayBuffer::Allocator::Protection::kNoAccess ||
protection == v8::ArrayBuffer::Allocator::Protection::kReadWrite);
base::OS::MemoryPermission permission =
i::MemoryPermission permission =
(protection == v8::ArrayBuffer::Allocator::Protection::kReadWrite)
? base::OS::MemoryPermission::kReadWrite
: base::OS::MemoryPermission::kNoAccess;
CHECK(base::OS::SetPermissions(data, length, permission));
? i::MemoryPermission::kReadWrite
: i::MemoryPermission::kNoAccess;
CHECK(i::SetPermissions(data, length, permission));
}
};

View File

@ -24,8 +24,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
return stub;
#else
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@ -170,8 +169,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
CHECK(base::OS::SetPermissions(buffer, allocated,
base::OS::MemoryPermission::kReadExecute));
CHECK(SetPermissions(buffer, allocated, MemoryPermission::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
@ -184,8 +182,7 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
return stub;
#else
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@ -261,8 +258,7 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
masm.GetCode(isolate, &desc);
Assembler::FlushICache(isolate, buffer, allocated);
CHECK(base::OS::SetPermissions(buffer, allocated,
base::OS::MemoryPermission::kReadExecute));
CHECK(SetPermissions(buffer, allocated, MemoryPermission::kReadExecute));
return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
}
@ -273,8 +269,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
#else
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@ -290,8 +285,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
CHECK(base::OS::SetPermissions(buffer, allocated,
base::OS::MemoryPermission::kReadExecute));
CHECK(SetPermissions(buffer, allocated, MemoryPermission::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

View File

@ -130,11 +130,9 @@ int GetFlagsForMemoryPermission(OS::MemoryPermission access) {
}
void* Allocate(void* address, size_t size, OS::MemoryPermission access) {
const size_t actual_size = RoundUp(size, OS::AllocatePageSize());
int prot = GetProtectionFromMemoryPermission(access);
int flags = GetFlagsForMemoryPermission(access);
void* result =
mmap(address, actual_size, prot, flags, kMmapFd, kMmapFdOffset);
void* result = mmap(address, size, prot, flags, kMmapFd, kMmapFdOffset);
if (result == MAP_FAILED) return nullptr;
return result;
}
@ -284,6 +282,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
address = AlignedAddress(address, alignment);
// Add the maximum misalignment so we are guaranteed an aligned base address.
size_t request_size = size + (alignment - page_size);
request_size = RoundUp(request_size, OS::AllocatePageSize());
void* result = base::Allocate(address, request_size, access);
if (result == nullptr) return nullptr;

View File

@ -36,6 +36,12 @@
#endif
namespace v8 {
namespace internal {
// TODO(bbudge) Move this to libplatform.
class DefaultMemoryManager;
} // namespace internal
namespace base {
// ----------------------------------------------------------------------------
@ -157,6 +163,9 @@ class V8_BASE_EXPORT OS {
static PRINTF_FORMAT(1, 2) void PrintError(const char* format, ...);
static PRINTF_FORMAT(1, 0) void VPrintError(const char* format, va_list args);
// OS memory management API. Except for testing, use the equivalent API in
// v8::internal (src/allocation.h).
enum class MemoryPermission {
kNoAccess,
kReadWrite,
@ -165,40 +174,6 @@ class V8_BASE_EXPORT OS {
kReadExecute
};
// Gets the page granularity for Allocate. Addresses returned by Allocate are
// aligned to this size.
static size_t AllocatePageSize();
// Gets the granularity at which the permissions and commit calls can be made.
static size_t CommitPageSize();
// Generate a random address to be used for hinting allocation calls.
static void* GetRandomMmapAddr();
// Allocates memory. Permissions are set according to the access argument.
// The address parameter is a hint. The size and alignment parameters must be
// multiples of AllocatePageSize(). Returns the address of the allocated
// memory, with the specified size and alignment, or nullptr on failure.
V8_WARN_UNUSED_RESULT static void* Allocate(void* address, size_t size,
size_t alignment,
MemoryPermission access);
// Frees memory allocated by a call to Allocate. address and size must be
// multiples of AllocatePageSize(). Returns true on success, otherwise false.
V8_WARN_UNUSED_RESULT static bool Free(void* address, const size_t size);
// Releases memory that is no longer needed. The range specified by address
// and size must be part of an allocated memory region, and must be multiples
// of CommitPageSize(). Released memory is left in an undefined state, so it
// should not be accessed. Returns true on success, otherwise false.
V8_WARN_UNUSED_RESULT static bool Release(void* address, size_t size);
// Sets permissions according to the access argument. address and size must be
// multiples of CommitPageSize(). Setting permission to kNoAccess may cause
// the memory contents to be lost. Returns true on success, otherwise false.
V8_WARN_UNUSED_RESULT static bool SetPermissions(void* address, size_t size,
MemoryPermission access);
static bool HasLazyCommits();
// Sleep for a specified time interval.
@ -280,6 +255,28 @@ class V8_BASE_EXPORT OS {
static int GetCurrentThreadId();
private:
// These classes use the private memory management API below.
friend class MemoryMappedFile;
friend class PosixMemoryMappedFile;
friend class v8::internal::DefaultMemoryManager;
static size_t AllocatePageSize();
static size_t CommitPageSize();
static void* GetRandomMmapAddr();
V8_WARN_UNUSED_RESULT static void* Allocate(void* address, size_t size,
size_t alignment,
MemoryPermission access);
V8_WARN_UNUSED_RESULT static bool Free(void* address, const size_t size);
V8_WARN_UNUSED_RESULT static bool Release(void* address, size_t size);
V8_WARN_UNUSED_RESULT static bool SetPermissions(void* address, size_t size,
MemoryPermission access);
static const int msPerSecond = 1000;
#if V8_OS_POSIX

View File

@ -13,10 +13,6 @@
#include <utility>
#include <vector>
#if defined(LEAK_SANITIZER)
#include <sanitizer/lsan_interface.h>
#endif // defined(LEAK_SANITIZER)
#ifdef ENABLE_VTUNE_JIT_INTERFACE
#include "src/third_party/vtune/v8-vtune.h"
#endif
@ -146,24 +142,19 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
// TODO(titzer): allocations should fail if >= 2gb because array buffers
// store their lengths as a SMI internally.
if (length >= kTwoGB) return nullptr;
size_t page_size = base::OS::AllocatePageSize();
size_t page_size = i::AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
// Rounding up could go over the limit.
if (allocated >= kTwoGB) return nullptr;
void* address = base::OS::Allocate(nullptr, allocated, page_size,
base::OS::MemoryPermission::kReadWrite);
#if defined(LEAK_SANITIZER)
if (address != nullptr) {
__lsan_register_root_region(address, allocated);
}
#endif
return address;
return i::AllocatePages(nullptr, allocated, page_size,
i::MemoryPermission::kReadWrite);
}
void FreeVM(void* data, size_t length) {
size_t page_size = base::OS::AllocatePageSize();
size_t page_size = i::AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
CHECK(base::OS::Free(data, allocated));
CHECK(i::FreePages(data, allocated));
}
};
@ -192,7 +183,7 @@ class MockArrayBufferAllocator : public ArrayBufferAllocatorBase {
private:
size_t Adjust(size_t length) {
const size_t kAllocationLimit = 10 * kMB;
return length > kAllocationLimit ? base::OS::AllocatePageSize() : length;
return length > kAllocationLimit ? i::AllocatePageSize() : length;
}
};

View File

@ -5580,7 +5580,7 @@ bool Heap::SetUp() {
}
mmap_region_base_ =
reinterpret_cast<uintptr_t>(base::OS::GetRandomMmapAddr()) &
reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
~kMmapRegionMask;
// Set up memory allocator.

View File

@ -1646,7 +1646,7 @@ class Heap {
void ReportCodeStatistics(const char* title);
#endif
void* GetRandomMmapAddr() {
void* result = base::OS::GetRandomMmapAddr();
void* result = v8::internal::GetRandomMmapAddr();
#if V8_TARGET_ARCH_X64
#if V8_OS_MACOSX
// The Darwin kernel [as of macOS 10.12.5] does not clean up page
@ -1657,7 +1657,7 @@ class Heap {
// killed. Confine the hint to a 32-bit section of the virtual address
// space. See crbug.com/700928.
uintptr_t offset =
reinterpret_cast<uintptr_t>(base::OS::GetRandomMmapAddr()) &
reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
kMmapRegionMask;
result = reinterpret_cast<void*>(mmap_region_base_ + offset);
#endif // V8_OS_MACOSX

View File

@ -118,8 +118,8 @@ bool CodeRange::SetUp(size_t requested) {
VirtualMemory reservation;
if (!AlignedAllocVirtualMemory(
requested, Max(kCodeRangeAreaAlignment, base::OS::AllocatePageSize()),
base::OS::GetRandomMmapAddr(), &reservation)) {
requested, Max(kCodeRangeAreaAlignment, AllocatePageSize()),
GetRandomMmapAddr(), &reservation)) {
return false;
}
@ -131,7 +131,7 @@ bool CodeRange::SetUp(size_t requested) {
// the beginning of an executable space.
if (reserved_area > 0) {
if (!reservation.SetPermissions(base, reserved_area,
base::OS::MemoryPermission::kReadWrite))
MemoryPermission::kReadWrite))
return false;
base += reserved_area;
@ -226,7 +226,7 @@ bool CodeRange::CommitRawMemory(Address start, size_t length) {
bool CodeRange::UncommitRawMemory(Address start, size_t length) {
return virtual_memory_.SetPermissions(start, length,
base::OS::MemoryPermission::kNoAccess);
MemoryPermission::kNoAccess);
}
@ -234,8 +234,7 @@ void CodeRange::FreeRawMemory(Address address, size_t length) {
DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
base::LockGuard<base::Mutex> guard(&code_range_mutex_);
free_list_.emplace_back(address, length);
virtual_memory_.SetPermissions(address, length,
base::OS::MemoryPermission::kNoAccess);
virtual_memory_.SetPermissions(address, length, MemoryPermission::kNoAccess);
}
bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
@ -399,8 +398,7 @@ int MemoryAllocator::Unmapper::NumberOfChunks() {
bool MemoryAllocator::CommitMemory(Address base, size_t size,
Executability executable) {
if (!base::OS::SetPermissions(base, size,
base::OS::MemoryPermission::kReadWrite)) {
if (!SetPermissions(base, size, MemoryPermission::kReadWrite)) {
return false;
}
UpdateAllocatedSpaceLimits(base, base + size);
@ -429,7 +427,7 @@ void MemoryAllocator::FreeMemory(Address base, size_t size,
code_range()->FreeRawMemory(base, size);
} else {
DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
CHECK(base::OS::Free(base, size));
CHECK(FreePages(base, size));
}
}
@ -462,7 +460,7 @@ Address MemoryAllocator::AllocateAlignedMemory(
}
} else {
if (reservation.SetPermissions(base, commit_size,
base::OS::MemoryPermission::kReadWrite)) {
MemoryPermission::kReadWrite)) {
UpdateAllocatedSpaceLimits(base, base + commit_size);
} else {
base = nullptr;
@ -526,8 +524,8 @@ void MemoryChunk::SetReadAndExecutable() {
size_t page_size = MemoryAllocator::GetCommitPageSize();
DCHECK(IsAddressAligned(protect_start, page_size));
size_t protect_size = RoundUp(area_size(), page_size);
CHECK(base::OS::SetPermissions(protect_start, protect_size,
base::OS::MemoryPermission::kReadExecute));
CHECK(SetPermissions(protect_start, protect_size,
MemoryPermission::kReadExecute));
}
}
@ -545,8 +543,8 @@ void MemoryChunk::SetReadAndWritable() {
size_t page_size = MemoryAllocator::GetCommitPageSize();
DCHECK(IsAddressAligned(unprotect_start, page_size));
size_t unprotect_size = RoundUp(area_size(), page_size);
CHECK(base::OS::SetPermissions(unprotect_start, unprotect_size,
base::OS::MemoryPermission::kReadWrite));
CHECK(SetPermissions(unprotect_start, unprotect_size,
MemoryPermission::kReadWrite));
}
}
@ -600,9 +598,8 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
size_t page_size = MemoryAllocator::GetCommitPageSize();
DCHECK(IsAddressAligned(area_start, page_size));
size_t area_size = RoundUp(area_end - area_start, page_size);
CHECK(base::OS::SetPermissions(
area_start, area_size,
base::OS::MemoryPermission::kReadWriteExecute));
CHECK(SetPermissions(area_start, area_size,
MemoryPermission::kReadWriteExecute));
}
}
@ -942,7 +939,7 @@ void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
DCHECK_EQ(chunk->address() + chunk->size(),
chunk->area_end() + CodePageGuardSize());
reservation->SetPermissions(chunk->area_end_, page_size,
base::OS::MemoryPermission::kNoAccess);
MemoryPermission::kNoAccess);
}
// On e.g. Windows, a reservation may be larger than a page and releasing
// partially starting at |start_free| will also release the potentially
@ -1094,9 +1091,7 @@ bool MemoryAllocator::CommitBlock(Address start, size_t size,
bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
if (!base::OS::SetPermissions(start, size,
base::OS::MemoryPermission::kNoAccess))
return false;
if (!SetPermissions(start, size, MemoryPermission::kNoAccess)) return false;
isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
return true;
}
@ -1142,7 +1137,7 @@ intptr_t MemoryAllocator::GetCommitPageSize() {
DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
return FLAG_v8_os_page_size * KB;
} else {
return base::OS::CommitPageSize();
return CommitPageSize();
}
}
@ -1164,25 +1159,23 @@ bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
const Address post_guard_page = start + reserved_size - guard_size;
// Commit the non-executable header, from start to pre-code guard page.
if (vm->SetPermissions(start, pre_guard_offset,
base::OS::MemoryPermission::kReadWrite)) {
MemoryPermission::kReadWrite)) {
// Create the pre-code guard page, following the header.
if (vm->SetPermissions(pre_guard_page, page_size,
base::OS::MemoryPermission::kNoAccess)) {
MemoryPermission::kNoAccess)) {
// Commit the executable code body.
if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
base::OS::MemoryPermission::kReadWrite)) {
MemoryPermission::kReadWrite)) {
// Create the post-code guard page.
if (vm->SetPermissions(post_guard_page, page_size,
base::OS::MemoryPermission::kNoAccess)) {
MemoryPermission::kNoAccess)) {
UpdateAllocatedSpaceLimits(start, code_area + commit_size);
return true;
}
vm->SetPermissions(code_area, commit_size,
base::OS::MemoryPermission::kNoAccess);
vm->SetPermissions(code_area, commit_size, MemoryPermission::kNoAccess);
}
}
vm->SetPermissions(start, pre_guard_offset,
base::OS::MemoryPermission::kNoAccess);
vm->SetPermissions(start, pre_guard_offset, MemoryPermission::kNoAccess);
}
return false;
}
@ -2370,7 +2363,7 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
DCHECK_LE(new_capacity, maximum_capacity_);
DCHECK_GT(new_capacity, current_capacity_);
const size_t delta = new_capacity - current_capacity_;
DCHECK(IsAligned(delta, base::OS::AllocatePageSize()));
DCHECK(IsAligned(delta, AllocatePageSize()));
const int delta_pages = static_cast<int>(delta / Page::kPageSize);
Page* last_page = anchor()->prev_page();
DCHECK_NE(last_page, anchor());
@ -2414,7 +2407,7 @@ bool SemiSpace::ShrinkTo(size_t new_capacity) {
DCHECK_LT(new_capacity, current_capacity_);
if (is_committed()) {
const size_t delta = current_capacity_ - new_capacity;
DCHECK(IsAligned(delta, base::OS::AllocatePageSize()));
DCHECK(IsAligned(delta, AllocatePageSize()));
int delta_pages = static_cast<int>(delta / Page::kPageSize);
Page* new_last_page;
Page* last_page;

View File

@ -58,7 +58,7 @@ void StoreBuffer::SetUp() {
if (!reservation.SetPermissions(reinterpret_cast<Address>(start_[0]),
kStoreBufferSize * kStoreBuffers,
base::OS::MemoryPermission::kReadWrite)) {
MemoryPermission::kReadWrite)) {
V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
}
current_ = 0;

View File

@ -16,8 +16,7 @@ namespace internal {
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@ -39,8 +38,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
CHECK(base::OS::SetPermissions(buffer, allocated,
base::OS::MemoryPermission::kReadExecute));
CHECK(SetPermissions(buffer, allocated, MemoryPermission::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
@ -133,8 +131,7 @@ class LabelConverter {
MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@ -452,8 +449,7 @@ MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
CHECK(base::OS::SetPermissions(buffer, allocated,
base::OS::MemoryPermission::kReadExecute));
CHECK(SetPermissions(buffer, allocated, MemoryPermission::kReadExecute));
// TODO(jkummerow): It would be nice to register this code creation event
// with the PROFILE / GDBJIT system.
return FUNCTION_CAST<MemMoveFunction>(buffer);

View File

@ -24,8 +24,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
return stub;
#else
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@ -545,8 +544,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
CHECK(base::OS::SetPermissions(buffer, allocated,
base::OS::MemoryPermission::kReadExecute));
CHECK(SetPermissions(buffer, allocated, MemoryPermission::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
@ -557,8 +555,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
#else
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@ -574,8 +571,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
CHECK(base::OS::SetPermissions(buffer, allocated,
base::OS::MemoryPermission::kReadExecute));
CHECK(SetPermissions(buffer, allocated, MemoryPermission::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

View File

@ -24,8 +24,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
#else
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@ -546,8 +545,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
CHECK(base::OS::SetPermissions(buffer, allocated,
base::OS::MemoryPermission::kReadExecute));
CHECK(SetPermissions(buffer, allocated, MemoryPermission::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
@ -558,8 +556,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
#else
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@ -575,8 +572,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
CHECK(base::OS::SetPermissions(buffer, allocated,
base::OS::MemoryPermission::kReadExecute));
CHECK(SetPermissions(buffer, allocated, MemoryPermission::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

View File

@ -20,8 +20,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
#else
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@ -41,8 +40,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
CHECK(base::OS::SetPermissions(buffer, allocated,
base::OS::MemoryPermission::kReadExecute));
CHECK(SetPermissions(buffer, allocated, MemoryPermission::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

View File

@ -20,8 +20,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
#else
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@ -38,8 +37,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
CHECK(base::OS::SetPermissions(buffer, allocated,
base::OS::MemoryPermission::kReadExecute));
CHECK(SetPermissions(buffer, allocated, MemoryPermission::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

View File

@ -542,8 +542,8 @@ Address NativeModule::AllocateForCode(size_t size) {
}
Address ret = mem.ranges().front().first;
Address end = ret + size;
Address commit_start = RoundUp(ret, base::OS::AllocatePageSize());
Address commit_end = RoundUp(end, base::OS::AllocatePageSize());
Address commit_start = RoundUp(ret, AllocatePageSize());
Address commit_end = RoundUp(end, AllocatePageSize());
// {commit_start} will be either ret or the start of the next page.
// {commit_end} will be the start of the page after the one in which
// the allocation ends.
@ -565,7 +565,7 @@ Address NativeModule::AllocateForCode(size_t size) {
Address start =
std::max(commit_start, reinterpret_cast<Address>(it->address()));
size_t commit_size = static_cast<size_t>(commit_end - start);
DCHECK(IsAligned(commit_size, base::OS::AllocatePageSize()));
DCHECK(IsAligned(commit_size, AllocatePageSize()));
if (!wasm_code_manager_->Commit(start, commit_size)) {
return nullptr;
}
@ -574,7 +574,7 @@ Address NativeModule::AllocateForCode(size_t size) {
}
#else
size_t commit_size = static_cast<size_t>(commit_end - commit_start);
DCHECK(IsAligned(commit_size, base::OS::AllocatePageSize()));
DCHECK(IsAligned(commit_size, AllocatePageSize()));
if (!wasm_code_manager_->Commit(commit_start, commit_size)) {
return nullptr;
}
@ -674,9 +674,8 @@ WasmCodeManager::WasmCodeManager(v8::Isolate* isolate, size_t max_committed)
}
bool WasmCodeManager::Commit(Address start, size_t size) {
DCHECK(
IsAligned(reinterpret_cast<size_t>(start), base::OS::AllocatePageSize()));
DCHECK(IsAligned(size, base::OS::AllocatePageSize()));
DCHECK(IsAligned(reinterpret_cast<size_t>(start), AllocatePageSize()));
DCHECK(IsAligned(size, AllocatePageSize()));
if (size > static_cast<size_t>(std::numeric_limits<intptr_t>::max())) {
return false;
}
@ -686,8 +685,7 @@ bool WasmCodeManager::Commit(Address start, size_t size) {
remaining_uncommitted_.Increment(size);
return false;
}
bool ret = base::OS::SetPermissions(start, size,
base::OS::MemoryPermission::kReadWrite);
bool ret = SetPermissions(start, size, MemoryPermission::kReadWrite);
TRACE_HEAP("Setting rw permissions for %p:%p\n",
reinterpret_cast<void*>(start),
reinterpret_cast<void*>(start + size));
@ -729,11 +727,11 @@ void WasmCodeManager::AssignRanges(void* start, void* end,
void WasmCodeManager::TryAllocate(size_t size, VirtualMemory* ret, void* hint) {
DCHECK_GT(size, 0);
size = RoundUp(size, base::OS::AllocatePageSize());
if (hint == nullptr) hint = base::OS::GetRandomMmapAddr();
size = RoundUp(size, AllocatePageSize());
if (hint == nullptr) hint = GetRandomMmapAddr();
if (!AlignedAllocVirtualMemory(
size, static_cast<size_t>(base::OS::AllocatePageSize()), hint, ret)) {
if (!AlignedAllocVirtualMemory(size, static_cast<size_t>(AllocatePageSize()),
hint, ret)) {
DCHECK(!ret->IsReserved());
}
TRACE_HEAP("VMem alloc: %p:%p (%zu)\n", ret->address(), ret->end(),
@ -745,7 +743,7 @@ size_t WasmCodeManager::GetAllocationChunk(const WasmModule& module) {
// from something embedder-provided
if (kRequiresCodeRange) return kMaxWasmCodeMemory;
DCHECK(kModuleCanAllocateMoreMemory);
size_t ret = base::OS::AllocatePageSize();
size_t ret = AllocatePageSize();
// a ballpark guesstimate on native inflation factor.
constexpr size_t kMultiplier = 4;
@ -788,9 +786,8 @@ bool NativeModule::SetExecutable(bool executable) {
if (is_executable_ == executable) return true;
TRACE_HEAP("Setting module %zu as executable: %d.\n", instance_id,
executable);
base::OS::MemoryPermission permission =
executable ? base::OS::MemoryPermission::kReadExecute
: base::OS::MemoryPermission::kReadWrite;
MemoryPermission permission = executable ? MemoryPermission::kReadExecute
: MemoryPermission::kReadWrite;
#if V8_OS_WIN
// On windows, we need to switch permissions per separate virtual memory
@ -803,7 +800,7 @@ bool NativeModule::SetExecutable(bool executable) {
// not.
if (can_request_more_memory_) {
for (auto& vmem : owned_memory_) {
if (!base::OS::SetPermissions(vmem.address(), vmem.size(), permission)) {
if (!SetPermissions(vmem.address(), vmem.size(), permission)) {
return false;
}
TRACE_HEAP("Set %p:%p to executable:%d\n", vmem.address(), vmem.end(),
@ -817,8 +814,8 @@ bool NativeModule::SetExecutable(bool executable) {
// allocated_memory_ is fine-grained, so we need to
// page-align it.
size_t range_size = RoundUp(static_cast<size_t>(range.second - range.first),
base::OS::AllocatePageSize());
if (!base::OS::SetPermissions(range.first, range_size, permission)) {
AllocatePageSize());
if (!SetPermissions(range.first, range_size, permission)) {
return false;
}
TRACE_HEAP("Set %p:%p to executable:%d\n",
@ -905,7 +902,7 @@ void WasmCodeManager::FreeNativeModuleMemories(NativeModule* native_module) {
// which we currently indicate by having the isolate_ as null
if (isolate_ == nullptr) return;
size_t freed_mem = native_module->committed_memory_;
DCHECK(IsAligned(freed_mem, base::OS::AllocatePageSize()));
DCHECK(IsAligned(freed_mem, AllocatePageSize()));
remaining_uncommitted_.Increment(freed_mem);
isolate_->AdjustAmountOfExternalAllocatedMemory(
-static_cast<int64_t>(freed_mem));

View File

@ -24,8 +24,8 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
// We always allocate the largest possible offset into the heap, so the
// addressable memory after the guard page can be made inaccessible.
allocation_length = RoundUp(kWasmMaxHeapOffset, base::OS::CommitPageSize());
DCHECK_EQ(0, size % base::OS::CommitPageSize());
allocation_length = RoundUp(kWasmMaxHeapOffset, CommitPageSize());
DCHECK_EQ(0, size % CommitPageSize());
// The Reserve makes the whole region inaccessible by default.
allocation_base =

View File

@ -15,8 +15,7 @@ namespace internal {
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@ -32,8 +31,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
CHECK(base::OS::SetPermissions(buffer, allocated,
base::OS::MemoryPermission::kReadExecute));
CHECK(SetPermissions(buffer, allocated, MemoryPermission::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}

View File

@ -571,11 +571,11 @@ static inline void CheckDoubleEquals(double expected, double actual) {
static inline uint8_t* AllocateAssemblerBuffer(
size_t* allocated,
size_t requested = v8::internal::AssemblerBase::kMinimalBufferSize) {
size_t page_size = v8::base::OS::AllocatePageSize();
size_t page_size = v8::internal::AllocatePageSize();
size_t alloc_size = RoundUp(requested, page_size);
void* result =
v8::base::OS::Allocate(nullptr, alloc_size, page_size,
v8::base::OS::MemoryPermission::kReadWrite);
void* result = v8::internal::AllocatePages(
nullptr, alloc_size, page_size,
v8::internal::MemoryPermission::kReadWriteExecute);
CHECK(result);
*allocated = alloc_size;
return static_cast<uint8_t*>(result);
@ -583,8 +583,8 @@ static inline uint8_t* AllocateAssemblerBuffer(
static inline void MakeAssemblerBufferExecutable(uint8_t* buffer,
size_t allocated) {
bool result = v8::base::OS::SetPermissions(
buffer, allocated, v8::base::OS::MemoryPermission::kReadExecute);
bool result = v8::internal::SetPermissions(
buffer, allocated, v8::internal::MemoryPermission::kReadExecute);
CHECK(result);
}

View File

@ -5676,9 +5676,8 @@ TEST(UncommitUnusedLargeObjectMemory) {
CcTest::CollectAllGarbage();
CHECK(chunk->CommittedPhysicalMemory() < committed_memory_before);
size_t shrinked_size =
RoundUp((array->address() - chunk->address()) + array->Size(),
base::OS::CommitPageSize());
size_t shrinked_size = RoundUp(
(array->address() - chunk->address()) + array->Size(), CommitPageSize());
CHECK_EQ(shrinked_size, chunk->CommittedPhysicalMemory());
}

View File

@ -102,13 +102,12 @@ static void VerifyMemoryChunk(Isolate* isolate,
reserve_area_size, commit_area_size, executable, nullptr);
size_t alignment = code_range != nullptr && code_range->valid()
? MemoryChunk::kAlignment
: base::OS::CommitPageSize();
: CommitPageSize();
size_t reserved_size =
((executable == EXECUTABLE))
? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
alignment)
: RoundUp(header_size + reserve_area_size,
base::OS::CommitPageSize());
: RoundUp(header_size + reserve_area_size, CommitPageSize());
CHECK(memory_chunk->size() == reserved_size);
CHECK(memory_chunk->area_start() <
memory_chunk->address() + memory_chunk->size());
@ -686,7 +685,7 @@ TEST(ShrinkPageToHighWaterMarkFreeSpaceEnd) {
size_t shrunk = old_space->ShrinkPageToHighWaterMark(page);
size_t should_have_shrunk =
RoundDown(static_cast<size_t>(Page::kAllocatableMemory - array->Size()),
base::OS::CommitPageSize());
CommitPageSize());
CHECK_EQ(should_have_shrunk, shrunk);
}

View File

@ -4,6 +4,12 @@
#include <stdlib.h>
#include <string.h>
#if V8_OS_POSIX
#include <setjmp.h>
#include <signal.h>
#include <unistd.h> // NOLINT
#endif
#include "src/v8.h"
#include "test/cctest/cctest.h"
@ -54,7 +60,7 @@ size_t GetHugeMemoryAmount() {
static size_t huge_memory = 0;
if (!huge_memory) {
for (int i = 0; i < 100; i++) {
huge_memory |= bit_cast<size_t>(v8::base::OS::GetRandomMmapAddr());
huge_memory |= bit_cast<size_t>(v8::internal::GetRandomMmapAddr());
}
// Make it larger than the available address space.
huge_memory *= 2;
@ -122,7 +128,7 @@ TEST(AlignedAllocOOM) {
// On failure, this won't return, since an AlignedAlloc failure is fatal.
// In that case, behavior is checked in OnAlignedAllocOOM before exit.
void* result = v8::internal::AlignedAlloc(GetHugeMemoryAmount(),
v8::base::OS::AllocatePageSize());
v8::internal::AllocatePageSize());
// On a few systems, allocation somehow succeeds.
CHECK_EQ(result == nullptr, platform.oom_callback_called);
}
@ -143,7 +149,7 @@ TEST(AlignedAllocVirtualMemoryOOM) {
CHECK(!platform.oom_callback_called);
v8::internal::VirtualMemory result;
bool success = v8::internal::AlignedAllocVirtualMemory(
GetHugeMemoryAmount(), v8::base::OS::AllocatePageSize(), nullptr,
GetHugeMemoryAmount(), v8::internal::AllocatePageSize(), nullptr,
&result);
// On a few systems, allocation somehow succeeds.
CHECK_IMPLIES(success, result.IsReserved());

View File

@ -213,7 +213,7 @@ static void InitializeVM() {
__ Ret(); \
__ GetCode(masm.isolate(), nullptr);
#define TEARDOWN() CHECK(v8::base::OS::Free(buf, allocated));
#define TEARDOWN() CHECK(v8::internal::FreePages(buf, allocated));
#endif // ifdef USE_SIMULATOR.

View File

@ -12,45 +12,6 @@ using OS = v8::base::OS;
namespace v8 {
namespace internal {
TEST(OSAllocateAndFree) {
size_t page_size = OS::AllocatePageSize();
CHECK_NE(0, page_size);
// A large allocation, aligned at native allocation granularity.
const size_t kAllocationSize = 1 * MB;
void* mem_addr = OS::Allocate(OS::GetRandomMmapAddr(), kAllocationSize,
page_size, OS::MemoryPermission::kReadWrite);
CHECK_NOT_NULL(mem_addr);
CHECK(OS::Free(mem_addr, kAllocationSize));
// A large allocation, aligned significantly beyond native granularity.
const size_t kBigAlignment = 64 * MB;
void* aligned_mem_addr =
OS::Allocate(OS::GetRandomMmapAddr(), kAllocationSize, kBigAlignment,
OS::MemoryPermission::kReadWrite);
CHECK_NOT_NULL(aligned_mem_addr);
CHECK_EQ(aligned_mem_addr, AlignedAddress(aligned_mem_addr, kBigAlignment));
CHECK(OS::Free(aligned_mem_addr, kAllocationSize));
}
TEST(OSReserveMemory) {
size_t page_size = OS::AllocatePageSize();
const size_t kAllocationSize = 1 * MB;
void* mem_addr = OS::Allocate(OS::GetRandomMmapAddr(), kAllocationSize,
page_size, OS::MemoryPermission::kReadWrite);
CHECK_NE(0, page_size);
CHECK_NOT_NULL(mem_addr);
size_t commit_size = OS::CommitPageSize();
CHECK(OS::SetPermissions(mem_addr, commit_size,
OS::MemoryPermission::kReadWrite));
// Check whether we can write to memory.
int* addr = static_cast<int*>(mem_addr);
addr[KB - 1] = 2;
CHECK(OS::SetPermissions(mem_addr, commit_size,
OS::MemoryPermission::kNoAccess));
CHECK(OS::Free(mem_addr, kAllocationSize));
}
#ifdef V8_CC_GNU
static uintptr_t sp_addr = 0;

View File

@ -43,7 +43,7 @@ byte* TestingModuleBuilder::AddMemory(uint32_t size) {
const bool enable_guard_regions =
trap_handler::IsTrapHandlerEnabled() && test_module_.is_wasm();
uint32_t alloc_size =
enable_guard_regions ? RoundUp(size, base::OS::CommitPageSize()) : size;
enable_guard_regions ? RoundUp(size, CommitPageSize()) : size;
Handle<JSArrayBuffer> new_buffer =
wasm::NewArrayBuffer(isolate_, alloc_size, enable_guard_regions);
CHECK(!new_buffer.is_null());

View File

@ -35,6 +35,7 @@ v8_source_set("unittests_sources") {
"../../test/common/wasm/wasm-macro-gen.h",
"../../testing/gmock-support.h",
"../../testing/gtest-support.h",
"allocation-unittest.cc",
"api/access-check-unittest.cc",
"api/exception-unittest.cc",
"api/interceptor-unittest.cc",

View File

@ -0,0 +1,164 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/allocation.h"
#if V8_OS_POSIX
#include <setjmp.h>
#include <signal.h>
#include <unistd.h> // NOLINT
#endif // V8_OS_POSIX
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
namespace internal {
// TODO(eholk): Add a windows version of permissions tests.
#if V8_OS_POSIX
namespace {
// These tests make sure the routines to allocate memory do so with the correct
// permissions.
//
// Unfortunately, there is no API to find the protection of a memory address,
// so instead we test permissions by installing a signal handler, probing a
// memory location and recovering from the fault.
//
// We don't test the execution permission because to do so we'd have to
// dynamically generate code and test if we can execute it.
class MemoryAllocationPermissionsTest : public ::testing::Test {
static void SignalHandler(int signal, siginfo_t* info, void*) {
siglongjmp(continuation_, 1);
}
struct sigaction old_action_;
// On Mac, sometimes we get SIGBUS instead of SIGSEGV.
#if V8_OS_MACOSX
struct sigaction old_bus_action_;
#endif
protected:
virtual void SetUp() {
struct sigaction action;
action.sa_sigaction = SignalHandler;
sigemptyset(&action.sa_mask);
action.sa_flags = SA_SIGINFO;
sigaction(SIGSEGV, &action, &old_action_);
#if V8_OS_MACOSX
sigaction(SIGBUS, &action, &old_bus_action_);
#endif
}
virtual void TearDown() {
// Be a good citizen and restore the old signal handler.
sigaction(SIGSEGV, &old_action_, nullptr);
#if V8_OS_MACOSX
sigaction(SIGBUS, &old_bus_action_, nullptr);
#endif
}
public:
static sigjmp_buf continuation_;
enum class MemoryAction { kRead, kWrite };
void ProbeMemory(volatile int* buffer, MemoryAction action,
bool should_succeed) {
const int save_sigs = 1;
if (!sigsetjmp(continuation_, save_sigs)) {
switch (action) {
case MemoryAction::kRead: {
// static_cast to remove the reference and force a memory read.
USE(static_cast<int>(*buffer));
break;
}
case MemoryAction::kWrite: {
*buffer = 0;
break;
}
}
if (should_succeed) {
SUCCEED();
} else {
FAIL();
}
return;
}
if (should_succeed) {
FAIL();
} else {
SUCCEED();
}
}
void TestPermissions(v8::internal::MemoryPermission permission, bool can_read,
bool can_write) {
const size_t page_size = AllocatePageSize();
int* buffer = static_cast<int*>(
AllocatePages(nullptr, page_size, page_size, permission));
ProbeMemory(buffer, MemoryAction::kRead, can_read);
ProbeMemory(buffer, MemoryAction::kWrite, can_write);
CHECK(FreePages(buffer, page_size));
}
};
sigjmp_buf MemoryAllocationPermissionsTest::continuation_;
} // namespace
TEST_F(MemoryAllocationPermissionsTest, DoTest) {
TestPermissions(MemoryPermission::kNoAccess, false, false);
TestPermissions(MemoryPermission::kReadWrite, true, true);
TestPermissions(MemoryPermission::kReadWriteExecute, true, true);
}
#endif // V8_OS_POSIX
// Basic tests of allocation.
class AllocationTest : public ::testing::Test {};
TEST(AllocationTest, AllocateAndFree) {
size_t page_size = v8::internal::AllocatePageSize();
CHECK_NE(0, page_size);
// A large allocation, aligned at native allocation granularity.
const size_t kAllocationSize = 1 * v8::internal::MB;
void* mem_addr = v8::internal::AllocatePages(
v8::internal::GetRandomMmapAddr(), kAllocationSize, page_size,
v8::internal::MemoryPermission::kReadWrite);
CHECK_NOT_NULL(mem_addr);
CHECK(v8::internal::FreePages(mem_addr, kAllocationSize));
// A large allocation, aligned significantly beyond native granularity.
const size_t kBigAlignment = 64 * v8::internal::MB;
void* aligned_mem_addr = v8::internal::AllocatePages(
v8::internal::GetRandomMmapAddr(), kAllocationSize, kBigAlignment,
v8::internal::MemoryPermission::kReadWrite);
CHECK_NOT_NULL(aligned_mem_addr);
CHECK_EQ(aligned_mem_addr, AlignedAddress(aligned_mem_addr, kBigAlignment));
CHECK(v8::internal::FreePages(aligned_mem_addr, kAllocationSize));
}
TEST(AllocationTest, ReserveMemory) {
size_t page_size = v8::internal::AllocatePageSize();
const size_t kAllocationSize = 1 * v8::internal::MB;
void* mem_addr = v8::internal::AllocatePages(
v8::internal::GetRandomMmapAddr(), kAllocationSize, page_size,
v8::internal::MemoryPermission::kReadWrite);
CHECK_NE(0, page_size);
CHECK_NOT_NULL(mem_addr);
size_t commit_size = v8::internal::CommitPageSize();
CHECK(v8::internal::SetPermissions(
mem_addr, commit_size, v8::internal::MemoryPermission::kReadWrite));
// Check whether we can write to memory.
int* addr = static_cast<int*>(mem_addr);
addr[v8::internal::KB - 1] = 2;
CHECK(v8::internal::SetPermissions(
mem_addr, commit_size, v8::internal::MemoryPermission::kNoAccess));
CHECK(v8::internal::FreePages(mem_addr, kAllocationSize));
}
} // namespace internal
} // namespace v8

View File

@ -4,23 +4,8 @@
#include "src/base/platform/platform.h"
#if V8_OS_POSIX
#include <setjmp.h>
#include <signal.h>
#include <unistd.h> // NOLINT
#endif
#if V8_OS_WIN
#include "src/base/win32-headers.h"
#endif
#include "testing/gtest/include/gtest/gtest.h"
#if V8_OS_ANDROID
#define DISABLE_ON_ANDROID(Name) DISABLED_##Name
#else
#define DISABLE_ON_ANDROID(Name) Name
#endif
namespace v8 {
namespace base {
@ -98,106 +83,5 @@ TEST_F(ThreadLocalStorageTest, DoTest) {
Join();
}
#if V8_OS_POSIX
// TODO(eholk): Add a windows version of these tests
namespace {
// These tests make sure the routines to allocate memory do so with the correct
// permissions.
//
// Unfortunately, there is no API to find the protection of a memory address,
// so instead we test permissions by installing a signal handler, probing a
// memory location and recovering from the fault.
//
// We don't test the execution permission because to do so we'd have to
// dynamically generate code and test if we can execute it.
class MemoryAllocationPermissionsTest : public ::testing::Test {
static void SignalHandler(int signal, siginfo_t* info, void*) {
siglongjmp(continuation_, 1);
}
struct sigaction old_action_;
// On Mac, sometimes we get SIGBUS instead of SIGSEGV.
#if V8_OS_MACOSX
struct sigaction old_bus_action_;
#endif
protected:
virtual void SetUp() {
struct sigaction action;
action.sa_sigaction = SignalHandler;
sigemptyset(&action.sa_mask);
action.sa_flags = SA_SIGINFO;
sigaction(SIGSEGV, &action, &old_action_);
#if V8_OS_MACOSX
sigaction(SIGBUS, &action, &old_bus_action_);
#endif
}
virtual void TearDown() {
// be a good citizen and restore the old signal handler.
sigaction(SIGSEGV, &old_action_, nullptr);
#if V8_OS_MACOSX
sigaction(SIGBUS, &old_bus_action_, nullptr);
#endif
}
public:
static sigjmp_buf continuation_;
enum class MemoryAction { kRead, kWrite };
void ProbeMemory(volatile int* buffer, MemoryAction action,
bool should_succeed) {
const int save_sigs = 1;
if (!sigsetjmp(continuation_, save_sigs)) {
switch (action) {
case MemoryAction::kRead: {
// static_cast to remove the reference and force a memory read.
USE(static_cast<int>(*buffer));
break;
}
case MemoryAction::kWrite: {
*buffer = 0;
break;
}
}
if (should_succeed) {
SUCCEED();
} else {
FAIL();
}
return;
}
if (should_succeed) {
FAIL();
} else {
SUCCEED();
}
}
void TestPermissions(OS::MemoryPermission permission, bool can_read,
bool can_write) {
const size_t page_size = OS::AllocatePageSize();
int* buffer = static_cast<int*>(
OS::Allocate(nullptr, page_size, page_size, permission));
ProbeMemory(buffer, MemoryAction::kRead, can_read);
ProbeMemory(buffer, MemoryAction::kWrite, can_write);
CHECK(OS::Free(buffer, page_size));
}
};
sigjmp_buf MemoryAllocationPermissionsTest::continuation_;
TEST_F(MemoryAllocationPermissionsTest, DoTest) {
TestPermissions(OS::MemoryPermission::kNoAccess, false, false);
TestPermissions(OS::MemoryPermission::kReadWrite, true, true);
TestPermissions(OS::MemoryPermission::kReadWriteExecute, true, true);
}
} // namespace
#endif // V8_OS_POSIX
} // namespace base
} // namespace v8

View File

@ -99,7 +99,7 @@ TEST_F(HeapTest, ASLR) {
}
if (hints.size() == 1) {
EXPECT_TRUE((*hints.begin()) == nullptr);
EXPECT_TRUE(base::OS::GetRandomMmapAddr() == nullptr);
EXPECT_TRUE(i::GetRandomMmapAddr() == nullptr);
} else {
// It is unlikely that 1000 random samples will collide to less then 500
// values.

View File

@ -8,6 +8,7 @@
'variables': {
'v8_code': 1,
'unittests_sources': [ ### gcmole(all) ###
'allocation-unittest.cc',
'api/access-check-unittest.cc',
'api/exception-unittest.cc',
'api/interceptor-unittest.cc',

View File

@ -190,7 +190,7 @@ class WasmCodeManagerTest : public TestWithIsolate {
return native_module->AddCode(desc, 0, index, 0, {}, false);
}
size_t page() const { return base::OS::AllocatePageSize(); }
size_t page() const { return AllocatePageSize(); }
v8::Isolate* v8_isolate() const {
return reinterpret_cast<v8::Isolate*>(isolate());
}