[ptr-compr] Explicitly pass v8::PageAllocator instance to helper functions.

... like AllocatePage(), AllocatePages(), FreePages() and SetPermissions().
This CL also changes base::PageAllocator to cache the AllocatePageSize and
CommitPageSize values returned by the OS.
This is a necessary cleanup before introducing BoundedPageAllocator.

Bug: v8:8096
Change-Id: Ifb7cdd2caa6a1b029ce0fca6545c61df9d281be2
Reviewed-on: https://chromium-review.googlesource.com/1209343
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#55690}
Igor Sheludko, 2018-09-06 15:44:07 +02:00 (committed by Commit Bot)
parent 98bdaf9836
commit 51224eab41
22 changed files with 283 additions and 143 deletions
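
For orientation, the change at a typical call site looks like this — a minimal sketch, inside v8::internal, using the helpers from the diffs below (the hint pointer and surrounding plumbing are illustrative placeholders):

// Before: helpers consulted the process-wide allocator internally, e.g.
//   byte* buffer = AllocatePage(hint, &allocated);
// After: callers name the allocator explicitly, so a BoundedPageAllocator
// can later be substituted for the platform one.
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
byte* buffer = AllocatePage(page_allocator, hint, &allocated);
if (buffer != nullptr) {
  CHECK(FreePages(page_allocator, buffer, allocated));
}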

View File

@ -57,15 +57,17 @@ struct InitializePageAllocator {
static base::LazyInstance<v8::PageAllocator*, InitializePageAllocator>::type
page_allocator = LAZY_INSTANCE_INITIALIZER;
v8::PageAllocator* GetPageAllocator() { return page_allocator.Get(); }
// We will attempt allocation this many times. After each failure, we call
// OnCriticalMemoryPressure to try to free some memory.
const int kAllocationTries = 2;
} // namespace
v8::PageAllocator* GetPlatformPageAllocator() {
DCHECK_NOT_NULL(page_allocator.Get());
return page_allocator.Get();
}
void* Malloced::New(size_t size) {
void* result = AllocWithRetry(size);
if (result == nullptr) {
@ -131,26 +133,31 @@ void AlignedFree(void *ptr) {
#endif
}
size_t AllocatePageSize() { return GetPageAllocator()->AllocatePageSize(); }
size_t CommitPageSize() { return GetPageAllocator()->CommitPageSize(); }
void SetRandomMmapSeed(int64_t seed) {
GetPageAllocator()->SetRandomMmapSeed(seed);
size_t AllocatePageSize() {
return GetPlatformPageAllocator()->AllocatePageSize();
}
void* GetRandomMmapAddr() { return GetPageAllocator()->GetRandomMmapAddr(); }
size_t CommitPageSize() { return GetPlatformPageAllocator()->CommitPageSize(); }
void* AllocatePages(void* address, size_t size, size_t alignment,
void SetRandomMmapSeed(int64_t seed) {
GetPlatformPageAllocator()->SetRandomMmapSeed(seed);
}
void* GetRandomMmapAddr() {
return GetPlatformPageAllocator()->GetRandomMmapAddr();
}
void* AllocatePages(v8::PageAllocator* page_allocator, void* address,
size_t size, size_t alignment,
PageAllocator::Permission access) {
DCHECK_NOT_NULL(page_allocator);
DCHECK_EQ(address, AlignedAddress(address, alignment));
DCHECK_EQ(0UL, size & (GetPageAllocator()->AllocatePageSize() - 1));
DCHECK_EQ(0UL, size & (page_allocator->AllocatePageSize() - 1));
void* result = nullptr;
for (int i = 0; i < kAllocationTries; ++i) {
result =
GetPageAllocator()->AllocatePages(address, size, alignment, access);
result = page_allocator->AllocatePages(address, size, alignment, access);
if (result != nullptr) break;
size_t request_size = size + alignment - AllocatePageSize();
size_t request_size = size + alignment - page_allocator->AllocatePageSize();
if (!OnCriticalMemoryPressure(request_size)) break;
}
#if defined(LEAK_SANITIZER)
@ -161,9 +168,11 @@ void* AllocatePages(void* address, size_t size, size_t alignment,
return result;
}
bool FreePages(void* address, const size_t size) {
DCHECK_EQ(0UL, size & (GetPageAllocator()->AllocatePageSize() - 1));
bool result = GetPageAllocator()->FreePages(address, size);
bool FreePages(v8::PageAllocator* page_allocator, void* address,
const size_t size) {
DCHECK_NOT_NULL(page_allocator);
DCHECK_EQ(0UL, size & (page_allocator->AllocatePageSize() - 1));
bool result = page_allocator->FreePages(address, size);
#if defined(LEAK_SANITIZER)
if (result) {
__lsan_unregister_root_region(address, size);
@ -172,9 +181,11 @@ bool FreePages(void* address, const size_t size) {
return result;
}
bool ReleasePages(void* address, size_t size, size_t new_size) {
bool ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size,
size_t new_size) {
DCHECK_NOT_NULL(page_allocator);
DCHECK_LT(new_size, size);
bool result = GetPageAllocator()->ReleasePages(address, size, new_size);
bool result = page_allocator->ReleasePages(address, size, new_size);
#if defined(LEAK_SANITIZER)
if (result) {
__lsan_unregister_root_region(address, size);
@ -184,15 +195,18 @@ bool ReleasePages(void* address, size_t size, size_t new_size) {
return result;
}
bool SetPermissions(void* address, size_t size,
PageAllocator::Permission access) {
return GetPageAllocator()->SetPermissions(address, size, access);
bool SetPermissions(v8::PageAllocator* page_allocator, void* address,
size_t size, PageAllocator::Permission access) {
DCHECK_NOT_NULL(page_allocator);
return page_allocator->SetPermissions(address, size, access);
}
byte* AllocatePage(void* address, size_t* allocated) {
size_t page_size = AllocatePageSize();
void* result =
AllocatePages(address, page_size, page_size, PageAllocator::kReadWrite);
byte* AllocatePage(v8::PageAllocator* page_allocator, void* address,
size_t* allocated) {
DCHECK_NOT_NULL(page_allocator);
size_t page_size = page_allocator->AllocatePageSize();
void* result = AllocatePages(page_allocator, address, page_size, page_size,
PageAllocator::kReadWrite);
if (result != nullptr) *allocated = page_size;
return static_cast<byte*>(result);
}
@ -206,14 +220,19 @@ bool OnCriticalMemoryPressure(size_t length) {
return true;
}
VirtualMemory::VirtualMemory() : address_(kNullAddress), size_(0) {}
VirtualMemory::VirtualMemory()
: page_allocator_(GetPlatformPageAllocator()),
address_(kNullAddress),
size_(0) {}
VirtualMemory::VirtualMemory(size_t size, void* hint, size_t alignment)
: address_(kNullAddress), size_(0) {
size_t page_size = AllocatePageSize();
: page_allocator_(GetPlatformPageAllocator()),
address_(kNullAddress),
size_(0) {
size_t page_size = page_allocator_->AllocatePageSize();
size_t alloc_size = RoundUp(size, page_size);
address_ = reinterpret_cast<Address>(
AllocatePages(hint, alloc_size, alignment, PageAllocator::kNoAccess));
address_ = reinterpret_cast<Address>(AllocatePages(
page_allocator_, hint, alloc_size, alignment, PageAllocator::kNoAccess));
if (address_ != kNullAddress) {
size_ = alloc_size;
}
@ -233,21 +252,22 @@ void VirtualMemory::Reset() {
bool VirtualMemory::SetPermissions(Address address, size_t size,
PageAllocator::Permission access) {
CHECK(InVM(address, size));
bool result = v8::internal::SetPermissions(address, size, access);
bool result =
v8::internal::SetPermissions(page_allocator_, address, size, access);
DCHECK(result);
return result;
}
size_t VirtualMemory::Release(Address free_start) {
DCHECK(IsReserved());
DCHECK(IsAddressAligned(free_start, CommitPageSize()));
DCHECK(IsAddressAligned(free_start, page_allocator_->CommitPageSize()));
// Notice: Order is important here. The VirtualMemory object might live
// inside the allocated region.
const size_t free_size = size_ - (free_start - address_);
CHECK(InVM(free_start, free_size));
DCHECK_LT(address_, free_start);
DCHECK_LT(free_start, address_ + size_);
CHECK(ReleasePages(reinterpret_cast<void*>(address_), size_,
CHECK(ReleasePages(page_allocator_, reinterpret_cast<void*>(address_), size_,
size_ - free_size));
size_ -= free_size;
return free_size;
@ -263,12 +283,13 @@ void VirtualMemory::Free() {
Reset();
// FreePages expects size to be aligned to allocation granularity. Trimming
// may leave size at only commit granularity. Align it here.
CHECK(FreePages(reinterpret_cast<void*>(address),
RoundUp(size, AllocatePageSize())));
CHECK(FreePages(page_allocator_, reinterpret_cast<void*>(address),
RoundUp(size, page_allocator_->AllocatePageSize())));
}
void VirtualMemory::TakeControl(VirtualMemory* from) {
DCHECK(!IsReserved());
page_allocator_ = from->page_allocator_;
address_ = from->address_;
size_ = from->size_;
from->Reset();
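
To make the new ownership concrete: a VirtualMemory now binds its allocator at construction, and SetPermissions/Release/Free reuse it rather than consulting the global. A hedged usage sketch (size, hint and alignment are illustrative):

VirtualMemory reservation(1 * MB, /*hint=*/nullptr,
                          GetPlatformPageAllocator()->AllocatePageSize());
if (reservation.IsReserved()) {
  // SetPermissions routes through reservation.page_allocator() internally.
  CHECK(reservation.SetPermissions(
      reservation.address(), reservation.page_allocator()->CommitPageSize(),
      PageAllocator::kReadWrite));
  // Free() calls FreePages() with the same allocator, rounding the size
  // back up to the allocation granularity (see Free() above).
  reservation.Free();
}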

View File

@ -82,6 +82,9 @@ void* AllocWithRetry(size_t size);
void* AlignedAlloc(size_t size, size_t alignment);
void AlignedFree(void *ptr);
// Returns the platform page allocator instance. Guaranteed to be a valid pointer.
V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformPageAllocator();
// Gets the page granularity for AllocatePages and FreePages. Addresses returned
// by AllocatePages and AllocatePage are aligned to this size.
V8_EXPORT_PRIVATE size_t AllocatePageSize();
@ -101,14 +104,16 @@ V8_EXPORT_PRIVATE void* GetRandomMmapAddr();
// AllocatePageSize(). Returns the address of the allocated memory, with the
// specified size and alignment, or nullptr on failure.
V8_EXPORT_PRIVATE
V8_WARN_UNUSED_RESULT void* AllocatePages(void* address, size_t size,
V8_WARN_UNUSED_RESULT void* AllocatePages(v8::PageAllocator* page_allocator,
void* address, size_t size,
size_t alignment,
PageAllocator::Permission access);
// Frees memory allocated by a call to AllocatePages. |address| and |size| must
// be multiples of AllocatePageSize(). Returns true on success, otherwise false.
V8_EXPORT_PRIVATE
V8_WARN_UNUSED_RESULT bool FreePages(void* address, const size_t size);
V8_WARN_UNUSED_RESULT bool FreePages(v8::PageAllocator* page_allocator,
void* address, const size_t size);
// Releases memory that is no longer needed. The range specified by |address|
// and |size| must be an allocated memory region. |size| and |new_size| must be
@ -116,7 +121,8 @@ V8_WARN_UNUSED_RESULT bool FreePages(void* address, const size_t size);
// Released memory is left in an undefined state, so it should not be accessed.
// Returns true on success, otherwise false.
V8_EXPORT_PRIVATE
V8_WARN_UNUSED_RESULT bool ReleasePages(void* address, size_t size,
V8_WARN_UNUSED_RESULT bool ReleasePages(v8::PageAllocator* page_allocator,
void* address, size_t size,
size_t new_size);
// Sets permissions according to |access|. |address| and |size| must be
@ -124,18 +130,21 @@ V8_WARN_UNUSED_RESULT bool ReleasePages(void* address, size_t size,
// cause the memory contents to be lost. Returns true on success, otherwise
// false.
V8_EXPORT_PRIVATE
V8_WARN_UNUSED_RESULT bool SetPermissions(void* address, size_t size,
V8_WARN_UNUSED_RESULT bool SetPermissions(v8::PageAllocator* page_allocator,
void* address, size_t size,
PageAllocator::Permission access);
inline bool SetPermissions(Address address, size_t size,
PageAllocator::Permission access) {
return SetPermissions(reinterpret_cast<void*>(address), size, access);
inline bool SetPermissions(v8::PageAllocator* page_allocator, Address address,
size_t size, PageAllocator::Permission access) {
return SetPermissions(page_allocator, reinterpret_cast<void*>(address), size,
access);
}
// Convenience function that allocates a single system page with read and write
// permissions. |address| is a hint. Returns the base address of the memory and
// the page size via |allocated| on success. Returns nullptr on failure.
V8_EXPORT_PRIVATE
V8_WARN_UNUSED_RESULT byte* AllocatePage(void* address, size_t* allocated);
V8_WARN_UNUSED_RESULT byte* AllocatePage(v8::PageAllocator* page_allocator,
void* address, size_t* allocated);
// Function that may release reserved memory regions to allow failed allocations
// to succeed. |length| is the amount of memory needed. Returns |true| if memory
@ -156,7 +165,9 @@ class V8_EXPORT_PRIVATE VirtualMemory {
// Construct a virtual memory by assigning it some already mapped address
// and size.
VirtualMemory(Address address, size_t size)
: address_(address), size_(size) {}
: page_allocator_(GetPlatformPageAllocator()),
address_(address),
size_(size) {}
// Releases the reserved memory, if any, controlled by this VirtualMemory
// object.
@ -168,6 +179,8 @@ class V8_EXPORT_PRIVATE VirtualMemory {
// Initializes or resets an embedded VirtualMemory object.
void Reset();
v8::PageAllocator* page_allocator() { return page_allocator_; }
// Returns the start address of the reserved memory.
// If the memory was reserved with an alignment, this address is not
// necessarily aligned. The user might need to round it up to a multiple of
@ -208,6 +221,8 @@ class V8_EXPORT_PRIVATE VirtualMemory {
}
private:
// Page allocator that controls the virtual memory.
v8::PageAllocator* page_allocator_;
Address address_; // Start address of the virtual memory.
size_t size_; // Size of the virtual memory.
};
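
A usage sketch for the declarations above (inside v8::internal; assumes the commit granularity divides the allocation granularity, which holds on the supported platforms):

v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t page_size = page_allocator->AllocatePageSize();
void* region = AllocatePages(page_allocator, GetRandomMmapAddr(),
                             4 * page_size, page_size,
                             PageAllocator::kNoAccess);
if (region != nullptr) {
  // Commit the first commit-granularity page for read/write use.
  CHECK(SetPermissions(page_allocator, region,
                       page_allocator->CommitPageSize(),
                       PageAllocator::kReadWrite));
  // Give back everything but the first allocation-granularity page...
  CHECK(ReleasePages(page_allocator, region, 4 * page_size, page_size));
  // ...and finally free the remainder with the same allocator.
  CHECK(FreePages(page_allocator, region, page_size));
}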

View File

@ -24,8 +24,10 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
#if defined(USE_SIMULATOR)
return stub;
#else
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(page_allocator,
isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@ -170,7 +172,8 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
CHECK(SetPermissions(page_allocator, buffer, allocated,
PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
@ -182,8 +185,10 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
#if defined(USE_SIMULATOR)
return stub;
#else
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(page_allocator,
isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@ -259,7 +264,8 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
masm.GetCode(isolate, &desc);
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
CHECK(SetPermissions(page_allocator, buffer, allocated,
PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
}
@ -269,8 +275,10 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return nullptr;
#else
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(page_allocator,
isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@ -286,7 +294,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
CHECK(SetPermissions(page_allocator, buffer, allocated,
PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
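
The same allocate → emit → flush → seal sequence recurs in every architecture's codegen file touched by this CL; condensed, the pattern is (a sketch, with the assembler body elided):

v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
byte* buffer = AllocatePage(page_allocator,
                            isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;  // fall back to the portable version
// ... emit machine code into |buffer| with a MacroAssembler ...
Assembler::FlushICache(buffer, allocated);
// Revoke write access before exposing the code (W^X discipline).
CHECK(SetPermissions(page_allocator, buffer, allocated,
                     PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);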

View File

@ -24,11 +24,9 @@ STATIC_ASSERT_ENUM(PageAllocator::kReadExecute,
#undef STATIC_ASSERT_ENUM
size_t PageAllocator::AllocatePageSize() {
return base::OS::AllocatePageSize();
}
size_t PageAllocator::CommitPageSize() { return base::OS::CommitPageSize(); }
PageAllocator::PageAllocator()
: allocate_page_size_(base::OS::AllocatePageSize()),
commit_page_size_(base::OS::CommitPageSize()) {}
void PageAllocator::SetRandomMmapSeed(int64_t seed) {
base::OS::SetRandomMmapSeed(seed);

View File

@ -15,11 +15,12 @@ namespace base {
class V8_BASE_EXPORT PageAllocator
: public NON_EXPORTED_BASE(::v8::PageAllocator) {
public:
PageAllocator();
virtual ~PageAllocator() = default;
size_t AllocatePageSize() override;
size_t AllocatePageSize() override { return allocate_page_size_; }
size_t CommitPageSize() override;
size_t CommitPageSize() override { return commit_page_size_; }
void SetRandomMmapSeed(int64_t seed) override;
@ -34,6 +35,10 @@ class V8_BASE_EXPORT PageAllocator
bool SetPermissions(void* address, size_t size,
PageAllocator::Permission access) override;
private:
const size_t allocate_page_size_;
const size_t commit_page_size_;
};
} // namespace base
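
The caching half of the CL follows a simple pattern: query the OS once in the constructor, store the results in const members, and make the virtual getters field reads. A minimal analogue (the QueryOs* functions are hypothetical stand-ins for the base::OS calls):

class CachingAllocatorSketch {
 public:
  CachingAllocatorSketch()
      : allocate_page_size_(QueryOsAllocateGranularity()),  // hypothetical
        commit_page_size_(QueryOsCommitGranularity()) {}    // hypothetical
  // Trivial accessors instead of per-call OS queries, which matters because
  // the allocation helpers now call these on hot paths.
  size_t AllocatePageSize() const { return allocate_page_size_; }
  size_t CommitPageSize() const { return commit_page_size_; }
 private:
  const size_t allocate_page_size_;
  const size_t commit_page_size_;
};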

View File

@ -118,18 +118,20 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
// store their lengths as a SMI internally.
if (length >= kTwoGB) return nullptr;
size_t page_size = i::AllocatePageSize();
v8::PageAllocator* page_allocator = i::GetPlatformPageAllocator();
size_t page_size = page_allocator->AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
// Rounding up could go over the limit.
if (allocated >= kTwoGB) return nullptr;
return i::AllocatePages(nullptr, allocated, page_size,
return i::AllocatePages(page_allocator, nullptr, allocated, page_size,
PageAllocator::kReadWrite);
}
void FreeVM(void* data, size_t length) {
size_t page_size = i::AllocatePageSize();
v8::PageAllocator* page_allocator = i::GetPlatformPageAllocator();
size_t page_size = page_allocator->AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
CHECK(i::FreePages(data, allocated));
CHECK(i::FreePages(page_allocator, data, allocated));
}
};
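
A worked example of the rounding above, assuming a 64 KB allocation granularity (illustrative numbers):

size_t length = 100000;                         // requested buffer size
size_t page_size = 65536;                       // assumed AllocatePageSize()
size_t allocated = RoundUp(length, page_size);  // == 131072, two OS pages
// FreeVM recomputes the same rounded size, so FreePages sees exactly the
// size that AllocatePages reserved; freeing the raw |length| would not match.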

View File

@ -304,6 +304,8 @@ void CodeRangeAddressHint::NotifyFreedCodeRange(void* code_range_start,
MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
size_t code_range_size)
: isolate_(isolate),
data_page_allocator_(GetPlatformPageAllocator()),
code_page_allocator_(GetPlatformPageAllocator()),
code_range_(nullptr),
capacity_(RoundUp(capacity, Page::kPageSize)),
size_(0),
@ -490,7 +492,9 @@ size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() {
}
bool MemoryAllocator::CommitMemory(Address base, size_t size) {
if (!SetPermissions(base, size, PageAllocator::kReadWrite)) {
// TODO(ishell): use proper page allocator
if (!SetPermissions(GetPlatformPageAllocator(), base, size,
PageAllocator::kReadWrite)) {
return false;
}
UpdateAllocatedSpaceLimits(base, base + size);
@ -499,7 +503,7 @@ bool MemoryAllocator::CommitMemory(Address base, size_t size) {
void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
Executability executable) {
// TODO(gc) make code_range part of memory allocator?
// TODO(ishell): make code_range part of memory allocator?
// Code which is part of the code-range does not have its own VirtualMemory.
DCHECK(code_range() == nullptr ||
!code_range()->contains(reservation->address()));
@ -512,13 +516,13 @@ void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
void MemoryAllocator::FreeMemory(Address base, size_t size,
Executability executable) {
// TODO(gc) make code_range part of memory allocator?
// TODO(ishell): make code_range part of memory allocator?
if (code_range() != nullptr && code_range()->contains(base)) {
DCHECK(executable == EXECUTABLE);
code_range()->FreeRawMemory(base, size);
} else {
DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
CHECK(FreePages(reinterpret_cast<void*>(base), size));
CHECK(FreePages(data_page_allocator_, reinterpret_cast<void*>(base), size));
}
}
@ -608,8 +612,10 @@ void MemoryChunk::SetReadAndExecutable() {
size_t page_size = MemoryAllocator::GetCommitPageSize();
DCHECK(IsAddressAligned(protect_start, page_size));
size_t protect_size = RoundUp(area_size(), page_size);
CHECK(SetPermissions(protect_start, protect_size,
PageAllocator::kReadExecute));
// TODO(ishell): use reservation_.SetPermissions() once it's always
// initialized.
CHECK(SetPermissions(reservation_.page_allocator(), protect_start,
protect_size, PageAllocator::kReadExecute));
}
}
@ -627,8 +633,10 @@ void MemoryChunk::SetReadAndWritable() {
size_t page_size = MemoryAllocator::GetCommitPageSize();
DCHECK(IsAddressAligned(unprotect_start, page_size));
size_t unprotect_size = RoundUp(area_size(), page_size);
CHECK(SetPermissions(unprotect_start, unprotect_size,
PageAllocator::kReadWrite));
// TODO(ishell): use reservation_.SetPermissions() once it's always
// initialized.
CHECK(SetPermissions(reservation_.page_allocator(), unprotect_start,
unprotect_size, PageAllocator::kReadWrite));
}
}
@ -696,7 +704,9 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
size_t page_size = MemoryAllocator::GetCommitPageSize();
DCHECK(IsAddressAligned(area_start, page_size));
size_t area_size = RoundUp(area_end - area_start, page_size);
CHECK(SetPermissions(area_start, area_size,
// TODO(ishell): use reservation->SetPermissions() once it's always
// initialized.
CHECK(SetPermissions(reservation->page_allocator(), area_start, area_size,
PageAllocator::kReadWriteExecute));
}
}
@ -928,7 +938,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
if ((base + chunk_size) == 0u) {
CHECK(!last_chunk_.IsReserved());
last_chunk_.TakeControl(&reservation);
UncommitBlock(last_chunk_.address(), last_chunk_.size());
UncommitBlock(&last_chunk_, last_chunk_.address(), last_chunk_.size());
size_ -= chunk_size;
if (executable == EXECUTABLE) {
size_executable_ -= chunk_size;
@ -1128,7 +1138,8 @@ void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
VirtualMemory* reservation = chunk->reserved_memory();
if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
UncommitBlock(reservation, reinterpret_cast<Address>(chunk),
MemoryChunk::kPageSize);
} else {
if (reservation->IsReserved()) {
FreeMemory(reservation, chunk->executable());
@ -1237,9 +1248,13 @@ bool MemoryAllocator::CommitBlock(Address start, size_t size) {
return true;
}
bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
if (!SetPermissions(start, size, PageAllocator::kNoAccess)) return false;
bool MemoryAllocator::UncommitBlock(VirtualMemory* reservation, Address start,
size_t size) {
// TODO(ishell): use reservation->SetPermissions() once it's always
// initialized.
if (!SetPermissions(reservation->page_allocator(), start, size,
PageAllocator::kNoAccess))
return false;
isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
return true;
}
@ -3342,7 +3357,10 @@ void ReadOnlySpace::SetPermissionsForPages(PageAllocator::Permission access) {
if (access == PageAllocator::kRead) {
page->MakeHeaderRelocatable();
}
CHECK(SetPermissions(page->address() + area_start_offset,
// TODO(ishell): use p->reserved_memory()->SetPermissions() once it's always
// initialized.
CHECK(SetPermissions(page->reserved_memory()->page_allocator(),
page->address() + area_start_offset,
page->size() - area_start_offset, access));
}
}
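
The interim pattern behind the repeated TODOs above, schematically (the reservation comes from the chunk, the start/size values from the surrounding functions):

VirtualMemory* reservation = chunk->reserved_memory();
// Route the permission change through the reservation's own allocator...
CHECK(SetPermissions(reservation->page_allocator(), protect_start,
                     protect_size, PageAllocator::kReadExecute));
// ...which collapses to the member call once reservation_ is always
// initialized:
//   reservation->SetPermissions(protect_start, protect_size,
//                               PageAllocator::kReadExecute);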

View File

@ -363,7 +363,7 @@ class MemoryChunk {
+ kUIntptrSize // uintptr_t flags_
+ kPointerSize // Address area_start_
+ kPointerSize // Address area_end_
+ 2 * kPointerSize // VirtualMemory reservation_
+ 3 * kPointerSize // VirtualMemory reservation_
+ kPointerSize // Address owner_
+ kPointerSize // Heap* heap_
+ kIntptrSize // intptr_t progress_bar_
@ -894,6 +894,9 @@ class ReadOnlyPage : public Page {
// Clears any pointers in the header that point out of the page that would
// otherwise make the header non-relocatable.
void MakeHeaderRelocatable();
private:
friend class ReadOnlySpace;
};
class LargePage : public MemoryChunk {
@ -1456,7 +1459,7 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
// start is not kNullAddress, the size is greater than zero, and the
// block is contained in the initial chunk. Returns true if it succeeded
// and false otherwise.
bool UncommitBlock(Address start, size_t size);
bool UncommitBlock(VirtualMemory* reservation, Address start, size_t size);
// Zaps a contiguous block of memory [start..(start+size)[ with
// a given zap value.
@ -1467,6 +1470,14 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
size_t commit_size,
size_t reserved_size);
// Page allocator instance for allocating non-executable pages.
// Guaranteed to be a valid pointer.
v8::PageAllocator* data_page_allocator() { return data_page_allocator_; }
// Page allocator instance for allocating executable pages.
// Guaranteed to be a valid pointer.
v8::PageAllocator* code_page_allocator() { return code_page_allocator_; }
CodeRange* code_range() { return code_range_; }
Unmapper* unmapper() { return &unmapper_; }
@ -1518,6 +1529,10 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
}
Isolate* isolate_;
v8::PageAllocator* data_page_allocator_;
v8::PageAllocator* code_page_allocator_;
CodeRange* code_range_;
// Maximum space size in bytes.
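
Both getters currently alias the platform allocator (see the MemoryAllocator constructor above), but call sites can already select the matching one; a sketch:

v8::PageAllocator* allocator = (executable == EXECUTABLE)
                                   ? memory_allocator->code_page_allocator()
                                   : memory_allocator->data_page_allocator();
size_t commit_page_size = allocator->CommitPageSize();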

View File

@ -16,8 +16,10 @@ namespace internal {
#define __ masm.
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(page_allocator,
isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@ -39,7 +41,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
CHECK(SetPermissions(page_allocator, buffer, allocated,
PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
@ -131,8 +134,10 @@ class LabelConverter {
MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(page_allocator,
isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@ -450,7 +455,8 @@ MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
CHECK(SetPermissions(page_allocator, buffer, allocated,
PageAllocator::kReadExecute));
// TODO(jkummerow): It would be nice to register this code creation event
// with the PROFILE / GDBJIT system.
return FUNCTION_CAST<MemMoveFunction>(buffer);

View File

@ -51,16 +51,18 @@ void InstructionStream::CreateOffHeapInstructionStream(Isolate* isolate,
uint32_t* size) {
EmbeddedData d = EmbeddedData::FromIsolate(isolate);
const uint32_t page_size = static_cast<uint32_t>(AllocatePageSize());
v8::PageAllocator* page_allocator = v8::internal::GetPlatformPageAllocator();
const uint32_t page_size =
static_cast<uint32_t>(page_allocator->AllocatePageSize());
const uint32_t allocated_size = RoundUp(d.size(), page_size);
uint8_t* allocated_bytes = static_cast<uint8_t*>(
AllocatePages(GetRandomMmapAddr(), allocated_size, page_size,
PageAllocator::kReadWrite));
AllocatePages(page_allocator, isolate->heap()->GetRandomMmapAddr(),
allocated_size, page_size, PageAllocator::kReadWrite));
CHECK_NOT_NULL(allocated_bytes);
std::memcpy(allocated_bytes, d.data(), d.size());
CHECK(SetPermissions(allocated_bytes, allocated_size,
CHECK(SetPermissions(page_allocator, allocated_bytes, allocated_size,
PageAllocator::kReadExecute));
*data = allocated_bytes;
@ -72,8 +74,10 @@ void InstructionStream::CreateOffHeapInstructionStream(Isolate* isolate,
// static
void InstructionStream::FreeOffHeapInstructionStream(uint8_t* data,
uint32_t size) {
const uint32_t page_size = static_cast<uint32_t>(AllocatePageSize());
CHECK(FreePages(data, RoundUp(size, page_size)));
v8::PageAllocator* page_allocator = v8::internal::GetPlatformPageAllocator();
const uint32_t page_size =
static_cast<uint32_t>(page_allocator->AllocatePageSize());
CHECK(FreePages(page_allocator, data, RoundUp(size, page_size)));
}
} // namespace internal
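
Note the symmetry the free path preserves: both sides round with the same allocator's page size, so the FreePages size matches the AllocatePages size exactly. Condensed (d_size stands in for d.size(); page_allocator as in the diff):

const uint32_t page_size =
    static_cast<uint32_t>(page_allocator->AllocatePageSize());
// The allocation side reserves RoundUp(d_size, page_size) bytes...
uint8_t* data = static_cast<uint8_t*>(
    AllocatePages(page_allocator, nullptr, RoundUp(d_size, page_size),
                  page_size, PageAllocator::kReadWrite));
// ...so the free side must round the recorded size the same way:
CHECK(FreePages(page_allocator, data, RoundUp(d_size, page_size)));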

View File

@ -24,8 +24,10 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
defined(_MIPS_ARCH_MIPS32RX)
return stub;
#else
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(page_allocator,
isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@ -545,7 +547,8 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
CHECK(SetPermissions(page_allocator, buffer, allocated,
PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
@ -555,8 +558,10 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return nullptr;
#else
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(page_allocator,
isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@ -572,7 +577,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
CHECK(SetPermissions(page_allocator, buffer, allocated,
PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

View File

@ -23,9 +23,10 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
#if defined(USE_SIMULATOR)
return stub;
#else
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(page_allocator,
isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@ -546,7 +547,8 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
CHECK(SetPermissions(page_allocator, buffer, allocated,
PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
@ -556,8 +558,10 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return nullptr;
#else
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(page_allocator,
isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@ -573,7 +577,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
CHECK(SetPermissions(page_allocator, buffer, allocated,
PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

View File

@ -76,7 +76,8 @@ void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) {
isolate->wasm_engine()->memory_tracker();
if (!memory_tracker->FreeMemoryIfIsWasmMemory(isolate,
allocation.backing_store)) {
CHECK(FreePages(allocation.allocation_base, allocation.length));
CHECK(FreePages(GetPlatformPageAllocator(), allocation.allocation_base,
allocation.length));
}
} else {
isolate->array_buffer_allocator()->Free(allocation.allocation_base,

View File

@ -20,8 +20,10 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return nullptr;
#else
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(page_allocator,
isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@ -41,7 +43,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
CHECK(SetPermissions(page_allocator, buffer, allocated,
PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

View File

@ -20,8 +20,10 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return nullptr;
#else
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(page_allocator,
isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@ -38,7 +40,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
CHECK(SetPermissions(page_allocator, buffer, allocated,
PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

View File

@ -640,6 +640,7 @@ void NativeModule::PatchJumpTable(uint32_t func_index, Address target,
}
Address NativeModule::AllocateForCode(size_t size) {
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
// This happens under a lock assumed by the caller.
size = RoundUp(size, kCodeAlignment);
AddressRange mem = free_code_space_.Allocate(size);
@ -660,8 +661,8 @@ Address NativeModule::AllocateForCode(size_t size) {
mem = free_code_space_.Allocate(size);
if (mem.is_empty()) return kNullAddress;
}
Address commit_start = RoundUp(mem.start, AllocatePageSize());
Address commit_end = RoundUp(mem.end, AllocatePageSize());
Address commit_start = RoundUp(mem.start, page_allocator->AllocatePageSize());
Address commit_end = RoundUp(mem.end, page_allocator->AllocatePageSize());
// {commit_start} will be either mem.start or the start of the next page.
// {commit_end} will be the start of the page after the one in which
// the allocation ends.
@ -683,7 +684,7 @@ Address NativeModule::AllocateForCode(size_t size) {
if (commit_end > it->end() || it->address() >= commit_end) continue;
Address start = std::max(commit_start, it->address());
size_t commit_size = static_cast<size_t>(commit_end - start);
DCHECK(IsAligned(commit_size, AllocatePageSize()));
DCHECK(IsAligned(commit_size, page_allocator->AllocatePageSize()));
if (!wasm_code_manager_->Commit(start, commit_size)) {
return kNullAddress;
}
@ -692,7 +693,7 @@ Address NativeModule::AllocateForCode(size_t size) {
}
#else
size_t commit_size = static_cast<size_t>(commit_end - commit_start);
DCHECK(IsAligned(commit_size, AllocatePageSize()));
DCHECK(IsAligned(commit_size, page_allocator->AllocatePageSize()));
if (!wasm_code_manager_->Commit(commit_start, commit_size)) {
return kNullAddress;
}
@ -783,7 +784,8 @@ bool WasmCodeManager::Commit(Address start, size_t size) {
? PageAllocator::kReadWrite
: PageAllocator::kReadWriteExecute;
bool ret = SetPermissions(start, size, permission);
bool ret =
SetPermissions(GetPlatformPageAllocator(), start, size, permission);
TRACE_HEAP("Setting rw permissions for %p:%p\n",
reinterpret_cast<void*>(start),
reinterpret_cast<void*>(start + size));
@ -909,6 +911,8 @@ bool NativeModule::SetExecutable(bool executable) {
if (is_executable_ == executable) return true;
TRACE_HEAP("Setting module %p as executable: %d.\n", this, executable);
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
if (FLAG_wasm_write_protect_code_memory) {
PageAllocator::Permission permission =
executable ? PageAllocator::kReadExecute : PageAllocator::kReadWrite;
@ -923,7 +927,8 @@ bool NativeModule::SetExecutable(bool executable) {
// committed or not.
if (can_request_more_memory_) {
for (auto& vmem : owned_code_space_) {
if (!SetPermissions(vmem.address(), vmem.size(), permission)) {
if (!SetPermissions(page_allocator, vmem.address(), vmem.size(),
permission)) {
return false;
}
TRACE_HEAP("Set %p:%p to executable:%d\n", vmem.address(), vmem.end(),
@ -936,8 +941,10 @@ bool NativeModule::SetExecutable(bool executable) {
for (auto& range : allocated_code_space_.ranges()) {
// allocated_code_space_ is fine-grained, so we need to
// page-align it.
size_t range_size = RoundUp(range.size(), AllocatePageSize());
if (!SetPermissions(range.start, range_size, permission)) {
size_t range_size =
RoundUp(range.size(), page_allocator->AllocatePageSize());
if (!SetPermissions(page_allocator, range.start, range_size,
permission)) {
return false;
}
TRACE_HEAP("Set %p:%p to executable:%d\n",

View File

@ -77,8 +77,9 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
}
// The Reserve makes the whole region inaccessible by default.
*allocation_base = AllocatePages(nullptr, *allocation_length, kWasmPageSize,
PageAllocator::kNoAccess);
*allocation_base =
AllocatePages(GetPlatformPageAllocator(), nullptr, *allocation_length,
kWasmPageSize, PageAllocator::kNoAccess);
if (*allocation_base == nullptr) {
memory_tracker->ReleaseReservation(*allocation_length);
AddAllocationStatusSample(heap->isolate(), AllocationStatus::kOtherFailure);
@ -91,8 +92,9 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
// Make the part we care about accessible.
if (size > 0) {
bool result = SetPermissions(memory, RoundUp(size, kWasmPageSize),
PageAllocator::kReadWrite);
bool result =
SetPermissions(GetPlatformPageAllocator(), memory,
RoundUp(size, kWasmPageSize), PageAllocator::kReadWrite);
// SetPermissions commits the extra memory, which may put us over the
// process memory limit. If so, report this as an OOM.
if (!result) {
@ -225,7 +227,8 @@ bool WasmMemoryTracker::FreeMemoryIfIsWasmMemory(Isolate* isolate,
const void* buffer_start) {
if (IsWasmMemory(buffer_start)) {
const AllocationData allocation = ReleaseAllocation(isolate, buffer_start);
CHECK(FreePages(allocation.allocation_base, allocation.allocation_length));
CHECK(FreePages(GetPlatformPageAllocator(), allocation.allocation_base,
allocation.allocation_length));
return true;
}
return false;
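
The wasm backing-store path above is the reserve-then-commit protocol in miniature; a hedged sketch (allocation_length, size and kWasmPageSize as in the diff, tracker bookkeeping elided):

v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
// 1. Reserve the full region with no access; nothing is committed yet.
void* base = AllocatePages(page_allocator, nullptr, allocation_length,
                           kWasmPageSize, PageAllocator::kNoAccess);
if (base == nullptr) return nullptr;  // the reservation itself failed
// 2. Commit only the currently used prefix; growing commits more later.
if (size > 0 &&
    !SetPermissions(page_allocator, base, RoundUp(size, kWasmPageSize),
                    PageAllocator::kReadWrite)) {
  // Committing counts against the process memory limit, so failure is OOM.
  CHECK(FreePages(page_allocator, base, allocation_length));
  return nullptr;
}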

View File

@ -904,8 +904,8 @@ MaybeHandle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
DCHECK_NOT_NULL(old_buffer->backing_store());
// If adjusting permissions fails, propagate error back to return
// failure to grow.
if (!i::SetPermissions(old_mem_start, new_size,
PageAllocator::kReadWrite)) {
if (!i::SetPermissions(GetPlatformPageAllocator(), old_mem_start,
new_size, PageAllocator::kReadWrite)) {
return {};
}
reinterpret_cast<v8::Isolate*>(isolate)

View File

@ -15,8 +15,10 @@ namespace internal {
#define __ masm.
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(page_allocator,
isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@ -32,7 +34,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
CHECK(SetPermissions(page_allocator, buffer, allocated,
PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}

View File

@ -90,6 +90,11 @@ static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
TestCodeRangeScope test_code_range_scope(isolate, code_range);
v8::PageAllocator* data_page_allocator =
memory_allocator->data_page_allocator();
v8::PageAllocator* code_page_allocator =
memory_allocator->code_page_allocator();
size_t header_size = (executable == EXECUTABLE)
? MemoryAllocator::CodePageGuardStartOffset()
: MemoryChunk::kObjectStartOffset;
@ -100,12 +105,13 @@ static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
reserve_area_size, commit_area_size, executable, space);
size_t alignment = code_range != nullptr && code_range->valid()
? MemoryChunk::kAlignment
: CommitPageSize();
: code_page_allocator->CommitPageSize();
size_t reserved_size =
((executable == EXECUTABLE))
? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
alignment)
: RoundUp(header_size + reserve_area_size, CommitPageSize());
: RoundUp(header_size + reserve_area_size,
data_page_allocator->CommitPageSize());
CHECK(memory_chunk->size() == reserved_size);
CHECK(memory_chunk->area_start() <
memory_chunk->address() + memory_chunk->size());

View File

@ -17,7 +17,8 @@ static inline uint8_t* AllocateAssemblerBuffer(
size_t page_size = v8::internal::AllocatePageSize();
size_t alloc_size = RoundUp(requested, page_size);
void* result = v8::internal::AllocatePages(
address, alloc_size, page_size, v8::PageAllocator::kReadWriteExecute);
GetPlatformPageAllocator(), address, alloc_size, page_size,
v8::PageAllocator::kReadWriteExecute);
CHECK(result);
*allocated = alloc_size;
return static_cast<uint8_t*>(result);
@ -25,8 +26,9 @@ static inline uint8_t* AllocateAssemblerBuffer(
static inline void MakeAssemblerBufferExecutable(uint8_t* buffer,
size_t allocated) {
bool result = v8::internal::SetPermissions(buffer, allocated,
v8::PageAllocator::kReadExecute);
bool result =
v8::internal::SetPermissions(GetPlatformPageAllocator(), buffer,
allocated, v8::PageAllocator::kReadExecute);
CHECK(result);
// Flush the instruction cache as part of making the buffer executable.
@ -35,8 +37,9 @@ static inline void MakeAssemblerBufferExecutable(uint8_t* buffer,
static inline void MakeAssemblerBufferWritable(uint8_t* buffer,
size_t allocated) {
bool result = v8::internal::SetPermissions(buffer, allocated,
v8::PageAllocator::kReadWrite);
bool result =
v8::internal::SetPermissions(GetPlatformPageAllocator(), buffer,
allocated, v8::PageAllocator::kReadWrite);
CHECK(result);
}

View File

@ -95,12 +95,14 @@ class MemoryAllocationPermissionsTest : public ::testing::Test {
void TestPermissions(PageAllocator::Permission permission, bool can_read,
bool can_write) {
const size_t page_size = AllocatePageSize();
int* buffer = static_cast<int*>(
AllocatePages(nullptr, page_size, page_size, permission));
v8::PageAllocator* page_allocator =
v8::internal::GetPlatformPageAllocator();
const size_t page_size = page_allocator->AllocatePageSize();
int* buffer = static_cast<int*>(AllocatePages(
page_allocator, nullptr, page_size, page_size, permission));
ProbeMemory(buffer, MemoryAction::kRead, can_read);
ProbeMemory(buffer, MemoryAction::kWrite, can_write);
CHECK(FreePages(buffer, page_size));
CHECK(FreePages(page_allocator, buffer, page_size));
}
};
@ -125,41 +127,46 @@ TEST(AllocationTest, AllocateAndFree) {
size_t page_size = v8::internal::AllocatePageSize();
CHECK_NE(0, page_size);
v8::PageAllocator* page_allocator = v8::internal::GetPlatformPageAllocator();
// A large allocation, aligned at native allocation granularity.
const size_t kAllocationSize = 1 * v8::internal::MB;
void* mem_addr = v8::internal::AllocatePages(
v8::internal::GetRandomMmapAddr(), kAllocationSize, page_size,
PageAllocator::Permission::kReadWrite);
page_allocator, page_allocator->GetRandomMmapAddr(), kAllocationSize,
page_size, PageAllocator::Permission::kReadWrite);
CHECK_NOT_NULL(mem_addr);
CHECK(v8::internal::FreePages(mem_addr, kAllocationSize));
CHECK(v8::internal::FreePages(page_allocator, mem_addr, kAllocationSize));
// A large allocation, aligned significantly beyond native granularity.
const size_t kBigAlignment = 64 * v8::internal::MB;
void* aligned_mem_addr = v8::internal::AllocatePages(
AlignedAddress(v8::internal::GetRandomMmapAddr(), kBigAlignment),
page_allocator,
AlignedAddress(page_allocator->GetRandomMmapAddr(), kBigAlignment),
kAllocationSize, kBigAlignment, PageAllocator::Permission::kReadWrite);
CHECK_NOT_NULL(aligned_mem_addr);
CHECK_EQ(aligned_mem_addr, AlignedAddress(aligned_mem_addr, kBigAlignment));
CHECK(v8::internal::FreePages(aligned_mem_addr, kAllocationSize));
CHECK(v8::internal::FreePages(page_allocator, aligned_mem_addr,
kAllocationSize));
}
TEST(AllocationTest, ReserveMemory) {
v8::PageAllocator* page_allocator = v8::internal::GetPlatformPageAllocator();
size_t page_size = v8::internal::AllocatePageSize();
const size_t kAllocationSize = 1 * v8::internal::MB;
void* mem_addr = v8::internal::AllocatePages(
v8::internal::GetRandomMmapAddr(), kAllocationSize, page_size,
PageAllocator::Permission::kReadWrite);
page_allocator, page_allocator->GetRandomMmapAddr(), kAllocationSize,
page_size, PageAllocator::Permission::kReadWrite);
CHECK_NE(0, page_size);
CHECK_NOT_NULL(mem_addr);
size_t commit_size = v8::internal::CommitPageSize();
CHECK(v8::internal::SetPermissions(mem_addr, commit_size,
size_t commit_size = page_allocator->CommitPageSize();
CHECK(v8::internal::SetPermissions(page_allocator, mem_addr, commit_size,
PageAllocator::Permission::kReadWrite));
// Check whether we can write to memory.
int* addr = static_cast<int*>(mem_addr);
addr[v8::internal::KB - 1] = 2;
CHECK(v8::internal::SetPermissions(mem_addr, commit_size,
CHECK(v8::internal::SetPermissions(page_allocator, mem_addr, commit_size,
PageAllocator::Permission::kNoAccess));
CHECK(v8::internal::FreePages(mem_addr, kAllocationSize));
CHECK(v8::internal::FreePages(page_allocator, mem_addr, kAllocationSize));
}
} // namespace internal