Revert "Reland "[Memory] Use OS::Allocate for all OS memory allocations.""

Revert this change and its follow-up, as they are suspected of causing the current canary OOM crasher.

This reverts commit 4899bcb66d.
This reverts commit b73ee3344a.

TBR=adamk@chromium.org,hpayer@chromium.org

Bug: chromium:783708
Cq-Include-Trybots: master.tryserver.chromium.linux:linux_chromium_rel_ng
Change-Id: I4c00582e7ab2df22216ad6732e2843e9958db0c0
Reviewed-on: https://chromium-review.googlesource.com/765447
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Bill Budge <bbudge@chromium.org>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#49315}
This commit is contained in:
Jakob Gruber 2017-11-11 19:43:01 +01:00 committed by Commit Bot
parent 2feb99dc8a
commit 8122afa726
30 changed files with 665 additions and 574 deletions

View File

@ -103,33 +103,28 @@ void AlignedFree(void *ptr) {
#endif
}
// Allocates a single read/write system page. |address| is only a placement
// hint. On success *allocated receives the page size; on failure it is left
// untouched and nullptr is returned.
byte* AllocateSystemPage(void* address, size_t* allocated) {
  const size_t kPageSize = base::OS::AllocatePageSize();
  void* memory = base::OS::Allocate(address, kPageSize, kPageSize,
                                    base::OS::MemoryPermission::kReadWrite);
  if (memory == nullptr) return nullptr;
  *allocated = kPageSize;
  return static_cast<byte*>(memory);
}
// Default-constructs an empty VirtualMemory that controls no reserved region.
VirtualMemory::VirtualMemory() : address_(nullptr), size_(0) {}
VirtualMemory::VirtualMemory(size_t size, void* hint, size_t alignment)
: address_(nullptr), size_(0) {
size_t page_size = base::OS::AllocatePageSize();
size_t alloc_size = RoundUp(size, page_size);
address_ = base::OS::Allocate(hint, alloc_size, alignment,
base::OS::MemoryPermission::kNoAccess);
if (address_ != nullptr) {
size_ = alloc_size;
VirtualMemory::VirtualMemory(size_t size, void* hint)
: address_(base::OS::ReserveRegion(size, hint)), size_(size) {
#if defined(LEAK_SANITIZER)
__lsan_register_root_region(address_, size_);
__lsan_register_root_region(address_, size_);
#endif
}
// Reserves at least |size| bytes of virtual memory with a base aligned to
// |alignment|; |hint| is a placement hint only. On failure address_ stays
// nullptr and size_ stays 0; on success size_ receives the actually reserved
// size via the out-parameter.
VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
    : address_(nullptr), size_(0) {
  address_ = base::OS::ReserveAlignedRegion(size, alignment, hint, &size_);
#if defined(LEAK_SANITIZER)
  // Keep LSan from reporting pointers stored in this region as leaks.
  // NOTE(review): this runs even when the reservation failed
  // (address_ == nullptr) — confirm LSan tolerates a null region.
  __lsan_register_root_region(address_, size_);
#endif
}
}
// Releases the reservation, if any, on destruction.
VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    Release();
    // NOTE(review): Release() above appears to already free the region and
    // reset this object, so the ReleaseRegion() call below operates on the
    // same range a second time — this looks like a double release. Confirm
    // which of the two paths is intended.
    bool result = base::OS::ReleaseRegion(address(), size());
    DCHECK(result);
    USE(result);
  }
}
@ -183,7 +178,7 @@ void VirtualMemory::Release() {
size_t size = size_;
CHECK(InVM(address, size));
Reset();
bool result = base::OS::Free(address, size);
bool result = base::OS::ReleaseRegion(address, size);
USE(result);
DCHECK(result);
}
@ -210,14 +205,14 @@ bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result) {
bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
VirtualMemory* result) {
VirtualMemory first_try(size, hint, alignment);
VirtualMemory first_try(size, alignment, hint);
if (first_try.IsReserved()) {
result->TakeControl(&first_try);
return true;
}
V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
VirtualMemory second_try(size, hint, alignment);
VirtualMemory second_try(size, alignment, hint);
result->TakeControl(&second_try);
return result->IsReserved();
}

View File

@ -76,22 +76,19 @@ class FreeStoreAllocationPolicy {
void* AlignedAlloc(size_t size, size_t alignment);
void AlignedFree(void *ptr);
// Allocates a single system memory page with read/write permissions. The
// address parameter is a hint. Returns the base address of the memory, or null
// on failure. Permissions can be changed on the base address.
byte* AllocateSystemPage(void* address, size_t* allocated);
// Represents and controls an area of reserved memory.
class V8_EXPORT_PRIVATE VirtualMemory {
public:
// Empty VirtualMemory object, controlling no reserved memory.
VirtualMemory();
// Reserves virtual memory containing an area of the given size that is
// aligned per alignment. This may not be at the position returned by
// address().
VirtualMemory(size_t size, void* hint,
size_t alignment = base::OS::AllocatePageSize());
// Reserves virtual memory with size.
explicit VirtualMemory(size_t size, void* hint);
// Reserves virtual memory containing an area of the given size that
// is aligned per alignment. This may not be at the position returned
// by address().
VirtualMemory(size_t size, size_t alignment, void* hint);
// Construct a virtual memory by assigning it some already mapped address
// and size.

View File

@ -489,15 +489,10 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
virtual void Free(void* data, size_t) { free(data); }
virtual void* Reserve(size_t length) {
size_t page_size = base::OS::AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
void* address =
base::OS::Allocate(base::OS::GetRandomMmapAddr(), allocated, page_size,
base::OS::MemoryPermission::kNoAccess);
base::OS::ReserveRegion(length, base::OS::GetRandomMmapAddr());
#if defined(LEAK_SANITIZER)
if (address != nullptr) {
__lsan_register_root_region(address, allocated);
}
__lsan_register_root_region(address, length);
#endif
return address;
}
@ -509,9 +504,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
return Free(data, length);
}
case v8::ArrayBuffer::Allocator::AllocationMode::kReservation: {
bool result = base::OS::Free(data, length);
DCHECK(result);
USE(result);
base::OS::ReleaseRegion(data, length);
return;
}
}

View File

@ -14,21 +14,21 @@
namespace v8 {
namespace internal {
#define __ masm.
#if defined(V8_HOST_ARCH_ARM)
MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
size_t actual_size;
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
if (buffer == nullptr) return stub;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
Register dest = r0;
@ -169,8 +169,8 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
base::OS::SetReadAndExecutable(buffer, allocated);
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
@ -182,12 +182,12 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
#if defined(USE_SIMULATOR)
return stub;
#else
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
size_t actual_size;
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
if (buffer == nullptr) return stub;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
Register dest = r0;
@ -259,8 +259,8 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
CodeDesc desc;
masm.GetCode(isolate, &desc);
Assembler::FlushICache(isolate, buffer, allocated);
base::OS::SetReadAndExecutable(buffer, allocated);
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
@ -271,12 +271,12 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return nullptr;
#else
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
size_t actual_size;
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
__ MovFromFloatParameter(d0);
@ -288,8 +288,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
base::OS::SetReadAndExecutable(buffer, allocated);
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

View File

@ -28,35 +28,23 @@ namespace base {
namespace {
// The memory allocation implementation is taken from platform-win32.cc.
// The VirtualMemory implementation is taken from platform-win32.cc.
// The mmap-based virtual memory implementation as it is used on most posix
// platforms does not work well because Cygwin does not support MAP_FIXED.
// This causes OS::CommitRegion to not always commit the memory region
// This causes VirtualMemory::Commit to not always commit the memory region
// specified.
// Maps a v8 MemoryPermission onto the corresponding Win32 page-protection
// constant. Every enum value is handled; any other input is a programming
// error (UNREACHABLE).
DWORD GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
  switch (access) {
    case OS::MemoryPermission::kNoAccess:
      return PAGE_NOACCESS;
    case OS::MemoryPermission::kReadWrite:
      return PAGE_READWRITE;
    case OS::MemoryPermission::kReadWriteExecute:
      return PAGE_EXECUTE_READWRITE;
  }
  UNREACHABLE();
}
void* RandomizedVirtualAlloc(size_t size, DWORD flags, DWORD protect,
void* hint) {
static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
void* hint) {
LPVOID base = nullptr;
if (protect == PAGE_EXECUTE_READWRITE || protect == PAGE_NOACCESS) {
if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
// For exectutable pages try and randomize the allocation address
base = VirtualAlloc(hint, size, flags, protect);
base = VirtualAlloc(hint, size, action, protection);
}
// If that fails, let the OS find an address to use.
if (base == nullptr) base = VirtualAlloc(nullptr, size, flags, protect);
// After three attempts give up and let the OS find an address to use.
if (base == nullptr) base = VirtualAlloc(nullptr, size, action, protection);
return base;
}
@ -93,49 +81,51 @@ double CygwinTimezoneCache::LocalTimeOffset() {
}
// static
void* OS::Allocate(void* address, size_t size, size_t alignment,
MemoryPermission access) {
size_t page_size = AllocatePageSize();
DCHECK_EQ(0, size % page_size);
DCHECK_EQ(0, alignment % page_size);
address = AlignedAddress(address, alignment);
// Add the maximum misalignment so we are guaranteed an aligned base address.
size_t request_size = size + (alignment - page_size);
DWORD flags = (access == OS::MemoryPermission::kNoAccess)
? MEM_RESERVE
: MEM_RESERVE | MEM_COMMIT;
DWORD protect = GetProtectionFromMemoryPermission(access);
void* base = RandomizedVirtualAlloc(request_size, flags, protect, address);
if (base == nullptr) return nullptr;
uint8_t* aligned_base = RoundUp(static_cast<uint8_t*>(base), alignment);
int resize_attempts = 0;
const int kMaxResizeAttempts = 3;
while (aligned_base != base) {
// Try reducing the size by freeing and then re-allocating at the aligned
// base. Retry logic is needed since we may lose the memory due to a race.
Free(base, request_size);
if (resize_attempts == kMaxResizeAttempts) return nullptr;
base = RandomizedVirtualAlloc(size, flags, protect, aligned_base);
if (base == nullptr) return nullptr;
aligned_base = RoundUp(static_cast<uint8_t*>(base), alignment);
resize_attempts++;
}
return static_cast<void*>(aligned_base);
// static
// Reserves (without committing) |size| bytes of address space; |hint| is a
// placement hint only. Returns nullptr on failure.
void* OS::ReserveRegion(size_t size, void* hint) {
  return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
}
// static
bool OS::Free(void* address, const size_t size) {
return VirtualFree(address, 0, MEM_RELEASE) != 0;
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
size_t* allocated) {
hint = AlignedAddress(hint, alignment);
DCHECK_EQ(alignment % OS::AllocatePageSize(), 0);
size_t request_size =
RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocatePageSize()));
void* address = ReserveRegion(request_size, hint);
if (address == nullptr) {
*allocated = 0;
return nullptr;
}
uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
// Try reducing the size by freeing and then reallocating a specific area.
bool result = ReleaseRegion(address, request_size);
USE(result);
DCHECK(result);
address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
if (address != nullptr) {
request_size = size;
DCHECK(base == static_cast<uint8_t*>(address));
} else {
// Resizing failed, just go with a bigger area.
address = ReserveRegion(request_size, hint);
if (address == nullptr) {
*allocated = 0;
return nullptr;
}
}
*allocated = request_size;
return static_cast<void*>(address);
}
// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
DWORD protect = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
return VirtualAlloc(address, size, MEM_COMMIT, protect) != nullptr;
int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
if (nullptr == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
return false;
}
return true;
}
// static
@ -143,6 +133,11 @@ bool OS::UncommitRegion(void* address, size_t size) {
return VirtualFree(address, size, MEM_DECOMMIT) != 0;
}
// static
// Releases an entire reservation. VirtualFree with MEM_RELEASE requires a
// size of 0, so |size| is intentionally unused here.
bool OS::ReleaseRegion(void* address, size_t size) {
  return VirtualFree(address, 0, MEM_RELEASE) != 0;
}
// static
bool OS::ReleasePartialRegion(void* address, size_t size) {
return VirtualFree(address, size, MEM_DECOMMIT) != 0;

View File

@ -18,19 +18,42 @@ TimezoneCache* OS::CreateTimezoneCache() {
}
// static
void* OS::Allocate(void* address, size_t size, size_t alignment,
OS::MemoryPermission access) {
// Currently we only support reserving memory.
DCHECK_EQ(MemoryPermission::kNoAccess, access);
size_t page_size = OS::AllocatePageSize();
DCHECK_EQ(0, size % page_size);
DCHECK_EQ(0, alignment % page_size);
address = AlignedAddress(address, alignment);
// Add the maximum misalignment so we are guaranteed an aligned base address.
size_t request_size = size + (alignment - page_size);
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) {
CHECK(false); // TODO(scottmg): Port, https://crbug.com/731217.
return nullptr;
}
// static
// Removes all access permissions from [address, address + size) so that any
// touch faults; CHECK-fails if the kernel rejects the protect call.
void OS::Guard(void* address, size_t size) {
  CHECK_EQ(ZX_OK, zx_vmar_protect(zx_vmar_root_self(),
                                  reinterpret_cast<uintptr_t>(address), size,
                                  0 /*no permissions*/));
}
// static
// Reserves |size| bytes by creating a VMO and mapping it with no
// permissions. Returns nullptr if either step fails.
// NOTE(review): |hint| is ignored on Fuchsia — confirm this is intentional.
void* OS::ReserveRegion(size_t size, void* hint) {
  zx_handle_t vmo;
  if (zx_vmo_create(size, 0, &vmo) != ZX_OK) return nullptr;
  uintptr_t result;
  zx_status_t status = zx_vmar_map(zx_vmar_root_self(), 0, vmo, 0, size,
                                   0 /*no permissions*/, &result);
  // The mapping (if any) keeps the memory alive; the handle can be closed
  // regardless of whether the map call succeeded.
  zx_handle_close(vmo);
  if (status != ZX_OK) return nullptr;
  return reinterpret_cast<void*>(result);
}
// static
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
size_t* allocated) {
DCHECK_EQ(alignment % OS::AllocatePageSize(), 0);
hint = AlignedAddress(hint, alignment);
size_t request_size =
RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocatePageSize()));
zx_handle_t vmo;
if (zx_vmo_create(request_size, 0, &vmo) != ZX_OK) {
*allocated = 0;
return nullptr;
}
static const char kVirtualMemoryName[] = "v8-virtualmem";
@ -43,25 +66,26 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
// so close the vmo either way.
zx_handle_close(vmo);
if (status != ZX_OK) {
*allocated = 0;
return nullptr;
}
uint8_t* base = reinterpret_cast<uint8_t*>(reservation);
uint8_t* aligned_base = RoundUp(base, alignment);
DCHECK_LE(base, aligned_base);
// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
DCHECK_LT(base, aligned_base);
size_t prefix_size = static_cast<size_t>(aligned_base - base);
zx_vmar_unmap(zx_vmar_root_self(), reinterpret_cast<uintptr_t>(base),
prefix_size);
request_size -= prefix_size;
}
size_t aligned_size = RoundUp(size, page_size);
size_t aligned_size = RoundUp(size, OS::AllocatePageSize());
DCHECK_LE(aligned_size, request_size);
if (aligned_size != request_size) {
DCHECK_LT(aligned_size, request_size);
size_t suffix_size = request_size - aligned_size;
zx_vmar_unmap(zx_vmar_root_self(),
reinterpret_cast<uintptr_t>(aligned_base + aligned_size),
@ -70,22 +94,11 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
}
DCHECK(aligned_size == request_size);
*allocated = aligned_size;
return static_cast<void*>(aligned_base);
}
// static
// Unmaps a region previously mapped through the root vmar; returns true on
// success.
bool OS::Free(void* address, size_t size) {
  return zx_vmar_unmap(zx_vmar_root_self(),
                       reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
}
// static
void OS::Guard(void* address, size_t size) {
CHECK_EQ(ZX_OK, zx_vmar_protect(zx_vmar_root_self(),
reinterpret_cast<uintptr_t>(address), size,
0 /*no permissions*/));
}
// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
uint32_t prot = ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE |
@ -102,6 +115,12 @@ bool OS::UncommitRegion(void* address, size_t size) {
0 /*no permissions*/) == ZX_OK;
}
// static
// Unmaps an entire reserved region; returns true on success.
bool OS::ReleaseRegion(void* address, size_t size) {
  return zx_vmar_unmap(zx_vmar_root_self(),
                       reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
}
// static
bool OS::ReleasePartialRegion(void* address, size_t size) {
return zx_vmar_unmap(zx_vmar_root_self(),

View File

@ -97,27 +97,6 @@ int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
}
UNREACHABLE();
}
// mmap-backed allocation helper shared by the POSIX platforms. |address| is
// only a placement hint; the requested size is rounded up to whole pages.
// Returns nullptr on failure.
void* Allocate(void* address, size_t size, OS::MemoryPermission access) {
  const size_t actual_size = RoundUp(size, OS::AllocatePageSize());
  int prot = GetProtectionFromMemoryPermission(access);
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (access == OS::MemoryPermission::kNoAccess) {
    // Inaccessible reservations should not be charged against swap/commit
    // limits on platforms that support MAP_NORESERVE.
    // TODO(bbudge) Improve readability by moving platform specific code into
    // helper functions.
#if !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX
    flags |= MAP_NORESERVE;
#endif
#if V8_OS_QNX
    flags |= MAP_LAZY;
#endif  // V8_OS_QNX
  }
  void* result =
      mmap(address, actual_size, prot, flags, kMmapFd, kMmapFdOffset);
  if (result == MAP_FAILED) return nullptr;
  return result;
}
#endif // !V8_OS_FUCHSIA
} // namespace
@ -229,45 +208,25 @@ void* OS::GetRandomMmapAddr() {
}
// TODO(bbudge) Move Cygwin and Fuschia stuff into platform-specific files.
#if !V8_OS_CYGWIN && !V8_OS_FUCHSIA
// static
void* OS::Allocate(void* address, size_t size, size_t alignment,
MemoryPermission access) {
size_t page_size = AllocatePageSize();
DCHECK_EQ(0, size % page_size);
DCHECK_EQ(0, alignment % page_size);
address = AlignedAddress(address, alignment);
// Add the maximum misalignment so we are guaranteed an aligned base address.
size_t request_size = size + (alignment - page_size);
void* result = base::Allocate(address, request_size, access);
if (result == nullptr) return nullptr;
// Unmap memory allocated before the aligned base address.
uint8_t* base = static_cast<uint8_t*>(result);
uint8_t* aligned_base = RoundUp(base, alignment);
if (aligned_base != base) {
DCHECK_LT(base, aligned_base);
size_t prefix_size = static_cast<size_t>(aligned_base - base);
OS::Free(base, prefix_size);
request_size -= prefix_size;
}
// Unmap memory allocated after the potentially unaligned end.
if (size != request_size) {
DCHECK_LT(size, request_size);
size_t suffix_size = request_size - size;
OS::Free(aligned_base + size, suffix_size);
request_size -= suffix_size;
}
DCHECK_EQ(size, request_size);
return static_cast<void*>(aligned_base);
#if !V8_OS_FUCHSIA
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, AllocatePageSize());
int prot = GetProtectionFromMemoryPermission(access);
void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
kMmapFdOffset);
if (mbase == MAP_FAILED) return nullptr;
*allocated = msize;
return mbase;
}
#endif // !V8_OS_FUCHSIA
// static
bool OS::Free(void* address, const size_t size) {
return munmap(address, size) == 0;
void OS::Free(void* address, const size_t size) {
// TODO(1240712): munmap has a return value which is ignored here.
int result = munmap(address, size);
USE(result);
DCHECK_EQ(0, result);
}
#endif // !V8_OS_CYGWIN && !V8_OS_FUCHSIA
void OS::SetReadAndExecutable(void* address, const size_t size) {
#if V8_OS_CYGWIN
@ -313,6 +272,60 @@ void OS::SetReadWriteAndExecutable(void* address, const size_t size) {
}
#if !V8_OS_CYGWIN && !V8_OS_FUCHSIA
// static
// Reserves |size| bytes of address space with PROT_NONE (inaccessible until
// committed). |hint| is a placement hint only; returns nullptr on failure.
void* OS::ReserveRegion(size_t size, void* hint) {
  int map_flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX
  // Do not charge the reservation against swap/commit limits.
  map_flags |= MAP_NORESERVE;
#endif
#if V8_OS_QNX
  map_flags |= MAP_LAZY;
#endif  // V8_OS_QNX
  void* result = mmap(hint, size, PROT_NONE, map_flags, kMmapFd, kMmapFdOffset);
  if (result == MAP_FAILED) return nullptr;
  return result;
}
// static
// Reserves a region of at least |size| bytes whose base is aligned to
// |alignment| (a multiple of the page size). Over-reserves by |alignment|,
// then unmaps the misaligned prefix and the tail beyond the page-rounded
// size. *allocated receives the final reserved size (0 on failure).
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
                               size_t* allocated) {
  DCHECK_EQ(0, alignment % OS::AllocatePageSize());
  hint = AlignedAddress(hint, alignment);
  // Over-reserve so an aligned base is guaranteed to exist within the range.
  size_t request_size =
      RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocatePageSize()));
  void* result = ReserveRegion(request_size, hint);
  if (result == nullptr) {
    *allocated = 0;
    return nullptr;
  }
  uint8_t* base = static_cast<uint8_t*>(result);
  uint8_t* aligned_base = RoundUp(base, alignment);
  DCHECK_LE(base, aligned_base);
  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }
  size_t aligned_size = RoundUp(size, OS::AllocatePageSize());
  DCHECK_LE(aligned_size, request_size);
  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }
  DCHECK(aligned_size == request_size);
  *allocated = aligned_size;
  return static_cast<void*>(aligned_base);
}
// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
@ -345,6 +358,11 @@ bool OS::UncommitRegion(void* address, size_t size) {
#endif // V8_OS_AIX
}
// static
// Releases an entire region reserved with ReserveRegion.
bool OS::ReleaseRegion(void* address, size_t size) {
  return munmap(address, size) == 0;
}
// static
bool OS::ReleasePartialRegion(void* address, size_t size) {
return munmap(address, size) == 0;

View File

@ -732,20 +732,8 @@ void* OS::GetRandomMmapAddr() {
namespace {
// Maps a v8 MemoryPermission onto the matching Win32 page-protection flag.
// Every enum value is handled; any other input is a programming error.
DWORD GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
  switch (access) {
    case OS::MemoryPermission::kNoAccess:
      return PAGE_NOACCESS;
    case OS::MemoryPermission::kReadWrite:
      return PAGE_READWRITE;
    case OS::MemoryPermission::kReadWriteExecute:
      return PAGE_EXECUTE_READWRITE;
  }
  UNREACHABLE();
}
void* RandomizedVirtualAlloc(size_t size, DWORD flags, DWORD protect,
void* hint) {
static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
void* hint) {
LPVOID base = NULL;
static BOOL use_aslr = -1;
#ifdef V8_HOST_ARCH_32_BIT
@ -758,57 +746,56 @@ void* RandomizedVirtualAlloc(size_t size, DWORD flags, DWORD protect,
#endif
if (use_aslr &&
(protect == PAGE_EXECUTE_READWRITE || protect == PAGE_NOACCESS)) {
// For executable or reserved pages try to randomize the allocation address.
base = VirtualAlloc(hint, size, flags, protect);
(protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS)) {
// For executable pages try and randomize the allocation address
base = VirtualAlloc(hint, size, action, protection);
}
// On failure, let the OS find an address to use.
if (base == NULL) base = VirtualAlloc(nullptr, size, flags, protect);
// After three attempts give up and let the OS find an address to use.
if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
return base;
}
} // namespace
// static
void* OS::Allocate(void* address, size_t size, size_t alignment,
MemoryPermission access) {
size_t page_size = AllocatePageSize();
DCHECK_EQ(0, size % page_size);
DCHECK_EQ(0, alignment % page_size);
address = AlignedAddress(address, alignment);
// Add the maximum misalignment so we are guaranteed an aligned base address.
size_t request_size = size + (alignment - page_size);
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) {
// VirtualAlloc rounds allocated size to page size automatically.
size_t msize = RoundUp(requested, static_cast<int>(AllocatePageSize()));
int flags = (access == OS::MemoryPermission::kNoAccess)
? MEM_RESERVE
: MEM_RESERVE | MEM_COMMIT;
int protect = GetProtectionFromMemoryPermission(access);
void* base = RandomizedVirtualAlloc(request_size, flags, protect, address);
if (base == nullptr) return nullptr;
uint8_t* aligned_base = RoundUp(static_cast<uint8_t*>(base), alignment);
int resize_attempts = 0;
const int kMaxResizeAttempts = 3;
while (aligned_base != base) {
// Try reducing the size by freeing and then re-allocating at the aligned
// base. Retry logic is needed since we may lose the memory due to a race.
Free(base, request_size);
if (resize_attempts == kMaxResizeAttempts) return nullptr;
base = RandomizedVirtualAlloc(size, flags, protect, aligned_base);
if (base == nullptr) return nullptr;
aligned_base = RoundUp(static_cast<uint8_t*>(base), alignment);
resize_attempts++;
// Windows XP SP2 allows Data Excution Prevention (DEP).
int prot = PAGE_NOACCESS;
switch (access) {
case OS::MemoryPermission::kNoAccess: {
prot = PAGE_NOACCESS;
break;
}
case OS::MemoryPermission::kReadWrite: {
prot = PAGE_READWRITE;
break;
}
case OS::MemoryPermission::kReadWriteExecute: {
prot = PAGE_EXECUTE_READWRITE;
break;
}
}
return static_cast<void*>(aligned_base);
LPVOID mbase =
RandomizedVirtualAlloc(msize, MEM_COMMIT | MEM_RESERVE, prot, hint);
if (mbase == NULL) return NULL;
DCHECK_EQ(reinterpret_cast<uintptr_t>(mbase) % OS::AllocatePageSize(), 0);
*allocated = msize;
return mbase;
}
// static
bool OS::Free(void* address, const size_t size) {
return VirtualFree(address, 0, MEM_RELEASE) != 0;
void OS::Free(void* address, const size_t size) {
// TODO(1240712): VirtualFree has a return value which is ignored here.
VirtualFree(address, 0, MEM_RELEASE);
USE(size);
}
void OS::SetReadAndExecutable(void* address, const size_t size) {
@ -838,10 +825,51 @@ void OS::SetReadWriteAndExecutable(void* address, const size_t size) {
VirtualProtect(address, size, PAGE_EXECUTE_READWRITE, &oldprotect));
}
// static
// Reserves (without committing) |size| bytes at a randomized address;
// |hint| is a placement hint only.
void* OS::ReserveRegion(size_t size, void* hint) {
  return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
}
// Reserves a region of at least |size| bytes aligned to |alignment|.
// Over-reserves, releases the whole block, then attempts to re-reserve
// exactly |size| bytes at the aligned base; since another thread may grab
// the range in between, it falls back to keeping the larger area.
// *allocated receives the reserved size (0 on failure).
// NOTE(review): on the fallback path the returned base is not guaranteed to
// be aligned — confirm callers re-align the result.
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
                               size_t* allocated) {
  DCHECK_EQ(alignment % OS::AllocatePageSize(), 0);
  hint = AlignedAddress(hint, alignment);
  size_t request_size =
      RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocatePageSize()));
  void* address = ReserveRegion(request_size, hint);
  if (address == nullptr) {
    *allocated = 0;
    return nullptr;
  }
  uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
  // Try reducing the size by freeing and then reallocating a specific area.
  bool result = ReleaseRegion(address, request_size);
  USE(result);
  DCHECK(result);
  address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
  if (address != nullptr) {
    request_size = size;
    DCHECK(base == static_cast<uint8_t*>(address));
  } else {
    // Resizing failed, just go with a bigger area.
    address = ReserveRegion(request_size, hint);
    if (address == nullptr) {
      *allocated = 0;
      return nullptr;
    }
  }
  *allocated = request_size;
  return static_cast<void*>(address);
}
// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
DWORD protect = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
return VirtualAlloc(address, size, MEM_COMMIT, protect) != nullptr;
int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
return false;
}
return true;
}
// static
@ -849,6 +877,11 @@ bool OS::UncommitRegion(void* address, size_t size) {
return VirtualFree(address, size, MEM_DECOMMIT) != 0;
}
// static
// Releases an entire reservation. VirtualFree with MEM_RELEASE requires a
// size of 0, so |size| is intentionally unused here.
bool OS::ReleaseRegion(void* address, size_t size) {
  return VirtualFree(address, 0, MEM_RELEASE) != 0;
}
// static
bool OS::ReleasePartialRegion(void* address, size_t size) {
return VirtualFree(address, size, MEM_DECOMMIT) != 0;

View File

@ -172,14 +172,12 @@ class V8_BASE_EXPORT OS {
static void* GetRandomMmapAddr();
// Allocates memory. Permissions are set according to the access argument.
// The address parameter is a hint. The size and alignment parameters must be
// multiples of AllocatePageSize(). Returns the address of the allocated
// memory, with the specified size and alignment, or nullptr on failure.
static void* Allocate(void* address, size_t size, size_t alignment,
MemoryPermission access);
// Returns the address of the allocated memory, or nullptr on failure.
static void* Allocate(const size_t requested, size_t* allocated,
MemoryPermission access, void* hint = nullptr);
// Frees memory allocated by a call to Allocate.
static bool Free(void* address, const size_t size);
static void Free(void* address, const size_t size);
// Mark a region of memory executable and readable but not writable.
static void SetReadAndExecutable(void* address, const size_t size);
@ -194,10 +192,17 @@ class V8_BASE_EXPORT OS {
// function. This is only a temporary function and will go away soon.
static void SetReadWriteAndExecutable(void* address, const size_t size);
static void* ReserveRegion(size_t size, void* hint);
static void* ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
size_t* allocated);
static bool CommitRegion(void* address, size_t size, bool is_executable);
static bool UncommitRegion(void* address, size_t size);
static bool ReleaseRegion(void* address, size_t size);
// Release part of a reserved address range.
static bool ReleasePartialRegion(void* address, size_t size);

View File

@ -142,9 +142,7 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
void Free(void* data, size_t length) override {
#if USE_VM
if (RoundToPageSize(&length)) {
bool result = base::OS::Free(data, length);
DCHECK(result);
USE(result);
base::OS::ReleaseRegion(data, length);
return;
}
#endif
@ -162,17 +160,16 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
}
#if USE_VM
void* VirtualMemoryAllocate(size_t length) {
size_t page_size = base::OS::AllocatePageSize();
size_t alloc_size = RoundUp(length, page_size);
void* address = base::OS::Allocate(nullptr, alloc_size, page_size,
base::OS::MemoryPermission::kReadWrite);
if (address != nullptr) {
#if defined(LEAK_SANITIZER)
__lsan_register_root_region(address, alloc_size);
#endif
MSAN_MEMORY_IS_INITIALIZED(address, alloc_size);
void* data = base::OS::ReserveRegion(length, nullptr);
if (data && !base::OS::CommitRegion(data, length, false)) {
base::OS::ReleaseRegion(data, length);
return nullptr;
}
return address;
#if defined(LEAK_SANITIZER)
__lsan_register_root_region(data, length);
#endif
MSAN_MEMORY_IS_INITIALIZED(data, length);
return data;
}
#endif
};

View File

@ -125,7 +125,7 @@ bool CodeRange::SetUp(size_t requested) {
}
// We are sure that we have mapped a block of requested addresses.
DCHECK_GE(reservation.size(), requested);
DCHECK(reservation.size() == requested);
Address base = reinterpret_cast<Address>(reservation.address());
// On some platforms, specifically Win64, we need to reserve some pages at
@ -443,7 +443,7 @@ void MemoryAllocator::FreeMemory(Address base, size_t size,
code_range()->FreeRawMemory(base, size);
} else {
DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
bool result = base::OS::Free(base, size);
bool result = base::OS::ReleaseRegion(base, size);
USE(result);
DCHECK(result);
}
@ -456,10 +456,15 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
if (!AlignedAllocVirtualMemory(size, alignment, hint, &reservation))
return nullptr;
Address result = static_cast<Address>(reservation.address());
const Address base =
::RoundUp(static_cast<Address>(reservation.address()), alignment);
if (base + size != reservation.end()) {
const Address unused_start = ::RoundUp(base + size, GetCommitPageSize());
reservation.ReleasePartial(unused_start);
}
size_.Increment(reservation.size());
controller->TakeControl(&reservation);
return result;
return base;
}
Address MemoryAllocator::AllocateAlignedMemory(

View File

@ -14,13 +14,14 @@ namespace internal {
#define __ masm.
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
// esp[1 * kPointerSize]: raw double input
// esp[0 * kPointerSize]: return address
@ -38,8 +39,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
base::OS::SetReadAndExecutable(buffer, allocated);
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
@ -131,12 +132,12 @@ class LabelConverter {
MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
LabelConverter conv(buffer);
@ -450,8 +451,8 @@ MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
base::OS::SetReadAndExecutable(buffer, allocated);
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
// TODO(jkummerow): It would be nice to register this code creation event
// with the PROFILE / GDBJIT system.
return FUNCTION_CAST<MemMoveFunction>(buffer);

View File

@ -13,26 +13,26 @@
namespace v8 {
namespace internal {
#define __ masm.
#if defined(V8_HOST_ARCH_MIPS)
MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR) || defined(_MIPS_ARCH_MIPS32R6) || \
defined(_MIPS_ARCH_MIPS32RX)
return stub;
#else
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
size_t actual_size;
byte* buffer = static_cast<byte*>(base::OS::Allocate(
3 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
if (buffer == nullptr) return stub;
// This code assumes that cache lines are 32 bytes and if the cache line is
// larger it will not work correctly.
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
{
Label lastb, unaligned, aligned, chkw,
loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
@ -544,8 +544,8 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
base::OS::SetReadAndExecutable(buffer, allocated);
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
@ -555,12 +555,12 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return nullptr;
#else
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
size_t actual_size;
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
__ MovFromFloatParameter(f12);
@ -572,8 +572,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
base::OS::SetReadAndExecutable(buffer, allocated);
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

View File

@ -13,26 +13,27 @@
namespace v8 {
namespace internal {
#define __ masm.
#if defined(V8_HOST_ARCH_MIPS)
#if defined(V8_HOST_ARCH_MIPS)
MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
size_t actual_size;
byte* buffer = static_cast<byte*>(base::OS::Allocate(
3 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
if (buffer == nullptr) return stub;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
// This code assumes that cache lines are 32 bytes and if the cache line is
// larger it will not work correctly.
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
{
Label lastb, unaligned, aligned, chkw,
loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
@ -545,8 +546,8 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
masm.GetCode(isolte, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
base::OS::SetReadAndExecutable(buffer, allocated);
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
@ -556,12 +557,12 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return nullptr;
#else
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
size_t actual_size;
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
__ MovFromFloatParameter(f12);
@ -573,8 +574,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
base::OS::SetReadAndExecutable(buffer, allocated);
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

View File

@ -13,21 +13,22 @@
namespace v8 {
namespace internal {
#define __ masm.
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return nullptr;
#else
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
size_t actual_size;
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
// Called from C
// Called from C
__ function_descriptor();
__ MovFromFloatParameter(d1);
@ -40,8 +41,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(ABI_USES_FUNCTION_DESCRIPTORS ||
!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
base::OS::SetReadAndExecutable(buffer, allocated);
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

View File

@ -19,12 +19,12 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return nullptr;
#else
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
size_t actual_size;
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
__ MovFromFloatParameter(d0);
@ -37,8 +37,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(ABI_USES_FUNCTION_DESCRIPTORS ||
!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
base::OS::SetReadAndExecutable(buffer, allocated);
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

View File

@ -13,15 +13,17 @@ namespace internal {
#define __ masm.
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute,
isolate->heap()->GetRandomMmapAddr()));
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
// xmm0: raw double input.
// Move double input into registers.
__ Sqrtsd(xmm0, xmm0);
@ -31,8 +33,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
base::OS::SetReadAndExecutable(buffer, allocated);
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}

View File

@ -32,7 +32,6 @@
#include "include/libplatform/libplatform.h"
#include "include/v8-platform.h"
#include "src/assembler.h"
#include "src/debug/debug-interface.h"
#include "src/factory.h"
#include "src/flags.h"
@ -568,19 +567,6 @@ static inline void CheckDoubleEquals(double expected, double actual) {
CHECK_GE(expected, actual - kEpsilon);
}
static inline uint8_t* AllocateAssemblerBuffer(
size_t* allocated,
size_t requested = v8::internal::AssemblerBase::kMinimalBufferSize) {
size_t page_size = v8::base::OS::AllocatePageSize();
size_t alloc_size = RoundUp(requested, page_size);
void* result =
v8::base::OS::Allocate(nullptr, alloc_size, page_size,
v8::base::OS::MemoryPermission::kReadWriteExecute);
CHECK(result);
*allocated = alloc_size;
return static_cast<uint8_t*>(result);
}
static v8::debug::DebugDelegate dummy_delegate;
static inline void EnableDebugger(v8::Isolate* isolate) {

View File

@ -174,14 +174,15 @@ static void InitializeVM() {
#else // ifdef USE_SIMULATOR.
// Run the test on real hardware or models.
#define SETUP_SIZE(buf_size) \
Isolate* isolate = CcTest::i_isolate(); \
HandleScope scope(isolate); \
CHECK_NOT_NULL(isolate); \
size_t allocated; \
byte* buf = AllocateAssemblerBuffer(&allocated, buf_size); \
MacroAssembler masm(isolate, buf, static_cast<int>(allocated), \
v8::internal::CodeObjectRequired::kYes); \
#define SETUP_SIZE(buf_size) \
Isolate* isolate = CcTest::i_isolate(); \
HandleScope scope(isolate); \
CHECK_NOT_NULL(isolate); \
size_t actual_size; \
byte* buf = static_cast<byte*>(v8::base::OS::Allocate( \
buf_size, &actual_size, base::OS::MemoryPermission::kReadWriteExecute)); \
MacroAssembler masm(isolate, buf, static_cast<unsigned>(actual_size), \
v8::internal::CodeObjectRequired::kYes); \
RegisterDump core;
#define RESET() \
@ -213,7 +214,8 @@ static void InitializeVM() {
__ Ret(); \
__ GetCode(masm.isolate(), nullptr);
#define TEARDOWN() v8::base::OS::Free(buf, allocated);
#define TEARDOWN() \
v8::base::OS::Free(buf, actual_size);
#endif // ifdef USE_SIMULATOR.

View File

@ -67,13 +67,27 @@ static const Register arg1 = rdi;
static const Register arg2 = rsi;
#endif
#define __ masm.
#define __ assm.
namespace {
byte* AllocateExecutablePage(int* actual_size) {
size_t allocated = 0;
void* result =
v8::base::OS::Allocate(Assembler::kMinimalBufferSize, &allocated,
v8::base::OS::MemoryPermission::kReadWriteExecute);
CHECK(result);
*actual_size = static_cast<int>(allocated);
return static_cast<byte*>(result);
}
} // namespace
TEST(AssemblerX64ReturnOperation) {
CcTest::InitializeVM();
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
// Assemble a simple function that copies argument 2 and returns it.
__ movq(rax, arg2);
@ -81,7 +95,7 @@ TEST(AssemblerX64ReturnOperation) {
__ ret(0);
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
assm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(3, 2);
CHECK_EQ(2, result);
@ -90,9 +104,9 @@ TEST(AssemblerX64ReturnOperation) {
TEST(AssemblerX64StackOperations) {
CcTest::InitializeVM();
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
// Assemble a simple function that copies argument 2 and returns it.
// We compile without stack frame pointers, so the gdb debugger shows
@ -110,7 +124,7 @@ TEST(AssemblerX64StackOperations) {
__ ret(0);
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
assm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(3, 2);
CHECK_EQ(2, result);
@ -119,9 +133,9 @@ TEST(AssemblerX64StackOperations) {
TEST(AssemblerX64ArithmeticOperations) {
CcTest::InitializeVM();
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
// Assemble a simple function that adds arguments returning the sum.
__ movq(rax, arg2);
@ -129,7 +143,7 @@ TEST(AssemblerX64ArithmeticOperations) {
__ ret(0);
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
assm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(3, 2);
CHECK_EQ(5, result);
@ -138,9 +152,9 @@ TEST(AssemblerX64ArithmeticOperations) {
TEST(AssemblerX64CmpbOperation) {
CcTest::InitializeVM();
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
// Assemble a function that compare argument byte returing 1 if equal else 0.
// On Windows, it compares rcx with rdx which does not require REX prefix;
@ -155,7 +169,7 @@ TEST(AssemblerX64CmpbOperation) {
__ ret(0);
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
assm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(0x1002, 0x2002);
CHECK_EQ(1, result);
@ -165,14 +179,14 @@ TEST(AssemblerX64CmpbOperation) {
TEST(Regression684407) {
CcTest::InitializeVM();
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
Address before = masm.pc();
Address before = assm.pc();
__ cmpl(Operand(arg1, 0),
Immediate(0, RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE));
Address after = masm.pc();
Address after = assm.pc();
size_t instruction_size = static_cast<size_t>(after - before);
// Check that the immediate is not encoded as uint8.
CHECK_LT(sizeof(uint32_t), instruction_size);
@ -180,9 +194,9 @@ TEST(Regression684407) {
TEST(AssemblerX64ImulOperation) {
CcTest::InitializeVM();
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
// Assemble a simple function that multiplies arguments returning the high
// word.
@ -192,7 +206,7 @@ TEST(AssemblerX64ImulOperation) {
__ ret(0);
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
assm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(3, 2);
CHECK_EQ(0, result);
@ -205,9 +219,9 @@ TEST(AssemblerX64ImulOperation) {
TEST(AssemblerX64testbwqOperation) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
__ pushq(rbx);
__ pushq(rdi);
@ -361,7 +375,7 @@ TEST(AssemblerX64testbwqOperation) {
__ ret(0);
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
assm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(0, 0);
CHECK_EQ(1, result);
@ -369,9 +383,9 @@ TEST(AssemblerX64testbwqOperation) {
TEST(AssemblerX64XchglOperations) {
CcTest::InitializeVM();
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
__ movq(rax, Operand(arg1, 0));
__ movq(r11, Operand(arg2, 0));
@ -381,7 +395,7 @@ TEST(AssemblerX64XchglOperations) {
__ ret(0);
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
assm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
uint64_t right = V8_2PART_UINT64_C(0x30000000, 40000000);
@ -394,16 +408,16 @@ TEST(AssemblerX64XchglOperations) {
TEST(AssemblerX64OrlOperations) {
CcTest::InitializeVM();
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
__ movq(rax, Operand(arg2, 0));
__ orl(Operand(arg1, 0), rax);
__ ret(0);
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
assm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
uint64_t right = V8_2PART_UINT64_C(0x30000000, 40000000);
@ -415,16 +429,16 @@ TEST(AssemblerX64OrlOperations) {
TEST(AssemblerX64RollOperations) {
CcTest::InitializeVM();
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
__ movq(rax, arg1);
__ roll(rax, Immediate(1));
__ ret(0);
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
assm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
uint64_t src = V8_2PART_UINT64_C(0x10000000, C0000000);
uint64_t result = FUNCTION_CAST<F5>(buffer)(src);
@ -434,16 +448,16 @@ TEST(AssemblerX64RollOperations) {
TEST(AssemblerX64SublOperations) {
CcTest::InitializeVM();
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
__ movq(rax, Operand(arg2, 0));
__ subl(Operand(arg1, 0), rax);
__ ret(0);
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
assm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
uint64_t right = V8_2PART_UINT64_C(0x30000000, 40000000);
@ -455,9 +469,9 @@ TEST(AssemblerX64SublOperations) {
TEST(AssemblerX64TestlOperations) {
CcTest::InitializeVM();
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
// Set rax with the ZF flag of the testl instruction.
Label done;
@ -470,7 +484,7 @@ TEST(AssemblerX64TestlOperations) {
__ ret(0);
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
assm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
uint64_t right = V8_2PART_UINT64_C(0x30000000, 00000000);
@ -481,9 +495,9 @@ TEST(AssemblerX64TestlOperations) {
TEST(AssemblerX64TestwOperations) {
typedef uint16_t (*F)(uint16_t * x);
CcTest::InitializeVM();
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
// Set rax with the ZF flag of the testl instruction.
Label done;
@ -495,7 +509,7 @@ TEST(AssemblerX64TestwOperations) {
__ ret(0);
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
assm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
uint16_t operand = 0x8000;
uint16_t result = FUNCTION_CAST<F>(buffer)(&operand);
@ -504,16 +518,16 @@ TEST(AssemblerX64TestwOperations) {
TEST(AssemblerX64XorlOperations) {
CcTest::InitializeVM();
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
__ movq(rax, Operand(arg2, 0));
__ xorl(Operand(arg1, 0), rax);
__ ret(0);
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
assm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
uint64_t right = V8_2PART_UINT64_C(0x30000000, 60000000);
@ -525,9 +539,9 @@ TEST(AssemblerX64XorlOperations) {
TEST(AssemblerX64MemoryOperands) {
CcTest::InitializeVM();
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
// Assemble a simple function that copies argument 2 and returns it.
__ pushq(rbp);
@ -547,7 +561,7 @@ TEST(AssemblerX64MemoryOperands) {
__ ret(0);
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
assm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(3, 2);
CHECK_EQ(3, result);
@ -556,9 +570,9 @@ TEST(AssemblerX64MemoryOperands) {
TEST(AssemblerX64ControlFlow) {
CcTest::InitializeVM();
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
// Assemble a simple function that copies argument 1 and returns it.
__ pushq(rbp);
@ -573,7 +587,7 @@ TEST(AssemblerX64ControlFlow) {
__ ret(0);
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
assm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(3, 2);
CHECK_EQ(3, result);
@ -582,9 +596,9 @@ TEST(AssemblerX64ControlFlow) {
TEST(AssemblerX64LoopImmediates) {
CcTest::InitializeVM();
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
// Assemble two loops using rax as counter, and verify the ending counts.
Label Fail;
@ -621,7 +635,7 @@ TEST(AssemblerX64LoopImmediates) {
__ ret(0);
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
assm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
int result = FUNCTION_CAST<F0>(buffer)();
CHECK_EQ(1, result);
@ -675,7 +689,7 @@ TEST(AssemblerX64LabelChaining) {
// Test chaining of label usages within instructions (issue 1644).
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Assembler masm(CcTest::i_isolate(), nullptr, 0);
Assembler assm(CcTest::i_isolate(), nullptr, 0);
Label target;
__ j(equal, &target);
@ -690,7 +704,7 @@ TEST(AssemblerMultiByteNop) {
v8::HandleScope scope(CcTest::isolate());
byte buffer[1024];
Isolate* isolate = CcTest::i_isolate();
Assembler masm(isolate, buffer, sizeof(buffer));
Assembler assm(isolate, buffer, sizeof(buffer));
__ pushq(rbx);
__ pushq(rcx);
__ pushq(rdx);
@ -703,9 +717,9 @@ TEST(AssemblerMultiByteNop) {
__ movq(rdi, Immediate(5));
__ movq(rsi, Immediate(6));
for (int i = 0; i < 16; i++) {
int before = masm.pc_offset();
int before = assm.pc_offset();
__ Nop(i);
CHECK_EQ(masm.pc_offset() - before, i);
CHECK_EQ(assm.pc_offset() - before, i);
}
Label fail;
@ -738,7 +752,7 @@ TEST(AssemblerMultiByteNop) {
__ ret(0);
CodeDesc desc;
masm.GetCode(isolate, &desc);
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
@ -761,7 +775,7 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
CHECK_EQ(ELEMENT_COUNT, vec->Length());
Isolate* isolate = CcTest::i_isolate();
Assembler masm(isolate, buffer, sizeof(buffer));
Assembler assm(isolate, buffer, sizeof(buffer));
// Remove return address from the stack for fix stack frame alignment.
__ popq(rcx);
@ -794,7 +808,7 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
__ ret(0);
CodeDesc desc;
masm.GetCode(isolate, &desc);
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
@ -850,15 +864,14 @@ TEST(AssemblerX64Extractps) {
v8::HandleScope scope(CcTest::isolate());
byte buffer[256];
Isolate* isolate = CcTest::i_isolate();
Assembler masm(isolate, buffer, sizeof(buffer));
{
CpuFeatureScope fscope2(&masm, SSE4_1);
Assembler assm(isolate, buffer, sizeof(buffer));
{ CpuFeatureScope fscope2(&assm, SSE4_1);
__ extractps(rax, xmm0, 0x1);
__ ret(0);
}
CodeDesc desc;
masm.GetCode(isolate, &desc);
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@ -881,7 +894,7 @@ TEST(AssemblerX64SSE) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
MacroAssembler masm(isolate, buffer, sizeof(buffer),
MacroAssembler assm(isolate, buffer, sizeof(buffer),
v8::internal::CodeObjectRequired::kYes);
{
__ shufps(xmm0, xmm0, 0x0); // brocast first argument
@ -896,7 +909,7 @@ TEST(AssemblerX64SSE) {
}
CodeDesc desc;
masm.GetCode(isolate, &desc);
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@ -917,10 +930,10 @@ TEST(AssemblerX64FMA_sd) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
MacroAssembler masm(isolate, buffer, sizeof(buffer),
MacroAssembler assm(isolate, buffer, sizeof(buffer),
v8::internal::CodeObjectRequired::kYes);
{
CpuFeatureScope fscope(&masm, FMA3);
CpuFeatureScope fscope(&assm, FMA3);
Label exit;
// argument in xmm0, xmm1 and xmm2
// xmm0 * xmm1 + xmm2
@ -1122,7 +1135,7 @@ TEST(AssemblerX64FMA_sd) {
}
CodeDesc desc;
masm.GetCode(isolate, &desc);
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@ -1143,10 +1156,10 @@ TEST(AssemblerX64FMA_ss) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
MacroAssembler masm(isolate, buffer, sizeof(buffer),
MacroAssembler assm(isolate, buffer, sizeof(buffer),
v8::internal::CodeObjectRequired::kYes);
{
CpuFeatureScope fscope(&masm, FMA3);
CpuFeatureScope fscope(&assm, FMA3);
Label exit;
// arguments in xmm0, xmm1 and xmm2
// xmm0 * xmm1 + xmm2
@ -1348,7 +1361,7 @@ TEST(AssemblerX64FMA_ss) {
}
CodeDesc desc;
masm.GetCode(isolate, &desc);
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@ -1367,7 +1380,7 @@ TEST(AssemblerX64SSE_ss) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
Assembler masm(isolate, buffer, sizeof(buffer));
Assembler assm(isolate, buffer, sizeof(buffer));
{
Label exit;
// arguments in xmm0, xmm1 and xmm2
@ -1423,7 +1436,7 @@ TEST(AssemblerX64SSE_ss) {
}
CodeDesc desc;
masm.GetCode(isolate, &desc);
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@ -1445,9 +1458,9 @@ TEST(AssemblerX64AVX_ss) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
Assembler masm(isolate, buffer, sizeof(buffer));
Assembler assm(isolate, buffer, sizeof(buffer));
{
CpuFeatureScope avx_scope(&masm, AVX);
CpuFeatureScope avx_scope(&assm, AVX);
Label exit;
// arguments in xmm0, xmm1 and xmm2
__ subq(rsp, Immediate(kDoubleSize * 2)); // For memory operand
@ -1508,7 +1521,7 @@ TEST(AssemblerX64AVX_ss) {
}
CodeDesc desc;
masm.GetCode(isolate, &desc);
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@ -1530,9 +1543,9 @@ TEST(AssemblerX64AVX_sd) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
Assembler masm(isolate, buffer, sizeof(buffer));
Assembler assm(isolate, buffer, sizeof(buffer));
{
CpuFeatureScope avx_scope(&masm, AVX);
CpuFeatureScope avx_scope(&assm, AVX);
Label exit;
// arguments in xmm0, xmm1 and xmm2
__ subq(rsp, Immediate(kDoubleSize * 2)); // For memory operand
@ -1747,7 +1760,7 @@ TEST(AssemblerX64AVX_sd) {
}
CodeDesc desc;
masm.GetCode(isolate, &desc);
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@ -1769,10 +1782,10 @@ TEST(AssemblerX64BMI1) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
MacroAssembler masm(isolate, buffer, sizeof(buffer),
MacroAssembler assm(isolate, buffer, sizeof(buffer),
v8::internal::CodeObjectRequired::kYes);
{
CpuFeatureScope fscope(&masm, BMI1);
CpuFeatureScope fscope(&assm, BMI1);
Label exit;
__ movq(rcx, V8_UINT64_C(0x1122334455667788)); // source operand
@ -1939,7 +1952,7 @@ TEST(AssemblerX64BMI1) {
}
CodeDesc desc;
masm.GetCode(isolate, &desc);
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@ -1959,10 +1972,10 @@ TEST(AssemblerX64LZCNT) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
MacroAssembler masm(isolate, buffer, sizeof(buffer),
MacroAssembler assm(isolate, buffer, sizeof(buffer),
v8::internal::CodeObjectRequired::kYes);
{
CpuFeatureScope fscope(&masm, LZCNT);
CpuFeatureScope fscope(&assm, LZCNT);
Label exit;
__ movq(rcx, V8_UINT64_C(0x1122334455667788)); // source operand
@ -1999,7 +2012,7 @@ TEST(AssemblerX64LZCNT) {
}
CodeDesc desc;
masm.GetCode(isolate, &desc);
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@ -2019,10 +2032,10 @@ TEST(AssemblerX64POPCNT) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
MacroAssembler masm(isolate, buffer, sizeof(buffer),
MacroAssembler assm(isolate, buffer, sizeof(buffer),
v8::internal::CodeObjectRequired::kYes);
{
CpuFeatureScope fscope(&masm, POPCNT);
CpuFeatureScope fscope(&assm, POPCNT);
Label exit;
__ movq(rcx, V8_UINT64_C(0x1111111111111100)); // source operand
@ -2059,7 +2072,7 @@ TEST(AssemblerX64POPCNT) {
}
CodeDesc desc;
masm.GetCode(isolate, &desc);
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@ -2079,10 +2092,10 @@ TEST(AssemblerX64BMI2) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[2048];
MacroAssembler masm(isolate, buffer, sizeof(buffer),
MacroAssembler assm(isolate, buffer, sizeof(buffer),
v8::internal::CodeObjectRequired::kYes);
{
CpuFeatureScope fscope(&masm, BMI2);
CpuFeatureScope fscope(&assm, BMI2);
Label exit;
__ pushq(rbx); // save rbx
__ movq(rcx, V8_UINT64_C(0x1122334455667788)); // source operand
@ -2322,7 +2335,7 @@ TEST(AssemblerX64BMI2) {
}
CodeDesc desc;
masm.GetCode(isolate, &desc);
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@ -2340,7 +2353,7 @@ TEST(AssemblerX64JumpTables1) {
CcTest::InitializeVM();
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
MacroAssembler masm(isolate, nullptr, 0,
MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
const int kNumCases = 512;
@ -2367,7 +2380,7 @@ TEST(AssemblerX64JumpTables1) {
__ ret(0);
CodeDesc desc;
masm.GetCode(isolate, &desc);
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@ -2388,7 +2401,7 @@ TEST(AssemblerX64JumpTables2) {
CcTest::InitializeVM();
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
MacroAssembler masm(isolate, nullptr, 0,
MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
const int kNumCases = 512;
@ -2416,7 +2429,7 @@ TEST(AssemblerX64JumpTables2) {
}
CodeDesc desc;
masm.GetCode(isolate, &desc);
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@ -2433,9 +2446,9 @@ TEST(AssemblerX64JumpTables2) {
TEST(AssemblerX64PslldWithXmm15) {
CcTest::InitializeVM();
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
__ movq(xmm15, arg1);
__ pslld(xmm15, 1);
@ -2443,7 +2456,7 @@ TEST(AssemblerX64PslldWithXmm15) {
__ ret(0);
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
assm.GetCode(CcTest::i_isolate(), &desc);
uint64_t result = FUNCTION_CAST<F5>(buffer)(V8_UINT64_C(0x1122334455667788));
CHECK_EQ(V8_UINT64_C(0x22446688aaccef10), result);
}
@ -2456,10 +2469,10 @@ TEST(AssemblerX64vmovups) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
MacroAssembler masm(isolate, buffer, sizeof(buffer),
MacroAssembler assm(isolate, buffer, sizeof(buffer),
v8::internal::CodeObjectRequired::kYes);
{
CpuFeatureScope avx_scope(&masm, AVX);
CpuFeatureScope avx_scope(&assm, AVX);
__ shufps(xmm0, xmm0, 0x0); // brocast first argument
__ shufps(xmm1, xmm1, 0x0); // brocast second argument
// copy xmm1 to xmm0 through the stack to test the "vmovups reg, mem".
@ -2472,7 +2485,7 @@ TEST(AssemblerX64vmovups) {
}
CodeDesc desc;
masm.GetCode(isolate, &desc);
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT

View File

@ -45,13 +45,15 @@ namespace internal {
ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Register destination_reg) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize, &actual_size,
v8::base::OS::MemoryPermission::kReadWriteExecute));
CHECK(buffer);
HandleScope handles(isolate);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
v8::internal::CodeObjectRequired::kYes);
DoubleToIStub stub(isolate, destination_reg);
byte* start = stub.GetCode()->instruction_start();
@ -115,7 +117,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
Assembler::FlushICache(isolate, buffer, allocated);
Assembler::FlushICache(isolate, buffer, actual_size);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}

View File

@ -45,14 +45,15 @@ namespace internal {
ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Register destination_reg) {
// Allocate an executable page of memory.
size_t actual_size = 4 * Assembler::kMinimalBufferSize;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
actual_size, &actual_size,
v8::base::OS::MemoryPermission::kReadWriteExecute));
CHECK(buffer);
HandleScope handles(isolate);
size_t allocated;
byte* buffer =
AllocateAssemblerBuffer(&allocated, 4 * Assembler::kMinimalBufferSize);
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
v8::internal::CodeObjectRequired::kYes);
DoubleToIStub stub(isolate, destination_reg);
byte* start = stub.GetCode()->instruction_start();
@ -115,7 +116,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
Assembler::FlushICache(isolate, buffer, allocated);
Assembler::FlushICache(isolate, buffer, actual_size);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}

View File

@ -42,17 +42,19 @@
namespace v8 {
namespace internal {
#define __ masm.
#define __ assm.
ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Register destination_reg) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize, &actual_size,
v8::base::OS::MemoryPermission::kReadWriteExecute));
CHECK(buffer);
HandleScope handles(isolate);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size),
v8::internal::CodeObjectRequired::kYes);
DoubleToIStub stub(isolate, destination_reg);
byte* start = stub.GetCode()->instruction_start();
@ -107,7 +109,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
__ ret(kDoubleSize);
CodeDesc desc;
masm.GetCode(isolate, &desc);
assm.GetCode(isolate, &desc);
return reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer));
}

View File

@ -47,13 +47,15 @@ namespace internal {
ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Register destination_reg) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize, &actual_size,
v8::base::OS::MemoryPermission::kReadWriteExecute));
CHECK(buffer);
HandleScope handles(isolate);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
v8::internal::CodeObjectRequired::kYes);
DoubleToIStub stub(isolate, destination_reg);
byte* start = stub.GetCode()->instruction_start();
@ -128,7 +130,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
Assembler::FlushICache(isolate, buffer, allocated);
Assembler::FlushICache(isolate, buffer, actual_size);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}

View File

@ -47,13 +47,15 @@ namespace internal {
ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Register destination_reg) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize, &actual_size,
v8::base::OS::MemoryPermission::kReadWriteExecute));
CHECK(buffer);
HandleScope handles(isolate);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
v8::internal::CodeObjectRequired::kYes);
DoubleToIStub stub(isolate, destination_reg);
byte* start = stub.GetCode()->instruction_start();
@ -125,7 +127,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
Assembler::FlushICache(isolate, buffer, allocated);
Assembler::FlushICache(isolate, buffer, actual_size);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}

View File

@ -42,17 +42,19 @@ namespace v8 {
namespace internal {
namespace test_code_stubs_x64 {
#define __ masm.
#define __ assm.
ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Register destination_reg) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize, &actual_size,
v8::base::OS::MemoryPermission::kReadWriteExecute));
CHECK(buffer);
HandleScope handles(isolate);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size),
v8::internal::CodeObjectRequired::kYes);
DoubleToIStub stub(isolate, destination_reg);
byte* start = stub.GetCode()->instruction_start();
@ -105,7 +107,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
__ ret(0);
CodeDesc desc;
masm.GetCode(isolate, &desc);
assm.GetCode(isolate, &desc);
return reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer));
}

View File

@ -45,16 +45,24 @@ typedef void* (*F)(int x, int y, int p2, int p3, int p4);
typedef Object* (*F3)(void* p0, int p1, int p2, int p3, int p4);
typedef int (*F5)(void*, void*, void*, void*, void*);
byte* AllocateExecutablePage(int* actual_size) {
size_t allocated = 0;
void* result =
v8::base::OS::Allocate(Assembler::kMinimalBufferSize, &allocated,
v8::base::OS::MemoryPermission::kReadWriteExecute);
CHECK(result);
*actual_size = static_cast<int>(allocated);
return static_cast<byte*>(result);
}
TEST(LoadAndStoreWithRepresentation) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
__ sub(sp, sp, Operand(1 * kPointerSize));
Label exit;
@ -138,10 +146,9 @@ TEST(ExtractLane) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
@ -279,10 +286,9 @@ TEST(ReplaceLane) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.

View File

@ -52,6 +52,16 @@ typedef int (*F0)();
#define __ masm->
byte* AllocateExecutablePage(int* actual_size) {
size_t allocated = 0;
void* result =
v8::base::OS::Allocate(Assembler::kMinimalBufferSize, &allocated,
v8::base::OS::MemoryPermission::kReadWriteExecute);
CHECK(result);
*actual_size = static_cast<int>(allocated);
return static_cast<byte*>(result);
}
static void EntryCode(MacroAssembler* masm) {
// Smi constant register is callee save.
__ pushq(kRootRegister);
@ -99,9 +109,9 @@ static void TestMoveSmi(MacroAssembler* masm, Label* exit, int id, Smi* value) {
TEST(SmiMove) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
EntryCode(masm);
@ -182,9 +192,9 @@ void TestSmiCompare(MacroAssembler* masm, Label* exit, int id, int x, int y) {
TEST(SmiCompare) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@ -228,9 +238,9 @@ TEST(SmiCompare) {
TEST(Integer32ToSmi) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@ -325,9 +335,9 @@ TEST(Integer32ToSmi) {
TEST(SmiCheck) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@ -422,9 +432,9 @@ void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
TEST(SmiIndex) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@ -455,9 +465,9 @@ TEST(OperandOffset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@ -803,9 +813,9 @@ TEST(OperandOffset) {
TEST(LoadAndStoreWithRepresentation) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
@ -1070,9 +1080,9 @@ void TestFloat64x2Neg(MacroAssembler* masm, Label* exit, double x, double y) {
TEST(SIMDMacros) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;

View File

@ -13,18 +13,18 @@ namespace v8 {
namespace internal {
TEST(OSReserveMemory) {
size_t page_size = OS::AllocatePageSize();
void* mem_addr = OS::Allocate(OS::GetRandomMmapAddr(), 1 * MB, page_size,
OS::MemoryPermission::kReadWrite);
CHECK_NE(0, page_size);
size_t mem_size = 0;
void* mem_addr = OS::ReserveAlignedRegion(1 * MB, OS::AllocatePageSize(),
OS::GetRandomMmapAddr(), &mem_size);
CHECK_NE(0, mem_size);
CHECK_NOT_NULL(mem_addr);
size_t commit_size = OS::CommitPageSize();
CHECK(OS::CommitRegion(mem_addr, commit_size, false));
size_t block_size = 4 * KB;
CHECK(OS::CommitRegion(mem_addr, block_size, false));
// Check whether we can write to memory.
int* addr = static_cast<int*>(mem_addr);
addr[KB - 1] = 2;
CHECK(OS::UncommitRegion(mem_addr, commit_size));
CHECK(OS::Free(mem_addr, page_size));
CHECK(OS::UncommitRegion(mem_addr, block_size));
OS::ReleaseRegion(mem_addr, mem_size);
}
#ifdef V8_CC_GNU

View File

@ -179,12 +179,13 @@ class MemoryAllocationPermissionsTest : public ::testing::Test {
void TestPermissions(OS::MemoryPermission permission, bool can_read,
bool can_write) {
const size_t page_size = OS::AllocatePageSize();
int* buffer = static_cast<int*>(
OS::Allocate(nullptr, page_size, page_size, permission));
const size_t allocation_size = OS::CommitPageSize();
size_t actual = 0;
int* buffer =
static_cast<int*>(OS::Allocate(allocation_size, &actual, permission));
ProbeMemory(buffer, MemoryAction::kRead, can_read);
ProbeMemory(buffer, MemoryAction::kWrite, can_write);
OS::Free(buffer, page_size);
OS::Free(buffer, actual);
}
};