[Memory] Use OS::Allocate for all OS memory allocations.

- Eliminates OS::ReserveRegion and OS::ReserveAlignedRegion.
- Changes OS::Allocate to take alignment parameter, reorders parameters
  to match page_allocator.
- Since the size of memory allocation can be deduced, don't return the
  amount of memory allocated.
- Changes reservation of aligned address space. Before we would reserve
  (size + alignment) rounded up to page size. This is too much, because
  maximum misalignment is (alignment - page_size).
- On Windows and Cygwin, we release an oversize allocation and
  immediately retry at the aligned address in the allocation. If we
  lose the address due to a race, we just retry.
- Clean up all the calls to OS::Allocate in codegen and tests by adding
  helper AllocateSystemPage function (allocation.h) and
  AllocateAssemblerBuffer (cctest.h).
- Changes 'assm' to 'masm' in some targets for consistency when using
  a macro-assembler.

Bug: chromium:756050
Cq-Include-Trybots: master.tryserver.chromium.linux:linux_chromium_rel_ng
Change-Id: I306dbe042cc867670fdc935abca29db074b0da71
Reviewed-on: https://chromium-review.googlesource.com/749848
Commit-Queue: Bill Budge <bbudge@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#49235}
This commit is contained in:
Bill Budge 2017-11-08 10:20:01 -08:00 committed by Commit Bot
parent 494aa2e015
commit 7e78506fc2
30 changed files with 535 additions and 601 deletions

View File

@ -103,21 +103,28 @@ void AlignedFree(void *ptr) {
#endif
}
VirtualMemory::VirtualMemory() : address_(nullptr), size_(0) {}
VirtualMemory::VirtualMemory(size_t size, void* hint)
: address_(base::OS::ReserveRegion(size, hint)), size_(size) {
#if defined(LEAK_SANITIZER)
__lsan_register_root_region(address_, size_);
#endif
// Allocates one read/write OS page. |address| is only a placement hint; on
// success *allocated is set to the page size, on failure it is left untouched.
byte* AllocateSystemPage(void* address, size_t* allocated) {
  const size_t page_size = base::OS::AllocatePageSize();
  void* memory = base::OS::Allocate(address, page_size, page_size,
                                    base::OS::MemoryPermission::kReadWrite);
  if (memory == nullptr) return nullptr;
  *allocated = page_size;
  return static_cast<byte*>(memory);
}
VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
VirtualMemory::VirtualMemory() : address_(nullptr), size_(0) {}
VirtualMemory::VirtualMemory(size_t size, void* hint, size_t alignment)
: address_(nullptr), size_(0) {
address_ = base::OS::ReserveAlignedRegion(size, alignment, hint, &size_);
size_t page_size = base::OS::AllocatePageSize();
size_t alloc_size = RoundUp(size, page_size);
address_ = base::OS::Allocate(hint, alloc_size, alignment,
base::OS::MemoryPermission::kNoAccess);
if (address_ != nullptr) {
size_ = alloc_size;
#if defined(LEAK_SANITIZER)
__lsan_register_root_region(address_, size_);
__lsan_register_root_region(address_, size_);
#endif
}
}
VirtualMemory::~VirtualMemory() {
@ -205,14 +212,14 @@ bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result) {
bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
VirtualMemory* result) {
VirtualMemory first_try(size, alignment, hint);
VirtualMemory first_try(size, hint, alignment);
if (first_try.IsReserved()) {
result->TakeControl(&first_try);
return true;
}
V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
VirtualMemory second_try(size, alignment, hint);
VirtualMemory second_try(size, hint, alignment);
result->TakeControl(&second_try);
return result->IsReserved();
}

View File

@ -76,19 +76,22 @@ class FreeStoreAllocationPolicy {
void* AlignedAlloc(size_t size, size_t alignment);
void AlignedFree(void *ptr);
// Allocates a single system memory page with read/write permissions. The
// address parameter is a hint. Returns the base address of the memory, or null
// on failure. Permissions can be changed on the base address.
byte* AllocateSystemPage(void* address, size_t* allocated);
// Represents and controls an area of reserved memory.
class V8_EXPORT_PRIVATE VirtualMemory {
public:
// Empty VirtualMemory object, controlling no reserved memory.
VirtualMemory();
// Reserves virtual memory with size.
explicit VirtualMemory(size_t size, void* hint);
// Reserves virtual memory containing an area of the given size that
// is aligned per alignment. This may not be at the position returned
// by address().
VirtualMemory(size_t size, size_t alignment, void* hint);
// Reserves virtual memory containing an area of the given size that is
// aligned per alignment. This may not be at the position returned by
// address().
VirtualMemory(size_t size, void* hint,
size_t alignment = base::OS::AllocatePageSize());
// Construct a virtual memory by assigning it some already mapped address
// and size.

View File

@ -489,10 +489,15 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
virtual void Free(void* data, size_t) { free(data); }
virtual void* Reserve(size_t length) {
size_t page_size = base::OS::AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
void* address =
base::OS::ReserveRegion(length, base::OS::GetRandomMmapAddr());
base::OS::Allocate(base::OS::GetRandomMmapAddr(), allocated, page_size,
base::OS::MemoryPermission::kNoAccess);
#if defined(LEAK_SANITIZER)
__lsan_register_root_region(address, length);
if (address != nullptr) {
__lsan_register_root_region(address, allocated);
}
#endif
return address;
}

View File

@ -14,21 +14,21 @@
namespace v8 {
namespace internal {
#define __ masm.
#if defined(V8_HOST_ARCH_ARM)
MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
size_t actual_size;
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
Register dest = r0;
@ -169,8 +169,8 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
Assembler::FlushICache(isolate, buffer, allocated);
base::OS::SetReadAndExecutable(buffer, allocated);
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
@ -182,12 +182,12 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
#if defined(USE_SIMULATOR)
return stub;
#else
size_t actual_size;
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
Register dest = r0;
@ -259,8 +259,8 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
CodeDesc desc;
masm.GetCode(isolate, &desc);
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
Assembler::FlushICache(isolate, buffer, allocated);
base::OS::SetReadAndExecutable(buffer, allocated);
return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
@ -271,12 +271,12 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return nullptr;
#else
size_t actual_size;
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
__ MovFromFloatParameter(d0);
@ -288,8 +288,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
Assembler::FlushICache(isolate, buffer, allocated);
base::OS::SetReadAndExecutable(buffer, allocated);
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

View File

@ -34,6 +34,18 @@ namespace {
// This causes VirtualMemory::Commit to not always commit the memory region
// specified.
// Maps an OS::MemoryPermission onto the corresponding Win32 page-protection
// constant used by VirtualAlloc.
int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
  if (access == OS::MemoryPermission::kNoAccess) return PAGE_NOACCESS;
  if (access == OS::MemoryPermission::kReadWrite) return PAGE_READWRITE;
  if (access == OS::MemoryPermission::kReadWriteExecute) {
    return PAGE_EXECUTE_READWRITE;
  }
  UNREACHABLE();
}
static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
void* hint) {
LPVOID base = nullptr;
@ -80,43 +92,44 @@ double CygwinTimezoneCache::LocalTimeOffset() {
(loc->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}
// static
void* OS::ReserveRegion(size_t size, void* hint) {
return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
// Allocates |size| bytes aligned to |alignment| (both must be multiples of
// the allocation page size). |address| is only a hint. kNoAccess yields a
// reserve-only mapping; any other permission is committed immediately.
// Returns the aligned base address, or nullptr on failure.
void* OS::Allocate(void* address, size_t size, size_t alignment,
                   MemoryPermission access) {
  size_t page_size = AllocatePageSize();
  DCHECK_EQ(0, size % page_size);
  DCHECK_EQ(0, alignment % page_size);
  address = AlignedAddress(address, alignment);
  // Add the maximum misalignment so we are guaranteed an aligned base address.
  // (alignment - page_size) suffices because the OS returns page-aligned
  // memory, so the worst-case offset to the next aligned address is that much.
  size_t request_size = size + (alignment - page_size);
  int flags = (access == OS::MemoryPermission::kNoAccess)
                  ? MEM_RESERVE
                  : MEM_RESERVE | MEM_COMMIT;
  int prot = GetProtectionFromMemoryPermission(access);
  void* base = RandomizedVirtualAlloc(request_size, flags, prot, address);
  if (base == nullptr) return nullptr;
  uint8_t* aligned_base = RoundUp(static_cast<uint8_t*>(base), alignment);
  int resize_attempts = 0;
  const int kMaxResizeAttempts = 3;
  while (aligned_base != base) {
    // Try reducing the size by freeing and then re-allocating at the aligned
    // base. Retry logic is needed since we may lose the memory due to a race.
    // Windows cannot trim a mapping in place, hence the free-and-retry dance.
    Free(base, request_size);
    if (resize_attempts == kMaxResizeAttempts) return nullptr;
    base = RandomizedVirtualAlloc(size, flags, prot, aligned_base);
    if (base == nullptr) return nullptr;
    aligned_base = RoundUp(static_cast<uint8_t*>(base), alignment);
    resize_attempts++;
  }
  return static_cast<void*>(aligned_base);
}
// static
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
size_t* allocated) {
hint = AlignedAddress(hint, alignment);
DCHECK_EQ(alignment % OS::AllocatePageSize(), 0);
size_t request_size =
RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocatePageSize()));
void* address = ReserveRegion(request_size, hint);
if (address == nullptr) {
*allocated = 0;
return nullptr;
}
uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
// Try reducing the size by freeing and then reallocating a specific area.
bool result = ReleaseRegion(address, request_size);
USE(result);
DCHECK(result);
address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
if (address != nullptr) {
request_size = size;
DCHECK(base == static_cast<uint8_t*>(address));
} else {
// Resizing failed, just go with a bigger area.
address = ReserveRegion(request_size, hint);
if (address == nullptr) {
*allocated = 0;
return nullptr;
}
}
*allocated = request_size;
return static_cast<void*>(address);
// Releases memory previously obtained from OS::Allocate.
void OS::Free(void* address, const size_t size) {
  // VirtualFree with MEM_RELEASE derives the region size itself.
  USE(size);
  // TODO(1240712): VirtualFree has a return value which is ignored here.
  VirtualFree(address, 0, MEM_RELEASE);
}
// static

View File

@ -18,42 +18,19 @@ TimezoneCache* OS::CreateTimezoneCache() {
}
// static
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) {
CHECK(false); // TODO(scottmg): Port, https://crbug.com/731217.
return nullptr;
}
// static
void OS::Guard(void* address, size_t size) {
CHECK_EQ(ZX_OK, zx_vmar_protect(zx_vmar_root_self(),
reinterpret_cast<uintptr_t>(address), size,
0 /*no permissions*/));
}
// static
void* OS::ReserveRegion(size_t size, void* hint) {
zx_handle_t vmo;
if (zx_vmo_create(size, 0, &vmo) != ZX_OK) return nullptr;
uintptr_t result;
zx_status_t status = zx_vmar_map(zx_vmar_root_self(), 0, vmo, 0, size,
0 /*no permissions*/, &result);
zx_handle_close(vmo);
if (status != ZX_OK) return nullptr;
return reinterpret_cast<void*>(result);
}
// static
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
size_t* allocated) {
DCHECK_EQ(alignment % OS::AllocatePageSize(), 0);
hint = AlignedAddress(hint, alignment);
size_t request_size =
RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocatePageSize()));
void* OS::Allocate(void* address, size_t size, size_t alignment,
OS::MemoryPermission access) {
// Currently we only support reserving memory.
DCHECK_EQ(MemoryPermission::kNoAccess, access);
size_t page_size = OS::AllocatePageSize();
DCHECK_EQ(0, size % page_size);
DCHECK_EQ(0, alignment % page_size);
address = AlignedAddress(address, alignment);
// Add the maximum misalignment so we are guaranteed an aligned base address.
size_t request_size = size + (alignment - page_size);
zx_handle_t vmo;
if (zx_vmo_create(request_size, 0, &vmo) != ZX_OK) {
*allocated = 0;
return nullptr;
}
static const char kVirtualMemoryName[] = "v8-virtualmem";
@ -66,26 +43,25 @@ void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
// so close the vmo either way.
zx_handle_close(vmo);
if (status != ZX_OK) {
*allocated = 0;
return nullptr;
}
uint8_t* base = reinterpret_cast<uint8_t*>(reservation);
uint8_t* aligned_base = RoundUp(base, alignment);
DCHECK_LE(base, aligned_base);
// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
DCHECK_LT(base, aligned_base);
size_t prefix_size = static_cast<size_t>(aligned_base - base);
zx_vmar_unmap(zx_vmar_root_self(), reinterpret_cast<uintptr_t>(base),
prefix_size);
request_size -= prefix_size;
}
size_t aligned_size = RoundUp(size, OS::AllocatePageSize());
DCHECK_LE(aligned_size, request_size);
size_t aligned_size = RoundUp(size, page_size);
if (aligned_size != request_size) {
DCHECK_LT(aligned_size, request_size);
size_t suffix_size = request_size - aligned_size;
zx_vmar_unmap(zx_vmar_root_self(),
reinterpret_cast<uintptr_t>(aligned_base + aligned_size),
@ -94,11 +70,16 @@ void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
}
DCHECK(aligned_size == request_size);
*allocated = aligned_size;
return static_cast<void*>(aligned_base);
}
// static
void OS::Guard(void* address, size_t size) {
CHECK_EQ(ZX_OK, zx_vmar_protect(zx_vmar_root_self(),
reinterpret_cast<uintptr_t>(address), size,
0 /*no permissions*/));
}
// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
uint32_t prot = ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE |

View File

@ -97,6 +97,27 @@ int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
}
UNREACHABLE();
}
// mmap()s |size| bytes (rounded up to a whole number of pages) with the
// protection implied by |access|. |address| is only a hint. Reserve-only
// (kNoAccess) requests avoid committing swap where the platform allows it.
// Returns nullptr on failure.
void* Allocate(void* address, size_t size, OS::MemoryPermission access) {
  const size_t request_size = RoundUp(size, OS::AllocatePageSize());
  const int prot = GetProtectionFromMemoryPermission(access);
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (access == OS::MemoryPermission::kNoAccess) {
    // TODO(bbudge) Improve readability by moving platform specific code into
    // helper functions.
#if !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX
    flags |= MAP_NORESERVE;
#endif
#if V8_OS_QNX
    flags |= MAP_LAZY;
#endif  // V8_OS_QNX
  }
  void* mem = mmap(address, request_size, prot, flags, kMmapFd, kMmapFdOffset);
  return (mem == MAP_FAILED) ? nullptr : mem;
}
#endif // !V8_OS_FUCHSIA
} // namespace
@ -208,25 +229,48 @@ void* OS::GetRandomMmapAddr() {
}
// TODO(bbudge) Move Cygwin and Fuchsia stuff into platform-specific files.
#if !V8_OS_FUCHSIA
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, AllocatePageSize());
int prot = GetProtectionFromMemoryPermission(access);
void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
kMmapFdOffset);
if (mbase == MAP_FAILED) return nullptr;
*allocated = msize;
return mbase;
}
#endif // !V8_OS_FUCHSIA
#if !V8_OS_CYGWIN && !V8_OS_FUCHSIA
// Allocates |size| bytes aligned to |alignment| (both must be multiples of
// the allocation page size). |address| is only a hint. Over-allocates by the
// maximum possible misalignment, then unmaps the unused prefix and suffix so
// that exactly |size| bytes remain, starting at an aligned base. Returns the
// aligned base address, or nullptr on failure.
void* OS::Allocate(void* address, size_t size, size_t alignment,
                   MemoryPermission access) {
  size_t page_size = AllocatePageSize();
  DCHECK_EQ(0, size % page_size);
  DCHECK_EQ(0, alignment % page_size);
  address = AlignedAddress(address, alignment);
  // Add the maximum misalignment so we are guaranteed an aligned base address.
  // (alignment - page_size) suffices because mmap returns page-aligned memory.
  size_t request_size = size + (alignment - page_size);
  void* result = base::Allocate(address, request_size, access);
  if (result == nullptr) return nullptr;
  // Unmap memory allocated before the aligned base address.
  uint8_t* base = static_cast<uint8_t*>(result);
  uint8_t* aligned_base = RoundUp(base, alignment);
  if (aligned_base != base) {
    DCHECK_LT(base, aligned_base);
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }
  // Unmap memory allocated after the potentially unaligned end.
  if (size != request_size) {
    DCHECK_LT(size, request_size);
    size_t suffix_size = request_size - size;
    OS::Free(aligned_base + size, suffix_size);
    request_size -= suffix_size;
  }
  // After trimming, exactly |size| bytes remain mapped at |aligned_base|.
  DCHECK_EQ(size, request_size);
  return static_cast<void*>(aligned_base);
}
#endif // !V8_OS_CYGWIN && !V8_OS_FUCHSIA
#if !V8_OS_CYGWIN
// Unmaps memory previously obtained from OS::Allocate.
void OS::Free(void* address, const size_t size) {
  // TODO(1240712): munmap has a return value which is ignored here.
  const int rc = munmap(address, size);
  DCHECK_EQ(0, rc);
  USE(rc);
}
#endif // !V8_OS_CYGWIN
void OS::SetReadAndExecutable(void* address, const size_t size) {
#if V8_OS_CYGWIN
@ -272,60 +316,6 @@ void OS::SetReadWriteAndExecutable(void* address, const size_t size) {
}
#if !V8_OS_CYGWIN && !V8_OS_FUCHSIA
// static
void* OS::ReserveRegion(size_t size, void* hint) {
int map_flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX
map_flags |= MAP_NORESERVE;
#endif
#if V8_OS_QNX
map_flags |= MAP_LAZY;
#endif // V8_OS_QNX
void* result = mmap(hint, size, PROT_NONE, map_flags, kMmapFd, kMmapFdOffset);
if (result == MAP_FAILED) return nullptr;
return result;
}
// static
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
size_t* allocated) {
DCHECK_EQ(0, alignment % OS::AllocatePageSize());
hint = AlignedAddress(hint, alignment);
size_t request_size =
RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocatePageSize()));
void* result = ReserveRegion(request_size, hint);
if (result == nullptr) {
*allocated = 0;
return nullptr;
}
uint8_t* base = static_cast<uint8_t*>(result);
uint8_t* aligned_base = RoundUp(base, alignment);
DCHECK_LE(base, aligned_base);
// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
size_t prefix_size = static_cast<size_t>(aligned_base - base);
OS::Free(base, prefix_size);
request_size -= prefix_size;
}
size_t aligned_size = RoundUp(size, OS::AllocatePageSize());
DCHECK_LE(aligned_size, request_size);
if (aligned_size != request_size) {
size_t suffix_size = request_size - aligned_size;
OS::Free(aligned_base + aligned_size, suffix_size);
request_size -= suffix_size;
}
DCHECK(aligned_size == request_size);
*allocated = aligned_size;
return static_cast<void*>(aligned_base);
}
// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);

View File

@ -732,6 +732,18 @@ void* OS::GetRandomMmapAddr() {
namespace {
// Translates an OS::MemoryPermission into the Win32 page-protection flag
// expected by VirtualAlloc/VirtualProtect.
int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
  if (access == OS::MemoryPermission::kReadWriteExecute) {
    return PAGE_EXECUTE_READWRITE;
  }
  if (access == OS::MemoryPermission::kReadWrite) return PAGE_READWRITE;
  if (access == OS::MemoryPermission::kNoAccess) return PAGE_NOACCESS;
  UNREACHABLE();
}
static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
void* hint) {
LPVOID base = NULL;
@ -747,49 +759,50 @@ static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
if (use_aslr &&
(protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS)) {
// For executable pages try and randomize the allocation address
// For executable or reserved pages try to randomize the allocation address.
base = VirtualAlloc(hint, size, action, protection);
}
// After three attempts give up and let the OS find an address to use.
if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
// On failure, let the OS find an address to use.
if (base == NULL) base = VirtualAlloc(nullptr, size, action, protection);
return base;
}
} // namespace
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) {
// VirtualAlloc rounds allocated size to page size automatically.
size_t msize = RoundUp(requested, static_cast<int>(AllocatePageSize()));
void* OS::Allocate(void* address, size_t size, size_t alignment,
MemoryPermission access) {
size_t page_size = AllocatePageSize();
DCHECK_EQ(0, size % page_size);
DCHECK_EQ(0, alignment % page_size);
address = AlignedAddress(address, alignment);
// Add the maximum misalignment so we are guaranteed an aligned base address.
size_t request_size = size + (alignment - page_size);
// Windows XP SP2 allows Data Execution Prevention (DEP).
int prot = PAGE_NOACCESS;
switch (access) {
case OS::MemoryPermission::kNoAccess: {
prot = PAGE_NOACCESS;
break;
}
case OS::MemoryPermission::kReadWrite: {
prot = PAGE_READWRITE;
break;
}
case OS::MemoryPermission::kReadWriteExecute: {
prot = PAGE_EXECUTE_READWRITE;
break;
}
int flags = (access == OS::MemoryPermission::kNoAccess)
? MEM_RESERVE
: MEM_RESERVE | MEM_COMMIT;
int prot = GetProtectionFromMemoryPermission(access);
void* base = RandomizedVirtualAlloc(request_size, flags, prot, address);
if (base == nullptr) return nullptr;
uint8_t* aligned_base = RoundUp(static_cast<uint8_t*>(base), alignment);
int resize_attempts = 0;
const int kMaxResizeAttempts = 3;
while (aligned_base != base) {
// Try reducing the size by freeing and then re-allocating at the aligned
// base. Retry logic is needed since we may lose the memory due to a race.
Free(base, request_size);
if (resize_attempts == kMaxResizeAttempts) return nullptr;
base = RandomizedVirtualAlloc(size, flags, prot, aligned_base);
if (base == nullptr) return nullptr;
aligned_base = RoundUp(static_cast<uint8_t*>(base), alignment);
resize_attempts++;
}
LPVOID mbase =
RandomizedVirtualAlloc(msize, MEM_COMMIT | MEM_RESERVE, prot, hint);
if (mbase == NULL) return NULL;
DCHECK_EQ(reinterpret_cast<uintptr_t>(mbase) % OS::AllocatePageSize(), 0);
*allocated = msize;
return mbase;
return static_cast<void*>(aligned_base);
}
void OS::Free(void* address, const size_t size) {
@ -825,44 +838,6 @@ void OS::SetReadWriteAndExecutable(void* address, const size_t size) {
VirtualProtect(address, size, PAGE_EXECUTE_READWRITE, &oldprotect));
}
// static
void* OS::ReserveRegion(size_t size, void* hint) {
return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
}
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
size_t* allocated) {
DCHECK_EQ(alignment % OS::AllocatePageSize(), 0);
hint = AlignedAddress(hint, alignment);
size_t request_size =
RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocatePageSize()));
void* address = ReserveRegion(request_size, hint);
if (address == nullptr) {
*allocated = 0;
return nullptr;
}
uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
// Try reducing the size by freeing and then reallocating a specific area.
bool result = ReleaseRegion(address, request_size);
USE(result);
DCHECK(result);
address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
if (address != nullptr) {
request_size = size;
DCHECK(base == static_cast<uint8_t*>(address));
} else {
// Resizing failed, just go with a bigger area.
address = ReserveRegion(request_size, hint);
if (address == nullptr) {
*allocated = 0;
return nullptr;
}
}
*allocated = request_size;
return static_cast<void*>(address);
}
// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;

View File

@ -172,9 +172,11 @@ class V8_BASE_EXPORT OS {
static void* GetRandomMmapAddr();
// Allocates memory. Permissions are set according to the access argument.
// Returns the address of the allocated memory, or nullptr on failure.
static void* Allocate(const size_t requested, size_t* allocated,
MemoryPermission access, void* hint = nullptr);
// The address parameter is a hint. The size and alignment parameters must be
// multiples of AllocatePageSize(). Returns the address of the allocated
// memory, with the specified size and alignment, or nullptr on failure.
static void* Allocate(void* address, size_t size, size_t alignment,
MemoryPermission access);
// Frees memory allocated by a call to Allocate.
static void Free(void* address, const size_t size);
@ -192,11 +194,6 @@ class V8_BASE_EXPORT OS {
// function. This is only a temporary function and will go away soon.
static void SetReadWriteAndExecutable(void* address, const size_t size);
static void* ReserveRegion(size_t size, void* hint);
static void* ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
size_t* allocated);
static bool CommitRegion(void* address, size_t size, bool is_executable);
static bool UncommitRegion(void* address, size_t size);

View File

@ -160,16 +160,17 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
}
#if USE_VM
void* VirtualMemoryAllocate(size_t length) {
void* data = base::OS::ReserveRegion(length, nullptr);
if (data && !base::OS::CommitRegion(data, length, false)) {
base::OS::ReleaseRegion(data, length);
return nullptr;
}
size_t page_size = base::OS::AllocatePageSize();
size_t alloc_size = RoundUp(length, page_size);
void* address = base::OS::Allocate(nullptr, alloc_size, page_size,
base::OS::MemoryPermission::kReadWrite);
if (address != nullptr) {
#if defined(LEAK_SANITIZER)
__lsan_register_root_region(data, length);
__lsan_register_root_region(address, alloc_size);
#endif
MSAN_MEMORY_IS_INITIALIZED(data, length);
return data;
MSAN_MEMORY_IS_INITIALIZED(address, alloc_size);
}
return address;
}
#endif
};

View File

@ -125,7 +125,7 @@ bool CodeRange::SetUp(size_t requested) {
}
// We are sure that we have mapped a block of requested addresses.
DCHECK(reservation.size() == requested);
DCHECK_GE(reservation.size(), requested);
Address base = reinterpret_cast<Address>(reservation.address());
// On some platforms, specifically Win64, we need to reserve some pages at
@ -456,15 +456,10 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
if (!AlignedAllocVirtualMemory(size, alignment, hint, &reservation))
return nullptr;
const Address base =
::RoundUp(static_cast<Address>(reservation.address()), alignment);
if (base + size != reservation.end()) {
const Address unused_start = ::RoundUp(base + size, GetCommitPageSize());
reservation.ReleasePartial(unused_start);
}
Address result = static_cast<Address>(reservation.address());
size_.Increment(reservation.size());
controller->TakeControl(&reservation);
return base;
return result;
}
Address MemoryAllocator::AllocateAlignedMemory(

View File

@ -14,14 +14,13 @@ namespace internal {
#define __ masm.
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
// esp[1 * kPointerSize]: raw double input
// esp[0 * kPointerSize]: return address
@ -39,8 +38,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
Assembler::FlushICache(isolate, buffer, allocated);
base::OS::SetReadAndExecutable(buffer, allocated);
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
@ -132,12 +131,12 @@ class LabelConverter {
MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
LabelConverter conv(buffer);
@ -451,8 +450,8 @@ MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
Assembler::FlushICache(isolate, buffer, allocated);
base::OS::SetReadAndExecutable(buffer, allocated);
// TODO(jkummerow): It would be nice to register this code creation event
// with the PROFILE / GDBJIT system.
return FUNCTION_CAST<MemMoveFunction>(buffer);

View File

@ -13,26 +13,26 @@
namespace v8 {
namespace internal {
#define __ masm.
#if defined(V8_HOST_ARCH_MIPS)
MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR) || defined(_MIPS_ARCH_MIPS32R6) || \
defined(_MIPS_ARCH_MIPS32RX)
return stub;
#else
size_t actual_size;
byte* buffer = static_cast<byte*>(base::OS::Allocate(
3 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
if (buffer == nullptr) return stub;
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
// This code assumes that cache lines are 32 bytes and if the cache line is
// larger it will not work correctly.
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
{
Label lastb, unaligned, aligned, chkw,
loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
@ -544,8 +544,8 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
Assembler::FlushICache(isolate, buffer, allocated);
base::OS::SetReadAndExecutable(buffer, allocated);
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
@ -555,12 +555,12 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return nullptr;
#else
size_t actual_size;
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
__ MovFromFloatParameter(f12);
@ -572,8 +572,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
Assembler::FlushICache(isolate, buffer, allocated);
base::OS::SetReadAndExecutable(buffer, allocated);
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

View File

@ -13,27 +13,26 @@
namespace v8 {
namespace internal {
#define __ masm.
#if defined(V8_HOST_ARCH_MIPS)
MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
size_t actual_size;
byte* buffer = static_cast<byte*>(base::OS::Allocate(
3 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
// This code assumes that cache lines are 32 bytes and if the cache line is
// larger it will not work correctly.
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
{
Label lastb, unaligned, aligned, chkw,
loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
@ -546,8 +545,8 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
masm.GetCode(isolte, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
Assembler::FlushICache(isolate, buffer, allocated);
base::OS::SetReadAndExecutable(buffer, allocated);
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
@ -557,12 +556,12 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return nullptr;
#else
size_t actual_size;
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
__ MovFromFloatParameter(f12);
@ -574,8 +573,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
Assembler::FlushICache(isolate, buffer, allocated);
base::OS::SetReadAndExecutable(buffer, allocated);
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

View File

@ -13,22 +13,21 @@
namespace v8 {
namespace internal {
#define __ masm.
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return nullptr;
#else
size_t actual_size;
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
// Called from C
// Called from C
__ function_descriptor();
__ MovFromFloatParameter(d1);
@ -41,8 +40,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(ABI_USES_FUNCTION_DESCRIPTORS ||
!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
Assembler::FlushICache(isolate, buffer, allocated);
base::OS::SetReadAndExecutable(buffer, allocated);
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

View File

@ -19,12 +19,12 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return nullptr;
#else
size_t actual_size;
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
__ MovFromFloatParameter(d0);
@ -37,8 +37,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(ABI_USES_FUNCTION_DESCRIPTORS ||
!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
Assembler::FlushICache(isolate, buffer, allocated);
base::OS::SetReadAndExecutable(buffer, allocated);
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

View File

@ -13,17 +13,15 @@ namespace internal {
#define __ masm.
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute,
isolate->heap()->GetRandomMmapAddr()));
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
// xmm0: raw double input.
// Move double input into registers.
__ Sqrtsd(xmm0, xmm0);
@ -33,8 +31,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
Assembler::FlushICache(isolate, buffer, allocated);
base::OS::SetReadAndExecutable(buffer, allocated);
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}

View File

@ -32,6 +32,7 @@
#include "include/libplatform/libplatform.h"
#include "include/v8-platform.h"
#include "src/assembler.h"
#include "src/debug/debug-interface.h"
#include "src/factory.h"
#include "src/flags.h"
@ -567,6 +568,19 @@ static inline void CheckDoubleEquals(double expected, double actual) {
CHECK_GE(expected, actual - kEpsilon);
}
static inline uint8_t* AllocateAssemblerBuffer(
size_t* allocated,
size_t requested = v8::internal::AssemblerBase::kMinimalBufferSize) {
size_t page_size = v8::base::OS::AllocatePageSize();
size_t alloc_size = RoundUp(requested, page_size);
void* result =
v8::base::OS::Allocate(nullptr, alloc_size, page_size,
v8::base::OS::MemoryPermission::kReadWriteExecute);
CHECK(result);
*allocated = alloc_size;
return static_cast<uint8_t*>(result);
}
static v8::debug::DebugDelegate dummy_delegate;
static inline void EnableDebugger(v8::Isolate* isolate) {

View File

@ -174,15 +174,14 @@ static void InitializeVM() {
#else // ifdef USE_SIMULATOR.
// Run the test on real hardware or models.
#define SETUP_SIZE(buf_size) \
Isolate* isolate = CcTest::i_isolate(); \
HandleScope scope(isolate); \
CHECK_NOT_NULL(isolate); \
size_t actual_size; \
byte* buf = static_cast<byte*>(v8::base::OS::Allocate( \
buf_size, &actual_size, base::OS::MemoryPermission::kReadWriteExecute)); \
MacroAssembler masm(isolate, buf, static_cast<unsigned>(actual_size), \
v8::internal::CodeObjectRequired::kYes); \
#define SETUP_SIZE(buf_size) \
Isolate* isolate = CcTest::i_isolate(); \
HandleScope scope(isolate); \
CHECK_NOT_NULL(isolate); \
size_t allocated; \
byte* buffer = AllocateAssemblerBuffer(&allocated); \
MacroAssembler masm(isolate, buf, static_cast<int>(allocated), \
v8::internal::CodeObjectRequired::kYes); \
RegisterDump core;
#define RESET() \

View File

@ -67,27 +67,13 @@ static const Register arg1 = rdi;
static const Register arg2 = rsi;
#endif
#define __ assm.
namespace {
byte* AllocateExecutablePage(int* actual_size) {
size_t allocated = 0;
void* result =
v8::base::OS::Allocate(Assembler::kMinimalBufferSize, &allocated,
v8::base::OS::MemoryPermission::kReadWriteExecute);
CHECK(result);
*actual_size = static_cast<int>(allocated);
return static_cast<byte*>(result);
}
} // namespace
#define __ masm.
TEST(AssemblerX64ReturnOperation) {
CcTest::InitializeVM();
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
// Assemble a simple function that copies argument 2 and returns it.
__ movq(rax, arg2);
@ -95,7 +81,7 @@ TEST(AssemblerX64ReturnOperation) {
__ ret(0);
CodeDesc desc;
assm.GetCode(CcTest::i_isolate(), &desc);
masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(3, 2);
CHECK_EQ(2, result);
@ -104,9 +90,9 @@ TEST(AssemblerX64ReturnOperation) {
TEST(AssemblerX64StackOperations) {
CcTest::InitializeVM();
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
// Assemble a simple function that copies argument 2 and returns it.
// We compile without stack frame pointers, so the gdb debugger shows
@ -124,7 +110,7 @@ TEST(AssemblerX64StackOperations) {
__ ret(0);
CodeDesc desc;
assm.GetCode(CcTest::i_isolate(), &desc);
masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(3, 2);
CHECK_EQ(2, result);
@ -133,9 +119,9 @@ TEST(AssemblerX64StackOperations) {
TEST(AssemblerX64ArithmeticOperations) {
CcTest::InitializeVM();
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
// Assemble a simple function that adds arguments returning the sum.
__ movq(rax, arg2);
@ -143,7 +129,7 @@ TEST(AssemblerX64ArithmeticOperations) {
__ ret(0);
CodeDesc desc;
assm.GetCode(CcTest::i_isolate(), &desc);
masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(3, 2);
CHECK_EQ(5, result);
@ -152,9 +138,9 @@ TEST(AssemblerX64ArithmeticOperations) {
TEST(AssemblerX64CmpbOperation) {
CcTest::InitializeVM();
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
// Assemble a function that compare argument byte returing 1 if equal else 0.
// On Windows, it compares rcx with rdx which does not require REX prefix;
@ -169,7 +155,7 @@ TEST(AssemblerX64CmpbOperation) {
__ ret(0);
CodeDesc desc;
assm.GetCode(CcTest::i_isolate(), &desc);
masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(0x1002, 0x2002);
CHECK_EQ(1, result);
@ -179,14 +165,14 @@ TEST(AssemblerX64CmpbOperation) {
TEST(Regression684407) {
CcTest::InitializeVM();
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
Address before = assm.pc();
Address before = masm.pc();
__ cmpl(Operand(arg1, 0),
Immediate(0, RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE));
Address after = assm.pc();
Address after = masm.pc();
size_t instruction_size = static_cast<size_t>(after - before);
// Check that the immediate is not encoded as uint8.
CHECK_LT(sizeof(uint32_t), instruction_size);
@ -194,9 +180,9 @@ TEST(Regression684407) {
TEST(AssemblerX64ImulOperation) {
CcTest::InitializeVM();
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
// Assemble a simple function that multiplies arguments returning the high
// word.
@ -206,7 +192,7 @@ TEST(AssemblerX64ImulOperation) {
__ ret(0);
CodeDesc desc;
assm.GetCode(CcTest::i_isolate(), &desc);
masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(3, 2);
CHECK_EQ(0, result);
@ -219,9 +205,9 @@ TEST(AssemblerX64ImulOperation) {
TEST(AssemblerX64testbwqOperation) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
__ pushq(rbx);
__ pushq(rdi);
@ -375,7 +361,7 @@ TEST(AssemblerX64testbwqOperation) {
__ ret(0);
CodeDesc desc;
assm.GetCode(CcTest::i_isolate(), &desc);
masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(0, 0);
CHECK_EQ(1, result);
@ -383,9 +369,9 @@ TEST(AssemblerX64testbwqOperation) {
TEST(AssemblerX64XchglOperations) {
CcTest::InitializeVM();
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
__ movq(rax, Operand(arg1, 0));
__ movq(r11, Operand(arg2, 0));
@ -395,7 +381,7 @@ TEST(AssemblerX64XchglOperations) {
__ ret(0);
CodeDesc desc;
assm.GetCode(CcTest::i_isolate(), &desc);
masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
uint64_t right = V8_2PART_UINT64_C(0x30000000, 40000000);
@ -408,16 +394,16 @@ TEST(AssemblerX64XchglOperations) {
TEST(AssemblerX64OrlOperations) {
CcTest::InitializeVM();
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
__ movq(rax, Operand(arg2, 0));
__ orl(Operand(arg1, 0), rax);
__ ret(0);
CodeDesc desc;
assm.GetCode(CcTest::i_isolate(), &desc);
masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
uint64_t right = V8_2PART_UINT64_C(0x30000000, 40000000);
@ -429,16 +415,16 @@ TEST(AssemblerX64OrlOperations) {
TEST(AssemblerX64RollOperations) {
CcTest::InitializeVM();
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
__ movq(rax, arg1);
__ roll(rax, Immediate(1));
__ ret(0);
CodeDesc desc;
assm.GetCode(CcTest::i_isolate(), &desc);
masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
uint64_t src = V8_2PART_UINT64_C(0x10000000, C0000000);
uint64_t result = FUNCTION_CAST<F5>(buffer)(src);
@ -448,16 +434,16 @@ TEST(AssemblerX64RollOperations) {
TEST(AssemblerX64SublOperations) {
CcTest::InitializeVM();
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
__ movq(rax, Operand(arg2, 0));
__ subl(Operand(arg1, 0), rax);
__ ret(0);
CodeDesc desc;
assm.GetCode(CcTest::i_isolate(), &desc);
masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
uint64_t right = V8_2PART_UINT64_C(0x30000000, 40000000);
@ -469,9 +455,9 @@ TEST(AssemblerX64SublOperations) {
TEST(AssemblerX64TestlOperations) {
CcTest::InitializeVM();
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
// Set rax with the ZF flag of the testl instruction.
Label done;
@ -484,7 +470,7 @@ TEST(AssemblerX64TestlOperations) {
__ ret(0);
CodeDesc desc;
assm.GetCode(CcTest::i_isolate(), &desc);
masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
uint64_t right = V8_2PART_UINT64_C(0x30000000, 00000000);
@ -495,9 +481,9 @@ TEST(AssemblerX64TestlOperations) {
TEST(AssemblerX64TestwOperations) {
typedef uint16_t (*F)(uint16_t * x);
CcTest::InitializeVM();
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
// Set rax with the ZF flag of the testl instruction.
Label done;
@ -509,7 +495,7 @@ TEST(AssemblerX64TestwOperations) {
__ ret(0);
CodeDesc desc;
assm.GetCode(CcTest::i_isolate(), &desc);
masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
uint16_t operand = 0x8000;
uint16_t result = FUNCTION_CAST<F>(buffer)(&operand);
@ -518,16 +504,16 @@ TEST(AssemblerX64TestwOperations) {
TEST(AssemblerX64XorlOperations) {
CcTest::InitializeVM();
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
__ movq(rax, Operand(arg2, 0));
__ xorl(Operand(arg1, 0), rax);
__ ret(0);
CodeDesc desc;
assm.GetCode(CcTest::i_isolate(), &desc);
masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
uint64_t right = V8_2PART_UINT64_C(0x30000000, 60000000);
@ -539,9 +525,9 @@ TEST(AssemblerX64XorlOperations) {
TEST(AssemblerX64MemoryOperands) {
CcTest::InitializeVM();
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
// Assemble a simple function that copies argument 2 and returns it.
__ pushq(rbp);
@ -561,7 +547,7 @@ TEST(AssemblerX64MemoryOperands) {
__ ret(0);
CodeDesc desc;
assm.GetCode(CcTest::i_isolate(), &desc);
masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(3, 2);
CHECK_EQ(3, result);
@ -570,9 +556,9 @@ TEST(AssemblerX64MemoryOperands) {
TEST(AssemblerX64ControlFlow) {
CcTest::InitializeVM();
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
// Assemble a simple function that copies argument 1 and returns it.
__ pushq(rbp);
@ -587,7 +573,7 @@ TEST(AssemblerX64ControlFlow) {
__ ret(0);
CodeDesc desc;
assm.GetCode(CcTest::i_isolate(), &desc);
masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(3, 2);
CHECK_EQ(3, result);
@ -596,9 +582,9 @@ TEST(AssemblerX64ControlFlow) {
TEST(AssemblerX64LoopImmediates) {
CcTest::InitializeVM();
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
// Assemble two loops using rax as counter, and verify the ending counts.
Label Fail;
@ -635,7 +621,7 @@ TEST(AssemblerX64LoopImmediates) {
__ ret(0);
CodeDesc desc;
assm.GetCode(CcTest::i_isolate(), &desc);
masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
int result = FUNCTION_CAST<F0>(buffer)();
CHECK_EQ(1, result);
@ -689,7 +675,7 @@ TEST(AssemblerX64LabelChaining) {
// Test chaining of label usages within instructions (issue 1644).
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Assembler assm(CcTest::i_isolate(), nullptr, 0);
Assembler masm(CcTest::i_isolate(), nullptr, 0);
Label target;
__ j(equal, &target);
@ -704,7 +690,7 @@ TEST(AssemblerMultiByteNop) {
v8::HandleScope scope(CcTest::isolate());
byte buffer[1024];
Isolate* isolate = CcTest::i_isolate();
Assembler assm(isolate, buffer, sizeof(buffer));
Assembler masm(isolate, buffer, sizeof(buffer));
__ pushq(rbx);
__ pushq(rcx);
__ pushq(rdx);
@ -717,9 +703,9 @@ TEST(AssemblerMultiByteNop) {
__ movq(rdi, Immediate(5));
__ movq(rsi, Immediate(6));
for (int i = 0; i < 16; i++) {
int before = assm.pc_offset();
int before = masm.pc_offset();
__ Nop(i);
CHECK_EQ(assm.pc_offset() - before, i);
CHECK_EQ(masm.pc_offset() - before, i);
}
Label fail;
@ -752,7 +738,7 @@ TEST(AssemblerMultiByteNop) {
__ ret(0);
CodeDesc desc;
assm.GetCode(isolate, &desc);
masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
@ -775,7 +761,7 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
CHECK_EQ(ELEMENT_COUNT, vec->Length());
Isolate* isolate = CcTest::i_isolate();
Assembler assm(isolate, buffer, sizeof(buffer));
Assembler masm(isolate, buffer, sizeof(buffer));
// Remove return address from the stack for fix stack frame alignment.
__ popq(rcx);
@ -808,7 +794,7 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
__ ret(0);
CodeDesc desc;
assm.GetCode(isolate, &desc);
masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
@ -864,14 +850,15 @@ TEST(AssemblerX64Extractps) {
v8::HandleScope scope(CcTest::isolate());
byte buffer[256];
Isolate* isolate = CcTest::i_isolate();
Assembler assm(isolate, buffer, sizeof(buffer));
{ CpuFeatureScope fscope2(&assm, SSE4_1);
Assembler masm(isolate, buffer, sizeof(buffer));
{
CpuFeatureScope fscope2(&masm, SSE4_1);
__ extractps(rax, xmm0, 0x1);
__ ret(0);
}
CodeDesc desc;
assm.GetCode(isolate, &desc);
masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@ -894,7 +881,7 @@ TEST(AssemblerX64SSE) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
MacroAssembler assm(isolate, buffer, sizeof(buffer),
MacroAssembler masm(isolate, buffer, sizeof(buffer),
v8::internal::CodeObjectRequired::kYes);
{
__ shufps(xmm0, xmm0, 0x0); // brocast first argument
@ -909,7 +896,7 @@ TEST(AssemblerX64SSE) {
}
CodeDesc desc;
assm.GetCode(isolate, &desc);
masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@ -930,10 +917,10 @@ TEST(AssemblerX64FMA_sd) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
MacroAssembler assm(isolate, buffer, sizeof(buffer),
MacroAssembler masm(isolate, buffer, sizeof(buffer),
v8::internal::CodeObjectRequired::kYes);
{
CpuFeatureScope fscope(&assm, FMA3);
CpuFeatureScope fscope(&masm, FMA3);
Label exit;
// argument in xmm0, xmm1 and xmm2
// xmm0 * xmm1 + xmm2
@ -1135,7 +1122,7 @@ TEST(AssemblerX64FMA_sd) {
}
CodeDesc desc;
assm.GetCode(isolate, &desc);
masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@ -1156,10 +1143,10 @@ TEST(AssemblerX64FMA_ss) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
MacroAssembler assm(isolate, buffer, sizeof(buffer),
MacroAssembler masm(isolate, buffer, sizeof(buffer),
v8::internal::CodeObjectRequired::kYes);
{
CpuFeatureScope fscope(&assm, FMA3);
CpuFeatureScope fscope(&masm, FMA3);
Label exit;
// arguments in xmm0, xmm1 and xmm2
// xmm0 * xmm1 + xmm2
@ -1361,7 +1348,7 @@ TEST(AssemblerX64FMA_ss) {
}
CodeDesc desc;
assm.GetCode(isolate, &desc);
masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@ -1380,7 +1367,7 @@ TEST(AssemblerX64SSE_ss) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
Assembler assm(isolate, buffer, sizeof(buffer));
Assembler masm(isolate, buffer, sizeof(buffer));
{
Label exit;
// arguments in xmm0, xmm1 and xmm2
@ -1436,7 +1423,7 @@ TEST(AssemblerX64SSE_ss) {
}
CodeDesc desc;
assm.GetCode(isolate, &desc);
masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@ -1458,9 +1445,9 @@ TEST(AssemblerX64AVX_ss) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
Assembler assm(isolate, buffer, sizeof(buffer));
Assembler masm(isolate, buffer, sizeof(buffer));
{
CpuFeatureScope avx_scope(&assm, AVX);
CpuFeatureScope avx_scope(&masm, AVX);
Label exit;
// arguments in xmm0, xmm1 and xmm2
__ subq(rsp, Immediate(kDoubleSize * 2)); // For memory operand
@ -1521,7 +1508,7 @@ TEST(AssemblerX64AVX_ss) {
}
CodeDesc desc;
assm.GetCode(isolate, &desc);
masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@ -1543,9 +1530,9 @@ TEST(AssemblerX64AVX_sd) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
Assembler assm(isolate, buffer, sizeof(buffer));
Assembler masm(isolate, buffer, sizeof(buffer));
{
CpuFeatureScope avx_scope(&assm, AVX);
CpuFeatureScope avx_scope(&masm, AVX);
Label exit;
// arguments in xmm0, xmm1 and xmm2
__ subq(rsp, Immediate(kDoubleSize * 2)); // For memory operand
@ -1760,7 +1747,7 @@ TEST(AssemblerX64AVX_sd) {
}
CodeDesc desc;
assm.GetCode(isolate, &desc);
masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@ -1782,10 +1769,10 @@ TEST(AssemblerX64BMI1) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
MacroAssembler assm(isolate, buffer, sizeof(buffer),
MacroAssembler masm(isolate, buffer, sizeof(buffer),
v8::internal::CodeObjectRequired::kYes);
{
CpuFeatureScope fscope(&assm, BMI1);
CpuFeatureScope fscope(&masm, BMI1);
Label exit;
__ movq(rcx, V8_UINT64_C(0x1122334455667788)); // source operand
@ -1952,7 +1939,7 @@ TEST(AssemblerX64BMI1) {
}
CodeDesc desc;
assm.GetCode(isolate, &desc);
masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@ -1972,10 +1959,10 @@ TEST(AssemblerX64LZCNT) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
MacroAssembler assm(isolate, buffer, sizeof(buffer),
MacroAssembler masm(isolate, buffer, sizeof(buffer),
v8::internal::CodeObjectRequired::kYes);
{
CpuFeatureScope fscope(&assm, LZCNT);
CpuFeatureScope fscope(&masm, LZCNT);
Label exit;
__ movq(rcx, V8_UINT64_C(0x1122334455667788)); // source operand
@ -2012,7 +1999,7 @@ TEST(AssemblerX64LZCNT) {
}
CodeDesc desc;
assm.GetCode(isolate, &desc);
masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@ -2032,10 +2019,10 @@ TEST(AssemblerX64POPCNT) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
MacroAssembler assm(isolate, buffer, sizeof(buffer),
MacroAssembler masm(isolate, buffer, sizeof(buffer),
v8::internal::CodeObjectRequired::kYes);
{
CpuFeatureScope fscope(&assm, POPCNT);
CpuFeatureScope fscope(&masm, POPCNT);
Label exit;
__ movq(rcx, V8_UINT64_C(0x1111111111111100)); // source operand
@ -2072,7 +2059,7 @@ TEST(AssemblerX64POPCNT) {
}
CodeDesc desc;
assm.GetCode(isolate, &desc);
masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@ -2092,10 +2079,10 @@ TEST(AssemblerX64BMI2) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[2048];
MacroAssembler assm(isolate, buffer, sizeof(buffer),
MacroAssembler masm(isolate, buffer, sizeof(buffer),
v8::internal::CodeObjectRequired::kYes);
{
CpuFeatureScope fscope(&assm, BMI2);
CpuFeatureScope fscope(&masm, BMI2);
Label exit;
__ pushq(rbx); // save rbx
__ movq(rcx, V8_UINT64_C(0x1122334455667788)); // source operand
@ -2335,7 +2322,7 @@ TEST(AssemblerX64BMI2) {
}
CodeDesc desc;
assm.GetCode(isolate, &desc);
masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@ -2353,7 +2340,7 @@ TEST(AssemblerX64JumpTables1) {
CcTest::InitializeVM();
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
MacroAssembler assm(isolate, nullptr, 0,
MacroAssembler masm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
const int kNumCases = 512;
@ -2380,7 +2367,7 @@ TEST(AssemblerX64JumpTables1) {
__ ret(0);
CodeDesc desc;
assm.GetCode(isolate, &desc);
masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@ -2401,7 +2388,7 @@ TEST(AssemblerX64JumpTables2) {
CcTest::InitializeVM();
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
MacroAssembler assm(isolate, nullptr, 0,
MacroAssembler masm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
const int kNumCases = 512;
@ -2429,7 +2416,7 @@ TEST(AssemblerX64JumpTables2) {
}
CodeDesc desc;
assm.GetCode(isolate, &desc);
masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@ -2446,9 +2433,9 @@ TEST(AssemblerX64JumpTables2) {
TEST(AssemblerX64PslldWithXmm15) {
CcTest::InitializeVM();
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
Assembler assm(CcTest::i_isolate(), buffer, actual_size);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
__ movq(xmm15, arg1);
__ pslld(xmm15, 1);
@ -2456,7 +2443,7 @@ TEST(AssemblerX64PslldWithXmm15) {
__ ret(0);
CodeDesc desc;
assm.GetCode(CcTest::i_isolate(), &desc);
masm.GetCode(CcTest::i_isolate(), &desc);
uint64_t result = FUNCTION_CAST<F5>(buffer)(V8_UINT64_C(0x1122334455667788));
CHECK_EQ(V8_UINT64_C(0x22446688aaccef10), result);
}
@ -2469,10 +2456,10 @@ TEST(AssemblerX64vmovups) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
MacroAssembler assm(isolate, buffer, sizeof(buffer),
MacroAssembler masm(isolate, buffer, sizeof(buffer),
v8::internal::CodeObjectRequired::kYes);
{
CpuFeatureScope avx_scope(&assm, AVX);
CpuFeatureScope avx_scope(&masm, AVX);
__ shufps(xmm0, xmm0, 0x0); // brocast first argument
__ shufps(xmm1, xmm1, 0x0); // brocast second argument
// copy xmm1 to xmm0 through the stack to test the "vmovups reg, mem".
@ -2485,7 +2472,7 @@ TEST(AssemblerX64vmovups) {
}
CodeDesc desc;
assm.GetCode(isolate, &desc);
masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT

View File

@ -45,15 +45,13 @@ namespace internal {
ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Register destination_reg) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize, &actual_size,
v8::base::OS::MemoryPermission::kReadWriteExecute));
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
DoubleToIStub stub(isolate, destination_reg);
byte* start = stub.GetCode()->instruction_start();
@ -117,7 +115,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
Assembler::FlushICache(isolate, buffer, actual_size);
Assembler::FlushICache(isolate, buffer, allocated);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}

View File

@ -45,15 +45,14 @@ namespace internal {
ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Register destination_reg) {
// Allocate an executable page of memory.
size_t actual_size = 4 * Assembler::kMinimalBufferSize;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
actual_size, &actual_size,
v8::base::OS::MemoryPermission::kReadWriteExecute));
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
size_t allocated;
byte* buffer =
AllocateAssemblerBuffer(&allocated, 4 * Assembler::kMinimalBufferSize);
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
DoubleToIStub stub(isolate, destination_reg);
byte* start = stub.GetCode()->instruction_start();
@ -116,7 +115,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
Assembler::FlushICache(isolate, buffer, actual_size);
Assembler::FlushICache(isolate, buffer, allocated);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}

View File

@ -42,19 +42,17 @@
namespace v8 {
namespace internal {
#define __ assm.
#define __ masm.
ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Register destination_reg) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize, &actual_size,
v8::base::OS::MemoryPermission::kReadWriteExecute));
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size),
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
DoubleToIStub stub(isolate, destination_reg);
byte* start = stub.GetCode()->instruction_start();
@ -109,7 +107,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
__ ret(kDoubleSize);
CodeDesc desc;
assm.GetCode(isolate, &desc);
masm.GetCode(isolate, &desc);
return reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer));
}

View File

@ -47,15 +47,13 @@ namespace internal {
ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Register destination_reg) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize, &actual_size,
v8::base::OS::MemoryPermission::kReadWriteExecute));
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
DoubleToIStub stub(isolate, destination_reg);
byte* start = stub.GetCode()->instruction_start();
@ -130,7 +128,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
Assembler::FlushICache(isolate, buffer, actual_size);
Assembler::FlushICache(isolate, buffer, allocated);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}

View File

@ -47,15 +47,13 @@ namespace internal {
ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Register destination_reg) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize, &actual_size,
v8::base::OS::MemoryPermission::kReadWriteExecute));
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
DoubleToIStub stub(isolate, destination_reg);
byte* start = stub.GetCode()->instruction_start();
@ -127,7 +125,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
Assembler::FlushICache(isolate, buffer, actual_size);
Assembler::FlushICache(isolate, buffer, allocated);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}

View File

@ -42,19 +42,17 @@ namespace v8 {
namespace internal {
namespace test_code_stubs_x64 {
#define __ assm.
#define __ masm.
ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Register destination_reg) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize, &actual_size,
v8::base::OS::MemoryPermission::kReadWriteExecute));
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size),
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
DoubleToIStub stub(isolate, destination_reg);
byte* start = stub.GetCode()->instruction_start();
@ -107,7 +105,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
__ ret(0);
CodeDesc desc;
assm.GetCode(isolate, &desc);
masm.GetCode(isolate, &desc);
return reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer));
}

View File

@ -45,24 +45,16 @@ typedef void* (*F)(int x, int y, int p2, int p3, int p4);
typedef Object* (*F3)(void* p0, int p1, int p2, int p3, int p4);
typedef int (*F5)(void*, void*, void*, void*, void*);
byte* AllocateExecutablePage(int* actual_size) {
size_t allocated = 0;
void* result =
v8::base::OS::Allocate(Assembler::kMinimalBufferSize, &allocated,
v8::base::OS::MemoryPermission::kReadWriteExecute);
CHECK(result);
*actual_size = static_cast<int>(allocated);
return static_cast<byte*>(result);
}
TEST(LoadAndStoreWithRepresentation) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
__ sub(sp, sp, Operand(1 * kPointerSize));
Label exit;
@ -146,9 +138,10 @@ TEST(ExtractLane) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
@ -286,9 +279,10 @@ TEST(ReplaceLane) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.

View File

@ -52,16 +52,6 @@ typedef int (*F0)();
#define __ masm->
byte* AllocateExecutablePage(int* actual_size) {
size_t allocated = 0;
void* result =
v8::base::OS::Allocate(Assembler::kMinimalBufferSize, &allocated,
v8::base::OS::MemoryPermission::kReadWriteExecute);
CHECK(result);
*actual_size = static_cast<int>(allocated);
return static_cast<byte*>(result);
}
static void EntryCode(MacroAssembler* masm) {
// Smi constant register is callee save.
__ pushq(kRootRegister);
@ -109,9 +99,9 @@ static void TestMoveSmi(MacroAssembler* masm, Label* exit, int id, Smi* value) {
TEST(SmiMove) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
EntryCode(masm);
@ -192,9 +182,9 @@ void TestSmiCompare(MacroAssembler* masm, Label* exit, int id, int x, int y) {
TEST(SmiCompare) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@ -238,9 +228,9 @@ TEST(SmiCompare) {
TEST(Integer32ToSmi) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@ -335,9 +325,9 @@ TEST(Integer32ToSmi) {
TEST(SmiCheck) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@ -432,9 +422,9 @@ void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
TEST(SmiIndex) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@ -465,9 +455,9 @@ TEST(OperandOffset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@ -813,9 +803,9 @@ TEST(OperandOffset) {
TEST(LoadAndStoreWithRepresentation) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
@ -1080,9 +1070,9 @@ void TestFloat64x2Neg(MacroAssembler* masm, Label* exit, double x, double y) {
TEST(SIMDMacros) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;

View File

@ -13,18 +13,18 @@ namespace v8 {
namespace internal {
TEST(OSReserveMemory) {
size_t mem_size = 0;
void* mem_addr = OS::ReserveAlignedRegion(1 * MB, OS::AllocatePageSize(),
OS::GetRandomMmapAddr(), &mem_size);
CHECK_NE(0, mem_size);
size_t page_size = OS::AllocatePageSize();
void* mem_addr = OS::Allocate(OS::GetRandomMmapAddr(), 1 * MB, page_size,
OS::MemoryPermission::kReadWrite);
CHECK_NE(0, page_size);
CHECK_NOT_NULL(mem_addr);
size_t block_size = 4 * KB;
CHECK(OS::CommitRegion(mem_addr, block_size, false));
size_t commit_size = OS::CommitPageSize();
CHECK(OS::CommitRegion(mem_addr, commit_size, false));
// Check whether we can write to memory.
int* addr = static_cast<int*>(mem_addr);
addr[KB - 1] = 2;
CHECK(OS::UncommitRegion(mem_addr, block_size));
OS::ReleaseRegion(mem_addr, mem_size);
CHECK(OS::UncommitRegion(mem_addr, commit_size));
OS::ReleaseRegion(mem_addr, page_size);
}
#ifdef V8_CC_GNU

View File

@ -179,13 +179,12 @@ class MemoryAllocationPermissionsTest : public ::testing::Test {
void TestPermissions(OS::MemoryPermission permission, bool can_read,
bool can_write) {
const size_t allocation_size = OS::CommitPageSize();
size_t actual = 0;
int* buffer =
static_cast<int*>(OS::Allocate(allocation_size, &actual, permission));
const size_t page_size = OS::AllocatePageSize();
int* buffer = static_cast<int*>(
OS::Allocate(nullptr, page_size, page_size, permission));
ProbeMemory(buffer, MemoryAction::kRead, can_read);
ProbeMemory(buffer, MemoryAction::kWrite, can_write);
OS::Free(buffer, actual);
OS::Free(buffer, page_size);
}
};