Add mips64 port.

Summary:

- Changes in common code are mainly boilerplate:
gyp and test status file updates.

- On the mips64 simulator, all tests pass in all test units.

- Current issue: mjsunit JS debugger tests fail intermittently on HW in release mode.
The corresponding tests are skipped on HW.

- Skipped tests on mips64: test-heap/ReleaseOverReservedPages, mjsunit/debug-*

TEST=
BUG=
R=danno@chromium.org, plind44@gmail.com, ulan@chromium.org

Review URL: https://codereview.chromium.org/371923006

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22297 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Author: dusan.milosavljevic@rt-rk.com
Date:   2014-07-09 11:08:26 +00:00
Parent: cb778b24ae
Commit: a0f6878a06
81 changed files with 52123 additions and 36 deletions


@@ -221,7 +221,7 @@ endif
# Architectures and modes to be compiled. Consider these to be internal
# variables, don't override them (use the targets instead).
ARCHES = ia32 x64 x32 arm arm64 mips mipsel x87
ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64el x87
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug optdebug
DEFAULT_MODES = release debug


@@ -51,6 +51,8 @@ def DoMain(_):
host_arch = 'arm'
elif host_arch == 'aarch64':
host_arch = 'arm64'
elif host_arch == 'mips64':
host_arch = 'mips64el'
elif host_arch.startswith('mips'):
host_arch = 'mipsel'


@@ -98,6 +98,7 @@
['(v8_target_arch=="arm" and host_arch!="arm") or \
(v8_target_arch=="arm64" and host_arch!="arm64") or \
(v8_target_arch=="mipsel" and host_arch!="mipsel") or \
(v8_target_arch=="mips64el" and host_arch!="mips64el") or \
(v8_target_arch=="x64" and host_arch!="x64") or \
(OS=="android" or OS=="qnx")', {
'want_separate_host_toolset': 1,


@@ -56,7 +56,7 @@
'v8_use_mips_abi_hardfloat%': 'true',
# Default arch variant for MIPS.
'mips_arch_variant%': 'mips32r2',
'mips_arch_variant%': 'r2',
'v8_enable_backtrace%': 0,
@@ -247,10 +247,10 @@
'cflags': ['-msoft-float'],
'ldflags': ['-msoft-float'],
}],
['mips_arch_variant=="mips32r2"', {
['mips_arch_variant=="r2"', {
'cflags': ['-mips32r2', '-Wa,-mips32r2'],
}],
['mips_arch_variant=="mips32r1"', {
['mips_arch_variant=="r1"', {
'cflags': ['-mips32', '-Wa,-mips32'],
}],
],
@@ -272,7 +272,7 @@
'__mips_soft_float=1'
],
}],
['mips_arch_variant=="mips32r2"', {
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS32R2',],
}],
],
@@ -298,10 +298,10 @@
'cflags': ['-msoft-float'],
'ldflags': ['-msoft-float'],
}],
['mips_arch_variant=="mips32r2"', {
['mips_arch_variant=="r2"', {
'cflags': ['-mips32r2', '-Wa,-mips32r2'],
}],
['mips_arch_variant=="mips32r1"', {
['mips_arch_variant=="r1"', {
'cflags': ['-mips32', '-Wa,-mips32'],
}],
['mips_arch_variant=="loongson"', {
@@ -326,7 +326,7 @@
'__mips_soft_float=1'
],
}],
['mips_arch_variant=="mips32r2"', {
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS32R2',],
}],
['mips_arch_variant=="loongson"', {
@@ -334,6 +334,65 @@
}],
],
}], # v8_target_arch=="mipsel"
['v8_target_arch=="mips64el"', {
'defines': [
'V8_TARGET_ARCH_MIPS64',
],
'variables': {
'mipscompiler': '<!($(echo <(CXX)) -v 2>&1 | grep -q "^Target: mips" && echo "yes" || echo "no")',
},
'conditions': [
['mipscompiler=="yes"', {
'target_conditions': [
['_toolset=="target"', {
'cflags': ['-EL'],
'ldflags': ['-EL'],
'conditions': [
[ 'v8_use_mips_abi_hardfloat=="true"', {
'cflags': ['-mhard-float'],
'ldflags': ['-mhard-float'],
}, {
'cflags': ['-msoft-float'],
'ldflags': ['-msoft-float'],
}],
['mips_arch_variant=="r2"', {
'cflags': ['-mips64r2', '-mabi=64', '-Wa,-mips64r2'],
'ldflags': [
'-mips64r2', '-mabi=64',
'-Wl,--dynamic-linker=$(LDSO_PATH)',
'-Wl,--rpath=$(LD_R_PATH)',
],
}],
['mips_arch_variant=="loongson"', {
'cflags': ['-mips3', '-Wa,-mips3'],
}],
],
}],
],
}],
[ 'v8_can_use_fpu_instructions=="true"', {
'defines': [
'CAN_USE_FPU_INSTRUCTIONS',
],
}],
[ 'v8_use_mips_abi_hardfloat=="true"', {
'defines': [
'__mips_hard_float=1',
'CAN_USE_FPU_INSTRUCTIONS',
],
}, {
'defines': [
'__mips_soft_float=1'
],
}],
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS64R2',],
}],
['mips_arch_variant=="loongson"', {
'defines': ['_MIPS_ARCH_LOONGSON',],
}],
],
}], # v8_target_arch=="mips64el"
['v8_target_arch=="x64"', {
'defines': [
'V8_TARGET_ARCH_X64',


@@ -66,6 +66,8 @@
#include "src/arm/assembler-arm-inl.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/assembler-mips-inl.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/assembler-mips64-inl.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/assembler-x87-inl.h" // NOLINT
#else
@@ -84,6 +86,8 @@
#include "src/arm/regexp-macro-assembler-arm.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/regexp-macro-assembler-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/regexp-macro-assembler-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/regexp-macro-assembler-x87.h" // NOLINT
#else // Unknown architecture.
@@ -1340,6 +1344,8 @@ ExternalReference ExternalReference::re_check_stack_guard_state(
function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS
function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS64
function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
#elif V8_TARGET_ARCH_X87
function = FUNCTION_ADDR(RegExpMacroAssemblerX87::CheckStackGuardState);
#else


@@ -148,6 +148,8 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
#include "src/base/atomicops_internals_x86_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS
#include "src/base/atomicops_internals_mips_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS64
#include "src/base/atomicops_internals_mips64_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
#endif


@@ -0,0 +1,307 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef V8_BASE_ATOMICOPS_INTERNALS_MIPS64_GCC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_MIPS64_GCC_H_
namespace v8 {
namespace base {
// Atomically execute:
// result = *ptr;
// if (*ptr == old_value)
// *ptr = new_value;
// return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr"
//
// This routine implies no memory barriers.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev, tmp;
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
"ll %0, %5\n" // prev = *ptr
"bne %0, %3, 2f\n" // if (prev != old_value) goto 2
"move %2, %4\n" // tmp = new_value
"sc %2, %1\n" // *ptr = tmp (with atomic check)
"beqz %2, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
"2:\n"
".set pop\n"
: "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
: "Ir" (old_value), "r" (new_value), "m" (*ptr)
: "memory");
return prev;
}
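A usage sketch of the contract described in the comment above: a retry loop that atomically adds a delta using only the compare-and-swap primitive (illustrative C++, not part of the commit):

// Atomically add |delta| to |*ptr| and return the new value. Built on
// NoBarrier_CompareAndSwap; equivalent in effect to
// NoBarrier_AtomicIncrement below.
Atomic32 CasAdd(volatile Atomic32* ptr, Atomic32 delta) {
  Atomic32 old_value;
  do {
    old_value = *ptr;
  } while (NoBarrier_CompareAndSwap(ptr, old_value, old_value + delta) !=
           old_value);
  return old_value + delta;
}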
// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 temp, old;
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
"ll %1, %2\n" // old = *ptr
"move %0, %3\n" // temp = new_value
"sc %0, %2\n" // *ptr = temp (with atomic check)
"beqz %0, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
".set pop\n"
: "=&r" (temp), "=&r" (old), "=m" (*ptr)
: "r" (new_value), "m" (*ptr)
: "memory");
return old;
}
// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 temp, temp2;
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
"ll %0, %2\n" // temp = *ptr
"addu %1, %0, %3\n" // temp2 = temp + increment
"sc %1, %2\n" // *ptr = temp2 (with atomic check)
"beqz %1, 1b\n" // start again on atomic error
"addu %1, %0, %3\n" // temp2 = temp + increment
".set pop\n"
: "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
: "Ir" (increment), "m" (*ptr)
: "memory");
// temp2 now holds the final value.
return temp2;
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
MemoryBarrier();
Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
MemoryBarrier();
return res;
}
// "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrier();
return res;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
MemoryBarrier();
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
*ptr = value;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void MemoryBarrier() {
__asm__ __volatile__("sync" : : : "memory");
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
return *ptr;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
// 64-bit versions of the atomic ops.
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev, tmp;
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
"lld %0, %5\n" // prev = *ptr
"bne %0, %3, 2f\n" // if (prev != old_value) goto 2
"move %2, %4\n" // tmp = new_value
"scd %2, %1\n" // *ptr = tmp (with atomic check)
"beqz %2, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
"2:\n"
".set pop\n"
: "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
: "Ir" (old_value), "r" (new_value), "m" (*ptr)
: "memory");
return prev;
}
// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 temp, old;
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
"lld %1, %2\n" // old = *ptr
"move %0, %3\n" // temp = new_value
"scd %0, %2\n" // *ptr = temp (with atomic check)
"beqz %0, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
".set pop\n"
: "=&r" (temp), "=&r" (old), "=m" (*ptr)
: "r" (new_value), "m" (*ptr)
: "memory");
return old;
}
// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
Atomic64 temp, temp2;
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
"lld %0, %2\n" // temp = *ptr
"daddu %1, %0, %3\n" // temp2 = temp + increment
"scd %1, %2\n" // *ptr = temp2 (with atomic check)
"beqz %1, 1b\n" // start again on atomic error
"daddu %1, %0, %3\n" // temp2 = temp + increment
".set pop\n"
: "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
: "Ir" (increment), "m" (*ptr)
: "memory");
// temp2 now holds the final value.
return temp2;
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
MemoryBarrier();
Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment);
MemoryBarrier();
return res;
}
// "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrier();
return res;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
MemoryBarrier();
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
} } // namespace v8::base
#endif  // V8_BASE_ATOMICOPS_INTERNALS_MIPS64_GCC_H_
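The Acquire/Release operations above support the classic message-passing idiom. A minimal usage sketch under the documented semantics (illustrative, not code from this commit):

#include "src/base/atomicops.h"

// Producer: write the payload, then release-store the flag so the payload
// write cannot be reordered after the flag becomes visible.
void Publish(volatile v8::base::Atomic32* flag,
             volatile v8::base::Atomic32* payload) {
  v8::base::NoBarrier_Store(payload, 42);
  v8::base::Release_Store(flag, 1);
}

// Consumer: acquire-load the flag; once it reads 1, the payload written
// before the Release_Store is guaranteed to be visible.
bool TryConsume(volatile const v8::base::Atomic32* flag,
                volatile const v8::base::Atomic32* payload,
                v8::base::Atomic32* out) {
  if (v8::base::Acquire_Load(flag) != 1) return false;
  *out = v8::base::NoBarrier_Load(payload);
  return true;
}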


@@ -42,6 +42,9 @@
#elif defined(__ARMEL__)
#define V8_HOST_ARCH_ARM 1
#define V8_HOST_ARCH_32_BIT 1
#elif defined(__mips64)
#define V8_HOST_ARCH_MIPS64 1
#define V8_HOST_ARCH_64_BIT 1
#elif defined(__MIPSEB__) || defined(__MIPSEL__)
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
@@ -63,7 +66,8 @@
// in the same way as the host architecture, that is, target the native
// environment as presented by the compiler.
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_X87 && \
!V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
!V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
@@ -72,6 +76,8 @@
#define V8_TARGET_ARCH_ARM64 1
#elif defined(__ARMEL__)
#define V8_TARGET_ARCH_ARM 1
#elif defined(__mips64)
#define V8_TARGET_ARCH_MIPS64 1
#elif defined(__MIPSEB__) || defined(__MIPSEL__)
#define V8_TARGET_ARCH_MIPS 1
#else
@@ -96,6 +102,8 @@
#define V8_TARGET_ARCH_64_BIT 1
#elif V8_TARGET_ARCH_MIPS
#define V8_TARGET_ARCH_32_BIT 1
#elif V8_TARGET_ARCH_MIPS64
#define V8_TARGET_ARCH_64_BIT 1
#elif V8_TARGET_ARCH_X87
#define V8_TARGET_ARCH_32_BIT 1
#else
@@ -123,6 +131,9 @@
#if (V8_TARGET_ARCH_MIPS && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_MIPS))
#error Target architecture mips is only supported on mips and ia32 host
#endif
#if (V8_TARGET_ARCH_MIPS64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_MIPS64))
#error Target architecture mips64 is only supported on mips64 and x64 host
#endif
// Determine architecture endianness.
#if V8_TARGET_ARCH_IA32
@@ -139,6 +150,8 @@
#else
#define V8_TARGET_LITTLE_ENDIAN 1
#endif
#elif V8_TARGET_ARCH_MIPS64
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_X87
#define V8_TARGET_LITTLE_ENDIAN 1
#else


@@ -56,7 +56,8 @@ static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
#endif // !V8_LIBC_MSVCRT
#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 || V8_HOST_ARCH_MIPS
#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 \
|| V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
#if V8_OS_LINUX
@@ -206,7 +207,7 @@ class CPUInfo V8_FINAL {
size_t datalen_;
};
#if V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS
#if V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
// Checks that a space-separated list of items contains one given 'item'.
static bool HasListItem(const char* list, const char* item) {
@@ -232,7 +233,7 @@ static bool HasListItem(const char* list, const char* item) {
return false;
}
#endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS
#endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
#endif // V8_OS_LINUX
@@ -454,7 +455,7 @@ CPU::CPU() : stepping_(0),
#endif // V8_OS_LINUX
#elif V8_HOST_ARCH_MIPS
#elif V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
// Simple detection of FPU at runtime for Linux.
// It is based on /proc/cpuinfo, which reveals hardware configuration


@@ -290,6 +290,8 @@ void OS::DebugBreak() {
asm("brk 0");
#elif V8_HOST_ARCH_MIPS
asm("break");
#elif V8_HOST_ARCH_MIPS64
asm("break");
#elif V8_HOST_ARCH_IA32
#if defined(__native_client__)
asm("hlt");


@@ -105,6 +105,12 @@ namespace internal {
// List of code stubs only used on MIPS platforms.
#if V8_TARGET_ARCH_MIPS
#define CODE_STUB_LIST_MIPS(V) \
V(RegExpCEntry) \
V(DirectCEntry) \
V(StoreRegistersState) \
V(RestoreRegistersState)
#elif V8_TARGET_ARCH_MIPS64
#define CODE_STUB_LIST_MIPS(V) \
V(RegExpCEntry) \
V(DirectCEntry) \
@@ -496,6 +502,8 @@ class RuntimeCallHelper {
#include "src/arm/code-stubs-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/code-stubs-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/code-stubs-mips64.h"
#elif V8_TARGET_ARCH_X87
#include "src/x87/code-stubs-x87.h"
#else

View File

@@ -55,6 +55,8 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
#include "src/arm/codegen-arm.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/codegen-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/codegen-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/codegen-x87.h" // NOLINT
#else


@@ -577,7 +577,7 @@ DEFINE_BOOL(debug_sim, false, "Enable debugging the simulator")
DEFINE_BOOL(check_icache, false,
"Check icache flushes in ARM and MIPS simulator")
DEFINE_INT(stop_sim_at, 0, "Simulator stop after x number of instructions")
#ifdef V8_TARGET_ARCH_ARM64
#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_MIPS64)
DEFINE_INT(sim_stack_alignment, 16,
"Stack alignment in bytes in simulator. This must be a power of two "
"and it must be at least 16. 16 is default.")
@@ -586,7 +586,8 @@ DEFINE_INT(sim_stack_alignment, 8,
"Stack alingment in bytes in simulator (4 or 8, 8 is default)")
#endif
DEFINE_INT(sim_stack_size, 2 * MB / KB,
"Stack size of the ARM64 simulator in kBytes (default is 2 MB)")
"Stack size of the ARM64 and MIPS64 simulator "
"in kBytes (default is 2 MB)")
DEFINE_BOOL(log_regs_modified, true,
"When logging register values, only print modified registers.")
DEFINE_BOOL(log_colour, true, "When logging, try to use coloured output.")


@@ -19,6 +19,8 @@
#include "src/arm/frames-arm.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/frames-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/frames-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/frames-x87.h" // NOLINT
#else


@@ -115,6 +115,9 @@ class FullCodeGenerator: public AstVisitor {
#elif V8_TARGET_ARCH_MIPS
static const int kCodeSizeMultiplier = 149;
static const int kBootCodeSizeMultiplier = 120;
#elif V8_TARGET_ARCH_MIPS64
static const int kCodeSizeMultiplier = 149;
static const int kBootCodeSizeMultiplier = 120;
#else
#error Unsupported target architecture.
#endif
@@ -320,6 +323,13 @@
Label* if_true,
Label* if_false,
Label* fall_through);
#elif V8_TARGET_ARCH_MIPS64
void Split(Condition cc,
Register lhs,
const Operand& rhs,
Label* if_true,
Label* if_false,
Label* fall_through);
#else // All non-mips arch.
void Split(Condition cc,
Label* if_true,


@@ -1094,6 +1094,8 @@ class DebugInfoSection : public DebugSection {
UNIMPLEMENTED();
#elif V8_TARGET_ARCH_MIPS
UNIMPLEMENTED();
#elif V8_TARGET_ARCH_MIPS64
UNIMPLEMENTED();
#else
#error Unsupported target architecture.
#endif


@@ -48,6 +48,9 @@ namespace internal {
#if (V8_TARGET_ARCH_MIPS && !V8_HOST_ARCH_MIPS)
#define USE_SIMULATOR 1
#endif
#if (V8_TARGET_ARCH_MIPS64 && !V8_HOST_ARCH_MIPS64)
#define USE_SIMULATOR 1
#endif
#endif
// Determine whether the architecture uses an out-of-line constant pool.
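When USE_SIMULATOR is defined, generated code is run through the architecture simulator rather than natively. A hedged sketch of the call-site difference, modeled on V8's CALL_GENERATED_CODE pattern (the entry signature and argument count here are illustrative assumptions):

#if defined(USE_SIMULATOR)
// Route the call through the mips64 simulator.
int64_t result = Simulator::current(isolate)->Call(
    FUNCTION_ADDR(entry), 4, p0, p1, p2, p3);
#else
// Native hardware: call the generated entry point directly.
int64_t result = entry(p0, p1, p2, p3);
#endif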


@@ -39,6 +39,10 @@
#include "src/regexp-macro-assembler.h" // NOLINT
#include "src/mips/regexp-macro-assembler-mips.h" // NOLINT
#endif
#if V8_TARGET_ARCH_MIPS64 && !V8_INTERPRETED_REGEXP
#include "src/regexp-macro-assembler.h"
#include "src/mips64/regexp-macro-assembler-mips64.h"
#endif
namespace v8 {
namespace internal {


@@ -19,6 +19,8 @@
#include "src/arm/lithium-arm.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/lithium-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/lithium-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/lithium-x87.h" // NOLINT
#else


@@ -52,6 +52,8 @@
#include "src/arm/lithium-codegen-arm.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/lithium-codegen-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/lithium-codegen-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/lithium-codegen-x87.h" // NOLINT
#else


@@ -1836,7 +1836,8 @@ bool Isolate::Init(Deserializer* des) {
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
Simulator::Initialize(this);
#endif
#endif


@@ -81,7 +81,8 @@ class Debugger;
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
!defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
!defined(__mips__) && V8_TARGET_ARCH_MIPS || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS64
class Redirection;
class Simulator;
#endif
@@ -293,7 +294,8 @@ class ThreadLocalTop BASE_EMBEDDED {
#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
V8_TARGET_ARCH_ARM64 && !defined(__aarch64__) || \
V8_TARGET_ARCH_MIPS && !defined(__mips__)
V8_TARGET_ARCH_MIPS && !defined(__mips__) || \
V8_TARGET_ARCH_MIPS64 && !defined(__mips__)
#define ISOLATE_INIT_SIMULATOR_LIST(V) \
V(bool, simulator_initialized, false) \
@@ -392,7 +394,8 @@ class Isolate {
thread_state_(NULL),
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
!defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
!defined(__mips__) && V8_TARGET_ARCH_MIPS || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS64
simulator_(NULL),
#endif
next_(NULL),
@@ -406,7 +409,8 @@
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
!defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
!defined(__mips__) && V8_TARGET_ARCH_MIPS || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS64
FIELD_ACCESSOR(Simulator*, simulator)
#endif
@@ -422,7 +426,8 @@
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
!defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
!defined(__mips__) && V8_TARGET_ARCH_MIPS || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS64
Simulator* simulator_;
#endif


@@ -32,6 +32,8 @@
#include "src/arm/regexp-macro-assembler-arm.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/regexp-macro-assembler-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/regexp-macro-assembler-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/regexp-macro-assembler-x87.h" // NOLINT
#else
@@ -6069,6 +6071,9 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
#elif V8_TARGET_ARCH_MIPS
RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2,
zone);
#elif V8_TARGET_ARCH_MIPS64
RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2,
zone);
#elif V8_TARGET_ARCH_X87
RegExpMacroAssemblerX87 macro_assembler(mode, (data->capture_count + 1) * 2,
zone);


@@ -17,6 +17,8 @@
#include "src/arm/lithium-arm.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/lithium-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/lithium-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/lithium-x87.h" // NOLINT
#else


@@ -18,6 +18,8 @@
#include "src/arm/lithium-arm.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/lithium-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/lithium-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/lithium-x87.h" // NOLINT
#else


@@ -21,6 +21,9 @@
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/lithium-mips.h" // NOLINT
#include "src/mips/lithium-codegen-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/lithium-mips64.h" // NOLINT
#include "src/mips64/lithium-codegen-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/lithium-x87.h" // NOLINT
#include "src/x87/lithium-codegen-x87.h" // NOLINT


@@ -23,6 +23,9 @@
#elif V8_TARGET_ARCH_ARM64
#include "src/arm64/lithium-arm64.h" // NOLINT
#include "src/arm64/lithium-codegen-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/lithium-mips64.h" // NOLINT
#include "src/mips64/lithium-codegen-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/lithium-x87.h" // NOLINT
#include "src/x87/lithium-codegen-x87.h" // NOLINT


@@ -71,6 +71,13 @@ const int kInvalidProtoDepth = -1;
#include "src/mips/assembler-mips-inl.h"
#include "src/code.h" // must be after assembler_*.h
#include "src/mips/macro-assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/constants-mips64.h"
#include "src/assembler.h"
#include "src/mips64/assembler-mips64.h" // NOLINT
#include "src/mips64/assembler-mips64-inl.h"
#include "src/code.h" // must be after assembler_*.h
#include "src/mips64/macro-assembler-mips64.h"
#elif V8_TARGET_ARCH_X87
#include "src/assembler.h"
#include "src/x87/assembler-x87.h"

src/mips64/OWNERS (new file, 5 lines)

@@ -0,0 +1,5 @@
plind44@gmail.com
gergely@homejinni.com
palfia@homejinni.com
kilvadyb@homejinni.com
Dusan.Milosavljevic@rt-rk.com


@@ -0,0 +1,453 @@
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
#ifndef V8_MIPS64_ASSEMBLER_MIPS64_INL_H_
#define V8_MIPS64_ASSEMBLER_MIPS64_INL_H_
#include "src/mips64/assembler-mips64.h"
#include "src/assembler.h"
#include "src/debug.h"
namespace v8 {
namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return IsSupported(FPU); }
// -----------------------------------------------------------------------------
// Operand and MemOperand.
Operand::Operand(int64_t immediate, RelocInfo::Mode rmode) {
rm_ = no_reg;
imm64_ = immediate;
rmode_ = rmode;
}
Operand::Operand(const ExternalReference& f) {
rm_ = no_reg;
imm64_ = reinterpret_cast<int64_t>(f.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
Operand::Operand(Smi* value) {
rm_ = no_reg;
imm64_ = reinterpret_cast<intptr_t>(value);
rmode_ = RelocInfo::NONE32;
}
Operand::Operand(Register rm) {
rm_ = rm;
}
bool Operand::is_reg() const {
return rm_.is_valid();
}
int Register::NumAllocatableRegisters() {
return kMaxNumAllocatableRegisters;
}
int DoubleRegister::NumRegisters() {
return FPURegister::kMaxNumRegisters;
}
int DoubleRegister::NumAllocatableRegisters() {
return FPURegister::kMaxNumAllocatableRegisters;
}
int FPURegister::ToAllocationIndex(FPURegister reg) {
ASSERT(reg.code() % 2 == 0);
ASSERT(reg.code() / 2 < kMaxNumAllocatableRegisters);
ASSERT(reg.is_valid());
ASSERT(!reg.is(kDoubleRegZero));
ASSERT(!reg.is(kLithiumScratchDouble));
return (reg.code() / 2);
}
// -----------------------------------------------------------------------------
// RelocInfo.
void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
if (IsInternalReference(rmode_)) {
// Absolute code pointer inside code object moves with the code object.
byte* p = reinterpret_cast<byte*>(pc_);
int count = Assembler::RelocateInternalReference(p, delta);
CpuFeatures::FlushICache(p, count * sizeof(uint32_t));
}
}
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_address_address() {
ASSERT(IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) ||
rmode_ == EMBEDDED_OBJECT ||
rmode_ == EXTERNAL_REFERENCE);
// Read the address of the word containing the target_address in an
// instruction stream.
// The only architecture-independent user of this function is the serializer.
// The serializer uses it to find out how many raw bytes of instruction to
// output before the next target.
// For an instruction like LUI/ORI where the target bits are mixed into the
// instruction bits, the size of the target will be zero, indicating that the
// serializer should not step forward in memory after a target is resolved
// and written. In this case the target_address_address function should
// return the end of the instructions to be patched, allowing the
// deserializer to deserialize the instructions as raw bytes and put them in
// place, ready to be patched with the target. After jump optimization,
// that is the address of the instruction that follows J/JAL/JR/JALR
// instruction.
// return reinterpret_cast<Address>(
// pc_ + Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize);
return reinterpret_cast<Address>(
pc_ + Assembler::kInstructionsFor64BitConstant * Assembler::kInstrSize);
}
Address RelocInfo::constant_pool_entry_address() {
UNREACHABLE();
return NULL;
}
int RelocInfo::target_address_size() {
return Assembler::kSpecialTargetSize;
}
void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
}
}
Address Assembler::target_address_from_return_address(Address pc) {
return pc - kCallTargetAddressOffset;
}
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
}
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Handle<Object>(reinterpret_cast<Object**>(
Assembler::target_address_at(pc_, host_)));
}
void RelocInfo::set_target_object(Object* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
ASSERT(!target->IsConsString());
Assembler::set_target_address_at(pc_, host_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWrite(
host(), &Memory::Object_at(pc_), HeapObject::cast(target));
}
}
Address RelocInfo::target_reference() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_runtime_entry(Assembler* origin) {
ASSERT(IsRuntimeEntry(rmode_));
return target_address();
}
void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
ASSERT(IsRuntimeEntry(rmode_));
if (target_address() != target)
set_target_address(target, write_barrier_mode, icache_flush_mode);
}
Handle<Cell> RelocInfo::target_cell_handle() {
ASSERT(rmode_ == RelocInfo::CELL);
Address address = Memory::Address_at(pc_);
return Handle<Cell>(reinterpret_cast<Cell**>(address));
}
Cell* RelocInfo::target_cell() {
ASSERT(rmode_ == RelocInfo::CELL);
return Cell::FromValueAddress(Memory::Address_at(pc_));
}
void RelocInfo::set_target_cell(Cell* cell,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
ASSERT(rmode_ == RelocInfo::CELL);
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
// evacuation candidate.
host()->GetHeap()->incremental_marking()->RecordWrite(
host(), NULL, cell);
}
}
static const int kNoCodeAgeSequenceLength = 9 * Assembler::kInstrSize;
Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
UNREACHABLE();  // This should never be reached on MIPS64.
return Handle<Object>();
}
Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
return Code::GetCodeFromTargetAddress(
Assembler::target_address_at(pc_ + Assembler::kInstrSize, host_));
}
void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
Assembler::set_target_address_at(pc_ + Assembler::kInstrSize,
host_,
stub->instruction_start());
}
Address RelocInfo::call_address() {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
// The pc_ offset of 0 assumes mips patched return sequence per
// debug-mips64.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
// debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
return Assembler::target_address_at(pc_, host_);
}
void RelocInfo::set_call_address(Address target) {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
// The pc_ offset of 0 assumes mips patched return sequence per
// debug-mips64.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
// debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
Assembler::set_target_address_at(pc_, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
}
}
Object* RelocInfo::call_object() {
return *call_object_address();
}
Object** RelocInfo::call_object_address() {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
return reinterpret_cast<Object**>(pc_ + 6 * Assembler::kInstrSize);
}
void RelocInfo::set_call_object(Object* target) {
*call_object_address() = target;
}
void RelocInfo::WipeOut() {
ASSERT(IsEmbeddedObject(rmode_) ||
IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) ||
IsExternalReference(rmode_));
Assembler::set_target_address_at(pc_, host_, NULL);
}
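// The patched return sequence materializes the call target with a
// multi-instruction lui/ori/dsll/ori load before the jalr, which is why
// five instructions are checked below.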
bool RelocInfo::IsPatchedReturnSequence() {
Instr instr0 = Assembler::instr_at(pc_); // lui.
Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize); // ori.
Instr instr2 = Assembler::instr_at(pc_ + 2 * Assembler::kInstrSize); // dsll.
Instr instr3 = Assembler::instr_at(pc_ + 3 * Assembler::kInstrSize); // ori.
Instr instr4 = Assembler::instr_at(pc_ + 4 * Assembler::kInstrSize); // jalr.
bool patched_return = ((instr0 & kOpcodeMask) == LUI &&
(instr1 & kOpcodeMask) == ORI &&
(instr2 & kFunctionFieldMask) == DSLL &&
(instr3 & kOpcodeMask) == ORI &&
(instr4 & kFunctionFieldMask) == JALR);
return patched_return;
}
bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
Instr current_instr = Assembler::instr_at(pc_);
return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
}
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::CELL) {
visitor->VisitCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
isolate->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
}
}
template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitEmbeddedPointer(heap, this);
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::CELL) {
StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()))) {
StaticVisitor::VisitDebugTarget(heap, this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}
}
// -----------------------------------------------------------------------------
// Assembler.
void Assembler::CheckBuffer() {
if (buffer_space() <= kGap) {
GrowBuffer();
}
}
void Assembler::CheckTrampolinePoolQuick() {
if (pc_offset() >= next_buffer_check_) {
CheckTrampolinePool();
}
}
void Assembler::emit(Instr x) {
if (!is_buffer_growth_blocked()) {
CheckBuffer();
}
*reinterpret_cast<Instr*>(pc_) = x;
pc_ += kInstrSize;
CheckTrampolinePoolQuick();
}
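// The 64-bit overload below writes one doubleword into the instruction
// stream and advances pc_ by two 32-bit instruction slots.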
void Assembler::emit(uint64_t x) {
if (!is_buffer_growth_blocked()) {
CheckBuffer();
}
*reinterpret_cast<uint64_t*>(pc_) = x;
pc_ += kInstrSize * 2;
CheckTrampolinePoolQuick();
}
} } // namespace v8::internal
#endif  // V8_MIPS64_ASSEMBLER_MIPS64_INL_H_

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,450 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MIPS64_CODE_STUBS_MIPS64_H_
#define V8_MIPS64_CODE_STUBS_MIPS64_H_
#include "src/ic-inl.h"
namespace v8 {
namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
: PlatformCodeStub(isolate), save_doubles_(save_fp) {}
void Generate(MacroAssembler* masm);
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
SaveFPRegsMode save_doubles_;
Major MajorKey() const { return StoreBufferOverflow; }
int MinorKey() const { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
};
class StringHelper : public AllStatic {
public:
// Generate code for copying a large number of characters. This function
// is allowed to spend extra time setting up conditions to make copying
// faster. Copying of overlapping regions is not supported.
// Dest register ends at the position after the last character written.
static void GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch,
String::Encoding encoding);
// Generate string hash.
static void GenerateHashInit(MacroAssembler* masm,
Register hash,
Register character);
static void GenerateHashAddCharacter(MacroAssembler* masm,
Register hash,
Register character);
static void GenerateHashGetHash(MacroAssembler* masm,
Register hash);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
class SubStringStub: public PlatformCodeStub {
public:
explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
private:
Major MajorKey() const { return SubString; }
int MinorKey() const { return 0; }
void Generate(MacroAssembler* masm);
};
class StoreRegistersStateStub: public PlatformCodeStub {
public:
explicit StoreRegistersStateStub(Isolate* isolate, SaveFPRegsMode with_fp)
: PlatformCodeStub(isolate), save_doubles_(with_fp) {}
static void GenerateAheadOfTime(Isolate* isolate);
private:
Major MajorKey() const { return StoreRegistersState; }
int MinorKey() const { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
SaveFPRegsMode save_doubles_;
void Generate(MacroAssembler* masm);
};
class RestoreRegistersStateStub: public PlatformCodeStub {
public:
explicit RestoreRegistersStateStub(Isolate* isolate, SaveFPRegsMode with_fp)
: PlatformCodeStub(isolate), save_doubles_(with_fp) {}
static void GenerateAheadOfTime(Isolate* isolate);
private:
Major MajorKey() const { return RestoreRegistersState; }
int MinorKey() const { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
SaveFPRegsMode save_doubles_;
void Generate(MacroAssembler* masm);
};
class StringCompareStub: public PlatformCodeStub {
public:
explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
// Compares two flat ASCII strings and returns the result in v0.
static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register left,
Register right,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4);
// Compares two flat ASCII strings for equality and returns the result
// in v0.
static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
Register left,
Register right,
Register scratch1,
Register scratch2,
Register scratch3);
private:
virtual Major MajorKey() const { return StringCompare; }
virtual int MinorKey() const { return 0; }
virtual void Generate(MacroAssembler* masm);
static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
Register left,
Register right,
Register length,
Register scratch1,
Register scratch2,
Register scratch3,
Label* chars_not_equal);
};
// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub
// so you don't have to set up the frame.
class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
public:
WriteInt32ToHeapNumberStub(Isolate* isolate,
Register the_int,
Register the_heap_number,
Register scratch,
Register scratch2)
: PlatformCodeStub(isolate),
the_int_(the_int),
the_heap_number_(the_heap_number),
scratch_(scratch),
sign_(scratch2) {
ASSERT(IntRegisterBits::is_valid(the_int_.code()));
ASSERT(HeapNumberRegisterBits::is_valid(the_heap_number_.code()));
ASSERT(ScratchRegisterBits::is_valid(scratch_.code()));
ASSERT(SignRegisterBits::is_valid(sign_.code()));
}
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
private:
Register the_int_;
Register the_heap_number_;
Register scratch_;
Register sign_;
// Minor key encoding in 16 bits.
class IntRegisterBits: public BitField<int, 0, 4> {};
class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
class ScratchRegisterBits: public BitField<int, 8, 4> {};
class SignRegisterBits: public BitField<int, 12, 4> {};
Major MajorKey() const { return WriteInt32ToHeapNumber; }
int MinorKey() const {
// Encode the parameters in a unique 16 bit value.
return IntRegisterBits::encode(the_int_.code())
| HeapNumberRegisterBits::encode(the_heap_number_.code())
| ScratchRegisterBits::encode(scratch_.code())
| SignRegisterBits::encode(sign_.code());
}
void Generate(MacroAssembler* masm);
};
class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Isolate* isolate,
Register object,
Register value,
Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
: PlatformCodeStub(isolate),
object_(object),
value_(value),
address_(address),
remembered_set_action_(remembered_set_action),
save_fp_regs_mode_(fp_mode),
regs_(object, // An input reg.
address, // An input reg.
value) { // One scratch reg.
}
enum Mode {
STORE_BUFFER_ONLY,
INCREMENTAL,
INCREMENTAL_COMPACTION
};
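// The stub's mode is encoded in its first two instructions: GetMode()
// below decodes them, and Patch() swaps nops and branches to move between
// STORE_BUFFER_ONLY and the incremental modes.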
virtual bool SometimesSetsUpAFrame() { return false; }
static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
const unsigned offset = masm->instr_at(pos) & kImm16Mask;
masm->instr_at_put(pos, BNE | (zero_reg.code() << kRsShift) |
(zero_reg.code() << kRtShift) | (offset & kImm16Mask));
ASSERT(Assembler::IsBne(masm->instr_at(pos)));
}
static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
const unsigned offset = masm->instr_at(pos) & kImm16Mask;
masm->instr_at_put(pos, BEQ | (zero_reg.code() << kRsShift) |
(zero_reg.code() << kRtShift) | (offset & kImm16Mask));
ASSERT(Assembler::IsBeq(masm->instr_at(pos)));
}
static Mode GetMode(Code* stub) {
Instr first_instruction = Assembler::instr_at(stub->instruction_start());
Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
2 * Assembler::kInstrSize);
if (Assembler::IsBeq(first_instruction)) {
return INCREMENTAL;
}
ASSERT(Assembler::IsBne(first_instruction));
if (Assembler::IsBeq(second_instruction)) {
return INCREMENTAL_COMPACTION;
}
ASSERT(Assembler::IsBne(second_instruction));
return STORE_BUFFER_ONLY;
}
static void Patch(Code* stub, Mode mode) {
MacroAssembler masm(NULL,
stub->instruction_start(),
stub->instruction_size());
switch (mode) {
case STORE_BUFFER_ONLY:
ASSERT(GetMode(stub) == INCREMENTAL ||
GetMode(stub) == INCREMENTAL_COMPACTION);
PatchBranchIntoNop(&masm, 0);
PatchBranchIntoNop(&masm, 2 * Assembler::kInstrSize);
break;
case INCREMENTAL:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
PatchNopIntoBranch(&masm, 0);
break;
case INCREMENTAL_COMPACTION:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
PatchNopIntoBranch(&masm, 2 * Assembler::kInstrSize);
break;
}
ASSERT(GetMode(stub) == mode);
CpuFeatures::FlushICache(stub->instruction_start(),
4 * Assembler::kInstrSize);
}
private:
// This is a helper class for freeing up 3 scratch registers. The input is
// two registers that must be preserved and one scratch register provided by
// the caller.
class RegisterAllocation {
public:
RegisterAllocation(Register object,
Register address,
Register scratch0)
: object_(object),
address_(address),
scratch0_(scratch0) {
ASSERT(!AreAliased(scratch0, object, address, no_reg));
scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
}
void Save(MacroAssembler* masm) {
ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
// We don't have to save scratch0_ because it was given to us as
// a scratch register.
masm->push(scratch1_);
}
void Restore(MacroAssembler* masm) {
masm->pop(scratch1_);
}
// If we have to call into C then we need to save and restore all caller-
// saved registers that were not already preserved. The scratch registers
// will be restored by other means so we don't bother pushing them here.
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
masm->MultiPush((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
if (mode == kSaveFPRegs) {
masm->MultiPushFPU(kCallerSavedFPU);
}
}
inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
masm->MultiPopFPU(kCallerSavedFPU);
}
masm->MultiPop((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
}
inline Register object() { return object_; }
inline Register address() { return address_; }
inline Register scratch0() { return scratch0_; }
inline Register scratch1() { return scratch1_; }
private:
Register object_;
Register address_;
Register scratch0_;
Register scratch1_;
friend class RecordWriteStub;
};
enum OnNoNeedToInformIncrementalMarker {
kReturnOnNoNeedToInformIncrementalMarker,
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
void Generate(MacroAssembler* masm);
void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
Major MajorKey() const { return RecordWrite; }
int MinorKey() const {
return ObjectBits::encode(object_.code()) |
ValueBits::encode(value_.code()) |
AddressBits::encode(address_.code()) |
RememberedSetActionBits::encode(remembered_set_action_) |
SaveFPRegsModeBits::encode(save_fp_regs_mode_);
}
void Activate(Code* code) {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
class ObjectBits: public BitField<int, 0, 5> {};
class ValueBits: public BitField<int, 5, 5> {};
class AddressBits: public BitField<int, 10, 5> {};
class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
Register object_;
Register value_;
Register address_;
RememberedSetAction remembered_set_action_;
SaveFPRegsMode save_fp_regs_mode_;
Label slow_;
RegisterAllocation regs_;
};
// Trampoline stub to call into native code. To call safely into native code
// in the presence of compacting GC (which can move code objects), we need to
// keep the code that called into native code pinned in memory. Currently the
// simplest approach is to generate such a stub early enough that it can never
// be moved by GC.
class DirectCEntryStub: public PlatformCodeStub {
public:
explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
void Generate(MacroAssembler* masm);
void GenerateCall(MacroAssembler* masm, Register target);
private:
Major MajorKey() const { return DirectCEntry; }
int MinorKey() const { return 0; }
bool NeedsImmovableCode() { return true; }
};
class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
: PlatformCodeStub(isolate), mode_(mode) { }
void Generate(MacroAssembler* masm);
static void GenerateNegativeLookup(MacroAssembler* masm,
Label* miss,
Label* done,
Register receiver,
Register properties,
Handle<Name> name,
Register scratch0);
static void GeneratePositiveLookup(MacroAssembler* masm,
Label* miss,
Label* done,
Register elements,
Register name,
Register r0,
Register r1);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
static const int kInlinedProbes = 4;
static const int kTotalProbes = 20;
static const int kCapacityOffset =
NameDictionary::kHeaderSize +
NameDictionary::kCapacityIndex * kPointerSize;
static const int kElementsStartOffset =
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
Major MajorKey() const { return NameDictionaryLookup; }
int MinorKey() const { return LookupModeBits::encode(mode_); }
class LookupModeBits: public BitField<LookupMode, 0, 1> {};
LookupMode mode_;
};
} } // namespace v8::internal
#endif  // V8_MIPS64_CODE_STUBS_MIPS64_H_
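The MinorKey() methods above pack register codes and flags with V8's BitField helper, where BitField&lt;T, shift, size&gt;::encode(v) shifts v into its field. A sketch of the 16-bit layout WriteInt32ToHeapNumberStub produces (illustrative arithmetic only):

// bits 0-3   IntRegisterBits         <- the_int_.code()
// bits 4-7   HeapNumberRegisterBits  <- the_heap_number_.code()
// bits 8-11  ScratchRegisterBits     <- scratch_.code()
// bits 12-15 SignRegisterBits        <- sign_.code()
int minor_key = the_int_.code() | (the_heap_number_.code() << 4) |
                (scratch_.code() << 8) | (sign_.code() << 12);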

src/mips64/codegen-mips64.cc (new file, 1095 lines)

File diff suppressed because it is too large.


@@ -0,0 +1,54 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MIPS64_CODEGEN_MIPS64_H_
#define V8_MIPS64_CODEGEN_MIPS64_H_
#include "src/ast.h"
#include "src/ic-inl.h"
namespace v8 {
namespace internal {
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
class StringCharLoadGenerator : public AllStatic {
public:
// Generates the code for handling different string types and loading the
// indexed character into |result|. We expect |index| as untagged input and
// |result| as untagged output.
static void Generate(MacroAssembler* masm,
Register string,
Register index,
Register result,
Label* call_runtime);
private:
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
class MathExpGenerator : public AllStatic {
public:
// Register input isn't modified. All other registers are clobbered.
static void EmitMathExp(MacroAssembler* masm,
DoubleRegister input,
DoubleRegister result,
DoubleRegister double_scratch1,
DoubleRegister double_scratch2,
Register temp1,
Register temp2,
Register temp3);
private:
DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
};
} } // namespace v8::internal
#endif  // V8_MIPS64_CODEGEN_MIPS64_H_


@@ -0,0 +1,358 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS64
#include "src/mips64/constants-mips64.h"
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
// Registers.
// These register names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
const char* Registers::names_[kNumSimuRegisters] = {
"zero_reg",
"at",
"v0", "v1",
"a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
"t0", "t1", "t2", "t3",
"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
"t8", "t9",
"k0", "k1",
"gp",
"sp",
"fp",
"ra",
"LO", "HI",
"pc"
};
// List of alias names which can be used when referring to MIPS registers.
const Registers::RegisterAlias Registers::aliases_[] = {
{0, "zero"},
{23, "cp"},
{30, "s8"},
{30, "s8_fp"},
{kInvalidRegister, NULL}
};
const char* Registers::Name(int reg) {
const char* result;
if ((0 <= reg) && (reg < kNumSimuRegisters)) {
result = names_[reg];
} else {
result = "noreg";
}
return result;
}
int Registers::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumSimuRegisters; i++) {
if (strcmp(names_[i], name) == 0) {
return i;
}
}
// Look through the alias names.
int i = 0;
while (aliases_[i].reg != kInvalidRegister) {
if (strcmp(aliases_[i].name, name) == 0) {
return aliases_[i].reg;
}
i++;
}
// No register with the requested name found.
return kInvalidRegister;
}
const char* FPURegisters::names_[kNumFPURegisters] = {
"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11",
"f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
"f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
};
// List of alias names which can be used when referring to MIPS FPU registers.
const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
{kInvalidRegister, NULL}
};
const char* FPURegisters::Name(int creg) {
const char* result;
if ((0 <= creg) && (creg < kNumFPURegisters)) {
result = names_[creg];
} else {
result = "nocreg";
}
return result;
}
int FPURegisters::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumFPURegisters; i++) {
if (strcmp(names_[i], name) == 0) {
return i;
}
}
// Look through the alias names.
int i = 0;
while (aliases_[i].creg != kInvalidRegister) {
if (strcmp(aliases_[i].name, name) == 0) {
return aliases_[i].creg;
}
i++;
}
// No FPU register with the requested name found.
return kInvalidFPURegister;
}
// -----------------------------------------------------------------------------
// Instructions.
bool Instruction::IsForbiddenInBranchDelay() const {
const int op = OpcodeFieldRaw();
switch (op) {
case J:
case JAL:
case BEQ:
case BNE:
case BLEZ:
case BGTZ:
case BEQL:
case BNEL:
case BLEZL:
case BGTZL:
return true;
case REGIMM:
switch (RtFieldRaw()) {
case BLTZ:
case BGEZ:
case BLTZAL:
case BGEZAL:
return true;
default:
return false;
}
break;
case SPECIAL:
switch (FunctionFieldRaw()) {
case JR:
case JALR:
return true;
default:
return false;
}
break;
default:
return false;
}
}
bool Instruction::IsLinkingInstruction() const {
const int op = OpcodeFieldRaw();
switch (op) {
case JAL:
return true;
case REGIMM:
switch (RtFieldRaw()) {
case BGEZAL:
case BLTZAL:
return true;
default:
return false;
}
case SPECIAL:
switch (FunctionFieldRaw()) {
case JALR:
return true;
default:
return false;
}
default:
return false;
}
}
bool Instruction::IsTrap() const {
if (OpcodeFieldRaw() != SPECIAL) {
return false;
} else {
switch (FunctionFieldRaw()) {
case BREAK:
case TGE:
case TGEU:
case TLT:
case TLTU:
case TEQ:
case TNE:
return true;
default:
return false;
}
}
}
Instruction::Type Instruction::InstructionType() const {
switch (OpcodeFieldRaw()) {
case SPECIAL:
switch (FunctionFieldRaw()) {
case JR:
case JALR:
case BREAK:
case SLL:
case DSLL:
case DSLL32:
case SRL:
case DSRL:
case DSRL32:
case SRA:
case DSRA:
case DSRA32:
case SLLV:
case DSLLV:
case SRLV:
case DSRLV:
case SRAV:
case DSRAV:
case MFHI:
case MFLO:
case MULT:
case DMULT:
case MULTU:
case DMULTU:
case DIV:
case DDIV:
case DIVU:
case DDIVU:
case ADD:
case DADD:
case ADDU:
case DADDU:
case SUB:
case DSUB:
case SUBU:
case DSUBU:
case AND:
case OR:
case XOR:
case NOR:
case SLT:
case SLTU:
case TGE:
case TGEU:
case TLT:
case TLTU:
case TEQ:
case TNE:
case MOVZ:
case MOVN:
case MOVCI:
return kRegisterType;
default:
return kUnsupported;
}
break;
case SPECIAL2:
switch (FunctionFieldRaw()) {
case MUL:
case CLZ:
return kRegisterType;
default:
return kUnsupported;
}
break;
case SPECIAL3:
switch (FunctionFieldRaw()) {
case INS:
case EXT:
return kRegisterType;
default:
return kUnsupported;
}
break;
case COP1: // Coprocessor instructions.
switch (RsFieldRawNoAssert()) {
case BC1: // Branch on coprocessor condition.
return kImmediateType;
default:
return kRegisterType;
}
break;
case COP1X:
return kRegisterType;
// 16 bits Immediate type instructions. e.g.: addi dest, src, imm16.
case REGIMM:
case BEQ:
case BNE:
case BLEZ:
case BGTZ:
case ADDI:
case DADDI:
case ADDIU:
case DADDIU:
case SLTI:
case SLTIU:
case ANDI:
case ORI:
case XORI:
case LUI:
case BEQL:
case BNEL:
case BLEZL:
case BGTZL:
case LB:
case LH:
case LWL:
case LW:
case LWU:
case LD:
case LBU:
case LHU:
case LWR:
case SB:
case SH:
case SWL:
case SW:
case SD:
case SWR:
case LWC1:
case LDC1:
case SWC1:
case SDC1:
return kImmediateType;
// 26 bits immediate type instructions. e.g.: j imm26.
case J:
case JAL:
return kJumpType;
default:
return kUnsupported;
}
return kUnsupported;
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS64

View File

@ -0,0 +1,874 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MIPS_CONSTANTS_H_
#define V8_MIPS_CONSTANTS_H_
// UNIMPLEMENTED_ macro for MIPS.
#ifdef DEBUG
#define UNIMPLEMENTED_MIPS() \
v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
__FILE__, __LINE__, __func__)
#else
#define UNIMPLEMENTED_MIPS()
#endif
#define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
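In debug builds the macro above prints the source location of the unported function. A rough standalone approximation (std::printf standing in for V8's PrintF, and the demo names being invented for illustration):

#include <cstdio>

// Demo copy of the debug-build expansion; only the print function differs.
#define UNIMPLEMENTED_MIPS_DEMO() \
  std::printf("%s, \tline %d: \tfunction %s not implemented. \n", \
              __FILE__, __LINE__, __func__)

void SomeUnportedHelper() { UNIMPLEMENTED_MIPS_DEMO(); }

int main() { SomeUnportedHelper(); }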
enum ArchVariants {
kMips32r2,
kMips32r1,
kLoongson,
kMips64r2,
kMips64r1
};
#ifdef _MIPS_ARCH_MIPS64R2
static const ArchVariants kArchVariant = kMips64r2;
#elif defined(_MIPS_ARCH_LOONGSON)
// The loongson flag refers to the LOONGSON architectures based on MIPS-III,
// which predate (and are a subset of) the mips32r2 and r1 architectures.
static const ArchVariants kArchVariant = kLoongson;
#else
static const ArchVariants kArchVariant = kMips64r1;
#endif
// TODO(plind): consider deriving ABI from compiler flags or build system.
// ABI-dependent definitions are made with #define in simulator-mips64.h,
// so the ABI choice must be available to the pre-processor. However, in all
// other cases, we should use the enum AbiVariants with normal if statements.
#define MIPS_ABI_N64 1
// #define MIPS_ABI_O32 1
// The only supported ABIs are O32 and N64.
enum AbiVariants {
kO32,
kN64 // Use upper case N for 'n64' ABI to conform to style standard.
};
#ifdef MIPS_ABI_N64
static const AbiVariants kMipsAbi = kN64;
#else
static const AbiVariants kMipsAbi = kO32;
#endif
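A minimal standalone sketch of the convention stated above: the #define exists only so the preprocessor can see the ABI choice, while ordinary code branches on the enum (declarations repeated here; the slot counts echo kCArgSlotCount near the end of this file):

#include <cstdio>

#define MIPS_ABI_N64 1

enum AbiVariants { kO32, kN64 };

#ifdef MIPS_ABI_N64
static const AbiVariants kMipsAbi = kN64;
#else
static const AbiVariants kMipsAbi = kO32;
#endif

int main() {
  // Preferred in ordinary code: a normal if on the enum, not an #ifdef.
  if (kMipsAbi == kN64) {
    std::printf("n64: no reserved C argument slots\n");
  } else {
    std::printf("O32: four reserved C argument slots\n");
  }
}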
// TODO(plind): consider renaming these ...
#if(defined(__mips_hard_float) && __mips_hard_float != 0)
// Use floating-point coprocessor instructions. This flag is raised when
// -mhard-float is passed to the compiler.
const bool IsMipsSoftFloatABI = false;
#elif(defined(__mips_soft_float) && __mips_soft_float != 0)
// This flag is raised when -msoft-float is passed to the compiler.
// Although FPU is a base requirement for v8, soft-float ABI is used
// on soft-float systems with FPU kernel emulation.
const bool IsMipsSoftFloatABI = true;
#else
const bool IsMipsSoftFloatABI = true;
#endif
#define __STDC_FORMAT_MACROS
#include <inttypes.h>
// Defines constants and accessor classes to assemble, disassemble and
// simulate MIPS64 instructions.
//
// See: MIPS32 Architecture For Programmers
// Volume II: The MIPS32 Instruction Set
// Try www.cs.cornell.edu/courses/cs3410/2008fa/MIPS_Vol2.pdf.
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
// Registers and FPURegisters.
// Number of general purpose registers.
const int kNumRegisters = 32;
const int kInvalidRegister = -1;
// Number of simulated registers, including HI, LO, and pc.
const int kNumSimuRegisters = 35;
// In the simulator, the PC is modeled as an extra register with index 34.
const int kPCRegister = 34;
// Number of coprocessor registers.
const int kNumFPURegisters = 32;
const int kInvalidFPURegister = -1;
// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
const int kFCSRRegister = 31;
const int kInvalidFPUControlRegister = -1;
const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1;
const uint64_t kFPU64InvalidResult =
static_cast<uint64_t>(static_cast<uint64_t>(1) << 63) - 1;
// FCSR constants.
const uint32_t kFCSRInexactFlagBit = 2;
const uint32_t kFCSRUnderflowFlagBit = 3;
const uint32_t kFCSROverflowFlagBit = 4;
const uint32_t kFCSRDivideByZeroFlagBit = 5;
const uint32_t kFCSRInvalidOpFlagBit = 6;
const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
const uint32_t kFCSRFlagMask =
kFCSRInexactFlagMask |
kFCSRUnderflowFlagMask |
kFCSROverflowFlagMask |
kFCSRDivideByZeroFlagMask |
kFCSRInvalidOpFlagMask;
const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
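The mask arithmetic is easy to check in isolation. A small sketch with the constants repeated under shortened names, showing that the XOR strips exactly the inexact bit (set by ordinary rounding) and keeps the four real exception causes:

#include <cstdint>

const uint32_t kInexact   = 1 << 2;
const uint32_t kUnderflow = 1 << 3;
const uint32_t kOverflow  = 1 << 4;
const uint32_t kDivByZero = 1 << 5;
const uint32_t kInvalidOp = 1 << 6;
const uint32_t kFlagMask =
    kInexact | kUnderflow | kOverflow | kDivByZero | kInvalidOp;  // 0b1111100

// 0b1111100 ^ 0b0000100 == 0b1111000: only the inexact bit is removed.
const uint32_t kExceptionMask = kFlagMask ^ kInexact;

static_assert(kExceptionMask ==
                  (kUnderflow | kOverflow | kDivByZero | kInvalidOp),
              "exception mask drops only the inexact flag");

int main() {}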
// 'pref' instruction hints
const int32_t kPrefHintLoad = 0;
const int32_t kPrefHintStore = 1;
const int32_t kPrefHintLoadStreamed = 4;
const int32_t kPrefHintStoreStreamed = 5;
const int32_t kPrefHintLoadRetained = 6;
const int32_t kPrefHintStoreRetained = 7;
const int32_t kPrefHintWritebackInvalidate = 25;
const int32_t kPrefHintPrepareForStore = 30;
// Helper functions for converting between register numbers and names.
class Registers {
public:
// Return the name of the register.
static const char* Name(int reg);
// Lookup the register number for the name provided.
static int Number(const char* name);
struct RegisterAlias {
int reg;
const char* name;
};
static const int64_t kMaxValue = 0x7fffffffffffffffL;
static const int64_t kMinValue = static_cast<int64_t>(0x8000000000000000ULL);
private:
static const char* names_[kNumSimuRegisters];
static const RegisterAlias aliases_[];
};
// Helper functions for converting between register numbers and names.
class FPURegisters {
public:
// Return the name of the register.
static const char* Name(int reg);
// Lookup the register number for the name provided.
static int Number(const char* name);
struct RegisterAlias {
int creg;
const char* name;
};
private:
static const char* names_[kNumFPURegisters];
static const RegisterAlias aliases_[];
};
// -----------------------------------------------------------------------------
// Instructions encoding constants.
// On MIPS all instructions are 32 bits.
typedef int32_t Instr;
// Special Software Interrupt codes when used in the presence of the MIPS
// simulator.
enum SoftwareInterruptCodes {
// Transition to C code.
call_rt_redirected = 0xfffff
};
// On MIPS Simulator breakpoints can have different codes:
// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints,
// the simulator will run through them and print the registers.
// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop()
// instructions (see Assembler::stop()).
// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
// debugger.
const uint32_t kMaxWatchpointCode = 31;
const uint32_t kMaxStopCode = 127;
STATIC_ASSERT(kMaxWatchpointCode < kMaxStopCode);
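One plausible reading of those ranges, as a self-contained sketch (the helper is invented for illustration, not V8 API):

#include <cstdint>
#include <cstdio>

const uint32_t kMaxWatchpointCode = 31;
const uint32_t kMaxStopCode = 127;

const char* ClassifyBreakCode(uint32_t code) {
  if (code <= kMaxWatchpointCode) return "watchpoint";  // print and continue
  if (code <= kMaxStopCode) return "stop";  // Assembler::stop() semantics
  return "break";  // drops into the debugger
}

int main() {
  std::printf("%s %s %s\n", ClassifyBreakCode(5), ClassifyBreakCode(64),
              ClassifyBreakCode(0x40000));
  // Prints: watchpoint stop break
}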
// ----- Fields offset and length.
const int kOpcodeShift = 26;
const int kOpcodeBits = 6;
const int kRsShift = 21;
const int kRsBits = 5;
const int kRtShift = 16;
const int kRtBits = 5;
const int kRdShift = 11;
const int kRdBits = 5;
const int kSaShift = 6;
const int kSaBits = 5;
const int kFunctionShift = 0;
const int kFunctionBits = 6;
const int kLuiShift = 16;
const int kImm16Shift = 0;
const int kImm16Bits = 16;
const int kImm26Shift = 0;
const int kImm26Bits = 26;
const int kImm28Shift = 0;
const int kImm28Bits = 28;
const int kImm32Shift = 0;
const int kImm32Bits = 32;
// In branches and jumps immediate fields point to words, not bytes,
// and are therefore shifted by 2.
const int kImmFieldShift = 2;
const int kFrBits = 5;
const int kFrShift = 21;
const int kFsShift = 11;
const int kFsBits = 5;
const int kFtShift = 16;
const int kFtBits = 5;
const int kFdShift = 6;
const int kFdBits = 5;
const int kFCccShift = 8;
const int kFCccBits = 3;
const int kFBccShift = 18;
const int kFBccBits = 3;
const int kFBtrueShift = 16;
const int kFBtrueBits = 1;
// ----- Miscellaneous useful masks.
// Instruction bit masks.
const int kOpcodeMask = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift;
const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift;
const int kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift;
const int kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift;
const int kRtFieldMask = ((1 << kRtBits) - 1) << kRtShift;
const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift;
const int kSaFieldMask = ((1 << kSaBits) - 1) << kSaShift;
const int kFunctionFieldMask = ((1 << kFunctionBits) - 1) << kFunctionShift;
// Misc masks.
const int kHiMask = 0xffff << 16;
const int kLoMask = 0xffff;
const int kSignMask = 0x80000000;
const int kJumpAddrMask = (1 << (kImm26Bits + kImmFieldShift)) - 1;
const int64_t kHi16MaskOf64 = (int64_t)0xffff << 48;
const int64_t kSe16MaskOf64 = (int64_t)0xffff << 32;
const int64_t kTh16MaskOf64 = (int64_t)0xffff << 16;
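A worked decode with these shifts and masks, as a standalone sketch: 0x00a62021 is addu a0, a1, a2, and the last lines show the word-to-byte shift of branch immediates described above:

#include <cstdint>
#include <cstdio>

int main() {
  // addu a0, a1, a2: SPECIAL opcode, rs=5 (a1), rt=6 (a2), rd=4 (a0),
  // sa=0, function=0x21 (ADDU).
  uint32_t instr = 0x00a62021;
  std::printf("opcode=%u rs=%u rt=%u rd=%u sa=%u function=0x%x\n",
              instr >> 26, (instr >> 21) & 0x1f, (instr >> 16) & 0x1f,
              (instr >> 11) & 0x1f, (instr >> 6) & 0x1f, instr & 0x3f);
  // Prints: opcode=0 rs=5 rt=6 rd=4 sa=0 function=0x21

  // Branch immediates count words: the byte offset is imm16 << kImmFieldShift,
  // sign-extended, relative to the instruction after the branch.
  int16_t imm16 = 8;
  std::printf("byte offset = %d\n", static_cast<int32_t>(imm16) << 2);  // 32
}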
// ----- MIPS Opcodes and Function Fields.
// We use this presentation to stay close to the table representation in
// MIPS32 Architecture For Programmers, Volume II: The MIPS32 Instruction Set.
enum Opcode {
SPECIAL = 0 << kOpcodeShift,
REGIMM = 1 << kOpcodeShift,
J = ((0 << 3) + 2) << kOpcodeShift,
JAL = ((0 << 3) + 3) << kOpcodeShift,
BEQ = ((0 << 3) + 4) << kOpcodeShift,
BNE = ((0 << 3) + 5) << kOpcodeShift,
BLEZ = ((0 << 3) + 6) << kOpcodeShift,
BGTZ = ((0 << 3) + 7) << kOpcodeShift,
ADDI = ((1 << 3) + 0) << kOpcodeShift,
ADDIU = ((1 << 3) + 1) << kOpcodeShift,
SLTI = ((1 << 3) + 2) << kOpcodeShift,
SLTIU = ((1 << 3) + 3) << kOpcodeShift,
ANDI = ((1 << 3) + 4) << kOpcodeShift,
ORI = ((1 << 3) + 5) << kOpcodeShift,
XORI = ((1 << 3) + 6) << kOpcodeShift,
LUI = ((1 << 3) + 7) << kOpcodeShift,
COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class.
BEQL = ((2 << 3) + 4) << kOpcodeShift,
BNEL = ((2 << 3) + 5) << kOpcodeShift,
BLEZL = ((2 << 3) + 6) << kOpcodeShift,
BGTZL = ((2 << 3) + 7) << kOpcodeShift,
DADDI = ((3 << 3) + 0) << kOpcodeShift,
DADDIU = ((3 << 3) + 1) << kOpcodeShift,
LDL = ((3 << 3) + 2) << kOpcodeShift,
LDR = ((3 << 3) + 3) << kOpcodeShift,
SPECIAL2 = ((3 << 3) + 4) << kOpcodeShift,
SPECIAL3 = ((3 << 3) + 7) << kOpcodeShift,
LB = ((4 << 3) + 0) << kOpcodeShift,
LH = ((4 << 3) + 1) << kOpcodeShift,
LWL = ((4 << 3) + 2) << kOpcodeShift,
LW = ((4 << 3) + 3) << kOpcodeShift,
LBU = ((4 << 3) + 4) << kOpcodeShift,
LHU = ((4 << 3) + 5) << kOpcodeShift,
LWR = ((4 << 3) + 6) << kOpcodeShift,
LWU = ((4 << 3) + 7) << kOpcodeShift,
SB = ((5 << 3) + 0) << kOpcodeShift,
SH = ((5 << 3) + 1) << kOpcodeShift,
SWL = ((5 << 3) + 2) << kOpcodeShift,
SW = ((5 << 3) + 3) << kOpcodeShift,
SDL = ((5 << 3) + 4) << kOpcodeShift,
SDR = ((5 << 3) + 5) << kOpcodeShift,
SWR = ((5 << 3) + 6) << kOpcodeShift,
LWC1 = ((6 << 3) + 1) << kOpcodeShift,
LLD = ((6 << 3) + 4) << kOpcodeShift,
LDC1 = ((6 << 3) + 5) << kOpcodeShift,
LD = ((6 << 3) + 7) << kOpcodeShift,
PREF = ((6 << 3) + 3) << kOpcodeShift,
SWC1 = ((7 << 3) + 1) << kOpcodeShift,
SCD = ((7 << 3) + 4) << kOpcodeShift,
SDC1 = ((7 << 3) + 5) << kOpcodeShift,
SD = ((7 << 3) + 7) << kOpcodeShift,
COP1X = ((1 << 4) + 3) << kOpcodeShift
};
enum SecondaryField {
// SPECIAL Encoding of Function Field.
SLL = ((0 << 3) + 0),
MOVCI = ((0 << 3) + 1),
SRL = ((0 << 3) + 2),
SRA = ((0 << 3) + 3),
SLLV = ((0 << 3) + 4),
SRLV = ((0 << 3) + 6),
SRAV = ((0 << 3) + 7),
JR = ((1 << 3) + 0),
JALR = ((1 << 3) + 1),
MOVZ = ((1 << 3) + 2),
MOVN = ((1 << 3) + 3),
BREAK = ((1 << 3) + 5),
MFHI = ((2 << 3) + 0),
MFLO = ((2 << 3) + 2),
DSLLV = ((2 << 3) + 4),
DSRLV = ((2 << 3) + 6),
DSRAV = ((2 << 3) + 7),
MULT = ((3 << 3) + 0),
MULTU = ((3 << 3) + 1),
DIV = ((3 << 3) + 2),
DIVU = ((3 << 3) + 3),
DMULT = ((3 << 3) + 4),
DMULTU = ((3 << 3) + 5),
DDIV = ((3 << 3) + 6),
DDIVU = ((3 << 3) + 7),
ADD = ((4 << 3) + 0),
ADDU = ((4 << 3) + 1),
SUB = ((4 << 3) + 2),
SUBU = ((4 << 3) + 3),
AND = ((4 << 3) + 4),
OR = ((4 << 3) + 5),
XOR = ((4 << 3) + 6),
NOR = ((4 << 3) + 7),
SLT = ((5 << 3) + 2),
SLTU = ((5 << 3) + 3),
DADD = ((5 << 3) + 4),
DADDU = ((5 << 3) + 5),
DSUB = ((5 << 3) + 6),
DSUBU = ((5 << 3) + 7),
TGE = ((6 << 3) + 0),
TGEU = ((6 << 3) + 1),
TLT = ((6 << 3) + 2),
TLTU = ((6 << 3) + 3),
TEQ = ((6 << 3) + 4),
TNE = ((6 << 3) + 6),
DSLL = ((7 << 3) + 0),
DSRL = ((7 << 3) + 2),
DSRA = ((7 << 3) + 3),
DSLL32 = ((7 << 3) + 4),
DSRL32 = ((7 << 3) + 6),
DSRA32 = ((7 << 3) + 7),
// drotr in special4?
// SPECIAL2 Encoding of Function Field.
MUL = ((0 << 3) + 2),
CLZ = ((4 << 3) + 0),
CLO = ((4 << 3) + 1),
// SPECIAL3 Encoding of Function Field.
EXT = ((0 << 3) + 0),
DEXTM = ((0 << 3) + 1),
DEXTU = ((0 << 3) + 2),
DEXT = ((0 << 3) + 3),
INS = ((0 << 3) + 4),
DINSM = ((0 << 3) + 5),
DINSU = ((0 << 3) + 6),
DINS = ((0 << 3) + 7),
DSBH = ((4 << 3) + 4),
// REGIMM encoding of rt Field.
BLTZ = ((0 << 3) + 0) << 16,
BGEZ = ((0 << 3) + 1) << 16,
BLTZAL = ((2 << 3) + 0) << 16,
BGEZAL = ((2 << 3) + 1) << 16,
// COP1 Encoding of rs Field.
MFC1 = ((0 << 3) + 0) << 21,
DMFC1 = ((0 << 3) + 1) << 21,
CFC1 = ((0 << 3) + 2) << 21,
MFHC1 = ((0 << 3) + 3) << 21,
MTC1 = ((0 << 3) + 4) << 21,
DMTC1 = ((0 << 3) + 5) << 21,
CTC1 = ((0 << 3) + 6) << 21,
MTHC1 = ((0 << 3) + 7) << 21,
BC1 = ((1 << 3) + 0) << 21,
S = ((2 << 3) + 0) << 21,
D = ((2 << 3) + 1) << 21,
W = ((2 << 3) + 4) << 21,
L = ((2 << 3) + 5) << 21,
PS = ((2 << 3) + 6) << 21,
// COP1 Encoding of Function Field When rs=S.
ROUND_L_S = ((1 << 3) + 0),
TRUNC_L_S = ((1 << 3) + 1),
CEIL_L_S = ((1 << 3) + 2),
FLOOR_L_S = ((1 << 3) + 3),
ROUND_W_S = ((1 << 3) + 4),
TRUNC_W_S = ((1 << 3) + 5),
CEIL_W_S = ((1 << 3) + 6),
FLOOR_W_S = ((1 << 3) + 7),
CVT_D_S = ((4 << 3) + 1),
CVT_W_S = ((4 << 3) + 4),
CVT_L_S = ((4 << 3) + 5),
CVT_PS_S = ((4 << 3) + 6),
// COP1 Encoding of Function Field When rs=D.
ADD_D = ((0 << 3) + 0),
SUB_D = ((0 << 3) + 1),
MUL_D = ((0 << 3) + 2),
DIV_D = ((0 << 3) + 3),
SQRT_D = ((0 << 3) + 4),
ABS_D = ((0 << 3) + 5),
MOV_D = ((0 << 3) + 6),
NEG_D = ((0 << 3) + 7),
ROUND_L_D = ((1 << 3) + 0),
TRUNC_L_D = ((1 << 3) + 1),
CEIL_L_D = ((1 << 3) + 2),
FLOOR_L_D = ((1 << 3) + 3),
ROUND_W_D = ((1 << 3) + 4),
TRUNC_W_D = ((1 << 3) + 5),
CEIL_W_D = ((1 << 3) + 6),
FLOOR_W_D = ((1 << 3) + 7),
CVT_S_D = ((4 << 3) + 0),
CVT_W_D = ((4 << 3) + 4),
CVT_L_D = ((4 << 3) + 5),
C_F_D = ((6 << 3) + 0),
C_UN_D = ((6 << 3) + 1),
C_EQ_D = ((6 << 3) + 2),
C_UEQ_D = ((6 << 3) + 3),
C_OLT_D = ((6 << 3) + 4),
C_ULT_D = ((6 << 3) + 5),
C_OLE_D = ((6 << 3) + 6),
C_ULE_D = ((6 << 3) + 7),
// COP1 Encoding of Function Field When rs=W or L.
CVT_S_W = ((4 << 3) + 0),
CVT_D_W = ((4 << 3) + 1),
CVT_S_L = ((4 << 3) + 0),
CVT_D_L = ((4 << 3) + 1),
// COP1 Encoding of Function Field When rs=PS.
// COP1X Encoding of Function Field.
MADD_D = ((4 << 3) + 1),
NULLSF = 0
};
// ----- Emulated conditions.
// On MIPS we use this enum to abstract from conditional branch instructions.
// The 'U' prefix is used to specify unsigned comparisons.
// Opposite conditions must be paired as odd/even numbers
// because the 'NegateCondition' function flips the LSB to negate a condition.
enum Condition {
// Any value < 0 is considered no_condition.
kNoCondition = -1,
overflow = 0,
no_overflow = 1,
Uless = 2,
Ugreater_equal= 3,
equal = 4,
not_equal = 5,
Uless_equal = 6,
Ugreater = 7,
negative = 8,
positive = 9,
parity_even = 10,
parity_odd = 11,
less = 12,
greater_equal = 13,
less_equal = 14,
greater = 15,
ueq = 16, // Unordered or Equal.
nue = 17, // Not (Unordered or Equal).
cc_always = 18,
// Aliases.
carry = Uless,
not_carry = Ugreater_equal,
zero = equal,
eq = equal,
not_zero = not_equal,
ne = not_equal,
nz = not_equal,
sign = negative,
not_sign = positive,
mi = negative,
pl = positive,
hi = Ugreater,
ls = Uless_equal,
ge = greater_equal,
lt = less,
gt = greater,
le = less_equal,
hs = Ugreater_equal,
lo = Uless,
al = cc_always,
cc_default = kNoCondition
};
// Returns the equivalent of !cc.
// Negation of the default kNoCondition (-1) results in a non-default
// no_condition value (-2). As long as tests for no_condition check
// for condition < 0, this will work as expected.
inline Condition NegateCondition(Condition cc) {
ASSERT(cc != cc_always);
return static_cast<Condition>(cc ^ 1);
}
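A standalone sanity check of the odd/even pairing (a few enumerators repeated from the table above):

#include <cassert>

enum Condition { Uless = 2, Ugreater_equal = 3, less = 12, greater_equal = 13 };

inline Condition NegateCondition(Condition cc) {
  return static_cast<Condition>(cc ^ 1);  // flip the least significant bit
}

int main() {
  assert(NegateCondition(less) == greater_equal);    // 12 ^ 1 == 13
  assert(NegateCondition(Uless) == Ugreater_equal);  //  2 ^ 1 ==  3
}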
// Commute a condition such that {a cond b == b cond' a}.
inline Condition CommuteCondition(Condition cc) {
switch (cc) {
case Uless:
return Ugreater;
case Ugreater:
return Uless;
case Ugreater_equal:
return Uless_equal;
case Uless_equal:
return Ugreater_equal;
case less:
return greater;
case greater:
return less;
case greater_equal:
return less_equal;
case less_equal:
return greater_equal;
default:
return cc;
}
}
// ----- Coprocessor conditions.
enum FPUCondition {
kNoFPUCondition = -1,
F = 0, // False.
UN = 1, // Unordered.
EQ = 2, // Equal.
UEQ = 3, // Unordered or Equal.
OLT = 4, // Ordered or Less Than.
ULT = 5, // Unordered or Less Than.
OLE = 6, // Ordered or Less Than or Equal.
ULE = 7 // Unordered or Less Than or Equal.
};
// FPU rounding modes.
enum FPURoundingMode {
RN = 0 << 0, // Round to Nearest.
RZ = 1 << 0, // Round towards zero.
RP = 2 << 0, // Round towards Plus Infinity.
RM = 3 << 0, // Round towards Minus Infinity.
// Aliases.
kRoundToNearest = RN,
kRoundToZero = RZ,
kRoundToPlusInf = RP,
kRoundToMinusInf = RM
};
const uint32_t kFPURoundingModeMask = 3 << 0;
enum CheckForInexactConversion {
kCheckForInexactConversion,
kDontCheckForInexactConversion
};
// -----------------------------------------------------------------------------
// Hints.
// Branch hints are not used on the MIPS. They are defined so that they can
// appear in shared function signatures, but will be ignored in MIPS
// implementations.
enum Hint {
no_hint = 0
};
inline Hint NegateHint(Hint hint) {
return no_hint;
}
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
// These constants are declared in assembler-mips64.cc, as they use named
// registers and other constants.
// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
extern const Instr kPopInstruction;
// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
extern const Instr kPushInstruction;
// sw(r, MemOperand(sp, 0))
extern const Instr kPushRegPattern;
// lw(r, MemOperand(sp, 0))
extern const Instr kPopRegPattern;
extern const Instr kLwRegFpOffsetPattern;
extern const Instr kSwRegFpOffsetPattern;
extern const Instr kLwRegFpNegOffsetPattern;
extern const Instr kSwRegFpNegOffsetPattern;
// A mask for the Rt register for push, pop, lw, sw instructions.
extern const Instr kRtMask;
extern const Instr kLwSwInstrTypeMask;
extern const Instr kLwSwInstrArgumentMask;
extern const Instr kLwSwOffsetMask;
// Break 0xfffff, reserved for redirected real time call.
const Instr rtCallRedirInstr = SPECIAL | BREAK | call_rt_redirected << 6;
// A nop instruction. (Encoding of sll 0 0 0).
const Instr nopInstr = 0;
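The redirect instruction is an ordinary BREAK whose 20-bit code field holds call_rt_redirected; repeating the encoding arithmetic standalone:

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t SPECIAL = 0;               // opcode field (bits 31..26)
  const uint32_t BREAK = (1 << 3) + 5;      // function field, 0x0d
  const uint32_t call_rt_redirected = 0xfffff;
  uint32_t instr = SPECIAL | BREAK | (call_rt_redirected << 6);
  std::printf("rtCallRedirInstr = 0x%08x\n", instr);  // 0x03ffffcd
}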
class Instruction {
public:
enum {
kInstrSize = 4,
kInstrSizeLog2 = 2,
// On MIPS the PC cannot be accessed directly. We behave as if it were
// always the address of the instruction currently being executed.
kPCReadOffset = 0
};
// Get the raw instruction bits.
inline Instr InstructionBits() const {
return *reinterpret_cast<const Instr*>(this);
}
// Set the raw instruction bits to value.
inline void SetInstructionBits(Instr value) {
*reinterpret_cast<Instr*>(this) = value;
}
// Read one particular bit out of the instruction bits.
inline int Bit(int nr) const {
return (InstructionBits() >> nr) & 1;
}
// Read a bit field out of the instruction bits.
inline int Bits(int hi, int lo) const {
return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
}
// Instruction type.
enum Type {
kRegisterType,
kImmediateType,
kJumpType,
kUnsupported = -1
};
// Get the encoding type of the instruction.
Type InstructionType() const;
// Accessors for the different named fields used in the MIPS encoding.
inline Opcode OpcodeValue() const {
return static_cast<Opcode>(
Bits(kOpcodeShift + kOpcodeBits - 1, kOpcodeShift));
}
inline int RsValue() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
return Bits(kRsShift + kRsBits - 1, kRsShift);
}
inline int RtValue() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
return Bits(kRtShift + kRtBits - 1, kRtShift);
}
inline int RdValue() const {
ASSERT(InstructionType() == kRegisterType);
return Bits(kRdShift + kRdBits - 1, kRdShift);
}
inline int SaValue() const {
ASSERT(InstructionType() == kRegisterType);
return Bits(kSaShift + kSaBits - 1, kSaShift);
}
inline int FunctionValue() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
return Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift);
}
inline int FdValue() const {
return Bits(kFdShift + kFdBits - 1, kFdShift);
}
inline int FsValue() const {
return Bits(kFsShift + kFsBits - 1, kFsShift);
}
inline int FtValue() const {
return Bits(kFtShift + kFtBits - 1, kFtShift);
}
inline int FrValue() const {
return Bits(kFrShift + kFrBits -1, kFrShift);
}
// Float Compare condition code instruction bits.
inline int FCccValue() const {
return Bits(kFCccShift + kFCccBits - 1, kFCccShift);
}
// Float Branch condition code instruction bits.
inline int FBccValue() const {
return Bits(kFBccShift + kFBccBits - 1, kFBccShift);
}
// Float Branch true/false instruction bit.
inline int FBtrueValue() const {
return Bits(kFBtrueShift + kFBtrueBits - 1, kFBtrueShift);
}
// Return the fields at their original place in the instruction encoding.
inline Opcode OpcodeFieldRaw() const {
return static_cast<Opcode>(InstructionBits() & kOpcodeMask);
}
inline int RsFieldRaw() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
return InstructionBits() & kRsFieldMask;
}
// Same as above function, but safe to call within InstructionType().
inline int RsFieldRawNoAssert() const {
return InstructionBits() & kRsFieldMask;
}
inline int RtFieldRaw() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
return InstructionBits() & kRtFieldMask;
}
inline int RdFieldRaw() const {
ASSERT(InstructionType() == kRegisterType);
return InstructionBits() & kRdFieldMask;
}
inline int SaFieldRaw() const {
ASSERT(InstructionType() == kRegisterType);
return InstructionBits() & kSaFieldMask;
}
inline int FunctionFieldRaw() const {
return InstructionBits() & kFunctionFieldMask;
}
// Get the secondary field according to the opcode.
inline int SecondaryValue() const {
Opcode op = OpcodeFieldRaw();
switch (op) {
case SPECIAL:
case SPECIAL2:
return FunctionValue();
case COP1:
return RsValue();
case REGIMM:
return RtValue();
default:
return NULLSF;
}
}
inline int32_t Imm16Value() const {
ASSERT(InstructionType() == kImmediateType);
return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
}
inline int32_t Imm26Value() const {
ASSERT(InstructionType() == kJumpType);
return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
}
// Returns true if the instruction must not be used in a branch delay slot.
bool IsForbiddenInBranchDelay() const;
// Returns true if the instruction 'links', e.g. jal, bal.
bool IsLinkingInstruction() const;
// Returns true if the instruction is a break or a trap.
bool IsTrap() const;
// Instructions are read out of a code stream. The only way to get a
// reference to an instruction is to convert a pointer. There is no way
// to allocate or create instances of class Instruction.
// Use the At(pc) function to create references to Instruction.
static Instruction* At(byte* pc) {
return reinterpret_cast<Instruction*>(pc);
}
private:
// We need to prevent the creation of instances of class Instruction.
DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
};
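Two details of this class are easy to verify offline: the Bits() mask, (2 << (hi - lo)) - 1, is a run of hi - lo + 1 ones, and At() is the only sanctioned way to view raw memory as an Instruction. A trimmed stand-in (not the real class) demonstrating both:

#include <cstdint>
#include <cstdio>

typedef uint8_t byte;

class Instruction {
 public:
  uint32_t InstructionBits() const {
    return *reinterpret_cast<const uint32_t*>(this);
  }
  // (2 << (hi - lo)) - 1 builds a mask of hi - lo + 1 one-bits.
  int Bits(int hi, int lo) const {
    return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
  }
  static Instruction* At(byte* pc) {
    return reinterpret_cast<Instruction*>(pc);
  }
 private:
  Instruction();  // never constructed, only cast from code pointers
};

int main() {
  alignas(uint32_t) byte code[] = {0x21, 0x20, 0xa6, 0x00};  // addu a0, a1, a2
  Instruction* instr = Instruction::At(code);
  std::printf("rs=%d rt=%d rd=%d\n",
              instr->Bits(25, 21), instr->Bits(20, 16), instr->Bits(15, 11));
  // Prints: rs=5 rt=6 rd=4 (little-endian encoding assumed)
}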
// -----------------------------------------------------------------------------
// MIPS assembly various constants.
// C/C++ argument slots size.
const int kCArgSlotCount = (kMipsAbi == kN64) ? 0 : 4;
// TODO(plind): below should be based on kPointerSize
// TODO(plind): find all usages and remove the needless instructions for n64.
const int kCArgsSlotsSize = kCArgSlotCount * Instruction::kInstrSize * 2;
const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
} } // namespace v8::internal
#endif // #ifndef V8_MIPS_CONSTANTS_H_

src/mips64/cpu-mips64.cc Normal file
View File

@ -0,0 +1,59 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// CPU specific code for MIPS independent of OS goes here.
#include <sys/syscall.h>
#include <unistd.h>
#ifdef __mips
#include <asm/cachectl.h>
#endif // #ifdef __mips
#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS64
#include "src/assembler.h"
#include "src/macro-assembler.h"
#include "src/simulator.h" // For cache flushing.
namespace v8 {
namespace internal {
void CpuFeatures::FlushICache(void* start, size_t size) {
// Nothing to do, flushing no instructions.
if (size == 0) {
return;
}
#if !defined (USE_SIMULATOR)
#if defined(ANDROID) && !defined(__LP64__)
// Bionic cacheflush can typically run in userland, avoiding kernel call.
char *end = reinterpret_cast<char *>(start) + size;
cacheflush(
reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end), 0);
#else // ANDROID
int res;
// See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
res = syscall(__NR_cacheflush, start, size, ICACHE);
if (res) {
V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
}
#endif // ANDROID
#else // USE_SIMULATOR.
// Not generating mips instructions for C-code. This means that we are
// building a mips emulator based target. We should notify the simulator
// that the Icache was flushed.
// None of this code ends up in the snapshot so there are no issues
// around whether or not to generate the code when building snapshots.
Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
#endif // USE_SIMULATOR.
}
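A hedged sketch of a typical call site, assuming V8's internal headers; the helper is invented, but the FlushICache signature matches the definition above:

#include "src/v8.h"
#include "src/assembler.h"

namespace v8 {
namespace internal {

// Hypothetical helper: publish a freshly written code region before running
// it. `buffer` and `length` describe memory just filled with instructions.
static void CommitGeneratedCode(void* buffer, size_t length) {
  // Without the flush, the CPU may still execute stale i-cache contents.
  CpuFeatures::FlushICache(buffer, length);
}

} }  // namespace v8::internal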
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS64

src/mips64/debug-mips64.cc Normal file
View File

@ -0,0 +1,335 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS64
#include "src/codegen.h"
#include "src/debug.h"
namespace v8 {
namespace internal {
bool BreakLocationIterator::IsDebugBreakAtReturn() {
return Debug::IsDebugBreakAtReturn(rinfo());
}
void BreakLocationIterator::SetDebugBreakAtReturn() {
// Mips64 return sequence:
// mov sp, fp
// ld fp, sp(0)
// ld ra, sp(8)
// daddiu sp, sp, 16
// daddiu sp, sp, N
// jr ra
// nop (in branch delay slot)
// Make sure this constant matches the number of instructions we emit.
ASSERT(Assembler::kJSReturnSequenceInstructions == 7);
CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
// li and Call pseudo-instructions emit 6 + 2 instructions.
patcher.masm()->li(v8::internal::t9, Operand(reinterpret_cast<int64_t>(
debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry())),
ADDRESS_LOAD);
patcher.masm()->Call(v8::internal::t9);
// Place nop to match return sequence size.
patcher.masm()->nop();
// TODO(mips): Open issue about using breakpoint instruction instead of nops.
// patcher.masm()->bkpt(0);
}
// Restore the JS frame exit code.
void BreakLocationIterator::ClearDebugBreakAtReturn() {
rinfo()->PatchCode(original_rinfo()->pc(),
Assembler::kJSReturnSequenceInstructions);
}
// A debug break in the exit code is identified by the JS frame exit code
// having been patched with a li/call pseudo-instruction sequence (lui/ori/jalr).
bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
return rinfo->IsPatchedReturnSequence();
}
bool BreakLocationIterator::IsDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
// Check whether the debug break slot instructions have been patched.
return rinfo()->IsPatchedDebugBreakSlotSequence();
}
void BreakLocationIterator::SetDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
// Patch the code changing the debug break slot code from:
// nop(DEBUG_BREAK_NOP) - nop(1) is sll(zero_reg, zero_reg, 1)
// nop(DEBUG_BREAK_NOP)
// nop(DEBUG_BREAK_NOP)
// nop(DEBUG_BREAK_NOP)
// nop(DEBUG_BREAK_NOP)
// nop(DEBUG_BREAK_NOP)
// to a call to the debug break slot code.
// li t9, address (4-instruction sequence on mips64)
// call t9 (jalr t9 / nop instruction pair)
CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
patcher.masm()->li(v8::internal::t9,
Operand(reinterpret_cast<int64_t>(
debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry())),
ADDRESS_LOAD);
patcher.masm()->Call(v8::internal::t9);
}
void BreakLocationIterator::ClearDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
rinfo()->PatchCode(original_rinfo()->pc(),
Assembler::kDebugBreakSlotInstructions);
}
#define __ ACCESS_MASM(masm)
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
RegList non_object_regs) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Load padding words on stack.
__ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingValue)));
__ Dsubu(sp, sp,
Operand(kPointerSize * LiveEdit::kFramePaddingInitialSize));
for (int i = LiveEdit::kFramePaddingInitialSize - 1; i >= 0; i--) {
__ sd(at, MemOperand(sp, kPointerSize * i));
}
__ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
__ push(at);
// TODO(plind): This needs to be revised to store pairs of Smis, as on
// the other 64-bit architectures.
// Store the registers containing live values on the expression stack to
// make sure that these are correctly updated during GC. Non object values
// are stored as a smi causing it to be untouched by GC.
ASSERT((object_regs & ~kJSCallerSaved) == 0);
ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
ASSERT((object_regs & non_object_regs) == 0);
for (int i = 0; i < kNumJSCallerSaved; i++) {
int r = JSCallerSavedCode(i);
Register reg = { r };
if ((object_regs & (1 << r)) != 0) {
__ push(reg);
}
if ((non_object_regs & (1 << r)) != 0) {
__ PushRegisterAsTwoSmis(reg);
}
}
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
__ PrepareCEntryArgs(0); // No arguments.
__ PrepareCEntryFunction(ExternalReference::debug_break(masm->isolate()));
CEntryStub ceb(masm->isolate(), 1);
__ CallStub(&ceb);
// Restore the register values from the expression stack.
for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
int r = JSCallerSavedCode(i);
Register reg = { r };
if ((non_object_regs & (1 << r)) != 0) {
__ PopRegisterAsTwoSmis(reg, at);
}
if ((object_regs & (1 << r)) != 0) {
__ pop(reg);
}
if (FLAG_debug_code &&
(((object_regs |non_object_regs) & (1 << r)) == 0)) {
__ li(reg, kDebugZapValue);
}
}
// Don't bother removing padding bytes pushed on the stack
// as the frame is going to be restored right away.
// Leave the internal frame.
}
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller, which was
// overwritten by the address of DebugBreakXXX.
ExternalReference after_break_target =
ExternalReference::debug_after_break_target_address(masm->isolate());
__ li(t9, Operand(after_break_target));
__ ld(t9, MemOperand(t9));
__ Jump(t9);
}
void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
// Register state for CallICStub
// ----------- S t a t e -------------
// -- a1 : function
// -- a3 : slot in feedback array (smi)
// -----------------------------------
Generate_DebugBreakCallHelper(masm, a1.bit() | a3.bit(), 0);
}
void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
Register receiver = LoadIC::ReceiverRegister();
Register name = LoadIC::NameRegister();
Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit(), 0);
}
void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC store (from ic-mips.cc).
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : receiver
// -- a2 : name
// -- ra : return address
// -----------------------------------
// Registers a0, a1, and a2 contain objects that need to be pushed on the
// expression stack of the fake JS frame.
Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit() | a2.bit(), 0);
}
void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for keyed IC load (from ic-arm.cc).
GenerateLoadICDebugBreak(masm);
}
void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- a0 : value
// -- a1 : key
// -- a2 : receiver
// -- ra : return address
Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit() | a2.bit(), 0);
}
void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
// Register state for CompareNil IC
// ----------- S t a t e -------------
// -- a0 : value
// -----------------------------------
Generate_DebugBreakCallHelper(masm, a0.bit(), 0);
}
void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
// In places other than IC call sites it is expected that v0 is TOS which
// is an object - this is not generally the case so this should be used with
// care.
Generate_DebugBreakCallHelper(masm, v0.bit(), 0);
}
void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-mips.cc).
// ----------- S t a t e -------------
// -- a1 : function
// -----------------------------------
Generate_DebugBreakCallHelper(masm, a1.bit(), 0);
}
void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-mips.cc).
// ----------- S t a t e -------------
// -- a0 : number of arguments (not smi)
// -- a1 : constructor function
// -----------------------------------
Generate_DebugBreakCallHelper(masm, a1.bit() , a0.bit());
}
void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-mips.cc).
// ----------- S t a t e -------------
// -- a0 : number of arguments (not smi)
// -- a1 : constructor function
// -- a2 : feedback array
// -- a3 : feedback slot (smi)
// -----------------------------------
Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit() | a3.bit(), a0.bit());
}
void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
// Generate enough nops to make space for a call instruction. Avoid emitting
// the trampoline pool in the debug break slot code.
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
Label check_codesize;
__ bind(&check_codesize);
__ RecordDebugBreakSlot();
for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
__ nop(MacroAssembler::DEBUG_BREAK_NOP);
}
ASSERT_EQ(Assembler::kDebugBreakSlotInstructions,
masm->InstructionsGeneratedSince(&check_codesize));
}
void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
// In the places where a debug break slot is inserted, no registers can
// contain object pointers.
Generate_DebugBreakCallHelper(masm, 0, 0);
}
void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
__ Ret();
}
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
ExternalReference restarter_frame_function_slot =
ExternalReference::debug_restarter_frame_function_pointer_address(
masm->isolate());
__ li(at, Operand(restarter_frame_function_slot));
__ sw(zero_reg, MemOperand(at, 0));
// We do not know our frame height, but set sp based on fp.
__ Dsubu(sp, fp, Operand(kPointerSize));
__ Pop(ra, fp, a1); // Return address, Frame, Function.
// Load context from the function.
__ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// Get function code.
__ ld(at, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ ld(at, FieldMemOperand(at, SharedFunctionInfo::kCodeOffset));
__ Daddu(t9, at, Operand(Code::kHeaderSize - kHeapObjectTag));
// Re-run JSFunction, a1 is function, cp is context.
__ Jump(t9);
}
const bool LiveEdit::kFrameDropperSupported = true;
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS64

View File

@ -0,0 +1,382 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen.h"
#include "src/safepoint-table.h"
namespace v8 {
namespace internal {
int Deoptimizer::patch_size() {
const int kCallInstructionSizeInWords = 6;
return kCallInstructionSizeInWords * Assembler::kInstrSize;
}
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address code_start_address = code->instruction_start();
// Invalidate the relocation information, as it will become invalid by the
// code patching below, and is not needed any more.
code->InvalidateRelocation();
if (FLAG_zap_code_space) {
// Fail hard and early if we enter this code object again.
byte* pointer = code->FindCodeAgeSequence();
if (pointer != NULL) {
pointer += kNoCodeAgeSequenceLength;
} else {
pointer = code->instruction_start();
}
CodePatcher patcher(pointer, 1);
patcher.masm()->break_(0xCC);
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
int osr_offset = data->OsrPcOffset()->value();
if (osr_offset > 0) {
CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
osr_patcher.masm()->break_(0xCC);
}
}
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
SharedFunctionInfo* shared =
SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
// For each LLazyBailout instruction insert a call to the corresponding
// deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
RelocInfo::NONE32);
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
ASSERT(call_size_in_bytes <= patch_size());
CodePatcher patcher(call_address, call_size_in_words);
patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
ASSERT(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
ASSERT(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
prev_call_address = call_address;
#endif
}
}
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
// spilled. Registers fp and sp are set to the correct values though.
for (int i = 0; i < Register::kNumRegisters; i++) {
input_->SetRegister(i, i * 4);
}
input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
input_->SetDoubleRegister(i, 0.0);
}
// Fill the frame content from the actual data on the frame.
for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
}
}
void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler());
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
int params = descriptor->GetHandlerParameterCount();
output_frame->SetRegister(s0.code(), params);
output_frame->SetRegister(s1.code(), (params - 1) * kPointerSize);
output_frame->SetRegister(s2.code(), handler);
}
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
}
bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
// There is no dynamic alignment padding on MIPS in the input frame.
return false;
}
#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
void Deoptimizer::EntryGenerator::Generate() {
GeneratePrologue();
// Unlike on ARM we don't save all the registers, just the useful ones.
// For the rest, there are gaps on the stack, so the offsets remain the same.
const int kNumberOfRegisters = Register::kNumRegisters;
RegList restored_regs = kJSCallerSaved | kCalleeSaved;
RegList saved_regs = restored_regs | sp.bit() | ra.bit();
const int kDoubleRegsSize =
kDoubleSize * FPURegister::kMaxNumAllocatableRegisters;
// Save all FPU registers before messing with them.
__ Dsubu(sp, sp, Operand(kDoubleRegsSize));
for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
int offset = i * kDoubleSize;
__ sdc1(fpu_reg, MemOperand(sp, offset));
}
// Push saved_regs (needed to populate FrameDescription::registers_).
// Leave gaps for other registers.
__ Dsubu(sp, sp, kNumberOfRegisters * kPointerSize);
for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
if ((saved_regs & (1 << i)) != 0) {
__ sd(ToRegister(i), MemOperand(sp, kPointerSize * i));
}
}
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
// Get the bailout id from the stack.
__ ld(a2, MemOperand(sp, kSavedRegistersAreaSize));
// Get the address of the location in the code object (a3) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register a4.
__ mov(a3, ra);
// Correct one word for bailout id.
__ Daddu(a4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
__ Dsubu(a4, fp, a4);
// Allocate a new deoptimizer object.
__ PrepareCallCFunction(6, a5);
// Pass six arguments, according to the O32 or N64 ABI. a0..a3 are the same for both.
__ li(a1, Operand(type())); // bailout type,
__ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
// a2: bailout id already loaded.
// a3: code address or 0 already loaded.
if (kMipsAbi == kN64) {
// a4: already has fp-to-sp delta.
__ li(a5, Operand(ExternalReference::isolate_address(isolate())));
} else { // O32 abi.
// Pass four arguments in a0 to a3 and fifth & sixth arguments on stack.
__ sd(a4, CFunctionArgumentOperand(5)); // Fp-to-sp delta.
__ li(a5, Operand(ExternalReference::isolate_address(isolate())));
__ sd(a5, CFunctionArgumentOperand(6)); // Isolate.
}
// Call Deoptimizer::New().
{
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
}
// Preserve "deoptimizer" object in register v0 and get the input
// frame descriptor pointer to a1 (deoptimizer->input_);
// Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
__ mov(a0, v0);
__ ld(a1, MemOperand(v0, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
ASSERT(Register::kNumRegisters == kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
if ((saved_regs & (1 << i)) != 0) {
__ ld(a2, MemOperand(sp, i * kPointerSize));
__ sd(a2, MemOperand(a1, offset));
} else if (FLAG_debug_code) {
__ li(a2, kDebugZapValue);
__ sd(a2, MemOperand(a1, offset));
}
}
int double_regs_offset = FrameDescription::double_registers_offset();
// Copy FPU registers to
// double_registers_[DoubleRegister::kNumAllocatableRegisters]
for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ ldc1(f0, MemOperand(sp, src_offset));
__ sdc1(f0, MemOperand(a1, dst_offset));
}
// Remove the bailout id and the saved registers from the stack.
__ Daddu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
// Compute a pointer to the unwinding limit in register a2; that is
// the first stack slot not part of the input frame.
__ ld(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
__ Daddu(a2, a2, sp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ Daddu(a3, a1, Operand(FrameDescription::frame_content_offset()));
Label pop_loop;
Label pop_loop_header;
__ BranchShort(&pop_loop_header);
__ bind(&pop_loop);
__ pop(a4);
__ sd(a4, MemOperand(a3, 0));
__ daddiu(a3, a3, sizeof(uint64_t));
__ bind(&pop_loop_header);
__ BranchShort(&pop_loop, ne, a2, Operand(sp));
// Compute the output frame in the deoptimizer.
__ push(a0); // Preserve deoptimizer object across call.
// a0: deoptimizer object; a1: scratch.
__ PrepareCallCFunction(1, a1);
// Call Deoptimizer::ComputeOutputFrames().
{
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(
ExternalReference::compute_output_frames_function(isolate()), 1);
}
__ pop(a0); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop,
outer_loop_header, inner_loop_header;
// Outer loop state: a4 = current "FrameDescription** output_",
// a1 = one past the last FrameDescription**.
__ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
__ ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
__ dsll(a1, a1, kPointerSizeLog2); // Count to offset.
__ daddu(a1, a4, a1); // a1 = one past the last FrameDescription**.
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: a2 = current FrameDescription*, a3 = loop index.
__ ld(a2, MemOperand(a4, 0)); // output_[ix]
__ ld(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ Dsubu(a3, a3, Operand(sizeof(uint64_t)));
__ Daddu(a6, a2, Operand(a3));
__ ld(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
__ push(a7);
__ bind(&inner_loop_header);
__ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
__ Daddu(a4, a4, Operand(kPointerSize));
__ bind(&outer_loop_header);
__ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
__ ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
const FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
int src_offset = i * kDoubleSize + double_regs_offset;
__ ldc1(fpu_reg, MemOperand(a1, src_offset));
}
// Push state, pc, and continuation from the last output frame.
__ ld(a6, MemOperand(a2, FrameDescription::state_offset()));
__ push(a6);
__ ld(a6, MemOperand(a2, FrameDescription::pc_offset()));
__ push(a6);
__ ld(a6, MemOperand(a2, FrameDescription::continuation_offset()));
__ push(a6);
// Technically restoring 'at' should work unless zero_reg is also restored
// but it's safer to check for this.
ASSERT(!(at.bit() & restored_regs));
// Restore the registers from the last output frame.
__ mov(at, a2);
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
if ((restored_regs & (1 << i)) != 0) {
__ ld(ToRegister(i), MemOperand(at, offset));
}
}
__ InitializeRootRegister();
__ pop(at); // Get continuation, leave pc on stack.
__ pop(ra);
__ Jump(at);
__ stop("Unreachable.");
}
// Maximum size of a table entry generated below.
const int Deoptimizer::table_entry_size_ = 11 * Assembler::kInstrSize;
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
// Create a sequence of deoptimization entries.
// Note that registers are still live when jumping to an entry.
Label table_start;
__ bind(&table_start);
for (int i = 0; i < count(); i++) {
Label start;
__ bind(&start);
__ daddiu(sp, sp, -1 * kPointerSize);
// Jump over the remaining deopt entries (including this one).
// This code is always reached by calling Jump, which puts the target (label
// start) into t9.
const int remaining_entries = (count() - i) * table_entry_size_;
__ Daddu(t9, t9, remaining_entries);
// 'at' was clobbered so we can only load the current entry value here.
__ li(t8, i);
__ jr(t9); // Expose delay slot.
__ sd(t8, MemOperand(sp, 0 * kPointerSize)); // In the delay slot.
// Pad the rest of the code.
while (table_entry_size_ > (masm()->SizeOfCodeGeneratedSince(&start))) {
__ nop();
}
ASSERT_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
}
ASSERT_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
count() * table_entry_size_);
}
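The jump arithmetic is simple to sanity-check offline: from entry i, adding (count() - i) * table_entry_size_ to the entry's own address always lands one past the table, i.e. at the shared continuation. A standalone sketch with an assumed table address:

#include <cassert>
#include <cstdint>

int main() {
  const int kInstrSize = 4;
  const int kTableEntrySize = 11 * kInstrSize;  // 44 bytes per entry
  const int count = 5;                          // hypothetical entry count
  uintptr_t table_start = 0x1000;               // hypothetical table address

  for (int i = 0; i < count; i++) {
    uintptr_t entry = table_start + i * kTableEntrySize;  // t9 at entry i
    uintptr_t target = entry + (count - i) * kTableEntrySize;
    assert(target == table_start + count * kTableEntrySize);
  }
}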
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
// No out-of-line constant pool support.
UNREACHABLE();
}
#undef __
} } // namespace v8::internal

src/mips64/disasm-mips64.cc Normal file

File diff suppressed because it is too large.

View File

@ -0,0 +1,43 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS64
#include "src/assembler.h"
#include "src/frames.h"
#include "src/mips64/assembler-mips64-inl.h"
#include "src/mips64/assembler-mips64.h"
namespace v8 {
namespace internal {
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() {
UNREACHABLE();
return no_reg;
}
Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
Register StubFailureTrampolineFrame::context_register() { return cp; }
Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
UNREACHABLE();
return no_reg;
}
Object*& ExitFrame::constant_pool_slot() const {
UNREACHABLE();
return Memory::Object_at(NULL);
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS64

src/mips64/frames-mips64.h Normal file
View File

@ -0,0 +1,215 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MIPS_FRAMES_MIPS_H_
#define V8_MIPS_FRAMES_MIPS_H_
namespace v8 {
namespace internal {
// Register lists.
// Note that the bit values must match those used in actual instruction
// encoding.
const int kNumRegs = 32;
const RegList kJSCallerSaved =
1 << 2 | // v0
1 << 3 | // v1
1 << 4 | // a0
1 << 5 | // a1
1 << 6 | // a2
1 << 7 | // a3
1 << 8 | // a4
1 << 9 | // a5
1 << 10 | // a6
1 << 11 | // a7
1 << 12 | // t0
1 << 13 | // t1
1 << 14 | // t2
1 << 15; // t3
const int kNumJSCallerSaved = 14;
// Return the code of the n-th caller-saved register available to JavaScript,
// e.g. JSCallerSavedCode(0) returns v0.code() == 2.
int JSCallerSavedCode(int n);
// Callee-saved registers preserved when switching from C to JavaScript.
const RegList kCalleeSaved =
1 << 16 | // s0
1 << 17 | // s1
1 << 18 | // s2
1 << 19 | // s3
1 << 20 | // s4
1 << 21 | // s5
1 << 22 | // s6 (roots in JavaScript code)
1 << 23 | // s7 (cp in JavaScript code)
1 << 30; // fp/s8
const int kNumCalleeSaved = 9;
const RegList kCalleeSavedFPU =
1 << 20 | // f20
1 << 22 | // f22
1 << 24 | // f24
1 << 26 | // f26
1 << 28 | // f28
1 << 30; // f30
const int kNumCalleeSavedFPU = 6;
const RegList kCallerSavedFPU =
1 << 0 | // f0
1 << 2 | // f2
1 << 4 | // f4
1 << 6 | // f6
1 << 8 | // f8
1 << 10 | // f10
1 << 12 | // f12
1 << 14 | // f14
1 << 16 | // f16
1 << 18; // f18
// Number of registers for which space is reserved in safepoints. Must be a
// multiple of 8.
const int kNumSafepointRegisters = 24;
// Define the list of registers actually saved at safepoints.
// Note that the number of saved registers may be smaller than the reserved
// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
const int kNumSafepointSavedRegisters =
kNumJSCallerSaved + kNumCalleeSaved;
const int kUndefIndex = -1;
// Maps register codes to the corresponding indexes on the safepoint stack.
const int kSafepointRegisterStackIndexMap[kNumRegs] = {
kUndefIndex, // zero_reg
kUndefIndex, // at
0, // v0
1, // v1
2, // a0
3, // a1
4, // a2
5, // a3
6, // a4
7, // a5
8, // a6
9, // a7
10, // t0
11, // t1
12, // t2
13, // t3
14, // s0
15, // s1
16, // s2
17, // s3
18, // s4
19, // s5
20, // s6
21, // s7
kUndefIndex, // t8
kUndefIndex, // t9
kUndefIndex, // k0
kUndefIndex, // k1
kUndefIndex, // gp
kUndefIndex, // sp
22, // fp
kUndefIndex // ra
};
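A standalone check of how the map is read (the first eight entries repeated by hand): a saved register's safepoint slot is simply its table entry, and kUndefIndex marks registers that are never saved:

#include <cstdio>

const int kUndefIndex = -1;
// Excerpt of the table above: register codes 0..7 (zero_reg .. a3).
const int kMapExcerpt[8] = {kUndefIndex, kUndefIndex, 0, 1, 2, 3, 4, 5};

int main() {
  std::printf("a0 (code 4) -> safepoint slot %d\n", kMapExcerpt[4]);  // 2
  std::printf("at (code 1) -> %d (never saved)\n", kMapExcerpt[1]);   // -1
}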
// ----------------------------------------------------
class EntryFrameConstants : public AllStatic {
public:
static const int kCallerFPOffset =
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
class ExitFrameConstants : public AllStatic {
public:
static const int kFrameSize = 2 * kPointerSize;
static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
// The caller fields are below the frame pointer on the stack.
static const int kCallerFPOffset = +0 * kPointerSize;
// The calling JS function is between FP and PC.
static const int kCallerPCOffset = +1 * kPointerSize;
// MIPS-specific: a pointer to the old sp to avoid unnecessary calculations.
static const int kCallerSPOffset = +2 * kPointerSize;
// FP-relative displacement of the caller's SP.
static const int kCallerSPDisplacement = +2 * kPointerSize;
static const int kConstantPoolOffset = 0; // Not used.
};
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
static const int kLastParameterOffset = +2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
// Caller SP-relative.
static const int kParam0Offset = -2 * kPointerSize;
static const int kReceiverOffset = -1 * kPointerSize;
};
class ArgumentsAdaptorFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
static const int kFrameSize =
StandardFrameConstants::kFixedFrameSize + kPointerSize;
};
class ConstructFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kImplicitReceiverOffset = -6 * kPointerSize;
static const int kConstructorOffset = -5 * kPointerSize;
static const int kLengthOffset = -4 * kPointerSize;
static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
static const int kFrameSize =
StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
};
class InternalFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
};
inline Object* JavaScriptFrame::function_slot_object() const {
const int offset = JavaScriptFrameConstants::kFunctionOffset;
return Memory::Object_at(fp() + offset);
}
inline void StackHandler::SetFp(Address slot, Address fp) {
Memory::Address_at(slot) = fp;
}
} } // namespace v8::internal
#endif // V8_MIPS64_FRAMES_MIPS64_H_
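
The caller-saved RegList above is just a bitmask over register codes, so JSCallerSavedCode(n) reduces to finding the n-th set bit. A minimal standalone sketch of that lookup (plain C++, independent of V8's Register type; the constant value is copied from the header, the function name is hypothetical):

#include <cassert>
#include <cstdint>

typedef uint32_t RegList;

// Same bits as kJSCallerSaved above: v0, v1, a0..a7, t0..t3 (codes 2..15).
static const RegList kJSCallerSaved = 0xfffc;

// Return the register code of the n-th caller-saved register by scanning
// the mask from the least significant bit upwards.
static int JSCallerSavedCodeSketch(int n) {
  int seen = 0;
  for (int code = 0; code < 32; ++code) {
    if (kJSCallerSaved & (1u << code)) {
      if (seen == n) return code;
      ++seen;
    }
  }
  return -1;  // No such register.
}

int main() {
  assert(JSCallerSavedCodeSketch(0) == 2);    // v0
  assert(JSCallerSavedCodeSketch(2) == 4);    // a0
  assert(JSCallerSavedCodeSketch(13) == 15);  // t3
  return 0;
}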

File diff suppressed because it is too large.

src/mips64/ic-mips64.cc (new file, 1337 lines)
File diff suppressed because it is too large.

File diff suppressed because it is too large.

src/mips64/lithium-codegen-mips64.h (new file, 486 lines)
@@ -0,0 +1,486 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MIPS64_LITHIUM_CODEGEN_MIPS64_H_
#define V8_MIPS64_LITHIUM_CODEGEN_MIPS64_H_
#include "src/deoptimizer.h"
#include "src/lithium-codegen.h"
#include "src/mips64/lithium-gap-resolver-mips64.h"
#include "src/mips64/lithium-mips64.h"
#include "src/safepoint-table.h"
#include "src/scopes.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
// Forward declarations.
class LDeferredCode;
class SafepointGenerator;
class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
translations_(info->zone()),
deferred_(8, info->zone()),
osr_pc_offset_(-1),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
int LookupDestination(int block_id) const {
return chunk()->LookupDestination(block_id);
}
bool IsNextEmittedBlock(int block_id) const {
return LookupDestination(block_id) == GetNextEmittedBlock();
}
bool NeedsEagerFrame() const {
return GetStackSlotCount() > 0 ||
info()->is_non_deferred_calling() ||
!info()->IsStub() ||
info()->requires_frame();
}
bool NeedsDeferredFrame() const {
return !NeedsEagerFrame() && info()->is_deferred_calling();
}
RAStatus GetRAState() const {
return frame_is_built_ ? kRAHasBeenSaved : kRAHasNotBeenSaved;
}
// Support for converting LOperands to assembler types.
// LOperand must be a register.
Register ToRegister(LOperand* op) const;
// LOperand is loaded into scratch, unless already a register.
Register EmitLoadRegister(LOperand* op, Register scratch);
// LOperand must be a double register.
DoubleRegister ToDoubleRegister(LOperand* op) const;
// LOperand is loaded into dbl_scratch, unless already a double register.
DoubleRegister EmitLoadDoubleRegister(LOperand* op,
FloatRegister flt_scratch,
DoubleRegister dbl_scratch);
int32_t ToRepresentation_donotuse(LConstantOperand* op,
const Representation& r) const;
int32_t ToInteger32(LConstantOperand* op) const;
Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
Operand ToOperand(LOperand* op);
MemOperand ToMemOperand(LOperand* op) const;
// Returns a MemOperand pointing to the high word of a DoubleStackSlot.
MemOperand ToHighMemOperand(LOperand* op) const;
bool IsInteger32(LConstantOperand* op) const;
bool IsSmi(LConstantOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
bool GenerateCode();
// Finish the code by setting stack height, safepoint, and bailout
// information on it.
void FinishCode(Handle<Code> code);
void DoDeferredNumberTagD(LNumberTagD* instr);
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
void DoDeferredNumberTagIU(LInstruction* instr,
LOperand* value,
LOperand* temp1,
LOperand* temp2,
IntegerSignedness signedness);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
Register result,
Register object,
Register index);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
void DoGap(LGap* instr);
MemOperand PrepareKeyedOperand(Register key,
Register base,
bool key_is_constant,
int constant_key,
int element_size,
int shift_size,
int base_offset);
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
private:
StrictMode strict_mode() const { return info()->strict_mode(); }
Scope* scope() const { return scope_; }
Register scratch0() { return kLithiumScratchReg; }
Register scratch1() { return kLithiumScratchReg2; }
DoubleRegister double_scratch0() { return kLithiumScratchDouble; }
LInstruction* GetNextInstruction();
void EmitClassOfTest(Label* if_true,
Label* if_false,
Handle<String> class_name,
Register input,
Register temporary,
Register temporary2);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
void SaveCallerDoubles();
void RestoreCallerDoubles();
// Code generation passes. Returns true if code generation should
// continue.
void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
bool GeneratePrologue();
bool GenerateDeferredCode();
bool GenerateDeoptJumpTable();
bool GenerateSafepointTable();
// Generates the custom OSR entrypoint and sets the osr_pc_offset.
void GenerateOsrPrologue();
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
};
void CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr);
void CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
SafepointMode safepoint_mode);
void CallRuntime(const Runtime::Function* function,
int num_arguments,
LInstruction* instr,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
LInstruction* instr) {
const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, num_arguments, instr);
}
void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr,
LOperand* context);
enum A1State {
A1_UNINITIALIZED,
A1_CONTAINS_TARGET
};
// Generate a direct call to a known function. Expects the function
// to be in a1.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count,
int arity,
LInstruction* instr,
A1State a1_state);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition condition,
LEnvironment* environment,
Deoptimizer::BailoutType bailout_type,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
void DeoptimizeIf(Condition condition,
LEnvironment* environment,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
void AddToTranslation(LEnvironment* environment,
Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
void PopulateDeoptimizationLiteralsWithInlinedFunctions();
Register ToRegister(int index) const;
DoubleRegister ToDoubleRegister(int index) const;
MemOperand BuildSeqStringOperand(Register string,
LOperand* index,
String::Encoding encoding);
void EmitIntegerMathAbs(LMathAbs* instr);
// Support for recording safepoint and position information.
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
Safepoint::DeoptMode mode);
void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
void RecordSafepoint(Safepoint::DeoptMode mode);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
void RecordAndWritePosition(int position) V8_OVERRIDE;
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
// EmitBranch expects to be the last instruction of a block.
template<class InstrType>
void EmitBranch(InstrType instr,
Condition condition,
Register src1,
const Operand& src2);
template<class InstrType>
void EmitBranchF(InstrType instr,
Condition condition,
FPURegister src1,
FPURegister src2);
template<class InstrType>
void EmitFalseBranch(InstrType instr,
Condition condition,
Register src1,
const Operand& src2);
template<class InstrType>
void EmitFalseBranchF(InstrType instr,
Condition condition,
FPURegister src1,
FPURegister src2);
void EmitCmpI(LOperand* left, LOperand* right);
void EmitNumberUntagD(Register input,
DoubleRegister result,
bool allow_undefined_as_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
// Returns two registers in cmp1 and cmp2 that can be used in the
// Branch instruction after EmitTypeofIs.
Condition EmitTypeofIs(Label* true_label,
Label* false_label,
Register input,
Handle<String> type_name,
Register* cmp1,
Operand* cmp2);
// Emits optimized code for %_IsObject(x). Preserves input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
Condition EmitIsObject(Register input,
Register temp1,
Register temp2,
Label* is_not_object,
Label* is_object);
// Emits optimized code for %_IsString(x). Preserves input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
Condition EmitIsString(Register input,
Register temp1,
Label* is_not_string,
SmiCheck check_needed);
// Emits optimized code for %_IsConstructCall().
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp1, Register temp2);
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
int* offset,
AllocationSiteMode mode);
// Emit optimized code for integer division.
// Inputs are signed.
// All registers are clobbered.
// If 'remainder' is no_reg, it is not computed.
void EmitSignedIntegerDivisionByConstant(Register result,
Register dividend,
int32_t divisor,
Register remainder,
Register scratch,
LEnvironment* environment);
void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
void DoStoreKeyedExternalArray(LStoreKeyed* instr);
void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
void DoStoreKeyedFixedArray(LStoreKeyed* instr);
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
// Compiler from a set of parallel moves to a sequential list of moves.
LGapResolver resolver_;
Safepoint::Kind expected_safepoint_kind_;
class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
PushSafepointRegistersScope(LCodeGen* codegen,
Safepoint::Kind kind)
: codegen_(codegen) {
ASSERT(codegen_->info()->is_calling());
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->expected_safepoint_kind_ = kind;
switch (codegen_->expected_safepoint_kind_) {
case Safepoint::kWithRegisters: {
StoreRegistersStateStub stub1(codegen_->masm_->isolate(),
kDontSaveFPRegs);
codegen_->masm_->push(ra);
codegen_->masm_->CallStub(&stub1);
break;
}
case Safepoint::kWithRegistersAndDoubles: {
StoreRegistersStateStub stub2(codegen_->masm_->isolate(),
kSaveFPRegs);
codegen_->masm_->push(ra);
codegen_->masm_->CallStub(&stub2);
break;
}
default:
UNREACHABLE();
}
}
~PushSafepointRegistersScope() {
Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
ASSERT((kind & Safepoint::kWithRegisters) != 0);
switch (kind) {
case Safepoint::kWithRegisters: {
RestoreRegistersStateStub stub1(codegen_->masm_->isolate(),
kDontSaveFPRegs);
codegen_->masm_->push(ra);
codegen_->masm_->CallStub(&stub1);
break;
}
case Safepoint::kWithRegistersAndDoubles: {
RestoreRegistersStateStub stub2(codegen_->masm_->isolate(),
kSaveFPRegs);
codegen_->masm_->push(ra);
codegen_->masm_->CallStub(&stub2);
break;
}
default:
UNREACHABLE();
}
codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
}
private:
LCodeGen* codegen_;
};
friend class LDeferredCode;
friend class LEnvironment;
friend class SafepointGenerator;
DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};
class LDeferredCode : public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
: codegen_(codegen),
external_exit_(NULL),
instruction_index_(codegen->current_instruction_) {
codegen->AddDeferredCode(this);
}
virtual ~LDeferredCode() {}
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
void SetExit(Label* exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
int instruction_index() const { return instruction_index_; }
protected:
LCodeGen* codegen() const { return codegen_; }
MacroAssembler* masm() const { return codegen_->masm(); }
private:
LCodeGen* codegen_;
Label entry_;
Label exit_;
Label* external_exit_;
int instruction_index_;
};
} } // namespace v8::internal
#endif // V8_MIPS64_LITHIUM_CODEGEN_MIPS64_H_
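
The LDeferredCode helper at the end of the header is used by defining one small subclass per deferred operation inside the code generator. A hedged sketch of the idiom as it appears in the lithium backends (the surrounding DoStackCheck context and the branch condition are illustrative; DoDeferredStackCheck and the entry()/exit() labels are declared above):

// Inside a hypothetical LCodeGen::DoStackCheck: keep the fast path inline
// and route the slow path through a deferred code object.
class DeferredStackCheck V8_FINAL : public LDeferredCode {
 public:
  DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
      : LDeferredCode(codegen), instr_(instr) { }
  virtual void Generate() V8_OVERRIDE {
    codegen()->DoDeferredStackCheck(instr_);
  }
  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
 private:
  LStackCheck* instr_;
};

// Usage: allocate in the compilation zone (the LDeferredCode constructor
// registers the object via AddDeferredCode), branch to entry() when the
// stack limit is hit, and bind exit() where the fast path resumes.
DeferredStackCheck* deferred = new(zone()) DeferredStackCheck(this, instr);
__ Branch(deferred->entry(), ls, sp, Operand(scratch0()));
__ bind(deferred->exit());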

src/mips64/lithium-gap-resolver-mips64.cc (new file, 300 lines)
@@ -0,0 +1,300 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#include "src/mips64/lithium-codegen-mips64.h"
#include "src/mips64/lithium-gap-resolver-mips64.h"
namespace v8 {
namespace internal {
LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner),
moves_(32, owner->zone()),
root_index_(0),
in_cycle_(false),
saved_destination_(NULL) {}
void LGapResolver::Resolve(LParallelMove* parallel_move) {
ASSERT(moves_.is_empty());
// Build up a worklist of moves.
BuildInitialMoveList(parallel_move);
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands move = moves_[i];
// Skip constants to perform them last. They don't block other moves
// and skipping such moves with register destinations keeps those
// registers free for the whole algorithm.
if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
root_index_ = i; // Any cycle is found by reaching this move again.
PerformMove(i);
if (in_cycle_) {
RestoreValue();
}
}
}
// Perform the moves with constant sources.
for (int i = 0; i < moves_.length(); ++i) {
if (!moves_[i].IsEliminated()) {
ASSERT(moves_[i].source()->IsConstantOperand());
EmitMove(i);
}
}
moves_.Rewind(0);
}
void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
// Perform a linear sweep of the moves to add them to the initial list of
// moves to perform, ignoring any move that is redundant (the source is
// the same as the destination, the destination is ignored and
// unallocated, or the move was already eliminated).
const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
}
Verify();
}
void LGapResolver::PerformMove(int index) {
// Each call to this function performs a move and deletes it from the move
// graph. We first recursively perform any move blocking this one. We
// mark a move as "pending" on entry to PerformMove in order to detect
// cycles in the move graph.
// We can only find a cycle, when doing a depth-first traversal of moves,
// by encountering the starting move again. So by spilling the source of
// the starting move, we break the cycle. All moves are then unblocked,
// and the starting move is completed by writing the spilled value to
// its destination. All other moves from the spilled source have been
// completed prior to breaking the cycle.
// An additional complication is that moves to MemOperands with large
// offsets (more than 1K or 4K) require us to spill this spilled value to
// the stack, to free up the register.
ASSERT(!moves_[index].IsPending());
ASSERT(!moves_[index].IsRedundant());
// Clear this move's destination to indicate a pending move. The actual
// destination is saved in a stack allocated local. Multiple moves can
// be pending because this function is recursive.
ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
LOperand* destination = moves_[index].destination();
moves_[index].set_destination(NULL);
// Perform a depth-first traversal of the move graph to resolve
// dependencies. Any unperformed, unpending move with a source the same
// as this one's destination blocks this one so recursively perform all
// such moves.
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands other_move = moves_[i];
if (other_move.Blocks(destination) && !other_move.IsPending()) {
PerformMove(i);
// If there is a blocking, pending move it must be moves_[root_index_]
// and all other moves with the same source as moves_[root_index_] are
// successfully executed (because they are cycle-free) by this loop.
}
}
// We are about to resolve this move and don't need it marked as
// pending, so restore its destination.
moves_[index].set_destination(destination);
// The move may be blocked on a pending move, which must be the starting move.
// In this case, we have a cycle, and we save the source of this move to
// a scratch register to break it.
LMoveOperands other_move = moves_[root_index_];
if (other_move.Blocks(destination)) {
ASSERT(other_move.IsPending());
BreakCycle(index);
return;
}
// This move is no longer blocked.
EmitMove(index);
}
void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
// No operand should be the destination for more than one move.
for (int i = 0; i < moves_.length(); ++i) {
LOperand* destination = moves_[i].destination();
for (int j = i + 1; j < moves_.length(); ++j) {
SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
}
}
#endif
}
#define __ ACCESS_MASM(cgen_->masm())
void LGapResolver::BreakCycle(int index) {
// We save in a register the value that should end up in the source of
// moves_[root_index]. After performing all moves in the tree rooted
// in that move, we save the value to that source.
ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
ASSERT(!in_cycle_);
in_cycle_ = true;
LOperand* source = moves_[index].source();
saved_destination_ = moves_[index].destination();
if (source->IsRegister()) {
__ mov(kLithiumScratchReg, cgen_->ToRegister(source));
} else if (source->IsStackSlot()) {
__ ld(kLithiumScratchReg, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
__ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
__ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
}
// This move will be done by restoring the saved value to the destination.
moves_[index].Eliminate();
}
void LGapResolver::RestoreValue() {
ASSERT(in_cycle_);
ASSERT(saved_destination_ != NULL);
// Spilled value is in kLithiumScratchReg or kLithiumScratchDouble.
if (saved_destination_->IsRegister()) {
__ mov(cgen_->ToRegister(saved_destination_), kLithiumScratchReg);
} else if (saved_destination_->IsStackSlot()) {
__ sd(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
__ mov_d(cgen_->ToDoubleRegister(saved_destination_),
kLithiumScratchDouble);
} else if (saved_destination_->IsDoubleStackSlot()) {
__ sdc1(kLithiumScratchDouble,
cgen_->ToMemOperand(saved_destination_));
} else {
UNREACHABLE();
}
in_cycle_ = false;
saved_destination_ = NULL;
}
void LGapResolver::EmitMove(int index) {
LOperand* source = moves_[index].source();
LOperand* destination = moves_[index].destination();
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
Register source_register = cgen_->ToRegister(source);
if (destination->IsRegister()) {
__ mov(cgen_->ToRegister(destination), source_register);
} else {
ASSERT(destination->IsStackSlot());
__ sd(source_register, cgen_->ToMemOperand(destination));
}
} else if (source->IsStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsRegister()) {
__ ld(cgen_->ToRegister(destination), source_operand);
} else {
ASSERT(destination->IsStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
if (!destination_operand.OffsetIsInt16Encodable()) {
// 'at' is overwritten while saving the value to the destination.
// Therefore we can't use 'at'. It is OK if the read from the source
// destroys 'at', since that happens before the value is read.
// This uses only a single reg of the double reg-pair.
__ ldc1(kLithiumScratchDouble, source_operand);
__ sdc1(kLithiumScratchDouble, destination_operand);
} else {
__ ld(at, source_operand);
__ sd(at, destination_operand);
}
} else {
__ ld(kLithiumScratchReg, source_operand);
__ sd(kLithiumScratchReg, destination_operand);
}
}
} else if (source->IsConstantOperand()) {
LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
if (cgen_->IsSmi(constant_source)) {
__ li(dst, Operand(cgen_->ToSmi(constant_source)));
} else if (cgen_->IsInteger32(constant_source)) {
__ li(dst, Operand(cgen_->ToInteger32(constant_source)));
} else {
__ li(dst, cgen_->ToHandle(constant_source));
}
} else if (destination->IsDoubleRegister()) {
DoubleRegister result = cgen_->ToDoubleRegister(destination);
double v = cgen_->ToDouble(constant_source);
__ Move(result, v);
} else {
ASSERT(destination->IsStackSlot());
ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
if (cgen_->IsSmi(constant_source)) {
__ li(kLithiumScratchReg, Operand(cgen_->ToSmi(constant_source)));
__ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
} else if (cgen_->IsInteger32(constant_source)) {
__ li(kLithiumScratchReg, Operand(cgen_->ToInteger32(constant_source)));
__ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
} else {
__ li(kLithiumScratchReg, cgen_->ToHandle(constant_source));
__ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
}
}
} else if (source->IsDoubleRegister()) {
DoubleRegister source_register = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ mov_d(cgen_->ToDoubleRegister(destination), source_register);
} else {
ASSERT(destination->IsDoubleStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
__ sdc1(source_register, destination_operand);
}
} else if (source->IsDoubleStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
} else {
ASSERT(destination->IsDoubleStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
// kLithiumScratchDouble was used to break the cycle,
// but kLithiumScratchReg is free.
MemOperand source_high_operand =
cgen_->ToHighMemOperand(source);
MemOperand destination_high_operand =
cgen_->ToHighMemOperand(destination);
__ lw(kLithiumScratchReg, source_operand);
__ sw(kLithiumScratchReg, destination_operand);
__ lw(kLithiumScratchReg, source_high_operand);
__ sw(kLithiumScratchReg, destination_high_operand);
} else {
__ ldc1(kLithiumScratchDouble, source_operand);
__ sdc1(kLithiumScratchDouble, destination_operand);
}
}
} else {
UNREACHABLE();
}
moves_[index].Eliminate();
}
#undef __
} } // namespace v8::internal
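
The cycle-breaking in BreakCycle/RestoreValue is easiest to see on a two-register swap, the smallest parallel move with a cycle. A self-contained sketch of the same idea on plain integers (not V8 code; `scratch` plays the role of kLithiumScratchReg):

#include <cassert>

// Resolve the parallel move {r0 <- r1, r1 <- r0}: PerformMove on the first
// move finds the second one blocking it, recurses, detects the cycle, and
// BreakCycle spills the blocked move's source to a scratch location. The
// remaining move is then emitted, and RestoreValue writes the spilled
// value to the saved destination.
int main() {
  int r0 = 10, r1 = 20;   // Want r0 == 20 and r1 == 10 afterwards.
  int scratch = r0;       // BreakCycle: spill the source of "r1 <- r0".
  r0 = r1;                // EmitMove: the now-unblocked "r0 <- r1".
  r1 = scratch;           // RestoreValue: scratch -> saved destination r1.
  assert(r0 == 20 && r1 == 10);
  return 0;
}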

src/mips64/lithium-gap-resolver-mips64.h (new file, 60 lines)
@@ -0,0 +1,60 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MIPS64_LITHIUM_GAP_RESOLVER_MIPS64_H_
#define V8_MIPS64_LITHIUM_GAP_RESOLVER_MIPS64_H_
#include "src/v8.h"
#include "src/lithium.h"
namespace v8 {
namespace internal {
class LCodeGen;
class LGapResolver;
class LGapResolver V8_FINAL BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
// Resolve a set of parallel moves, emitting assembler instructions.
void Resolve(LParallelMove* parallel_move);
private:
// Build the initial list of moves.
void BuildInitialMoveList(LParallelMove* parallel_move);
// Perform the move at the moves_ index in question (possibly requiring
// other moves to satisfy dependencies).
void PerformMove(int index);
// If a cycle is found in the series of moves, save the blocking value to
// a scratch register. The cycle must be found by hitting the root of the
// depth-first search.
void BreakCycle(int index);
// After a cycle has been resolved, restore the value from the scratch
// register to its proper destination.
void RestoreValue();
// Emit a move and remove it from the move graph.
void EmitMove(int index);
// Verify the move list before performing moves.
void Verify();
LCodeGen* cgen_;
// List of moves not yet resolved.
ZoneList<LMoveOperands> moves_;
int root_index_;
bool in_cycle_;
LOperand* saved_destination_;
};
} } // namespace v8::internal
#endif // V8_MIPS64_LITHIUM_GAP_RESOLVER_MIPS64_H_

src/mips64/lithium-mips64.cc (new file, 2544 lines)
File diff suppressed because it is too large.

src/mips64/lithium-mips64.h (new file, 2821 lines)
File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.

src/mips64/regexp-macro-assembler-mips64.h (new file, 269 lines)
@@ -0,0 +1,269 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MIPS64_REGEXP_MACRO_ASSEMBLER_MIPS64_H_
#define V8_MIPS64_REGEXP_MACRO_ASSEMBLER_MIPS64_H_
#include "src/macro-assembler.h"
#include "src/mips64/assembler-mips64-inl.h"
#include "src/mips64/assembler-mips64.h"
#include "src/mips64/macro-assembler-mips64.h"
namespace v8 {
namespace internal {
#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerMIPS(Mode mode, int registers_to_save, Zone* zone);
virtual ~RegExpMacroAssemblerMIPS();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
virtual void AdvanceRegister(int reg, int by);
virtual void Backtrack();
virtual void Bind(Label* label);
virtual void CheckAtStart(Label* on_at_start);
virtual void CheckCharacter(uint32_t c, Label* on_equal);
virtual void CheckCharacterAfterAnd(uint32_t c,
uint32_t mask,
Label* on_equal);
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
virtual void CheckNotAtStart(Label* on_not_at_start);
virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
Label* on_no_match);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
uint32_t mask,
Label* on_not_equal);
virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
uc16 minus,
uc16 mask,
Label* on_not_equal);
virtual void CheckCharacterInRange(uc16 from,
uc16 to,
Label* on_in_range);
virtual void CheckCharacterNotInRange(uc16 from,
uc16 to,
Label* on_not_in_range);
virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
// Checks whether the given offset from the current position is before
// the end of the string.
virtual void CheckPosition(int cp_offset, Label* on_outside_input);
virtual bool CheckSpecialCharacterClass(uc16 type,
Label* on_no_match);
virtual void Fail();
virtual Handle<HeapObject> GetCode(Handle<String> source);
virtual void GoTo(Label* label);
virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
virtual void IfRegisterEqPos(int reg, Label* if_eq);
virtual IrregexpImplementation Implementation();
virtual void LoadCurrentCharacter(int cp_offset,
Label* on_end_of_input,
bool check_bounds = true,
int characters = 1);
virtual void PopCurrentPosition();
virtual void PopRegister(int register_index);
virtual void PushBacktrack(Label* label);
virtual void PushCurrentPosition();
virtual void PushRegister(int register_index,
StackCheckFlag check_stack_limit);
virtual void ReadCurrentPositionFromRegister(int reg);
virtual void ReadStackPointerFromRegister(int reg);
virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
virtual bool Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
virtual bool CanReadUnaligned();
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
// returning.
static int CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame);
void print_regexp_frame_constants();
private:
#if defined(MIPS_ABI_N64)
// Offsets from frame_pointer() of function parameters and stored registers.
static const int kFramePointer = 0;
// Above the frame pointer - Stored registers and stack passed parameters.
// Registers s0 to s7, fp, and ra.
static const int kStoredRegisters = kFramePointer;
// Return address (stored from link register, read into pc on return).
// TODO(plind): This 9 is 8 s-regs (s0..s7) plus fp.
static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
// Stack frame header.
static const int kStackFrameHeader = kSecondaryReturnAddress;
// Stack parameters placed by caller.
static const int kIsolate = kStackFrameHeader + kPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
static const int kDirectCall = kFramePointer - kPointerSize;
static const int kStackHighEnd = kDirectCall - kPointerSize;
static const int kNumOutputRegisters = kStackHighEnd - kPointerSize;
static const int kRegisterOutput = kNumOutputRegisters - kPointerSize;
static const int kInputEnd = kRegisterOutput - kPointerSize;
static const int kInputStart = kInputEnd - kPointerSize;
static const int kStartIndex = kInputStart - kPointerSize;
static const int kInputString = kStartIndex - kPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kSuccessfulCaptures = kInputString - kPointerSize;
static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
#elif defined(MIPS_ABI_O32)
// Offsets from frame_pointer() of function parameters and stored registers.
static const int kFramePointer = 0;
// Above the frame pointer - Stored registers and stack passed parameters.
// Registers s0 to s7, fp, and ra.
static const int kStoredRegisters = kFramePointer;
// Return address (stored from link register, read into pc on return).
static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
// Stack frame header.
static const int kStackFrameHeader = kReturnAddress + kPointerSize;
// Stack parameters placed by caller.
static const int kRegisterOutput =
kStackFrameHeader + 4 * kPointerSize + kPointerSize;
static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
static const int kInputEnd = kFramePointer - kPointerSize;
static const int kInputStart = kInputEnd - kPointerSize;
static const int kStartIndex = kInputStart - kPointerSize;
static const int kInputString = kStartIndex - kPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kSuccessfulCaptures = kInputString - kPointerSize;
static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
#else
# error "undefined MIPS ABI"
#endif
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
// Load a number of characters at the given offset from the
// current position, into the current-character register.
void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
// Check whether preemption has been requested.
void CheckPreemption();
// Check whether we are exceeding the stack limit on the backtrack stack.
void CheckStackLimit();
// Generate a call to CheckStackGuardState.
void CallCheckStackGuardState(Register scratch);
// The fp-relative location of a regexp register.
MemOperand register_location(int register_index);
// Register holding the current input position as negative offset from
// the end of the string.
inline Register current_input_offset() { return a6; }
// The register containing the current character after LoadCurrentCharacter.
inline Register current_character() { return a7; }
// Register holding address of the end of the input string.
inline Register end_of_input_address() { return t2; }
// Register holding the frame address. Local variables, parameters and
// regexp registers are addressed relative to this.
inline Register frame_pointer() { return fp; }
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
inline Register backtrack_stackpointer() { return t0; }
// Register holding pointer to the current code object.
inline Register code_pointer() { return a5; }
// Byte size of chars in the string to match (decided by the Mode argument).
inline int char_size() { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
// is NULL, in which case it is a conditional Backtrack.
void BranchOrBacktrack(Label* to,
Condition condition,
Register rs,
const Operand& rt);
// Call and return internally in the generated code in a way that
// is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
inline void SafeCall(Label* to,
Condition cond,
Register rs,
const Operand& rt);
inline void SafeReturn();
inline void SafeCallTarget(Label* name);
// Pushes the value of a register on the backtrack stack. Decrements the
// stack pointer by a word size and stores the register's value there.
inline void Push(Register source);
// Pops a value from the backtrack stack. Reads the word at the stack pointer
// and increments it by a word size.
inline void Pop(Register target);
Isolate* isolate() const { return masm_->isolate(); }
MacroAssembler* masm_;
// Which mode to generate code for (ASCII or UC16).
Mode mode_;
// One greater than maximal register index actually used.
int num_registers_;
// Number of registers to output at the end (the saved registers
// are always 0..num_saved_registers_-1).
int num_saved_registers_;
// Labels used internally.
Label entry_label_;
Label start_label_;
Label success_label_;
Label backtrack_label_;
Label exit_label_;
Label check_preempt_label_;
Label stack_overflow_label_;
Label internal_failure_label_;
};
#endif // V8_INTERPRETED_REGEXP
}} // namespace v8::internal
#endif // V8_MIPS64_REGEXP_MACRO_ASSEMBLER_MIPS64_H_
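
Under the N64 ABI branch above, every offset is plain kPointerSize (8-byte) arithmetic off the frame pointer. A small standalone check of the layout (constants copied from the header; not V8 code):

// s0..s7 plus fp give the 9 saved registers mentioned in the TODO, so the
// return address sits 72 bytes above the frame pointer under N64.
static const int kPointerSize = 8;  // N64.
static const int kFramePointer = 0;
static const int kStoredRegisters = kFramePointer;
static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
static const int kStackFrameHeader = kSecondaryReturnAddress;
static const int kIsolate = kStackFrameHeader + kPointerSize;

static_assert(kReturnAddress == 72, "ra slot after 9 saved registers");
static_assert(kIsolate == 88, "first stack-passed parameter under N64");

int main() { return 0; }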

File diff suppressed because it is too large.

src/mips64/simulator-mips64.h (new file, 479 lines)
@@ -0,0 +1,479 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Declares a Simulator for MIPS instructions if we are not generating a native
// MIPS binary. This Simulator allows us to run and debug MIPS code generation
// on regular desktop machines.
// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
// which will start execution in the Simulator or forwards to the real entry
// on a MIPS HW platform.
#ifndef V8_MIPS64_SIMULATOR_MIPS64_H_
#define V8_MIPS64_SIMULATOR_MIPS64_H_
#include "src/allocation.h"
#include "src/mips64/constants-mips64.h"
#if !defined(USE_SIMULATOR)
// Running without a simulator on a native mips platform.
namespace v8 {
namespace internal {
// When running without a simulator we call the entry directly.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
entry(p0, p1, p2, p3, p4)
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type arm_regexp_matcher.
// The fifth (or ninth) argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
#ifdef MIPS_ABI_N64
typedef int (*mips_regexp_matcher)(String* input,
int64_t start_offset,
const byte* input_start,
const byte* input_end,
int* output,
int64_t output_size,
Address stack_base,
int64_t direct_call,
void* return_address,
Isolate* isolate);
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
(FUNCTION_CAST<mips_regexp_matcher>(entry)( \
p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
#else // O32 ABI.
typedef int (*mips_regexp_matcher)(String* input,
int32_t start_offset,
const byte* input_start,
const byte* input_end,
void* return_address,
int* output,
int32_t output_size,
Address stack_base,
int32_t direct_call,
Isolate* isolate);
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
(FUNCTION_CAST<mips_regexp_matcher>(entry)( \
p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
#endif // MIPS_ABI_N64
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on mips uses the C stack, we
// just use the C stack limit.
class SimulatorStack : public v8::internal::AllStatic {
public:
static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
uintptr_t c_limit) {
return c_limit;
}
static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
return try_catch_address;
}
static inline void UnregisterCTryCatch() { }
};
} } // namespace v8::internal
// Calculates the stack limit beyond which we will throw stack overflow errors.
// This macro must be called from a C++ method. It relies on being able to take
// the address of "this" to get a value on the current execution stack and then
// calculates the stack limit based on that value.
// NOTE: The check for overflow is not safe as there is no guarantee that the
// running thread has its stack in all memory up to address 0x00000000.
#define GENERATED_CODE_STACK_LIMIT(limit) \
(reinterpret_cast<uintptr_t>(this) >= limit ? \
reinterpret_cast<uintptr_t>(this) - limit : 0)
#else // !defined(USE_SIMULATOR)
// Running with a simulator.
#include "src/assembler.h"
#include "src/hashmap.h"
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
// Utility functions
class CachePage {
public:
static const int LINE_VALID = 0;
static const int LINE_INVALID = 1;
static const int kPageShift = 12;
static const int kPageSize = 1 << kPageShift;
static const int kPageMask = kPageSize - 1;
static const int kLineShift = 2; // The cache line is only 4 bytes right now.
static const int kLineLength = 1 << kLineShift;
static const int kLineMask = kLineLength - 1;
CachePage() {
memset(&validity_map_, LINE_INVALID, sizeof(validity_map_));
}
char* ValidityByte(int offset) {
return &validity_map_[offset >> kLineShift];
}
char* CachedData(int offset) {
return &data_[offset];
}
private:
char data_[kPageSize]; // The cached data.
static const int kValidityMapSize = kPageSize >> kLineShift;
char validity_map_[kValidityMapSize]; // One byte per line.
};
class Simulator {
public:
friend class MipsDebugger;
// Registers are declared in order. See SMRL chapter 2.
enum Register {
no_reg = -1,
zero_reg = 0,
at,
v0, v1,
a0, a1, a2, a3, a4, a5, a6, a7,
t0, t1, t2, t3,
s0, s1, s2, s3, s4, s5, s6, s7,
t8, t9,
k0, k1,
gp,
sp,
s8,
ra,
// LO, HI, and pc.
LO,
HI,
pc, // pc must be the last register.
kNumSimuRegisters,
// aliases
fp = s8
};
// Coprocessor registers.
// Generated code will always use doubles. So we will only use even registers.
enum FPURegister {
f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
f12, f13, f14, f15, // f12 and f14 are arguments FPURegisters.
f16, f17, f18, f19, f20, f21, f22, f23, f24, f25,
f26, f27, f28, f29, f30, f31,
kNumFPURegisters
};
explicit Simulator(Isolate* isolate);
~Simulator();
// The currently executing Simulator instance. Potentially there can be one
// for each native thread.
static Simulator* current(v8::internal::Isolate* isolate);
// Accessors for register state. Reading the pc value adheres to the MIPS
// architecture specification and is off by 8 from the currently executing
// instruction.
void set_register(int reg, int64_t value);
void set_register_word(int reg, int32_t value);
void set_dw_register(int dreg, const int* dbl);
int64_t get_register(int reg) const;
double get_double_from_register_pair(int reg);
// Same for FPURegisters.
void set_fpu_register(int fpureg, int64_t value);
void set_fpu_register_word(int fpureg, int32_t value);
void set_fpu_register_hi_word(int fpureg, int32_t value);
void set_fpu_register_float(int fpureg, float value);
void set_fpu_register_double(int fpureg, double value);
int64_t get_fpu_register(int fpureg) const;
int32_t get_fpu_register_word(int fpureg) const;
int32_t get_fpu_register_signed_word(int fpureg) const;
uint32_t get_fpu_register_hi_word(int fpureg) const;
float get_fpu_register_float(int fpureg) const;
double get_fpu_register_double(int fpureg) const;
void set_fcsr_bit(uint32_t cc, bool value);
bool test_fcsr_bit(uint32_t cc);
bool set_fcsr_round_error(double original, double rounded);
bool set_fcsr_round64_error(double original, double rounded);
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int64_t value);
int64_t get_pc() const;
Address get_sp() {
return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
}
// Accessor to the internal simulator stack area.
uintptr_t StackLimit() const;
// Executes MIPS instructions until the PC reaches end_sim_pc.
void Execute();
// Call on program start.
static void Initialize(Isolate* isolate);
// V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function,
// which sets up the simulator state and grabs the result on return.
int64_t Call(byte* entry, int argument_count, ...);
// Alternative: call a 2-argument double function.
double CallFP(byte* entry, double d0, double d1);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
// Pop an address from the JS stack.
uintptr_t PopAddress();
// Debugger input.
void set_last_debugger_input(char* input);
char* last_debugger_input() { return last_debugger_input_; }
// ICache checking.
static void FlushICache(v8::internal::HashMap* i_cache, void* start,
size_t size);
// Returns true if pc register contains one of the 'special_values' defined
// below (bad_ra, end_sim_pc).
bool has_bad_pc() const;
private:
enum special_values {
// Known bad pc value to ensure that the simulator does not execute
// without being properly setup.
bad_ra = -1,
// A pc value used to signal the simulator to stop execution. Generally
// the ra is set to this value on transition from native C code to
// simulated execution, so that the simulator can "return" to the native
// C code.
end_sim_pc = -2,
// Unpredictable value.
Unpredictable = 0xbadbeaf
};
// Unsupported instructions use Format to print an error and stop execution.
void Format(Instruction* instr, const char* format);
// Read and write memory.
inline uint32_t ReadBU(int64_t addr);
inline int32_t ReadB(int64_t addr);
inline void WriteB(int64_t addr, uint8_t value);
inline void WriteB(int64_t addr, int8_t value);
inline uint16_t ReadHU(int64_t addr, Instruction* instr);
inline int16_t ReadH(int64_t addr, Instruction* instr);
// Note: Overloaded on the sign of the value.
inline void WriteH(int64_t addr, uint16_t value, Instruction* instr);
inline void WriteH(int64_t addr, int16_t value, Instruction* instr);
inline uint32_t ReadWU(int64_t addr, Instruction* instr);
inline int32_t ReadW(int64_t addr, Instruction* instr);
inline void WriteW(int64_t addr, int32_t value, Instruction* instr);
inline int64_t Read2W(int64_t addr, Instruction* instr);
inline void Write2W(int64_t addr, int64_t value, Instruction* instr);
inline double ReadD(int64_t addr, Instruction* instr);
inline void WriteD(int64_t addr, double value, Instruction* instr);
// Helper for debugging memory access.
inline void DieOrDebug();
// Helpers for data value tracing.
enum TraceType {
BYTE,
HALF,
WORD,
DWORD
// DFLOAT - Floats may have printing issues due to paired lwc1's
};
void TraceRegWr(int64_t value);
void TraceMemWr(int64_t addr, int64_t value, TraceType t);
void TraceMemRd(int64_t addr, int64_t value);
// Operations depending on endianness.
// Get Double Higher / Lower word.
inline int32_t GetDoubleHIW(double* addr);
inline int32_t GetDoubleLOW(double* addr);
// Set Double Higher / Lower word.
inline int32_t SetDoubleHIW(double* addr);
inline int32_t SetDoubleLOW(double* addr);
// Executing is handled based on the instruction type.
void DecodeTypeRegister(Instruction* instr);
// Helper function for DecodeTypeRegister.
void ConfigureTypeRegister(Instruction* instr,
int64_t* alu_out,
int64_t* i64hilo,
uint64_t* u64hilo,
int64_t* next_pc,
int64_t* return_addr_reg,
bool* do_interrupt,
int64_t* result128H,
int64_t* result128L);
void DecodeTypeImmediate(Instruction* instr);
void DecodeTypeJump(Instruction* instr);
// Used for breakpoints and traps.
void SoftwareInterrupt(Instruction* instr);
// Stop helper functions.
bool IsWatchpoint(uint64_t code);
void PrintWatchpoint(uint64_t code);
void HandleStop(uint64_t code, Instruction* instr);
bool IsStopInstruction(Instruction* instr);
bool IsEnabledStop(uint64_t code);
void EnableStop(uint64_t code);
void DisableStop(uint64_t code);
void IncreaseStopCounter(uint64_t code);
void PrintStopInfo(uint64_t code);
// Executes one instruction.
void InstructionDecode(Instruction* instr);
// Execute one instruction placed in a branch delay slot.
void BranchDelayInstructionDecode(Instruction* instr) {
if (instr->InstructionBits() == nopInstr) {
// Short-cut generic nop instructions. They are always valid and they
// never change the simulator state.
return;
}
if (instr->IsForbiddenInBranchDelay()) {
V8_Fatal(__FILE__, __LINE__,
"Eror:Unexpected %i opcode in a branch delay slot.",
instr->OpcodeValue());
}
InstructionDecode(instr);
}
// ICache.
static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
int size);
static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
enum Exception {
none,
kIntegerOverflow,
kIntegerUnderflow,
kDivideByZero,
kNumExceptions
};
int16_t exceptions[kNumExceptions];
// Exceptions.
void SignalExceptions();
// Runtime call support.
static void* RedirectExternalReference(void* external_function,
ExternalReference::Type type);
// Handle arguments and return value for runtime FP functions.
void GetFpArgs(double* x, double* y, int32_t* z);
void SetFpResult(const double& result);
void CallInternal(byte* entry);
// Architecture state.
// Registers.
int64_t registers_[kNumSimuRegisters];
// Coprocessor Registers.
int64_t FPUregisters_[kNumFPURegisters];
// FPU control register.
uint32_t FCSR_;
// Simulator support.
// Allocate 1MB for stack.
size_t stack_size_;
char* stack_;
bool pc_modified_;
int64_t icount_;
int break_count_;
EmbeddedVector<char, 128> trace_buf_;
// Debugger input.
char* last_debugger_input_;
// Icache simulation.
v8::internal::HashMap* i_cache_;
v8::internal::Isolate* isolate_;
// Registered breakpoints.
Instruction* break_pc_;
Instr break_instr_;
// Stop is disabled if bit 31 is set.
static const uint32_t kStopDisabledBit = 1 << 31;
// A stop is enabled, meaning the simulator will stop when meeting the
// instruction, if bit 31 of watched_stops_[code].count is unset.
// The value watched_stops_[code].count & ~(1 << 31) indicates how many times
// the breakpoint was hit or gone through.
struct StopCountAndDesc {
uint32_t count;
char* desc;
};
StopCountAndDesc watched_stops_[kMaxStopCode + 1];
};
// When running with the simulator transition into simulated execution at this
// point.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
#ifdef MIPS_ABI_N64
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
Simulator::current(Isolate::Current())->Call( \
entry, 10, p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8)
#else // Must be O32 ABI.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
Simulator::current(Isolate::Current())->Call( \
entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
#endif // MIPS_ABI_N64
// The simulator has its own stack. Thus it has a different stack limit from
// the C-based native code. Setting the c_limit to indicate a very small
// stack does not cause stack overflow errors, since the simulator ignores
// the input.
// This is unlikely to be an issue in practice, though it might cause testing
// trouble down the line.
class SimulatorStack : public v8::internal::AllStatic {
public:
static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
uintptr_t c_limit) {
return Simulator::current(isolate)->StackLimit();
}
static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
Simulator* sim = Simulator::current(Isolate::Current());
return sim->PushAddress(try_catch_address);
}
static inline void UnregisterCTryCatch() {
Simulator::current(Isolate::Current())->PopAddress();
}
};
} } // namespace v8::internal
#endif // !defined(USE_SIMULATOR)
#endif // V8_MIPS64_SIMULATOR_MIPS64_H_
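
The CachePage class above models the simulated i-cache as 4KB pages, each with one validity byte per 4-byte line. A standalone sketch of the address split that the i-cache checking relies on (constants copied from the class; not V8 code):

#include <cassert>
#include <cstdint>

static const int kPageShift = 12;                  // 4KB simulated pages.
static const int kPageMask = (1 << kPageShift) - 1;
static const int kLineShift = 2;                   // 4-byte cache lines.

int main() {
  // Split a simulated code address into the page base (used as the
  // i_cache hash-map key) and the index of its validity byte, the way
  // CachePage::ValidityByte addresses validity_map_.
  uintptr_t addr = 0x12345678;
  uintptr_t page = addr & ~static_cast<uintptr_t>(kPageMask);
  int offset = static_cast<int>(addr & kPageMask);
  int validity_index = offset >> kLineShift;
  assert(page == 0x12345000);
  assert(validity_index == 0x19e);
  return 0;
}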

File diff suppressed because it is too large.

src/objects.cc
@@ -9012,8 +9012,8 @@ static inline bool CompareRawStringContents(const Char* const a,
// then we have to check that the strings are aligned before
// comparing them blockwise.
const int kAlignmentMask = sizeof(uint32_t) - 1; // NOLINT
-  uint32_t pa_addr = reinterpret_cast<uint32_t>(a);
-  uint32_t pb_addr = reinterpret_cast<uint32_t>(b);
+  uintptr_t pa_addr = reinterpret_cast<uintptr_t>(a);
+  uintptr_t pb_addr = reinterpret_cast<uintptr_t>(b);
if (((pa_addr & kAlignmentMask) | (pb_addr & kAlignmentMask)) == 0) {
#endif
const int kStepSize = sizeof(int) / sizeof(Char); // NOLINT
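
This hunk is one of the generic 64-bit fixes in the port: reinterpret_cast from a pointer to uint32_t does not compile on an LP64 target (a cast to a smaller integer type), and would discard the upper address bits even if forced, so uintptr_t is the portable choice for the alignment test. A minimal illustration (standalone, not V8 code):

#include <cstdint>

// Alignment test that works on both 32-bit and 64-bit targets: uintptr_t
// is guaranteed wide enough to hold the full pointer value.
static bool IsUint32Aligned(const void* p) {
  const uintptr_t kAlignmentMask = sizeof(uint32_t) - 1;
  return (reinterpret_cast<uintptr_t>(p) & kAlignmentMask) == 0;
}

int main() {
  int buffer[2] = {0, 0};
  return IsUint32Aligned(buffer) ? 0 : 1;  // int arrays are 4-aligned.
}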

@@ -25,6 +25,8 @@
#include "src/arm64/constants-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/constants-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/constants-mips64.h" // NOLINT
#endif

src/sampler.cc
@@ -256,6 +256,12 @@ class SimulatorHelper {
Simulator::sp));
state->fp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::fp));
#elif V8_TARGET_ARCH_MIPS64
state->pc = reinterpret_cast<Address>(simulator_->get_pc());
state->sp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::sp));
state->fp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::fp));
#endif
}
@@ -393,6 +399,10 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
state.pc = reinterpret_cast<Address>(mcontext.pc);
state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
state.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
#elif V8_HOST_ARCH_MIPS64
state.pc = reinterpret_cast<Address>(mcontext.pc);
state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
state.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
#endif // V8_HOST_ARCH_*
#elif V8_OS_MACOSX
#if V8_HOST_ARCH_X64

src/serialize.cc
@@ -1108,7 +1108,8 @@ void Deserializer::ReadChunk(Object** current,
// allocation point and write a pointer to it to the current object.
ALL_SPACES(kBackref, kPlain, kStartOfObject)
ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
-#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL
+#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL || \
+    defined(V8_TARGET_ARCH_MIPS64)
// Deserialize a new object from pointer found in code and write
// a pointer to it to the current object. Required only for MIPS or ARM
// with ool constant pool, and omitted on the other architectures because
@@ -1326,7 +1327,8 @@ int Serializer::RootIndex(HeapObject* heap_object, HowToCode from) {
for (int i = 0; i < root_index_wave_front_; i++) {
Object* root = heap->roots_array_start()[i];
if (!root->IsSmi() && root == heap_object) {
-#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL
+#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL || \
+    defined(V8_TARGET_ARCH_MIPS64)
if (from == kFromCode) {
// In order to avoid code bloat in the deserializer we don't have
// support for the encoding that specifies a particular root should

src/simulator.h
@@ -15,6 +15,8 @@
#include "src/arm/simulator-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/simulator-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/simulator-mips64.h"
#elif V8_TARGET_ARCH_X87
#include "src/x87/simulator-x87.h"
#else

test/cctest/cctest.gyp
@@ -179,6 +179,15 @@
'test-macro-assembler-mips.cc'
],
}],
['v8_target_arch=="mips64el"', {
'sources': [
'test-assembler-mips64.cc',
'test-code-stubs.cc',
'test-code-stubs-mips64.cc',
'test-disasm-mips64.cc',
'test-macro-assembler-mips64.cc'
],
}],
['v8_target_arch=="x87"', {
'sources': [ ### gcmole(arch:x87) ###
'test-assembler-x87.cc',

View File

@ -205,6 +205,22 @@
'test-serialize/DeserializeFromSecondSerialization': [SKIP],
}], # 'arch == mipsel or arch == mips'
##############################################################################
['arch == mips64el', {
# BUG(2657): Test sometimes times out on MIPS simulator.
'test-thread-termination/TerminateMultipleV8ThreadsDefaultIsolate': [PASS, TIMEOUT],
# BUG(v8:3154).
'test-heap/ReleaseOverReservedPages': [PASS, FAIL],
# BUG(1075): Unresolved crashes on MIPS also.
'test-serialize/Deserialize': [SKIP],
'test-serialize/DeserializeFromSecondSerializationAndRunScript2': [SKIP],
'test-serialize/DeserializeAndRunScript2': [SKIP],
'test-serialize/DeserializeFromSecondSerialization': [SKIP],
}], # 'arch == mips64el'
##############################################################################
['arch == x87', {

View File

@ -18125,7 +18125,8 @@ THREADED_TEST(QuietSignalingNaNs) {
} else {
uint64_t stored_bits = DoubleToBits(stored_number);
// Check if quiet nan (bits 51..62 all set).
#if defined(V8_TARGET_ARCH_MIPS) && !defined(USE_SIMULATOR)
#if (defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)) && \
!defined(USE_SIMULATOR)
// Most significant fraction bit for quiet nan is set to 0
// on MIPS architecture. Allowed by IEEE-754.
CHECK_EQ(0xffe, static_cast<int>((stored_bits >> 51) & 0xfff));
@ -18145,7 +18146,8 @@ THREADED_TEST(QuietSignalingNaNs) {
} else {
uint64_t stored_bits = DoubleToBits(stored_date);
// Check if quiet nan (bits 51..62 all set).
#if defined(V8_TARGET_ARCH_MIPS) && !defined(USE_SIMULATOR)
#if (defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)) && \
!defined(USE_SIMULATOR)
// Most significant fraction bit for quiet nan is set to 0
// on MIPS architecture. Allowed by IEEE-754.
CHECK_EQ(0xffe, static_cast<int>((stored_bits >> 51) & 0xfff));
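Why 0xffe: shifting right by 51 exposes the 11 exponent bits plus the most significant fraction bit. Every NaN has an all-ones exponent; most architectures set the top fraction bit for a quiet NaN (giving 0xfff), while classic MIPS uses the inverted convention, so its quiet NaNs read 0xffe. A small standalone sketch of the same bit test (NaNTopBits is a hypothetical helper, not V8's DoubleToBits):

#include <cstdint>
#include <cstring>

// Returns bits 62..51 of a double: the exponent plus the quiet-NaN bit.
static uint32_t NaNTopBits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));  // Bit-exact, strict-aliasing safe.
  return static_cast<uint32_t>((bits >> 51) & 0xfff);
}
// Expected: 0xfff on x64/arm, 0xffe on mips/mips64 hardware.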

File diff suppressed because it is too large

View File

@ -0,0 +1,188 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
#include "src/v8.h"
#include "src/base/platform/platform.h"
#include "src/code-stubs.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
#include "src/mips64/constants-mips64.h"
#include "src/simulator.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-code-stubs.h"
using namespace v8::internal;
#define __ masm.
ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Register source_reg,
Register destination_reg,
bool inline_fastpath) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size));
DoubleToIStub stub(isolate, source_reg, destination_reg, 0, true,
inline_fastpath);
byte* start = stub.GetCode()->instruction_start();
Label done;
// Save callee save registers.
__ MultiPush(kCalleeSaved | ra.bit());
// For softfp, move the input value into f12.
if (IsMipsSoftFloatABI) {
__ Move(f12, a0, a1);
}
// Push the double argument.
__ Dsubu(sp, sp, Operand(kDoubleSize));
__ sdc1(f12, MemOperand(sp));
__ Move(source_reg, sp);
// Save registers to make sure they don't get clobbered.
int source_reg_offset = kDoubleSize;
int reg_num = 2;
for (;reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
Register reg = Register::from_code(reg_num);
if (!reg.is(destination_reg)) {
__ push(reg);
source_reg_offset += kPointerSize;
}
}
// Re-push the double argument.
__ Dsubu(sp, sp, Operand(kDoubleSize));
__ sdc1(f12, MemOperand(sp));
// Call through to the actual stub
if (inline_fastpath) {
__ ldc1(f12, MemOperand(source_reg));
__ TryInlineTruncateDoubleToI(destination_reg, f12, &done);
if (destination_reg.is(source_reg) && !source_reg.is(sp)) {
// Restore clobbered source_reg.
__ Daddu(source_reg, sp, Operand(source_reg_offset));
}
}
__ Call(start, RelocInfo::EXTERNAL_REFERENCE);
__ bind(&done);
__ Daddu(sp, sp, Operand(kDoubleSize));
// Make sure no registers have been unexpectedly clobbered
for (--reg_num; reg_num >= 2; --reg_num) {
Register reg = Register::from_code(reg_num);
if (!reg.is(destination_reg)) {
__ ld(at, MemOperand(sp, 0));  // 64-bit load: registers were pushed as doublewords.
__ Assert(eq, kRegisterWasClobbered, reg, Operand(at));
__ Daddu(sp, sp, Operand(kPointerSize));
}
}
__ Daddu(sp, sp, Operand(kDoubleSize));
__ Move(v0, destination_reg);
Label ok;
__ Branch(&ok, eq, v0, Operand(zero_reg));
__ bind(&ok);
// Restore callee save registers.
__ MultiPop(kCalleeSaved | ra.bit());
Label ok1;
__ Branch(&ok1, eq, v0, Operand(zero_reg));
__ bind(&ok1);
__ Ret();
CodeDesc desc;
masm.GetCode(&desc);
CpuFeatures::FlushICache(buffer, actual_size);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}
#undef __
static Isolate* GetIsolateFrom(LocalContext* context) {
return reinterpret_cast<Isolate*>((*context)->GetIsolate());
}
int32_t RunGeneratedCodeCallWrapper(ConvertDToIFunc func,
double from) {
#ifdef USE_SIMULATOR
Simulator::current(Isolate::Current())->CallFP(FUNCTION_ADDR(func), from, 0.);
return Simulator::current(Isolate::Current())->get_register(v0.code());
#else
return (*func)(from);
#endif
}
TEST(ConvertDToI) {
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = GetIsolateFrom(&context);
HandleScope scope(isolate);
#if DEBUG
// Verify that the tests actually work with the C version. In release builds
// the compiler optimizes the expression away because it is all constant, but
// does so incorrectly, triggering an assert on gcc.
RunAllTruncationTests(&ConvertDToICVersion);
#endif
Register source_registers[] = {
sp, v0, v1, a0, a1, a2, a3, a4, a5, a6, a7, t0, t1};
Register dest_registers[] = {
v0, v1, a0, a1, a2, a3, a4, a5, a6, a7, t0, t1};
for (size_t s = 0; s < sizeof(source_registers) / sizeof(Register); s++) {
for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
RunAllTruncationTests(
RunGeneratedCodeCallWrapper,
MakeConvertDToIFuncTrampoline(isolate,
source_registers[s],
dest_registers[d],
false));
RunAllTruncationTests(
RunGeneratedCodeCallWrapper,
MakeConvertDToIFuncTrampoline(isolate,
source_registers[s],
dest_registers[d],
true));
}
}
}

View File

@ -0,0 +1,539 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <stdlib.h>
#include "src/v8.h"
#include "src/debug.h"
#include "src/disasm.h"
#include "src/disassembler.h"
#include "src/macro-assembler.h"
#include "src/serialize.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
bool DisassembleAndCompare(byte* pc, const char* compare_string) {
disasm::NameConverter converter;
disasm::Disassembler disasm(converter);
EmbeddedVector<char, 128> disasm_buffer;
disasm.InstructionDecode(disasm_buffer, pc);
if (strcmp(compare_string, disasm_buffer.start()) != 0) {
fprintf(stderr,
"expected: \n"
"%s\n"
"disassembled: \n"
"%s\n\n",
compare_string, disasm_buffer.start());
return false;
}
return true;
}
// Set up V8 to a state where we can at least run the assembler and
// disassembler. Declare the variables and allocate the data structures used
// in the rest of the macros.
#define SET_UP() \
CcTest::InitializeVM(); \
Isolate* isolate = CcTest::i_isolate(); \
HandleScope scope(isolate); \
byte *buffer = reinterpret_cast<byte*>(malloc(4*1024)); \
Assembler assm(isolate, buffer, 4*1024); \
bool failure = false;
// This macro assembles one instruction using the preallocated assembler and
// disassembles the generated instruction, comparing the output to the expected
// value. If the comparison fails an error message is printed, but the test
// continues to run until the end.
#define COMPARE(asm_, compare_string) \
{ \
int pc_offset = assm.pc_offset(); \
byte *progcounter = &buffer[pc_offset]; \
assm.asm_; \
if (!DisassembleAndCompare(progcounter, compare_string)) failure = true; \
}
// Verify that all invocations of the COMPARE macro passed successfully.
// Exit with a failure if at least one of the tests failed.
#define VERIFY_RUN() \
if (failure) { \
V8_Fatal(__FILE__, __LINE__, "MIPS Disassembler tests failed.\n"); \
}
TEST(Type0) {
SET_UP();
COMPARE(addu(a0, a1, a2),
"00a62021 addu a0, a1, a2");
COMPARE(daddu(a0, a1, a2),
"00a6202d daddu a0, a1, a2");
COMPARE(addu(a6, a7, t0),
"016c5021 addu a6, a7, t0");
COMPARE(daddu(a6, a7, t0),
"016c502d daddu a6, a7, t0");
COMPARE(addu(v0, v1, s0),
"00701021 addu v0, v1, s0");
COMPARE(daddu(v0, v1, s0),
"0070102d daddu v0, v1, s0");
COMPARE(subu(a0, a1, a2),
"00a62023 subu a0, a1, a2");
COMPARE(dsubu(a0, a1, a2),
"00a6202f dsubu a0, a1, a2");
COMPARE(subu(a6, a7, t0),
"016c5023 subu a6, a7, t0");
COMPARE(dsubu(a6, a7, t0),
"016c502f dsubu a6, a7, t0");
COMPARE(subu(v0, v1, s0),
"00701023 subu v0, v1, s0");
COMPARE(dsubu(v0, v1, s0),
"0070102f dsubu v0, v1, s0");
COMPARE(mult(a0, a1),
"00850018 mult a0, a1");
COMPARE(dmult(a0, a1),
"0085001c dmult a0, a1");
COMPARE(mult(a6, a7),
"014b0018 mult a6, a7");
COMPARE(dmult(a6, a7),
"014b001c dmult a6, a7");
COMPARE(mult(v0, v1),
"00430018 mult v0, v1");
COMPARE(dmult(v0, v1),
"0043001c dmult v0, v1");
COMPARE(multu(a0, a1),
"00850019 multu a0, a1");
COMPARE(dmultu(a0, a1),
"0085001d dmultu a0, a1");
COMPARE(multu(a6, a7),
"014b0019 multu a6, a7");
COMPARE(dmultu(a6, a7),
"014b001d dmultu a6, a7");
COMPARE(multu(v0, v1),
"00430019 multu v0, v1");
COMPARE(dmultu(v0, v1),
"0043001d dmultu v0, v1");
COMPARE(div(a0, a1),
"0085001a div a0, a1");
COMPARE(div(a6, a7),
"014b001a div a6, a7");
COMPARE(div(v0, v1),
"0043001a div v0, v1");
COMPARE(ddiv(a0, a1),
"0085001e ddiv a0, a1");
COMPARE(ddiv(a6, a7),
"014b001e ddiv a6, a7");
COMPARE(ddiv(v0, v1),
"0043001e ddiv v0, v1");
COMPARE(divu(a0, a1),
"0085001b divu a0, a1");
COMPARE(divu(a6, a7),
"014b001b divu a6, a7");
COMPARE(divu(v0, v1),
"0043001b divu v0, v1");
COMPARE(ddivu(a0, a1),
"0085001f ddivu a0, a1");
COMPARE(ddivu(a6, a7),
"014b001f ddivu a6, a7");
COMPARE(ddivu(v0, v1),
"0043001f ddivu v0, v1");
if (kArchVariant != kLoongson) {
COMPARE(mul(a0, a1, a2),
"70a62002 mul a0, a1, a2");
COMPARE(mul(a6, a7, t0),
"716c5002 mul a6, a7, t0");
COMPARE(mul(v0, v1, s0),
"70701002 mul v0, v1, s0");
}
COMPARE(addiu(a0, a1, 0x0),
"24a40000 addiu a0, a1, 0");
COMPARE(addiu(s0, s1, 32767),
"26307fff addiu s0, s1, 32767");
COMPARE(addiu(a6, a7, -32768),
"256a8000 addiu a6, a7, -32768");
COMPARE(addiu(v0, v1, -1),
"2462ffff addiu v0, v1, -1");
COMPARE(daddiu(a0, a1, 0x0),
"64a40000 daddiu a0, a1, 0");
COMPARE(daddiu(s0, s1, 32767),
"66307fff daddiu s0, s1, 32767");
COMPARE(daddiu(a6, a7, -32768),
"656a8000 daddiu a6, a7, -32768");
COMPARE(daddiu(v0, v1, -1),
"6462ffff daddiu v0, v1, -1");
COMPARE(and_(a0, a1, a2),
"00a62024 and a0, a1, a2");
COMPARE(and_(s0, s1, s2),
"02328024 and s0, s1, s2");
COMPARE(and_(a6, a7, t0),
"016c5024 and a6, a7, t0");
COMPARE(and_(v0, v1, a2),
"00661024 and v0, v1, a2");
COMPARE(or_(a0, a1, a2),
"00a62025 or a0, a1, a2");
COMPARE(or_(s0, s1, s2),
"02328025 or s0, s1, s2");
COMPARE(or_(a6, a7, t0),
"016c5025 or a6, a7, t0");
COMPARE(or_(v0, v1, a2),
"00661025 or v0, v1, a2");
COMPARE(xor_(a0, a1, a2),
"00a62026 xor a0, a1, a2");
COMPARE(xor_(s0, s1, s2),
"02328026 xor s0, s1, s2");
COMPARE(xor_(a6, a7, t0),
"016c5026 xor a6, a7, t0");
COMPARE(xor_(v0, v1, a2),
"00661026 xor v0, v1, a2");
COMPARE(nor(a0, a1, a2),
"00a62027 nor a0, a1, a2");
COMPARE(nor(s0, s1, s2),
"02328027 nor s0, s1, s2");
COMPARE(nor(a6, a7, t0),
"016c5027 nor a6, a7, t0");
COMPARE(nor(v0, v1, a2),
"00661027 nor v0, v1, a2");
COMPARE(andi(a0, a1, 0x1),
"30a40001 andi a0, a1, 0x1");
COMPARE(andi(v0, v1, 0xffff),
"3062ffff andi v0, v1, 0xffff");
COMPARE(ori(a0, a1, 0x1),
"34a40001 ori a0, a1, 0x1");
COMPARE(ori(v0, v1, 0xffff),
"3462ffff ori v0, v1, 0xffff");
COMPARE(xori(a0, a1, 0x1),
"38a40001 xori a0, a1, 0x1");
COMPARE(xori(v0, v1, 0xffff),
"3862ffff xori v0, v1, 0xffff");
COMPARE(lui(a0, 0x1),
"3c040001 lui a0, 0x1");
COMPARE(lui(v0, 0xffff),
"3c02ffff lui v0, 0xffff");
COMPARE(sll(a0, a1, 0),
"00052000 sll a0, a1, 0");
COMPARE(sll(s0, s1, 8),
"00118200 sll s0, s1, 8");
COMPARE(sll(a6, a7, 24),
"000b5600 sll a6, a7, 24");
COMPARE(sll(v0, v1, 31),
"000317c0 sll v0, v1, 31");
COMPARE(dsll(a0, a1, 0),
"00052038 dsll a0, a1, 0");
COMPARE(dsll(s0, s1, 8),
"00118238 dsll s0, s1, 8");
COMPARE(dsll(a6, a7, 24),
"000b5638 dsll a6, a7, 24");
COMPARE(dsll(v0, v1, 31),
"000317f8 dsll v0, v1, 31");
COMPARE(sllv(a0, a1, a2),
"00c52004 sllv a0, a1, a2");
COMPARE(sllv(s0, s1, s2),
"02518004 sllv s0, s1, s2");
COMPARE(sllv(a6, a7, t0),
"018b5004 sllv a6, a7, t0");
COMPARE(sllv(v0, v1, fp),
"03c31004 sllv v0, v1, fp");
COMPARE(dsllv(a0, a1, a2),
"00c52014 dsllv a0, a1, a2");
COMPARE(dsllv(s0, s1, s2),
"02518014 dsllv s0, s1, s2");
COMPARE(dsllv(a6, a7, t0),
"018b5014 dsllv a6, a7, t0");
COMPARE(dsllv(v0, v1, fp),
"03c31014 dsllv v0, v1, fp");
COMPARE(srl(a0, a1, 0),
"00052002 srl a0, a1, 0");
COMPARE(srl(s0, s1, 8),
"00118202 srl s0, s1, 8");
COMPARE(srl(a6, a7, 24),
"000b5602 srl a6, a7, 24");
COMPARE(srl(v0, v1, 31),
"000317c2 srl v0, v1, 31");
COMPARE(dsrl(a0, a1, 0),
"0005203a dsrl a0, a1, 0");
COMPARE(dsrl(s0, s1, 8),
"0011823a dsrl s0, s1, 8");
COMPARE(dsrl(a6, a7, 24),
"000b563a dsrl a6, a7, 24");
COMPARE(dsrl(v0, v1, 31),
"000317fa dsrl v0, v1, 31");
COMPARE(srlv(a0, a1, a2),
"00c52006 srlv a0, a1, a2");
COMPARE(srlv(s0, s1, s2),
"02518006 srlv s0, s1, s2");
COMPARE(srlv(a6, a7, t0),
"018b5006 srlv a6, a7, t0");
COMPARE(srlv(v0, v1, fp),
"03c31006 srlv v0, v1, fp");
COMPARE(dsrlv(a0, a1, a2),
"00c52016 dsrlv a0, a1, a2");
COMPARE(dsrlv(s0, s1, s2),
"02518016 dsrlv s0, s1, s2");
COMPARE(dsrlv(a6, a7, t0),
"018b5016 dsrlv a6, a7, t0");
COMPARE(dsrlv(v0, v1, fp),
"03c31016 dsrlv v0, v1, fp");
COMPARE(sra(a0, a1, 0),
"00052003 sra a0, a1, 0");
COMPARE(sra(s0, s1, 8),
"00118203 sra s0, s1, 8");
COMPARE(sra(a6, a7, 24),
"000b5603 sra a6, a7, 24");
COMPARE(sra(v0, v1, 31),
"000317c3 sra v0, v1, 31");
COMPARE(dsra(a0, a1, 0),
"0005203b dsra a0, a1, 0");
COMPARE(dsra(s0, s1, 8),
"0011823b dsra s0, s1, 8");
COMPARE(dsra(a6, a7, 24),
"000b563b dsra a6, a7, 24");
COMPARE(dsra(v0, v1, 31),
"000317fb dsra v0, v1, 31");
COMPARE(srav(a0, a1, a2),
"00c52007 srav a0, a1, a2");
COMPARE(srav(s0, s1, s2),
"02518007 srav s0, s1, s2");
COMPARE(srav(a6, a7, t0),
"018b5007 srav a6, a7, t0");
COMPARE(srav(v0, v1, fp),
"03c31007 srav v0, v1, fp");
COMPARE(dsrav(a0, a1, a2),
"00c52017 dsrav a0, a1, a2");
COMPARE(dsrav(s0, s1, s2),
"02518017 dsrav s0, s1, s2");
COMPARE(dsrav(a6, a7, t0),
"018b5017 dsrav a6, a7, t0");
COMPARE(dsrav(v0, v1, fp),
"03c31017 dsrav v0, v1, fp");
if (kArchVariant == kMips64r2) {
COMPARE(rotr(a0, a1, 0),
"00252002 rotr a0, a1, 0");
COMPARE(rotr(s0, s1, 8),
"00318202 rotr s0, s1, 8");
COMPARE(rotr(a6, a7, 24),
"002b5602 rotr a6, a7, 24");
COMPARE(rotr(v0, v1, 31),
"002317c2 rotr v0, v1, 31");
COMPARE(drotr(a0, a1, 0),
"0025203a drotr a0, a1, 0");
COMPARE(drotr(s0, s1, 8),
"0031823a drotr s0, s1, 8");
COMPARE(drotr(a6, a7, 24),
"002b563a drotr a6, a7, 24");
COMPARE(drotr(v0, v1, 31),
"002317fa drotr v0, v1, 31");
COMPARE(rotrv(a0, a1, a2),
"00c52046 rotrv a0, a1, a2");
COMPARE(rotrv(s0, s1, s2),
"02518046 rotrv s0, s1, s2");
COMPARE(rotrv(a6, a7, t0),
"018b5046 rotrv a6, a7, t0");
COMPARE(rotrv(v0, v1, fp),
"03c31046 rotrv v0, v1, fp");
COMPARE(drotrv(a0, a1, a2),
"00c52056 drotrv a0, a1, a2");
COMPARE(drotrv(s0, s1, s2),
"02518056 drotrv s0, s1, s2");
COMPARE(drotrv(a6, a7, t0),
"018b5056 drotrv a6, a7, t0");
COMPARE(drotrv(v0, v1, fp),
"03c31056 drotrv v0, v1, fp");
}
COMPARE(break_(0),
"0000000d break, code: 0x00000 (0)");
COMPARE(break_(261120),
"00ff000d break, code: 0x3fc00 (261120)");
COMPARE(break_(1047552),
"03ff000d break, code: 0xffc00 (1047552)");
COMPARE(tge(a0, a1, 0),
"00850030 tge a0, a1, code: 0x000");
COMPARE(tge(s0, s1, 1023),
"0211fff0 tge s0, s1, code: 0x3ff");
COMPARE(tgeu(a0, a1, 0),
"00850031 tgeu a0, a1, code: 0x000");
COMPARE(tgeu(s0, s1, 1023),
"0211fff1 tgeu s0, s1, code: 0x3ff");
COMPARE(tlt(a0, a1, 0),
"00850032 tlt a0, a1, code: 0x000");
COMPARE(tlt(s0, s1, 1023),
"0211fff2 tlt s0, s1, code: 0x3ff");
COMPARE(tltu(a0, a1, 0),
"00850033 tltu a0, a1, code: 0x000");
COMPARE(tltu(s0, s1, 1023),
"0211fff3 tltu s0, s1, code: 0x3ff");
COMPARE(teq(a0, a1, 0),
"00850034 teq a0, a1, code: 0x000");
COMPARE(teq(s0, s1, 1023),
"0211fff4 teq s0, s1, code: 0x3ff");
COMPARE(tne(a0, a1, 0),
"00850036 tne a0, a1, code: 0x000");
COMPARE(tne(s0, s1, 1023),
"0211fff6 tne s0, s1, code: 0x3ff");
COMPARE(mfhi(a0),
"00002010 mfhi a0");
COMPARE(mfhi(s2),
"00009010 mfhi s2");
COMPARE(mfhi(t0),
"00006010 mfhi t0");
COMPARE(mfhi(v1),
"00001810 mfhi v1");
COMPARE(mflo(a0),
"00002012 mflo a0");
COMPARE(mflo(s2),
"00009012 mflo s2");
COMPARE(mflo(t0),
"00006012 mflo t0");
COMPARE(mflo(v1),
"00001812 mflo v1");
COMPARE(slt(a0, a1, a2),
"00a6202a slt a0, a1, a2");
COMPARE(slt(s0, s1, s2),
"0232802a slt s0, s1, s2");
COMPARE(slt(a6, a7, t0),
"016c502a slt a6, a7, t0");
COMPARE(slt(v0, v1, a2),
"0066102a slt v0, v1, a2");
COMPARE(sltu(a0, a1, a2),
"00a6202b sltu a0, a1, a2");
COMPARE(sltu(s0, s1, s2),
"0232802b sltu s0, s1, s2");
COMPARE(sltu(a6, a7, t0),
"016c502b sltu a6, a7, t0");
COMPARE(sltu(v0, v1, a2),
"0066102b sltu v0, v1, a2");
COMPARE(slti(a0, a1, 0),
"28a40000 slti a0, a1, 0");
COMPARE(slti(s0, s1, 32767),
"2a307fff slti s0, s1, 32767");
COMPARE(slti(a6, a7, -32768),
"296a8000 slti a6, a7, -32768");
COMPARE(slti(v0, v1, -1),
"2862ffff slti v0, v1, -1");
COMPARE(sltiu(a0, a1, 0),
"2ca40000 sltiu a0, a1, 0");
COMPARE(sltiu(s0, s1, 32767),
"2e307fff sltiu s0, s1, 32767");
COMPARE(sltiu(a6, a7, -32768),
"2d6a8000 sltiu a6, a7, -32768");
COMPARE(sltiu(v0, v1, -1),
"2c62ffff sltiu v0, v1, -1");
if (kArchVariant != kLoongson) {
COMPARE(movz(a0, a1, a2),
"00a6200a movz a0, a1, a2");
COMPARE(movz(s0, s1, s2),
"0232800a movz s0, s1, s2");
COMPARE(movz(a6, a7, t0),
"016c500a movz a6, a7, t0");
COMPARE(movz(v0, v1, a2),
"0066100a movz v0, v1, a2");
COMPARE(movn(a0, a1, a2),
"00a6200b movn a0, a1, a2");
COMPARE(movn(s0, s1, s2),
"0232800b movn s0, s1, s2");
COMPARE(movn(a6, a7, t0),
"016c500b movn a6, a7, t0");
COMPARE(movn(v0, v1, a2),
"0066100b movn v0, v1, a2");
COMPARE(movt(a0, a1, 1),
"00a52001 movt a0, a1, 1");
COMPARE(movt(s0, s1, 2),
"02298001 movt s0, s1, 2");
COMPARE(movt(a6, a7, 3),
"016d5001 movt a6, a7, 3");
COMPARE(movt(v0, v1, 7),
"007d1001 movt v0, v1, 7");
COMPARE(movf(a0, a1, 0),
"00a02001 movf a0, a1, 0");
COMPARE(movf(s0, s1, 4),
"02308001 movf s0, s1, 4");
COMPARE(movf(a6, a7, 5),
"01745001 movf a6, a7, 5");
COMPARE(movf(v0, v1, 6),
"00781001 movf v0, v1, 6");
COMPARE(clz(a0, a1),
"70a42020 clz a0, a1");
COMPARE(clz(s6, s7),
"72f6b020 clz s6, s7");
COMPARE(clz(v0, v1),
"70621020 clz v0, v1");
}
if (kArchVariant == kMips64r2) {
COMPARE(ins_(a0, a1, 31, 1),
"7ca4ffc4 ins a0, a1, 31, 1");
COMPARE(ins_(s6, s7, 30, 2),
"7ef6ff84 ins s6, s7, 30, 2");
COMPARE(ins_(v0, v1, 0, 32),
"7c62f804 ins v0, v1, 0, 32");
COMPARE(ext_(a0, a1, 31, 1),
"7ca407c0 ext a0, a1, 31, 1");
COMPARE(ext_(s6, s7, 30, 2),
"7ef60f80 ext s6, s7, 30, 2");
COMPARE(ext_(v0, v1, 0, 32),
"7c62f800 ext v0, v1, 0, 32");
}
VERIFY_RUN();
}
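The expected strings passed to COMPARE are the raw instruction encodings, so each test pins down the exact bit pattern the assembler must emit. Decoding the first one by hand shows where the hex comes from; a standalone sketch (register numbers follow the MIPS n64 ABI, where a0..a3 are $4..$7):

#include <cstdint>
#include <cstdio>

// Decode 0x00a62021 ("addu a0, a1, a2") into its R-type fields:
// opcode(6) rs(5) rt(5) rd(5) shamt(5) funct(6).
int main() {
  uint32_t instr = 0x00a62021;
  unsigned opcode = instr >> 26;         // 0x00 -> SPECIAL
  unsigned rs     = (instr >> 21) & 31;  // 5    -> a1
  unsigned rt     = (instr >> 16) & 31;  // 6    -> a2
  unsigned rd     = (instr >> 11) & 31;  // 4    -> a0
  unsigned funct  = instr & 63;          // 0x21 -> ADDU (0x2d is DADDU)
  std::printf("op=%u rs=%u rt=%u rd=%u funct=0x%x\n",
              opcode, rs, rt, rd, funct);
  return 0;
}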

View File

@ -114,11 +114,11 @@ void generate(MacroAssembler* masm, i::Vector<const uint8_t> string) {
__ Pop(xzr, root);
__ Ret();
__ SetStackPointer(old_stack_pointer);
#elif V8_TARGET_ARCH_MIPS
#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
__ push(kRootRegister);
__ InitializeRootRegister();
__ li(v0, Operand(0));
__ mov(v0, zero_reg);
__ li(t1, Operand(string.at(0)));
StringHelper::GenerateHashInit(masm, v0, t1);
for (int i = 1; i < string.length(); i++) {
@ -170,7 +170,7 @@ void generate(MacroAssembler* masm, uint32_t key) {
__ Pop(xzr, root);
__ Ret();
__ SetStackPointer(old_stack_pointer);
#elif V8_TARGET_ARCH_MIPS
#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
__ push(kRootRegister);
__ InitializeRootRegister();
__ li(v0, Operand(key));

View File

@ -179,7 +179,8 @@ TEST(HeapObjects) {
CHECK(value->IsNumber());
CHECK_EQ(Smi::kMaxValue, Handle<Smi>::cast(value)->value());
#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_ARM64)
#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_ARM64) && \
!defined(V8_TARGET_ARCH_MIPS64)
// TODO(lrn): We need a NumberFromIntptr function in order to test this.
value = factory->NewNumberFromInt(Smi::kMinValue - 1);
CHECK(value->IsHeapNumber());

View File

@ -0,0 +1,217 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
#include "src/v8.h"
#include "test/cctest/cctest.h"
#include "src/macro-assembler.h"
#include "src/mips64/macro-assembler-mips64.h"
#include "src/mips64/simulator-mips64.h"
using namespace v8::internal;
typedef void* (*F)(int64_t x, int64_t y, int p2, int p3, int p4);
#define __ masm->
static byte to_non_zero(int n) {
return static_cast<unsigned>(n) % 255 + 1;
}
static bool all_zeroes(const byte* beg, const byte* end) {
CHECK(beg);
CHECK(beg <= end);
while (beg < end) {
if (*beg++ != 0)
return false;
}
return true;
}
TEST(CopyBytes) {
CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope handles(isolate);
const int data_size = 1 * KB;
size_t act_size;
// Allocate two blocks to copy data between.
byte* src_buffer =
static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
CHECK(src_buffer);
CHECK(act_size >= static_cast<size_t>(data_size));
byte* dest_buffer =
static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
CHECK(dest_buffer);
CHECK(act_size >= static_cast<size_t>(data_size));
// Storage for a0 and a1.
byte* a0_;
byte* a1_;
MacroAssembler assembler(isolate, NULL, 0);
MacroAssembler* masm = &assembler;
// Code to be generated: The stuff in CopyBytes followed by a store of a0 and
// a1, respectively.
__ CopyBytes(a0, a1, a2, a3);
__ li(a2, Operand(reinterpret_cast<int64_t>(&a0_)));
__ li(a3, Operand(reinterpret_cast<int64_t>(&a1_)));
__ sd(a0, MemOperand(a2));
__ jr(ra);
__ sd(a1, MemOperand(a3));
CodeDesc desc;
masm->GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
::F f = FUNCTION_CAST< ::F>(code->entry());
// Initialise source data with non-zero bytes.
for (int i = 0; i < data_size; i++) {
src_buffer[i] = to_non_zero(i);
}
const int fuzz = 11;
for (int size = 0; size < 600; size++) {
for (const byte* src = src_buffer; src < src_buffer + fuzz; src++) {
for (byte* dest = dest_buffer; dest < dest_buffer + fuzz; dest++) {
memset(dest_buffer, 0, data_size);
CHECK(dest + size < dest_buffer + data_size);
(void) CALL_GENERATED_CODE(f, reinterpret_cast<int64_t>(src),
reinterpret_cast<int64_t>(dest),
size, 0, 0);
// a0 and a1 should point at the first byte after the copied data.
CHECK_EQ(src + size, a0_);
CHECK_EQ(dest + size, a1_);
// Check that we haven't written outside the target area.
CHECK(all_zeroes(dest_buffer, dest));
CHECK(all_zeroes(dest + size, dest_buffer + data_size));
// Check the target area.
CHECK_EQ(0, memcmp(src, dest, size));
}
}
}
// Check that the source data hasn't been clobbered.
for (int i = 0; i < data_size; i++) {
CHECK(src_buffer[i] == to_non_zero(i));
}
}
TEST(LoadConstants) {
CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope handles(isolate);
int64_t refConstants[64];
int64_t result[64];
int64_t mask = 1;
for (int i = 0; i < 64; i++) {
refConstants[i] = ~(mask << i);
}
MacroAssembler assembler(isolate, NULL, 0);
MacroAssembler* masm = &assembler;
__ mov(a4, a0);
for (int i = 0; i < 64; i++) {
// Load constant.
__ li(a5, Operand(refConstants[i]));
__ sd(a5, MemOperand(a4));
__ Daddu(a4, a4, Operand(kPointerSize));
}
__ jr(ra);
__ nop();
CodeDesc desc;
masm->GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
::F f = FUNCTION_CAST< ::F>(code->entry());
(void) CALL_GENERATED_CODE(f, reinterpret_cast<int64_t>(result),
0, 0, 0, 0);
// Check results.
for (int i = 0; i < 64; i++) {
CHECK(refConstants[i] == result[i]);
}
}
TEST(LoadAddress) {
CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, NULL, 0);
MacroAssembler* masm = &assembler;
Label to_jump, skip;
__ mov(a4, a0);
__ Branch(&skip);
__ bind(&to_jump);
__ nop();
__ nop();
__ jr(ra);
__ nop();
__ bind(&skip);
__ li(a4, Operand(masm->jump_address(&to_jump)), ADDRESS_LOAD);
int check_size = masm->InstructionsGeneratedSince(&skip);
CHECK_EQ(check_size, 4);
__ jr(a4);
__ nop();
__ stop("invalid");
__ stop("invalid");
__ stop("invalid");
__ stop("invalid");
__ stop("invalid");
CodeDesc desc;
masm->GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
::F f = FUNCTION_CAST< ::F>(code->entry());
(void) CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0);
// Check results.
}
#undef __

View File

@ -57,6 +57,11 @@
#include "src/mips/macro-assembler-mips.h"
#include "src/mips/regexp-macro-assembler-mips.h"
#endif
#if V8_TARGET_ARCH_MIPS64
#include "src/mips64/assembler-mips64.h"
#include "src/mips64/macro-assembler-mips64.h"
#include "src/mips64/regexp-macro-assembler-mips64.h"
#endif
#if V8_TARGET_ARCH_X64
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"
@ -703,6 +708,8 @@ typedef RegExpMacroAssemblerARM ArchRegExpMacroAssembler;
typedef RegExpMacroAssemblerARM64 ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_MIPS
typedef RegExpMacroAssemblerMIPS ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_MIPS64
typedef RegExpMacroAssemblerMIPS ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_X87
typedef RegExpMacroAssemblerX87 ArchRegExpMacroAssembler;
#endif

View File

@ -77,7 +77,7 @@
'array-constructor': [PASS, TIMEOUT],
# Very slow on ARM and MIPS, contains no architecture dependent code.
'unicode-case-overoptimization': [PASS, NO_VARIANTS, ['arch == arm or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips', TIMEOUT]],
'unicode-case-overoptimization': [PASS, NO_VARIANTS, ['arch == arm or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips64el or arch == mips', TIMEOUT]],
##############################################################################
# This test expects to reach a certain recursion depth, which may not work
@ -349,6 +349,62 @@
'math-floor-of-div-minus-zero': [SKIP],
}], # 'arch == mipsel or arch == mips'
##############################################################################
['arch == mips64el', {
# Slow tests which time out in debug mode.
'try': [PASS, ['mode == debug', SKIP]],
'debug-scripts-request': [PASS, ['mode == debug', SKIP]],
'array-constructor': [PASS, ['mode == debug', SKIP]],
# Times out often in release mode on MIPS.
'compiler/regress-stacktrace-methods': [PASS, PASS, ['mode == release', TIMEOUT]],
'array-splice': [PASS, TIMEOUT],
# Long running test.
'mirror-object': [PASS, TIMEOUT],
'string-indexof-2': [PASS, TIMEOUT],
# BUG(3251035): Timeouts in long looping crankshaft optimization
# tests. Skipping because having them time out takes too long on the
# buildbot.
'compiler/alloc-number': [PASS, SLOW],
'compiler/array-length': [PASS, SLOW],
'compiler/assignment-deopt': [PASS, SLOW],
'compiler/deopt-args': [PASS, SLOW],
'compiler/inline-compare': [PASS, SLOW],
'compiler/inline-global-access': [PASS, SLOW],
'compiler/optimized-function-calls': [PASS, SLOW],
'compiler/pic': [PASS, SLOW],
'compiler/property-calls': [PASS, SLOW],
'compiler/recursive-deopt': [PASS, SLOW],
'compiler/regress-4': [PASS, SLOW],
'compiler/regress-funcaller': [PASS, SLOW],
'compiler/regress-rep-change': [PASS, SLOW],
'compiler/regress-arguments': [PASS, SLOW],
'compiler/regress-funarguments': [PASS, SLOW],
'compiler/regress-3249650': [PASS, SLOW],
'compiler/simple-deopt': [PASS, SLOW],
'regress/regress-490': [PASS, SLOW],
'regress/regress-634': [PASS, SLOW],
'regress/regress-create-exception': [PASS, SLOW],
'regress/regress-3218915': [PASS, SLOW],
'regress/regress-3247124': [PASS, SLOW],
# Requires a bigger stack size in the Genesis; if the stack size is increased,
# the test takes too much time to run. However, the problem the test covers
# should be platform-independent.
'regress/regress-1132': [SKIP],
# Currently always deopts on minus zero.
'math-floor-of-div-minus-zero': [SKIP],
}], # 'arch == mips64el'
['arch == mips64el and simulator_run == False', {
# Random failures on HW, need investigation.
'debug-*': [SKIP],
}],
##############################################################################
# Native Client uses the ARM simulator so will behave similarly to arm
# on mjsunit tests.

View File

@ -862,7 +862,7 @@
}], # 'arch == arm64'
['arch == mipsel', {
['arch == mipsel or arch == mips64el', {
# BUG(3251229): Times out when running new crankshaft test script.
'ecma_3/RegExp/regress-311414': [SKIP],
@ -879,7 +879,7 @@
# BUG(1040): Allow this test to timeout.
'js1_5/GC/regress-203278-2': [PASS, TIMEOUT, NO_VARIANTS],
}], # 'arch == mipsel'
}], # 'arch == mipsel or arch == mips64el'
['arch == mips', {

View File

@ -99,7 +99,7 @@
'S15.1.3.2_A2.5_T1': [PASS, ['mode == debug', SKIP]],
}], # ALWAYS
['arch == arm or arch == mipsel or arch == mips or arch == arm64', {
['arch == arm or arch == mipsel or arch == mips or arch == arm64 or arch == mips64el', {
# TODO(mstarzinger): Causes stack overflow on simulators due to eager
# compilation of parenthesized function literals. Needs investigation.

View File

@ -857,6 +857,40 @@
'../../src/mips/stub-cache-mips.cc',
],
}],
['v8_target_arch=="mips64el"', {
'sources': [ ### gcmole(arch:mips64el) ###
'../../src/mips64/assembler-mips64.cc',
'../../src/mips64/assembler-mips64.h',
'../../src/mips64/assembler-mips64-inl.h',
'../../src/mips64/builtins-mips64.cc',
'../../src/mips64/codegen-mips64.cc',
'../../src/mips64/codegen-mips64.h',
'../../src/mips64/code-stubs-mips64.cc',
'../../src/mips64/code-stubs-mips64.h',
'../../src/mips64/constants-mips64.cc',
'../../src/mips64/constants-mips64.h',
'../../src/mips64/cpu-mips64.cc',
'../../src/mips64/debug-mips64.cc',
'../../src/mips64/deoptimizer-mips64.cc',
'../../src/mips64/disasm-mips64.cc',
'../../src/mips64/frames-mips64.cc',
'../../src/mips64/frames-mips64.h',
'../../src/mips64/full-codegen-mips64.cc',
'../../src/mips64/ic-mips64.cc',
'../../src/mips64/lithium-codegen-mips64.cc',
'../../src/mips64/lithium-codegen-mips64.h',
'../../src/mips64/lithium-gap-resolver-mips64.cc',
'../../src/mips64/lithium-gap-resolver-mips64.h',
'../../src/mips64/lithium-mips64.cc',
'../../src/mips64/lithium-mips64.h',
'../../src/mips64/macro-assembler-mips64.cc',
'../../src/mips64/macro-assembler-mips64.h',
'../../src/mips64/regexp-macro-assembler-mips64.cc',
'../../src/mips64/regexp-macro-assembler-mips64.h',
'../../src/mips64/simulator-mips64.cc',
'../../src/mips64/stub-cache-mips64.cc',
],
}],
['v8_target_arch=="x64" or v8_target_arch=="x32"', {
'sources': [ ### gcmole(arch:x64) ###
'../../src/x64/assembler-x64-inl.h',

View File

@ -83,6 +83,7 @@ SUPPORTED_ARCHS = ["android_arm",
"x87",
"mips",
"mipsel",
"mips64el",
"nacl_ia32",
"nacl_x64",
"x64",
@ -95,6 +96,7 @@ SLOW_ARCHS = ["android_arm",
"arm",
"mips",
"mipsel",
"mips64el",
"nacl_ia32",
"nacl_x64",
"x87",

View File

@ -53,7 +53,7 @@ DEFS = {FAIL_OK: [FAIL, OKAY],
# Support arches, modes to be written as keywords instead of strings.
VARIABLES = {ALWAYS: True}
for var in ["debug", "release", "android_arm", "android_arm64", "android_ia32", "android_x87",
"arm", "arm64", "ia32", "mips", "mipsel", "x64", "x87", "nacl_ia32",
"arm", "arm64", "ia32", "mips", "mipsel", "mips64el", "x64", "x87", "nacl_ia32",
"nacl_x64", "macos", "windows", "linux"]:
VARIABLES[var] = var