Re-establish basic mips infrastructure.

This commit adds current working versions of the assembler, macro-assembler,
disassembler, and simulator.

All other mips arch files are replaced with stubbed-out versions that
will build.

Arch-independent files are updated as needed to support building and
running mips.

The only test is cctest/test-assembler-mips, which passes on the
simulator and on mips hardware.

TEST=none
BUG=none

Patch by Paul Lind from MIPS.

Review URL: http://codereview.chromium.org/6730029/


git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@7388 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
sgjesse@chromium.org 2011-03-28 13:05:36 +00:00
parent 5310b07c04
commit 2531480d10
59 changed files with 13234 additions and 3631 deletions

View File

@ -224,14 +224,37 @@ LIBRARY_FLAGS = {
},
'arch:mips': {
'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
'mips_arch_variant:mips32r2': {
'CPPDEFINES': ['_MIPS_ARCH_MIPS32R2']
},
'simulator:none': {
'CCFLAGS': ['-EL', '-mips32r2', '-Wa,-mips32r2', '-fno-inline'],
'LDFLAGS': ['-EL']
'CCFLAGS': ['-EL'],
'LINKFLAGS': ['-EL'],
'mips_arch_variant:mips32r2': {
'CCFLAGS': ['-mips32r2', '-Wa,-mips32r2']
},
'mips_arch_variant:mips32r1': {
'CCFLAGS': ['-mips32', '-Wa,-mips32']
},
'library:static': {
'LINKFLAGS': ['-static', '-static-libgcc']
},
'mipsabi:softfloat': {
'CCFLAGS': ['-msoft-float'],
'LINKFLAGS': ['-msoft-float']
},
'mipsabi:hardfloat': {
'CCFLAGS': ['-mhard-float'],
'LINKFLAGS': ['-mhard-float']
}
}
},
'simulator:mips': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32'],
'mipsabi:softfloat': {
'CPPDEFINES': ['__mips_soft_float=1'],
}
},
'arch:x64': {
'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
@ -345,6 +368,9 @@ V8_EXTRA_FLAGS = {
},
'arch:mips': {
'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
'mips_arch_variant:mips32r2': {
'CPPDEFINES': ['_MIPS_ARCH_MIPS32R2']
},
},
'disassembler:on': {
'CPPDEFINES': ['ENABLE_DISASSEMBLER']
@ -519,10 +545,29 @@ SAMPLE_FLAGS = {
},
'arch:mips': {
'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
'mips_arch_variant:mips32r2': {
'CPPDEFINES': ['_MIPS_ARCH_MIPS32R2']
},
'simulator:none': {
'CCFLAGS': ['-EL', '-mips32r2', '-Wa,-mips32r2', '-fno-inline'],
'CCFLAGS': ['-EL'],
'LINKFLAGS': ['-EL'],
'LDFLAGS': ['-EL']
'mips_arch_variant:mips32r2': {
'CCFLAGS': ['-mips32r2', '-Wa,-mips32r2']
},
'mips_arch_variant:mips32r1': {
'CCFLAGS': ['-mips32', '-Wa,-mips32']
},
'library:static': {
'LINKFLAGS': ['-static', '-static-libgcc']
},
'mipsabi:softfloat': {
'CCFLAGS': ['-msoft-float'],
'LINKFLAGS': ['-msoft-float']
},
'mipsabi:hardfloat': {
'CCFLAGS': ['-mhard-float'],
'LINKFLAGS': ['-mhard-float']
}
}
},
'simulator:arm': {
@ -531,7 +576,10 @@ SAMPLE_FLAGS = {
},
'simulator:mips': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
'LINKFLAGS': ['-m32'],
'mipsabi:softfloat': {
'CPPDEFINES': ['__mips_soft_float=1'],
}
},
'mode:release': {
'CCFLAGS': ['-O2']
@ -818,6 +866,16 @@ SIMPLE_OPTIONS = {
'values': ['off', 'instrument', 'optimize'],
'default': 'off',
'help': 'select profile guided optimization variant',
},
'mipsabi': {
'values': ['hardfloat', 'softfloat', 'none'],
'default': 'hardfloat',
'help': 'generate calling convention according to selected mips ABI'
},
'mips_arch_variant': {
'values': ['mips32r2', 'mips32r1'],
'default': 'mips32r2',
'help': 'mips variant'
}
}
@ -1014,11 +1072,12 @@ def PostprocessOptions(options, os):
if 'msvcltcg' in ARGUMENTS:
print "Warning: forcing msvcltcg on as it is required for pgo (%s)" % options['pgo']
options['msvcltcg'] = 'on'
if options['arch'] == 'mips':
if ('regexp' in ARGUMENTS) and options['regexp'] == 'native':
# Print a warning if native regexp is specified for mips
print "Warning: forcing regexp to interpreted for mips"
options['regexp'] = 'interpreted'
if (options['simulator'] == 'mips' and options['mipsabi'] != 'softfloat'):
# Print a warning if soft-float ABI is not selected for mips simulator
print "Warning: forcing soft-float mips ABI when running on simulator"
options['mipsabi'] = 'softfloat'
if (options['mipsabi'] != 'none') and (options['arch'] != 'mips') and (options['simulator'] != 'mips'):
options['mipsabi'] = 'none'
if options['liveobjectlist'] == 'on':
if (options['debuggersupport'] != 'on') or (options['mode'] == 'release'):
# Print a warning that liveobjectlist will implicitly enable the debugger

View File

@ -163,18 +163,23 @@ SOURCES = {
arm/assembler-arm.cc
"""),
'arch:mips': Split("""
jump-target-light.cc
virtual-frame-light.cc
mips/assembler-mips.cc
mips/builtins-mips.cc
mips/code-stubs-mips.cc
mips/codegen-mips.cc
mips/constants-mips.cc
mips/cpu-mips.cc
mips/debug-mips.cc
mips/deoptimizer-mips.cc
mips/disasm-mips.cc
mips/full-codegen-mips.cc
mips/frames-mips.cc
mips/full-codegen-mips.cc
mips/ic-mips.cc
mips/jump-target-mips.cc
mips/macro-assembler-mips.cc
mips/regexp-macro-assembler-mips.cc
mips/register-allocator-mips.cc
mips/stub-cache-mips.cc
mips/virtual-frame-mips.cc

View File

@ -55,6 +55,8 @@
#include "x64/regexp-macro-assembler-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/regexp-macro-assembler-mips.h"
#else // Unknown architecture.
#error "Unknown architecture."
#endif // Target architecture.
@ -795,6 +797,8 @@ ExternalReference ExternalReference::re_check_stack_guard_state(
function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
#elif V8_TARGET_ARCH_ARM
function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS
function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
#else
UNREACHABLE();
#endif

View File

@ -158,6 +158,8 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
#include "atomicops_internals_x86_gcc.h"
#elif defined(__GNUC__) && defined(V8_HOST_ARCH_ARM)
#include "atomicops_internals_arm_gcc.h"
#elif defined(__GNUC__) && defined(V8_HOST_ARCH_MIPS)
#include "atomicops_internals_mips_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
#endif

View File

@ -0,0 +1,169 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("sync" : : : "memory")
namespace v8 {
namespace internal {
// Atomically execute:
// result = *ptr;
// if (*ptr == old_value)
// *ptr = new_value;
// return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr"
//
// This routine implies no memory barriers.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev;
__asm__ __volatile__("1:\n"
"ll %0, %1\n" // prev = *ptr
"bne %0, %3, 2f\n" // if (prev != old_value) goto 2
"nop\n" // delay slot nop
"sc %2, %1\n" // *ptr = new_value (with atomic check)
"beqz %2, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
"2:\n"
: "=&r" (prev), "=m" (*ptr), "+&r" (new_value)
: "Ir" (old_value), "r" (new_value), "m" (*ptr)
: "memory");
return prev;
}
// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 temp, old;
__asm__ __volatile__("1:\n"
"ll %1, %2\n" // old = *ptr
"move %0, %3\n" // temp = new_value
"sc %0, %2\n" // *ptr = temp (with atomic check)
"beqz %0, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
: "=&r" (temp), "=&r" (old), "=m" (*ptr)
: "r" (new_value), "m" (*ptr)
: "memory");
return old;
}
// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 temp, temp2;
__asm__ __volatile__("1:\n"
"ll %0, %2\n" // temp = *ptr
"addu %0, %3\n" // temp = temp + increment
"move %1, %0\n" // temp2 = temp
"sc %0, %2\n" // *ptr = temp (with atomic check)
"beqz %0, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
: "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
: "Ir" (increment), "m" (*ptr)
: "memory");
// temp2 now holds the final value.
return temp2;
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
ATOMICOPS_COMPILER_BARRIER();
return res;
}
// "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
ATOMICOPS_COMPILER_BARRIER();
return x;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
ATOMICOPS_COMPILER_BARRIER();
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void MemoryBarrier() {
ATOMICOPS_COMPILER_BARRIER();
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
} } // namespace v8::internal
#undef ATOMICOPS_COMPILER_BARRIER
#endif // V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
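// Usage illustration only, not part of this change: a minimal sketch of a
// counter built on the primitives above. Atomic32 and the barrier functions
// come from this header; ExampleCounter itself is hypothetical.
class ExampleCounter {
 public:
  // Increment with barrier semantics and return the new value.
  Atomic32 Increment() { return Barrier_AtomicIncrement(&value_, 1); }
  // Release_Store in the writer pairs with Acquire_Load in readers.
  void Publish(Atomic32 v) { Release_Store(&value_, v); }
  Atomic32 Read() const { return Acquire_Load(&value_); }
 private:
  volatile Atomic32 value_;
};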

View File

@ -80,10 +80,19 @@ namespace internal {
#define CODE_STUB_LIST_ARM(V)
#endif
// List of code stubs only used on MIPS platforms.
#ifdef V8_TARGET_ARCH_MIPS
#define CODE_STUB_LIST_MIPS(V) \
V(RegExpCEntry)
#else
#define CODE_STUB_LIST_MIPS(V)
#endif
// Combined list of code stubs.
#define CODE_STUB_LIST(V) \
CODE_STUB_LIST_ALL_PLATFORMS(V) \
CODE_STUB_LIST_ARM(V)
CODE_STUB_LIST_ARM(V) \
CODE_STUB_LIST_MIPS(V)
// Mode to overwrite BinaryExpression values.
enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
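// Illustration of the X-macro pattern above (hypothetical client code): each
// client defines its own V and applies it to every listed stub, so adding
// V(RegExpCEntry) for MIPS automatically extends every expansion site.
#define EXAMPLE_DEFINE_KIND(name) k##name,
enum ExampleStubKind {
  CODE_STUB_LIST(EXAMPLE_DEFINE_KIND)  // One enumerator per listed stub.
  kExampleStubKindCount
};
#undef EXAMPLE_DEFINE_KIND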

View File

@ -97,7 +97,11 @@ private:
#define FLAG FLAG_FULL
// Flags for Crankshaft.
DEFINE_bool(crankshaft, true, "use crankshaft")
#ifdef V8_TARGET_ARCH_MIPS
DEFINE_bool(crankshaft, false, "use crankshaft")
#else
DEFINE_bool(crankshaft, true, "use crankshaft")
#endif
DEFINE_string(hydrogen_filter, "", "hydrogen use/trace filter")
DEFINE_bool(use_hydrogen, true, "use generated hydrogen for compilation")
DEFINE_bool(build_lithium, true, "use lithium chunk builder")
@ -161,6 +165,8 @@ DEFINE_bool(enable_vfp3, true,
"enable use of VFP3 instructions if available (ARM only)")
DEFINE_bool(enable_armv7, true,
"enable use of ARMv7 instructions if available (ARM only)")
DEFINE_bool(enable_fpu, true,
"enable use of MIPS FPU instructions if available (MIPS only)")
// bootstrapper.cc
DEFINE_string(expose_natives_as, NULL, "expose natives in global object")

View File

@ -54,7 +54,7 @@ namespace internal {
#if CAN_USE_UNALIGNED_ACCESSES
#define V8_HOST_CAN_READ_UNALIGNED 1
#endif
#elif defined(_MIPS_ARCH_MIPS32R2)
#elif defined(__MIPSEL__)
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
#else
@ -72,7 +72,7 @@ namespace internal {
#define V8_TARGET_ARCH_IA32 1
#elif defined(__ARMEL__)
#define V8_TARGET_ARCH_ARM 1
#elif defined(_MIPS_ARCH_MIPS32R2)
#elif defined(__MIPSEL__)
#define V8_TARGET_ARCH_MIPS 1
#else
#error Target architecture was not detected as supported by v8

View File

@ -49,6 +49,10 @@
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "mips/regexp-macro-assembler-mips.h"
#endif
namespace v8 {
namespace internal {

View File

@ -36,6 +36,8 @@
#include "x64/lithium-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-mips.h"
#else
#error Unsupported target architecture.
#endif

View File

@ -43,6 +43,8 @@
#include "x64/lithium-codegen-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-codegen-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-codegen-mips.h"
#else
#error Unsupported target architecture.
#endif

View File

@ -382,7 +382,8 @@ Isolate::Isolate()
zone_.isolate_ = this;
stack_guard_.isolate_ = this;
#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__)
#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
simulator_initialized_ = false;
simulator_i_cache_ = NULL;
simulator_redirection_ = NULL;
@ -658,10 +659,8 @@ bool Isolate::Init(Deserializer* des) {
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
#if defined(V8_TARGET_ARCH_ARM)
#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
Simulator::Initialize();
#elif defined(V8_TARGET_ARCH_MIPS)
::assembler::mips::Simulator::Initialize();
#endif
#endif

View File

@ -93,11 +93,13 @@ class Debugger;
class DebuggerAgent;
#endif
#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM)
#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
!defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
class Redirection;
class Simulator;
#endif
// Static indirection table for handles to constants. If a frame
// element represents a constant, the data contains an index into
// this table of handles to the actual constants.
@ -195,10 +197,8 @@ class ThreadLocalTop BASE_EMBEDDED {
Address handler_; // try-blocks are chained through the stack
#ifdef USE_SIMULATOR
#ifdef V8_TARGET_ARCH_ARM
#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
Simulator* simulator_;
#elif V8_TARGET_ARCH_MIPS
assembler::mips::Simulator* simulator_;
#endif
#endif // USE_SIMULATOR
@ -221,7 +221,7 @@ class ThreadLocalTop BASE_EMBEDDED {
Address try_catch_handler_address_;
};
#if defined(V8_TARGET_ARCH_ARM)
#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
#define ISOLATE_PLATFORM_INIT_LIST(V) \
/* VirtualFrame::SpilledScope state */ \
@ -229,7 +229,7 @@ class ThreadLocalTop BASE_EMBEDDED {
/* CodeGenerator::EmitNamedStore state */ \
V(int, inlined_write_barrier_size, -1)
#if !defined(__arm__)
#if !defined(__arm__) && !defined(__mips__)
class HashMap;
#endif
@ -341,7 +341,8 @@ class Isolate {
thread_id_(thread_id),
stack_limit_(0),
thread_state_(NULL),
#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM)
#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
!defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
simulator_(NULL),
#endif
next_(NULL),
@ -353,7 +354,8 @@ class Isolate {
ThreadState* thread_state() const { return thread_state_; }
void set_thread_state(ThreadState* value) { thread_state_ = value; }
#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM)
#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
!defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
Simulator* simulator() const { return simulator_; }
void set_simulator(Simulator* simulator) {
simulator_ = simulator;
@ -370,7 +372,8 @@ class Isolate {
uintptr_t stack_limit_;
ThreadState* thread_state_;
#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM)
#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
!defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
Simulator* simulator_;
#endif
@ -854,7 +857,8 @@ class Isolate {
int* code_kind_statistics() { return code_kind_statistics_; }
#endif
#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__)
#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
bool simulator_initialized() { return simulator_initialized_; }
void set_simulator_initialized(bool initialized) {
simulator_initialized_ = initialized;
@ -1076,7 +1080,8 @@ class Isolate {
ZoneObjectList frame_element_constant_list_;
ZoneObjectList result_constant_list_;
#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__)
#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
bool simulator_initialized_;
HashMap* simulator_i_cache_;
Redirection* simulator_redirection_;

View File

@ -50,6 +50,8 @@
#include "x64/regexp-macro-assembler-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/regexp-macro-assembler-mips.h"
#else
#error Unsupported target architecture.
#endif
@ -5340,6 +5342,8 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(RegExpCompileData* data,
RegExpMacroAssemblerX64 macro_assembler(mode, (data->capture_count + 1) * 2);
#elif V8_TARGET_ARCH_ARM
RegExpMacroAssemblerARM macro_assembler(mode, (data->capture_count + 1) * 2);
#elif V8_TARGET_ARCH_MIPS
RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2);
#endif
#else // V8_INTERPRETED_REGEXP

View File

@ -36,6 +36,8 @@
#include "x64/lithium-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-mips.h"
#else
#error "Unknown architecture."
#endif

View File

@ -37,6 +37,8 @@
#include "x64/lithium-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-mips.h"
#else
#error "Unknown architecture."
#endif

View File

@ -38,20 +38,12 @@
#include "mips/assembler-mips.h"
#include "cpu.h"
#include "debug.h"
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
// Condition
Condition NegateCondition(Condition cc) {
ASSERT(cc != cc_always);
return static_cast<Condition>(cc ^ 1);
}
// -----------------------------------------------------------------------------
// Operand and MemOperand
@ -61,17 +53,13 @@ Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
rmode_ = rmode;
}
Operand::Operand(const ExternalReference& f) {
rm_ = no_reg;
imm32_ = reinterpret_cast<int32_t>(f.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
Operand::Operand(const char* s) {
rm_ = no_reg;
imm32_ = reinterpret_cast<int32_t>(s);
rmode_ = RelocInfo::EMBEDDED_STRING;
}
Operand::Operand(Smi* value) {
rm_ = no_reg;
@ -79,10 +67,12 @@ Operand::Operand(Smi* value) {
rmode_ = RelocInfo::NONE;
}
Operand::Operand(Register rm) {
rm_ = rm;
}
bool Operand::is_reg() const {
return rm_.is_valid();
}
@ -105,8 +95,29 @@ Address RelocInfo::target_address() {
Address RelocInfo::target_address_address() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
return reinterpret_cast<Address>(pc_);
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
// Read the address of the word containing the target_address in an
// instruction stream.
// The only architecture-independent user of this function is the serializer.
// The serializer uses it to find out how many raw bytes of instruction to
// output before the next target.
// For instructions like LUI/ORI where the target bits are mixed into the
// instruction bits, the size of the target will be zero, indicating that the
// serializer should not step forward in memory after a target is resolved
// and written. In this case the target_address_address function should
// return the end of the instructions to be patched, allowing the
// deserializer to deserialize the instructions as raw bytes and put them in
// place, ready to be patched with the target. In our case, that is the
// address of the instruction that follows LUI/ORI instruction pair.
return reinterpret_cast<Address>(
pc_ + Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize);
}
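// Illustrative sketch (hypothetical helper, simplified from what
// Assembler::target_address_at would do): each of LUI and ORI carries
// 16 bits of the constant in its immediate field.
static Address ExampleTargetAt(Address pc) {
  uint32_t lui_instr = *reinterpret_cast<uint32_t*>(pc);
  uint32_t ori_instr = *reinterpret_cast<uint32_t*>(pc + Assembler::kInstrSize);
  uint32_t hi16 = lui_instr & 0xffff;  // lui at, hi16
  uint32_t lo16 = ori_instr & 0xffff;  // ori at, at, lo16
  return reinterpret_cast<Address>((hi16 << 16) | lo16);
}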
int RelocInfo::target_address_size() {
return Assembler::kExternalTargetSize;
}
@ -130,8 +141,15 @@ Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
Object** RelocInfo::target_object_address() {
// Provide a "natural pointer" to the embedded object,
// which can be de-referenced during heap iteration.
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return reinterpret_cast<Object**>(pc_);
// TODO(mips): Commenting out, to simplify arch-independent changes.
// GC won't work like this, but this commit is for asm/disasm/sim.
// reconstructed_obj_ptr_ =
// reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
// return &reconstructed_obj_ptr_;
return NULL;
}
@ -143,23 +161,55 @@ void RelocInfo::set_target_object(Object* target) {
Address* RelocInfo::target_reference_address() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
return reinterpret_cast<Address*>(pc_);
// TODO(mips): Commenting out, to simplify arch-independent changes.
// GC won't work like this, but this commit is for asm/disasm/sim.
// reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
// return &reconstructed_adr_ptr_;
return NULL;
}
Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = Memory::Address_at(pc_);
return Handle<JSGlobalPropertyCell>(
reinterpret_cast<JSGlobalPropertyCell**>(address));
}
JSGlobalPropertyCell* RelocInfo::target_cell() {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = Memory::Address_at(pc_);
Object* object = HeapObject::FromAddress(
address - JSGlobalPropertyCell::kValueOffset);
return reinterpret_cast<JSGlobalPropertyCell*>(object);
}
void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
}
Address RelocInfo::call_address() {
ASSERT(IsPatchedReturnSequence());
// The 2 instructions offset assumes patched return sequence.
ASSERT(IsJSReturn(rmode()));
return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
// The pc_ offset of 0 assumes mips patched return sequence per
// debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
// debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
return Assembler::target_address_at(pc_);
}
void RelocInfo::set_call_address(Address target) {
ASSERT(IsPatchedReturnSequence());
// The 2 instructions offset assumes patched return sequence.
ASSERT(IsJSReturn(rmode()));
Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
// The pc_ offset of 0 assumes mips patched return sequence per
// debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
// debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
Assembler::set_target_address_at(pc_, target);
}
@ -169,9 +219,8 @@ Object* RelocInfo::call_object() {
Object** RelocInfo::call_object_address() {
ASSERT(IsPatchedReturnSequence());
// The 2 instructions offset assumes patched return sequence.
ASSERT(IsJSReturn(rmode()));
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
}
@ -182,13 +231,76 @@ void RelocInfo::set_call_object(Object* target) {
bool RelocInfo::IsPatchedReturnSequence() {
#ifdef DEBUG
PrintF("%s - %d - %s : Checking for jal(r)",
__FILE__, __LINE__, __func__);
Instr instr0 = Assembler::instr_at(pc_);
Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
Instr instr2 = Assembler::instr_at(pc_ + 2 * Assembler::kInstrSize);
bool patched_return = ((instr0 & kOpcodeMask) == LUI &&
(instr1 & kOpcodeMask) == ORI &&
(instr2 & kOpcodeMask) == SPECIAL &&
(instr2 & kFunctionFieldMask) == JALR);
return patched_return;
}
bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
Instr current_instr = Assembler::instr_at(pc_);
return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
}
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
// RelocInfo is needed when a pointer must be updated/serialized, such as
// by the UpdatingVisitor in mark-compact.cc or the Serializer in serialize.cc.
// It is ignored by visitors that do not need it.
// Commenting out, to simplify arch-independent changes.
// GC won't work like this, but this commit is for asm/disasm/sim.
// visitor->VisitPointer(target_object_address(), this);
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
// RelocInfo is needed when external references must be serialized by the
// Serializer visitor in serialize.cc. It is ignored by visitors that
// do not need it.
// Commenting out, to simplify arch-independent changes.
// Serializer won't work like this, but this commit is for asm/disasm/sim.
// visitor->VisitExternalReference(target_reference_address(), this);
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
Isolate::Current()->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
return ((Assembler::instr_at(pc_) & kOpcodeMask) == SPECIAL) &&
(((Assembler::instr_at(pc_) & kFunctionFieldMask) == JAL) ||
((Assembler::instr_at(pc_) & kFunctionFieldMask) == JALR));
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
visitor->VisitRuntimeEntry(this);
}
}
template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitPointer(heap, target_object_address());
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(target_reference_address());
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()))) {
StaticVisitor::VisitDebugTarget(this);
#endif
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
StaticVisitor::VisitRuntimeEntry(this);
}
}
@ -203,10 +315,18 @@ void Assembler::CheckBuffer() {
}
void Assembler::CheckTrampolinePoolQuick() {
if (pc_offset() >= next_buffer_check_) {
CheckTrampolinePool();
}
}
void Assembler::emit(Instr x) {
CheckBuffer();
*reinterpret_cast<Instr*>(pc_) = x;
pc_ += kInstrSize;
CheckTrampolinePoolQuick();
}

File diff suppressed because it is too large.

View File

@ -41,8 +41,6 @@
#include "constants-mips.h"
#include "serialize.h"
using namespace assembler::mips;
namespace v8 {
namespace internal {
@ -73,6 +71,44 @@ namespace internal {
// Core register.
struct Register {
static const int kNumRegisters = v8::internal::kNumRegisters;
static const int kNumAllocatableRegisters = 14; // v0 through t7
static int ToAllocationIndex(Register reg) {
return reg.code() - 2; // zero_reg and 'at' are skipped.
}
static Register FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
return from_code(index + 2); // zero_reg and 'at' are skipped.
}
static const char* AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
const char* const names[] = {
"v0",
"v1",
"a0",
"a1",
"a2",
"a3",
"t0",
"t1",
"t2",
"t3",
"t4",
"t5",
"t6",
"t7",
};
return names[index];
}
static Register from_code(int code) {
Register r = { code };
return r;
}
bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
bool is(Register reg) const { return code_ == reg.code_; }
int code() const {
@ -88,40 +124,41 @@ struct Register {
int code_;
};
extern const Register no_reg;
const Register no_reg = { -1 };
const Register zero_reg = { 0 };
const Register at = { 1 };
const Register v0 = { 2 };
const Register v1 = { 3 };
const Register a0 = { 4 };
const Register a1 = { 5 };
const Register a2 = { 6 };
const Register a3 = { 7 };
const Register t0 = { 8 };
const Register t1 = { 9 };
const Register t2 = { 10 };
const Register t3 = { 11 };
const Register t4 = { 12 };
const Register t5 = { 13 };
const Register t6 = { 14 };
const Register t7 = { 15 };
const Register s0 = { 16 };
const Register s1 = { 17 };
const Register s2 = { 18 };
const Register s3 = { 19 };
const Register s4 = { 20 };
const Register s5 = { 21 };
const Register s6 = { 22 };
const Register s7 = { 23 };
const Register t8 = { 24 };
const Register t9 = { 25 };
const Register k0 = { 26 };
const Register k1 = { 27 };
const Register gp = { 28 };
const Register sp = { 29 };
const Register s8_fp = { 30 };
const Register ra = { 31 };
extern const Register zero_reg;
extern const Register at;
extern const Register v0;
extern const Register v1;
extern const Register a0;
extern const Register a1;
extern const Register a2;
extern const Register a3;
extern const Register t0;
extern const Register t1;
extern const Register t2;
extern const Register t3;
extern const Register t4;
extern const Register t5;
extern const Register t6;
extern const Register t7;
extern const Register s0;
extern const Register s1;
extern const Register s2;
extern const Register s3;
extern const Register s4;
extern const Register s5;
extern const Register s6;
extern const Register s7;
extern const Register t8;
extern const Register t9;
extern const Register k0;
extern const Register k1;
extern const Register gp;
extern const Register sp;
extern const Register s8_fp;
extern const Register ra;
int ToNumber(Register reg);
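// Worked illustration of the mapping above (hypothetical calls; the register
// codes come from the definitions). ToAllocationIndex subtracts 2 because
// zero_reg and at are never allocatable.
void ExampleRegisterIndexing() {
  ASSERT(Register::ToAllocationIndex(v0) == 0);    // v0 has code 2.
  ASSERT(Register::ToAllocationIndex(t7) == 13);   // t7, code 15, is the last.
  ASSERT(Register::FromAllocationIndex(0).is(v0));
}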
@ -129,7 +166,50 @@ Register ToRegister(int num);
// Coprocessor register.
struct FPURegister {
bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegister ; }
static const int kNumRegisters = v8::internal::kNumFPURegisters;
// f0 has been excluded from allocation, following ia32 where xmm0 is
// excluded.
static const int kNumAllocatableRegisters = 15;
static int ToAllocationIndex(FPURegister reg) {
ASSERT(reg.code() != 0);
ASSERT(reg.code() % 2 == 0);
return (reg.code() / 2) - 1;
}
static FPURegister FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
return from_code((index + 1) * 2);
}
static const char* AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
const char* const names[] = {
"f2",
"f4",
"f6",
"f8",
"f10",
"f12",
"f14",
"f16",
"f18",
"f20",
"f22",
"f24",
"f26",
"f28",
"f30"
};
return names[index];
}
static FPURegister from_code(int code) {
FPURegister r = { code };
return r;
}
bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegisters ; }
bool is(FPURegister creg) const { return code_ == creg.code_; }
int code() const {
ASSERT(is_valid());
@ -139,84 +219,77 @@ struct FPURegister {
ASSERT(is_valid());
return 1 << code_;
}
void setcode(int f) {
code_ = f;
ASSERT(is_valid());
}
// Unfortunately we can't make this private in a struct.
int code_;
};
extern const FPURegister no_creg;
typedef FPURegister DoubleRegister;
extern const FPURegister f0;
extern const FPURegister f1;
extern const FPURegister f2;
extern const FPURegister f3;
extern const FPURegister f4;
extern const FPURegister f5;
extern const FPURegister f6;
extern const FPURegister f7;
extern const FPURegister f8;
extern const FPURegister f9;
extern const FPURegister f10;
extern const FPURegister f11;
extern const FPURegister f12; // arg
extern const FPURegister f13;
extern const FPURegister f14; // arg
extern const FPURegister f15;
extern const FPURegister f16;
extern const FPURegister f17;
extern const FPURegister f18;
extern const FPURegister f19;
extern const FPURegister f20;
extern const FPURegister f21;
extern const FPURegister f22;
extern const FPURegister f23;
extern const FPURegister f24;
extern const FPURegister f25;
extern const FPURegister f26;
extern const FPURegister f27;
extern const FPURegister f28;
extern const FPURegister f29;
extern const FPURegister f30;
extern const FPURegister f31;
const FPURegister no_creg = { -1 };
const FPURegister f0 = { 0 }; // Return value in hard float mode.
const FPURegister f1 = { 1 };
const FPURegister f2 = { 2 };
const FPURegister f3 = { 3 };
const FPURegister f4 = { 4 };
const FPURegister f5 = { 5 };
const FPURegister f6 = { 6 };
const FPURegister f7 = { 7 };
const FPURegister f8 = { 8 };
const FPURegister f9 = { 9 };
const FPURegister f10 = { 10 };
const FPURegister f11 = { 11 };
const FPURegister f12 = { 12 }; // Arg 0 in hard float mode.
const FPURegister f13 = { 13 };
const FPURegister f14 = { 14 }; // Arg 1 in hard float mode.
const FPURegister f15 = { 15 };
const FPURegister f16 = { 16 };
const FPURegister f17 = { 17 };
const FPURegister f18 = { 18 };
const FPURegister f19 = { 19 };
const FPURegister f20 = { 20 };
const FPURegister f21 = { 21 };
const FPURegister f22 = { 22 };
const FPURegister f23 = { 23 };
const FPURegister f24 = { 24 };
const FPURegister f25 = { 25 };
const FPURegister f26 = { 26 };
const FPURegister f27 = { 27 };
const FPURegister f28 = { 28 };
const FPURegister f29 = { 29 };
const FPURegister f30 = { 30 };
const FPURegister f31 = { 31 };
// Returns the equivalent of !cc.
// Negation of the default no_condition (-1) results in a non-default
// no_condition value (-2). As long as tests for no_condition check
// for condition < 0, this will work as expected.
inline Condition NegateCondition(Condition cc);
// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
struct FPUControlRegister {
static const int kFCSRRegister = 31;
static const int kInvalidFPUControlRegister = -1;
inline Condition ReverseCondition(Condition cc) {
switch (cc) {
case Uless:
return Ugreater;
case Ugreater:
return Uless;
case Ugreater_equal:
return Uless_equal;
case Uless_equal:
return Ugreater_equal;
case less:
return greater;
case greater:
return less;
case greater_equal:
return less_equal;
case less_equal:
return greater_equal;
default:
return cc;
};
}
enum Hint {
no_hint = 0
bool is_valid() const { return code_ == kFCSRRegister; }
bool is(FPUControlRegister creg) const { return code_ == creg.code_; }
int code() const {
ASSERT(is_valid());
return code_;
}
int bit() const {
ASSERT(is_valid());
return 1 << code_;
}
void setcode(int f) {
code_ = f;
ASSERT(is_valid());
}
// Unfortunately we can't make this private in a struct.
int code_;
};
inline Hint NegateHint(Hint hint) {
return no_hint;
}
const FPUControlRegister no_fpucreg = { -1 };
const FPUControlRegister FCSR = { kFCSRRegister };
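// Similarly, a worked illustration of the FPU mapping (hypothetical calls):
// doubles live in even/odd register pairs and f0 is reserved, so allocation
// index i maps to register (i + 1) * 2.
void ExampleFPURegisterIndexing() {
  ASSERT(FPURegister::ToAllocationIndex(f2) == 0);     // First allocatable pair.
  ASSERT(FPURegister::ToAllocationIndex(f30) == 14);   // Last of the 15.
  ASSERT(FPURegister::FromAllocationIndex(1).is(f4));  // (1 + 1) * 2 == 4.
}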
// -----------------------------------------------------------------------------
@ -258,16 +331,75 @@ class Operand BASE_EMBEDDED {
class MemOperand : public Operand {
public:
explicit MemOperand(Register rn, int16_t offset = 0);
explicit MemOperand(Register rn, int32_t offset = 0);
private:
int16_t offset_;
int32_t offset_;
friend class Assembler;
};
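// The widening of offset_ to int32_t matters because MIPS lw/sw immediates
// are only 16 bits signed. An out-of-range offset is assumed to expand
// roughly as below, in the spirit of the LoadRegPlusOffsetToAt helper
// declared later in this header:
//   lui(at, offset >> 16);           // Upper half of the offset.
//   ori(at, at, offset & 0xffff);    // Lower half.
//   addu(at, at, base);              // at = base + offset.
//   lw(rd, MemOperand(at, 0));       // Zero offset now fits the immediate.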
class Assembler : public Malloced {
// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a Scope before use.
class CpuFeatures {
public:
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
void Probe(bool portable);
// Check whether a feature is supported by the target CPU.
bool IsSupported(CpuFeature f) const {
if (f == FPU && !FLAG_enable_fpu) return false;
return (supported_ & (1u << f)) != 0;
}
// Check whether a feature is currently enabled.
bool IsEnabled(CpuFeature f) const {
return (enabled_ & (1u << f)) != 0;
}
// Enable a specified feature within a scope.
class Scope BASE_EMBEDDED {
#ifdef DEBUG
public:
explicit Scope(CpuFeature f)
: cpu_features_(Isolate::Current()->cpu_features()),
isolate_(Isolate::Current()) {
ASSERT(cpu_features_->IsSupported(f));
ASSERT(!Serializer::enabled() ||
(cpu_features_->found_by_runtime_probing_ & (1u << f)) == 0);
old_enabled_ = cpu_features_->enabled_;
cpu_features_->enabled_ |= 1u << f;
}
~Scope() {
ASSERT_EQ(Isolate::Current(), isolate_);
cpu_features_->enabled_ = old_enabled_;
}
private:
unsigned old_enabled_;
CpuFeatures* cpu_features_;
Isolate* isolate_;
#else
public:
explicit Scope(CpuFeature f) {}
#endif
};
private:
CpuFeatures();
unsigned supported_;
unsigned enabled_;
unsigned found_by_runtime_probing_;
friend class Isolate;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
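// Usage sketch for the Scope mechanism above (hypothetical generator; the
// emitted instruction is an arbitrary placeholder):
void ExampleGenerateFPUCode(Assembler* assm) {
  if (Isolate::Current()->cpu_features()->IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);  // FPU instructions legal in this scope.
    assm->add_d(f0, f12, f14);      // Emitted only when FPU is available.
  }
}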
class Assembler : public AssemblerBase {
public:
// Create an assembler. Instructions and relocation information are emitted
// into a buffer, with the instructions starting from the beginning and the
@ -285,6 +417,9 @@ class Assembler : public Malloced {
Assembler(void* buffer, int buffer_size);
~Assembler();
// Overrides the default provided by FLAG_debug_code.
void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
@ -320,12 +455,6 @@ class Assembler : public Malloced {
// The high 8 bits are set to zero.
void label_at_put(Label* L, int at_offset);
// Size of an instruction.
static const int kInstrSize = sizeof(Instr);
// Difference between address of current opcode and target address offset.
static const int kBranchPCOffset = 4;
// Read/Modify the code target address in the branch/call instruction at pc.
static Address target_address_at(Address pc);
static void set_target_address_at(Address pc, Address target);
@ -344,8 +473,25 @@ class Assembler : public Malloced {
set_target_address_at(instruction_payload, target);
}
static const int kCallTargetSize = 3 * kPointerSize;
static const int kExternalTargetSize = 3 * kPointerSize;
// Size of an instruction.
static const int kInstrSize = sizeof(Instr);
// Difference between address of current opcode and target address offset.
static const int kBranchPCOffset = 4;
// Here we are patching the address in the LUI/ORI instruction pair.
// These values are used in the serialization process and must be zero for
// the MIPS platform, as Code, Embedded Object or External-reference pointers
// are split across two consecutive instructions and don't exist separately
// in the code, so the serializer should not step forwards in memory after
// a target is resolved and written.
static const int kCallTargetSize = 0 * kInstrSize;
static const int kExternalTargetSize = 0 * kInstrSize;
// Number of consecutive instructions used to store a 32-bit constant.
// Used by RelocInfo::target_address_address() to tell the serializer the
// address of the instruction that follows the LUI/ORI instruction pair.
static const int kInstructionsFor32BitConstant = 2;
// Distance between the instruction referring to the address of the call
// target and the return address.
@ -353,16 +499,53 @@ class Assembler : public Malloced {
// Distance between start of patched return sequence and the emitted address
// to jump to.
static const int kPatchReturnSequenceAddressOffset = kInstrSize;
static const int kPatchReturnSequenceAddressOffset = 0;
// Distance between start of patched debug break slot and the emitted address
// to jump to.
static const int kPatchDebugBreakSlotAddressOffset = kInstrSize;
static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
// Difference between address of current opcode and value read from pc
// register.
static const int kPcLoadDelta = 4;
// Number of instructions used for the JS return sequence. The constant is
// used by the debugger to patch the JS return sequence.
static const int kJSReturnSequenceInstructions = 7;
static const int kDebugBreakSlotInstructions = 4;
static const int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstrSize;
// ---------------------------------------------------------------------------
// Code generation.
void nop() { sll(zero_reg, zero_reg, 0); }
// Insert the smallest number of nop instructions
// possible to align the pc offset to a multiple
// of m. m must be a power of 2 (>= 4).
void Align(int m);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
// Different nop operations are used by the code generator to detect certain
// states of the generated code.
enum NopMarkerTypes {
NON_MARKING_NOP = 0,
DEBUG_BREAK_NOP,
// IC markers.
PROPERTY_ACCESS_INLINED,
PROPERTY_ACCESS_INLINED_CONTEXT,
PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
// Helper values.
LAST_CODE_MARKER,
FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
};
// type == 0 is the default non-marking type.
void nop(unsigned int type = 0) {
ASSERT(type < 32);
sll(zero_reg, zero_reg, type, true);
}
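// Because a plain nop is sll(zero_reg, zero_reg, 0), the marker type is
// carried in the shift-amount field and can later be recognized with
// IsNop(instr, type). A minimal, hypothetical sketch:
void ExampleMarkedNop(Assembler* assm) {
  assm->nop(Assembler::DEBUG_BREAK_NOP);  // Emits sll(zero_reg, zero_reg, 1).
}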
//------- Branch and jump instructions --------
@ -400,9 +583,7 @@ class Assembler : public Malloced {
//-------Data-processing-instructions---------
// Arithmetic.
void add(Register rd, Register rs, Register rt);
void addu(Register rd, Register rs, Register rt);
void sub(Register rd, Register rs, Register rt);
void subu(Register rd, Register rs, Register rt);
void mult(Register rs, Register rt);
void multu(Register rs, Register rt);
@ -410,7 +591,6 @@ class Assembler : public Malloced {
void divu(Register rs, Register rt);
void mul(Register rd, Register rs, Register rt);
void addi(Register rd, Register rs, int32_t j);
void addiu(Register rd, Register rs, int32_t j);
// Logical.
@ -425,21 +605,33 @@ class Assembler : public Malloced {
void lui(Register rd, int32_t j);
// Shifts.
void sll(Register rd, Register rt, uint16_t sa);
// Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nops
// and may cause problems in normal code. coming_from_nop makes sure this
// doesn't happen.
void sll(Register rd, Register rt, uint16_t sa, bool coming_from_nop = false);
void sllv(Register rd, Register rt, Register rs);
void srl(Register rd, Register rt, uint16_t sa);
void srlv(Register rd, Register rt, Register rs);
void sra(Register rt, Register rd, uint16_t sa);
void srav(Register rt, Register rd, Register rs);
void rotr(Register rd, Register rt, uint16_t sa);
void rotrv(Register rd, Register rt, Register rs);
//------------Memory-instructions-------------
void lb(Register rd, const MemOperand& rs);
void lbu(Register rd, const MemOperand& rs);
void lh(Register rd, const MemOperand& rs);
void lhu(Register rd, const MemOperand& rs);
void lw(Register rd, const MemOperand& rs);
void lwl(Register rd, const MemOperand& rs);
void lwr(Register rd, const MemOperand& rs);
void sb(Register rd, const MemOperand& rs);
void sh(Register rd, const MemOperand& rs);
void sw(Register rd, const MemOperand& rs);
void swl(Register rd, const MemOperand& rs);
void swr(Register rd, const MemOperand& rs);
//-------------Misc-instructions--------------
@ -463,6 +655,16 @@ class Assembler : public Malloced {
void slti(Register rd, Register rs, int32_t j);
void sltiu(Register rd, Register rs, int32_t j);
// Conditional move.
void movz(Register rd, Register rs, Register rt);
void movn(Register rd, Register rs, Register rt);
void movt(Register rd, Register rs, uint16_t cc = 0);
void movf(Register rd, Register rs, uint16_t cc = 0);
// Bit twiddling.
void clz(Register rd, Register rs);
void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
//--------Coprocessor-instructions----------------
@ -473,19 +675,44 @@ class Assembler : public Malloced {
void swc1(FPURegister fs, const MemOperand& dst);
void sdc1(FPURegister fs, const MemOperand& dst);
// When paired with MTC1 to write a value to a 64-bit FPR, the MTC1 must be
// executed first, followed by the MTHC1.
void mtc1(FPURegister fs, Register rt);
void mthc1(FPURegister fs, Register rt);
void mfc1(FPURegister fs, Register rt);
void mfhc1(FPURegister fs, Register rt);
void mtc1(Register rt, FPURegister fs);
void mfc1(Register rt, FPURegister fs);
void ctc1(Register rt, FPUControlRegister fs);
void cfc1(Register rt, FPUControlRegister fs);
// Arithmetic.
void add_d(FPURegister fd, FPURegister fs, FPURegister ft);
void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
void abs_d(FPURegister fd, FPURegister fs);
void mov_d(FPURegister fd, FPURegister fs);
void neg_d(FPURegister fd, FPURegister fs);
void sqrt_d(FPURegister fd, FPURegister fs);
// Conversion.
void cvt_w_s(FPURegister fd, FPURegister fs);
void cvt_w_d(FPURegister fd, FPURegister fs);
void trunc_w_s(FPURegister fd, FPURegister fs);
void trunc_w_d(FPURegister fd, FPURegister fs);
void round_w_s(FPURegister fd, FPURegister fs);
void round_w_d(FPURegister fd, FPURegister fs);
void floor_w_s(FPURegister fd, FPURegister fs);
void floor_w_d(FPURegister fd, FPURegister fs);
void ceil_w_s(FPURegister fd, FPURegister fs);
void ceil_w_d(FPURegister fd, FPURegister fs);
void cvt_l_s(FPURegister fd, FPURegister fs);
void cvt_l_d(FPURegister fd, FPURegister fs);
void trunc_l_s(FPURegister fd, FPURegister fs);
void trunc_l_d(FPURegister fd, FPURegister fs);
void round_l_s(FPURegister fd, FPURegister fs);
void round_l_d(FPURegister fd, FPURegister fs);
void floor_l_s(FPURegister fd, FPURegister fs);
void floor_l_d(FPURegister fd, FPURegister fs);
void ceil_l_s(FPURegister fd, FPURegister fs);
void ceil_l_d(FPURegister fd, FPURegister fs);
void cvt_s_w(FPURegister fd, FPURegister fs);
void cvt_s_l(FPURegister fd, FPURegister fs);
@ -503,32 +730,60 @@ class Assembler : public Malloced {
void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); }
void bc1t(int16_t offset, uint16_t cc = 0);
void bc1t(Label* L, uint16_t cc = 0) { bc1t(branch_offset(L, false)>>2, cc); }
void fcmp(FPURegister src1, const double src2, FPUCondition cond);
// Check the code size generated from label to here.
int InstructionsGeneratedSince(Label* l) {
return (pc_offset() - l->pos()) / kInstrSize;
}
// Class for scoping postponing the trampoline pool generation.
class BlockTrampolinePoolScope {
public:
explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
assem_->StartBlockTrampolinePool();
}
~BlockTrampolinePoolScope() {
assem_->EndBlockTrampolinePool();
}
private:
Assembler* assem_;
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
};
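// Usage sketch (hypothetical): the scope object defers pool emission so that
// a fixed-length sequence, e.g. one the debugger later patches, stays
// contiguous.
void ExampleFixedSequence(Assembler* assm) {
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(assm);
  assm->nop();  // No trampoline pool can be emitted between
  assm->nop();  // these instructions.
}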
// Debugging.
// Mark address of the ExitJSFrame code.
void RecordJSReturn();
// Mark address of a debug break slot.
void RecordDebugBreakSlot();
// Record a comment relocation entry that can be used by a disassembler.
// Use --debug_code to enable.
// Use --code-comments to enable.
void RecordComment(const char* msg);
void RecordPosition(int pos);
void RecordStatementPosition(int pos);
bool WriteRecordedPositions();
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
void db(uint8_t data);
void dd(uint32_t data);
int32_t pc_offset() const { return pc_ - buffer_; }
int32_t current_position() const { return current_position_; }
int32_t current_statement_position() const {
return current_statement_position_;
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
bool can_peephole_optimize(int instructions) {
if (!allow_peephole_optimization_) return false;
if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false;
return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize;
}
// Postpone the generation of the trampoline pool for the specified number of
// instructions.
void BlockTrampolinePoolFor(int instructions);
// Check if there is less than kGap bytes available in the buffer.
// If this is the case, we need to grow the buffer before emitting
// an instruction or relocation information.
@ -537,12 +792,9 @@ class Assembler : public Malloced {
// Get the number of bytes available in the buffer.
inline int available_space() const { return reloc_info_writer.pos() - pc_; }
protected:
int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Read/patch instructions.
static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
void instr_at_put(byte* pc, Instr instr) {
static void instr_at_put(byte* pc, Instr instr) {
*reinterpret_cast<Instr*>(pc) = instr;
}
Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
@ -551,7 +803,34 @@ class Assembler : public Malloced {
}
// Check if an instruction is a branch of some kind.
bool is_branch(Instr instr);
static bool IsBranch(Instr instr);
static bool IsNop(Instr instr, unsigned int type);
static bool IsPop(Instr instr);
static bool IsPush(Instr instr);
static bool IsLwRegFpOffset(Instr instr);
static bool IsSwRegFpOffset(Instr instr);
static bool IsLwRegFpNegOffset(Instr instr);
static bool IsSwRegFpNegOffset(Instr instr);
static Register GetRt(Instr instr);
static int32_t GetBranchOffset(Instr instr);
static bool IsLw(Instr instr);
static int16_t GetLwOffset(Instr instr);
static Instr SetLwOffset(Instr instr, int16_t offset);
static bool IsSw(Instr instr);
static Instr SetSwOffset(Instr instr, int16_t offset);
static bool IsAddImmediate(Instr instr);
static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
void CheckTrampolinePool(bool force_emit = false);
protected:
bool emit_debug_code() const { return emit_debug_code_; }
int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Decode branch instruction at pos and return branch target pos.
int target_at(int32_t pos);
@ -560,11 +839,28 @@ class Assembler : public Malloced {
void target_at_put(int32_t pos, int32_t target_pos);
// Say if we need to relocate with this mode.
bool MustUseAt(RelocInfo::Mode rmode);
bool MustUseReg(RelocInfo::Mode rmode);
// Record reloc info for current pc_.
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
// Block the emission of the trampoline pool before pc_offset.
void BlockTrampolinePoolBefore(int pc_offset) {
if (no_trampoline_pool_before_ < pc_offset)
no_trampoline_pool_before_ = pc_offset;
}
void StartBlockTrampolinePool() {
trampoline_pool_blocked_nesting_++;
}
void EndBlockTrampolinePool() {
trampoline_pool_blocked_nesting_--;
}
bool is_trampoline_pool_blocked() const {
return trampoline_pool_blocked_nesting_ > 0;
}
private:
// Code buffer:
// The buffer into which code and relocation info are generated.
@ -585,6 +881,22 @@ class Assembler : public Malloced {
static const int kGap = 32;
byte* pc_; // The program counter - moves forward.
// Repeated checking whether the trampoline pool should be emitted is rather
// expensive. By default we only check again once a number of instructions
// has been generated.
static const int kCheckConstIntervalInst = 32;
static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
int next_buffer_check_; // pc offset of next buffer check.
// Emission of the trampoline pool may be blocked in some code sequences.
int trampoline_pool_blocked_nesting_; // Block emission if this is not zero.
int no_trampoline_pool_before_; // Block emission before this pc offset.
// Keep track of the last emitted pool to guarantee a maximal distance.
int last_trampoline_pool_end_; // pc offset of the end of the last pool.
// Relocation information generation.
// Each relocation is encoded as a variable size value.
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
@ -593,16 +905,11 @@ class Assembler : public Malloced {
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
// Source position information.
int current_position_;
int current_statement_position_;
int written_position_;
int written_statement_position_;
// Code emission.
inline void CheckBuffer();
void GrowBuffer();
inline void emit(Instr x);
inline void CheckTrampolinePoolQuick();
// Instruction generation.
// We have 3 different kind of encoding layout on MIPS.
@ -619,6 +926,13 @@ class Assembler : public Malloced {
uint16_t sa = 0,
SecondaryField func = NULLSF);
void GenInstrRegister(Opcode opcode,
Register rs,
Register rt,
uint16_t msb,
uint16_t lsb,
SecondaryField func);
void GenInstrRegister(Opcode opcode,
SecondaryField fmt,
FPURegister ft,
@ -633,6 +947,12 @@ class Assembler : public Malloced {
FPURegister fd,
SecondaryField func = NULLSF);
void GenInstrRegister(Opcode opcode,
SecondaryField fmt,
Register rt,
FPUControlRegister fs,
SecondaryField func = NULLSF);
void GenInstrImmediate(Opcode opcode,
Register rs,
@ -651,6 +971,8 @@ class Assembler : public Malloced {
void GenInstrJump(Opcode opcode,
uint32_t address);
// Helpers.
void LoadRegPlusOffsetToAt(const MemOperand& src);
// Labels.
void print(Label* L);
@ -658,8 +980,85 @@ class Assembler : public Malloced {
void link_to(Label* L, Label* appendix);
void next(Label* L);
// One trampoline consists of:
// - space for trampoline slots,
// - space for labels.
//
// Space for trampoline slots is equal to slot_count * 2 * kInstrSize.
// Space for trampoline slots precedes space for labels. Each label is one
// instruction long, so the total space for labels equals
// label_count * kInstrSize.
class Trampoline {
public:
Trampoline(int start, int slot_count, int label_count) {
start_ = start;
next_slot_ = start;
free_slot_count_ = slot_count;
next_label_ = start + slot_count * 2 * kInstrSize;
free_label_count_ = label_count;
end_ = next_label_ + (label_count - 1) * kInstrSize;
}
int start() {
return start_;
}
int end() {
return end_;
}
int take_slot() {
int trampoline_slot = next_slot_;
ASSERT(free_slot_count_ > 0);
free_slot_count_--;
next_slot_ += 2 * kInstrSize;
return trampoline_slot;
}
int take_label() {
int label_pos = next_label_;
ASSERT(free_label_count_ > 0);
free_label_count_--;
next_label_ += kInstrSize;
return label_pos;
}
private:
int start_;
int end_;
int next_slot_;
int free_slot_count_;
int next_label_;
int free_label_count_;
};
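// Worked arithmetic for the layout above, assuming kInstrSize == 4 and
// Trampoline(start = 0, slot_count = 2, label_count = 2):
//   slots occupy [0, 16): 2 slots * 2 instructions * 4 bytes;
//   take_slot() returns 0, then 8;
//   labels start at 16, one instruction each: take_label() returns 16,
//   then 20, and end() == 20, the start of the last label.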
int32_t get_label_entry(int32_t pos, bool next_pool = true);
int32_t get_trampoline_entry(int32_t pos, bool next_pool = true);
static const int kSlotsPerTrampoline = 2304;
static const int kLabelsPerTrampoline = 8;
static const int kTrampolineInst =
2 * kSlotsPerTrampoline + kLabelsPerTrampoline;
static const int kTrampolineSize = kTrampolineInst * kInstrSize;
static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
static const int kMaxDistBetweenPools =
kMaxBranchOffset - 2 * kTrampolineSize;
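// Spelled out, with kInstrSize == 4 (values implied by the definitions):
//   kMaxBranchOffset     == (1 << 17) - 1 == 131071 bytes, the reach of an
//                           18-bit signed, word-aligned branch offset;
//   kTrampolineSize      == (2 * 2304 + 8) * 4 == 18464 bytes;
//   kMaxDistBetweenPools == 131071 - 2 * 18464 == 94143 bytes.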
List<Trampoline> trampolines_;
friend class RegExpMacroAssemblerMIPS;
friend class RelocInfo;
friend class CodePatcher;
friend class BlockTrampolinePoolScope;
PositionsRecorder positions_recorder_;
bool allow_peephole_optimization_;
bool emit_debug_code_;
friend class PositionsRecorder;
friend class EnsureSpace;
};
class EnsureSpace BASE_EMBEDDED {
public:
explicit EnsureSpace(Assembler* assembler) {
assembler->CheckBuffer();
}
};
} } // namespace v8::internal

View File

@ -33,6 +33,8 @@
#include "codegen-inl.h"
#include "debug.h"
#include "deoptimizer.h"
#include "full-codegen.h"
#include "runtime.h"
namespace v8 {
@ -59,11 +61,21 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
}
void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
@ -74,111 +86,43 @@ void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
}
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from JSEntryStub::GenerateBody
// Registers:
// a0: entry_address
// a1: function
// a2: receiver_pointer
// a3: argc
// s0: argv
//
// Stack:
// arguments slots
// handler frame
// entry frame
// callee saved registers + ra
// 4 args slots
// args
// Clear the context before we push it when entering the JS frame.
__ li(cp, Operand(0, RelocInfo::NONE));
// Enter an internal frame.
__ EnterInternalFrame();
// Set up the context from the function argument.
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// Set up the roots register.
ExternalReference roots_address = ExternalReference::roots_address();
__ li(s6, Operand(roots_address));
// Push the function and the receiver onto the stack.
__ MultiPushReversed(a1.bit() | a2.bit());
// Copy arguments to the stack in a loop.
// a3: argc
// s0: argv, i.e. points to the first arg
Label loop, entry;
__ sll(t0, a3, kPointerSizeLog2);
__ add(t2, s0, t0);
__ b(&entry);
__ nop(); // Branch delay slot nop.
// t2 points past last arg.
__ bind(&loop);
__ lw(t0, MemOperand(s0)); // Read next parameter.
__ addiu(s0, s0, kPointerSize);
__ lw(t0, MemOperand(t0)); // Dereference handle.
__ Push(t0); // Push parameter.
__ bind(&entry);
__ Branch(ne, &loop, s0, Operand(t2));
// Registers:
// a0: entry_address
// a1: function
// a2: receiver_pointer
// a3: argc
// s0: argv
// s6: roots_address
//
// Stack:
// arguments
// receiver
// function
// arguments slots
// handler frame
// entry frame
// callee saved registers + ra
// 4 args slots
// args
// Initialize all JavaScript callee-saved registers, since they will be seen
// by the garbage collector as part of handlers.
__ LoadRoot(t4, Heap::kUndefinedValueRootIndex);
__ mov(s1, t4);
__ mov(s2, t4);
__ mov(s3, t4);
__ mov(s4, t4);
__ mov(s5, t4);
// s6 holds the root address. Do not clobber.
// s7 is cp. Do not init.
// Invoke the code and pass argc as a0.
__ mov(a0, a3);
if (is_construct) {
UNIMPLEMENTED_MIPS();
__ break_(0x164);
} else {
ParameterCount actual(a0);
__ InvokeFunction(a1, actual, CALL_FUNCTION);
}
__ LeaveInternalFrame();
__ Jump(ra);
}
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, false);
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
@ -194,7 +138,6 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
__ break_(0x201);
}

src/mips/code-stubs-mips.cc (new file, 752 lines)
View File

@ -0,0 +1,752 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if defined(V8_TARGET_ARCH_MIPS)
#include "bootstrapper.h"
#include "code-stubs.h"
#include "codegen-inl.h"
#include "regexp-macro-assembler.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
void ToNumberStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void FastNewClosureStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void FastNewContextStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
// Takes a Smi and converts it to an IEEE 64-bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
// scratch register. Destroys the source register. No GC occurs during this
// stub so you don't have to set up the frame.
class ConvertToDoubleStub : public CodeStub {
public:
ConvertToDoubleStub(Register result_reg_1,
Register result_reg_2,
Register source_reg,
Register scratch_reg)
: result1_(result_reg_1),
result2_(result_reg_2),
source_(source_reg),
zeros_(scratch_reg) { }
private:
Register result1_;
Register result2_;
Register source_;
Register zeros_;
// Minor key encoding in 16 bits.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 14> {};
Major MajorKey() { return ConvertToDouble; }
int MinorKey() {
// Encode the parameters in a unique 16 bit value.
return result1_.code() +
(result2_.code() << 4) +
(source_.code() << 8) +
(zeros_.code() << 12);
}
void Generate(MacroAssembler* masm);
const char* GetName() { return "ConvertToDoubleStub"; }
#ifdef DEBUG
void Print() { PrintF("ConvertToDoubleStub\n"); }
#endif
};
void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
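The 1/11/52 bit split described in the ConvertToDoubleStub comment above can be sketched in portable C++ (a minimal illustration, not the stub's generated code; SplitToDoubleWords is a hypothetical name):

#include <cstdint>
#include <cstring>
// Split the IEEE 754 representation of a Smi-ranged integer into the two
// 32-bit result words: hi holds the sign bit, the 11 exponent bits and the
// high 20 fraction bits; lo holds the remaining 32 fraction bits.
void SplitToDoubleWords(int32_t smi_value, uint32_t* hi, uint32_t* lo) {
  double d = static_cast<double>(smi_value);
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  *hi = static_cast<uint32_t>(bits >> 32);
  *lo = static_cast<uint32_t>(bits);
}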
class FloatingPointHelper : public AllStatic {
public:
enum Destination {
kFPURegisters,
kCoreRegisters
};
// Loads smis from a0 and a1 (right and left in binary operations) into
// floating point registers. Depending on the destination the values end up
// either in f14 and f12 or in a2/a3 and a0/a1 respectively. If the
// destination is floating point registers, FPU must be supported. If core
// registers are requested when FPU is supported, f12 and f14 will be
// scratched.
static void LoadSmis(MacroAssembler* masm,
Destination destination,
Register scratch1,
Register scratch2);
// Loads objects from a0 and a1 (right and left in binary operations) into
// floating point registers. Depending on the destination the values end up
// either in f14 and f12 or in a2/a3 and a0/a1 respectively. If the
// destination is floating point registers, FPU must be supported. If core
// registers are requested when FPU is supported, f12 and f14 will still be
// scratched. If either a0 or a1 is not a number (neither smi nor heap number
// object) the not_number label is jumped to with a0 and a1 intact.
static void LoadOperands(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register heap_number_map,
Register scratch1,
Register scratch2,
Label* not_number);
// Loads the number from object into dst as a 32-bit integer if possible. If
// the object is not a 32-bit integer, control continues at the label
// not_int32. If FPU is supported, double_scratch is used but not scratch2.
static void LoadNumberAsInteger(MacroAssembler* masm,
Register object,
Register dst,
Register heap_number_map,
Register scratch1,
Register scratch2,
FPURegister double_scratch,
Label* not_int32);
private:
static void LoadNumber(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register object,
FPURegister dst,
Register dst1,
Register dst2,
Register heap_number_map,
Register scratch1,
Register scratch2,
Label* not_number);
};
void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register scratch1,
Register scratch2) {
UNIMPLEMENTED_MIPS();
}
void FloatingPointHelper::LoadOperands(
MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register heap_number_map,
Register scratch1,
Register scratch2,
Label* slow) {
UNIMPLEMENTED_MIPS();
}
void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
Destination destination,
Register object,
FPURegister dst,
Register dst1,
Register dst2,
Register heap_number_map,
Register scratch1,
Register scratch2,
Label* not_number) {
UNIMPLEMENTED_MIPS();
}
void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm,
Register object,
Register dst,
Register heap_number_map,
Register scratch1,
Register scratch2,
FPURegister double_scratch,
Label* not_int32) {
UNIMPLEMENTED_MIPS();
}
// See the comment for the class; this does NOT work for int32s in Smi range.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void EmitNanCheck(MacroAssembler* masm, Condition cc) {
UNIMPLEMENTED_MIPS();
}
void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Register object,
Register result,
Register scratch1,
Register scratch2,
Register scratch3,
bool object_is_smi,
Label* not_found) {
UNIMPLEMENTED_MIPS();
}
void NumberToStringStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
// On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared.
// On exit, v0 is 0, positive, or negative (smi) to indicate the result
// of the comparison.
void CompareStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
// This stub does not handle the inlined cases (Smis, Booleans, undefined).
// The stub returns zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
// We fall into this code if the operands were Smis, but the result was
// not (e.g. overflow). We branch into this code (to the not_smi label) if
// the operands were not both Smi. The operands are in lhs and rhs.
// To call the C-implemented binary fp operation routines we need to end up
// with the double precision floating point operands in a0 and a1 (for the
// value in a1) and a2 and a3 (for the value in a0).
void GenericBinaryOpStub::HandleBinaryOpSlowCases(MacroAssembler* masm,
Label* not_smi,
Register lhs,
Register rhs,
const Builtins::JavaScript& builtin) {
UNIMPLEMENTED_MIPS();
}
// For bitwise ops where the inputs are not both Smis we here try to determine
// whether both inputs are either Smis or at least heap numbers that can be
// represented by a 32 bit signed value. We truncate towards zero as required
// by the ES spec. If this is the case we do the bitwise op and see if the
// result is a Smi. If so, great, otherwise we try to find a heap number to
// write the answer into (either by allocating or by overwriting).
// On entry the operands are in lhs (x) and rhs (y). (Result = x op y).
// On exit the result is in v0.
void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
Register lhs,
Register rhs) {
UNIMPLEMENTED_MIPS();
}
void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
GenericBinaryOpStub stub(key, type_info);
return stub.GetCode();
}
Handle<Code> GetTypeRecordingBinaryOpStub(int key,
TRBinaryOpIC::TypeInfo type_info,
TRBinaryOpIC::TypeInfo result_type_info) {
TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
return stub.GetCode();
}
void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
MacroAssembler* masm) {
UNIMPLEMENTED();
}
void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
const char* TypeRecordingBinaryOpStub::GetName() {
UNIMPLEMENTED_MIPS();
return name_;
}
void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation(
MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
bool smi_operands,
Label* not_numbers,
Label* gc_required) {
UNIMPLEMENTED_MIPS();
}
// Generate the smi code. If the operation on smis is successful, a return is
// generated. If the result is not a smi and heap number allocation is not
// requested, the code falls through. If number allocation is requested but a
// heap number cannot be allocated, the code jumps to the label gc_required.
void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
Label* gc_required,
SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
UNIMPLEMENTED_MIPS();
}
void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
MacroAssembler* masm,
Register result,
Register heap_number_map,
Register scratch1,
Register scratch2,
Label* gc_required) {
UNIMPLEMENTED_MIPS();
}
void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
UNIMPLEMENTED_MIPS();
return Runtime::kAbort;
}
void StackCheckStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
bool CEntryStub::NeedsImmovableCode() {
return true;
}
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
UncatchableExceptionType type) {
UNIMPLEMENTED_MIPS();
}
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
bool do_gc,
bool always_allocate) {
UNIMPLEMENTED_MIPS();
}
void CEntryStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
UNIMPLEMENTED_MIPS();
}
// Uses registers a0 to t0. Expected input is
// object in a0 (or at sp+1*kPointerSize) and function in
// a1 (or at sp), depending on whether or not
// args_in_registers() is true.
void InstanceofStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void RegExpExecStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void CallFunctionStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
const char* CompareStub::GetName() {
UNIMPLEMENTED_MIPS();
return name_;
}
int CompareStub::MinorKey() {
UNIMPLEMENTED_MIPS();
return 0;
}
// StringCharCodeAtGenerator
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void StringCharCodeAtGenerator::GenerateSlow(
MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
UNIMPLEMENTED_MIPS();
}
// -------------------------------------------------------------------------
// StringCharFromCodeGenerator
void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void StringCharFromCodeGenerator::GenerateSlow(
MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
UNIMPLEMENTED_MIPS();
}
// -------------------------------------------------------------------------
// StringCharAtGenerator
void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void StringCharAtGenerator::GenerateSlow(
MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
UNIMPLEMENTED_MIPS();
}
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
// be used in places where the number of characters is small and the
// additional setup and checking in GenerateCopyCharactersLong adds too much
// overhead. Copying of overlapping regions is not supported.
// Dest register ends at the position after the last character written.
static void GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch,
bool ascii);
// Generate code for copying a large number of characters. This function
// is allowed to spend extra time setting up conditions to make copying
// faster. Copying of overlapping regions is not supported.
// Dest register ends at the position after the last character written.
static void GenerateCopyCharactersLong(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
Register scratch5,
int flags);
// Probe the symbol table for a two character string. If the string is
// not found by probing, a jump to the label not_found is performed. This jump
// does not guarantee that the string is not in the symbol table. If the
// string is found, the code falls through with the string in register v0.
// Contents of both c1 and c2 registers are modified. At the exit c1 is
// guaranteed to contain a halfword with low and high bytes equal to the
// initial contents of c1 and c2 respectively (e.g. c1 = 'a' (0x61) and
// c2 = 'b' (0x62) leave c1 holding 0x6261).
static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
Register scratch5,
Label* not_found);
// Generate string hash.
static void GenerateHashInit(MacroAssembler* masm,
Register hash,
Register character);
static void GenerateHashAddCharacter(MacroAssembler* masm,
Register hash,
Register character);
static void GenerateHashGetHash(MacroAssembler* masm,
Register hash);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch,
bool ascii) {
UNIMPLEMENTED_MIPS();
}
enum CopyCharactersFlags {
COPY_ASCII = 1,
DEST_ALWAYS_ALIGNED = 2
};
void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
Register scratch5,
int flags) {
UNIMPLEMENTED_MIPS();
}
void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
Register scratch5,
Label* not_found) {
UNIMPLEMENTED_MIPS();
}
void StringHelper::GenerateHashInit(MacroAssembler* masm,
Register hash,
Register character) {
UNIMPLEMENTED_MIPS();
}
void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
Register hash,
Register character) {
UNIMPLEMENTED_MIPS();
}
void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
Register hash) {
UNIMPLEMENTED_MIPS();
}
void SubStringStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register right,
Register left,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4) {
UNIMPLEMENTED_MIPS();
}
void StringCompareStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void StringAddStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void GenerateFastPixelArrayLoad(MacroAssembler* masm,
Register receiver,
Register key,
Register elements_map,
Register elements,
Register scratch1,
Register scratch2,
Register result,
Label* not_pixel_array,
Label* key_not_smi,
Label* out_of_range) {
UNIMPLEMENTED_MIPS();
}
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS

src/mips/code-stubs-mips.h (new file, 511 lines)
View File

@ -0,0 +1,511 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_MIPS_CODE_STUBS_MIPS_H_
#define V8_MIPS_CODE_STUBS_MIPS_H_
#include "ic-inl.h"
namespace v8 {
namespace internal {
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
class TranscendentalCacheStub: public CodeStub {
public:
explicit TranscendentalCacheStub(TranscendentalCache::Type type)
: type_(type) {}
void Generate(MacroAssembler* masm);
private:
TranscendentalCache::Type type_;
Major MajorKey() { return TranscendentalCache; }
int MinorKey() { return type_; }
Runtime::FunctionId RuntimeFunction();
};
class ToBooleanStub: public CodeStub {
public:
explicit ToBooleanStub(Register tos) : tos_(tos) { }
void Generate(MacroAssembler* masm);
private:
Register tos_;
Major MajorKey() { return ToBoolean; }
int MinorKey() { return tos_.code(); }
};
class GenericBinaryOpStub : public CodeStub {
public:
static const int kUnknownIntValue = -1;
GenericBinaryOpStub(Token::Value op,
OverwriteMode mode,
Register lhs,
Register rhs,
int constant_rhs = kUnknownIntValue)
: op_(op),
mode_(mode),
lhs_(lhs),
rhs_(rhs),
constant_rhs_(constant_rhs),
specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
runtime_operands_type_(BinaryOpIC::UNINIT_OR_SMI),
name_(NULL) { }
GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
lhs_(LhsRegister(RegisterBits::decode(key))),
rhs_(RhsRegister(RegisterBits::decode(key))),
constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
runtime_operands_type_(type_info),
name_(NULL) { }
private:
Token::Value op_;
OverwriteMode mode_;
Register lhs_;
Register rhs_;
int constant_rhs_;
bool specialized_on_rhs_;
BinaryOpIC::TypeInfo runtime_operands_type_;
char* name_;
static const int kMaxKnownRhs = 0x40000000;
static const int kKnownRhsKeyBits = 6;
// Minor key encoding in 16 bits.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 6> {};
class TypeInfoBits: public BitField<int, 8, 3> {};
class RegisterBits: public BitField<bool, 11, 1> {};
class KnownIntBits: public BitField<int, 12, kKnownRhsKeyBits> {};
Major MajorKey() { return GenericBinaryOp; }
int MinorKey() {
ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
(lhs_.is(a1) && rhs_.is(a0)));
// Encode the parameters in a unique 16 bit value.
return OpBits::encode(op_)
| ModeBits::encode(mode_)
| KnownIntBits::encode(MinorKeyForKnownInt())
| TypeInfoBits::encode(runtime_operands_type_)
| RegisterBits::encode(lhs_.is(a0));
}
void Generate(MacroAssembler* masm);
void HandleNonSmiBitwiseOp(MacroAssembler* masm,
Register lhs,
Register rhs);
void HandleBinaryOpSlowCases(MacroAssembler* masm,
Label* not_smi,
Register lhs,
Register rhs,
const Builtins::JavaScript& builtin);
void GenerateTypeTransition(MacroAssembler* masm);
static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
if (constant_rhs == kUnknownIntValue) return false;
if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
if (op == Token::MOD) {
if (constant_rhs <= 1) return false;
if (constant_rhs <= 10) return true;
if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
return false;
}
return false;
}
int MinorKeyForKnownInt() {
if (!specialized_on_rhs_) return 0;
if (constant_rhs_ <= 10) return constant_rhs_ + 1;
ASSERT(IsPowerOf2(constant_rhs_));
int key = 12;
int d = constant_rhs_;
while ((d & 1) == 0) {
key++;
d >>= 1;
}
ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
return key;
}
int KnownBitsForMinorKey(int key) {
if (!key) return 0;
if (key <= 11) return key - 1;
int d = 1;
while (key != 12) {
key--;
d <<= 1;
}
return d;
}
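MinorKeyForKnownInt and KnownBitsForMinorKey above form an encode/decode pair: constants 2..10 map to keys 3..11, and larger powers of two map to key 12 + log2(rhs). A standalone sketch of the power-of-two round trip (it mirrors the logic above; the function names are hypothetical):

// Illustrative round trip, not V8 code.
int EncodePowerOfTwo(int rhs) {   // mirrors MinorKeyForKnownInt()
  int key = 12;
  while ((rhs & 1) == 0) { key++; rhs >>= 1; }
  return key;                     // e.g. 1024 -> 12 + 10 == 22
}
int DecodePowerOfTwo(int key) {   // mirrors KnownBitsForMinorKey()
  int d = 1;
  while (key-- != 12) d <<= 1;
  return d;                       // e.g. 22 -> 1024
}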
Register LhsRegister(bool lhs_is_a0) {
return lhs_is_a0 ? a0 : a1;
}
Register RhsRegister(bool lhs_is_a0) {
return lhs_is_a0 ? a1 : a0;
}
bool HasSmiSmiFastPath() {
return op_ != Token::DIV;
}
bool ShouldGenerateSmiCode() {
return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
runtime_operands_type_ != BinaryOpIC::STRINGS;
}
bool ShouldGenerateFPCode() {
return runtime_operands_type_ != BinaryOpIC::STRINGS;
}
virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
virtual InlineCacheState GetICState() {
return BinaryOpIC::ToState(runtime_operands_type_);
}
const char* GetName();
virtual void FinishCode(Code* code) {
code->set_binary_op_type(runtime_operands_type_);
}
#ifdef DEBUG
void Print() {
if (!specialized_on_rhs_) {
PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
} else {
PrintF("GenericBinaryOpStub (%s by %d)\n",
Token::String(op_),
constant_rhs_);
}
}
#endif
};
class TypeRecordingBinaryOpStub: public CodeStub {
public:
TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
: op_(op),
mode_(mode),
operands_type_(TRBinaryOpIC::UNINITIALIZED),
result_type_(TRBinaryOpIC::UNINITIALIZED),
name_(NULL) {
UNIMPLEMENTED_MIPS();
}
TypeRecordingBinaryOpStub(
int key,
TRBinaryOpIC::TypeInfo operands_type,
TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
use_fpu_(FPUBits::decode(key)),
operands_type_(operands_type),
result_type_(result_type),
name_(NULL) { }
private:
enum SmiCodeGenerateHeapNumberResults {
ALLOW_HEAPNUMBER_RESULTS,
NO_HEAPNUMBER_RESULTS
};
Token::Value op_;
OverwriteMode mode_;
bool use_fpu_;
// Operand type information determined at runtime.
TRBinaryOpIC::TypeInfo operands_type_;
TRBinaryOpIC::TypeInfo result_type_;
char* name_;
const char* GetName();
#ifdef DEBUG
void Print() {
PrintF("TypeRecordingBinaryOpStub %d (op %s), "
"(mode %d, runtime_type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
TRBinaryOpIC::GetName(operands_type_));
}
#endif
// Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 7> {};
class FPUBits: public BitField<bool, 9, 1> {};
class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};
Major MajorKey() { return TypeRecordingBinaryOp; }
int MinorKey() {
return OpBits::encode(op_)
| ModeBits::encode(mode_)
| FPUBits::encode(use_fpu_)
| OperandTypeInfoBits::encode(operands_type_)
| ResultTypeInfoBits::encode(result_type_);
}
void Generate(MacroAssembler* masm);
void GenerateGeneric(MacroAssembler* masm);
void GenerateSmiSmiOperation(MacroAssembler* masm);
void GenerateFPOperation(MacroAssembler* masm,
bool smi_operands,
Label* not_numbers,
Label* gc_required);
void GenerateSmiCode(MacroAssembler* masm,
Label* gc_required,
SmiCodeGenerateHeapNumberResults heapnumber_results);
void GenerateLoadArguments(MacroAssembler* masm);
void GenerateReturn(MacroAssembler* masm);
void GenerateUninitializedStub(MacroAssembler* masm);
void GenerateSmiStub(MacroAssembler* masm);
void GenerateInt32Stub(MacroAssembler* masm);
void GenerateHeapNumberStub(MacroAssembler* masm);
void GenerateStringStub(MacroAssembler* masm);
void GenerateGenericStub(MacroAssembler* masm);
void GenerateAddStrings(MacroAssembler* masm);
void GenerateCallRuntime(MacroAssembler* masm);
void GenerateHeapResultAllocation(MacroAssembler* masm,
Register result,
Register heap_number_map,
Register scratch1,
Register scratch2,
Label* gc_required);
void GenerateRegisterArgsPush(MacroAssembler* masm);
void GenerateTypeTransition(MacroAssembler* masm);
void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
virtual InlineCacheState GetICState() {
return TRBinaryOpIC::ToState(operands_type_);
}
virtual void FinishCode(Code* code) {
code->set_type_recording_binary_op_type(operands_type_);
code->set_type_recording_binary_op_result_type(result_type_);
}
friend class CodeGenerator;
};
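For reference, the RRRTTTVOOOOOOOMM layout produced by MinorKey() above decodes as follows (field widths taken from the BitField declarations; the local names are illustrative):

// Illustrative decode of a TypeRecordingBinaryOpStub minor key.
void DecodeMinorKey(int key) {
  int mode         = key & 0x03;                // MM:      ModeBits, bits 0-1
  int op           = (key >> 2) & 0x7F;         // OOOOOOO: OpBits, bits 2-8
  bool use_fpu     = ((key >> 9) & 0x01) != 0;  // V:       FPUBits, bit 9
  int operand_type = (key >> 10) & 0x07;        // TTT:     OperandTypeInfoBits
  int result_type  = (key >> 13) & 0x07;        // RRR:     ResultTypeInfoBits
}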
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
NO_STRING_ADD_FLAGS = 0,
NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
};
class StringAddStub: public CodeStub {
public:
explicit StringAddStub(StringAddFlags flags) {
string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
}
private:
Major MajorKey() { return StringAdd; }
int MinorKey() { return string_check_ ? 0 : 1; }
void Generate(MacroAssembler* masm);
// Should the stub check whether arguments are strings?
bool string_check_;
};
class SubStringStub: public CodeStub {
public:
SubStringStub() {}
private:
Major MajorKey() { return SubString; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
};
class StringCompareStub: public CodeStub {
public:
StringCompareStub() { }
// Compare two flat ASCII strings and return the result in v0.
// Does not use the stack.
static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register left,
Register right,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4);
private:
Major MajorKey() { return StringCompare; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
};
// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub
// so you don't have to set up the frame.
class WriteInt32ToHeapNumberStub : public CodeStub {
public:
WriteInt32ToHeapNumberStub(Register the_int,
Register the_heap_number,
Register scratch,
Register scratch2)
: the_int_(the_int),
the_heap_number_(the_heap_number),
scratch_(scratch),
sign_(scratch2) { }
private:
Register the_int_;
Register the_heap_number_;
Register scratch_;
Register sign_;
// Minor key encoding in 16 bits.
class IntRegisterBits: public BitField<int, 0, 4> {};
class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
class ScratchRegisterBits: public BitField<int, 8, 4> {};
Major MajorKey() { return WriteInt32ToHeapNumber; }
int MinorKey() {
// Encode the parameters in a unique 16 bit value.
return IntRegisterBits::encode(the_int_.code())
| HeapNumberRegisterBits::encode(the_heap_number_.code())
| ScratchRegisterBits::encode(scratch_.code());
}
void Generate(MacroAssembler* masm);
const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
#ifdef DEBUG
void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
#endif
};
class NumberToStringStub: public CodeStub {
public:
NumberToStringStub() { }
// Generate code to do a lookup in the number string cache. If the number in
// the register object is found in the cache, the generated code falls through
// with the result in the result register. The object and the result register
// can be the same. If the number is not found in the cache, the code jumps to
// the label not_found with only the content of register object unchanged.
static void GenerateLookupNumberStringCache(MacroAssembler* masm,
Register object,
Register result,
Register scratch1,
Register scratch2,
Register scratch3,
bool object_is_smi,
Label* not_found);
private:
Major MajorKey() { return NumberToString; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
const char* GetName() { return "NumberToStringStub"; }
#ifdef DEBUG
void Print() {
PrintF("NumberToStringStub\n");
}
#endif
};
// Enter C code from generated RegExp code in a way that allows
// the C code to fix the return address in case of a GC.
// Currently only needed on ARM and MIPS.
class RegExpCEntryStub: public CodeStub {
public:
RegExpCEntryStub() {}
virtual ~RegExpCEntryStub() {}
void Generate(MacroAssembler* masm);
private:
Major MajorKey() { return RegExpCEntry; }
int MinorKey() { return 0; }
bool NeedsImmovableCode() { return true; }
const char* GetName() { return "RegExpCEntryStub"; }
};
// Generate code to load an element from a pixel array. The receiver is
// assumed not to be a smi and to have elements; the caller must guarantee
// this precondition. If the receiver does not have elements that are pixel
// arrays, the generated code jumps to not_pixel_array. If key is not a smi,
// then the generated code branches to key_not_smi. Callers can specify NULL
// for key_not_smi to signal that a smi check has already been performed on
// key so that the smi check is not generated. If key is not a valid index
// within the bounds of the pixel array, the generated code jumps to
// out_of_range.
void GenerateFastPixelArrayLoad(MacroAssembler* masm,
Register receiver,
Register key,
Register elements_map,
Register elements,
Register scratch1,
Register scratch2,
Register result,
Label* not_pixel_array,
Label* key_not_smi,
Label* out_of_range);
} } // namespace v8::internal
#endif // V8_MIPS_CODE_STUBS_MIPS_H_

View File

@ -29,6 +29,8 @@
#ifndef V8_MIPS_CODEGEN_MIPS_INL_H_
#define V8_MIPS_CODEGEN_MIPS_INL_H_
#include "virtual-frame-mips.h"
namespace v8 {
namespace internal {
@ -42,26 +44,18 @@ void DeferredCode::Jump() {
}
// Note: this has been hacked for submission. MIPS branches require two
// additional operands: Register src1, const Operand& src2.
void DeferredCode::Branch(Condition cond) {
__ Branch(&entry_label_, cond, zero_reg, Operand(0));
}
void Reference::GetValueAndSpill() {
GetValue();
}
void CodeGenerator::VisitAndSpill(Statement* statement) {
Visit(statement);
}
void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
VisitStatements(statements);
}
void CodeGenerator::LoadAndSpill(Expression* expression) {
Load(expression);
}
#undef __
} } // namespace v8::internal

File diff suppressed because it is too large.

View File

@ -29,17 +29,37 @@
#ifndef V8_MIPS_CODEGEN_MIPS_H_
#define V8_MIPS_CODEGEN_MIPS_H_
#include "ast.h"
#include "code-stubs-mips.h"
#include "ic-inl.h"
namespace v8 {
namespace internal {
#if(defined(__mips_hard_float) && __mips_hard_float != 0)
// Use floating-point coprocessor instructions. This flag is raised when
// -mhard-float is passed to the compiler.
static const bool IsMipsSoftFloatABI = false;
#elif(defined(__mips_soft_float) && __mips_soft_float != 0)
// Not using floating-point coprocessor instructions. This flag is raised when
// -msoft-float is passed to the compiler.
static const bool IsMipsSoftFloatABI = true;
#else
static const bool IsMipsSoftFloatABI = true;
#endif
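A sketch of how this flag is meant to be consumed (illustrative; the helper name is hypothetical). Under the o32 hard-float ABI the first double argument travels in an FPU register, while under soft-float the same bits travel in a GPR pair:

// Illustrative only: branch code generation on the ABI flag above.
void DescribeDoubleArgConvention() {
  if (!IsMipsSoftFloatABI) {
    // Hard-float o32: the first double argument is passed in f12.
  } else {
    // Soft-float: the same 64 bits are passed in the a0/a1 register pair.
  }
}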
// Forward declarations
class CompilationInfo;
class DeferredCode;
class JumpTarget;
class RegisterAllocator;
class RegisterFile;
enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
enum GenerateInlineSmi { DONT_GENERATE_INLINE_SMI, GENERATE_INLINE_SMI };
enum WriteBarrierCharacter { UNLIKELY_SMI, LIKELY_SMI, NEVER_NEWSPACE };
// -----------------------------------------------------------------------------
@ -101,7 +121,12 @@ class Reference BASE_EMBEDDED {
// on the expression stack. The value is stored in the location specified
// by the reference, and is left on top of the stack, after the reference
// is popped from beneath it (unloaded).
void SetValue(InitState init_state);
void SetValue(InitState init_state, WriteBarrierCharacter wb);
// This is in preparation for something that uses the reference on the stack.
// If we need this reference afterwards, then dup it now. Otherwise mark
// it as used.
inline void DupIfPersist();
private:
CodeGenerator* cgen_;
@ -126,31 +151,24 @@ class CodeGenState BASE_EMBEDDED {
// leaves the code generator with a NULL state.
explicit CodeGenState(CodeGenerator* owner);
// Create a code generator state based on a code generator's current
// state. The new state has its own typeof state and pair of branch
// labels.
CodeGenState(CodeGenerator* owner,
JumpTarget* true_target,
JumpTarget* false_target);
// Destroy a code generator state and restore the owning code generator's
// previous state.
~CodeGenState();
virtual ~CodeGenState();
TypeofState typeof_state() const { return typeof_state_; }
JumpTarget* true_target() const { return true_target_; }
JumpTarget* false_target() const { return false_target_; }
virtual JumpTarget* true_target() const { return NULL; }
virtual JumpTarget* false_target() const { return NULL; }
protected:
inline CodeGenerator* owner() { return owner_; }
inline CodeGenState* previous() const { return previous_; }
private:
// The owning code generator.
CodeGenerator* owner_;
// A flag indicating whether we are compiling the immediate subexpression
// of a typeof expression.
TypeofState typeof_state_;
JumpTarget* true_target_;
JumpTarget* false_target_;
// The previous state of the owning code generator, restored when
// this state is destroyed.
@ -158,6 +176,50 @@ class CodeGenState BASE_EMBEDDED {
};
class ConditionCodeGenState : public CodeGenState {
public:
// Create a code generator state based on a code generator's current
// state. The new state has its own pair of branch labels.
ConditionCodeGenState(CodeGenerator* owner,
JumpTarget* true_target,
JumpTarget* false_target);
virtual JumpTarget* true_target() const { return true_target_; }
virtual JumpTarget* false_target() const { return false_target_; }
private:
JumpTarget* true_target_;
JumpTarget* false_target_;
};
class TypeInfoCodeGenState : public CodeGenState {
public:
TypeInfoCodeGenState(CodeGenerator* owner,
Slot* slot_number,
TypeInfo info);
virtual ~TypeInfoCodeGenState();
virtual JumpTarget* true_target() const { return previous()->true_target(); }
virtual JumpTarget* false_target() const {
return previous()->false_target();
}
private:
Slot* slot_;
TypeInfo old_type_info_;
};
// -------------------------------------------------------------------------
// Arguments allocation mode
enum ArgumentsAllocationMode {
NO_ARGUMENTS_ALLOCATION,
EAGER_ARGUMENTS_ALLOCATION,
LAZY_ARGUMENTS_ALLOCATION
};
// -----------------------------------------------------------------------------
// CodeGenerator
@ -173,9 +235,7 @@ class CodeGenerator: public AstVisitor {
SECONDARY
};
// Takes a function literal, generates code for it. This function should only
// be called by compiler.cc.
static Handle<Code> MakeCode(CompilationInfo* info);
static bool MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
static void MakeCodePrologue(CompilationInfo* info);
@ -185,6 +245,9 @@ class CodeGenerator: public AstVisitor {
Code::Flags flags,
CompilationInfo* info);
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
#ifdef ENABLE_LOGGING_AND_PROFILING
static bool ShouldGenerateLog(Expression* type);
#endif
@ -194,7 +257,9 @@ class CodeGenerator: public AstVisitor {
bool is_toplevel,
Handle<Script> script);
static void RecordPositions(MacroAssembler* masm, int pos);
static bool RecordPositions(MacroAssembler* masm,
int pos,
bool right_here = false);
// Accessors
MacroAssembler* masm() { return masm_; }
@ -216,73 +281,105 @@ class CodeGenerator: public AstVisitor {
CodeGenState* state() { return state_; }
void set_state(CodeGenState* state) { state_ = state; }
TypeInfo type_info(Slot* slot) {
int index = NumberOfSlot(slot);
if (index == kInvalidSlotNumber) return TypeInfo::Unknown();
return (*type_info_)[index];
}
TypeInfo set_type_info(Slot* slot, TypeInfo info) {
int index = NumberOfSlot(slot);
ASSERT(index >= kInvalidSlotNumber);
if (index != kInvalidSlotNumber) {
TypeInfo previous_value = (*type_info_)[index];
(*type_info_)[index] = info;
return previous_value;
}
return TypeInfo::Unknown();
}
void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
static const int kUnknownIntValue = -1;
// Number of instructions used for the JS return sequence. The constant is
// used by the debugger to patch the JS return sequence.
static const int kJSReturnSequenceLength = 7;
// If the name is an inline runtime function call return the number of
// expected arguments. Otherwise return -1.
static int InlineRuntimeCallArgumentsCount(Handle<String> name);
// Constants related to patching of inlined load/store.
static int GetInlinedKeyedLoadInstructionsAfterPatch() {
// This is in correlation with the padding in MacroAssembler::Abort.
return FLAG_debug_code ? 45 : 20;
}
static const int kInlinedKeyedStoreInstructionsAfterPatch = 9;
static int GetInlinedNamedStoreInstructionsAfterPatch() {
ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
// Magic number 5: instruction count after patched map load:
// li: 2 (lui & ori), Branch: 2 (bne & nop), sw: 1
return Isolate::Current()->inlined_write_barrier_size() + 5;
}
private:
// Type of a member function that generates inline code for a native function.
typedef void (CodeGenerator::*InlineFunctionGenerator)
(ZoneList<Expression*>*);
static const InlineFunctionGenerator kInlineFunctionGenerators[];
// Construction/Destruction.
explicit CodeGenerator(MacroAssembler* masm);
// Accessors.
inline bool is_eval();
inline Scope* scope();
inline bool is_strict_mode();
inline StrictModeFlag strict_mode_flag();
// Generating deferred code.
void ProcessDeferred();
static const int kInvalidSlotNumber = -1;
int NumberOfSlot(Slot* slot);
// State
bool has_cc() const { return cc_reg_ != cc_always; }
TypeofState typeof_state() const { return state_->typeof_state(); }
JumpTarget* true_target() const { return state_->true_target(); }
JumpTarget* false_target() const { return state_->false_target(); }
// We don't track loop nesting level on mips yet.
int loop_nesting() const { return 0; }
// Track loop nesting level.
int loop_nesting() const { return loop_nesting_; }
void IncrementLoopNesting() { loop_nesting_++; }
void DecrementLoopNesting() { loop_nesting_--; }
// Node visitors.
void VisitStatements(ZoneList<Statement*>* statements);
virtual void VisitSlot(Slot* node);
#define DEF_VISIT(type) \
void Visit##type(type* node);
virtual void Visit##type(type* node);
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
// Visit a statement and then spill the virtual frame if control flow can
// reach the end of the statement (i.e., it does not exit via break,
// continue, return, or throw). This function is used temporarily while
// the code generator is being transformed.
inline void VisitAndSpill(Statement* statement);
// Visit a list of statements and then spill the virtual frame if control
// flow can reach the end of the list.
inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
// Main code generation function
void Generate(CompilationInfo* info);
// Generate the return sequence code. Should be called no more than
// once per compiled function, immediately after binding the return
// target (which cannot be done more than once). The return value should
// be in v0.
void GenerateReturnSequence();
// Returns the arguments allocation mode.
ArgumentsAllocationMode ArgumentsMode();
// Store the arguments object and allocate it if necessary.
void StoreArgumentsObject(bool initial);
// The following are used by class Reference.
void LoadReference(Reference* ref);
void UnloadReference(Reference* ref);
MemOperand ContextOperand(Register context, int index) const {
return MemOperand(context, Context::SlotOffset(index));
}
MemOperand SlotOperand(Slot* slot, Register tmp);
// Expressions
MemOperand GlobalObject() const {
return ContextOperand(cp, Context::GLOBAL_INDEX);
}
MemOperand ContextSlotOperandCheckExtensions(Slot* slot,
Register tmp,
Register tmp2,
JumpTarget* slow);
void LoadCondition(Expression* x,
JumpTarget* true_target,
@ -290,35 +387,113 @@ class CodeGenerator: public AstVisitor {
bool force_cc);
void Load(Expression* x);
void LoadGlobal();
void LoadGlobalReceiver(Register scratch);
// Generate code to push the value of an expression on top of the frame
// and then spill the frame fully to memory. This function is used
// temporarily while the code generator is being transformed.
inline void LoadAndSpill(Expression* expression);
// Special code for typeof expressions: Unfortunately, we must
// be careful when loading the expression in 'typeof'
// expressions. We are not allowed to throw reference errors for
// non-existing properties of the global object, so we must make it
// look like an explicit property access, instead of an access
// through the context chain.
void LoadTypeofExpression(Expression* x);
// Store a keyed property. Key and receiver are on the stack and the value is
// in a0. Result is returned in v0.
void EmitKeyedStore(StaticType* key_type, WriteBarrierCharacter wb_info);
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
void LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
JumpTarget* slow);
void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
// Support for loading from local/global variables and arguments
// whose location is known unless they are shadowed by
// eval-introduced bindings. Generates no code for unsupported slot
// types and therefore expects to fall through to the slow jump target.
void EmitDynamicLoadFromSlotFastCase(Slot* slot,
TypeofState typeof_state,
JumpTarget* slow,
JumpTarget* done);
// Store the value on top of the stack to a slot.
void StoreToSlot(Slot* slot, InitState init_state);
struct InlineRuntimeLUT {
void (CodeGenerator::*method)(ZoneList<Expression*>*);
const char* name;
int nargs;
};
// Support for compiling assignment expressions.
void EmitSlotAssignment(Assignment* node);
void EmitNamedPropertyAssignment(Assignment* node);
void EmitKeyedPropertyAssignment(Assignment* node);
// Load a named property, returning it in v0. The receiver is passed on the
// stack, and remains there.
void EmitNamedLoad(Handle<String> name, bool is_contextual);
// Store to a named property. If the store is contextual, value is passed on
// the frame and consumed. Otherwise, receiver and value are passed on the
// frame and consumed. The result is returned in v0.
void EmitNamedStore(Handle<String> name, bool is_contextual);
// Load a keyed property, leaving it in v0. The receiver and key are
// passed on the stack, and remain there.
void EmitKeyedLoad();
void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
// Generate code that computes a shortcutting logical operation.
void GenerateLogicalBooleanOperation(BinaryOperation* node);
void GenericBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode,
GenerateInlineSmi inline_smi,
int known_rhs =
GenericBinaryOpStub::kUnknownIntValue);
void VirtualFrameBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode,
int known_rhs =
GenericBinaryOpStub::kUnknownIntValue);
void SmiOperation(Token::Value op,
Handle<Object> value,
bool reversed,
OverwriteMode mode);
void Comparison(Condition cc,
Expression* left,
Expression* right,
bool strict = false);
void CallWithArguments(ZoneList<Expression*>* arguments,
CallFunctionFlags flags,
int position);
// An optimized implementation of expressions of the form
// x.apply(y, arguments). We call x the applicand and y the receiver.
// The optimization avoids allocating an arguments object if possible.
void CallApplyLazy(Expression* applicand,
Expression* receiver,
VariableProxy* arguments,
int position);
// Control flow
void Branch(bool if_true, JumpTarget* target);
void CheckStack();
static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
bool CheckForInlineRuntimeCall(CallRuntime* node);
static Handle<Code> ComputeLazyCompile(int argc);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
// Declare global variables and functions in the given array of
// name/value pairs.
void DeclareGlobals(Handle<FixedArray> pairs);
// Instantiate the function based on the shared function info.
void InstantiateFunction(Handle<SharedFunctionInfo> function_info,
bool pretenure);
// Support for type checks.
void GenerateIsSmi(ZoneList<Expression*>* args);
void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
@ -338,10 +513,13 @@ class CodeGenerator: public AstVisitor {
void GenerateSetValueOf(ZoneList<Expression*>* args);
// Fast support for charCodeAt(n).
void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
// Fast support for string.charAt(n) and string[n].
void GenerateCharFromCode(ZoneList<Expression*>* args);
void GenerateStringCharFromCode(ZoneList<Expression*>* args);
// Fast support for string.charAt(n) and string[n].
void GenerateStringCharAt(ZoneList<Expression*>* args);
// Fast support for object equality testing.
void GenerateObjectEquals(ZoneList<Expression*>* args);
@ -358,14 +536,38 @@ class CodeGenerator: public AstVisitor {
void GenerateStringAdd(ZoneList<Expression*>* args);
void GenerateSubString(ZoneList<Expression*>* args);
void GenerateStringCompare(ZoneList<Expression*>* args);
void GenerateIsStringWrapperSafeForDefaultValueOf(
ZoneList<Expression*>* args);
// Support for direct calls from JavaScript to native RegExp code.
void GenerateRegExpExec(ZoneList<Expression*>* args);
void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
// Support for fast native caches.
void GenerateGetFromCache(ZoneList<Expression*>* args);
// Fast support for number to string.
void GenerateNumberToString(ZoneList<Expression*>* args);
// Fast swapping of elements.
void GenerateSwapElements(ZoneList<Expression*>* args);
// Fast call for custom callbacks.
void GenerateCallFunction(ZoneList<Expression*>* args);
// Fast call to math functions.
void GenerateMathPow(ZoneList<Expression*>* args);
void GenerateMathSin(ZoneList<Expression*>* args);
void GenerateMathCos(ZoneList<Expression*>* args);
void GenerateMathSqrt(ZoneList<Expression*>* args);
void GenerateMathLog(ZoneList<Expression*>* args);
void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args);
// Simple condition analysis.
enum ConditionAnalysis {
@ -389,9 +591,6 @@ class CodeGenerator: public AstVisitor {
bool HasValidEntryRegisters();
#endif
bool is_eval_; // Tells whether code is generated for eval.
Handle<Script> script_;
List<DeferredCode*> deferred_;
// Assembler
@ -404,7 +603,9 @@ class CodeGenerator: public AstVisitor {
RegisterAllocator* allocator_;
Condition cc_reg_;
CodeGenState* state_;
int loop_nesting_;
Vector<TypeInfo>* type_info_;
// Jump targets
BreakTarget function_return_;
@ -413,14 +614,15 @@ class CodeGenerator: public AstVisitor {
// to some unlinking code).
bool function_return_is_shadowed_;
static InlineRuntimeLUT kInlineRuntimeLUT[];
friend class VirtualFrame;
friend class Isolate;
friend class JumpTarget;
friend class Reference;
friend class FastCodeGenerator;
friend class FullCodeGenerator;
friend class FullCodeGenSyntaxChecker;
friend class InlineRuntimeFunctionsTable;
friend class LCodeGen;
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};

View File

@ -31,10 +31,8 @@
#include "constants-mips.h"
namespace assembler {
namespace mips {
namespace v8i = v8::internal;
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
@ -102,20 +100,20 @@ int Registers::Number(const char* name) {
}
const char* FPURegister::names_[kNumFPURegister] = {
const char* FPURegisters::names_[kNumFPURegisters] = {
"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11",
"f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
"f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
};
// List of alias names which can be used when referring to MIPS registers.
const FPURegister::RegisterAlias FPURegister::aliases_[] = {
const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
{kInvalidRegister, NULL}
};
const char* FPURegister::Name(int creg) {
const char* FPURegisters::Name(int creg) {
const char* result;
if ((0 <= creg) && (creg < kNumFPURegister)) {
if ((0 <= creg) && (creg < kNumFPURegisters)) {
result = names_[creg];
} else {
result = "nocreg";
@ -124,9 +122,9 @@ const char* FPURegister::Name(int creg) {
}
int FPURegister::Number(const char* name) {
int FPURegisters::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumSimuRegisters; i++) {
for (int i = 0; i < kNumFPURegisters; i++) {
if (strcmp(names_[i], name) == 0) {
return i;
}
@ -149,8 +147,8 @@ int FPURegister::Number(const char* name) {
// -----------------------------------------------------------------------------
// Instruction
bool Instruction::IsForbiddenInBranchDelay() {
int op = OpcodeFieldRaw();
bool Instruction::IsForbiddenInBranchDelay() const {
const int op = OpcodeFieldRaw();
switch (op) {
case J:
case JAL:
@ -189,13 +187,18 @@ bool Instruction::IsForbiddenInBranchDelay() {
}
bool Instruction::IsLinkingInstruction() {
int op = OpcodeFieldRaw();
bool Instruction::IsLinkingInstruction() const {
const int op = OpcodeFieldRaw();
switch (op) {
case JAL:
case BGEZAL:
case BLTZAL:
return true;
case REGIMM:
switch (RtFieldRaw()) {
case BGEZAL:
case BLTZAL:
return true;
default:
return false;
};
case SPECIAL:
switch (FunctionFieldRaw()) {
case JALR:
@ -209,7 +212,7 @@ bool Instruction::IsLinkingInstruction() {
}
bool Instruction::IsTrap() {
bool Instruction::IsTrap() const {
if (OpcodeFieldRaw() != SPECIAL) {
return false;
} else {
@ -264,6 +267,9 @@ Instruction::Type Instruction::InstructionType() const {
case TLTU:
case TEQ:
case TNE:
case MOVZ:
case MOVN:
case MOVCI:
return kRegisterType;
default:
UNREACHABLE();
@ -272,13 +278,23 @@ Instruction::Type Instruction::InstructionType() const {
case SPECIAL2:
switch (FunctionFieldRaw()) {
case MUL:
case CLZ:
return kRegisterType;
default:
UNREACHABLE();
};
break;
case SPECIAL3:
switch (FunctionFieldRaw()) {
case INS:
case EXT:
return kRegisterType;
default:
UNREACHABLE();
};
break;
case COP1: // Coprocessor instructions
switch (FunctionFieldRaw()) {
switch (RsFieldRawNoAssert()) {
case BC1: // branch on coprocessor condition
return kImmediateType;
default:
@ -304,10 +320,17 @@ Instruction::Type Instruction::InstructionType() const {
case BLEZL:
case BGTZL:
case LB:
case LH:
case LWL:
case LW:
case LBU:
case LHU:
case LWR:
case SB:
case SH:
case SWL:
case SW:
case SWR:
case LWC1:
case LDC1:
case SWC1:
@ -323,6 +346,7 @@ Instruction::Type Instruction::InstructionType() const {
return kUnsupported;
}
} } // namespace assembler::mips
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS

View File

@ -28,15 +28,25 @@
#ifndef V8_MIPS_CONSTANTS_H_
#define V8_MIPS_CONSTANTS_H_
#include "checks.h"
// UNIMPLEMENTED_ macro for MIPS.
#ifdef DEBUG
#define UNIMPLEMENTED_MIPS() \
v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
__FILE__, __LINE__, __func__)
#else
#define UNIMPLEMENTED_MIPS()
#endif
#define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
#ifdef _MIPS_ARCH_MIPS32R2
#define mips32r2 1
#else
#define mips32r2 0
#endif
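// A minimal usage sketch (not part of the original change): the flag above
// lets the decoder and simulator select encodings at run time, as in the
// srl/rotr handling added later in this patch:
//   if (mips32r2) {
//     Format(instr, "rotr 'rd, 'rt, 'sa");  // r2-only encoding.
//   } else {
//     Unknown(instr);                       // No rotate on mips32r1.
//   }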
// Defines constants and accessor classes to assemble, disassemble and
// simulate MIPS32 instructions.
//
@ -44,8 +54,8 @@
// Volume II: The MIPS32 Instruction Set
// Try www.cs.cornell.edu/courses/cs3410/2008fa/MIPS_Vol2.pdf.
namespace assembler {
namespace mips {
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
// Registers and FPURegister.
@ -61,9 +71,18 @@ static const int kNumSimuRegisters = 35;
static const int kPCRegister = 34;
// Number of coprocessor registers.
static const int kNumFPURegister = 32;
static const int kNumFPURegisters = 32;
static const int kInvalidFPURegister = -1;
// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
static const int kFCSRRegister = 31;
static const int kInvalidFPUControlRegister = -1;
static const uint32_t kFPUInvalidResult = (uint32_t) (1 << 31) - 1;
// FCSR constants.
static const uint32_t kFCSRFlagMask = (1 << 6) - 1;
static const uint32_t kFCSRFlagShift = 2;
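// A hedged sketch (not part of the original change), assuming the flag field
// is exactly the bits selected by the mask/shift pair above:
//   uint32_t flags = (fcsr >> kFCSRFlagShift) & kFCSRFlagMask;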
// Helper functions for converting between register numbers and names.
class Registers {
public:
@ -88,7 +107,7 @@ class Registers {
};
// Helper functions for converting between register numbers and names.
class FPURegister {
class FPURegisters {
public:
// Return the name of the register.
static const char* Name(int reg);
@ -103,7 +122,7 @@ class FPURegister {
private:
static const char* names_[kNumFPURegister];
static const char* names_[kNumFPURegisters];
static const RegisterAlias aliases_[];
};
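// Expected behavior of the helpers above (illustrative, not part of the
// original change):
//   FPURegisters::Name(12)       // -> "f12"
//   FPURegisters::Number("f12")  // -> 12
//   FPURegisters::Number("foo")  // -> kInvalidFPURegister, presumably.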
@ -136,6 +155,7 @@ static const int kSaShift = 6;
static const int kSaBits = 5;
static const int kFunctionShift = 0;
static const int kFunctionBits = 6;
static const int kLuiShift = 16;
static const int kImm16Shift = 0;
static const int kImm16Bits = 16;
@ -146,6 +166,14 @@ static const int kFsShift = 11;
static const int kFsBits = 5;
static const int kFtShift = 16;
static const int kFtBits = 5;
static const int kFdShift = 6;
static const int kFdBits = 5;
static const int kFCccShift = 8;
static const int kFCccBits = 3;
static const int kFBccShift = 18;
static const int kFBccBits = 3;
static const int kFBtrueShift = 16;
static const int kFBtrueBits = 1;
// ----- Miscellaneous useful masks.
// Instruction bit masks.
@ -159,9 +187,9 @@ static const int kSaFieldMask = ((1 << kSaBits) - 1) << kSaShift;
static const int kFunctionFieldMask =
((1 << kFunctionBits) - 1) << kFunctionShift;
// Misc masks.
static const int HIMask = 0xffff << 16;
static const int LOMask = 0xffff;
static const int signMask = 0x80000000;
static const int kHiMask = 0xffff << 16;
static const int kLoMask = 0xffff;
static const int kSignMask = 0x80000000;
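// Illustrative use (not part of the original change): these masks split a
// 32-bit immediate into the halfwords loaded by a lui/ori pair:
//   int32_t hi = (imm & kHiMask) >> kLuiShift;  // Upper halfword for lui.
//   int32_t lo = imm & kLoMask;                 // Lower halfword for ori.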
// ----- MIPS Opcodes and Function Fields.
@ -194,12 +222,20 @@ enum Opcode {
BGTZL = ((2 << 3) + 7) << kOpcodeShift,
SPECIAL2 = ((3 << 3) + 4) << kOpcodeShift,
SPECIAL3 = ((3 << 3) + 7) << kOpcodeShift,
LB = ((4 << 3) + 0) << kOpcodeShift,
LH = ((4 << 3) + 1) << kOpcodeShift,
LWL = ((4 << 3) + 2) << kOpcodeShift,
LW = ((4 << 3) + 3) << kOpcodeShift,
LBU = ((4 << 3) + 4) << kOpcodeShift,
LHU = ((4 << 3) + 5) << kOpcodeShift,
LWR = ((4 << 3) + 6) << kOpcodeShift,
SB = ((5 << 3) + 0) << kOpcodeShift,
SH = ((5 << 3) + 1) << kOpcodeShift,
SWL = ((5 << 3) + 2) << kOpcodeShift,
SW = ((5 << 3) + 3) << kOpcodeShift,
SWR = ((5 << 3) + 6) << kOpcodeShift,
LWC1 = ((6 << 3) + 1) << kOpcodeShift,
LDC1 = ((6 << 3) + 5) << kOpcodeShift,
@ -216,9 +252,12 @@ enum SecondaryField {
SLLV = ((0 << 3) + 4),
SRLV = ((0 << 3) + 6),
SRAV = ((0 << 3) + 7),
MOVCI = ((0 << 3) + 1),
JR = ((1 << 3) + 0),
JALR = ((1 << 3) + 1),
MOVZ = ((1 << 3) + 2),
MOVN = ((1 << 3) + 3),
BREAK = ((1 << 3) + 5),
MFHI = ((2 << 3) + 0),
@ -250,6 +289,12 @@ enum SecondaryField {
// SPECIAL2 Encoding of Function Field.
MUL = ((0 << 3) + 2),
CLZ = ((4 << 3) + 0),
CLO = ((4 << 3) + 1),
// SPECIAL3 Encoding of Function Field.
EXT = ((0 << 3) + 0),
INS = ((0 << 3) + 4),
// REGIMM encoding of rt Field.
BLTZ = ((0 << 3) + 0) << 16,
@ -259,8 +304,10 @@ enum SecondaryField {
// COP1 Encoding of rs Field.
MFC1 = ((0 << 3) + 0) << 21,
CFC1 = ((0 << 3) + 2) << 21,
MFHC1 = ((0 << 3) + 3) << 21,
MTC1 = ((0 << 3) + 4) << 21,
CTC1 = ((0 << 3) + 6) << 21,
MTHC1 = ((0 << 3) + 7) << 21,
BC1 = ((1 << 3) + 0) << 21,
S = ((2 << 3) + 0) << 21,
@ -269,14 +316,46 @@ enum SecondaryField {
L = ((2 << 3) + 5) << 21,
PS = ((2 << 3) + 6) << 21,
// COP1 Encoding of Function Field When rs=S.
ROUND_L_S = ((1 << 3) + 0),
TRUNC_L_S = ((1 << 3) + 1),
CEIL_L_S = ((1 << 3) + 2),
FLOOR_L_S = ((1 << 3) + 3),
ROUND_W_S = ((1 << 3) + 4),
TRUNC_W_S = ((1 << 3) + 5),
CEIL_W_S = ((1 << 3) + 6),
FLOOR_W_S = ((1 << 3) + 7),
CVT_D_S = ((4 << 3) + 1),
CVT_W_S = ((4 << 3) + 4),
CVT_L_S = ((4 << 3) + 5),
CVT_PS_S = ((4 << 3) + 6),
// COP1 Encoding of Function Field When rs=D.
ADD_D = ((0 << 3) + 0),
SUB_D = ((0 << 3) + 1),
MUL_D = ((0 << 3) + 2),
DIV_D = ((0 << 3) + 3),
SQRT_D = ((0 << 3) + 4),
ABS_D = ((0 << 3) + 5),
MOV_D = ((0 << 3) + 6),
NEG_D = ((0 << 3) + 7),
ROUND_L_D = ((1 << 3) + 0),
TRUNC_L_D = ((1 << 3) + 1),
CEIL_L_D = ((1 << 3) + 2),
FLOOR_L_D = ((1 << 3) + 3),
ROUND_W_D = ((1 << 3) + 4),
TRUNC_W_D = ((1 << 3) + 5),
CEIL_W_D = ((1 << 3) + 6),
FLOOR_W_D = ((1 << 3) + 7),
CVT_S_D = ((4 << 3) + 0),
CVT_W_D = ((4 << 3) + 4),
CVT_L_D = ((4 << 3) + 5),
C_F_D = ((6 << 3) + 0),
C_UN_D = ((6 << 3) + 1),
C_EQ_D = ((6 << 3) + 2),
C_UEQ_D = ((6 << 3) + 3),
C_OLT_D = ((6 << 3) + 4),
C_ULT_D = ((6 << 3) + 5),
C_OLE_D = ((6 << 3) + 6),
C_ULE_D = ((6 << 3) + 7),
// COP1 Encoding of Function Field When rs=W or L.
CVT_S_W = ((4 << 3) + 0),
CVT_D_W = ((4 << 3) + 1),
@ -293,7 +372,7 @@ enum SecondaryField {
// the 'U' prefix is used to specify unsigned comparisons.
enum Condition {
// Any value < 0 is considered no_condition.
no_condition = -1,
kNoCondition = -1,
overflow = 0,
no_overflow = 1,
@ -321,12 +400,59 @@ enum Condition {
eq = equal,
not_zero = not_equal,
ne = not_equal,
nz = not_equal,
sign = negative,
not_sign = positive,
mi = negative,
pl = positive,
hi = Ugreater,
ls = Uless_equal,
ge = greater_equal,
lt = less,
gt = greater,
le = less_equal,
hs = Ugreater_equal,
lo = Uless,
al = cc_always,
cc_default = no_condition
cc_default = kNoCondition
};
// Returns the equivalent of !cc.
// Negation of the default kNoCondition (-1) results in a non-default
// no_condition value (-2). As long as tests for no_condition check
// for condition < 0, this will work as expected.
inline Condition NegateCondition(Condition cc) {
ASSERT(cc != cc_always);
return static_cast<Condition>(cc ^ 1);
}
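// Example (not part of the original change): conditions are laid out in
// even/odd pairs differing only in bit 0, so flipping that bit negates the
// test; NegateCondition(less) yields greater_equal, and NegateCondition(eq)
// yields ne.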
inline Condition ReverseCondition(Condition cc) {
switch (cc) {
case Uless:
return Ugreater;
case Ugreater:
return Uless;
case Ugreater_equal:
return Uless_equal;
case Uless_equal:
return Ugreater_equal;
case less:
return greater;
case greater:
return less;
case greater_equal:
return less_equal;
case less_equal:
return greater_equal;
default:
return cc;
};
}
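// Example (not part of the original change): ReverseCondition is the
// transformation for swapped operands, since (a < b) is the same test as
// (b > a); e.g. ReverseCondition(Uless_equal) yields Ugreater_equal.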
// ----- Coprocessor conditions.
enum FPUCondition {
F, // False
@ -340,6 +466,46 @@ enum FPUCondition {
};
// -----------------------------------------------------------------------------
// Hints.
// Branch hints are not used on the MIPS. They are defined so that they can
// appear in shared function signatures, but will be ignored in MIPS
// implementations.
enum Hint {
no_hint = 0
};
inline Hint NegateHint(Hint hint) {
return no_hint;
}
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
// These constants are declared in assembler-mips.cc, as they use named
// registers and other constants.
// addiu(sp, sp, 4), a.k.a. the Pop() operation, or part of Pop(r)
// operations, as a post-increment of sp.
extern const Instr kPopInstruction;
// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
extern const Instr kPushInstruction;
// sw(r, MemOperand(sp, 0))
extern const Instr kPushRegPattern;
// lw(r, MemOperand(sp, 0))
extern const Instr kPopRegPattern;
extern const Instr kLwRegFpOffsetPattern;
extern const Instr kSwRegFpOffsetPattern;
extern const Instr kLwRegFpNegOffsetPattern;
extern const Instr kSwRegFpNegOffsetPattern;
// A mask for the Rt register for push, pop, lw, sw instructions.
extern const Instr kRtMask;
extern const Instr kLwSwInstrTypeMask;
extern const Instr kLwSwInstrArgumentMask;
extern const Instr kLwSwOffsetMask;
// Break 0xfffff, reserved for redirected real time call.
const Instr rtCallRedirInstr = SPECIAL | BREAK | call_rt_redirected << 6;
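// A hedged sketch (not part of the original change) of how such patterns are
// typically matched, assuming kRtMask selects only the Rt bits:
//   bool IsPop(Instr instr) {
//     return (instr & ~kRtMask) == kPopRegPattern;
//   }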
// A nop instruction. (Encoding of sll 0 0 0).
@ -348,10 +514,10 @@ const Instr nopInstr = 0;
class Instruction {
public:
enum {
kInstructionSize = 4,
kInstructionSizeLog2 = 2,
kInstrSize = 4,
kInstrSizeLog2 = 2,
// On MIPS PC cannot actually be directly accessed. We behave as if PC was
// always the value of the current instruction being exectued.
// always the value of the current instruction being executed.
kPCReadOffset = 0
};
@ -388,45 +554,64 @@ class Instruction {
// Accessors for the different named fields used in the MIPS encoding.
inline Opcode OpcodeField() const {
inline Opcode OpcodeValue() const {
return static_cast<Opcode>(
Bits(kOpcodeShift + kOpcodeBits - 1, kOpcodeShift));
}
inline int RsField() const {
inline int RsValue() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
return Bits(kRsShift + kRsBits - 1, kRsShift);
}
inline int RtField() const {
inline int RtValue() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
return Bits(kRtShift + kRtBits - 1, kRtShift);
}
inline int RdField() const {
inline int RdValue() const {
ASSERT(InstructionType() == kRegisterType);
return Bits(kRdShift + kRdBits - 1, kRdShift);
}
inline int SaField() const {
inline int SaValue() const {
ASSERT(InstructionType() == kRegisterType);
return Bits(kSaShift + kSaBits - 1, kSaShift);
}
inline int FunctionField() const {
inline int FunctionValue() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
return Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift);
}
inline int FsField() const {
return Bits(kFsShift + kRsBits - 1, kFsShift);
inline int FdValue() const {
return Bits(kFdShift + kFdBits - 1, kFdShift);
}
inline int FtField() const {
return Bits(kFtShift + kRsBits - 1, kFtShift);
inline int FsValue() const {
return Bits(kFsShift + kFsBits - 1, kFsShift);
}
inline int FtValue() const {
return Bits(kFtShift + kFtBits - 1, kFtShift);
}
// Float Compare condition code instruction bits.
inline int FCccValue() const {
return Bits(kFCccShift + kFCccBits - 1, kFCccShift);
}
// Float Branch condition code instruction bits.
inline int FBccValue() const {
return Bits(kFBccShift + kFBccBits - 1, kFBccShift);
}
// Float Branch true/false instruction bit.
inline int FBtrueValue() const {
return Bits(kFBtrueShift + kFBtrueBits - 1, kFBtrueShift);
}
// Return the fields at their original place in the instruction encoding.
@ -440,6 +625,11 @@ class Instruction {
return InstructionBits() & kRsFieldMask;
}
// Same as above function, but safe to call within InstructionType().
inline int RsFieldRawNoAssert() const {
return InstructionBits() & kRsFieldMask;
}
inline int RtFieldRaw() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
@ -461,37 +651,37 @@ class Instruction {
}
// Get the secondary field according to the opcode.
inline int SecondaryField() const {
inline int SecondaryValue() const {
Opcode op = OpcodeFieldRaw();
switch (op) {
case SPECIAL:
case SPECIAL2:
return FunctionField();
return FunctionValue();
case COP1:
return RsField();
return RsValue();
case REGIMM:
return RtField();
return RtValue();
default:
return NULLSF;
}
}
inline int32_t Imm16Field() const {
inline int32_t Imm16Value() const {
ASSERT(InstructionType() == kImmediateType);
return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
}
inline int32_t Imm26Field() const {
inline int32_t Imm26Value() const {
ASSERT(InstructionType() == kJumpType);
return Bits(kImm16Shift + kImm26Bits - 1, kImm26Shift);
}
// Say if the instruction should not be used in a branch delay slot.
bool IsForbiddenInBranchDelay();
bool IsForbiddenInBranchDelay() const;
// Say if the instruction 'links', e.g. jal, bal.
bool IsLinkingInstruction();
bool IsLinkingInstruction() const;
// Say if the instruction is a break or a trap.
bool IsTrap();
bool IsTrap() const;
// Instructions are read out of a code stream. The only way to get a
// reference to an instruction is to convert a pointer. There is no way
@ -510,16 +700,24 @@ class Instruction {
// -----------------------------------------------------------------------------
// MIPS assembly various constants.
static const int kArgsSlotsSize = 4 * Instruction::kInstructionSize;
static const int kArgsSlotsSize = 4 * Instruction::kInstrSize;
static const int kArgsSlotsNum = 4;
// C/C++ argument slots size.
static const int kCArgsSlotsSize = 4 * Instruction::kInstrSize;
// JS argument slots size.
static const int kJSArgsSlotsSize = 0 * Instruction::kInstrSize;
// Assembly builtins argument slots size.
static const int kBArgsSlotsSize = 0 * Instruction::kInstrSize;
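// Illustrative sketch (not part of the original change): under the o32 ABI a
// caller reserves the four C argument slots before a call and releases them
// afterwards, e.g.:
//   addiu(sp, sp, -kCArgsSlotsSize);  // Reserve home slots for a0-a3.
//   // ... call the C function ...
//   addiu(sp, sp, kCArgsSlotsSize);   // Release the slots.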
static const int kBranchReturnOffset = 2 * Instruction::kInstructionSize;
static const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
static const int kDoubleAlignment = 2 * 8;
static const int kDoubleAlignmentMask = kDoubleAlignmentMask - 1;
static const int kDoubleAlignmentBits = 3;
static const int kDoubleAlignment = (1 << kDoubleAlignmentBits);
static const int kDoubleAlignmentMask = kDoubleAlignment - 1;
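// Illustrative (not part of the original change): rounding an address up to
// the next double-aligned boundary with the constants above:
//   addr = (addr + kDoubleAlignmentMask) & ~kDoubleAlignmentMask;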
} } // namespace assembler::mips
} } // namespace v8::internal
#endif // #ifndef V8_MIPS_CONSTANTS_H_

View File

@ -39,16 +39,25 @@
#if defined(V8_TARGET_ARCH_MIPS)
#include "cpu.h"
#include "macro-assembler.h"
#include "simulator.h" // For cache flushing.
namespace v8 {
namespace internal {
void CPU::Setup() {
// Nothing to do.
CpuFeatures* cpu_features = Isolate::Current()->cpu_features();
cpu_features->Probe(true);
if (!cpu_features->IsSupported(FPU) || Serializer::enabled()) {
V8::DisableCrankshaft();
}
}
void CPU::FlushICache(void* start, size_t size) {
#ifdef __mips
#if !defined (USE_SIMULATOR)
int res;
// See http://www.linux-mips.org/wiki/Cacheflush_Syscall
@ -58,7 +67,14 @@ void CPU::FlushICache(void* start, size_t size) {
V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
}
#endif // #ifdef __mips
#else // USE_SIMULATOR.
// Not generating mips instructions for C-code. This means that we are
// building a mips emulator-based target. We should notify the simulator
// that the Icache was flushed.
// None of this code ends up in the snapshot so there are no issues
// around whether or not to generate the code when building snapshots.
Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
#endif // USE_SIMULATOR.
}
@ -68,6 +84,7 @@ void CPU::DebugBreak() {
#endif // #ifdef __mips
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS

View File

@ -38,8 +38,10 @@ namespace v8 {
namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
bool BreakLocationIterator::IsDebugBreakAtReturn() {
return Debug::IsDebugBreakAtReturn(rinfo());
UNIMPLEMENTED_MIPS();
return false;
}
@ -54,18 +56,33 @@ void BreakLocationIterator::ClearDebugBreakAtReturn() {
}
// A debug break in the exit code is identified by a call.
// A debug break in the exit code is identified by the JS frame exit code
// having been patched with a li/call pseudo-instruction sequence (lui/ori/jalr).
bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
return rinfo->IsPatchedReturnSequence();
UNIMPLEMENTED_MIPS();
return false;
}
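// Illustrative shape of the patched sequence described above (not part of
// the original change; register and target are placeholders):
//   lui   t9, upper16(debug_break_target)
//   ori   t9, t9, lower16(debug_break_target)
//   jalr  t9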
bool BreakLocationIterator::IsDebugBreakAtSlot() {
UNIMPLEMENTED_MIPS();
return false;
}
void BreakLocationIterator::SetDebugBreakAtSlot() {
UNIMPLEMENTED_MIPS();
}
void BreakLocationIterator::ClearDebugBreakAtSlot() {
UNIMPLEMENTED_MIPS();
}
#define __ ACCESS_MASM(masm)
void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
@ -106,12 +123,23 @@ void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
}
void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
masm->Abort("LiveEdit frame dropping is not supported on mips");
void Debug::GenerateSlot(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
masm->Abort("LiveEdit frame dropping is not supported on mips");
UNIMPLEMENTED_MIPS();
}

View File

@ -0,0 +1,91 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen.h"
#include "deoptimizer.h"
#include "full-codegen.h"
#include "safepoint-table.h"
// Note: this file was taken from the X64 version. ARM has a partially working
// lithium implementation, but for now it is not ported to mips.
namespace v8 {
namespace internal {
int Deoptimizer::table_entry_size_ = 10;
int Deoptimizer::patch_size() {
const int kCallInstructionSizeInWords = 3;
return kCallInstructionSizeInWords * Assembler::kInstrSize;
}
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
UNIMPLEMENTED();
}
void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code) {
UNIMPLEMENTED();
}
void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code) {
UNIMPLEMENTED();
}
void Deoptimizer::DoComputeOsrOutputFrame() {
UNIMPLEMENTED();
}
void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
int frame_index) {
UNIMPLEMENTED();
}
void Deoptimizer::EntryGenerator::Generate() {
UNIMPLEMENTED();
}
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
UNIMPLEMENTED();
}
} } // namespace v8::internal

View File

@ -34,10 +34,9 @@
// NameConverter converter;
// Disassembler d(converter);
// for (byte_* pc = begin; pc < end;) {
// char buffer[128];
// buffer[0] = '\0';
// byte_* prev_pc = pc;
// pc += d.InstructionDecode(buffer, sizeof buffer, pc);
// v8::internal::EmbeddedVector<char, 256> buffer;
// byte* prev_pc = pc;
// pc += d.InstructionDecode(buffer, pc);
// printf("%p %08x %s\n",
// prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
// }
@ -59,17 +58,13 @@
#if defined(V8_TARGET_ARCH_MIPS)
#include "constants-mips.h"
#include "mips/constants-mips.h"
#include "disasm.h"
#include "macro-assembler.h"
#include "platform.h"
namespace assembler {
namespace mips {
namespace v8i = v8::internal;
namespace v8 {
namespace internal {
//------------------------------------------------------------------------------
@ -99,7 +94,7 @@ class Decoder {
// Printing of common values.
void PrintRegister(int reg);
void PrintCRegister(int creg);
void PrintFPURegister(int freg);
void PrintRs(Instruction* instr);
void PrintRt(Instruction* instr);
void PrintRd(Instruction* instr);
@ -107,6 +102,9 @@ class Decoder {
void PrintFt(Instruction* instr);
void PrintFd(Instruction* instr);
void PrintSa(Instruction* instr);
void PrintSd(Instruction* instr);
void PrintBc(Instruction* instr);
void PrintCc(Instruction* instr);
void PrintFunction(Instruction* instr);
void PrintSecondaryField(Instruction* instr);
void PrintUImm16(Instruction* instr);
@ -119,7 +117,7 @@ class Decoder {
// Handle formatting of instructions and their options.
int FormatRegister(Instruction* instr, const char* option);
int FormatCRegister(Instruction* instr, const char* option);
int FormatFPURegister(Instruction* instr, const char* option);
int FormatOption(Instruction* instr, const char* option);
void Format(Instruction* instr, const char* format);
void Unknown(Instruction* instr);
@ -166,84 +164,100 @@ void Decoder::PrintRegister(int reg) {
void Decoder::PrintRs(Instruction* instr) {
int reg = instr->RsField();
int reg = instr->RsValue();
PrintRegister(reg);
}
void Decoder::PrintRt(Instruction* instr) {
int reg = instr->RtField();
int reg = instr->RtValue();
PrintRegister(reg);
}
void Decoder::PrintRd(Instruction* instr) {
int reg = instr->RdField();
int reg = instr->RdValue();
PrintRegister(reg);
}
// Print the Cregister name according to the active name converter.
void Decoder::PrintCRegister(int creg) {
Print(converter_.NameOfXMMRegister(creg));
// Print the FPUregister name according to the active name converter.
void Decoder::PrintFPURegister(int freg) {
Print(converter_.NameOfXMMRegister(freg));
}
void Decoder::PrintFs(Instruction* instr) {
int creg = instr->RsField();
PrintCRegister(creg);
int freg = instr->RsValue();
PrintFPURegister(freg);
}
void Decoder::PrintFt(Instruction* instr) {
int creg = instr->RtField();
PrintCRegister(creg);
int freg = instr->RtValue();
PrintFPURegister(freg);
}
void Decoder::PrintFd(Instruction* instr) {
int creg = instr->RdField();
PrintCRegister(creg);
int freg = instr->RdValue();
PrintFPURegister(freg);
}
// Print the integer value of the sa field.
void Decoder::PrintSa(Instruction* instr) {
int sa = instr->SaField();
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d", sa);
int sa = instr->SaValue();
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa);
}
// Print the integer value of the rd field (when it is not used as a register).
void Decoder::PrintSd(Instruction* instr) {
int sd = instr->RdValue();
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sd);
}
// Print the integer value of the cc field for the bc1t/f instructions.
void Decoder::PrintBc(Instruction* instr) {
int cc = instr->FBccValue();
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", cc);
}
// Print the integer value of the cc field for the FP compare instructions.
void Decoder::PrintCc(Instruction* instr) {
int cc = instr->FCccValue();
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "cc(%d)", cc);
}
// Print 16-bit unsigned immediate value.
void Decoder::PrintUImm16(Instruction* instr) {
int32_t imm = instr->Imm16Field();
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%u", imm);
int32_t imm = instr->Imm16Value();
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", imm);
}
// Print 16-bit signed immediate value.
void Decoder::PrintSImm16(Instruction* instr) {
int32_t imm = ((instr->Imm16Field())<<16)>>16;
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d", imm);
int32_t imm = ((instr->Imm16Value())<<16)>>16;
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
}
// Print 16-bit hexadecimal immediate value.
void Decoder::PrintXImm16(Instruction* instr) {
int32_t imm = instr->Imm16Field();
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"0x%x", imm);
int32_t imm = instr->Imm16Value();
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
}
// Print 26-bit immediate value.
void Decoder::PrintImm26(Instruction* instr) {
int32_t imm = instr->Imm26Field();
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d", imm);
int32_t imm = instr->Imm26Value();
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
}
@ -254,8 +268,8 @@ void Decoder::PrintCode(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
case BREAK: {
int32_t code = instr->Bits(25, 6);
out_buffer_pos_ +=
v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%05x", code);
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"0x%05x (%d)", code, code);
break;
}
case TGE:
@ -266,7 +280,7 @@ void Decoder::PrintCode(Instruction* instr) {
case TNE: {
int32_t code = instr->Bits(15, 6);
out_buffer_pos_ +=
v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code);
OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code);
break;
}
default: // Not a break or trap instruction.
@ -285,15 +299,15 @@ void Decoder::PrintInstructionName(Instruction* instr) {
int Decoder::FormatRegister(Instruction* instr, const char* format) {
ASSERT(format[0] == 'r');
if (format[1] == 's') { // 'rs: Rs register
int reg = instr->RsField();
int reg = instr->RsValue();
PrintRegister(reg);
return 2;
} else if (format[1] == 't') { // 'rt: rt register
int reg = instr->RtField();
int reg = instr->RtValue();
PrintRegister(reg);
return 2;
} else if (format[1] == 'd') { // 'rd: rd register
int reg = instr->RdField();
int reg = instr->RdValue();
PrintRegister(reg);
return 2;
}
@ -302,21 +316,21 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
}
// Handle all Cregister based formatting in this function to reduce the
// Handle all FPUregister based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatCRegister(Instruction* instr, const char* format) {
int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
ASSERT(format[0] == 'f');
if (format[1] == 's') { // 'fs: fs register
int reg = instr->RsField();
PrintCRegister(reg);
int reg = instr->FsValue();
PrintFPURegister(reg);
return 2;
} else if (format[1] == 't') { // 'ft: ft register
int reg = instr->RtField();
PrintCRegister(reg);
int reg = instr->FtValue();
PrintFPURegister(reg);
return 2;
} else if (format[1] == 'd') { // 'fd: fd register
int reg = instr->RdField();
PrintCRegister(reg);
int reg = instr->FdValue();
PrintFPURegister(reg);
return 2;
}
UNREACHABLE();
@ -359,12 +373,31 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
case 'r': { // 'r: registers
return FormatRegister(instr, format);
}
case 'f': { // 'f: Cregisters
return FormatCRegister(instr, format);
case 'f': { // 'f: FPUregisters
return FormatFPURegister(instr, format);
}
case 's': { // 'sa
ASSERT(STRING_STARTS_WITH(format, "sa"));
PrintSa(instr);
switch (format[1]) {
case 'a': {
ASSERT(STRING_STARTS_WITH(format, "sa"));
PrintSa(instr);
return 2;
}
case 'd': {
ASSERT(STRING_STARTS_WITH(format, "sd"));
PrintSd(instr);
return 2;
}
}
}
case 'b': { // 'bc - Special for bc1 cc field.
ASSERT(STRING_STARTS_WITH(format, "bc"));
PrintBc(instr);
return 2;
}
case 'C': { // 'Cc - Special for c.xx.d cc field.
ASSERT(STRING_STARTS_WITH(format, "Cc"));
PrintCc(instr);
return 2;
}
};
@ -401,45 +434,160 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
switch (instr->OpcodeFieldRaw()) {
case COP1: // Coprocessor instructions
switch (instr->RsFieldRaw()) {
case BC1: // branch on coprocessor condition
case BC1: // bc1 handled in DecodeTypeImmediate.
UNREACHABLE();
break;
case MFC1:
Format(instr, "mfc1 'rt, 'fs");
Format(instr, "mfc1 'rt, 'fs");
break;
case MFHC1:
Format(instr, "mfhc1 rt, 'fs");
Format(instr, "mfhc1 'rt, 'fs");
break;
case MTC1:
Format(instr, "mtc1 'rt, 'fs");
Format(instr, "mtc1 'rt, 'fs");
break;
// These are called "fs" too, although they are not FPU registers.
case CTC1:
Format(instr, "ctc1 'rt, 'fs");
break;
case CFC1:
Format(instr, "cfc1 'rt, 'fs");
break;
case MTHC1:
Format(instr, "mthc1 rt, 'fs");
Format(instr, "mthc1 'rt, 'fs");
break;
case D:
switch (instr->FunctionFieldRaw()) {
case ADD_D:
Format(instr, "add.d 'fd, 'fs, 'ft");
break;
case SUB_D:
Format(instr, "sub.d 'fd, 'fs, 'ft");
break;
case MUL_D:
Format(instr, "mul.d 'fd, 'fs, 'ft");
break;
case DIV_D:
Format(instr, "div.d 'fd, 'fs, 'ft");
break;
case ABS_D:
Format(instr, "abs.d 'fd, 'fs");
break;
case MOV_D:
Format(instr, "mov.d 'fd, 'fs");
break;
case NEG_D:
Format(instr, "neg.d 'fd, 'fs");
break;
case SQRT_D:
Format(instr, "sqrt.d 'fd, 'fs");
break;
case CVT_W_D:
Format(instr, "cvt.w.d 'fd, 'fs");
break;
case CVT_L_D: {
if (mips32r2) {
Format(instr, "cvt.l.d 'fd, 'fs");
} else {
Unknown(instr);
}
break;
}
case TRUNC_W_D:
Format(instr, "trunc.w.d 'fd, 'fs");
break;
case TRUNC_L_D: {
if (mips32r2) {
Format(instr, "trunc.l.d 'fd, 'fs");
} else {
Unknown(instr);
}
break;
}
case ROUND_W_D:
Format(instr, "round.w.d 'fd, 'fs");
break;
case FLOOR_W_D:
Format(instr, "floor.w.d 'fd, 'fs");
break;
case CEIL_W_D:
Format(instr, "ceil.w.d 'fd, 'fs");
break;
case CVT_S_D:
Format(instr, "cvt.s.d 'fd, 'fs");
break;
case C_F_D:
Format(instr, "c.f.d 'fs, 'ft, 'Cc");
break;
case C_UN_D:
Format(instr, "c.un.d 'fs, 'ft, 'Cc");
break;
case C_EQ_D:
Format(instr, "c.eq.d 'fs, 'ft, 'Cc");
break;
case C_UEQ_D:
Format(instr, "c.ueq.d 'fs, 'ft, 'Cc");
break;
case C_OLT_D:
Format(instr, "c.olt.d 'fs, 'ft, 'Cc");
break;
case C_ULT_D:
Format(instr, "c.ult.d 'fs, 'ft, 'Cc");
break;
case C_OLE_D:
Format(instr, "c.ole.d 'fs, 'ft, 'Cc");
break;
case C_ULE_D:
Format(instr, "c.ule.d 'fs, 'ft, 'Cc");
break;
default:
Format(instr, "unknown.cop1.d");
break;
}
break;
case S:
case D:
UNIMPLEMENTED_MIPS();
break;
case W:
switch (instr->FunctionFieldRaw()) {
case CVT_S_W:
UNIMPLEMENTED_MIPS();
case CVT_S_W: // Convert word to float (single).
Format(instr, "cvt.s.w 'fd, 'fs");
break;
case CVT_D_W: // Convert word to double.
Format(instr, "cvt.d.w 'fd, 'fs");
Format(instr, "cvt.d.w 'fd, 'fs");
break;
default:
UNREACHABLE();
};
}
break;
case L:
switch (instr->FunctionFieldRaw()) {
case CVT_D_L: {
if (mips32r2) {
Format(instr, "cvt.d.l 'fd, 'fs");
} else {
Unknown(instr);
}
break;
}
case CVT_S_L: {
if (mips32r2) {
Format(instr, "cvt.s.l 'fd, 'fs");
} else {
Unknown(instr);
}
break;
}
default:
UNREACHABLE();
}
break;
case PS:
UNIMPLEMENTED_MIPS();
break;
break;
default:
UNREACHABLE();
};
}
break;
case SPECIAL:
switch (instr->FunctionFieldRaw()) {
@ -456,7 +604,15 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
Format(instr, "sll 'rd, 'rt, 'sa");
break;
case SRL:
Format(instr, "srl 'rd, 'rt, 'sa");
if (instr->RsValue() == 0) {
Format(instr, "srl 'rd, 'rt, 'sa");
} else {
if (mips32r2) {
Format(instr, "rotr 'rd, 'rt, 'sa");
} else {
Unknown(instr);
}
}
break;
case SRA:
Format(instr, "sra 'rd, 'rt, 'sa");
@ -465,7 +621,15 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
Format(instr, "sllv 'rd, 'rt, 'rs");
break;
case SRLV:
Format(instr, "srlv 'rd, 'rt, 'rs");
if (instr->SaValue() == 0) {
Format(instr, "srlv 'rd, 'rt, 'rs");
} else {
if (mips32r2) {
Format(instr, "rotrv 'rd, 'rt, 'rs");
} else {
Unknown(instr);
}
}
break;
case SRAV:
Format(instr, "srav 'rd, 'rt, 'rs");
@ -504,9 +668,9 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
Format(instr, "and 'rd, 'rs, 'rt");
break;
case OR:
if (0 == instr->RsField()) {
if (0 == instr->RsValue()) {
Format(instr, "mov 'rd, 'rt");
} else if (0 == instr->RtField()) {
} else if (0 == instr->RtValue()) {
Format(instr, "mov 'rd, 'rs");
} else {
Format(instr, "or 'rd, 'rs, 'rt");
@ -545,27 +709,79 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
case TNE:
Format(instr, "tne 'rs, 'rt, code: 'code");
break;
case MOVZ:
Format(instr, "movz 'rd, 'rs, 'rt");
break;
case MOVN:
Format(instr, "movn 'rd, 'rs, 'rt");
break;
case MOVCI:
if (instr->Bit(16)) {
Format(instr, "movt 'rd, 'rs, 'Cc");
} else {
Format(instr, "movf 'rd, 'rs, 'Cc");
}
break;
default:
UNREACHABLE();
};
}
break;
case SPECIAL2:
switch (instr->FunctionFieldRaw()) {
case MUL:
Format(instr, "mul 'rd, 'rs, 'rt");
break;
case CLZ:
Format(instr, "clz 'rd, 'rs");
break;
default:
UNREACHABLE();
};
}
break;
case SPECIAL3:
switch (instr->FunctionFieldRaw()) {
case INS: {
if (mips32r2) {
Format(instr, "ins 'rt, 'rs, 'sd, 'sa");
} else {
Unknown(instr);
}
break;
}
case EXT: {
if (mips32r2) {
Format(instr, "ext 'rt, 'rs, 'sd, 'sa");
} else {
Unknown(instr);
}
break;
}
default:
UNREACHABLE();
}
break;
default:
UNREACHABLE();
};
}
}
void Decoder::DecodeTypeImmediate(Instruction* instr) {
switch (instr->OpcodeFieldRaw()) {
// ------------- REGIMM class.
case COP1:
switch (instr->RsFieldRaw()) {
case BC1:
if (instr->FBtrueValue()) {
Format(instr, "bc1t 'bc, 'imm16u");
} else {
Format(instr, "bc1f 'bc, 'imm16u");
}
break;
default:
UNREACHABLE();
};
break; // Case COP1.
case REGIMM:
switch (instr->RtFieldRaw()) {
case BLTZ:
@ -582,8 +798,8 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
break;
default:
UNREACHABLE();
};
break; // case REGIMM
}
break; // Case REGIMM.
// ------------- Branch instructions.
case BEQ:
Format(instr, "beq 'rs, 'rt, 'imm16u");
@ -626,18 +842,39 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
case LB:
Format(instr, "lb 'rt, 'imm16s('rs)");
break;
case LH:
Format(instr, "lh 'rt, 'imm16s('rs)");
break;
case LWL:
Format(instr, "lwl 'rt, 'imm16s('rs)");
break;
case LW:
Format(instr, "lw 'rt, 'imm16s('rs)");
break;
case LBU:
Format(instr, "lbu 'rt, 'imm16s('rs)");
break;
case LHU:
Format(instr, "lhu 'rt, 'imm16s('rs)");
break;
case LWR:
Format(instr, "lwr 'rt, 'imm16s('rs)");
break;
case SB:
Format(instr, "sb 'rt, 'imm16s('rs)");
break;
case SH:
Format(instr, "sh 'rt, 'imm16s('rs)");
break;
case SWL:
Format(instr, "swl 'rt, 'imm16s('rs)");
break;
case SW:
Format(instr, "sw 'rt, 'imm16s('rs)");
break;
case SWR:
Format(instr, "swr 'rt, 'imm16s('rs)");
break;
case LWC1:
Format(instr, "lwc1 'ft, 'imm16s('rs)");
break;
@ -645,10 +882,10 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
Format(instr, "ldc1 'ft, 'imm16s('rs)");
break;
case SWC1:
Format(instr, "swc1 'rt, 'imm16s('fs)");
Format(instr, "swc1 'ft, 'imm16s('rs)");
break;
case SDC1:
Format(instr, "sdc1 'rt, 'imm16s('fs)");
Format(instr, "sdc1 'ft, 'imm16s('rs)");
break;
default:
UNREACHABLE();
@ -675,7 +912,7 @@ void Decoder::DecodeTypeJump(Instruction* instr) {
int Decoder::InstructionDecode(byte_* instr_ptr) {
Instruction* instr = Instruction::At(instr_ptr);
// Print raw instruction bytes.
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%08x ",
instr->InstructionBits());
switch (instr->InstructionType()) {
@ -695,11 +932,11 @@ int Decoder::InstructionDecode(byte_* instr_ptr) {
UNSUPPORTED_MIPS();
}
}
return Instruction::kInstructionSize;
return Instruction::kInstrSize;
}
} } // namespace assembler::mips
} } // namespace v8::internal
@ -707,8 +944,7 @@ int Decoder::InstructionDecode(byte_* instr_ptr) {
namespace disasm {
namespace v8i = v8::internal;
using v8::internal::byte_;
const char* NameConverter::NameOfAddress(byte_* addr) const {
v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
@ -722,12 +958,12 @@ const char* NameConverter::NameOfConstant(byte_* addr) const {
const char* NameConverter::NameOfCPURegister(int reg) const {
return assembler::mips::Registers::Name(reg);
return v8::internal::Registers::Name(reg);
}
const char* NameConverter::NameOfXMMRegister(int reg) const {
return assembler::mips::FPURegister::Name(reg);
return v8::internal::FPURegisters::Name(reg);
}
@ -755,13 +991,13 @@ Disassembler::~Disassembler() {}
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
byte_* instruction) {
assembler::mips::Decoder d(converter_, buffer);
v8::internal::Decoder d(converter_, buffer);
return d.InstructionDecode(instruction);
}
// The MIPS assembler does not currently use constant pools.
int Disassembler::ConstantPoolSizeAt(byte_* instruction) {
UNIMPLEMENTED_MIPS();
return -1;
}
@ -779,6 +1015,7 @@ void Disassembler::Disassemble(FILE* f, byte_* begin, byte_* end) {
}
}
#undef UNSUPPORTED
} // namespace disasm

View File

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -37,57 +37,9 @@ namespace v8 {
namespace internal {
StackFrame::Type StackFrame::ComputeType(State* state) {
ASSERT(state->fp != NULL);
if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
return ARGUMENTS_ADAPTOR;
}
// The marker and function offsets overlap. If the marker isn't a
// smi then the frame is a JavaScript frame -- and the marker is
// really the function.
const int offset = StandardFrameConstants::kMarkerOffset;
Object* marker = Memory::Object_at(state->fp + offset);
if (!marker->IsSmi()) return JAVA_SCRIPT;
return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
}
Address ExitFrame::ComputeStackPointer(Address fp) {
Address sp = fp + ExitFrameConstants::kSPDisplacement;
const int offset = ExitFrameConstants::kCodeOffset;
Object* code = Memory::Object_at(fp + offset);
bool is_debug_exit = code->IsSmi();
if (is_debug_exit) {
sp -= kNumJSCallerSaved * kPointerSize;
}
return sp;
}
void ExitFrame::Iterate(ObjectVisitor* v) const {
// Do nothing
}
int JavaScriptFrame::GetProvidedParametersCount() const {
return ComputeParametersCount();
}
Address JavaScriptFrame::GetCallerStackPointer() const {
UNIMPLEMENTED_MIPS();
return static_cast<Address>(NULL); // UNIMPLEMENTED RETURN
}
Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
UNIMPLEMENTED_MIPS();
return static_cast<Address>(NULL); // UNIMPLEMENTED RETURN
}
Address InternalFrame::GetCallerStackPointer() const {
return fp() + StandardFrameConstants::kCallerSPOffset;
return fp;
}

View File

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -40,16 +40,17 @@ namespace internal {
static const int kNumRegs = 32;
static const RegList kJSCallerSaved =
1 << 2 | // v0
1 << 4 | // a0
1 << 5 | // a1
1 << 6 | // a2
1 << 7; // a3
static const int kNumJSCallerSaved = 4;
static const int kNumJSCallerSaved = 5;
// Return the code of the n-th caller-saved register available to JavaScript
// e.g. JSCallerSavedReg(0) returns r0.code() == 0.
// e.g. JSCallerSavedReg(0) returns a0.code() == 4.
int JSCallerSavedCode(int n);
@ -64,6 +65,18 @@ static const RegList kCalleeSaved =
static const int kNumCalleeSaved = 11;
// Number of registers for which space is reserved in safepoints. Must be a
// multiple of 8.
// TODO(mips): Only 8 registers may actually be sufficient. Revisit.
static const int kNumSafepointRegisters = 16;
// Define the list of registers actually saved at safepoints.
// Note that the number of saved registers may be smaller than the reserved
// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
static const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
static const int kNumSafepointSavedRegisters =
kNumJSCallerSaved + kNumCalleeSaved;
typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
@ -88,15 +101,14 @@ class EntryFrameConstants : public AllStatic {
class ExitFrameConstants : public AllStatic {
public:
// Exit frames have a debug marker on the stack.
static const int kSPDisplacement = -1 * kPointerSize;
// The debug marker is just above the frame pointer.
static const int kDebugMarkOffset = -1 * kPointerSize;
// Must be the same as kDebugMarkOffset. Alias introduced when upgrading.
static const int kCodeOffset = -1 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
static const int kSavedRegistersOffset = 0 * kPointerSize;
// TODO(mips): Use a patched sp value on the stack instead.
// A marker of 0 indicates that double registers are saved.
static const int kMarkerOffset = -2 * kPointerSize;
// The caller fields are below the frame pointer on the stack.
static const int kCallerFPOffset = +0 * kPointerSize;
@ -126,6 +138,8 @@ class StandardFrameConstants : public AllStatic {
static const int kCArgsSlotsSize = 4 * kPointerSize;
// JS argument slots size.
static const int kJSArgsSlotsSize = 0 * kPointerSize;
// Assembly builtins argument slots size.
static const int kBArgsSlotsSize = 0 * kPointerSize;
};
@ -159,6 +173,7 @@ inline Object* JavaScriptFrame::function_slot_object() const {
return Memory::Object_at(fp() + offset);
}
} } // namespace v8::internal
#endif

View File

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -29,18 +29,55 @@
#if defined(V8_TARGET_ARCH_MIPS)
// Note on Mips implementation:
//
// The result_register() for mips is the 'v0' register, which is defined
// by the ABI to contain function return values. However, the first
// parameter to a function is defined to be 'a0'. So there are many
// places where we have to move a previous result in v0 to a0 for the
// next call: mov(a0, v0). This is not needed on the other architectures.
#include "code-stubs.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
#include "full-codegen.h"
#include "parser.h"
#include "scopes.h"
#include "stub-cache.h"
#include "mips/code-stubs-mips.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right. The actual
// argument count matches the formal parameter count expected by the
// function.
//
// The live registers are:
// o a1: the JS function object being called (i.e., ourselves)
// o cp: our context
// o fp: our caller's frame pointer
// o sp: stack pointer
// o ra: return address
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-mips.h for its layout.
void FullCodeGenerator::Generate(CompilationInfo* info) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::ClearAccumulator() {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
UNIMPLEMENTED_MIPS();
}
@ -50,47 +87,165 @@ void FullCodeGenerator::EmitReturnSequence() {
}
void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
void FullCodeGenerator::AccumulatorValueContext::Plug(Slot* slot) const {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::ApplyTOS(Expression::Context context) {
void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::DropAndApply(int count,
Expression::Context context,
Register reg) {
void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::Apply(Expression::Context context,
Label* materialize_true,
Label* materialize_false) {
void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::DoTest(Expression::Context context) {
void FullCodeGenerator::AccumulatorValueContext::Plug(
Heap::RootListIndex index) const {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::StackValueContext::Plug(
Heap::RootListIndex index) const {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::AccumulatorValueContext::Plug(
Handle<Object> lit) const {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EffectContext::DropAndPlug(int count,
Register reg) const {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
int count,
Register reg) const {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
Register reg) const {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::TestContext::DropAndPlug(int count,
Register reg) const {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
Label* materialize_false) const {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::AccumulatorValueContext::Plug(
Label* materialize_true,
Label* materialize_false) const {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::StackValueContext::Plug(
Label* materialize_true,
Label* materialize_false) const {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
Label* materialize_false) const {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EffectContext::Plug(bool flag) const {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::TestContext::Plug(bool flag) const {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::DoTest(Label* if_true,
Label* if_false,
Label* fall_through) {
UNIMPLEMENTED_MIPS();
}
// Original prototype for mips, needs arch-indep change. Leave out for now.
// void FullCodeGenerator::Split(Condition cc,
// Register lhs,
// const Operand& rhs,
// Label* if_true,
// Label* if_false,
// Label* fall_through) {
void FullCodeGenerator::Split(Condition cc,
Label* if_true,
Label* if_false,
Label* fall_through) {
UNIMPLEMENTED_MIPS();
}
MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
UNIMPLEMENTED_MIPS();
return MemOperand(zero_reg, 0); // UNIMPLEMENTED RETURN
return MemOperand(zero_reg, 0);
}
@ -99,6 +254,14 @@ void FullCodeGenerator::Move(Register destination, Slot* source) {
}
void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
bool should_normalize,
Label* if_true,
Label* if_false) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::Move(Slot* dst,
Register src,
Register scratch1,
@ -107,6 +270,13 @@ void FullCodeGenerator::Move(Slot* dst,
}
void FullCodeGenerator::EmitDeclaration(Variable* variable,
Variable::Mode mode,
FunctionLiteral* function) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
UNIMPLEMENTED_MIPS();
}
@ -117,7 +287,18 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
UNIMPLEMENTED_MIPS();
}
@ -127,8 +308,32 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
}
void FullCodeGenerator::EmitVariableLoad(Variable* var,
Expression::Context context) {
MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
Slot* slot,
Label* slow) {
UNIMPLEMENTED_MIPS();
return MemOperand(zero_reg, 0);
}
void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
Slot* slot,
TypeofState typeof_state,
Label* slow,
Label* done) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
Slot* slot,
TypeofState typeof_state,
Label* slow) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitVariableLoad(Variable* var) {
UNIMPLEMENTED_MIPS();
}
@ -163,14 +368,28 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
}
void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
Token::Value op,
OverwriteMode mode,
Expression* left,
Expression* right) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitBinaryOp(Token::Value op,
Expression::Context context) {
OverwriteMode mode) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Expression::Context context) {
Token::Value op) {
UNIMPLEMENTED_MIPS();
}
@ -189,13 +408,21 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitCallWithIC(Call* expr,
Handle<Object> ignored,
Handle<Object> name,
RelocInfo::Mode mode) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Expression* key,
RelocInfo::Mode mode) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitCallWithStub(Call* expr) {
UNIMPLEMENTED_MIPS();
}
@ -211,6 +438,202 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
UNIMPLEMENTED_MIPS();
}
@ -226,25 +649,52 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
UNIMPLEMENTED_MIPS();
}
bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
Expression* left,
Expression* right,
Label* if_true,
Label* if_false,
Label* fall_through) {
UNIMPLEMENTED_MIPS();
return false;
}
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
UNIMPLEMENTED_MIPS();
}
Register FullCodeGenerator::result_register() { return v0; }
Register FullCodeGenerator::result_register() {
UNIMPLEMENTED_MIPS();
return v0;
}
Register FullCodeGenerator::context_register() { return cp; }
Register FullCodeGenerator::context_register() {
UNIMPLEMENTED_MIPS();
return cp;
}
void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {

View File

@ -32,6 +32,7 @@
#if defined(V8_TARGET_ARCH_MIPS)
#include "codegen-inl.h"
#include "code-stubs.h"
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"
@ -52,7 +53,7 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
}
void LoadIC::GenerateStringLength(MacroAssembler* masm) {
void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
UNIMPLEMENTED_MIPS();
}
@ -65,6 +66,12 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
// Defined in ic.cc.
Object* CallIC_Miss(Arguments args);
void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
UNIMPLEMENTED_MIPS();
}
void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
UNIMPLEMENTED_MIPS();
}
@ -74,51 +81,22 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
UNIMPLEMENTED_MIPS();
}
void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
UNIMPLEMENTED_MIPS();
// Registers:
// a2: name
// ra: return address
// Get the receiver of the function from the stack.
__ lw(a3, MemOperand(sp, argc*kPointerSize));
__ EnterInternalFrame();
// Push the receiver and the name of the function.
__ MultiPush(a2.bit() | a3.bit());
// Call the entry.
__ li(a0, Operand(2));
__ li(a1, Operand(ExternalReference(IC_Utility(kCallIC_Miss))));
CEntryStub stub(1);
__ CallStub(&stub);
// Move result to r1 and leave the internal frame.
__ mov(a1, v0);
__ LeaveInternalFrame();
// Check if the receiver is a global object of some sort.
Label invoke, global;
__ lw(a2, MemOperand(sp, argc * kPointerSize));
__ andi(t0, a2, kSmiTagMask);
__ Branch(eq, &invoke, t0, Operand(zero_reg));
__ GetObjectType(a2, a3, a3);
__ Branch(eq, &global, a3, Operand(JS_GLOBAL_OBJECT_TYPE));
__ Branch(ne, &invoke, a3, Operand(JS_BUILTINS_OBJECT_TYPE));
// Patch the receiver on the stack.
__ bind(&global);
__ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
__ sw(a2, MemOperand(sp, argc * kPointerSize));
// Invoke the function.
ParameterCount actual(argc);
__ bind(&invoke);
__ InvokeFunction(a1, actual, JUMP_FUNCTION);
}
void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
UNIMPLEMENTED_MIPS();
}
void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
UNIMPLEMENTED_MIPS();
}
// Defined in ic.cc.
Object* LoadIC_Miss(Arguments args);
@ -137,19 +115,35 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
}
void LoadIC::ClearInlinedVersion(Address address) {}
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
UNIMPLEMENTED_MIPS();
return false;
}
void KeyedLoadIC::ClearInlinedVersion(Address address) {}
bool LoadIC::PatchInlinedContextualLoad(Address address,
Object* map,
Object* cell,
bool is_dont_delete) {
UNIMPLEMENTED_MIPS();
return false;
}
bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
UNIMPLEMENTED_MIPS();
return false;
}
bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
UNIMPLEMENTED_MIPS();
return false;
}
void KeyedStoreIC::ClearInlinedVersion(Address address) {}
void KeyedStoreIC::RestoreInlinedVersion(Address address) {}
bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
UNIMPLEMENTED_MIPS();
return false;
}
@ -162,6 +156,11 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
}
void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
@ -172,7 +171,14 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
}
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
StrictModeFlag strict_mode) {
UNIMPLEMENTED_MIPS();
}
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
StrictModeFlag strict_mode) {
UNIMPLEMENTED_MIPS();
}
@ -187,7 +193,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
StrictModeFlag strict_mode) {
UNIMPLEMENTED_MIPS();
}
@ -201,8 +208,37 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void StoreIC::GenerateNormal(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
StrictModeFlag strict_mode) {
UNIMPLEMENTED_MIPS();
}
#undef __
Condition CompareIC::ComputeCondition(Token::Value op) {
UNIMPLEMENTED_MIPS();
return kNoCondition;
}
void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
UNIMPLEMENTED_MIPS();
}
void PatchInlinedSmiCode(Address address) {
// Currently there is no smi inlining in the MIPS full code generator.
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS

View File

@ -43,41 +43,19 @@ namespace internal {
#define __ ACCESS_MASM(cgen()->masm())
// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
(cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
(cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
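// Illustrative reading, not part of this patch: the check above means an
// always-taken branch must pass zero_reg for both operands, e.g.
//   Branch(cc_always, &target, zero_reg, Operand(zero_reg));
// while a conditional branch must name at least one real register, e.g.
//   Branch(eq, &target, t0, Operand(zero_reg));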
void JumpTarget::DoJump() {
ASSERT(cgen()->has_valid_frame());
// Live non-frame registers are not allowed at unconditional jumps
// because we have no way of invalidating the corresponding results
// which are still live in the C++ code.
ASSERT(cgen()->HasValidEntryRegisters());
if (is_bound()) {
// Backward jump. There is already a frame expectation at the target.
ASSERT(direction_ == BIDIRECTIONAL);
cgen()->frame()->MergeTo(entry_frame_);
cgen()->DeleteFrame();
} else {
// Use the current frame as the expected one at the target if necessary.
if (entry_frame_ == NULL) {
entry_frame_ = cgen()->frame();
RegisterFile empty;
cgen()->SetFrame(NULL, &empty);
} else {
cgen()->frame()->MergeTo(entry_frame_);
cgen()->DeleteFrame();
}
// The predicate is_linked() should be made true. Its implementation
// detects the presence of a frame pointer in the reaching_frames_ list.
if (!is_linked()) {
reaching_frames_.Add(NULL);
ASSERT(is_linked());
}
}
__ b(&entry_label_);
__ nop(); // Branch delay slot nop.
UNIMPLEMENTED_MIPS();
}
// Original prototype for mips, needs arch-indep change. Leave out for now.
// void JumpTarget::DoBranch(Condition cc, Hint ignored,
// Register src1, const Operand& src2) {
void JumpTarget::DoBranch(Condition cc, Hint ignored) {
UNIMPLEMENTED_MIPS();
}
@ -89,85 +67,12 @@ void JumpTarget::Call() {
void JumpTarget::DoBind() {
ASSERT(!is_bound());
// Live non-frame registers are not allowed at the start of a basic
// block.
ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
if (cgen()->has_valid_frame()) {
// If there is a current frame we can use it on the fall through.
if (entry_frame_ == NULL) {
entry_frame_ = new VirtualFrame(cgen()->frame());
} else {
ASSERT(cgen()->frame()->Equals(entry_frame_));
}
} else {
// If there is no current frame we must have an entry frame which we can
// copy.
ASSERT(entry_frame_ != NULL);
RegisterFile empty;
cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
}
// The predicate is_linked() should be made false. Its implementation
// detects the presence (or absence) of frame pointers in the
// reaching_frames_ list. If we inserted a bogus frame to make
// is_linked() true, remove it now.
if (is_linked()) {
reaching_frames_.Clear();
}
__ bind(&entry_label_);
}
void BreakTarget::Jump() {
// On ARM we do not currently emit merge code for jumps, so we need to do
// it explicitly here. The only merging necessary is to drop extra
// statement state from the stack.
ASSERT(cgen()->has_valid_frame());
int count = cgen()->frame()->height() - expected_height_;
cgen()->frame()->Drop(count);
DoJump();
}
void BreakTarget::Jump(Result* arg) {
UNIMPLEMENTED_MIPS();
}
void BreakTarget::Bind() {
#ifdef DEBUG
// All the forward-reaching frames should have been adjusted at the
// jumps to this target.
for (int i = 0; i < reaching_frames_.length(); i++) {
ASSERT(reaching_frames_[i] == NULL ||
reaching_frames_[i]->height() == expected_height_);
}
#endif
// Drop leftover statement state from the frame before merging, even
// on the fall through. This is so we can bind the return target
// with state on the frame.
if (cgen()->has_valid_frame()) {
int count = cgen()->frame()->height() - expected_height_;
// On ARM we do not currently emit merge code at binding sites, so we need
// to do it explicitly here. The only merging necessary is to drop extra
// statement state from the stack.
cgen()->frame()->Drop(count);
}
DoBind();
}
void BreakTarget::Bind(Result* arg) {
UNIMPLEMENTED_MIPS();
}
#undef __
#undef BRANCH_ARGS_CHECK
} } // namespace v8::internal

View File

@ -25,53 +25,41 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#ifndef V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
#define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
#if defined(V8_TARGET_ARCH_MIPS)
#include "mips/lithium-mips.h"
#include "codegen-inl.h"
#include "fast-codegen.h"
#include "deoptimizer.h"
#include "safepoint-table.h"
#include "scopes.h"
// Note: this file was taken from the X64 version. ARM has a partially working
// lithium implementation, but for now it is not ported to mips.
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
// Forward declarations.
class LDeferredCode;
Register FastCodeGenerator::accumulator0() { return no_reg; }
Register FastCodeGenerator::accumulator1() { return no_reg; }
Register FastCodeGenerator::scratch0() { return no_reg; }
Register FastCodeGenerator::scratch1() { return no_reg; }
Register FastCodeGenerator::receiver_reg() { return no_reg; }
Register FastCodeGenerator::context_reg() { return no_reg; }
class LCodeGen BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) { }
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
bool GenerateCode() {
UNIMPLEMENTED();
return false;
}
void FastCodeGenerator::Generate(CompilationInfo* info) {
UNIMPLEMENTED_MIPS();
}
void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
UNIMPLEMENTED_MIPS();
}
void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> name) {
UNIMPLEMENTED_MIPS();
}
void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
UNIMPLEMENTED_MIPS();
}
void FastCodeGenerator::EmitBitOr() {
UNIMPLEMENTED_MIPS();
}
#undef __
// Finish the code by setting stack height, safepoint, and bailout
// information on it.
void FinishCode(Handle<Code> code) { UNIMPLEMENTED(); }
};
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
#endif // V8_MIPS_LITHIUM_CODEGEN_MIPS_H_

src/mips/lithium-mips.h (new file, 304 lines)
View File

@ -0,0 +1,304 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_MIPS_LITHIUM_MIPS_H_
#define V8_MIPS_LITHIUM_MIPS_H_
#include "hydrogen.h"
#include "lithium-allocator.h"
#include "lithium.h"
#include "safepoint-table.h"
// Note: this file was taken from the X64 version. ARM has a partially working
// lithium implementation, but for now it is not ported to mips.
namespace v8 {
namespace internal {
// Forward declarations.
class LCodeGen;
class LEnvironment;
class Translation;
class LInstruction: public ZoneObject {
public:
LInstruction() { }
virtual ~LInstruction() { }
// Predicates should be generated by macro as in lithium-ia32.h.
virtual bool IsLabel() const {
UNIMPLEMENTED();
return false;
}
virtual bool IsOsrEntry() const {
UNIMPLEMENTED();
return false;
}
LPointerMap* pointer_map() const {
UNIMPLEMENTED();
return NULL;
}
bool HasPointerMap() const {
UNIMPLEMENTED();
return false;
}
void set_environment(LEnvironment* env) { UNIMPLEMENTED(); }
LEnvironment* environment() const {
UNIMPLEMENTED();
return NULL;
}
bool HasEnvironment() const {
UNIMPLEMENTED();
return NULL;
}
virtual void PrintTo(StringStream* stream) const { UNIMPLEMENTED(); }
virtual bool IsControl() const {
UNIMPLEMENTED();
return false;
}
void MarkAsCall() { UNIMPLEMENTED(); }
void MarkAsSaveDoubles() { UNIMPLEMENTED(); }
// Interface to the register allocator and iterators.
bool IsMarkedAsCall() const {
UNIMPLEMENTED();
return false;
}
bool IsMarkedAsSaveDoubles() const {
UNIMPLEMENTED();
return false;
}
virtual bool HasResult() const {
UNIMPLEMENTED();
return false;
}
virtual LOperand* result() {
UNIMPLEMENTED();
return NULL;
}
virtual int InputCount() {
UNIMPLEMENTED();
return 0;
}
virtual LOperand* InputAt(int i) {
UNIMPLEMENTED();
return NULL;
}
virtual int TempCount() {
UNIMPLEMENTED();
return 0;
}
virtual LOperand* TempAt(int i) {
UNIMPLEMENTED();
return NULL;
}
LOperand* FirstInput() {
UNIMPLEMENTED();
return NULL;
}
LOperand* Output() {
UNIMPLEMENTED();
return NULL;
}
#ifdef DEBUG
void VerifyCall() { UNIMPLEMENTED(); }
#endif
};
class LGap: public LInstruction {
public:
explicit LGap(HBasicBlock* block) { }
HBasicBlock* block() const {
UNIMPLEMENTED();
return NULL;
}
enum InnerPosition {
BEFORE,
START,
END,
AFTER,
FIRST_INNER_POSITION = BEFORE,
LAST_INNER_POSITION = AFTER
};
LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
UNIMPLEMENTED();
return NULL;
}
LParallelMove* GetParallelMove(InnerPosition pos) {
UNIMPLEMENTED();
return NULL;
}
};
class LLabel: public LGap {
public:
explicit LLabel(HBasicBlock* block) : LGap(block) { }
};
class LOsrEntry: public LInstruction {
public:
// Function could be generated by a macro as in lithium-ia32.h.
static LOsrEntry* cast(LInstruction* instr) {
UNIMPLEMENTED();
return NULL;
}
LOperand** SpilledRegisterArray() {
UNIMPLEMENTED();
return NULL;
}
LOperand** SpilledDoubleRegisterArray() {
UNIMPLEMENTED();
return NULL;
}
void MarkSpilledRegister(int allocation_index, LOperand* spill_operand) {
UNIMPLEMENTED();
}
void MarkSpilledDoubleRegister(int allocation_index,
LOperand* spill_operand) {
UNIMPLEMENTED();
}
};
class LChunk: public ZoneObject {
public:
explicit LChunk(CompilationInfo* info, HGraph* graph) { }
HGraph* graph() const {
UNIMPLEMENTED();
return NULL;
}
CompilationInfo* info() const { return NULL; }
const ZoneList<LPointerMap*>* pointer_maps() const {
UNIMPLEMENTED();
return NULL;
}
LOperand* GetNextSpillSlot(bool double_slot) {
UNIMPLEMENTED();
return NULL;
}
LConstantOperand* DefineConstantOperand(HConstant* constant) {
UNIMPLEMENTED();
return NULL;
}
LLabel* GetLabel(int block_id) const {
UNIMPLEMENTED();
return NULL;
}
const ZoneList<LInstruction*>* instructions() const {
UNIMPLEMENTED();
return NULL;
}
int GetParameterStackSlot(int index) const {
UNIMPLEMENTED();
return 0;
}
void AddGapMove(int index, LOperand* from, LOperand* to) { UNIMPLEMENTED(); }
LGap* GetGapAt(int index) const {
UNIMPLEMENTED();
return NULL;
}
bool IsGapAt(int index) const {
UNIMPLEMENTED();
return false;
}
int NearestGapPos(int index) const {
UNIMPLEMENTED();
return 0;
}
void MarkEmptyBlocks() { UNIMPLEMENTED(); }
#ifdef DEBUG
void Verify() { UNIMPLEMENTED(); }
#endif
};
class LChunkBuilder BASE_EMBEDDED {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator) { }
// Build the sequence for the graph.
LChunk* Build() {
UNIMPLEMENTED();
return NULL;
};
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node) { \
UNIMPLEMENTED(); \
return NULL; \
}
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
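// For illustration: the list above expands DECLARE_DO(Add) into
//   LInstruction* DoAdd(HAdd* node) { UNIMPLEMENTED(); return NULL; }
// and likewise for every concrete hydrogen instruction.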
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};
} } // namespace v8::internal
#endif // V8_MIPS_LITHIUM_MIPS_H_

File diff suppressed because it is too large.

File diff suppressed because it is too large.

View File

@ -0,0 +1,478 @@
// Copyright 2006-2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if defined(V8_TARGET_ARCH_MIPS)
#include "unicode.h"
#include "log.h"
#include "code-stubs.h"
#include "regexp-stack.h"
#include "macro-assembler.h"
#include "regexp-macro-assembler.h"
#include "mips/regexp-macro-assembler-mips.h"
namespace v8 {
namespace internal {
#ifndef V8_INTERPRETED_REGEXP
/*
* This assembler uses the following register assignment convention
* - t1 : Pointer to current code object (Code*) including heap object tag.
* - t2 : Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
* - t3 : Currently loaded character. Must be loaded using
* LoadCurrentCharacter before using any of the dispatch methods.
* - t4 : points to tip of backtrack stack
* - t5 : Unused.
* - t6 : End of input (points to byte after last character in input).
* - fp : Frame pointer. Used to access arguments, local variables and
* RegExp registers.
* - sp : points to tip of C stack.
*
* The remaining registers are free for computations.
*
* Each call to a public method should retain this convention.
* The stack will have the following structure:
* - direct_call (if 1, direct call from JavaScript code, if 0 call
* through the runtime system)
* - stack_area_base (High end of the memory area to use as
* backtracking stack)
* - int* capture_array (int[num_saved_registers_], for output).
* - stack frame header (16 bytes in size)
* --- sp when called ---
* - link address
* - backup of registers s0..s7
* - end of input (Address of end of string)
* - start of input (Address of first character in string)
* - start index (character index of start)
* --- frame pointer ----
* - void* input_string (location of a handle containing the string)
* - Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a non-position.
* - At start (if 1, we are starting at the start of the
* string, otherwise 0)
* - register 0 (Only positions must be stored in the first
* - register 1 num_saved_registers_ registers)
* - ...
* - register num_registers-1
* --- sp ---
*
* The first num_saved_registers_ registers are initialized to point to
* "character -1" in the string (i.e., char_size() bytes before the first
* character of the string). The remaining registers start out as garbage.
*
* The data up to the return address must be placed there by the calling
* code, by calling the code entry as cast to a function with the signature:
* int (*match)(String* input_string,
* int start_index,
* Address start,
* Address end,
* int* capture_output_array,
* bool at_start,
* byte* stack_area_base,
* bool direct_call)
* The call is performed by NativeRegExpMacroAssembler::Execute()
* (in regexp-macro-assembler.cc).
*/
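// A minimal illustrative sketch, not part of this patch, of how a caller
// invokes code generated here per the signature documented above; the
// variable names are placeholders:
//
//   typedef int (*MipsRegExpMatcher)(String*, int, Address, Address,
//                                    int*, bool, byte*, bool);
//   MipsRegExpMatcher matcher = FUNCTION_CAST<MipsRegExpMatcher>(entry);
//   int result = matcher(input_string, start_index, start, end,
//                        capture_output_array, at_start,
//                        stack_area_base, direct_call);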
#define __ ACCESS_MASM(masm_)
RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(
Mode mode,
int registers_to_save)
: masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
entry_label_(),
start_label_(),
success_label_(),
backtrack_label_(),
exit_label_() {
ASSERT_EQ(0, registers_to_save % 2);
__ jmp(&entry_label_); // We'll write the entry code later.
__ bind(&start_label_); // And then continue from here.
}
RegExpMacroAssemblerMIPS::~RegExpMacroAssemblerMIPS() {
delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
entry_label_.Unuse();
start_label_.Unuse();
success_label_.Unuse();
backtrack_label_.Unuse();
exit_label_.Unuse();
check_preempt_label_.Unuse();
stack_overflow_label_.Unuse();
}
int RegExpMacroAssemblerMIPS::stack_limit_slack() {
return RegExpStack::kStackLimitSlack;
}
void RegExpMacroAssemblerMIPS::AdvanceCurrentPosition(int by) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::AdvanceRegister(int reg, int by) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::Backtrack() {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::Bind(Label* label) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::CheckCharacter(uint32_t c, Label* on_equal) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::CheckNotAtStart(Label* on_not_at_start) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::CheckCharacterLT(uc16 limit, Label* on_less) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::CheckCharacters(Vector<const uc16> str,
int cp_offset,
Label* on_failure,
bool check_end_of_string) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
int start_reg,
Label* on_no_match) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::CheckNotBackReference(
int start_reg,
Label* on_no_match) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::CheckNotRegistersEqual(int reg1,
int reg2,
Label* on_not_equal) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::CheckNotCharacter(uint32_t c,
Label* on_not_equal) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::CheckCharacterAfterAnd(uint32_t c,
uint32_t mask,
Label* on_equal) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterAnd(uint32_t c,
uint32_t mask,
Label* on_not_equal) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterMinusAnd(
uc16 c,
uc16 minus,
uc16 mask,
Label* on_not_equal) {
UNIMPLEMENTED_MIPS();
}
bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
Label* on_no_match) {
UNIMPLEMENTED_MIPS();
return false;
}
void RegExpMacroAssemblerMIPS::Fail() {
UNIMPLEMENTED_MIPS();
}
Handle<Object> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
UNIMPLEMENTED_MIPS();
return Handle<Object>::null();
}
void RegExpMacroAssemblerMIPS::GoTo(Label* to) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::IfRegisterGE(int reg,
int comparand,
Label* if_ge) {
__ lw(a0, register_location(reg));
BranchOrBacktrack(if_ge, ge, a0, Operand(comparand));
}
void RegExpMacroAssemblerMIPS::IfRegisterLT(int reg,
int comparand,
Label* if_lt) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::IfRegisterEqPos(int reg,
Label* if_eq) {
UNIMPLEMENTED_MIPS();
}
RegExpMacroAssembler::IrregexpImplementation
RegExpMacroAssemblerMIPS::Implementation() {
return kMIPSImplementation;
}
void RegExpMacroAssemblerMIPS::LoadCurrentCharacter(int cp_offset,
Label* on_end_of_input,
bool check_bounds,
int characters) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::PopCurrentPosition() {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::PopRegister(int register_index) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::PushBacktrack(Label* label) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::PushCurrentPosition() {
Push(current_input_offset());
}
void RegExpMacroAssemblerMIPS::PushRegister(int register_index,
StackCheckFlag check_stack_limit) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::ReadCurrentPositionFromRegister(int reg) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::ReadStackPointerFromRegister(int reg) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::SetCurrentPositionFromEnd(int by) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::SetRegister(int register_index, int to) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::Succeed() {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::WriteCurrentPositionToRegister(int reg,
int cp_offset) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::ClearRegisters(int reg_from, int reg_to) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::WriteStackPointerToRegister(int reg) {
UNIMPLEMENTED_MIPS();
}
// Private methods:
void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
UNIMPLEMENTED_MIPS();
}
// Helper function for reading a value out of a stack frame.
template <typename T>
static T& frame_entry(Address re_frame, int frame_offset) {
return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
}
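// Illustrative use, assuming the frame offsets declared in
// regexp-macro-assembler-mips.h: inside CheckStackGuardState one would read
//   String* input = frame_entry<String*>(re_frame, kInputString);
//   int start_index = frame_entry<int>(re_frame, kStartIndex);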
int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
UNIMPLEMENTED_MIPS();
return 0;
}
MemOperand RegExpMacroAssemblerMIPS::register_location(int register_index) {
UNIMPLEMENTED_MIPS();
return MemOperand(zero_reg, 0);
}
void RegExpMacroAssemblerMIPS::CheckPosition(int cp_offset,
Label* on_outside_input) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::BranchOrBacktrack(Label* to,
Condition condition,
Register rs,
const Operand& rt) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::SafeCall(Label* to, Condition cond, Register rs,
const Operand& rt) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::SafeReturn() {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::SafeCallTarget(Label* name) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::Push(Register source) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::Pop(Register target) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::CheckPreemption() {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::CheckStackLimit() {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::CallCFunctionUsingStub(
ExternalReference function,
int num_arguments) {
UNIMPLEMENTED_MIPS();
}
void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
UNIMPLEMENTED_MIPS();
}
void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
UNIMPLEMENTED_MIPS();
}
#undef __
#endif // V8_INTERPRETED_REGEXP
}} // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS

View File

@ -0,0 +1,250 @@
// Copyright 2006-2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
#define V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
namespace v8 {
namespace internal {
#ifdef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerMIPS: public RegExpMacroAssembler {
public:
RegExpMacroAssemblerMIPS();
virtual ~RegExpMacroAssemblerMIPS();
};
#else // V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerMIPS(Mode mode, int registers_to_save);
virtual ~RegExpMacroAssemblerMIPS();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
virtual void AdvanceRegister(int reg, int by);
virtual void Backtrack();
virtual void Bind(Label* label);
virtual void CheckAtStart(Label* on_at_start);
virtual void CheckCharacter(uint32_t c, Label* on_equal);
virtual void CheckCharacterAfterAnd(uint32_t c,
uint32_t mask,
Label* on_equal);
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
virtual void CheckCharacters(Vector<const uc16> str,
int cp_offset,
Label* on_failure,
bool check_end_of_string);
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
virtual void CheckNotAtStart(Label* on_not_at_start);
virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
Label* on_no_match);
virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
uint32_t mask,
Label* on_not_equal);
virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
uc16 minus,
uc16 mask,
Label* on_not_equal);
// Checks whether the given offset from the current position is before
// the end of the string.
virtual void CheckPosition(int cp_offset, Label* on_outside_input);
virtual bool CheckSpecialCharacterClass(uc16 type,
Label* on_no_match);
virtual void Fail();
virtual Handle<Object> GetCode(Handle<String> source);
virtual void GoTo(Label* label);
virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
virtual void IfRegisterEqPos(int reg, Label* if_eq);
virtual IrregexpImplementation Implementation();
virtual void LoadCurrentCharacter(int cp_offset,
Label* on_end_of_input,
bool check_bounds = true,
int characters = 1);
virtual void PopCurrentPosition();
virtual void PopRegister(int register_index);
virtual void PushBacktrack(Label* label);
virtual void PushCurrentPosition();
virtual void PushRegister(int register_index,
StackCheckFlag check_stack_limit);
virtual void ReadCurrentPositionFromRegister(int reg);
virtual void ReadStackPointerFromRegister(int reg);
virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
virtual void Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
// returning.
static int CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame);
private:
// Offsets from frame_pointer() of function parameters and stored registers.
static const int kFramePointer = 0;
// Above the frame pointer - Stored registers and stack passed parameters.
// Registers s0 to s7, fp, and ra.
static const int kStoredRegisters = kFramePointer;
// Return address (stored from link register, read into pc on return).
static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
// Stack frame header.
static const int kStackFrameHeader = kReturnAddress + kPointerSize;
// Stack parameters placed by caller.
static const int kRegisterOutput = kStackFrameHeader + 16;
static const int kStackHighEnd = kRegisterOutput + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
static const int kInputEnd = kFramePointer - kPointerSize;
static const int kInputStart = kInputEnd - kPointerSize;
static const int kStartIndex = kInputStart - kPointerSize;
static const int kInputString = kStartIndex - kPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kInputStartMinusOne = kInputString - kPointerSize;
static const int kAtStart = kInputStartMinusOne - kPointerSize;
// First register address. Following registers are below it on the stack.
static const int kRegisterZero = kAtStart - kPointerSize;
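// Illustrative note, based on the other ports rather than this stub: with
// this layout register_location(i) would resolve to
//   MemOperand(frame_pointer(), kRegisterZero - i * kPointerSize);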
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
// Load a number of characters at the given offset from the
// current position, into the current-character register.
void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
// Check whether preemption has been requested.
void CheckPreemption();
// Check whether we are exceeding the stack limit on the backtrack stack.
void CheckStackLimit();
// Generate a call to CheckStackGuardState.
void CallCheckStackGuardState(Register scratch);
// The fp-relative location of a regexp register.
MemOperand register_location(int register_index);
// Register holding the current input position as negative offset from
// the end of the string.
inline Register current_input_offset() { return t2; }
// The register containing the current character after LoadCurrentCharacter.
inline Register current_character() { return t3; }
// Register holding address of the end of the input string.
inline Register end_of_input_address() { return t6; }
// Register holding the frame address. Local variables, parameters and
// regexp registers are addressed relative to this.
inline Register frame_pointer() { return fp; }
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
inline Register backtrack_stackpointer() { return t4; }
// Register holding pointer to the current code object.
inline Register code_pointer() { return t1; }
// Byte size of chars in the string to match (decided by the Mode argument).
inline int char_size() { return static_cast<int>(mode_); }
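// (Assumes the Mode enumerators carry the byte widths, as in the other
// ports: ASCII == 1, UC16 == 2.)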
// Equivalent to a conditional branch to the label, unless the label
// is NULL, in which case it is a conditional Backtrack.
void BranchOrBacktrack(Label* to,
Condition condition,
Register rs,
const Operand& rt);
// Call and return internally in the generated code in a way that
// is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
inline void SafeCall(Label* to,
Condition cond,
Register rs,
const Operand& rt);
inline void SafeReturn();
inline void SafeCallTarget(Label* name);
// Pushes the value of a register on the backtrack stack. Decrements the
// stack pointer by a word size and stores the register's value there.
inline void Push(Register source);
// Pops a value from the backtrack stack. Reads the word at the stack pointer
// and increments it by a word size.
inline void Pop(Register target);
// Calls a C function and cleans up the frame alignment done by
// FrameAlign. The called function *is* allowed to trigger a garbage
// collection, but may not take more than four arguments (no arguments
// passed on the stack), and the first argument will be a pointer to the
// return address.
inline void CallCFunctionUsingStub(ExternalReference function,
int num_arguments);
MacroAssembler* masm_;
// Which mode to generate code for (ASCII or UC16).
Mode mode_;
// One greater than maximal register index actually used.
int num_registers_;
// Number of registers to output at the end (the saved registers
// are always 0..num_saved_registers_-1)
int num_saved_registers_;
// Labels used internally.
Label entry_label_;
Label start_label_;
Label success_label_;
Label backtrack_label_;
Label exit_label_;
Label check_preempt_label_;
Label stack_overflow_label_;
};
#endif // V8_INTERPRETED_REGEXP
}} // namespace v8::internal
#endif // V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_

View File

@ -125,9 +125,6 @@ Register RegisterAllocator::ToRegister(int num) {
void RegisterAllocator::Initialize() {
Reset();
// The non-reserved a1 and ra registers are live on JS function entry.
Use(a1); // JS function.
Use(ra); // Return address.
}

View File

@ -35,8 +35,9 @@ namespace internal {
class RegisterAllocatorConstants : public AllStatic {
public:
static const int kNumRegisters = assembler::mips::kNumRegisters;
static const int kInvalidRegister = assembler::mips::kInvalidRegister;
// No registers are currently managed by the register allocator on MIPS.
static const int kNumRegisters = 0;
static const int kInvalidRegister = -1;
};

File diff suppressed because it is too large.

View File

@ -37,12 +37,31 @@
#define V8_MIPS_SIMULATOR_MIPS_H_
#include "allocation.h"
#include "constants-mips.h"
#if defined(__mips) && !defined(USE_SIMULATOR)
#if !defined(USE_SIMULATOR)
// Running without a simulator on a native mips platform.
namespace v8 {
namespace internal {
// When running without a simulator we call the entry directly.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
entry(p0, p1, p2, p3, p4);
entry(p0, p1, p2, p3, p4)
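// Illustrative only; the real call sites live elsewhere in V8. With a
// suitable function pointer this expands to a plain C call:
//   Object* ret = CALL_GENERATED_CODE(stub_entry, p0, p1, p2, p3, p4);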
typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*,
void*, int*, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type mips_regexp_matcher.
// The fifth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
(FUNCTION_CAST<mips_regexp_matcher>(entry)( \
p0, p1, p2, p3, NULL, p4, p5, p6, p7))
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on mips uses the C stack, we
@ -60,6 +79,8 @@ class SimulatorStack : public v8::internal::AllStatic {
static inline void UnregisterCTryCatch() { }
};
} } // namespace v8::internal
// Calculates the stack limit beyond which we will throw stack overflow errors.
// This macro must be called from a C++ method. It relies on being able to take
// the address of "this" to get a value on the current execution stack and then
@ -70,39 +91,50 @@ class SimulatorStack : public v8::internal::AllStatic {
#define GENERATED_CODE_STACK_LIMIT(limit) \
(reinterpret_cast<uintptr_t>(this) >= limit ? \
reinterpret_cast<uintptr_t>(this) - limit : 0)
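// Worked example, for illustration: if "this" sits at 0x7fff1000 and limit
// is 0x7ffe0000, GENERATED_CODE_STACK_LIMIT(limit) yields 0x11000 bytes of
// headroom below the current C stack position.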
// Call the generated regexp code directly. The entry function pointer should
// expect seven int/pointer sized arguments and return an int.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
entry(p0, p1, p2, p3, p4, p5, p6)
#else // !defined(USE_SIMULATOR)
// Running with a simulator.
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
#include "hashmap.h"
namespace v8 {
namespace internal {
#else // #if !defined(__mips) || defined(USE_SIMULATOR)
// -----------------------------------------------------------------------------
// Utility functions
// When running with the simulator transition into simulated execution at this
// point.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
reinterpret_cast<Object*>(\
assembler::mips::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
p0, p1, p2, p3, p4))
class CachePage {
public:
static const int LINE_VALID = 0;
static const int LINE_INVALID = 1;
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
assembler::mips::Simulator::current()->Call(\
FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
static const int kPageShift = 12;
static const int kPageSize = 1 << kPageShift;
static const int kPageMask = kPageSize - 1;
static const int kLineShift = 2; // The cache line is only 4 bytes right now.
static const int kLineLength = 1 << kLineShift;
static const int kLineMask = kLineLength - 1;
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == NULL ? \
NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
CachePage() {
memset(&validity_map_, LINE_INVALID, sizeof(validity_map_));
}
char* ValidityByte(int offset) {
return &validity_map_[offset >> kLineShift];
}
namespace assembler {
namespace mips {
char* CachedData(int offset) {
return &data_[offset];
}
private:
char data_[kPageSize]; // The cached data.
static const int kValidityMapSize = kPageSize >> kLineShift;
char validity_map_[kValidityMapSize]; // One byte per line.
};
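// Illustrative note: an instruction address maps to its CachePage via
// (addr & ~kPageMask) and to a validity byte via ValidityByte(addr &
// kPageMask); with kPageShift == 12 and kLineShift == 2, each 4KB page
// tracks kPageSize >> kLineShift == 1024 cache lines.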
class Simulator {
public:
friend class Debugger;
friend class MipsDebugger;
// Registers are declared in order. See SMRL chapter 2.
enum Register {
@ -143,7 +175,7 @@ class Simulator {
// The currently executing Simulator instance. Potentially there can be one
// for each native thread.
static Simulator* current();
static Simulator* current(v8::internal::Isolate* isolate);
// Accessors for register state. Reading the pc value adheres to the MIPS
// architecture specification and is off by 8 from the currently executing
@ -152,9 +184,15 @@ class Simulator {
int32_t get_register(int reg) const;
// Same for FPURegisters
void set_fpu_register(int fpureg, int32_t value);
void set_fpu_register_float(int fpureg, float value);
void set_fpu_register_double(int fpureg, double value);
int32_t get_fpu_register(int fpureg) const;
int64_t get_fpu_register_long(int fpureg) const;
float get_fpu_register_float(int fpureg) const;
double get_fpu_register_double(int fpureg) const;
void set_fcsr_bit(uint32_t cc, bool value);
bool test_fcsr_bit(uint32_t cc);
bool set_fcsr_round_error(double original, double rounded);
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int32_t value);
@ -172,7 +210,7 @@ class Simulator {
// V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function,
// which sets up the simulator state and grabs the result on return.
int32_t Call(byte_* entry, int argument_count, ...);
int32_t Call(byte* entry, int argument_count, ...);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
@ -180,6 +218,14 @@ class Simulator {
// Pop an address from the JS stack.
uintptr_t PopAddress();
// ICache checking.
static void FlushICache(v8::internal::HashMap* i_cache, void* start,
size_t size);
// Returns true if pc register contains one of the 'special_values' defined
// below (bad_ra, end_sim_pc).
bool has_bad_pc() const;
private:
enum special_values {
// Known bad pc value to ensure that the simulator does not execute
@ -223,9 +269,17 @@ class Simulator {
inline int32_t SetDoubleHIW(double* addr);
inline int32_t SetDoubleLOW(double* addr);
// Executing is handled based on the instruction type.
void DecodeTypeRegister(Instruction* instr);
// Helper function for DecodeTypeRegister.
void ConfigureTypeRegister(Instruction* instr,
int32_t& alu_out,
int64_t& i64hilo,
uint64_t& u64hilo,
int32_t& next_pc,
bool& do_interrupt);
void DecodeTypeImmediate(Instruction* instr);
void DecodeTypeJump(Instruction* instr);
@ -239,11 +293,18 @@ class Simulator {
if (instr->IsForbiddenInBranchDelay()) {
V8_Fatal(__FILE__, __LINE__,
"Eror:Unexpected %i opcode in a branch delay slot.",
instr->OpcodeField());
instr->OpcodeValue());
}
InstructionDecode(instr);
}
// ICache.
static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
int size);
static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
enum Exception {
none,
kIntegerOverflow,
@ -258,7 +319,7 @@ class Simulator {
// Runtime call support.
static void* RedirectExternalReference(void* external_function,
bool fp_return);
ExternalReference::Type type);
// Used for real time calls that takes two double values as arguments and
// returns a double.
@ -269,19 +330,40 @@ class Simulator {
int32_t registers_[kNumSimuRegisters];
// Coprocessor Registers.
int32_t FPUregisters_[kNumFPURegisters];
// FPU control register.
uint32_t FCSR_;
// Simulator support.
char* stack_;
size_t stack_size_;
bool pc_modified_;
int icount_;
static bool initialized_;
int break_count_;
// Icache simulation
v8::internal::HashMap* i_cache_;
// Registered breakpoints.
Instruction* break_pc_;
Instr break_instr_;
v8::internal::Isolate* isolate_;
};
} } // namespace assembler::mips
// When running with the simulator transition into simulated execution at this
// point.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
Simulator::current(Isolate::Current())->Call( \
entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == NULL ? \
NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
// The simulator has its own stack. Thus it has a different stack limit from
@ -292,20 +374,21 @@ class Simulator {
class SimulatorStack : public v8::internal::AllStatic {
public:
static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
return assembler::mips::Simulator::current()->StackLimit();
return Simulator::current(Isolate::Current())->StackLimit();
}
static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
assembler::mips::Simulator* sim = assembler::mips::Simulator::current();
Simulator* sim = Simulator::current(Isolate::Current());
return sim->PushAddress(try_catch_address);
}
static inline void UnregisterCTryCatch() {
assembler::mips::Simulator::current()->PopAddress();
Simulator::current(Isolate::Current())->PopAddress();
}
};
#endif // !defined(__mips) || defined(USE_SIMULATOR)
} } // namespace v8::internal
#endif // !defined(USE_SIMULATOR)
#endif // V8_MIPS_SIMULATOR_MIPS_H_

View File

@ -57,6 +57,12 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
}
void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register prototype, Label* miss) {
UNIMPLEMENTED_MIPS();
}
// Load a fast property out of a holder object (src). In-object properties
// are loaded directly otherwise the property is loaded from the properties
// fixed array.
@ -75,6 +81,20 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
}
// Generate code to load the length from a string object and return the length.
// If the receiver object is not a string or a wrapped string object the
// execution continues at the miss label. The register containing the
// receiver is potentially clobbered.
void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
Label* miss,
bool support_wrappers) {
UNIMPLEMENTED_MIPS();
}
void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register scratch1,
@ -84,7 +104,7 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
}
// Generate StoreField code, value is passed in r0 register.
// Generate StoreField code, value is passed in a0 register.
// After executing generated code, the receiver_reg and name_reg
// may be clobbered.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
@ -104,15 +124,94 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
}
class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
const ParameterCount& arguments,
Register name)
: stub_compiler_(stub_compiler),
arguments_(arguments),
name_(name) {}
void Compile(MacroAssembler* masm,
JSObject* object,
JSObject* holder,
String* name,
LookupResult* lookup,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
Label* miss) {
UNIMPLEMENTED_MIPS();
}
private:
void CompileCacheable(MacroAssembler* masm,
JSObject* object,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
JSObject* interceptor_holder,
LookupResult* lookup,
String* name,
const CallOptimization& optimization,
Label* miss_label) {
UNIMPLEMENTED_MIPS();
}
void CompileRegular(MacroAssembler* masm,
JSObject* object,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
String* name,
JSObject* interceptor_holder,
Label* miss_label) {
UNIMPLEMENTED_MIPS();
}
void LoadWithInterceptor(MacroAssembler* masm,
Register receiver,
Register holder,
JSObject* holder_obj,
Register scratch,
Label* interceptor_succeeded) {
UNIMPLEMENTED_MIPS();
}
StubCompiler* stub_compiler_;
const ParameterCount& arguments_;
Register name_;
};
#undef __
#define __ ACCESS_MASM(masm())
Register StubCompiler::CheckPrototypes(JSObject* object,
Register object_reg,
JSObject* holder,
Register holder_reg,
Register scratch1,
Register scratch2,
String* name,
int save_at_depth,
Label* miss) {
UNIMPLEMENTED_MIPS();
return no_reg;
}
void StubCompiler::GenerateLoadField(JSObject* object,
JSObject* holder,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
int index,
String* name,
Label* miss) {
@ -125,6 +224,7 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
Object* value,
String* name,
Label* miss) {
@ -132,282 +232,365 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
}
bool StubCompiler::GenerateLoadCallback(JSObject* object,
JSObject* holder,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
AccessorInfo* callback,
String* name,
Label* miss,
Failure** failure) {
MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
JSObject* holder,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
Register scratch3,
AccessorInfo* callback,
String* name,
Label* miss) {
UNIMPLEMENTED_MIPS();
__ break_(0x470);
return false; // UNIMPLEMENTED RETURN
return NULL;
}
void StubCompiler::GenerateLoadInterceptor(JSObject* object,
JSObject* holder,
JSObject* interceptor_holder,
LookupResult* lookup,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
Register scratch3,
String* name,
Label* miss) {
UNIMPLEMENTED_MIPS();
__ break_(0x505);
}
Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
// Registers:
// a1: function
// ra: return address
// Enter an internal frame.
__ EnterInternalFrame();
// Preserve the function.
__ Push(a1);
// Setup aligned call.
__ SetupAlignedCall(t0, 1);
// Push the function on the stack as the argument to the runtime function.
__ Push(a1);
// Call the runtime function
__ CallRuntime(Runtime::kLazyCompile, 1);
__ ReturnFromAlignedCall();
// Calculate the entry point.
__ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
// Restore saved function.
__ Pop(a1);
// Tear down temporary frame.
__ LeaveInternalFrame();
// Do a tail-call of the compiled function.
__ Jump(t9);
return GetCodeWithFlags(flags, "LazyCompileStub");
}
Object* CallStubCompiler::CompileCallField(JSObject* object,
JSObject* holder,
int index,
String* name) {
void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
Object* CallStubCompiler::CompileArrayPushCall(Object* object,
JSObject* holder,
JSFunction* function,
String* name,
CheckType check) {
void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
JSObject* holder,
String* name,
Label* miss) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
Object* CallStubCompiler::CompileArrayPopCall(Object* object,
void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
                                                    JSFunction* function,
                                                    Label* miss) {
  UNIMPLEMENTED_MIPS();
}


MaybeObject* CallStubCompiler::GenerateMissBranch() {
  UNIMPLEMENTED_MIPS();
  return NULL;
}


MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
                                                JSObject* holder,
                                                int index,
                                                String* name) {
  UNIMPLEMENTED_MIPS();
  return NULL;
}


MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
                                                    JSObject* holder,
                                                    JSGlobalPropertyCell* cell,
                                                    JSFunction* function,
                                                    String* name) {
  UNIMPLEMENTED_MIPS();
  return NULL;
}


MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
                                                   JSObject* holder,
                                                   JSGlobalPropertyCell* cell,
                                                   JSFunction* function,
                                                   String* name) {
  UNIMPLEMENTED_MIPS();
  return NULL;
}


MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
    Object* object,
    JSObject* holder,
    JSGlobalPropertyCell* cell,
    JSFunction* function,
    String* name) {
  UNIMPLEMENTED_MIPS();
  return NULL;
}


MaybeObject* CallStubCompiler::CompileStringCharAtCall(
    Object* object,
    JSObject* holder,
    JSGlobalPropertyCell* cell,
    JSFunction* function,
    String* name) {
  UNIMPLEMENTED_MIPS();
  return NULL;
}


MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
    Object* object,
    JSObject* holder,
    JSGlobalPropertyCell* cell,
    JSFunction* function,
    String* name) {
  UNIMPLEMENTED_MIPS();
  return NULL;
}


MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
                                                    JSObject* holder,
                                                    JSGlobalPropertyCell* cell,
                                                    JSFunction* function,
                                                    String* name) {
  UNIMPLEMENTED_MIPS();
  return NULL;
}


MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
                                                  JSObject* holder,
                                                  JSGlobalPropertyCell* cell,
                                                  JSFunction* function,
                                                  String* name) {
  UNIMPLEMENTED_MIPS();
  return NULL;
}


MaybeObject* CallStubCompiler::CompileFastApiCall(
    const CallOptimization& optimization,
    Object* object,
    JSObject* holder,
    JSGlobalPropertyCell* cell,
    JSFunction* function,
    String* name) {
  UNIMPLEMENTED_MIPS();
  return NULL;
}


MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
                                                   JSObject* holder,
                                                   JSFunction* function,
                                                   String* name,
                                                   CheckType check) {
  UNIMPLEMENTED_MIPS();
  return NULL;
}


MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
                                                      JSObject* holder,
                                                      String* name) {
  UNIMPLEMENTED_MIPS();
  return NULL;
}


MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
                                                 GlobalObject* holder,
                                                 JSGlobalPropertyCell* cell,
                                                 JSFunction* function,
                                                 String* name) {
  UNIMPLEMENTED_MIPS();
  return NULL;
}


MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
                                                  int index,
                                                  Map* transition,
                                                  String* name) {
  UNIMPLEMENTED_MIPS();
  return NULL;
}


MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
                                                     AccessorInfo* callback,
                                                     String* name) {
  UNIMPLEMENTED_MIPS();
  return NULL;
}


MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
                                                        String* name) {
  UNIMPLEMENTED_MIPS();
  return NULL;
}


MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
                                                   JSGlobalPropertyCell* cell,
                                                   String* name) {
  UNIMPLEMENTED_MIPS();
  return NULL;
}
MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
JSObject* object,
JSObject* last) {
UNIMPLEMENTED_MIPS();
return NULL;
}
MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
JSObject* holder,
int index,
String* name) {
UNIMPLEMENTED_MIPS();
return NULL;
}
MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
JSObject* object,
JSObject* holder,
AccessorInfo* callback) {
UNIMPLEMENTED_MIPS();
return NULL;
}
MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
JSObject* holder,
Object* value,
String* name) {
UNIMPLEMENTED_MIPS();
return NULL;
}
MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
JSObject* holder,
String* name) {
UNIMPLEMENTED_MIPS();
return NULL;
}
MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
GlobalObject* holder,
JSGlobalPropertyCell* cell,
String* name,
bool is_dont_delete) {
UNIMPLEMENTED_MIPS();
return NULL;
}
MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
JSObject* receiver,
JSObject* holder,
int index) {
UNIMPLEMENTED_MIPS();
return NULL;
}
MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
String* name,
JSObject* receiver,
JSObject* holder,
AccessorInfo* callback) {
UNIMPLEMENTED_MIPS();
return NULL;
}
MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
JSObject* receiver,
JSObject* holder,
Object* value) {
UNIMPLEMENTED_MIPS();
return NULL;
}
MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
JSObject* holder,
String* name) {
UNIMPLEMENTED_MIPS();
return NULL;
}
MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
UNIMPLEMENTED_MIPS();
return NULL;
}
MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
UNIMPLEMENTED_MIPS();
return NULL;
}
MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
UNIMPLEMENTED_MIPS();
return NULL;
}
MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
UNIMPLEMENTED_MIPS();
return NULL;
}
MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
int index,
Map* transition,
String* name) {
UNIMPLEMENTED_MIPS();
return NULL;
}
MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
JSObject* receiver) {
UNIMPLEMENTED_MIPS();
return NULL;
}
MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
UNIMPLEMENTED_MIPS();
return NULL;
}
MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
JSObject* receiver_object,
ExternalArrayType array_type,
Code::Flags flags) {
UNIMPLEMENTED_MIPS();
return NULL;
}
MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
JSObject* receiver_object,
ExternalArrayType array_type,
Code::Flags flags) {
UNIMPLEMENTED_MIPS();
return NULL;
}
View File
@ -25,28 +25,34 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_VIRTUAL_FRAME_MIPS_INL_H_
#define V8_VIRTUAL_FRAME_MIPS_INL_H_

#include "assembler-mips.h"
#include "virtual-frame-mips.h"

namespace v8 {
namespace internal {


MemOperand VirtualFrame::ParameterAt(int index) {
  UNIMPLEMENTED_MIPS();
  return MemOperand(zero_reg, 0);
}


// The receiver frame slot.
MemOperand VirtualFrame::Receiver() {
  UNIMPLEMENTED_MIPS();
  return MemOperand(zero_reg, 0);
}


void VirtualFrame::Forget(int count) {
  UNIMPLEMENTED_MIPS();
}

} }  // namespace v8::internal

#endif  // V8_VIRTUAL_FRAME_MIPS_INL_H_
View File
@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if defined(V8_TARGET_ARCH_MIPS)
@ -39,44 +37,50 @@
namespace v8 {
namespace internal {
// -------------------------------------------------------------------------
// VirtualFrame implementation.
#define __ ACCESS_MASM(masm())
void VirtualFrame::PopToA1A0() {
  UNIMPLEMENTED_MIPS();
}


void VirtualFrame::PopToA1() {
  UNIMPLEMENTED_MIPS();
}


void VirtualFrame::PopToA0() {
  UNIMPLEMENTED_MIPS();
}


void VirtualFrame::MergeTo(const VirtualFrame* expected,
                           Condition cond,
                           Register r1,
                           const Operand& r2) {
  UNIMPLEMENTED_MIPS();
}


void VirtualFrame::MergeTo(VirtualFrame* expected,
                           Condition cond,
                           Register r1,
                           const Operand& r2) {
  UNIMPLEMENTED_MIPS();
}


void VirtualFrame::MergeTOSTo(
    VirtualFrame::TopOfStack expected_top_of_stack_state,
    Condition cond,
    Register r1,
    const Operand& r2) {
  UNIMPLEMENTED_MIPS();
}


void VirtualFrame::Enter() {
  UNIMPLEMENTED_MIPS();
}
@ -86,232 +90,216 @@ void VirtualFrame::Exit() {
void VirtualFrame::AllocateStackSlots() {
int count = local_count();
if (count > 0) {
Comment cmnt(masm(), "[ Allocate space for locals");
Adjust(count);
// Initialize stack slots with 'undefined' value.
__ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
__ addiu(sp, sp, -count * kPointerSize);
for (int i = 0; i < count; i++) {
__ sw(t0, MemOperand(sp, (count-i-1)*kPointerSize));
}
}
}
void VirtualFrame::SaveContextRegister() {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::RestoreContextRegister() {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::PushReceiverSlotAddress() {
UNIMPLEMENTED_MIPS();
}
int VirtualFrame::InvalidateFrameSlotAt(int index) {
return kIllegalIndex;
}
void VirtualFrame::TakeFrameSlotAt(int index) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::StoreToFrameSlotAt(int index) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::PushTryHandler(HandlerType type) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::CallJSFunction(int arg_count) {
  UNIMPLEMENTED_MIPS();
}
void VirtualFrame::CallRuntime(const Runtime::Function* f, int arg_count) {
  UNIMPLEMENTED_MIPS();
}
void VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
PrepareForCall(arg_count, arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(f, arg_count);
}
void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
PrepareForCall(arg_count, arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(id, arg_count);
}
void VirtualFrame::CallAlignedRuntime(Runtime::Function* f, int arg_count) {
UNIMPLEMENTED_MIPS();
}
#ifdef ENABLE_DEBUGGER_SUPPORT
void VirtualFrame::DebugBreak() {
  UNIMPLEMENTED_MIPS();
}
#endif
void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
Result* arg_count_register,
int arg_count) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
  UNIMPLEMENTED_MIPS();
}
void VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {
  UNIMPLEMENTED_MIPS();
}
void VirtualFrame::CallKeyedLoadIC() {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::CallKeyedStoreIC() {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::CallCodeObject(Handle<Code> code,
                                  RelocInfo::Mode rmode,
                                  int dropped_args) {
  UNIMPLEMENTED_MIPS();
}
// NO_TOS_REGISTERS, A0_TOS, A1_TOS, A1_A0_TOS, A0_A1_TOS.
const bool VirtualFrame::kA0InUse[TOS_STATES] =
{ false, true, false, true, true };
const bool VirtualFrame::kA1InUse[TOS_STATES] =
{ false, false, true, true, true };
const int VirtualFrame::kVirtualElements[TOS_STATES] =
{ 0, 1, 1, 2, 2 };
const Register VirtualFrame::kTopRegister[TOS_STATES] =
{ a0, a0, a1, a1, a0 };
const Register VirtualFrame::kBottomRegister[TOS_STATES] =
{ a0, a0, a1, a0, a1 };
const Register VirtualFrame::kAllocatedRegisters[
VirtualFrame::kNumberOfAllocatedRegisters] = { a2, a3, t0, t1, t2 };
// Popping is done by the transition implied by kStateAfterPop. Of course if
// there were no stack slots allocated to registers then the physical SP must
// be adjusted.
const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPop[TOS_STATES] =
{ NO_TOS_REGISTERS, NO_TOS_REGISTERS, NO_TOS_REGISTERS, A0_TOS, A1_TOS };
// Pushing is done by the transition implied by kStateAfterPush. Of course if
// the maximum number of registers was already allocated to the top of stack
// slots then one register must be physically pushed onto the stack.
const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPush[TOS_STATES] =
{ A0_TOS, A1_A0_TOS, A0_A1_TOS, A0_A1_TOS, A1_A0_TOS };
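// Illustrative sketch (not part of this commit): how the two transition
// tables above drive a memory-free virtual push/pop. The enum and table
// values are the ones defined in this file; the driver is hypothetical.
//
//   TopOfStack s = NO_TOS_REGISTERS;
//   s = kStateAfterPush[s];   // push x: s == A0_TOS, x lives in a0.
//   s = kStateAfterPush[s];   // push y: s == A1_A0_TOS, y in a1, x in a0.
//   s = kStateAfterPop[s];    // pop y:  s == A0_TOS, x still in a0.
//   s = kStateAfterPop[s];    // pop x:  s == NO_TOS_REGISTERS.
//
// Only when a push would exceed kMaxTOSRegisters, or a pop finds no value
// in a TOS register, does the physical sp have to move.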
void VirtualFrame::Drop(int count) {
ASSERT(count >= 0);
ASSERT(height() >= count);
int num_virtual_elements = (element_count() - 1) - stack_pointer_;
// Emit code to lower the stack pointer if necessary.
if (num_virtual_elements < count) {
int num_dropped = count - num_virtual_elements;
stack_pointer_ -= num_dropped;
__ addiu(sp, sp, num_dropped * kPointerSize);
}
// Discard elements from the virtual frame and free any registers.
for (int i = 0; i < count; i++) {
FrameElement dropped = elements_.RemoveLast();
if (dropped.is_register()) {
Unuse(dropped.reg());
}
}
}
void VirtualFrame::DropFromVFrameOnly(int count) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::Pop() {
  UNIMPLEMENTED_MIPS();
}
void VirtualFrame::EmitPop(Register reg) {
  UNIMPLEMENTED_MIPS();
}
void VirtualFrame::SpillAllButCopyTOSToA0() {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::SpillAllButCopyTOSToA1() {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::SpillAllButCopyTOSToA1A0() {
UNIMPLEMENTED_MIPS();
}
Register VirtualFrame::Peek() {
UNIMPLEMENTED_MIPS();
return no_reg;
}
Register VirtualFrame::Peek2() {
UNIMPLEMENTED_MIPS();
return no_reg;
}
void VirtualFrame::Dup() {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::Dup2() {
UNIMPLEMENTED_MIPS();
}
Register VirtualFrame::PopToRegister(Register but_not_to_this_one) {
UNIMPLEMENTED_MIPS();
return no_reg;
}
void VirtualFrame::EnsureOneFreeTOSRegister() {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::EmitMultiPop(RegList regs) {
  UNIMPLEMENTED_MIPS();
}
void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
  UNIMPLEMENTED_MIPS();
}
void VirtualFrame::SetElementAt(Register reg, int this_far_down) {
UNIMPLEMENTED_MIPS();
}
Register VirtualFrame::GetTOSRegister() {
UNIMPLEMENTED_MIPS();
return no_reg;
}
void VirtualFrame::EmitPush(Operand operand, TypeInfo info) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::EmitPush(MemOperand operand, TypeInfo info) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::EmitPushRoot(Heap::RootListIndex index) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::EmitMultiPush(RegList regs) {
ASSERT(stack_pointer_ == element_count() - 1);
for (int16_t i = kNumRegisters; i > 0; i--) {
if ((regs & (1 << i)) != 0) {
elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown()));
stack_pointer_++;
}
}
__ MultiPush(regs);
}
void VirtualFrame::EmitArgumentSlots(RegList reglist) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::EmitMultiPushReversed(RegList regs) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::SpillAll() {
UNIMPLEMENTED_MIPS();
}
#undef __
} } // namespace v8::internal
View File
@ -30,11 +30,13 @@
#define V8_MIPS_VIRTUAL_FRAME_MIPS_H_
#include "register-allocator.h"
#include "scopes.h"
namespace v8 {
namespace internal {
// This dummy class is only used to create invalid virtual frames.
extern class InvalidVirtualFrameInitializer {}* kInvalidVirtualFrameInitializer;
// -------------------------------------------------------------------------
// Virtual frames
@ -47,14 +49,54 @@ namespace internal {
class VirtualFrame : public ZoneObject {
public:
class RegisterAllocationScope;
  // A utility class to introduce a scope where the virtual frame is
  // expected to remain spilled. The constructor spills the code
  // generator's current frame, and keeps it spilled.
class SpilledScope BASE_EMBEDDED {
public:
explicit SpilledScope(VirtualFrame* frame)
: old_is_spilled_(
Isolate::Current()->is_virtual_frame_in_spilled_scope()) {
if (frame != NULL) {
if (!old_is_spilled_) {
frame->SpillAll();
} else {
frame->AssertIsSpilled();
}
}
Isolate::Current()->set_is_virtual_frame_in_spilled_scope(true);
}
~SpilledScope() {
Isolate::Current()->set_is_virtual_frame_in_spilled_scope(
old_is_spilled_);
}
static bool is_spilled() {
return Isolate::Current()->is_virtual_frame_in_spilled_scope();
}
private:
int old_is_spilled_;
SpilledScope() {}
friend class RegisterAllocationScope;
};
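  // Illustrative usage (a hypothetical caller, not from this commit): a
  // code generator visitor can pin the frame to the spilled state for the
  // span of a statement, so every value lives in memory:
  //
  //   void CodeGenerator::VisitSomeStatement(SomeStatement* node) {
  //     VirtualFrame::SpilledScope spilled_scope(frame_);
  //     // ... emit code; the frame stays spilled until scope exit ...
  //   }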
class RegisterAllocationScope BASE_EMBEDDED {
public:
// A utility class to introduce a scope where the virtual frame
// is not spilled, ie. where register allocation occurs. Eventually
// when RegisterAllocationScope is ubiquitous it can be removed
// along with the (by then unused) SpilledScope class.
inline explicit RegisterAllocationScope(CodeGenerator* cgen);
inline ~RegisterAllocationScope();
private:
CodeGenerator* cgen_;
bool old_is_spilled_;
RegisterAllocationScope() {}
};
// An illegal index into the virtual frame.
@ -63,45 +105,49 @@ class VirtualFrame : public ZoneObject {
// Construct an initial virtual frame on entry to a JS function.
inline VirtualFrame();
// Construct an invalid virtual frame, used by JumpTargets.
inline VirtualFrame(InvalidVirtualFrameInitializer* dummy);
// Construct a virtual frame as a clone of an existing one.
explicit inline VirtualFrame(VirtualFrame* original);
  inline CodeGenerator* cgen() const;
  inline MacroAssembler* masm();
// The number of elements on the virtual frame.
  int element_count() const { return element_count_; }

  // The height of the virtual expression stack.
  inline int height() const;
  bool is_used(int num) {
    switch (num) {
      case 0: {  // a0.
        return kA0InUse[top_of_stack_state_];
      }
      case 1: {  // a1.
        return kA1InUse[top_of_stack_state_];
      }
      case 2:
      case 3:
      case 4:
      case 5:
      case 6: {  // a2 to a3, t0 to t2.
        ASSERT(num - kFirstAllocatedRegister < kNumberOfAllocatedRegisters);
        ASSERT(num >= kFirstAllocatedRegister);
        if ((register_allocation_map_ &
             (1 << (num - kFirstAllocatedRegister))) == 0) {
          return false;
        } else {
          return true;
        }
      }
      default: {
        ASSERT(num < kFirstAllocatedRegister ||
               num >= kFirstAllocatedRegister + kNumberOfAllocatedRegisters);
        return false;
      }
    }
  }
// Add extra in-memory elements to the top of the frame to match an actual
@ -110,53 +156,60 @@ class VirtualFrame : public ZoneObject {
void Adjust(int count);
  // Forget elements from the top of the frame to match an actual frame (eg,
  // the frame after a runtime call). No code is emitted except to bring the
  // frame to a spilled state.
  void Forget(int count);
// Forget count elements from the top of the frame and adjust the stack
// pointer downward. This is used, for example, before merging frames at
// break, continue, and return targets.
void ForgetElements(int count);
// Spill all values from the frame to memory.
void SpillAll();
void AssertIsSpilled() const {
ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
ASSERT(register_allocation_map_ == 0);
}
void AssertIsNotSpilled() {
ASSERT(!SpilledScope::is_spilled());
}
// Spill all occurrences of a specific register from the frame.
  void Spill(Register reg) {
    UNIMPLEMENTED();
  }
// Spill all occurrences of an arbitrary register if possible. Return the
// register spilled or no_reg if it was not possible to free any register
  // (ie, they all have frame-external references). Unimplemented.
Register SpillAnyRegister();
// Prepare this virtual frame for merging to an expected frame by
// performing some state changes that do not require generating
// code. It is guaranteed that no code will be generated.
void PrepareMergeTo(VirtualFrame* expected);
// Make this virtual frame have a state identical to an expected virtual
// frame. As a side effect, code may be emitted to make this frame match
// the expected one.
  void MergeTo(const VirtualFrame* expected,
               Condition cond = al,
               Register r1 = no_reg,
               const Operand& r2 = Operand(no_reg));
  void MergeTo(VirtualFrame* expected,
               Condition cond = al,
               Register r1 = no_reg,
               const Operand& r2 = Operand(no_reg));
// Checks whether this frame can be branched to by the other frame.
bool IsCompatibleWith(const VirtualFrame* other) const {
return (tos_known_smi_map_ & (~other->tos_known_smi_map_)) == 0;
}
inline void ForgetTypeInfo() {
tos_known_smi_map_ = 0;
}
// Detach a frame from its code generator, perhaps temporarily. This
// tells the register allocator that it is free to use frame-internal
// registers. Used when the code generator's frame is switched from this
// one to NULL by an unconditional jump.
  void DetachFromCodeGenerator() {
  }
// (Re)attach a frame to its code generator. This informs the register
@ -164,10 +217,6 @@ class VirtualFrame : public ZoneObject {
// Used when a code generator's frame is switched from NULL to this one by
// binding a label.
  void AttachToCodeGenerator() {
  }
// Emit code for the physical JS entry and exit frame sequences. After
@ -177,176 +226,142 @@ class VirtualFrame : public ZoneObject {
void Enter();
void Exit();
  // Prepare for returning from the frame by dropping elements in the virtual
  // frame. This avoids generating unnecessary merge code when jumping to the
  // shared return site. No spill code emitted. Value to return should be in
  // v0.
  inline void PrepareForReturn();
  // Number of local variables after which we use a loop for allocating
  // stack slots.
static const int kLocalVarBound = 5;
// Allocate and initialize the frame-allocated locals.
void AllocateStackSlots();
// The current top of the expression stack as an assembly operand.
  MemOperand Top() {
    AssertIsSpilled();
    return MemOperand(sp, 0);
  }
// An element of the expression stack as an assembly operand.
  MemOperand ElementAt(int index) {
    int adjusted_index = index - kVirtualElements[top_of_stack_state_];
    ASSERT(adjusted_index >= 0);
    return MemOperand(sp, adjusted_index * kPointerSize);
  }
  bool KnownSmiAt(int index) {
    if (index >= kTOSKnownSmiMapSize) return false;
    return (tos_known_smi_map_ & (1 << index)) != 0;
  }
// A frame-allocated local as an assembly operand.
  inline MemOperand LocalAt(int index);
// Push the address of the receiver slot on the frame.
void PushReceiverSlotAddress();
// The function frame slot.
  MemOperand Function() { return MemOperand(fp, kFunctionOffset); }
// The context frame slot.
  MemOperand Context() { return MemOperand(fp, kContextOffset); }
// A parameter as an assembly operand.
  inline MemOperand ParameterAt(int index);
// The receiver frame slot.
  inline MemOperand Receiver();
// Push a try-catch or try-finally handler on top of the virtual frame.
void PushTryHandler(HandlerType type);
// Call stub given the number of arguments it expects on (and
// removes from) the stack.
  inline void CallStub(CodeStub* stub, int arg_count);
// Call JS function from top of the stack with arguments
// taken from the stack.
void CallJSFunction(int arg_count);
// Call runtime given the number of arguments expected on (and
// removed from) the stack.
  void CallRuntime(const Runtime::Function* f, int arg_count);
void CallRuntime(Runtime::FunctionId id, int arg_count);
#ifdef ENABLE_DEBUGGER_SUPPORT
  void DebugBreak();
#endif
// Call load IC. Receiver is on the stack and is consumed. Result is returned
// in v0.
void CallLoadIC(Handle<String> name, RelocInfo::Mode mode);
// Call store IC. If the load is contextual, value is found on top of the
// frame. If not, value and receiver are on the frame. Both are consumed.
// Result is returned in v0.
void CallStoreIC(Handle<String> name, bool is_contextual);
// Call keyed load IC. Key and receiver are on the stack. Both are consumed.
// Result is returned in v0.
void CallKeyedLoadIC();
// Call keyed store IC. Value, key and receiver are on the stack. All three
// are consumed. Result is returned in v0 (and a0).
void CallKeyedStoreIC();
  // Call into an IC stub given the number of arguments it removes
  // from the stack. Register arguments to the IC stub are implicit,
  // and depend on the type of IC stub.
  void CallCodeObject(Handle<Code> ic,
                      RelocInfo::Mode rmode,
                      int dropped_args);
// Drop a number of elements from the top of the expression stack. May
// emit code to affect the physical frame. Does not clobber any registers
// excepting possibly the stack pointer.
void Drop(int count);
  // Drop one element.
  void Drop() { Drop(1); }
  // Pop an element from the top of the expression stack. Discards
  // the result.
  void Pop();
// Pop an element from the top of the expression stack. The register
// will be one normally used for the top of stack register allocation
// so you can't hold on to it if you push on the stack.
Register PopToRegister(Register but_not_to_this_one = no_reg);
// Look at the top of the stack. The register returned is aliased and
// must be copied to a scratch register before modification.
Register Peek();
// Look at the value beneath the top of the stack. The register returned is
// aliased and must be copied to a scratch register before modification.
Register Peek2();
// Duplicate the top of stack.
void Dup();
// Duplicate the two elements on top of stack.
void Dup2();
// Flushes all registers, but it puts a copy of the top-of-stack in a0.
void SpillAllButCopyTOSToA0();
// Flushes all registers, but it puts a copy of the top-of-stack in a1.
void SpillAllButCopyTOSToA1();
// Flushes all registers, but it puts a copy of the top-of-stack in a1
// and the next value on the stack in a0.
void SpillAllButCopyTOSToA1A0();
// Pop and save an element from the top of the expression stack and
// emit a corresponding pop instruction.
@ -355,40 +370,41 @@ class VirtualFrame : public ZoneObject {
void EmitMultiPop(RegList regs);
void EmitMultiPopReversed(RegList regs);
// Takes the top two elements and puts them in a0 (top element) and a1
// (second element).
void PopToA1A0();
// Takes the top element and puts it in a1.
void PopToA1();
// Takes the top element and puts it in a0.
void PopToA0();
// Push an element on top of the expression stack and emit a
// corresponding push instruction.
  void EmitPush(Register reg, TypeInfo type_info = TypeInfo::Unknown());
  void EmitPush(Operand operand, TypeInfo type_info = TypeInfo::Unknown());
  void EmitPush(MemOperand operand, TypeInfo type_info = TypeInfo::Unknown());
  void EmitPushRoot(Heap::RootListIndex index);
// Overwrite the nth thing on the stack. If the nth position is in a
// register then this turns into a Move, otherwise an sw. Afterwards
// you can still use the register even if it is a register that can be
// used for TOS (a0 or a1).
void SetElementAt(Register reg, int this_far_down);
// Get a register which is free and which must be immediately used to
// push on the top of the stack.
Register GetTOSRegister();
// Same but for multiple registers.
void EmitMultiPush(RegList regs);
void EmitMultiPushReversed(RegList regs);
// Push an element on the virtual frame.
inline void Push(Register reg, NumberInfo info = NumberInfo::Unknown());
inline void Push(Handle<Object> value);
inline void Push(Smi* value);
// Pushing a result invalidates it (its contents become owned by the frame).
void Push(Result* result) {
if (result->is_register()) {
Push(result->reg());
} else {
ASSERT(result->is_constant());
Push(result->handle());
}
result->Unuse();
}
// Nip removes zero or more elements from immediately below the top
// of the frame, leaving the previous top-of-frame value on top of
// the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
inline void Nip(int num_dropped);
  // This pushes 4 argument slots on the stack and saves the requested
  // 'a' registers, where the 'a' registers are the argument registers
  // a0 to a3.
  void EmitArgumentSlots(RegList reglist);
inline void SetTypeForLocalAt(int index, NumberInfo info);
inline void SetTypeForParamAt(int index, NumberInfo info);
static Register scratch0() { return t4; }
static Register scratch1() { return t5; }
static Register scratch2() { return t6; }
private:
static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
@ -398,24 +414,51 @@ class VirtualFrame : public ZoneObject {
static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
// 5 states for the top of stack, which can be in memory or in a0 and a1.
enum TopOfStack { NO_TOS_REGISTERS, A0_TOS, A1_TOS, A1_A0_TOS, A0_A1_TOS,
TOS_STATES};
static const int kMaxTOSRegisters = 2;
static const bool kA0InUse[TOS_STATES];
static const bool kA1InUse[TOS_STATES];
static const int kVirtualElements[TOS_STATES];
static const TopOfStack kStateAfterPop[TOS_STATES];
static const TopOfStack kStateAfterPush[TOS_STATES];
static const Register kTopRegister[TOS_STATES];
static const Register kBottomRegister[TOS_STATES];
// We allocate up to 5 locals in registers.
static const int kNumberOfAllocatedRegisters = 5;
  // a2, a3 and t0 to t2 are allocated to locals.
static const int kFirstAllocatedRegister = 2;
static const Register kAllocatedRegisters[kNumberOfAllocatedRegisters];
static Register AllocatedRegister(int r) {
ASSERT(r >= 0 && r < kNumberOfAllocatedRegisters);
return kAllocatedRegisters[r];
}
// The number of elements on the stack frame.
int element_count_;
TopOfStack top_of_stack_state_:3;
int register_allocation_map_:kNumberOfAllocatedRegisters;
static const int kTOSKnownSmiMapSize = 4;
unsigned tos_known_smi_map_:kTOSKnownSmiMapSize;
  // The index of the element that is at the processor's stack pointer
  // (the sp register). For now since everything is in memory it is given
  // by the number of elements on the not-very-virtual stack frame.
  int stack_pointer() { return element_count_ - 1; }
// The number of frame-allocated locals and parameters respectively.
  inline int parameter_count() const;
  inline int local_count() const;
// The index of the element that is at the processor's frame pointer
// (the fp register). The parameters, receiver, function, and context
// are below the frame pointer.
  inline int frame_pointer() const;
// The index of the first parameter. The receiver lies below the first
// parameter.
@ -423,75 +466,22 @@ class VirtualFrame : public ZoneObject {
// The index of the context slot in the frame. It is immediately
// below the frame pointer.
  inline int context_index();
// The index of the function slot in the frame. It is below the frame
// pointer and context slot.
  inline int function_index();
// The index of the first local. Between the frame pointer and the
// locals lies the return address.
  inline int local0_index() const;
// The index of the base of the expression stack.
  inline int expression_base_index() const;
// Convert a frame index into a frame pointer relative offset into the
// actual stack.
  inline int fp_relative(int index);
// Spill all elements in registers. Spill the top spilled_args elements
// on the frame. Sync all other frame elements.
@ -499,45 +489,37 @@ class VirtualFrame : public ZoneObject {
// the effect of an upcoming call that will drop them from the stack.
void PrepareForCall(int spilled_args, int dropped_args);
  // If all top-of-stack registers are in use then the lowest one is pushed
  // onto the physical stack and made free.
  void EnsureOneFreeTOSRegister();
  // Emit instructions to get the top of stack state from where we are to
  // where we want to be.
  void MergeTOSTo(TopOfStack expected_state,
                  Condition cond = al,
                  Register r1 = no_reg,
                  const Operand& r2 = Operand(no_reg));
  inline bool Equals(const VirtualFrame* other);
inline void LowerHeight(int count) {
element_count_ -= count;
if (count >= kTOSKnownSmiMapSize) {
tos_known_smi_map_ = 0;
} else {
tos_known_smi_map_ >>= count;
}
}
// Call a code stub that has already been prepared for calling (via
// PrepareForCall).
void RawCallStub(CodeStub* stub);
// Calls a code object which has already been prepared for calling
// (via PrepareForCall).
void RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
inline void RaiseHeight(int count, unsigned known_smi_map = 0) {
ASSERT(known_smi_map < (1u << count));
element_count_ += count;
if (count >= kTOSKnownSmiMapSize) {
tos_known_smi_map_ = known_smi_map;
} else {
tos_known_smi_map_ = ((tos_known_smi_map_ << count) | known_smi_map);
}
}
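  // Worked example (illustrative): with kTOSKnownSmiMapSize == 4 and
  // tos_known_smi_map_ == 0b0101, RaiseHeight(2, 0b01) yields
  // (0b0101 << 2) | 0b01 == 0b10101, truncated by the 4-bit field to
  // 0b0101: the new top of stack is a known smi and the oldest bit is
  // discarded. LowerHeight(2) afterwards shifts back: 0b0101 >> 2 == 0b01.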
friend class JumpTarget;
};
View File
@ -848,11 +848,45 @@ MaybeObject* Object::GetProperty(String* key, PropertyAttributes* attributes) {
IsRegionDirty(object->address() + offset)); \
}
#ifndef V8_TARGET_ARCH_MIPS
#define READ_DOUBLE_FIELD(p, offset) \
(*reinterpret_cast<double*>(FIELD_ADDR(p, offset)))
#else // V8_TARGET_ARCH_MIPS
// Prevent gcc from using load-double (mips ldc1) on (possibly)
// non-64-bit aligned HeapNumber::value.
static inline double read_double_field(HeapNumber* p, int offset) {
union conversion {
double d;
uint32_t u[2];
} c;
c.u[0] = (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)));
c.u[1] = (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset + 4)));
return c.d;
}
#define READ_DOUBLE_FIELD(p, offset) read_double_field(p, offset)
#endif // V8_TARGET_ARCH_MIPS
#ifndef V8_TARGET_ARCH_MIPS
#define WRITE_DOUBLE_FIELD(p, offset, value) \
(*reinterpret_cast<double*>(FIELD_ADDR(p, offset)) = value)
#else // V8_TARGET_ARCH_MIPS
// Prevent gcc from using store-double (mips sdc1) on (possibly)
// non-64-bit aligned HeapNumber::value.
static inline void write_double_field(HeapNumber* p, int offset,
double value) {
union conversion {
double d;
uint32_t u[2];
} c;
c.d = value;
(*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset))) = c.u[0];
(*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset + 4))) = c.u[1];
}
#define WRITE_DOUBLE_FIELD(p, offset, value) \
write_double_field(p, offset, value)
#endif // V8_TARGET_ARCH_MIPS
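// A portable sketch of the same idea (an assumption, not from this commit):
// letting memcpy perform the access likewise stops the compiler from
// emitting a single 64-bit load/store that would trap on a 4-byte-aligned
// address:
//
//   static inline double read_double_unaligned(const void* p) {
//     double d;
//     memcpy(&d, p, sizeof(d));  // typically lowered to two word loads
//     return d;
//   }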
#define READ_INT_FIELD(p, offset) \
(*reinterpret_cast<int*>(FIELD_ADDR(p, offset)))
View File
@ -97,6 +97,10 @@ uint64_t OS::CpuFeaturesImpliedByPlatform() {
return 1u << VFP3;
#elif CAN_USE_ARMV7_INSTRUCTIONS
return 1u << ARMv7;
#elif (defined(__mips_hard_float) && __mips_hard_float != 0)
  // Here gcc is telling us that we are on a MIPS and that it assumes we
  // have FPU instructions. If gcc can assume it then so can we.
return 1u << FPU;
#else
return 0; // Linux runs on anything.
#endif
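// For reference (an assumption about gcc, not something this hunk checks):
// gcc predefines __mips_hard_float=1 under -mhard-float and
// __mips_soft_float=1 under -msoft-float, which can be verified with:
//   mips-linux-gnu-gcc -mhard-float -dM -E - </dev/null | grep hard_float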
@ -175,6 +179,58 @@ bool OS::ArmCpuHasFeature(CpuFeature feature) {
#endif // def __arm__
#ifdef __mips__
bool OS::MipsCpuHasFeature(CpuFeature feature) {
const char* search_string = NULL;
const char* file_name = "/proc/cpuinfo";
// Simple detection of FPU at runtime for Linux.
// It is based on /proc/cpuinfo, which reveals hardware configuration
// to user-space applications. According to MIPS (early 2010), no similar
// facility is universally available on the MIPS architectures,
// so it's up to individual OSes to provide such.
//
// This is written as a straight shot one pass parser
// and not using STL string and ifstream because,
// on Linux, it's reading from a (non-mmap-able)
// character special device.
switch (feature) {
case FPU:
search_string = "FPU";
break;
default:
UNREACHABLE();
}
FILE* f = NULL;
const char* what = search_string;
if (NULL == (f = fopen(file_name, "r")))
return false;
int k;
while (EOF != (k = fgetc(f))) {
if (k == *what) {
++what;
while ((*what != '\0') && (*what == fgetc(f))) {
++what;
}
if (*what == '\0') {
fclose(f);
return true;
} else {
what = search_string;
}
}
}
fclose(f);
// Did not find string in the proc file.
return false;
}
#endif // def __mips__
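// Illustrative call site (hypothetical; the actual caller is outside this
// hunk): the MIPS assembler's feature probe can key off this at startup:
//
//   if (OS::MipsCpuHasFeature(FPU)) supported_ |= (1u << FPU);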
int OS::ActivationFrameAlignment() {
#ifdef V8_TARGET_ARCH_ARM
// On EABI ARM targets this is required for fp correctness in the
@ -190,8 +246,9 @@ int OS::ActivationFrameAlignment() {
void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
#if (defined(V8_TARGET_ARCH_ARM) && defined(__arm__)) || \
    (defined(V8_TARGET_ARCH_MIPS) && defined(__mips__))
  // Only use on ARM or MIPS hardware.
MemoryBarrier();
#else
__asm__ __volatile__("" : : : "memory");
@ -860,8 +917,9 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif
#elif V8_HOST_ARCH_MIPS
  sample->pc = reinterpret_cast<Address>(mcontext.pc);
  sample->sp = reinterpret_cast<Address>(mcontext.gregs[29]);
  sample->fp = reinterpret_cast<Address>(mcontext.gregs[30]);
#endif
sampler->SampleStack(sample);
sampler->Tick(sample);
View File
@ -294,6 +294,9 @@ class OS {
// Support runtime detection of VFP3 on ARM CPUs.
static bool ArmCpuHasFeature(CpuFeature feature);
// Support runtime detection of FPU on MIPS CPUs.
static bool MipsCpuHasFeature(CpuFeature feature);
// Returns the activation frame alignment constraint or zero if
// the platform doesn't care. Guaranteed to be a power of two.
static int ActivationFrameAlignment();
View File
@ -48,6 +48,7 @@ class RegExpMacroAssembler {
enum IrregexpImplementation {
kIA32Implementation,
kARMImplementation,
kMIPSImplementation,
kX64Implementation,
kBytecodeImplementation
};
View File
@ -468,7 +468,8 @@ enum CpuFeature { SSE4_1 = 32 + 19, // x86
CPUID = 10, // x86
VFP3 = 1, // ARM
ARMv7 = 2, // ARM
SAHF = 0,  // x86
FPU = 1};  // MIPS
// The Strict Mode (ECMA-262 5th edition, 4.2.2).
enum StrictModeFlag {
View File
@ -96,7 +96,7 @@ SOURCES = {
'arch:x64': ['test-assembler-x64.cc',
'test-macro-assembler-x64.cc',
'test-log-stack-tracer.cc'],
'arch:mips': ['test-assembler-mips.cc'],
'os:linux': ['test-platform-linux.cc'],
'os:macos': ['test-platform-macos.cc'],
'os:nullos': ['test-platform-nullos.cc'],
View File
@ -79,12 +79,15 @@ test-compiler: SKIP
test-cpu-profiler: SKIP
test-debug: SKIP
test-decls: SKIP
test-deoptimization: SKIP
test-func-name-inference: SKIP
test-heap: SKIP
test-heap-profiler: SKIP
test-log: SKIP
test-log-utils: SKIP
test-mark-compact: SKIP
test-parsing: SKIP
test-profile-generator: SKIP
test-regexp: SKIP
test-serialize: SKIP
test-sockets: SKIP
File diff suppressed because it is too large

View File
@ -45,6 +45,10 @@
#include "arm/macro-assembler-arm.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
#ifdef V8_TARGET_ARCH_MIPS
#include "mips/macro-assembler-mips.h"
#include "mips/regexp-macro-assembler-mips.h"
#endif
#ifdef V8_TARGET_ARCH_X64
#include "x64/macro-assembler-x64.h"
#include "x64/regexp-macro-assembler-x64.h"
@ -670,7 +674,7 @@ typedef RegExpMacroAssemblerX64 ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_ARM
typedef RegExpMacroAssemblerARM ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_MIPS
typedef RegExpMacroAssemblerMIPS ArchRegExpMacroAssembler;
#endif
class ContextInitializer {