MIPS port initial commit

This is the first step in the MIPS port of V8. It adds an assembler, a disassembler and a simulator for the MIPS32 architecture.

It contains stubbed-out implementations of all the compiler/code generator infrastructure needed to make everything build.

Patch by Alexandre Rames from Sigma Designs Inc.

This is the landing of http://codereview.chromium.org/543161.
Review URL: http://codereview.chromium.org/561072

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@3799 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
sgjesse@chromium.org 2010-02-04 20:36:58 +00:00
parent a28143c77c
commit a6a7c75ae0
52 changed files with 10792 additions and 7 deletions


@@ -4,6 +4,7 @@
 # Name/Organization <email address>
 Google Inc.
+Sigma Designs Inc.
 Alexander Botero-Lowry <alexbl@FreeBSD.org>
 Alexandre Vassalotti <avassalotti@gmail.com>


@@ -191,6 +191,17 @@ LIBRARY_FLAGS = {
 'armvariant:arm': {
 'CPPDEFINES': ['V8_ARM_VARIANT_ARM']
 },
+'arch:mips': {
+'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
+'simulator:none': {
+'CCFLAGS': ['-EL', '-mips32r2', '-Wa,-mips32r2', '-fno-inline'],
+'LDFLAGS': ['-EL']
+}
+},
+'simulator:mips': {
+'CCFLAGS': ['-m32'],
+'LINKFLAGS': ['-m32']
+},
 'arch:x64': {
 'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
 'CCFLAGS': ['-m64'],
@@ -293,6 +304,9 @@ V8_EXTRA_FLAGS = {
 # used by the arm simulator.
 'WARNINGFLAGS': ['/wd4996']
 },
+'arch:mips': {
+'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
+},
 'disassembler:on': {
 'CPPDEFINES': ['ENABLE_DISASSEMBLER']
 }
@@ -458,10 +472,22 @@ SAMPLE_FLAGS = {
 'CCFLAGS': ['-m64'],
 'LINKFLAGS': ['-m64']
 },
+'arch:mips': {
+'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
+'simulator:none': {
+'CCFLAGS': ['-EL', '-mips32r2', '-Wa,-mips32r2', '-fno-inline'],
+'LINKFLAGS': ['-EL'],
+'LDFLAGS': ['-EL']
+}
+},
 'simulator:arm': {
 'CCFLAGS': ['-m32'],
 'LINKFLAGS': ['-m32']
 },
+'simulator:mips': {
+'CCFLAGS': ['-m32'],
+'LINKFLAGS': ['-m32']
+},
 'mode:release': {
 'CCFLAGS': ['-O2']
 },
@@ -602,7 +628,7 @@ SIMPLE_OPTIONS = {
 'help': 'the os to build for (' + OS_GUESS + ')'
 },
 'arch': {
-'values':['arm', 'ia32', 'x64'],
+'values':['arm', 'ia32', 'x64', 'mips'],
 'default': ARCH_GUESS,
 'help': 'the architecture to build for (' + ARCH_GUESS + ')'
 },
@@ -652,7 +678,7 @@ SIMPLE_OPTIONS = {
 'help': 'use Microsoft Visual C++ link-time code generation'
 },
 'simulator': {
-'values': ['arm', 'none'],
+'values': ['arm', 'mips', 'none'],
 'default': 'none',
 'help': 'build with simulator'
 },
@@ -872,6 +898,11 @@ def PostprocessOptions(options):
 options['armvariant'] = 'arm'
 if (options['armvariant'] != 'none' and options['arch'] != 'arm'):
 options['armvariant'] = 'none'
+if options['arch'] == 'mips':
+if ('regexp' in ARGUMENTS) and options['regexp'] == 'native':
+# Print a warning if native regexp is specified for mips
+print "Warning: forcing regexp to interpreted for mips"
+options['regexp'] = 'interpreted'
 def ParseEnvOverrides(arg, imports):


@@ -131,6 +131,24 @@ SOURCES = {
 'armvariant:thumb2': Split("""
 arm/assembler-thumb2.cc
 """),
+'arch:mips': Split("""
+mips/assembler-mips.cc
+mips/builtins-mips.cc
+mips/codegen-mips.cc
+mips/constants-mips.cc
+mips/cpu-mips.cc
+mips/debug-mips.cc
+mips/disasm-mips.cc
+mips/fast-codegen-mips.cc
+mips/full-codegen-mips.cc
+mips/frames-mips.cc
+mips/ic-mips.cc
+mips/jump-target-mips.cc
+mips/macro-assembler-mips.cc
+mips/register-allocator-mips.cc
+mips/stub-cache-mips.cc
+mips/virtual-frame-mips.cc
+"""),
 'arch:ia32': Split("""
 ia32/assembler-ia32.cc
 ia32/builtins-ia32.cc
@@ -168,6 +186,7 @@ SOURCES = {
 x64/virtual-frame-x64.cc
 """),
 'simulator:arm': ['arm/simulator-arm.cc'],
+'simulator:mips': ['mips/simulator-mips.cc'],
 'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
 'os:openbsd': ['platform-openbsd.cc', 'platform-posix.cc'],
 'os:linux': ['platform-linux.cc', 'platform-posix.cc'],


@@ -506,8 +506,10 @@ static inline bool is_intn(int x, int n) {
 return -(1 << (n-1)) <= x && x < (1 << (n-1));
 }
-static inline bool is_int24(int x) { return is_intn(x, 24); }
 static inline bool is_int8(int x) { return is_intn(x, 8); }
+static inline bool is_int16(int x) { return is_intn(x, 16); }
+static inline bool is_int18(int x) { return is_intn(x, 18); }
+static inline bool is_int24(int x) { return is_intn(x, 24); }
 static inline bool is_uintn(int x, int n) {
 return (x & -(1 << n)) == 0;
@@ -519,9 +521,20 @@ static inline bool is_uint4(int x) { return is_uintn(x, 4); }
 static inline bool is_uint5(int x) { return is_uintn(x, 5); }
 static inline bool is_uint6(int x) { return is_uintn(x, 6); }
 static inline bool is_uint8(int x) { return is_uintn(x, 8); }
+static inline bool is_uint10(int x) { return is_uintn(x, 10); }
 static inline bool is_uint12(int x) { return is_uintn(x, 12); }
 static inline bool is_uint16(int x) { return is_uintn(x, 16); }
 static inline bool is_uint24(int x) { return is_uintn(x, 24); }
+static inline bool is_uint26(int x) { return is_uintn(x, 26); }
+static inline bool is_uint28(int x) { return is_uintn(x, 28); }
+static inline int NumberOfBitsSet(uint32_t x) {
+unsigned int num_bits_set;
+for (num_bits_set = 0; x; x >>= 1) {
+num_bits_set += x & 1;
+}
+return num_bits_set;
+}
 } } // namespace v8::internal


@@ -39,6 +39,8 @@
 #include "x64/codegen-x64-inl.h"
 #elif V8_TARGET_ARCH_ARM
 #include "arm/codegen-arm-inl.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/codegen-mips-inl.h"
 #else
 #error Unsupported target architecture.
 #endif


@@ -86,6 +86,8 @@ enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION };
 #include "x64/codegen-x64.h"
 #elif V8_TARGET_ARCH_ARM
 #include "arm/codegen-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/codegen-mips.h"
 #else
 #error Unsupported target architecture.
 #endif


@@ -218,7 +218,7 @@ DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
 // rewriter.cc
 DEFINE_bool(optimize_ast, true, "optimize the ast")
-// simulator-arm.cc
+// simulator-arm.cc and simulator-mips.cc
 DEFINE_bool(trace_sim, false, "trace simulator execution")
 DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")


@@ -36,6 +36,8 @@
 #include "x64/frames-x64.h"
 #elif V8_TARGET_ARCH_ARM
 #include "arm/frames-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/frames-mips.h"
 #else
 #error Unsupported target architecture.
 #endif


@@ -46,6 +46,9 @@ namespace internal {
 #elif defined(__ARMEL__)
 #define V8_HOST_ARCH_ARM 1
 #define V8_HOST_ARCH_32_BIT 1
+#elif defined(_MIPS_ARCH_MIPS32R2)
+#define V8_HOST_ARCH_MIPS 1
+#define V8_HOST_ARCH_32_BIT 1
 #else
 #error Your host architecture was not detected as supported by v8
 #endif
@@ -53,6 +56,7 @@ namespace internal {
 #if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
 #define V8_TARGET_CAN_READ_UNALIGNED 1
 #elif V8_TARGET_ARCH_ARM
+#elif V8_TARGET_ARCH_MIPS
 #else
 #error Your target architecture is not supported by v8
 #endif


@@ -86,6 +86,13 @@ enum AllocationFlags {
 #endif
 #include "code.h" // must be after assembler_*.h
 #include "arm/macro-assembler-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/constants-mips.h"
+#include "assembler.h"
+#include "mips/assembler-mips.h"
+#include "mips/assembler-mips-inl.h"
+#include "code.h" // must be after assembler_*.h
+#include "mips/macro-assembler-mips.h"
 #else
 #error Unsupported target architecture.
 #endif


@@ -0,0 +1,215 @@
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
// Copyright 2010 the V8 project authors. All rights reserved.
#ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
#define V8_MIPS_ASSEMBLER_MIPS_INL_H_
#include "mips/assembler-mips.h"
#include "cpu.h"
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
// Condition
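// NegateCondition below relies on the condition encodings in constants-mips.h
// coming in pairs that differ only in the least significant bit, so flipping
// that bit maps each condition to its logical negation.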
Condition NegateCondition(Condition cc) {
ASSERT(cc != cc_always);
return static_cast<Condition>(cc ^ 1);
}
// -----------------------------------------------------------------------------
// Operand and MemOperand
Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
rm_ = no_reg;
imm32_ = immediate;
rmode_ = rmode;
}
Operand::Operand(const ExternalReference& f) {
rm_ = no_reg;
imm32_ = reinterpret_cast<int32_t>(f.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
Operand::Operand(const char* s) {
rm_ = no_reg;
imm32_ = reinterpret_cast<int32_t>(s);
rmode_ = RelocInfo::EMBEDDED_STRING;
}
Operand::Operand(Smi* value) {
rm_ = no_reg;
imm32_ = reinterpret_cast<intptr_t>(value);
rmode_ = RelocInfo::NONE;
}
Operand::Operand(Register rm) {
rm_ = rm;
}
bool Operand::is_reg() const {
return rm_.is_valid();
}
// -----------------------------------------------------------------------------
// RelocInfo
void RelocInfo::apply(intptr_t delta) {
// On MIPS we do not use pc relative addressing, so we don't need to patch the
// code here.
}
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
return Assembler::target_address_at(pc_);
}
Address RelocInfo::target_address_address() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
return reinterpret_cast<Address>(pc_);
}
void RelocInfo::set_target_address(Address target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
Assembler::set_target_address_at(pc_, target);
}
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
}
Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Handle<Object>(reinterpret_cast<Object**>(
Assembler::target_address_at(pc_)));
}
Object** RelocInfo::target_object_address() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return reinterpret_cast<Object**>(pc_);
}
void RelocInfo::set_target_object(Object* target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
}
Address* RelocInfo::target_reference_address() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
return reinterpret_cast<Address*>(pc_);
}
Address RelocInfo::call_address() {
ASSERT(IsPatchedReturnSequence());
// The two-instruction offset assumes a patched return sequence.
ASSERT(IsJSReturn(rmode()));
return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
}
void RelocInfo::set_call_address(Address target) {
ASSERT(IsPatchedReturnSequence());
// The two-instruction offset assumes a patched return sequence.
ASSERT(IsJSReturn(rmode()));
Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
}
Object* RelocInfo::call_object() {
return *call_object_address();
}
Object** RelocInfo::call_object_address() {
ASSERT(IsPatchedReturnSequence());
// The two-instruction offset assumes a patched return sequence.
ASSERT(IsJSReturn(rmode()));
return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
}
void RelocInfo::set_call_object(Object* target) {
*call_object_address() = target;
}
bool RelocInfo::IsPatchedReturnSequence() {
#ifdef DEBUG
PrintF("%s - %d - %s : Checking for jal(r)",
__FILE__, __LINE__, __func__);
#endif
return ((Assembler::instr_at(pc_) & kOpcodeMask) == SPECIAL) &&
(((Assembler::instr_at(pc_) & kFunctionFieldMask) == JAL) ||
((Assembler::instr_at(pc_) & kFunctionFieldMask) == JALR));
}
// -----------------------------------------------------------------------------
// Assembler
void Assembler::CheckBuffer() {
if (buffer_space() <= kGap) {
GrowBuffer();
}
}
void Assembler::emit(Instr x) {
CheckBuffer();
*reinterpret_cast<Instr*>(pc_) = x;
pc_ += kInstrSize;
}
} } // namespace v8::internal
#endif // V8_MIPS_ASSEMBLER_MIPS_INL_H_

src/mips/assembler-mips.cc (new file, 1208 lines)

File diff suppressed because it is too large.

src/mips/assembler-mips.h (new file, 663 lines)

@@ -0,0 +1,663 @@
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
// Copyright 2010 the V8 project authors. All rights reserved.
#ifndef V8_MIPS_ASSEMBLER_MIPS_H_
#define V8_MIPS_ASSEMBLER_MIPS_H_
#include <stdio.h>
#include "assembler.h"
#include "constants-mips.h"
#include "serialize.h"
using namespace assembler::mips;
namespace v8 {
namespace internal {
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
// compatible with int, which has caused code-generation bugs.
//
// 2) We would prefer to use a class instead of a struct but we don't like
// the register initialization to depend on the particular initialization
// order (which appears to be different on OS X, Linux, and Windows for the
// installed versions of C++ we tried). Using a struct permits C-style
// "initialization". Also, the Register objects cannot be const as this
// forces initialization stubs in MSVC, making us dependent on initialization
// order.
//
// 3) By not using an enum, we are possibly preventing the compiler from
// doing certain constant folds, which may significantly reduce the
// code generated for some assembly instructions (because they boil down
// to a few constants). If this is a problem, we could change the code
// such that we use an enum in optimized mode, and the struct in debug
// mode. This way we get the compile-time error checking in debug mode
// and best performance in optimized code.
// -----------------------------------------------------------------------------
// Implementation of Register and FPURegister
// Core register.
struct Register {
bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
bool is(Register reg) const { return code_ == reg.code_; }
int code() const {
ASSERT(is_valid());
return code_;
}
int bit() const {
ASSERT(is_valid());
return 1 << code_;
}
// Unfortunately we can't make this private in a struct.
int code_;
};
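// For illustration only (the real register definitions live in
// assembler-mips.cc): the aggregate layout above allows plain C-style
// initialization, e.g. a hypothetical register with code 8 (t0 in the O32
// numbering) could be written as
//
//   const Register example_reg = { 8 };
//
// after which example_reg.is_valid() holds and example_reg.bit() == 1 << 8.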
extern const Register no_reg;
extern const Register zero_reg;
extern const Register at;
extern const Register v0;
extern const Register v1;
extern const Register a0;
extern const Register a1;
extern const Register a2;
extern const Register a3;
extern const Register t0;
extern const Register t1;
extern const Register t2;
extern const Register t3;
extern const Register t4;
extern const Register t5;
extern const Register t6;
extern const Register t7;
extern const Register s0;
extern const Register s1;
extern const Register s2;
extern const Register s3;
extern const Register s4;
extern const Register s5;
extern const Register s6;
extern const Register s7;
extern const Register t8;
extern const Register t9;
extern const Register k0;
extern const Register k1;
extern const Register gp;
extern const Register sp;
extern const Register s8_fp;
extern const Register ra;
int ToNumber(Register reg);
Register ToRegister(int num);
// Coprocessor register.
struct FPURegister {
bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegister; }
bool is(FPURegister creg) const { return code_ == creg.code_; }
int code() const {
ASSERT(is_valid());
return code_;
}
int bit() const {
ASSERT(is_valid());
return 1 << code_;
}
// Unfortunately we can't make this private in a struct.
int code_;
};
extern const FPURegister no_creg;
extern const FPURegister f0;
extern const FPURegister f1;
extern const FPURegister f2;
extern const FPURegister f3;
extern const FPURegister f4;
extern const FPURegister f5;
extern const FPURegister f6;
extern const FPURegister f7;
extern const FPURegister f8;
extern const FPURegister f9;
extern const FPURegister f10;
extern const FPURegister f11;
extern const FPURegister f12; // arg
extern const FPURegister f13;
extern const FPURegister f14; // arg
extern const FPURegister f15;
extern const FPURegister f16;
extern const FPURegister f17;
extern const FPURegister f18;
extern const FPURegister f19;
extern const FPURegister f20;
extern const FPURegister f21;
extern const FPURegister f22;
extern const FPURegister f23;
extern const FPURegister f24;
extern const FPURegister f25;
extern const FPURegister f26;
extern const FPURegister f27;
extern const FPURegister f28;
extern const FPURegister f29;
extern const FPURegister f30;
extern const FPURegister f31;
// Returns the equivalent of !cc.
// Negation of the default no_condition (-1) results in a non-default
// no_condition value (-2). As long as tests for no_condition check
// for condition < 0, this will work as expected.
inline Condition NegateCondition(Condition cc);
inline Condition ReverseCondition(Condition cc) {
switch (cc) {
case Uless:
return Ugreater;
case Ugreater:
return Uless;
case Ugreater_equal:
return Uless_equal;
case Uless_equal:
return Ugreater_equal;
case less:
return greater;
case greater:
return less;
case greater_equal:
return less_equal;
case less_equal:
return greater_equal;
default:
return cc;
};
}
enum Hint {
no_hint = 0
};
inline Hint NegateHint(Hint hint) {
return no_hint;
}
// -----------------------------------------------------------------------------
// Machine instruction Operands.
// Class Operand represents a shifter operand in data processing instructions.
class Operand BASE_EMBEDDED {
public:
// Immediate.
INLINE(explicit Operand(int32_t immediate,
RelocInfo::Mode rmode = RelocInfo::NONE));
INLINE(explicit Operand(const ExternalReference& f));
INLINE(explicit Operand(const char* s));
INLINE(explicit Operand(Object** opp));
INLINE(explicit Operand(Context** cpp));
explicit Operand(Handle<Object> handle);
INLINE(explicit Operand(Smi* value));
// Register.
INLINE(explicit Operand(Register rm));
// Return true if this is a register operand.
INLINE(bool is_reg() const);
Register rm() const { return rm_; }
private:
Register rm_;
int32_t imm32_; // Valid if rm_ == no_reg
RelocInfo::Mode rmode_;
friend class Assembler;
friend class MacroAssembler;
};
// On MIPS we have only one addressing mode with base_reg + offset.
// Class MemOperand represents a memory operand in load and store instructions.
class MemOperand : public Operand {
public:
explicit MemOperand(Register rn, int16_t offset = 0);
private:
int16_t offset_;
friend class Assembler;
};
class Assembler : public Malloced {
public:
// Create an assembler. Instructions and relocation information are emitted
// into a buffer, with the instructions starting from the beginning and the
// relocation information starting from the end of the buffer. See CodeDesc
// for a detailed comment on the layout (globals.h).
//
// If the provided buffer is NULL, the assembler allocates and grows its own
// buffer, and buffer_size determines the initial buffer size. The buffer is
// owned by the assembler and deallocated upon destruction of the assembler.
//
// If the provided buffer is not NULL, the assembler uses the provided buffer
// for code generation and assumes its size to be buffer_size. If the buffer
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
Assembler(void* buffer, int buffer_size);
~Assembler();
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
void GetCode(CodeDesc* desc);
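// A minimal usage sketch based only on the comments above (the buffer size
// and the emitted instruction are illustrative, not part of this interface):
//
//   Assembler masm(NULL, 4 * KB);   // assembler owns and grows its buffer
//   masm.addiu(v0, zero_reg, 1);    // ... emit some code ...
//   CodeDesc desc;
//   masm.GetCode(&desc);            // desc now describes the generated code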
// Label operations & relative jumps (PPUM Appendix D).
//
// Takes a branch opcode (cc) and a label (L) and generates
// either a backward branch or a forward branch and links it
// to the label fixup chain. Usage:
//
// Label L; // unbound label
// j(cc, &L); // forward branch to unbound label
// bind(&L); // bind label to the current pc
// j(cc, &L); // backward branch to bound label
// bind(&L); // illegal: a label may be bound only once
//
// Note: The same Label can be used for forward and backward branches
// but it may be bound only once.
void bind(Label* L); // binds an unbound label L to the current code position
// Returns the branch offset to the given label from the current code position
// Links the label to the current position if it is still unbound
// Manages the jump elimination optimization if the second parameter is true.
int32_t branch_offset(Label* L, bool jump_elimination_allowed);
int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
int32_t o = branch_offset(L, jump_elimination_allowed);
ASSERT((o & 3) == 0); // Assert the offset is aligned.
return o >> 2;
}
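// A concrete sketch of the label pattern above using the branch instructions
// declared below (__ is the ACCESS_MASM shorthand used in the .cc files; the
// registers and the skipped code are illustrative):
//
//   Label done;
//   __ beq(a0, zero_reg, &done);   // forward branch if a0 == 0
//   __ nop();                      // branch delay slot
//   ...                            // code skipped when a0 == 0
//   __ bind(&done);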
// Puts a label's target address at the given position.
// The high 8 bits are set to zero.
void label_at_put(Label* L, int at_offset);
// Size of an instruction.
static const int kInstrSize = sizeof(Instr);
// Difference between address of current opcode and target address offset.
static const int kBranchPCOffset = 4;
// Read/Modify the code target address in the branch/call instruction at pc.
static Address target_address_at(Address pc);
static void set_target_address_at(Address pc, Address target);
// This sets the branch destination (which gets loaded at the call address).
// This is for calls and branches within generated code.
inline static void set_target_at(Address instruction_payload,
Address target) {
set_target_address_at(instruction_payload, target);
}
// This sets the branch destination.
// This is for calls and branches to runtime code.
inline static void set_external_target_at(Address instruction_payload,
Address target) {
set_target_address_at(instruction_payload, target);
}
static const int kCallTargetSize = 3 * kPointerSize;
static const int kExternalTargetSize = 3 * kPointerSize;
// Distance between the instruction referring to the address of the call
// target and the return address.
static const int kCallTargetAddressOffset = 4 * kInstrSize;
// Distance between start of patched return sequence and the emitted address
// to jump to.
static const int kPatchReturnSequenceAddressOffset = kInstrSize;
// ---------------------------------------------------------------------------
// Code generation.
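// Note: the canonical MIPS nop is sll zero_reg, zero_reg, 0, which encodes as
// the all-zero instruction word.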
void nop() { sll(zero_reg, zero_reg, 0); }
//------- Branch and jump instructions --------
// We don't use likely variant of instructions.
void b(int16_t offset);
void b(Label* L) { b(branch_offset(L, false)>>2); }
void bal(int16_t offset);
void bal(Label* L) { bal(branch_offset(L, false)>>2); }
void beq(Register rs, Register rt, int16_t offset);
void beq(Register rs, Register rt, Label* L) {
beq(rs, rt, branch_offset(L, false) >> 2);
}
void bgez(Register rs, int16_t offset);
void bgezal(Register rs, int16_t offset);
void bgtz(Register rs, int16_t offset);
void blez(Register rs, int16_t offset);
void bltz(Register rs, int16_t offset);
void bltzal(Register rs, int16_t offset);
void bne(Register rs, Register rt, int16_t offset);
void bne(Register rs, Register rt, Label* L) {
bne(rs, rt, branch_offset(L, false)>>2);
}
// Never use the int16_t b(l)cond version with a branch offset
// instead of the Label* version. See the Twiki for details.
// Jump targets must be in the current 256 MB-aligned region, i.e. within 28 bits.
void j(int32_t target);
void jal(int32_t target);
void jalr(Register rs, Register rd = ra);
void jr(Register target);
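// Arithmetic behind the 256 MB note above (standard MIPS32 ISA behaviour, not
// specific to this port): j and jal carry a 26-bit instr_index, and the target
// is formed as
//
//   target = (pc_of_delay_slot & 0xF0000000) | (instr_index << 2);
//
// so a jump can only land inside the same 1 << 28 = 256 MB aligned window.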
//-------Data-processing-instructions---------
// Arithmetic.
void add(Register rd, Register rs, Register rt);
void addu(Register rd, Register rs, Register rt);
void sub(Register rd, Register rs, Register rt);
void subu(Register rd, Register rs, Register rt);
void mult(Register rs, Register rt);
void multu(Register rs, Register rt);
void div(Register rs, Register rt);
void divu(Register rs, Register rt);
void mul(Register rd, Register rs, Register rt);
void addi(Register rd, Register rs, int32_t j);
void addiu(Register rd, Register rs, int32_t j);
// Logical.
void and_(Register rd, Register rs, Register rt);
void or_(Register rd, Register rs, Register rt);
void xor_(Register rd, Register rs, Register rt);
void nor(Register rd, Register rs, Register rt);
void andi(Register rd, Register rs, int32_t j);
void ori(Register rd, Register rs, int32_t j);
void xori(Register rd, Register rs, int32_t j);
void lui(Register rd, int32_t j);
// Shifts.
void sll(Register rd, Register rt, uint16_t sa);
void sllv(Register rd, Register rt, Register rs);
void srl(Register rd, Register rt, uint16_t sa);
void srlv(Register rd, Register rt, Register rs);
void sra(Register rt, Register rd, uint16_t sa);
void srav(Register rt, Register rd, Register rs);
//------------Memory-instructions-------------
void lb(Register rd, const MemOperand& rs);
void lbu(Register rd, const MemOperand& rs);
void lw(Register rd, const MemOperand& rs);
void sb(Register rd, const MemOperand& rs);
void sw(Register rd, const MemOperand& rs);
//-------------Misc-instructions--------------
// Break / Trap instructions.
void break_(uint32_t code);
void tge(Register rs, Register rt, uint16_t code);
void tgeu(Register rs, Register rt, uint16_t code);
void tlt(Register rs, Register rt, uint16_t code);
void tltu(Register rs, Register rt, uint16_t code);
void teq(Register rs, Register rt, uint16_t code);
void tne(Register rs, Register rt, uint16_t code);
// Move from HI/LO register.
void mfhi(Register rd);
void mflo(Register rd);
// Set on less than.
void slt(Register rd, Register rs, Register rt);
void sltu(Register rd, Register rs, Register rt);
void slti(Register rd, Register rs, int32_t j);
void sltiu(Register rd, Register rs, int32_t j);
//--------Coprocessor-instructions----------------
// Load, store, and move.
void lwc1(FPURegister fd, const MemOperand& src);
void ldc1(FPURegister fd, const MemOperand& src);
void swc1(FPURegister fs, const MemOperand& dst);
void sdc1(FPURegister fs, const MemOperand& dst);
// When paired with MTC1 to write a value to a 64-bit FPR, the MTC1 must be
// executed first, followed by the MTHC1.
void mtc1(FPURegister fs, Register rt);
void mthc1(FPURegister fs, Register rt);
void mfc1(FPURegister fs, Register rt);
void mfhc1(FPURegister fs, Register rt);
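// Sketch of the ordering rule stated above, writing a 64-bit value held in two
// GPRs into one double-precision FPR (register choices are illustrative):
//
//   __ mtc1(f0, t0);    // low 32 bits first
//   __ mthc1(f0, t1);   // then the high 32 bits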
// Conversion.
void cvt_w_s(FPURegister fd, FPURegister fs);
void cvt_w_d(FPURegister fd, FPURegister fs);
void cvt_l_s(FPURegister fd, FPURegister fs);
void cvt_l_d(FPURegister fd, FPURegister fs);
void cvt_s_w(FPURegister fd, FPURegister fs);
void cvt_s_l(FPURegister fd, FPURegister fs);
void cvt_s_d(FPURegister fd, FPURegister fs);
void cvt_d_w(FPURegister fd, FPURegister fs);
void cvt_d_l(FPURegister fd, FPURegister fs);
void cvt_d_s(FPURegister fd, FPURegister fs);
// Conditions and branches.
void c(FPUCondition cond, SecondaryField fmt,
FPURegister ft, FPURegister fs, uint16_t cc = 0);
void bc1f(int16_t offset, uint16_t cc = 0);
void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); }
void bc1t(int16_t offset, uint16_t cc = 0);
void bc1t(Label* L, uint16_t cc = 0) { bc1t(branch_offset(L, false)>>2, cc); }
// Check the code size generated from label to here.
int InstructionsGeneratedSince(Label* l) {
return (pc_offset() - l->pos()) / kInstrSize;
}
// Debugging.
// Mark address of the ExitJSFrame code.
void RecordJSReturn();
// Record a comment relocation entry that can be used by a disassembler.
// Use --debug_code to enable.
void RecordComment(const char* msg);
void RecordPosition(int pos);
void RecordStatementPosition(int pos);
void WriteRecordedPositions();
int32_t pc_offset() const { return pc_ - buffer_; }
int32_t current_position() const { return current_position_; }
int32_t current_statement_position() const { return current_statement_position_; }
// Check if there is less than kGap bytes available in the buffer.
// If this is the case, we need to grow the buffer before emitting
// an instruction or relocation information.
inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
// Get the number of bytes available in the buffer.
inline int available_space() const { return reloc_info_writer.pos() - pc_; }
protected:
int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Read/patch instructions.
static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
void instr_at_put(byte* pc, Instr instr) {
*reinterpret_cast<Instr*>(pc) = instr;
}
Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
void instr_at_put(int pos, Instr instr) {
*reinterpret_cast<Instr*>(buffer_ + pos) = instr;
}
// Check if an instruction is a branch of some kind.
bool is_branch(Instr instr);
// Decode branch instruction at pos and return branch target pos.
int target_at(int32_t pos);
// Patch branch instruction at pos to branch to given branch target pos.
void target_at_put(int32_t pos, int32_t target_pos);
// Say if we need to relocate with this mode.
bool MustUseAt(RelocInfo::Mode rmode);
// Record reloc info for current pc_.
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
private:
// Code buffer:
// The buffer into which code and relocation info are generated.
byte* buffer_;
int buffer_size_;
// True if the assembler owns the buffer, false if buffer is external.
bool own_buffer_;
// Buffer size and constant pool distance are checked together at regular
// intervals of kBufferCheckInterval emitted bytes.
static const int kBufferCheckInterval = 1*KB/2;
// Code generation.
// The relocation writer's position is at least kGap bytes below the end of
// the generated instructions. This is so that multi-instruction sequences do
// not have to check for overflow. The same is true for writes of large
// relocation info entries.
static const int kGap = 32;
byte* pc_; // The program counter - moves forward.
// Relocation information generation.
// Each relocation is encoded as a variable size value.
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
RelocInfoWriter reloc_info_writer;
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
// Source position information.
int current_position_;
int current_statement_position_;
int written_position_;
int written_statement_position_;
// Code emission.
inline void CheckBuffer();
void GrowBuffer();
inline void emit(Instr x);
// Instruction generation.
// We have 3 different kind of encoding layout on MIPS.
// However due to many different types of objects encoded in the same fields
// we have quite a few aliases for each mode.
// Using the same structure to refer to Register and FPURegister would spare a
// few aliases, but mixing both does not look clean to me.
// Anyway we could surely implement this differently.
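// For reference, the three standard MIPS32 layouts these helpers emit:
//
//   Register (R) type:  opcode(6) rs(5) rt(5) rd(5) sa(5) function(6)
//   Immediate (I) type: opcode(6) rs(5) rt(5) immediate(16)
//   Jump (J) type:      opcode(6) target(26)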
void GenInstrRegister(Opcode opcode,
Register rs,
Register rt,
Register rd,
uint16_t sa = 0,
SecondaryField func = NULLSF);
void GenInstrRegister(Opcode opcode,
SecondaryField fmt,
FPURegister ft,
FPURegister fs,
FPURegister fd,
SecondaryField func = NULLSF);
void GenInstrRegister(Opcode opcode,
SecondaryField fmt,
Register rt,
FPURegister fs,
FPURegister fd,
SecondaryField func = NULLSF);
void GenInstrImmediate(Opcode opcode,
Register rs,
Register rt,
int32_t j);
void GenInstrImmediate(Opcode opcode,
Register rs,
SecondaryField SF,
int32_t j);
void GenInstrImmediate(Opcode opcode,
Register r1,
FPURegister r2,
int32_t j);
void GenInstrJump(Opcode opcode,
uint32_t address);
// Labels.
void print(Label* L);
void bind_to(Label* L, int pos);
void link_to(Label* L, Label* appendix);
void next(Label* L);
friend class RegExpMacroAssemblerMIPS;
friend class RelocInfo;
};
} } // namespace v8::internal
#endif // V8_MIPS_ASSEMBLER_MIPS_H_

src/mips/builtins-mips.cc (new file, 109 lines)

@@ -0,0 +1,109 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen-inl.h"
#include "debug.h"
#include "runtime.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
void Builtins::Generate_Adaptor(MacroAssembler* masm,
CFunctionId id,
BuiltinExtraArguments extra_args) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, false);
}
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
#undef __
} } // namespace v8::internal


@@ -0,0 +1,56 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_MIPS_CODEGEN_MIPS_INL_H_
#define V8_MIPS_CODEGEN_MIPS_INL_H_
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
// Platform-specific inline functions.
void DeferredCode::Jump() { __ b(&entry_label_); }
void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
#undef __
} } // namespace v8::internal
#endif // V8_MIPS_CODEGEN_MIPS_INL_H_

src/mips/codegen-mips.cc (new file, 501 lines)

@@ -0,0 +1,501 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "debug.h"
#include "parser.h"
#include "register-allocator-inl.h"
#include "runtime.h"
#include "scopes.h"
#include "compiler.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
// -------------------------------------------------------------------------
// Platform-specific DeferredCode functions.
void DeferredCode::SaveRegisters() {
UNIMPLEMENTED_MIPS();
}
void DeferredCode::RestoreRegisters() {
UNIMPLEMENTED_MIPS();
}
// -------------------------------------------------------------------------
// CodeGenerator implementation
CodeGenerator::CodeGenerator(MacroAssembler* masm)
: deferred_(8),
masm_(masm),
scope_(NULL),
frame_(NULL),
allocator_(NULL),
cc_reg_(cc_always),
state_(NULL),
function_return_is_shadowed_(false) {
}
// Calling conventions:
// s8_fp: caller's frame pointer
// sp: stack pointer
// a1: called JS function
// cp: callee's context
void CodeGenerator::Generate(CompilationInfo* info, Mode mode) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitBlock(Block* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitDeclaration(Declaration* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitIfStatement(IfStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitForStatement(ForStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitForInStatement(ForInStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitFunctionBoilerplateLiteral(
FunctionBoilerplateLiteral* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitConditional(Conditional* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitSlot(Slot* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitLiteral(Literal* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitAssignment(Assignment* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitThrow(Throw* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitProperty(Property* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitCall(Call* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitCallNew(CallNew* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
// This should generate code that performs a charCodeAt() call or returns
// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
// It is not yet implemented on MIPS, so it always goes to the slow case.
void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitCountOperation(CountOperation* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitThisFunction(ThisFunction* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
UNIMPLEMENTED_MIPS();
}
#ifdef DEBUG
bool CodeGenerator::HasValidEntryRegisters() { return true; }
#endif
#undef __
#define __ ACCESS_MASM(masm)
// On entry a0 and a1 are the things to be compared. On exit v0 is 0,
// positive or negative to indicate the result of the comparison.
void CompareStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
__ break_(0x765);
}
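// A smi-only fragment honoring that contract might eventually look like the
// following (illustrative only; it ignores overflow and non-smi inputs):
//
//   __ subu(v0, a0, a1);   // v0 < 0, == 0 or > 0 mirrors the comparison
//   __ jr(ra);
//   __ nop();              // branch delay slot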
void StackCheckStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
__ break_(0x790);
}
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
__ break_(0x808);
}
void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
UncatchableExceptionType type) {
UNIMPLEMENTED_MIPS();
__ break_(0x815);
}
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
bool do_gc,
bool always_allocate) {
UNIMPLEMENTED_MIPS();
__ break_(0x826);
}
void CEntryStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
__ break_(0x831);
}
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
UNIMPLEMENTED_MIPS();
// Load a result.
__ li(v0, Operand(0x1234));
__ jr(ra);  // Return.
__ nop();   // Fill the branch delay slot of jr.
}
// This stub performs an instanceof, calling the builtin function if
// necessary. Uses a1 for the object, a0 for the function that it may
// be an instance of (these are fetched from the stack).
void InstanceofStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
__ break_(0x845);
}
void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
__ break_(0x851);
}
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
__ break_(0x857);
}
void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
__ break_(0x863);
}
const char* CompareStub::GetName() {
UNIMPLEMENTED_MIPS();
return NULL; // UNIMPLEMENTED RETURN
}
int CompareStub::MinorKey() {
// Encode the two parameters in a unique 16 bit value.
ASSERT(static_cast<unsigned>(cc_) >> 28 < (1 << 15));
return (static_cast<unsigned>(cc_) >> 27) | (strict_ ? 1 : 0);
}
#undef __
} } // namespace v8::internal

src/mips/codegen-mips.h (new file, 311 lines)

@@ -0,0 +1,311 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_MIPS_CODEGEN_MIPS_H_
#define V8_MIPS_CODEGEN_MIPS_H_
namespace v8 {
namespace internal {
// Forward declarations
class CompilationInfo;
class DeferredCode;
class RegisterAllocator;
class RegisterFile;
enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// -------------------------------------------------------------------------
// Code generation state
// The state is passed down the AST by the code generator (and back up, in
// the form of the state of the label pair). It is threaded through the
// call stack. Constructing a state implicitly pushes it on the owning code
// generator's stack of states, and destroying one implicitly pops it.
class CodeGenState BASE_EMBEDDED {
public:
// Create an initial code generator state. Destroying the initial state
// leaves the code generator with a NULL state.
explicit CodeGenState(CodeGenerator* owner);
// Create a code generator state based on a code generator's current
// state. The new state has its own typeof state and pair of branch
// labels.
CodeGenState(CodeGenerator* owner,
JumpTarget* true_target,
JumpTarget* false_target);
// Destroy a code generator state and restore the owning code generator's
// previous state.
~CodeGenState();
TypeofState typeof_state() const { return typeof_state_; }
JumpTarget* true_target() const { return true_target_; }
JumpTarget* false_target() const { return false_target_; }
private:
// The owning code generator.
CodeGenerator* owner_;
// A flag indicating whether we are compiling the immediate subexpression
// of a typeof expression.
TypeofState typeof_state_;
JumpTarget* true_target_;
JumpTarget* false_target_;
// The previous state of the owning code generator, restored when
// this state is destroyed.
CodeGenState* previous_;
};
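// Illustrative only: the implicit push/pop described above means a visitor can
// scope a state like this (the targets and the visited subexpression are
// hypothetical):
//
//   {
//     JumpTarget if_true;
//     JumpTarget if_false;
//     CodeGenState new_state(this, &if_true, &if_false);
//     // ... generate code for the subexpression ...
//   }  // destruction restores the previous CodeGenState here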
// -------------------------------------------------------------------------
// CodeGenerator
class CodeGenerator: public AstVisitor {
public:
// Compilation mode. Either the compiler is used as the primary
// compiler and needs to setup everything or the compiler is used as
// the secondary compiler for split compilation and has to handle
// bailouts.
enum Mode {
PRIMARY,
SECONDARY
};
// Takes a function literal, generates code for it. This function should only
// be called by compiler.cc.
static Handle<Code> MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
static void MakeCodePrologue(CompilationInfo* info);
// Allocate and install the code.
static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
Code::Flags flags,
CompilationInfo* info);
#ifdef ENABLE_LOGGING_AND_PROFILING
static bool ShouldGenerateLog(Expression* type);
#endif
static void SetFunctionInfo(Handle<JSFunction> fun,
FunctionLiteral* lit,
bool is_toplevel,
Handle<Script> script);
static void RecordPositions(MacroAssembler* masm, int pos);
// Accessors
MacroAssembler* masm() { return masm_; }
VirtualFrame* frame() const { return frame_; }
inline Handle<Script> script();
bool has_valid_frame() const { return frame_ != NULL; }
// Set the virtual frame to be new_frame, with non-frame register
// reference counts given by non_frame_registers. The non-frame
// register reference counts of the old frame are returned in
// non_frame_registers.
void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
void DeleteFrame();
RegisterAllocator* allocator() const { return allocator_; }
CodeGenState* state() { return state_; }
void set_state(CodeGenState* state) { state_ = state; }
void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
static const int kUnknownIntValue = -1;
// Number of instructions used for the JS return sequence. The constant is
// used by the debugger to patch the JS return sequence.
static const int kJSReturnSequenceLength = 6;
private:
// Construction/Destruction.
explicit CodeGenerator(MacroAssembler* masm);
virtual ~CodeGenerator() { delete masm_; }
// Accessors.
inline bool is_eval();
Scope* scope() const { return scope_; }
// Generating deferred code.
void ProcessDeferred();
// State
bool has_cc() const { return cc_reg_ != cc_always; }
TypeofState typeof_state() const { return state_->typeof_state(); }
JumpTarget* true_target() const { return state_->true_target(); }
JumpTarget* false_target() const { return state_->false_target(); }
// We don't track loop nesting level on mips yet.
int loop_nesting() const { return 0; }
// Node visitors.
void VisitStatements(ZoneList<Statement*>* statements);
#define DEF_VISIT(type) \
void Visit##type(type* node);
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
// Main code generation function
void Generate(CompilationInfo* info, Mode mode);
struct InlineRuntimeLUT {
void (CodeGenerator::*method)(ZoneList<Expression*>*);
const char* name;
};
static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
bool CheckForInlineRuntimeCall(CallRuntime* node);
static bool PatchInlineRuntimeEntry(Handle<String> name,
const InlineRuntimeLUT& new_entry,
InlineRuntimeLUT* old_entry);
static Handle<Code> ComputeLazyCompile(int argc);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
// Declare global variables and functions in the given array of
// name/value pairs.
void DeclareGlobals(Handle<FixedArray> pairs);
// Support for type checks.
void GenerateIsSmi(ZoneList<Expression*>* args);
void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
void GenerateIsArray(ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
// Support for arguments.length and arguments[?].
void GenerateArgumentsLength(ZoneList<Expression*>* args);
void GenerateArgumentsAccess(ZoneList<Expression*>* args);
// Support for accessing the class and value fields of an object.
void GenerateClassOf(ZoneList<Expression*>* args);
void GenerateValueOf(ZoneList<Expression*>* args);
void GenerateSetValueOf(ZoneList<Expression*>* args);
// Fast support for charCodeAt(n).
void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
// Fast support for object equality testing.
void GenerateObjectEquals(ZoneList<Expression*>* args);
void GenerateLog(ZoneList<Expression*>* args);
// Fast support for Math.random().
void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
void GenerateStringAdd(ZoneList<Expression*>* args);
void GenerateSubString(ZoneList<Expression*>* args);
void GenerateStringCompare(ZoneList<Expression*>* args);
void GenerateRegExpExec(ZoneList<Expression*>* args);
// Fast support for Math.sin and Math.cos.
inline void GenerateMathSin(ZoneList<Expression*>* args);
inline void GenerateMathCos(ZoneList<Expression*>* args);
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
ALWAYS_FALSE,
DONT_KNOW
};
ConditionAnalysis AnalyzeCondition(Expression* cond);
// Methods used to indicate which source code is generated for. Source
// positions are collected by the assembler and emitted with the relocation
// information.
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
void CodeForStatementPosition(Statement* node);
void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
// True if the registers are valid for entry to a block.
bool HasValidEntryRegisters();
#endif
bool is_eval_; // Tells whether code is generated for eval.
Handle<Script> script_;
List<DeferredCode*> deferred_;
// Assembler
MacroAssembler* masm_; // to generate code
CompilationInfo* info_;
// Code generation state
Scope* scope_;
VirtualFrame* frame_;
RegisterAllocator* allocator_;
Condition cc_reg_;
CodeGenState* state_;
// Jump targets
BreakTarget function_return_;
// True if the function return is shadowed (i.e., jumping to the target
// function_return_ does not jump to the true function return, but rather
// to some unlinking code).
bool function_return_is_shadowed_;
static InlineRuntimeLUT kInlineRuntimeLUT[];
friend class VirtualFrame;
friend class JumpTarget;
friend class Reference;
friend class FastCodeGenerator;
friend class FullCodeGenSyntaxChecker;
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
} } // namespace v8::internal
#endif // V8_MIPS_CODEGEN_MIPS_H_

323
src/mips/constants-mips.cc Normal file
View File

@ -0,0 +1,323 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "constants-mips.h"
namespace assembler {
namespace mips {
namespace v8i = v8::internal;
// -----------------------------------------------------------------------------
// Registers
// These register names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
const char* Registers::names_[kNumSimuRegisters] = {
"zero_reg",
"at",
"v0", "v1",
"a0", "a1", "a2", "a3",
"t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
"t8", "t9",
"k0", "k1",
"gp",
"sp",
"fp",
"ra",
"LO", "HI",
"pc"
};
// List of alias names which can be used when referring to MIPS registers.
const Registers::RegisterAlias Registers::aliases_[] = {
{0, "zero"},
{23, "cp"},
{30, "s8"},
{30, "s8_fp"},
{kInvalidRegister, NULL}
};
const char* Registers::Name(int reg) {
const char* result;
if ((0 <= reg) && (reg < kNumSimuRegisters)) {
result = names_[reg];
} else {
result = "noreg";
}
return result;
}
int Registers::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumSimuRegisters; i++) {
if (strcmp(names_[i], name) == 0) {
return i;
}
}
// Look through the alias names.
int i = 0;
while (aliases_[i].reg != kInvalidRegister) {
if (strcmp(aliases_[i].name, name) == 0) {
return aliases_[i].reg;
}
i++;
}
// No register with the requested name found.
return kInvalidRegister;
}
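// Example lookups, for illustration: Registers::Name(4) returns "a0",
// Registers::Number("zero") resolves to 0 through the alias table, and an
// unknown name yields kInvalidRegister.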
const char* FPURegister::names_[kNumFPURegister] = {
"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11",
"f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
"f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
};
// List of alias names which can be used when referring to MIPS FPU registers.
const FPURegister::RegisterAlias FPURegister::aliases_[] = {
{kInvalidRegister, NULL}
};
const char* FPURegister::Name(int creg) {
const char* result;
if ((0 <= creg) && (creg < kNumFPURegister)) {
result = names_[creg];
} else {
result = "nocreg";
}
return result;
}
int FPURegister::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumFPURegister; i++) {
if (strcmp(names_[i], name) == 0) {
return i;
}
}
// Look through the alias names.
int i = 0;
while (aliases_[i].creg != kInvalidRegister) {
if (strcmp(aliases_[i].name, name) == 0) {
return aliases_[i].creg;
}
i++;
}
// No FPU register with the requested name found.
return kInvalidFPURegister;
}
// -----------------------------------------------------------------------------
// Instruction
bool Instruction::IsForbiddenInBranchDelay() {
int op = OpcodeFieldRaw();
switch (op) {
case J:
case JAL:
case BEQ:
case BNE:
case BLEZ:
case BGTZ:
case BEQL:
case BNEL:
case BLEZL:
case BGTZL:
return true;
case REGIMM:
switch (RtFieldRaw()) {
case BLTZ:
case BGEZ:
case BLTZAL:
case BGEZAL:
return true;
default:
return false;
};
break;
case SPECIAL:
switch (FunctionFieldRaw()) {
case JR:
case JALR:
return true;
default:
return false;
};
break;
default:
return false;
};
}
bool Instruction::IsLinkingInstruction() {
int op = OpcodeFieldRaw();
switch (op) {
case JAL:
case BGEZAL:
case BLTZAL:
return true;
case SPECIAL:
switch (FunctionFieldRaw()) {
case JALR:
return true;
default:
return false;
};
default:
return false;
};
}
bool Instruction::IsTrap() {
if (OpcodeFieldRaw() != SPECIAL) {
return false;
} else {
switch (FunctionFieldRaw()) {
case BREAK:
case TGE:
case TGEU:
case TLT:
case TLTU:
case TEQ:
case TNE:
return true;
default:
return false;
};
}
}
Instruction::Type Instruction::InstructionType() const {
switch (OpcodeFieldRaw()) {
case SPECIAL:
switch (FunctionFieldRaw()) {
case JR:
case JALR:
case BREAK:
case SLL:
case SRL:
case SRA:
case SLLV:
case SRLV:
case SRAV:
case MFHI:
case MFLO:
case MULT:
case MULTU:
case DIV:
case DIVU:
case ADD:
case ADDU:
case SUB:
case SUBU:
case AND:
case OR:
case XOR:
case NOR:
case SLT:
case SLTU:
case TGE:
case TGEU:
case TLT:
case TLTU:
case TEQ:
case TNE:
return kRegisterType;
default:
UNREACHABLE();
};
break;
case SPECIAL2:
switch (FunctionFieldRaw()) {
case MUL:
return kRegisterType;
default:
UNREACHABLE();
};
break;
case COP1: // Coprocessor instructions
switch (FunctionFieldRaw()) {
case BC1: // branch on coprocessor condition
return kImmediateType;
default:
return kRegisterType;
};
break;
// 16 bits Immediate type instructions. eg: addi dest, src, imm16
case REGIMM:
case BEQ:
case BNE:
case BLEZ:
case BGTZ:
case ADDI:
case ADDIU:
case SLTI:
case SLTIU:
case ANDI:
case ORI:
case XORI:
case LUI:
case BEQL:
case BNEL:
case BLEZL:
case BGTZL:
case LB:
case LW:
case LBU:
case SB:
case SW:
case LWC1:
case LDC1:
case SWC1:
case SDC1:
return kImmediateType;
// 26 bits immediate type instructions. eg: j imm26
case J:
case JAL:
return kJumpType;
default:
UNREACHABLE();
};
return kUnsupported;
}
} } // namespace assembler::mips

525
src/mips/constants-mips.h Normal file
View File

@ -0,0 +1,525 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_MIPS_CONSTANTS_H_
#define V8_MIPS_CONSTANTS_H_
#include "checks.h"
// UNIMPLEMENTED_ macro for MIPS.
#define UNIMPLEMENTED_MIPS() \
v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
__FILE__, __LINE__, __func__)
#define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
// Defines constants and accessor classes to assemble, disassemble and
// simulate MIPS32 instructions.
//
// See: MIPS32 Architecture For Programmers
// Volume II: The MIPS32 Instruction Set
// Try www.cs.cornell.edu/courses/cs3410/2008fa/MIPS_Vol2.pdf.
namespace assembler {
namespace mips {
// -----------------------------------------------------------------------------
// Registers and FPURegister.
// Number of general purpose registers.
static const int kNumRegisters = 32;
static const int kInvalidRegister = -1;
// Number of simulated registers: the 32 general purpose registers plus
// HI, LO and pc.
static const int kNumSimuRegisters = 35;
// In the simulator the pc is modelled as an extra register with number 34,
// the last of the 35 simulated registers.
static const int kPCRegister = 34;
// Number of coprocessor (FPU) registers.
static const int kNumFPURegister = 32;
static const int kInvalidFPURegister = -1;
// Helper functions for converting between register numbers and names.
class Registers {
public:
// Return the name of the register.
static const char* Name(int reg);
// Look up the register number for the name provided.
static int Number(const char* name);
struct RegisterAlias {
int reg;
const char *name;
};
static const int32_t kMaxValue = 0x7fffffff;
static const int32_t kMinValue = 0x80000000;
private:
static const char* names_[kNumSimuRegisters];
static const RegisterAlias aliases_[];
};
// Helper functions for converting between register numbers and names.
class FPURegister {
public:
// Return the name of the register.
static const char* Name(int reg);
// Look up the register number for the name provided.
static int Number(const char* name);
struct RegisterAlias {
int creg;
const char *name;
};
private:
static const char* names_[kNumFPURegister];
static const RegisterAlias aliases_[];
};
// -----------------------------------------------------------------------------
// Instructions encoding constants.
// On MIPS all instructions are 32 bits.
typedef int32_t Instr;
typedef unsigned char byte_;
// Special Software Interrupt codes when used in the presence of the MIPS
// simulator.
enum SoftwareInterruptCodes {
// Transition to C code.
call_rt_redirected = 0xfffff
};
// ----- Fields offset and length.
static const int kOpcodeShift = 26;
static const int kOpcodeBits = 6;
static const int kRsShift = 21;
static const int kRsBits = 5;
static const int kRtShift = 16;
static const int kRtBits = 5;
static const int kRdShift = 11;
static const int kRdBits = 5;
static const int kSaShift = 6;
static const int kSaBits = 5;
static const int kFunctionShift = 0;
static const int kFunctionBits = 6;
static const int kImm16Shift = 0;
static const int kImm16Bits = 16;
static const int kImm26Shift = 0;
static const int kImm26Bits = 26;
static const int kFsShift = 11;
static const int kFsBits = 5;
static const int kFtShift = 16;
static const int kFtBits = 5;
// ----- Miscellaneous useful masks.
// Instruction bit masks.
static const int kOpcodeMask = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
static const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift;
static const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift;
static const int kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift;
static const int kRtFieldMask = ((1 << kRtBits) - 1) << kRtShift;
static const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift;
static const int kSaFieldMask = ((1 << kSaBits) - 1) << kSaShift;
static const int kFunctionFieldMask =
((1 << kFunctionBits) - 1) << kFunctionShift;
// Misc masks.
static const int HIMask = 0xffff << 16;
static const int LOMask = 0xffff;
static const int signMask = 0x80000000;
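// Worked example, for illustration: "addu t0, t1, t2" encodes as 0x012a4021,
// i.e. opcode SPECIAL (0), rs = t1 (9), rt = t2 (10), rd = t0 (8), sa = 0 and
// function ADDU (0x21); the masks above isolate exactly these fields.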
// ----- MIPS Opcodes and Function Fields.
// We use this presentation to stay close to the table representation in
// MIPS32 Architecture For Programmers, Volume II: The MIPS32 Instruction Set.
enum Opcode {
SPECIAL = 0 << kOpcodeShift,
REGIMM = 1 << kOpcodeShift,
J = ((0 << 3) + 2) << kOpcodeShift,
JAL = ((0 << 3) + 3) << kOpcodeShift,
BEQ = ((0 << 3) + 4) << kOpcodeShift,
BNE = ((0 << 3) + 5) << kOpcodeShift,
BLEZ = ((0 << 3) + 6) << kOpcodeShift,
BGTZ = ((0 << 3) + 7) << kOpcodeShift,
ADDI = ((1 << 3) + 0) << kOpcodeShift,
ADDIU = ((1 << 3) + 1) << kOpcodeShift,
SLTI = ((1 << 3) + 2) << kOpcodeShift,
SLTIU = ((1 << 3) + 3) << kOpcodeShift,
ANDI = ((1 << 3) + 4) << kOpcodeShift,
ORI = ((1 << 3) + 5) << kOpcodeShift,
XORI = ((1 << 3) + 6) << kOpcodeShift,
LUI = ((1 << 3) + 7) << kOpcodeShift,
COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class
BEQL = ((2 << 3) + 4) << kOpcodeShift,
BNEL = ((2 << 3) + 5) << kOpcodeShift,
BLEZL = ((2 << 3) + 6) << kOpcodeShift,
BGTZL = ((2 << 3) + 7) << kOpcodeShift,
SPECIAL2 = ((3 << 3) + 4) << kOpcodeShift,
LB = ((4 << 3) + 0) << kOpcodeShift,
LW = ((4 << 3) + 3) << kOpcodeShift,
LBU = ((4 << 3) + 4) << kOpcodeShift,
SB = ((5 << 3) + 0) << kOpcodeShift,
SW = ((5 << 3) + 3) << kOpcodeShift,
LWC1 = ((6 << 3) + 1) << kOpcodeShift,
LDC1 = ((6 << 3) + 5) << kOpcodeShift,
SWC1 = ((7 << 3) + 1) << kOpcodeShift,
SDC1 = ((7 << 3) + 5) << kOpcodeShift
};
enum SecondaryField {
// SPECIAL Encoding of Function Field.
SLL = ((0 << 3) + 0),
SRL = ((0 << 3) + 2),
SRA = ((0 << 3) + 3),
SLLV = ((0 << 3) + 4),
SRLV = ((0 << 3) + 6),
SRAV = ((0 << 3) + 7),
JR = ((1 << 3) + 0),
JALR = ((1 << 3) + 1),
BREAK = ((1 << 3) + 5),
MFHI = ((2 << 3) + 0),
MFLO = ((2 << 3) + 2),
MULT = ((3 << 3) + 0),
MULTU = ((3 << 3) + 1),
DIV = ((3 << 3) + 2),
DIVU = ((3 << 3) + 3),
ADD = ((4 << 3) + 0),
ADDU = ((4 << 3) + 1),
SUB = ((4 << 3) + 2),
SUBU = ((4 << 3) + 3),
AND = ((4 << 3) + 4),
OR = ((4 << 3) + 5),
XOR = ((4 << 3) + 6),
NOR = ((4 << 3) + 7),
SLT = ((5 << 3) + 2),
SLTU = ((5 << 3) + 3),
TGE = ((6 << 3) + 0),
TGEU = ((6 << 3) + 1),
TLT = ((6 << 3) + 2),
TLTU = ((6 << 3) + 3),
TEQ = ((6 << 3) + 4),
TNE = ((6 << 3) + 6),
// SPECIAL2 Encoding of Function Field.
MUL = ((0 << 3) + 2),
// REGIMM encoding of rt Field.
BLTZ = ((0 << 3) + 0) << 16,
BGEZ = ((0 << 3) + 1) << 16,
BLTZAL = ((2 << 3) + 0) << 16,
BGEZAL = ((2 << 3) + 1) << 16,
// COP1 Encoding of rs Field.
MFC1 = ((0 << 3) + 0) << 21,
MFHC1 = ((0 << 3) + 3) << 21,
MTC1 = ((0 << 3) + 4) << 21,
MTHC1 = ((0 << 3) + 7) << 21,
BC1 = ((1 << 3) + 0) << 21,
S = ((2 << 3) + 0) << 21,
D = ((2 << 3) + 1) << 21,
W = ((2 << 3) + 4) << 21,
L = ((2 << 3) + 5) << 21,
PS = ((2 << 3) + 6) << 21,
// COP1 Encoding of Function Field When rs=S.
CVT_D_S = ((4 << 3) + 1),
CVT_W_S = ((4 << 3) + 4),
CVT_L_S = ((4 << 3) + 5),
CVT_PS_S = ((4 << 3) + 6),
// COP1 Encoding of Function Field When rs=D.
CVT_S_D = ((4 << 3) + 0),
CVT_W_D = ((4 << 3) + 4),
CVT_L_D = ((4 << 3) + 5),
// COP1 Encoding of Function Field When rs=W or L.
CVT_S_W = ((4 << 3) + 0),
CVT_D_W = ((4 << 3) + 1),
CVT_S_L = ((4 << 3) + 0),
CVT_D_L = ((4 << 3) + 1),
// COP1 Encoding of Function Field When rs=PS.
NULLSF = 0
};
// ----- Emulated conditions.
// On MIPS we use this enum to abstract from conditional branch instructions.
// The 'U' prefix is used to specify unsigned comparisons.
enum Condition {
// Any value < 0 is considered no_condition.
no_condition = -1,
overflow = 0,
no_overflow = 1,
Uless = 2,
Ugreater_equal= 3,
equal = 4,
not_equal = 5,
Uless_equal = 6,
Ugreater = 7,
negative = 8,
positive = 9,
parity_even = 10,
parity_odd = 11,
less = 12,
greater_equal = 13,
less_equal = 14,
greater = 15,
cc_always = 16,
// aliases
carry = Uless,
not_carry = Ugreater_equal,
zero = equal,
eq = equal,
not_zero = not_equal,
ne = not_equal,
sign = negative,
not_sign = positive,
cc_default = no_condition
};
// ----- Coprocessor conditions.
enum FPUCondition {
F, // False
UN, // Unordered
EQ, // Equal
UEQ, // Unordered or Equal
OLT, // Ordered or Less Than
ULT, // Unordered or Less Than
OLE, // Ordered or Less Than or Equal
ULE // Unordered or Less Than or Equal
};
// Break 0xfffff, reserved for redirected real time call.
const Instr rtCallRedirInstr = SPECIAL | BREAK | call_rt_redirected << 6;
// A nop instruction. (Encoding of sll 0 0 0).
const Instr nopInstr = 0;
class Instruction {
public:
enum {
kInstructionSize = 4,
kInstructionSizeLog2 = 2,
// On MIPS the PC cannot actually be directly accessed. We behave as if PC
// were always the value of the current instruction being executed.
kPCReadOffset = 0
};
// Get the raw instruction bits.
inline Instr InstructionBits() const {
return *reinterpret_cast<const Instr*>(this);
}
// Set the raw instruction bits to value.
inline void SetInstructionBits(Instr value) {
*reinterpret_cast<Instr*>(this) = value;
}
// Read one particular bit out of the instruction bits.
inline int Bit(int nr) const {
return (InstructionBits() >> nr) & 1;
}
// Read a bit field out of the instruction bits.
inline int Bits(int hi, int lo) const {
return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
}
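// For illustration: Bits(31, 26) extracts the 6-bit opcode field; applied to
// 0x012a4021 (addu t0, t1, t2), Bits(25, 21) yields 9, i.e. register t1.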
// Instruction type.
enum Type {
kRegisterType,
kImmediateType,
kJumpType,
kUnsupported = -1
};
// Get the encoding type of the instruction.
Type InstructionType() const;
// Accessors for the different named fields used in the MIPS encoding.
inline Opcode OpcodeField() const {
return static_cast<Opcode>(
Bits(kOpcodeShift + kOpcodeBits - 1, kOpcodeShift));
}
inline int RsField() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
return Bits(kRsShift + kRsBits - 1, kRsShift);
}
inline int RtField() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
return Bits(kRtShift + kRtBits - 1, kRtShift);
}
inline int RdField() const {
ASSERT(InstructionType() == kRegisterType);
return Bits(kRdShift + kRdBits - 1, kRdShift);
}
inline int SaField() const {
ASSERT(InstructionType() == kRegisterType);
return Bits(kSaShift + kSaBits - 1, kSaShift);
}
inline int FunctionField() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
return Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift);
}
inline int FsField() const {
return Bits(kFsShift + kFsBits - 1, kFsShift);
}
inline int FtField() const {
return Bits(kFtShift + kFtBits - 1, kFtShift);
}
// Return the fields at their original place in the instruction encoding.
inline Opcode OpcodeFieldRaw() const {
return static_cast<Opcode>(InstructionBits() & kOpcodeMask);
}
inline int RsFieldRaw() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
return InstructionBits() & kRsFieldMask;
}
inline int RtFieldRaw() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
return InstructionBits() & kRtFieldMask;
}
inline int RdFieldRaw() const {
ASSERT(InstructionType() == kRegisterType);
return InstructionBits() & kRdFieldMask;
}
inline int SaFieldRaw() const {
ASSERT(InstructionType() == kRegisterType);
return InstructionBits() & kSaFieldMask;
}
inline int FunctionFieldRaw() const {
return InstructionBits() & kFunctionFieldMask;
}
// Get the secondary field according to the opcode.
inline int SecondaryField() const {
Opcode op = OpcodeFieldRaw();
switch (op) {
case SPECIAL:
case SPECIAL2:
return FunctionField();
case COP1:
return RsField();
case REGIMM:
return RtField();
default:
return NULLSF;
}
}
inline int32_t Imm16Field() const {
ASSERT(InstructionType() == kImmediateType);
return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
}
inline int32_t Imm26Field() const {
ASSERT(InstructionType() == kJumpType);
return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
}
// Say if the instruction should not be used in a branch delay slot.
bool IsForbiddenInBranchDelay();
// Say if the instruction 'links'. eg: jal, bal.
bool IsLinkingInstruction();
// Say if the instruction is a break or a trap.
bool IsTrap();
// Instructions are read out of a code stream. The only way to get a
// reference to an instruction is to convert a pointer. There is no way
// to allocate or create instances of class Instruction.
// Use the At(pc) function to create references to Instruction.
static Instruction* At(byte_* pc) {
return reinterpret_cast<Instruction*>(pc);
}
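// Typical use, for illustration:
//   Instruction* instr = Instruction::At(pc);
//   if (instr->InstructionType() == Instruction::kJumpType) { /* ... */ }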
private:
// We need to prevent the creation of instances of class Instruction.
DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
};
// -----------------------------------------------------------------------------
// MIPS assembly various constants.
static const int kArgsSlotsSize = 4 * Instruction::kInstructionSize;
static const int kArgsSlotsNum = 4;
static const int kBranchReturnOffset = 2 * Instruction::kInstructionSize;
static const int kDoubleAlignment = 2 * 8;
static const int kDoubleAlignmentMask = kDoubleAlignment - 1;
} } // namespace assembler::mips
#endif // #ifndef V8_MIPS_CONSTANTS_H_

69
src/mips/cpu-mips.cc Normal file
View File

@ -0,0 +1,69 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// CPU specific code for MIPS independent of OS goes here.
#include <sys/syscall.h>
#include <unistd.h>
#ifdef __mips
#include <asm/cachectl.h>
#endif // #ifdef __mips
#include "v8.h"
#include "cpu.h"
namespace v8 {
namespace internal {
void CPU::Setup() {
// Nothing to do.
}
void CPU::FlushICache(void* start, size_t size) {
#ifdef __mips
int res;
// See http://www.linux-mips.org/wiki/Cacheflush_Syscall
res = syscall(__NR_cacheflush, start, size, ICACHE);
if (res) {
V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
}
#endif // #ifdef __mips
}
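// Hypothetical caller, for illustration: after patching generated code one
// would typically call
//   CPU::FlushICache(code->instruction_start(), code->instruction_size());
// so that the patched bytes become visible to instruction fetch.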
void CPU::DebugBreak() {
#ifdef __mips
asm volatile("break");
#endif // #ifdef __mips
}
} } // namespace v8::internal

112
src/mips/debug-mips.cc Normal file
View File

@ -0,0 +1,112 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen-inl.h"
#include "debug.h"
namespace v8 {
namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
bool BreakLocationIterator::IsDebugBreakAtReturn() {
return Debug::IsDebugBreakAtReturn(rinfo());
}
void BreakLocationIterator::SetDebugBreakAtReturn() {
UNIMPLEMENTED_MIPS();
}
// Restore the JS frame exit code.
void BreakLocationIterator::ClearDebugBreakAtReturn() {
UNIMPLEMENTED_MIPS();
}
// A debug break in the exit code is identified by a call.
bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
return rinfo->IsPatchedReturnSequence();
}
#define __ ACCESS_MASM(masm)
void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
#undef __
#endif // ENABLE_DEBUGGER_SUPPORT
} } // namespace v8::internal

784
src/mips/disasm-mips.cc Normal file
View File

@ -0,0 +1,784 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// A Disassembler object is used to disassemble a block of code instruction by
// instruction. The default implementation of the NameConverter object can be
// overridden to modify register names or to do symbol lookup on addresses.
//
// The example below will disassemble a block of code and print it to stdout.
//
// NameConverter converter;
// Disassembler d(converter);
// for (byte_* pc = begin; pc < end;) {
// v8::internal::EmbeddedVector<char, 128> buffer;
// buffer[0] = '\0';
// byte_* prev_pc = pc;
// pc += d.InstructionDecode(buffer, pc);
// printf("%p %08x %s\n",
// prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
// }
//
// The Disassembler class also has a convenience method to disassemble a block
// of code into a FILE*, meaning that the above functionality could also be
// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
#include <assert.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#ifndef WIN32
#include <stdint.h>
#endif
#include "v8.h"
#include "constants-mips.h"
#include "disasm.h"
#include "macro-assembler.h"
#include "platform.h"
namespace assembler {
namespace mips {
namespace v8i = v8::internal;
//------------------------------------------------------------------------------
// Decoder decodes and disassembles instructions into an output buffer.
// It uses the converter to convert register names and call destinations into
// more informative description.
class Decoder {
public:
Decoder(const disasm::NameConverter& converter,
v8::internal::Vector<char> out_buffer)
: converter_(converter),
out_buffer_(out_buffer),
out_buffer_pos_(0) {
out_buffer_[out_buffer_pos_] = '\0';
}
~Decoder() {}
// Writes one disassembled instruction into 'buffer' (0-terminated).
// Returns the length of the disassembled machine instruction in bytes.
int InstructionDecode(byte_* instruction);
private:
// Bottleneck functions to print into the out_buffer.
void PrintChar(const char ch);
void Print(const char* str);
// Printing of common values.
void PrintRegister(int reg);
void PrintCRegister(int creg);
void PrintRs(Instruction* instr);
void PrintRt(Instruction* instr);
void PrintRd(Instruction* instr);
void PrintFs(Instruction* instr);
void PrintFt(Instruction* instr);
void PrintFd(Instruction* instr);
void PrintSa(Instruction* instr);
void PrintFunction(Instruction* instr);
void PrintSecondaryField(Instruction* instr);
void PrintUImm16(Instruction* instr);
void PrintSImm16(Instruction* instr);
void PrintXImm16(Instruction* instr);
void PrintImm26(Instruction* instr);
void PrintCode(Instruction* instr); // For break and trap instructions.
// Printing of instruction name.
void PrintInstructionName(Instruction* instr);
// Handle formatting of instructions and their options.
int FormatRegister(Instruction* instr, const char* option);
int FormatCRegister(Instruction* instr, const char* option);
int FormatOption(Instruction* instr, const char* option);
void Format(Instruction* instr, const char* format);
void Unknown(Instruction* instr);
// Each of these functions decodes one particular instruction type.
void DecodeTypeRegister(Instruction* instr);
void DecodeTypeImmediate(Instruction* instr);
void DecodeTypeJump(Instruction* instr);
const disasm::NameConverter& converter_;
v8::internal::Vector<char> out_buffer_;
int out_buffer_pos_;
DISALLOW_COPY_AND_ASSIGN(Decoder);
};
// Support for assertions in the Decoder formatting functions.
#define STRING_STARTS_WITH(string, compare_string) \
(strncmp(string, compare_string, strlen(compare_string)) == 0)
// Append the ch to the output buffer.
void Decoder::PrintChar(const char ch) {
out_buffer_[out_buffer_pos_++] = ch;
}
// Append the str to the output buffer.
void Decoder::Print(const char* str) {
char cur = *str++;
while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
PrintChar(cur);
cur = *str++;
}
out_buffer_[out_buffer_pos_] = 0;
}
// Print the register name according to the active name converter.
void Decoder::PrintRegister(int reg) {
Print(converter_.NameOfCPURegister(reg));
}
void Decoder::PrintRs(Instruction* instr) {
int reg = instr->RsField();
PrintRegister(reg);
}
void Decoder::PrintRt(Instruction* instr) {
int reg = instr->RtField();
PrintRegister(reg);
}
void Decoder::PrintRd(Instruction* instr) {
int reg = instr->RdField();
PrintRegister(reg);
}
// Print the Cregister name according to the active name converter.
void Decoder::PrintCRegister(int creg) {
Print(converter_.NameOfXMMRegister(creg));
}
void Decoder::PrintFs(Instruction* instr) {
int creg = instr->RsField();
PrintCRegister(creg);
}
void Decoder::PrintFt(Instruction* instr) {
int creg = instr->RtField();
PrintCRegister(creg);
}
void Decoder::PrintFd(Instruction* instr) {
int creg = instr->RdField();
PrintCRegister(creg);
}
// Print the integer value of the sa field.
void Decoder::PrintSa(Instruction* instr) {
int sa = instr->SaField();
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d", sa);
}
// Print 16-bit unsigned immediate value.
void Decoder::PrintUImm16(Instruction* instr) {
int32_t imm = instr->Imm16Field();
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%u", imm);
}
// Print 16-bit signed immediate value.
void Decoder::PrintSImm16(Instruction* instr) {
int32_t imm = ((instr->Imm16Field())<<16)>>16;
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d", imm);
}
// Print 16-bit hexadecimal immediate value.
void Decoder::PrintXImm16(Instruction* instr) {
int32_t imm = instr->Imm16Field();
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"0x%x", imm);
}
// Print 26-bit immediate value.
void Decoder::PrintImm26(Instruction* instr) {
int32_t imm = instr->Imm26Field();
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d", imm);
}
// Print the 'code' field of break and trap instructions.
void Decoder::PrintCode(Instruction* instr) {
if (instr->OpcodeFieldRaw() != SPECIAL)
return; // Not a break or trap instruction.
switch (instr->FunctionFieldRaw()) {
case BREAK: {
int32_t code = instr->Bits(25, 6);
out_buffer_pos_ +=
v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%05x", code);
break;
}
case TGE:
case TGEU:
case TLT:
case TLTU:
case TEQ:
case TNE: {
int32_t code = instr->Bits(15, 6);
out_buffer_pos_ +=
v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code);
break;
}
default: // Not a break or trap instruction.
break;
};
}
// Printing of instruction name.
void Decoder::PrintInstructionName(Instruction* instr) {
}
// Handle all register based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatRegister(Instruction* instr, const char* format) {
ASSERT(format[0] == 'r');
if (format[1] == 's') { // 'rs: Rs register
int reg = instr->RsField();
PrintRegister(reg);
return 2;
} else if (format[1] == 't') { // 'rt: rt register
int reg = instr->RtField();
PrintRegister(reg);
return 2;
} else if (format[1] == 'd') { // 'rd: rd register
int reg = instr->RdField();
PrintRegister(reg);
return 2;
}
UNREACHABLE();
return -1;
}
// Handle all Cregister based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatCRegister(Instruction* instr, const char* format) {
ASSERT(format[0] == 'f');
if (format[1] == 's') { // 'fs: fs register
int reg = instr->RsField();
PrintCRegister(reg);
return 2;
} else if (format[1] == 't') { // 'ft: ft register
int reg = instr->RtField();
PrintCRegister(reg);
return 2;
} else if (format[1] == 'd') { // 'fd: fd register
int reg = instr->RdField();
PrintCRegister(reg);
return 2;
}
UNREACHABLE();
return -1;
}
// FormatOption takes a formatting string and interprets it based on
// the current instructions. The format string points to the first
// character of the option string (the option escape has already been
// consumed by the caller.) FormatOption returns the number of
// characters that were consumed from the formatting string.
int Decoder::FormatOption(Instruction* instr, const char* format) {
switch (format[0]) {
case 'c': { // 'code for break or trap instructions
ASSERT(STRING_STARTS_WITH(format, "code"));
PrintCode(instr);
return 4;
}
case 'i': { // 'imm16u or 'imm26
if (format[3] == '1') {
ASSERT(STRING_STARTS_WITH(format, "imm16"));
if (format[5] == 's') {
ASSERT(STRING_STARTS_WITH(format, "imm16s"));
PrintSImm16(instr);
} else if (format[5] == 'u') {
ASSERT(STRING_STARTS_WITH(format, "imm16u"));
PrintUImm16(instr);
} else {
ASSERT(STRING_STARTS_WITH(format, "imm16x"));
PrintXImm16(instr);
}
return 6;
} else {
ASSERT(STRING_STARTS_WITH(format, "imm26"));
PrintImm26(instr);
return 5;
}
}
case 'r': { // 'r: registers
return FormatRegister(instr, format);
}
case 'f': { // 'f: Cregisters
return FormatCRegister(instr, format);
}
case 's': { // 'sa
ASSERT(STRING_STARTS_WITH(format, "sa"));
PrintSa(instr);
return 2;
}
};
UNREACHABLE();
return -1;
}
// Format takes a formatting string for a whole instruction and prints it into
// the output buffer. All escaped options are handed to FormatOption to be
// parsed further.
void Decoder::Format(Instruction* instr, const char* format) {
char cur = *format++;
while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
if (cur == '\'') { // Single quote is used as the formatting escape.
format += FormatOption(instr, format);
} else {
out_buffer_[out_buffer_pos_++] = cur;
}
cur = *format++;
}
out_buffer_[out_buffer_pos_] = '\0';
}
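// For illustration: Format(instr, "addu 'rd, 'rs, 'rt") copies "addu " and the
// separators verbatim and hands "rd...", "rs..." and "rt..." to FormatOption,
// which prints the corresponding register names, e.g. "addu t0, t1, t2".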
// For currently unimplemented decodings the disassembler calls Unknown(instr),
// which will just print "unknown" instead of decoding the instruction bits.
void Decoder::Unknown(Instruction* instr) {
Format(instr, "unknown");
}
void Decoder::DecodeTypeRegister(Instruction* instr) {
switch (instr->OpcodeFieldRaw()) {
case COP1: // Coprocessor instructions
switch (instr->RsFieldRaw()) {
case BC1: // branch on coprocessor condition
UNREACHABLE();
break;
case MFC1:
Format(instr, "mfc1 'rt, 'fs");
break;
case MFHC1:
Format(instr, "mfhc1 rt, 'fs");
break;
case MTC1:
Format(instr, "mtc1 'rt, 'fs");
break;
case MTHC1:
Format(instr, "mthc1 rt, 'fs");
break;
case S:
case D:
UNIMPLEMENTED_MIPS();
break;
case W:
switch (instr->FunctionFieldRaw()) {
case CVT_S_W:
UNIMPLEMENTED_MIPS();
break;
case CVT_D_W: // Convert word to double.
Format(instr, "cvt.d.w 'fd, 'fs");
break;
default:
UNREACHABLE();
};
break;
case L:
case PS:
UNIMPLEMENTED_MIPS();
break;
default:
UNREACHABLE();
};
break;
case SPECIAL:
switch (instr->FunctionFieldRaw()) {
case JR:
Format(instr, "jr 'rs");
break;
case JALR:
Format(instr, "jalr 'rs");
break;
case SLL:
if (0x0 == static_cast<int>(instr->InstructionBits()))
Format(instr, "nop");
else
Format(instr, "sll 'rd, 'rt, 'sa");
break;
case SRL:
Format(instr, "srl 'rd, 'rt, 'sa");
break;
case SRA:
Format(instr, "sra 'rd, 'rt, 'sa");
break;
case SLLV:
Format(instr, "sllv 'rd, 'rt, 'rs");
break;
case SRLV:
Format(instr, "srlv 'rd, 'rt, 'rs");
break;
case SRAV:
Format(instr, "srav 'rd, 'rt, 'rs");
break;
case MFHI:
Format(instr, "mfhi 'rd");
break;
case MFLO:
Format(instr, "mflo 'rd");
break;
case MULT:
Format(instr, "mult 'rs, 'rt");
break;
case MULTU:
Format(instr, "multu 'rs, 'rt");
break;
case DIV:
Format(instr, "div 'rs, 'rt");
break;
case DIVU:
Format(instr, "divu 'rs, 'rt");
break;
case ADD:
Format(instr, "add 'rd, 'rs, 'rt");
break;
case ADDU:
Format(instr, "addu 'rd, 'rs, 'rt");
break;
case SUB:
Format(instr, "sub 'rd, 'rs, 'rt");
break;
case SUBU:
Format(instr, "sub 'rd, 'rs, 'rt");
break;
case AND:
Format(instr, "and 'rd, 'rs, 'rt");
break;
case OR:
if (0 == instr->RsField()) {
Format(instr, "mov 'rd, 'rt");
} else if (0 == instr->RtField()) {
Format(instr, "mov 'rd, 'rs");
} else {
Format(instr, "or 'rd, 'rs, 'rt");
}
break;
case XOR:
Format(instr, "xor 'rd, 'rs, 'rt");
break;
case NOR:
Format(instr, "nor 'rd, 'rs, 'rt");
break;
case SLT:
Format(instr, "slt 'rd, 'rs, 'rt");
break;
case SLTU:
Format(instr, "sltu 'rd, 'rs, 'rt");
break;
case BREAK:
Format(instr, "break, code: 'code");
break;
case TGE:
Format(instr, "tge 'rs, 'rt, code: 'code");
break;
case TGEU:
Format(instr, "tgeu 'rs, 'rt, code: 'code");
break;
case TLT:
Format(instr, "tlt 'rs, 'rt, code: 'code");
break;
case TLTU:
Format(instr, "tltu 'rs, 'rt, code: 'code");
break;
case TEQ:
Format(instr, "teq 'rs, 'rt, code: 'code");
break;
case TNE:
Format(instr, "tne 'rs, 'rt, code: 'code");
break;
default:
UNREACHABLE();
};
break;
case SPECIAL2:
switch (instr->FunctionFieldRaw()) {
case MUL:
Format(instr, "mul 'rd, 'rs, 'rt");
break;
default:
UNREACHABLE();
};
break;
default:
UNREACHABLE();
};
}
void Decoder::DecodeTypeImmediate(Instruction* instr) {
switch (instr->OpcodeFieldRaw()) {
// ------------- REGIMM class.
case REGIMM:
switch (instr->RtFieldRaw()) {
case BLTZ:
Format(instr, "bltz 'rs, 'imm16u");
break;
case BLTZAL:
Format(instr, "bltzal 'rs, 'imm16u");
break;
case BGEZ:
Format(instr, "bgez 'rs, 'imm16u");
break;
case BGEZAL:
Format(instr, "bgezal 'rs, 'imm16u");
break;
default:
UNREACHABLE();
};
break; // case REGIMM
// ------------- Branch instructions.
case BEQ:
Format(instr, "beq 'rs, 'rt, 'imm16u");
break;
case BNE:
Format(instr, "bne 'rs, 'rt, 'imm16u");
break;
case BLEZ:
Format(instr, "blez 'rs, 'imm16u");
break;
case BGTZ:
Format(instr, "bgtz 'rs, 'imm16u");
break;
// ------------- Arithmetic instructions.
case ADDI:
Format(instr, "addi 'rt, 'rs, 'imm16s");
break;
case ADDIU:
Format(instr, "addiu 'rt, 'rs, 'imm16s");
break;
case SLTI:
Format(instr, "slti 'rt, 'rs, 'imm16s");
break;
case SLTIU:
Format(instr, "sltiu 'rt, 'rs, 'imm16u");
break;
case ANDI:
Format(instr, "andi 'rt, 'rs, 'imm16x");
break;
case ORI:
Format(instr, "ori 'rt, 'rs, 'imm16x");
break;
case XORI:
Format(instr, "xori 'rt, 'rs, 'imm16x");
break;
case LUI:
Format(instr, "lui 'rt, 'imm16x");
break;
// ------------- Memory instructions.
case LB:
Format(instr, "lb 'rt, 'imm16s('rs)");
break;
case LW:
Format(instr, "lw 'rt, 'imm16s('rs)");
break;
case LBU:
Format(instr, "lbu 'rt, 'imm16s('rs)");
break;
case SB:
Format(instr, "sb 'rt, 'imm16s('rs)");
break;
case SW:
Format(instr, "sw 'rt, 'imm16s('rs)");
break;
case LWC1:
Format(instr, "lwc1 'ft, 'imm16s('rs)");
break;
case LDC1:
Format(instr, "ldc1 'ft, 'imm16s('rs)");
break;
case SWC1:
Format(instr, "swc1 'rt, 'imm16s('fs)");
break;
case SDC1:
Format(instr, "sdc1 'rt, 'imm16s('fs)");
break;
default:
UNREACHABLE();
break;
};
}
void Decoder::DecodeTypeJump(Instruction* instr) {
switch (instr->OpcodeFieldRaw()) {
case J:
Format(instr, "j 'imm26");
break;
case JAL:
Format(instr, "jal 'imm26");
break;
default:
UNREACHABLE();
}
}
// Disassemble the instruction at *instr_ptr into the output buffer.
int Decoder::InstructionDecode(byte_* instr_ptr) {
Instruction* instr = Instruction::At(instr_ptr);
// Print raw instruction bytes.
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%08x ",
instr->InstructionBits());
switch (instr->InstructionType()) {
case Instruction::kRegisterType: {
DecodeTypeRegister(instr);
break;
}
case Instruction::kImmediateType: {
DecodeTypeImmediate(instr);
break;
}
case Instruction::kJumpType: {
DecodeTypeJump(instr);
break;
}
default: {
UNSUPPORTED_MIPS();
}
}
return Instruction::kInstructionSize;
}
} } // namespace assembler::mips
//------------------------------------------------------------------------------
namespace disasm {
namespace v8i = v8::internal;
const char* NameConverter::NameOfAddress(byte_* addr) const {
static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr);
return tmp_buffer.start();
}
const char* NameConverter::NameOfConstant(byte_* addr) const {
return NameOfAddress(addr);
}
const char* NameConverter::NameOfCPURegister(int reg) const {
return assembler::mips::Registers::Name(reg);
}
const char* NameConverter::NameOfXMMRegister(int reg) const {
return assembler::mips::FPURegister::Name(reg);
}
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // MIPS does not have the concept of a byte register
return "nobytereg";
}
const char* NameConverter::NameInCode(byte_* addr) const {
// The default name converter is called for unknown code, so we will not try
// to access any memory.
return "";
}
//------------------------------------------------------------------------------
Disassembler::Disassembler(const NameConverter& converter)
: converter_(converter) {}
Disassembler::~Disassembler() {}
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
byte_* instruction) {
assembler::mips::Decoder d(converter_, buffer);
return d.InstructionDecode(instruction);
}
int Disassembler::ConstantPoolSizeAt(byte_* instruction) {
UNIMPLEMENTED_MIPS();
return -1;
}
void Disassembler::Disassemble(FILE* f, byte_* begin, byte_* end) {
NameConverter converter;
Disassembler d(converter);
for (byte_* pc = begin; pc < end;) {
v8::internal::EmbeddedVector<char, 128> buffer;
buffer[0] = '\0';
byte_* prev_pc = pc;
pc += d.InstructionDecode(buffer, pc);
fprintf(f, "%p %08x %s\n",
prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
}
}
#undef UNSUPPORTED
} // namespace disasm

56
src/mips/fast-codegen-mips.cc Normal file
View File

@ -0,0 +1,56 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen-inl.h"
#include "fast-codegen.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
void FastCodeGenerator::Generate(CompilationInfo* info) {
UNIMPLEMENTED_MIPS();
}
void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
UNIMPLEMENTED_MIPS();
}
void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
UNIMPLEMENTED_MIPS();
}
#undef __
} } // namespace v8::internal

100
src/mips/frames-mips.cc Normal file
View File

@ -0,0 +1,100 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "frames-inl.h"
#include "mips/assembler-mips-inl.h"
namespace v8 {
namespace internal {
StackFrame::Type StackFrame::ComputeType(State* state) {
ASSERT(state->fp != NULL);
if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
return ARGUMENTS_ADAPTOR;
}
// The marker and function offsets overlap. If the marker isn't a
// smi then the frame is a JavaScript frame -- and the marker is
// really the function.
const int offset = StandardFrameConstants::kMarkerOffset;
Object* marker = Memory::Object_at(state->fp + offset);
if (!marker->IsSmi()) return JAVA_SCRIPT;
return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
}
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
if (fp == 0) return NONE;
// Compute frame type and stack pointer.
Address sp = fp + ExitFrameConstants::kSPDisplacement;
const int offset = ExitFrameConstants::kCodeOffset;
Object* code = Memory::Object_at(fp + offset);
bool is_debug_exit = code->IsSmi();
if (is_debug_exit) {
sp -= kNumJSCallerSaved * kPointerSize;
}
// Fill in the state.
state->sp = sp;
state->fp = fp;
state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
return EXIT;
}
void ExitFrame::Iterate(ObjectVisitor* v) const {
// Do nothing
}
int JavaScriptFrame::GetProvidedParametersCount() const {
return ComputeParametersCount();
}
Address JavaScriptFrame::GetCallerStackPointer() const {
UNIMPLEMENTED_MIPS();
return static_cast<Address>(NULL); // UNIMPLEMENTED RETURN
}
Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
UNIMPLEMENTED_MIPS();
return static_cast<Address>(NULL); // UNIMPLEMENTED RETURN
}
Address InternalFrame::GetCallerStackPointer() const {
UNIMPLEMENTED_MIPS();
return static_cast<Address>(NULL); // UNIMPLEMENTED RETURN
}
} } // namespace v8::internal

164
src/mips/frames-mips.h Normal file
View File

@ -0,0 +1,164 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_MIPS_FRAMES_MIPS_H_
#define V8_MIPS_FRAMES_MIPS_H_
namespace v8 {
namespace internal {
// Register lists.
// Note that the bit values must match those used in actual instruction
// encoding.
static const int kNumRegs = 32;
static const RegList kJSCallerSaved =
1 << 4 | // a0
1 << 5 | // a1
1 << 6 | // a2
1 << 7; // a3
static const int kNumJSCallerSaved = 4;
// Return the code of the n-th caller-saved register available to JavaScript,
// e.g. JSCallerSavedCode(0) returns a0.code() == 4.
int JSCallerSavedCode(int n);
// Callee-saved registers preserved when switching from C to JavaScript.
static const RegList kCalleeSaved =
// Saved temporaries.
1 << 16 | 1 << 17 | 1 << 18 | 1 << 19 |
1 << 20 | 1 << 21 | 1 << 22 | 1 << 23 |
// gp, sp, fp
1 << 28 | 1 << 29 | 1 << 30;
static const int kNumCalleeSaved = 11;
typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
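// Editor's sketch (not part of the original commit): each bit position in a
// RegList is the MIPS register number, so a0 (register 4) is bit 4. A
// plausible implementation of the JSCallerSavedCode declaration above would
// return the code of the n-th set bit:
//
//   int JSCallerSavedCode(int n) {
//     int order = 0;
//     for (int code = 0; code < kNumRegs; code++) {
//       if ((kJSCallerSaved & (1 << code)) != 0) {
//         if (order == n) return code;  // JSCallerSavedCode(0) == 4 (a0).
//         order++;
//       }
//     }
//     UNREACHABLE();
//     return -1;
//   }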
// ----------------------------------------------------
class StackHandlerConstants : public AllStatic {
public:
static const int kNextOffset = 0 * kPointerSize;
static const int kStateOffset = 1 * kPointerSize;
static const int kFPOffset = 2 * kPointerSize;
static const int kPCOffset = 3 * kPointerSize;
static const int kSize = kPCOffset + kPointerSize;
};
class EntryFrameConstants : public AllStatic {
public:
static const int kCallerFPOffset = -3 * kPointerSize;
};
class ExitFrameConstants : public AllStatic {
public:
// Exit frames have a debug marker on the stack.
static const int kSPDisplacement = -1 * kPointerSize;
// The debug marker is just above the frame pointer.
static const int kDebugMarkOffset = -1 * kPointerSize;
// Must be the same as kDebugMarkOffset. Alias introduced when upgrading.
static const int kCodeOffset = -1 * kPointerSize;
static const int kSavedRegistersOffset = 0 * kPointerSize;
// The caller fields are below the frame pointer on the stack.
static const int kCallerFPOffset = +0 * kPointerSize;
// The calling JS function is between FP and PC.
static const int kCallerPCOffset = +1 * kPointerSize;
// FP-relative displacement of the caller's SP.
static const int kCallerSPDisplacement = +4 * kPointerSize;
};
class StandardFrameConstants : public AllStatic {
public:
static const int kExpressionsOffset = -3 * kPointerSize;
static const int kMarkerOffset = -2 * kPointerSize;
static const int kContextOffset = -1 * kPointerSize;
static const int kCallerFPOffset = 0 * kPointerSize;
static const int kCallerPCOffset = +1 * kPointerSize;
static const int kCallerSPOffset = +2 * kPointerSize;
// Size of the four 32-bit MIPS argument slots.
// kRArgsSlotsSize is just a shorter alias for kRegularArgsSlotsSize below;
// use it from now on.
static const int kRArgsSlotsSize = 4 * kPointerSize;
static const int kRegularArgsSlotsSize = kRArgsSlotsSize;
// C/C++ argument slots size.
static const int kCArgsSlotsSize = 4 * kPointerSize;
// JS argument slots size.
static const int kJSArgsSlotsSize = 0 * kPointerSize;
};
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
static const int kSavedRegistersOffset = +2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
// Caller SP-relative.
static const int kParam0Offset = -2 * kPointerSize;
static const int kReceiverOffset = -1 * kPointerSize;
};
class ArgumentsAdaptorFrameConstants : public AllStatic {
public:
static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
};
class InternalFrameConstants : public AllStatic {
public:
static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
};
inline Object* JavaScriptFrame::function_slot_object() const {
const int offset = JavaScriptFrameConstants::kFunctionOffset;
return Memory::Object_at(fp() + offset);
}
} } // namespace v8::internal
#endif // V8_MIPS_FRAMES_MIPS_H_

View File

@ -0,0 +1,268 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
#include "full-codegen.h"
#include "parser.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitReturnSequence(int position) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::ApplyTOS(Expression::Context context) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::DropAndApply(int count,
Expression::Context context,
Register reg) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::Apply(Expression::Context context,
Label* materialize_true,
Label* materialize_false) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::DoTest(Expression::Context context) {
UNIMPLEMENTED_MIPS();
}
MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
UNIMPLEMENTED_MIPS();
return MemOperand(zero_reg, 0); // UNIMPLEMENTED RETURN
}
void FullCodeGenerator::Move(Register destination, Slot* source) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::Move(Slot* dst,
Register src,
Register scratch1,
Register scratch2) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitVariableLoad(Variable* var,
Expression::Context context) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitBinaryOp(Token::Value op,
Expression::Context context) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Expression::Context context) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitProperty(Property* expr) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitCallWithIC(Call* expr,
Handle<Object> ignored,
RelocInfo::Mode mode) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitCallWithStub(Call* expr) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitCall(Call* expr) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitCallNew(CallNew* expr) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
UNIMPLEMENTED_MIPS();
}
Register FullCodeGenerator::result_register() { return v0; }
Register FullCodeGenerator::context_register() { return cp; }
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
UNIMPLEMENTED_MIPS();
}
// ----------------------------------------------------------------------------
// Non-local control flow support.
void FullCodeGenerator::EnterFinallyBlock() {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::ExitFinallyBlock() {
UNIMPLEMENTED_MIPS();
}
#undef __
} } // namespace v8::internal

187
src/mips/ic-mips.cc Normal file
View File

@ -0,0 +1,187 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen-inl.h"
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"
namespace v8 {
namespace internal {
// ----------------------------------------------------------------------------
// Static IC stub generators.
//
#define __ ACCESS_MASM(masm)
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void LoadIC::GenerateStringLength(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
// Defined in ic.cc.
Object* CallIC_Miss(Arguments args);
void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
UNIMPLEMENTED_MIPS();
}
void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
UNIMPLEMENTED_MIPS();
}
void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
UNIMPLEMENTED_MIPS();
}
// Defined in ic.cc.
Object* LoadIC_Miss(Arguments args);
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void LoadIC::GenerateNormal(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void LoadIC::GenerateMiss(MacroAssembler* masm) {
Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
}
void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
UNIMPLEMENTED_MIPS();
}
void LoadIC::ClearInlinedVersion(Address address) {}
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
return false;
}
void KeyedLoadIC::ClearInlinedVersion(Address address) {}
bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
return false;
}
void KeyedStoreIC::ClearInlinedVersion(Address address) {}
void KeyedStoreIC::RestoreInlinedVersion(Address address) {}
bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
return false;
}
Object* KeyedLoadIC_Miss(Arguments args);
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
}
void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
UNIMPLEMENTED_MIPS();
}
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
UNIMPLEMENTED_MIPS();
}
void KeyedStoreIC::Generate(MacroAssembler* masm,
const ExternalReference& f) {
UNIMPLEMENTED_MIPS();
}
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
UNIMPLEMENTED_MIPS();
}
void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void StoreIC::GenerateMiss(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
#undef __
} } // namespace v8::internal

View File

@ -0,0 +1,87 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen-inl.h"
#include "jump-target-inl.h"
#include "register-allocator-inl.h"
namespace v8 {
namespace internal {
// -------------------------------------------------------------------------
// JumpTarget implementation.
#define __ ACCESS_MASM(cgen()->masm())
void JumpTarget::DoJump() {
UNIMPLEMENTED_MIPS();
}
void JumpTarget::DoBranch(Condition cc, Hint ignored) {
UNIMPLEMENTED_MIPS();
}
void JumpTarget::Call() {
UNIMPLEMENTED_MIPS();
}
void JumpTarget::DoBind() {
UNIMPLEMENTED_MIPS();
}
void BreakTarget::Jump() {
UNIMPLEMENTED_MIPS();
}
void BreakTarget::Jump(Result* arg) {
UNIMPLEMENTED_MIPS();
}
void BreakTarget::Bind() {
UNIMPLEMENTED_MIPS();
}
void BreakTarget::Bind(Result* arg) {
UNIMPLEMENTED_MIPS();
}
#undef __
} } // namespace v8::internal

View File

@ -0,0 +1,895 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "debug.h"
#include "runtime.h"
namespace v8 {
namespace internal {
MacroAssembler::MacroAssembler(void* buffer, int size)
: Assembler(buffer, size),
unresolved_(0),
generating_stub_(false),
allow_stub_calls_(true),
code_object_(Heap::undefined_value()) {
}
void MacroAssembler::Jump(Register target, Condition cond,
Register r1, const Operand& r2) {
Jump(Operand(target), cond, r1, r2);
}
void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond, Register r1, const Operand& r2) {
Jump(Operand(target), cond, r1, r2);
}
void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
Condition cond, Register r1, const Operand& r2) {
ASSERT(!RelocInfo::IsCodeTarget(rmode));
Jump(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2);
}
void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, Register r1, const Operand& r2) {
ASSERT(RelocInfo::IsCodeTarget(rmode));
Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
}
void MacroAssembler::Call(Register target,
Condition cond, Register r1, const Operand& r2) {
Call(Operand(target), cond, r1, r2);
}
void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
Condition cond, Register r1, const Operand& r2) {
Call(Operand(target), cond, r1, r2);
}
void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode,
Condition cond, Register r1, const Operand& r2) {
ASSERT(!RelocInfo::IsCodeTarget(rmode));
Call(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2);
}
void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, Register r1, const Operand& r2) {
ASSERT(RelocInfo::IsCodeTarget(rmode));
Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
}
void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2) {
Jump(Operand(ra), cond, r1, r2);
}
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index) {
lw(destination, MemOperand(s4, index << kPointerSizeLog2));
}
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index,
Condition cond,
Register src1, const Operand& src2) {
Branch(NegateCondition(cond), 2, src1, src2);
nop();
lw(destination, MemOperand(s4, index << kPointerSizeLog2));
}
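// Illustrative usage (editor's addition, not part of the original commit),
// assuming the root indices declared in heap.h: the roots array pointer lives
// in s4, so loading a root is a single lw from that table, e.g.
//   LoadRoot(t0, Heap::kUndefinedValueRootIndex);
//   LoadRoot(t1, Heap::kTrueValueRootIndex, eq, t2, Operand(zero_reg));
// The second form only performs the load when the condition holds.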
void MacroAssembler::RecordWrite(Register object, Register offset,
Register scratch) {
UNIMPLEMENTED_MIPS();
}
// ---------------------------------------------------------------------------
// Instruction macros
void MacroAssembler::Add(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
add(rd, rs, rt.rm());
} else {
if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
addi(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
add(rd, rs, at);
}
}
}
void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
addu(rd, rs, rt.rm());
} else {
if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
addiu(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
addu(rd, rs, at);
}
}
}
void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
mul(rd, rs, rt.rm());
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
mul(rd, rs, at);
}
}
void MacroAssembler::Mult(Register rs, const Operand& rt) {
if (rt.is_reg()) {
mult(rs, rt.rm());
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
mult(rs, at);
}
}
void MacroAssembler::Multu(Register rs, const Operand& rt) {
if (rt.is_reg()) {
multu(rs, rt.rm());
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
multu(rs, at);
}
}
void MacroAssembler::Div(Register rs, const Operand& rt) {
if (rt.is_reg()) {
div(rs, rt.rm());
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
div(rs, at);
}
}
void MacroAssembler::Divu(Register rs, const Operand& rt) {
if (rt.is_reg()) {
divu(rs, rt.rm());
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
divu(rs, at);
}
}
void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
and_(rd, rs, rt.rm());
} else {
if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
andi(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
and_(rd, rs, at);
}
}
}
void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
or_(rd, rs, rt.rm());
} else {
if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
ori(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
or_(rd, rs, at);
}
}
}
void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
xor_(rd, rs, rt.rm());
} else {
if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
xori(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
xor_(rd, rs, at);
}
}
}
void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
nor(rd, rs, rt.rm());
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
nor(rd, rs, at);
}
}
void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
slt(rd, rs, rt.rm());
} else {
if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
slti(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
slt(rd, rs, at);
}
}
}
void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sltu(rd, rs, rt.rm());
} else {
if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
sltiu(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
sltu(rd, rs, at);
}
}
}
//------------Pseudo-instructions-------------
void MacroAssembler::movn(Register rd, Register rt) {
addiu(at, zero_reg, -1); // Fill at with ones.
xor_(rd, rt, at);
}
// Load a 32-bit immediate word into a register.
void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
ASSERT(!j.is_reg());
if (!MustUseAt(j.rmode_) && !gen2instr) {
// Normal load of an immediate value which does not need Relocation Info.
if (is_int16(j.imm32_)) {
addiu(rd, zero_reg, j.imm32_);
} else if (!(j.imm32_ & HIMask)) {
ori(rd, zero_reg, j.imm32_);
} else if (!(j.imm32_ & LOMask)) {
lui(rd, (HIMask & j.imm32_) >> 16);
} else {
lui(rd, (HIMask & j.imm32_) >> 16);
ori(rd, rd, (LOMask & j.imm32_));
}
} else if (MustUseAt(j.rmode_) || gen2instr) {
if (MustUseAt(j.rmode_)) {
RecordRelocInfo(j.rmode_, j.imm32_);
}
// We always need the same number of instructions because we may need to patch
// this code later to load another value, which may need 2 instructions.
if (is_int16(j.imm32_)) {
nop();
addiu(rd, zero_reg, j.imm32_);
} else if (!(j.imm32_ & HIMask)) {
nop();
ori(rd, zero_reg, j.imm32_);
} else if (!(j.imm32_ & LOMask)) {
nop();
lui(rd, (HIMask & j.imm32_) >> 16);
} else {
lui(rd, (HIMask & j.imm32_) >> 16);
ori(rd, rd, (LOMask & j.imm32_));
}
}
}
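// Worked example (editor's addition, not part of the original commit):
// with no relocation info, li(t0, 0x12345678) emits
//   lui t0, 0x1234       // upper 16 bits
//   ori t0, t0, 0x5678   // lower 16 bits
// while li(t0, 42) collapses to a single addiu(t0, zero_reg, 42). With
// gen2instr == true the single-instruction forms are preceded by a nop, so the
// two-instruction slot can later be patched with any 32-bit value.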
// Exception-generating instructions and debugging support
void MacroAssembler::stop(const char* msg) {
// TO_UPGRADE: Just a break for now. Maybe we could upgrade it.
// We use the 0x54321 value to be able to find it easily when reading memory.
break_(0x54321);
}
void MacroAssembler::MultiPush(RegList regs) {
int16_t NumSaved = 0;
int16_t NumToPush = NumberOfBitsSet(regs);
addiu(sp, sp, -4 * NumToPush);
for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
}
}
}
void MacroAssembler::MultiPushReversed(RegList regs) {
int16_t NumSaved = 0;
int16_t NumToPush = NumberOfBitsSet(regs);
addiu(sp, sp, -4 * NumToPush);
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
}
}
}
void MacroAssembler::MultiPop(RegList regs) {
int16_t NumSaved = 0;
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
}
}
addiu(sp, sp, 4 * NumSaved);
}
void MacroAssembler::MultiPopReversed(RegList regs) {
int16_t NumSaved = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
}
}
addiu(sp, sp, 4 * NumSaved);
}
// Emulated conditional branches do not emit a nop in the branch delay slot.
// Trashes the at register if no scratch register is provided.
void MacroAssembler::Branch(Condition cond, int16_t offset, Register rs,
const Operand& rt, Register scratch) {
Register r2;
if (rt.is_reg()) {
// We don't want any other register but scratch clobbered.
ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
r2 = rt.rm_;
} else if (cond != cc_always) {
// We don't want any other register but scratch clobbered.
ASSERT(!scratch.is(rs));
r2 = scratch;
li(r2, rt);
}
switch (cond) {
case cc_always:
b(offset);
break;
case eq:
beq(rs, r2, offset);
break;
case ne:
bne(rs, r2, offset);
break;
// Signed comparison
case greater:
slt(scratch, r2, rs);
bne(scratch, zero_reg, offset);
break;
case greater_equal:
slt(scratch, rs, r2);
beq(scratch, zero_reg, offset);
break;
case less:
slt(scratch, rs, r2);
bne(scratch, zero_reg, offset);
break;
case less_equal:
slt(scratch, r2, rs);
beq(scratch, zero_reg, offset);
break;
// Unsigned comparison.
case Ugreater:
sltu(scratch, r2, rs);
bne(scratch, zero_reg, offset);
break;
case Ugreater_equal:
sltu(scratch, rs, r2);
beq(scratch, zero_reg, offset);
break;
case Uless:
sltu(scratch, rs, r2);
bne(scratch, zero_reg, offset);
break;
case Uless_equal:
sltu(scratch, r2, rs);
beq(scratch, zero_reg, offset);
break;
default:
UNREACHABLE();
}
}
void MacroAssembler::Branch(Condition cond, Label* L, Register rs,
const Operand& rt, Register scratch) {
Register r2;
if (rt.is_reg()) {
r2 = rt.rm_;
} else if (cond != cc_always) {
r2 = scratch;
li(r2, rt);
}
// We use branch_offset as an argument for the branch instructions to be sure
// it is called just before generating the branch instruction, as needed.
switch (cond) {
case cc_always:
b(shifted_branch_offset(L, false));
break;
case eq:
beq(rs, r2, shifted_branch_offset(L, false));
break;
case ne:
bne(rs, r2, shifted_branch_offset(L, false));
break;
// Signed comparison
case greater:
slt(scratch, r2, rs);
bne(scratch, zero_reg, shifted_branch_offset(L, false));
break;
case greater_equal:
slt(scratch, rs, r2);
beq(scratch, zero_reg, shifted_branch_offset(L, false));
break;
case less:
slt(scratch, rs, r2);
bne(scratch, zero_reg, shifted_branch_offset(L, false));
break;
case less_equal:
slt(scratch, r2, rs);
beq(scratch, zero_reg, shifted_branch_offset(L, false));
break;
// Unsigned comparison.
case Ugreater:
sltu(scratch, r2, rs);
bne(scratch, zero_reg, shifted_branch_offset(L, false));
break;
case Ugreater_equal:
sltu(scratch, rs, r2);
beq(scratch, zero_reg, shifted_branch_offset(L, false));
break;
case Uless:
sltu(scratch, rs, r2);
bne(scratch, zero_reg, shifted_branch_offset(L, false));
break;
case Uless_equal:
sltu(scratch, r2, rs);
beq(scratch, zero_reg, shifted_branch_offset(L, false));
break;
default:
UNREACHABLE();
}
}
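// Illustrative usage (editor's addition, not part of the original commit):
// since these emulated branches do not fill the delay slot, the caller adds
// the nop (or a useful instruction) itself, as done throughout this file:
//   Branch(eq, &done, t0, Operand(zero_reg));  // &done is a hypothetical label.
//   nop();  // Branch delay slot.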
// Trashes the at register if no scratch register is provided.
// We need to use bgezal or bltzal, but they cannot consume the 0/1 result of
// the slt instructions directly. Subtracting 1 turns that result into -1 or 0,
// whose sign bgezal/bltzal can test. Comparing with sub or add instead would
// miss overflow cases, so we keep slt and add an intermediate third
// instruction.
void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs,
const Operand& rt, Register scratch) {
Register r2;
if (rt.is_reg()) {
r2 = rt.rm_;
} else if (cond != cc_always) {
r2 = scratch;
li(r2, rt);
}
switch (cond) {
case cc_always:
bal(offset);
break;
case eq:
bne(rs, r2, 2);
nop();
bal(offset);
break;
case ne:
beq(rs, r2, 2);
nop();
bal(offset);
break;
// Signed comparison
case greater:
slt(scratch, r2, rs);
addiu(scratch, scratch, -1);
bgezal(scratch, offset);
break;
case greater_equal:
slt(scratch, rs, r2);
addiu(scratch, scratch, -1);
bltzal(scratch, offset);
break;
case less:
slt(scratch, rs, r2);
addiu(scratch, scratch, -1);
bgezal(scratch, offset);
break;
case less_equal:
slt(scratch, r2, rs);
addiu(scratch, scratch, -1);
bltzal(scratch, offset);
break;
// Unsigned comparison.
case Ugreater:
sltu(scratch, r2, rs);
addiu(scratch, scratch, -1);
bgezal(scratch, offset);
break;
case Ugreater_equal:
sltu(scratch, rs, r2);
addiu(scratch, scratch, -1);
bltzal(scratch, offset);
break;
case Uless:
sltu(scratch, rs, r2);
addiu(scratch, scratch, -1);
bgezal(scratch, offset);
break;
case Uless_equal:
sltu(scratch, r2, rs);
addiu(scratch, scratch, -1);
bltzal(scratch, offset);
break;
default:
UNREACHABLE();
}
}
void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs,
const Operand& rt, Register scratch) {
Register r2;
if (rt.is_reg()) {
r2 = rt.rm_;
} else if (cond != cc_always) {
r2 = scratch;
li(r2, rt);
}
switch (cond) {
case cc_always:
bal(shifted_branch_offset(L, false));
break;
case eq:
bne(rs, r2, 2);
nop();
bal(shifted_branch_offset(L, false));
break;
case ne:
beq(rs, r2, 2);
nop();
bal(shifted_branch_offset(L, false));
break;
// Signed comparison
case greater:
slt(scratch, r2, rs);
addiu(scratch, scratch, -1);
bgezal(scratch, shifted_branch_offset(L, false));
break;
case greater_equal:
slt(scratch, rs, r2);
addiu(scratch, scratch, -1);
bltzal(scratch, shifted_branch_offset(L, false));
break;
case less:
slt(scratch, rs, r2);
addiu(scratch, scratch, -1);
bgezal(scratch, shifted_branch_offset(L, false));
break;
case less_equal:
slt(scratch, r2, rs);
addiu(scratch, scratch, -1);
bltzal(scratch, shifted_branch_offset(L, false));
break;
// Unsigned comparison.
case Ugreater:
sltu(scratch, r2, rs);
addiu(scratch, scratch, -1);
bgezal(scratch, shifted_branch_offset(L, false));
break;
case Ugreater_equal:
sltu(scratch, rs, r2);
addiu(scratch, scratch, -1);
bltzal(scratch, shifted_branch_offset(L, false));
break;
case Uless:
sltu(scratch, rs, r2);
addiu(scratch, scratch, -1);
bgezal(scratch, shifted_branch_offset(L, false));
break;
case Uless_equal:
sltu(scratch, r2, rs);
addiu(scratch, scratch, -1);
bltzal(scratch, shifted_branch_offset(L, false));
break;
default:
UNREACHABLE();
}
}
void MacroAssembler::Jump(const Operand& target,
Condition cond, Register rs, const Operand& rt) {
if (target.is_reg()) {
if (cond == cc_always) {
jr(target.rm());
} else {
Branch(NegateCondition(cond), 2, rs, rt);
nop();
jr(target.rm());
}
} else { // !target.is_reg()
if (!MustUseAt(target.rmode_)) {
if (cond == cc_always) {
j(target.imm32_);
} else {
Branch(NegateCondition(cond), 2, rs, rt);
nop();
j(target.imm32_); // will generate only one instruction.
}
} else { // MustUseAt(target)
li(at, target); // Load the jump target, not the condition operand.
if (cond == cc_always) {
jr(at);
} else {
Branch(NegateCondition(cond), 2, rs, rt);
nop();
jr(at); // will generate only one instruction.
}
}
}
}
void MacroAssembler::Call(const Operand& target,
Condition cond, Register rs, const Operand& rt) {
if (target.is_reg()) {
if (cond == cc_always) {
jalr(target.rm());
} else {
Branch(NegateCondition(cond), 2, rs, rt);
nop();
jalr(target.rm());
}
} else { // !target.is_reg()
if (!MustUseAt(target.rmode_)) {
if (cond == cc_always) {
jal(target.imm32_);
} else {
Branch(NegateCondition(cond), 2, rs, rt);
nop();
jal(target.imm32_); // will generate only one instruction.
}
} else { // MustUseAt(target)
li(at, target); // Load the call target, not the condition operand.
if (cond == cc_always) {
jalr(at);
} else {
Branch(NegateCondition(cond), 2, rs, rt);
nop();
jalr(at); // will generate only one instruction.
}
}
}
}
void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
UNIMPLEMENTED_MIPS();
}
void MacroAssembler::Drop(int count, Condition cond) {
UNIMPLEMENTED_MIPS();
}
void MacroAssembler::Call(Label* target) {
UNIMPLEMENTED_MIPS();
}
// ---------------------------------------------------------------------------
// Exception handling
void MacroAssembler::PushTryHandler(CodeLocation try_location,
HandlerType type) {
UNIMPLEMENTED_MIPS();
}
void MacroAssembler::PopTryHandler() {
UNIMPLEMENTED_MIPS();
}
// ---------------------------------------------------------------------------
// Activation frames
void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
Register r1, const Operand& r2) {
UNIMPLEMENTED_MIPS();
}
void MacroAssembler::StubReturn(int argc) {
UNIMPLEMENTED_MIPS();
}
void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
UNIMPLEMENTED_MIPS();
}
void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
UNIMPLEMENTED_MIPS();
}
void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
int num_arguments,
int result_size) {
UNIMPLEMENTED_MIPS();
}
void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) {
UNIMPLEMENTED_MIPS();
}
Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
bool* resolved) {
UNIMPLEMENTED_MIPS();
return Handle<Code>(reinterpret_cast<Code*>(NULL)); // UNIMPLEMENTED RETURN
}
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags) {
UNIMPLEMENTED_MIPS();
}
void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
UNIMPLEMENTED_MIPS();
}
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
UNIMPLEMENTED_MIPS();
}
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
UNIMPLEMENTED_MIPS();
}
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
UNIMPLEMENTED_MIPS();
}
void MacroAssembler::Assert(Condition cc, const char* msg,
Register rs, Operand rt) {
UNIMPLEMENTED_MIPS();
}
void MacroAssembler::Check(Condition cc, const char* msg,
Register rs, Operand rt) {
UNIMPLEMENTED_MIPS();
}
void MacroAssembler::Abort(const char* msg) {
UNIMPLEMENTED_MIPS();
}
} } // namespace v8::internal

View File

@ -0,0 +1,381 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
#define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
#include "assembler.h"
#include "mips/assembler-mips.h"
namespace v8 {
namespace internal {
// Forward declaration.
class JumpTarget;
// Register at is used for instruction generation, so it is not safe to use it
// unless we know exactly what we are doing.
// Register aliases.
const Register cp = s7; // JavaScript context pointer
const Register fp = s8_fp; // Alias fp
enum InvokeJSFlags {
CALL_JS,
JUMP_JS
};
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
MacroAssembler(void* buffer, int size);
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(const Operand& target,
Condition cond = cc_always,
Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
void Call(const Operand& target,
Condition cond = cc_always,
Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
void Jump(Register target,
Condition cond = cc_always,
Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
void Jump(byte* target, RelocInfo::Mode rmode,
Condition cond = cc_always,
Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
void Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond = cc_always,
Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
void Call(Register target,
Condition cond = cc_always,
Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
void Call(byte* target, RelocInfo::Mode rmode,
Condition cond = cc_always,
Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
void Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond = cc_always,
Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
void Ret(Condition cond = cc_always,
Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
void Branch(Condition cond, int16_t offset, Register rs = zero_reg,
const Operand& rt = Operand(zero_reg), Register scratch = at);
void Branch(Condition cond, Label* L, Register rs = zero_reg,
const Operand& rt = Operand(zero_reg), Register scratch = at);
// Conditional branch and link.
void BranchAndLink(Condition cond, int16_t offset, Register rs = zero_reg,
const Operand& rt = Operand(zero_reg),
Register scratch = at);
void BranchAndLink(Condition cond, Label* L, Register rs = zero_reg,
const Operand& rt = Operand(zero_reg),
Register scratch = at);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
void Drop(int count, Condition cond = cc_always);
void Call(Label* target);
// Jump unconditionally to the given label.
// We NEED a nop in the branch delay slot, as it is used by v8, for example in
// CodeGenerator::ProcessDeferred().
// Prefer b(Label) for plain code generation.
void jmp(Label* L) {
Branch(cc_always, L);
nop();
}
// Load an object from the root table.
void LoadRoot(Register destination,
Heap::RootListIndex index);
void LoadRoot(Register destination,
Heap::RootListIndex index,
Condition cond, Register src1, const Operand& src2);
// Sets the remembered set bit for [address+offset], where address is the
// address of the heap object 'object'. The address must be in the first 8K
// of an allocated page. The 'scratch' register is used in the
// implementation and all 3 registers are clobbered by the operation, as
// well as the at register.
void RecordWrite(Register object, Register offset, Register scratch);
// ---------------------------------------------------------------------------
// Instruction macros
#define DEFINE_INSTRUCTION(instr) \
void instr(Register rd, Register rs, const Operand& rt); \
void instr(Register rd, Register rs, Register rt) { \
instr(rd, rs, Operand(rt)); \
} \
void instr(Register rs, Register rt, int32_t j) { \
instr(rs, rt, Operand(j)); \
}
#define DEFINE_INSTRUCTION2(instr) \
void instr(Register rs, const Operand& rt); \
void instr(Register rs, Register rt) { \
instr(rs, Operand(rt)); \
} \
void instr(Register rs, int32_t j) { \
instr(rs, Operand(j)); \
}
DEFINE_INSTRUCTION(Add);
DEFINE_INSTRUCTION(Addu);
DEFINE_INSTRUCTION(Mul);
DEFINE_INSTRUCTION2(Mult);
DEFINE_INSTRUCTION2(Multu);
DEFINE_INSTRUCTION2(Div);
DEFINE_INSTRUCTION2(Divu);
DEFINE_INSTRUCTION(And);
DEFINE_INSTRUCTION(Or);
DEFINE_INSTRUCTION(Xor);
DEFINE_INSTRUCTION(Nor);
DEFINE_INSTRUCTION(Slt);
DEFINE_INSTRUCTION(Sltu);
#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2
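// Illustrative expansion (editor's addition, not part of the original commit):
// DEFINE_INSTRUCTION(Add) above declares the Operand form plus two inline
// wrappers,
//   void Add(Register rd, Register rs, const Operand& rt);
//   void Add(Register rd, Register rs, Register rt) { Add(rd, rs, Operand(rt)); }
//   void Add(Register rs, Register rt, int32_t j) { Add(rs, rt, Operand(j)); }
// so callers can pass a register or an immediate and let the macro assembler
// decide whether the value must first be materialized in at.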
//------------Pseudo-instructions-------------
void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
// Move the logical ones complement of source to dest.
void movn(Register rd, Register rt);
// Load an int32 immediate into the rd register.
void li(Register rd, Operand j, bool gen2instr = false);
inline void li(Register rd, int32_t j, bool gen2instr = false) {
li(rd, Operand(j), gen2instr);
}
// Exception-generating instructions and debugging support
void stop(const char* msg);
// Push multiple registers on the stack.
// With MultiPush, lower registers are pushed first on the stack.
// For example if you push t0, t1, s0, and ra you get:
// | |
// |-----------------------|
// | t0 | +
// |-----------------------| |
// | t1 | |
// |-----------------------| |
// | s0 | v
// |-----------------------| -
// | ra |
// |-----------------------|
// | |
void MultiPush(RegList regs);
void MultiPushReversed(RegList regs);
void Push(Register src) {
Addu(sp, sp, Operand(-kPointerSize));
sw(src, MemOperand(sp, 0));
}
inline void push(Register src) { Push(src); }
void Push(Register src, Condition cond, Register tst1, Register tst2) {
// Since we don't have conditional execution we use a Branch.
Branch(cond, 3, tst1, Operand(tst2));
nop();
Addu(sp, sp, Operand(-kPointerSize));
sw(src, MemOperand(sp, 0));
}
// Pops multiple values from the stack and loads them into the
// registers specified in regs. Pop order is the opposite of MultiPush.
void MultiPop(RegList regs);
void MultiPopReversed(RegList regs);
void Pop(Register dst) {
lw(dst, MemOperand(sp, 0));
Addu(sp, sp, Operand(kPointerSize));
}
void Pop() {
Addu(sp, sp, Operand(kPointerSize));
}
// ---------------------------------------------------------------------------
// Exception handling
// Push a new try handler and link into try handler chain.
// The return address must be passed in register ra.
// On exit, v0 contains TOS (code slot).
void PushTryHandler(CodeLocation try_location, HandlerType type);
// Unlink the stack handler on top of the stack from the try handler chain.
// Must preserve the result register.
void PopTryHandler();
// ---------------------------------------------------------------------------
// Support functions.
inline void BranchOnSmi(Register value, Label* smi_label,
Register scratch = at) {
ASSERT_EQ(0, kSmiTag);
andi(scratch, value, kSmiTagMask);
Branch(eq, smi_label, scratch, Operand(zero_reg));
}
inline void BranchOnNotSmi(Register value, Label* not_smi_label,
Register scratch = at) {
ASSERT_EQ(0, kSmiTag);
andi(scratch, value, kSmiTagMask);
Branch(ne, not_smi_label, scratch, Operand(zero_reg));
}
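// Illustrative usage (editor's addition, not part of the original commit):
// smis carry tag 0 in the low bit, so a typical guard before touching a heap
// object is
//   BranchOnSmi(a0, &slow_case);  // &slow_case is a hypothetical label.
//   nop();                        // Branch delay slot.
//   lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));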
// ---------------------------------------------------------------------------
// Runtime calls
// Call a code stub.
void CallStub(CodeStub* stub, Condition cond = cc_always,
Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
void CallJSExitStub(CodeStub* stub);
// Return from a code stub after popping its arguments.
void StubReturn(int argc);
// Call a runtime routine.
// Eventually this should be used for all C calls.
void CallRuntime(Runtime::Function* f, int num_arguments);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments);
// Tail call of a runtime routine (jump).
// Like JumpToRuntime, but also takes care of passing the number
// of parameters.
void TailCallRuntime(const ExternalReference& ext,
int num_arguments,
int result_size);
// Jump to the builtin routine.
void JumpToRuntime(const ExternalReference& builtin);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id, InvokeJSFlags flags);
// Store the code object for the given builtin in the target register and
// set up the function in a1.
void GetBuiltinEntry(Register target, Builtins::JavaScript id);
struct Unresolved {
int pc;
uint32_t flags; // see Bootstrapper::FixupFlags decoders/encoders.
const char* name;
};
List<Unresolved>* unresolved() { return &unresolved_; }
Handle<Object> CodeObject() { return code_object_; }
// ---------------------------------------------------------------------------
// Stack limit support
void StackLimitCheck(Label* on_stack_limit_hit);
// ---------------------------------------------------------------------------
// StatsCounter support
void SetCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2);
void IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2);
void DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2);
// ---------------------------------------------------------------------------
// Debugging
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cc, const char* msg, Register rs, Operand rt);
// Like Assert(), but always enabled.
void Check(Condition cc, const char* msg, Register rs, Operand rt);
// Print a message to stdout and abort execution.
void Abort(const char* msg);
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
bool generating_stub() { return generating_stub_; }
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
private:
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
// Get the code for the given builtin. Returns whether it was able to resolve
// the function in the 'resolved' flag.
Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
List<Unresolved> unresolved_;
bool generating_stub_;
bool allow_stub_calls_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
};
// -----------------------------------------------------------------------------
// Static helper functions.
// Generate a MemOperand for loading a field from an object.
static inline MemOperand FieldMemOperand(Register object, int offset) {
return MemOperand(object, offset - kHeapObjectTag);
}
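// Illustrative usage (editor's addition, not part of the original commit):
// heap object pointers carry kHeapObjectTag (1), so FieldMemOperand folds the
// untagging into the load offset, e.g.
//   lw(scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
// emits a single lw with offset kMapOffset - kHeapObjectTag.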
#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
#else
#define ACCESS_MASM(masm) masm->
#endif
} } // namespace v8::internal
#endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_

View File

@ -0,0 +1,137 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_MIPS_REGISTER_ALLOCATOR_MIPS_INL_H_
#define V8_MIPS_REGISTER_ALLOCATOR_MIPS_INL_H_
#include "v8.h"
#include "mips/assembler-mips.h"
namespace v8 {
namespace internal {
// -------------------------------------------------------------------------
// RegisterAllocator implementation.
bool RegisterAllocator::IsReserved(Register reg) {
// The code for this test relies on the order of register codes.
return reg.is(cp) || reg.is(s8_fp) || reg.is(sp);
}
int RegisterAllocator::ToNumber(Register reg) {
ASSERT(reg.is_valid() && !IsReserved(reg));
const int kNumbers[] = {
0, // zero_reg
1, // at
2, // v0
3, // v1
4, // a0
5, // a1
6, // a2
7, // a3
8, // t0
9, // t1
10, // t2
11, // t3
12, // t4
13, // t5
14, // t6
15, // t7
16, // s0
17, // s1
18, // s2
19, // s3
20, // s4
21, // s5
22, // s6
23, // s7
24, // t8
25, // t9
26, // k0
27, // k1
28, // gp
29, // sp
30, // s8_fp
31, // ra
};
return kNumbers[reg.code()];
}
Register RegisterAllocator::ToRegister(int num) {
ASSERT(num >= 0 && num < kNumRegisters);
const Register kRegisters[] = {
zero_reg,
at,
v0,
v1,
a0,
a1,
a2,
a3,
t0,
t1,
t2,
t3,
t4,
t5,
t6,
t7,
s0,
s1,
s2,
s3,
s4,
s5,
s6,
s7,
t8,
t9,
k0,
k1,
gp,
sp,
s8_fp,
ra
};
return kRegisters[num];
}
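// Editor's note (not part of the original commit): with the tables above the
// allocator number is simply the MIPS register code, so the two mappings are
// inverses of each other, e.g. ToRegister(ToNumber(s0)).is(s0) holds and both
// sides use code 16.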
void RegisterAllocator::Initialize() {
Reset();
// The non-reserved a1 and ra registers are live on JS function entry.
Use(a1); // JS function.
Use(ra); // Return address.
}
} } // namespace v8::internal
#endif // V8_MIPS_REGISTER_ALLOCATOR_MIPS_INL_H_

View File

@ -0,0 +1,60 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen-inl.h"
#include "register-allocator-inl.h"
namespace v8 {
namespace internal {
// -------------------------------------------------------------------------
// Result implementation.
void Result::ToRegister() {
UNIMPLEMENTED_MIPS();
}
void Result::ToRegister(Register target) {
UNIMPLEMENTED_MIPS();
}
// -------------------------------------------------------------------------
// RegisterAllocator implementation.
Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
// No byte registers on MIPS.
UNREACHABLE();
return Result();
}
} } // namespace v8::internal

View File

@ -0,0 +1,46 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
#define V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
#include "mips/constants-mips.h"
namespace v8 {
namespace internal {
class RegisterAllocatorConstants : public AllStatic {
public:
static const int kNumRegisters = assembler::mips::kNumRegisters;
static const int kInvalidRegister = assembler::mips::kInvalidRegister;
};
} } // namespace v8::internal
#endif // V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_

1648
src/mips/simulator-mips.cc Normal file

File diff suppressed because it is too large

311
src/mips/simulator-mips.h Normal file
View File

@ -0,0 +1,311 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Declares a Simulator for MIPS instructions if we are not generating a native
// MIPS binary. This Simulator allows us to run and debug MIPS code generation
// on regular desktop machines.
// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
// which will start execution in the Simulator or forward to the real entry
// point on a MIPS HW platform.
#ifndef V8_MIPS_SIMULATOR_MIPS_H_
#define V8_MIPS_SIMULATOR_MIPS_H_
#include "allocation.h"
#if defined(__mips)
// When running without a simulator we call the entry directly.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
entry(p0, p1, p2, p3, p4);
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on mips uses the C stack, we
// just use the C stack limit.
class SimulatorStack : public v8::internal::AllStatic {
public:
static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
return c_limit;
}
static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
return try_catch_address;
}
static inline void UnregisterCTryCatch() { }
};
// Calculates the stack limit beyond which we will throw stack overflow errors.
// This macro must be called from a C++ method. It relies on being able to take
// the address of "this" to get a value on the current execution stack and then
// calculates the stack limit based on that value.
// NOTE: The check for overflow is not safe as there is no guarantee that the
// running thread has its stack in all memory up to address 0x00000000.
#define GENERATED_CODE_STACK_LIMIT(limit) \
(reinterpret_cast<uintptr_t>(this) >= limit ? \
reinterpret_cast<uintptr_t>(this) - limit : 0)
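// Illustration only (not part of this change): because the macro takes the
// address of `this`, it can only be expanded inside a C++ member function.
// A hypothetical caller wanting 256KB of headroom could write
//   uintptr_t limit = GENERATED_CODE_STACK_LIMIT(256 * KB);
// which yields an address 256KB below the current stack position, or 0 if
// that would underflow.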
// Call the generated regexp code directly. The entry function pointer should
// expect seven int/pointer sized arguments and return an int.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
entry(p0, p1, p2, p3, p4, p5, p6)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
#else // #if defined(__mips)
// When running with the simulator, transition into simulated execution at this
// point.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
reinterpret_cast<Object*>(\
assembler::mips::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
p0, p1, p2, p3, p4))
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
assembler::mips::Simulator::current()->Call(\
FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == NULL ? \
NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
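// Illustration only (not part of this change): callers use the macro the same
// way in both configurations; only the expansion differs. A JS entry call
// might look roughly like
//   Object* result = CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4);
// which runs `entry` directly on MIPS hardware and goes through
// assembler::mips::Simulator::current()->Call(...) under the simulator.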
namespace assembler {
namespace mips {
class Simulator {
public:
friend class Debugger;
// Registers are declared in order. See SMRL chapter 2.
enum Register {
no_reg = -1,
zero_reg = 0,
at,
v0, v1,
a0, a1, a2, a3,
t0, t1, t2, t3, t4, t5, t6, t7,
s0, s1, s2, s3, s4, s5, s6, s7,
t8, t9,
k0, k1,
gp,
sp,
s8,
ra,
// LO, HI, and pc
LO,
HI,
pc, // pc must be the last register.
kNumSimuRegisters,
// aliases
fp = s8
};
// Coprocessor registers.
// Generated code will always use doubles. So we will only use even registers.
enum FPURegister {
f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
f12, f13, f14, f15, // f12 and f14 are argument FPURegisters
f16, f17, f18, f19, f20, f21, f22, f23, f24, f25,
f26, f27, f28, f29, f30, f31,
kNumFPURegisters
};
Simulator();
~Simulator();
// The currently executing Simulator instance. Potentially there can be one
// for each native thread.
static Simulator* current();
// Accessors for register state. Reading the pc value adheres to the MIPS
// architecture specification and is off by 8 from the currently executing
// instruction.
void set_register(int reg, int32_t value);
int32_t get_register(int reg) const;
// Same for FPURegisters
void set_fpu_register(int fpureg, int32_t value);
void set_fpu_register_double(int fpureg, double value);
int32_t get_fpu_register(int fpureg) const;
double get_fpu_register_double(int fpureg) const;
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int32_t value);
int32_t get_pc() const;
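// Illustration only (not part of this change): the Register enum values double
// as indices into the simulator's register file, so a caller could read the
// MIPS return value register after a simulated call with e.g.
//   int32_t result = sim->get_register(Simulator::v0);
// while set_pc()/get_pc() access the raw pc without the off-by-8 adjustment
// described above.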
// Accessor to the internal simulator stack area.
uintptr_t StackLimit() const;
// Executes MIPS instructions until the PC reaches end_sim_pc.
void Execute();
// Call on program start.
static void Initialize();
// V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function,
// which sets up the simulator state and grabs the result on return.
int32_t Call(byte_* entry, int argument_count, ...);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
// Pop an address from the JS stack.
uintptr_t PopAddress();
private:
enum special_values {
// Known bad pc value to ensure that the simulator does not execute
// without being properly set up.
bad_ra = -1,
// A pc value used to signal the simulator to stop execution. Generally
// the ra is set to this value on transition from native C code to
// simulated execution, so that the simulator can "return" to the native
// C code.
end_sim_pc = -2,
// Unpredictable value.
Unpredictable = 0xbadbeaf
};
// Unsupported instructions use Format to print an error and stop execution.
void Format(Instruction* instr, const char* format);
// Read and write memory.
inline uint32_t ReadBU(int32_t addr);
inline int32_t ReadB(int32_t addr);
inline void WriteB(int32_t addr, uint8_t value);
inline void WriteB(int32_t addr, int8_t value);
inline uint16_t ReadHU(int32_t addr, Instruction* instr);
inline int16_t ReadH(int32_t addr, Instruction* instr);
// Note: Overloaded on the sign of the value.
inline void WriteH(int32_t addr, uint16_t value, Instruction* instr);
inline void WriteH(int32_t addr, int16_t value, Instruction* instr);
inline int ReadW(int32_t addr, Instruction* instr);
inline void WriteW(int32_t addr, int value, Instruction* instr);
inline double ReadD(int32_t addr, Instruction* instr);
inline void WriteD(int32_t addr, double value, Instruction* instr);
// Operations depending on endianness.
// Get Double Higher / Lower word.
inline int32_t GetDoubleHIW(double* addr);
inline int32_t GetDoubleLOW(double* addr);
// Set Double Higher / Lower word.
inline int32_t SetDoubleHIW(double* addr);
inline int32_t SetDoubleLOW(double* addr);
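// Illustration only (assumption): for an IEEE 754 double on a little-endian
// target, GetDoubleHIW(&d) corresponds to reinterpret_cast<int32_t*>(&d)[1]
// and GetDoubleLOW(&d) to index [0]; a big-endian build swaps the two.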
// Executing is handled based on the instruction type.
void DecodeTypeRegister(Instruction* instr);
void DecodeTypeImmediate(Instruction* instr);
void DecodeTypeJump(Instruction* instr);
// Used for breakpoints and traps.
void SoftwareInterrupt(Instruction* instr);
// Executes one instruction.
void InstructionDecode(Instruction* instr);
// Execute one instruction placed in a branch delay slot.
void BranchDelayInstructionDecode(Instruction* instr) {
if (instr->IsForbiddenInBranchDelay()) {
V8_Fatal(__FILE__, __LINE__,
"Eror:Unexpected %i opcode in a branch delay slot.",
instr->OpcodeField());
}
InstructionDecode(instr);
}
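// Sketch (assumption, not part of this change): a taken-branch handler is
// expected to execute the delay-slot instruction through this helper before
// redirecting the pc, roughly:
//   Instruction* delay_slot = /* instruction at pc + 4 */;
//   BranchDelayInstructionDecode(delay_slot);
//   set_pc(target);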
enum Exception {
none,
kIntegerOverflow,
kIntegerUnderflow,
kDivideByZero,
kNumExceptions
};
int16_t exceptions[kNumExceptions];
// Exceptions.
void SignalExceptions();
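// Sketch (assumption): arithmetic handlers record a pending condition by
// incrementing the matching counter, e.g. exceptions[kIntegerOverflow]++, and
// SignalExceptions() then inspects the array and aborts the simulation if any
// entry is non-zero.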
// Runtime call support.
static void* RedirectExternalReference(void* external_function,
bool fp_return);
// Used for runtime calls that take two double values as arguments and
// return a double.
void SetFpResult(double result);
// Architecture state.
// Registers.
int32_t registers_[kNumSimuRegisters];
// Coprocessor Registers.
int32_t FPUregisters_[kNumFPURegisters];
// Simulator support.
char* stack_;
bool pc_modified_;
int icount_;
static bool initialized_;
// Registered breakpoints.
Instruction* break_pc_;
Instr break_instr_;
};
} } // namespace assembler::mips
// The simulator has its own stack. Thus it has a different stack limit from
// the C-based native code. Setting the c_limit to indicate a very small
// stack would cause stack overflow errors, since the simulator ignores the input.
// This is unlikely to be an issue in practice, though it might cause testing
// trouble down the line.
class SimulatorStack : public v8::internal::AllStatic {
public:
static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
return assembler::mips::Simulator::current()->StackLimit();
}
static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
assembler::mips::Simulator* sim = assembler::mips::Simulator::current();
return sim->PushAddress(try_catch_address);
}
static inline void UnregisterCTryCatch() {
assembler::mips::Simulator::current()->PopAddress();
}
};
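// Illustration only (not part of this change): callers treat both
// SimulatorStack variants the same way, e.g.
//   uintptr_t js_limit = SimulatorStack::JsLimitFromCLimit(c_limit);
// On hardware this is simply c_limit; under the simulator it is the limit of
// the simulator's own stack, which is why the c_limit argument is ignored.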
#endif // defined(__mips)
#endif // V8_MIPS_SIMULATOR_MIPS_H_

384
src/mips/stub-cache-mips.cc Normal file
View File

@ -0,0 +1,384 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "ic-inl.h"
#include "codegen-inl.h"
#include "stub-cache.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
void StubCache::GenerateProbe(MacroAssembler* masm,
Code::Flags flags,
Register receiver,
Register name,
Register scratch,
Register extra) {
UNIMPLEMENTED_MIPS();
}
void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
int index,
Register prototype) {
UNIMPLEMENTED_MIPS();
}
// Load a fast property out of a holder object (src). In-object properties
// are loaded directly; otherwise the property is loaded from the properties
// fixed array.
void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst, Register src,
JSObject* holder, int index) {
UNIMPLEMENTED_MIPS();
}
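// Sketch (assumption, mirroring the ARM port; not part of this change): once
// implemented, this is expected to emit either an in-object load or a load via
// the properties array, roughly:
//   __ lw(dst, FieldMemOperand(src, in_object_offset));
// or
//   __ lw(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
//   __ lw(dst, FieldMemOperand(dst, array_offset));
// where lw is the MIPS load-word, FieldMemOperand is assumed to be available
// as on ARM, and in_object_offset / array_offset are hypothetical names.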
void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
Register receiver,
Register scratch,
Label* miss_label) {
UNIMPLEMENTED_MIPS();
}
// Generate code to load the length from a string object and return the length.
// If the receiver object is not a string or a wrapped string object the
// execution continues at the miss label. The register containing the
// receiver is potentially clobbered.
void StubCompiler::GenerateLoadStringLength2(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
Label* miss) {
UNIMPLEMENTED_MIPS();
__ break_(0x249);
}
void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
Label* miss_label) {
UNIMPLEMENTED_MIPS();
}
// Generate StoreField code. The value is passed in the r0 register.
// After executing generated code, the receiver_reg and name_reg
// may be clobbered.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
Builtins::Name storage_extend,
JSObject* object,
int index,
Map* transition,
Register receiver_reg,
Register name_reg,
Register scratch,
Label* miss_label) {
UNIMPLEMENTED_MIPS();
}
void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
UNIMPLEMENTED_MIPS();
}
#undef __
#define __ ACCESS_MASM(masm())
Register StubCompiler::CheckPrototypes(JSObject* object,
Register object_reg,
JSObject* holder,
Register holder_reg,
Register scratch,
String* name,
Label* miss) {
UNIMPLEMENTED_MIPS();
return at; // UNIMPLEMENTED RETURN
}
void StubCompiler::GenerateLoadField(JSObject* object,
JSObject* holder,
Register receiver,
Register scratch1,
Register scratch2,
int index,
String* name,
Label* miss) {
UNIMPLEMENTED_MIPS();
}
void StubCompiler::GenerateLoadConstant(JSObject* object,
JSObject* holder,
Register receiver,
Register scratch1,
Register scratch2,
Object* value,
String* name,
Label* miss) {
UNIMPLEMENTED_MIPS();
}
bool StubCompiler::GenerateLoadCallback(JSObject* object,
JSObject* holder,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
AccessorInfo* callback,
String* name,
Label* miss,
Failure** failure) {
UNIMPLEMENTED_MIPS();
__ break_(0x470);
return false; // UNIMPLEMENTED RETURN
}
void StubCompiler::GenerateLoadInterceptor(JSObject* object,
JSObject* holder,
LookupResult* lookup,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
String* name,
Label* miss) {
UNIMPLEMENTED_MIPS();
__ break_(0x505);
}
Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
Object* CallStubCompiler::CompileCallField(Object* object,
JSObject* holder,
int index,
String* name) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
String* name,
CheckType check) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
Object* CallStubCompiler::CompileCallInterceptor(Object* object,
JSObject* holder,
String* name) {
UNIMPLEMENTED_MIPS();
__ break_(0x782);
return GetCode(INTERCEPTOR, name);
}
Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
GlobalObject* holder,
JSGlobalPropertyCell* cell,
JSFunction* function,
String* name) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
Object* StoreStubCompiler::CompileStoreField(JSObject* object,
int index,
Map* transition,
String* name) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
AccessorInfo* callback,
String* name) {
UNIMPLEMENTED_MIPS();
__ break_(0x906);
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
String* name) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
JSGlobalPropertyCell* cell,
String* name) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
Object* LoadStubCompiler::CompileLoadField(JSObject* object,
JSObject* holder,
int index,
String* name) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
Object* LoadStubCompiler::CompileLoadCallback(String* name,
JSObject* object,
JSObject* holder,
AccessorInfo* callback) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
JSObject* holder,
Object* value,
String* name) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
JSObject* holder,
String* name) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
GlobalObject* holder,
JSGlobalPropertyCell* cell,
String* name,
bool is_dont_delete) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
JSObject* receiver,
JSObject* holder,
int index) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
JSObject* receiver,
JSObject* holder,
AccessorInfo* callback) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
JSObject* receiver,
JSObject* holder,
Object* value) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
JSObject* holder,
String* name) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
// TODO(1224671): implement the fast case.
Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
int index,
Map* transition,
String* name) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
Object* ConstructStubCompiler::CompileConstructStub(
SharedFunctionInfo* shared) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
#undef __
} } // namespace v8::internal

View File

@ -0,0 +1,240 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen-inl.h"
#include "register-allocator-inl.h"
#include "scopes.h"
namespace v8 {
namespace internal {
// -------------------------------------------------------------------------
// VirtualFrame implementation.
#define __ ACCESS_MASM(masm())
// On entry to a function, the virtual frame already contains the
// receiver and the parameters. All initial frame elements are in
// memory.
VirtualFrame::VirtualFrame()
: elements_(parameter_count() + local_count() + kPreallocatedElements),
stack_pointer_(parameter_count()) { // 0-based index of TOS.
UNIMPLEMENTED_MIPS();
}
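// Sketch (assumption, mirroring the ARM port; not part of this change): once
// implemented, the constructor is expected to mark the initial receiver and
// parameter elements as being in memory and to clear the register locations,
// roughly:
//   for (int i = 0; i <= stack_pointer_; i++) {
//     elements_.Add(FrameElement::MemoryElement());
//   }
//   for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
//     register_locations_[i] = kIllegalIndex;
//   }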
void VirtualFrame::SyncElementBelowStackPointer(int index) {
UNREACHABLE();
}
void VirtualFrame::SyncElementByPushing(int index) {
UNREACHABLE();
}
void VirtualFrame::SyncRange(int begin, int end) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::MergeTo(VirtualFrame* expected) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::Enter() {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::Exit() {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::AllocateStackSlots() {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::SaveContextRegister() {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::RestoreContextRegister() {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::PushReceiverSlotAddress() {
UNIMPLEMENTED_MIPS();
}
int VirtualFrame::InvalidateFrameSlotAt(int index) {
return kIllegalIndex;
}
void VirtualFrame::TakeFrameSlotAt(int index) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::StoreToFrameSlotAt(int index) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::PushTryHandler(HandlerType type) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::RawCallStub(CodeStub* stub) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::CallAlignedRuntime(Runtime::Function* f, int arg_count) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::CallAlignedRuntime(Runtime::FunctionId id, int arg_count) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
Result* arg_count_register,
int arg_count) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::RawCallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::CallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode,
int dropped_args) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::CallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode,
Result* arg,
int dropped_args) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::CallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode,
Result* arg0,
Result* arg1,
int dropped_args,
bool set_auto_args_slots) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::Drop(int count) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::DropFromVFrameOnly(int count) {
UNIMPLEMENTED_MIPS();
}
Result VirtualFrame::Pop() {
UNIMPLEMENTED_MIPS();
Result res = Result();
return res; // UNIMPLEMENTED RETURN
}
void VirtualFrame::EmitPop(Register reg) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::EmitMultiPop(RegList regs) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::EmitPush(Register reg) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::EmitMultiPush(RegList regs) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::EmitArgumentSlots(RegList reglist) {
UNIMPLEMENTED_MIPS();
}
#undef __
} } // namespace v8::internal

View File

@ -0,0 +1,548 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_MIPS_VIRTUAL_FRAME_MIPS_H_
#define V8_MIPS_VIRTUAL_FRAME_MIPS_H_
#include "register-allocator.h"
#include "scopes.h"
namespace v8 {
namespace internal {
// -------------------------------------------------------------------------
// Virtual frames
//
// The virtual frame is an abstraction of the physical stack frame. It
// encapsulates the parameters, frame-allocated locals, and the expression
// stack. It supports push/pop operations on the expression stack, as well
// as random access to the expression stack elements, locals, and
// parameters.
class VirtualFrame : public ZoneObject {
public:
// A utility class to introduce a scope where the virtual frame is
// expected to remain spilled. The constructor spills the code
// generator's current frame, but no attempt is made to require it
// to stay spilled. It is intended as documentation while the code
// generator is being transformed.
class SpilledScope BASE_EMBEDDED {
public:
SpilledScope() {}
};
// An illegal index into the virtual frame.
static const int kIllegalIndex = -1;
// Construct an initial virtual frame on entry to a JS function.
VirtualFrame();
// Construct a virtual frame as a clone of an existing one.
explicit VirtualFrame(VirtualFrame* original);
CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
MacroAssembler* masm() { return cgen()->masm(); }
// Create a duplicate of an existing valid frame element.
FrameElement CopyElementAt(int index);
// The number of elements on the virtual frame.
int element_count() { return elements_.length(); }
// The height of the virtual expression stack.
int height() {
return element_count() - expression_base_index();
}
int register_location(int num) {
ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
return register_locations_[num];
}
int register_location(Register reg) {
return register_locations_[RegisterAllocator::ToNumber(reg)];
}
void set_register_location(Register reg, int index) {
register_locations_[RegisterAllocator::ToNumber(reg)] = index;
}
bool is_used(int num) {
ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
return register_locations_[num] != kIllegalIndex;
}
bool is_used(Register reg) {
return register_locations_[RegisterAllocator::ToNumber(reg)]
!= kIllegalIndex;
}
// Add extra in-memory elements to the top of the frame to match an actual
// frame (eg, the frame after an exception handler is pushed). No code is
// emitted.
void Adjust(int count);
// Forget elements from the top of the frame to match an actual frame (eg,
// the frame after a runtime call). No code is emitted.
void Forget(int count) {
ASSERT(count >= 0);
ASSERT(stack_pointer_ == element_count() - 1);
stack_pointer_ -= count;
// On mips, all elements are in memory, so there is no extra bookkeeping
// (registers, copies, etc.) beyond dropping the elements.
elements_.Rewind(stack_pointer_ + 1);
}
// Forget count elements from the top of the frame and adjust the stack
// pointer downward. This is used, for example, before merging frames at
// break, continue, and return targets.
void ForgetElements(int count);
// Spill all values from the frame to memory.
void SpillAll();
// Spill all occurrences of a specific register from the frame.
void Spill(Register reg) {
if (is_used(reg)) SpillElementAt(register_location(reg));
}
// Spill all occurrences of an arbitrary register if possible. Return the
// register spilled or no_reg if it was not possible to free any register
// (ie, they all have frame-external references).
Register SpillAnyRegister();
// Prepare this virtual frame for merging to an expected frame by
// performing some state changes that do not require generating
// code. It is guaranteed that no code will be generated.
void PrepareMergeTo(VirtualFrame* expected);
// Make this virtual frame have a state identical to an expected virtual
// frame. As a side effect, code may be emitted to make this frame match
// the expected one.
void MergeTo(VirtualFrame* expected);
// Detach a frame from its code generator, perhaps temporarily. This
// tells the register allocator that it is free to use frame-internal
// registers. Used when the code generator's frame is switched from this
// one to NULL by an unconditional jump.
void DetachFromCodeGenerator() {
RegisterAllocator* cgen_allocator = cgen()->allocator();
for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
if (is_used(i)) cgen_allocator->Unuse(i);
}
}
// (Re)attach a frame to its code generator. This informs the register
// allocator that the frame-internal register references are active again.
// Used when a code generator's frame is switched from NULL to this one by
// binding a label.
void AttachToCodeGenerator() {
RegisterAllocator* cgen_allocator = cgen()->allocator();
for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
if (is_used(i)) cgen_allocator->Use(i);
}
}
// Emit code for the physical JS entry and exit frame sequences. After
// calling Enter, the virtual frame is ready for use; and after calling
// Exit it should not be used. Note that Enter does not allocate space in
// the physical frame for storing frame-allocated locals.
void Enter();
void Exit();
// Prepare for returning from the frame by spilling locals and
// dropping all non-locals elements in the virtual frame. This
// avoids generating unnecessary merge code when jumping to the
// shared return site. Emits code for spills.
void PrepareForReturn();
// Allocate and initialize the frame-allocated locals.
void AllocateStackSlots();
// The current top of the expression stack as an assembly operand.
MemOperand Top() { return MemOperand(sp, 0); }
// An element of the expression stack as an assembly operand.
MemOperand ElementAt(int index) {
return MemOperand(sp, index * kPointerSize);
}
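// Illustration only: ElementAt(0) is the same operand as Top(), and
// ElementAt(1) addresses the element one slot below the top, i.e.
// MemOperand(sp, kPointerSize).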
// Random-access store to a frame-top relative frame element. The result
// becomes owned by the frame and is invalidated.
void SetElementAt(int index, Result* value);
// Set a frame element to a constant. The index is frame-top relative.
void SetElementAt(int index, Handle<Object> value) {
Result temp(value);
SetElementAt(index, &temp);
}
void PushElementAt(int index) {
PushFrameSlotAt(element_count() - index - 1);
}
// A frame-allocated local as an assembly operand.
MemOperand LocalAt(int index) {
ASSERT(0 <= index);
ASSERT(index < local_count());
return MemOperand(s8_fp, kLocal0Offset - index * kPointerSize);
}
// Push a copy of the value of a local frame slot on top of the frame.
void PushLocalAt(int index) {
PushFrameSlotAt(local0_index() + index);
}
// Push the value of a local frame slot on top of the frame and invalidate
// the local slot. The slot should be written to before trying to read
// from it again.
void TakeLocalAt(int index) {
TakeFrameSlotAt(local0_index() + index);
}
// Store the top value on the virtual frame into a local frame slot. The
// value is left in place on top of the frame.
void StoreToLocalAt(int index) {
StoreToFrameSlotAt(local0_index() + index);
}
// Push the address of the receiver slot on the frame.
void PushReceiverSlotAddress();
// The function frame slot.
MemOperand Function() { return MemOperand(s8_fp, kFunctionOffset); }
// Push the function on top of the frame.
void PushFunction() { PushFrameSlotAt(function_index()); }
// The context frame slot.
MemOperand Context() { return MemOperand(s8_fp, kContextOffset); }
// Save the value of the cp register to the context frame slot.
void SaveContextRegister();
// Restore the cp register from the value of the context frame
// slot.
void RestoreContextRegister();
// A parameter as an assembly operand.
MemOperand ParameterAt(int index) {
// Index -1 corresponds to the receiver.
ASSERT(-1 <= index); // -1 is the receiver.
ASSERT(index <= parameter_count());
uint16_t a = 0; // Number of argument slots.
return MemOperand(s8_fp, (1 + parameter_count() + a - index) * kPointerSize);
}
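// Worked example (illustration only): with parameter_count() == 2 and no
// argument slots (a == 0), ParameterAt(1) is MemOperand(s8_fp, 2 * kPointerSize)
// and ParameterAt(-1), the receiver, is MemOperand(s8_fp, 4 * kPointerSize).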
// Push a copy of the value of a parameter frame slot on top of the frame.
void PushParameterAt(int index) {
PushFrameSlotAt(param0_index() + index);
}
// Push the value of a parameter frame slot on top of the frame and
// invalidate the parameter slot. The slot should be written to before
// trying to read from it again.
void TakeParameterAt(int index) {
TakeFrameSlotAt(param0_index() + index);
}
// Store the top value on the virtual frame into a parameter frame slot.
// The value is left in place on top of the frame.
void StoreToParameterAt(int index) {
StoreToFrameSlotAt(param0_index() + index);
}
// The receiver frame slot.
MemOperand Receiver() { return ParameterAt(-1); }
// Push a try-catch or try-finally handler on top of the virtual frame.
void PushTryHandler(HandlerType type);
// Call stub given the number of arguments it expects on (and
// removes from) the stack.
void CallStub(CodeStub* stub, int arg_count) {
PrepareForCall(arg_count, arg_count);
RawCallStub(stub);
}
// Call stub that expects its argument in r0. The argument is given
// as a result which must be the register r0.
void CallStub(CodeStub* stub, Result* arg);
// Call stub that expects its arguments in r1 and r0. The arguments
// are given as results which must be the appropriate registers.
void CallStub(CodeStub* stub, Result* arg0, Result* arg1);
// Call runtime given the number of arguments expected on (and
// removed from) the stack.
void CallRuntime(Runtime::Function* f, int arg_count);
void CallRuntime(Runtime::FunctionId id, int arg_count);
// Call runtime with sp aligned to 8 bytes.
void CallAlignedRuntime(Runtime::Function* f, int arg_count);
void CallAlignedRuntime(Runtime::FunctionId id, int arg_count);
// Invoke builtin given the number of arguments it expects on (and
// removes from) the stack.
void InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flag,
Result* arg_count_register,
int arg_count);
// Call into an IC stub given the number of arguments it removes
// from the stack. Register arguments are passed as results and
// consumed by the call.
void CallCodeObject(Handle<Code> ic,
RelocInfo::Mode rmode,
int dropped_args);
void CallCodeObject(Handle<Code> ic,
RelocInfo::Mode rmode,
Result* arg,
int dropped_args);
void CallCodeObject(Handle<Code> ic,
RelocInfo::Mode rmode,
Result* arg0,
Result* arg1,
int dropped_args,
bool set_auto_args_slots = false);
// Drop a number of elements from the top of the expression stack. May
// emit code to affect the physical frame. Does not clobber any registers
// excepting possibly the stack pointer.
void Drop(int count);
// Similar to VirtualFrame::Drop but we don't modify the actual stack.
// This is because we need to manually restore sp to the correct position.
void DropFromVFrameOnly(int count);
// Drop one element.
void Drop() { Drop(1); }
void DropFromVFrameOnly() { DropFromVFrameOnly(1); }
// Duplicate the top element of the frame.
void Dup() { PushFrameSlotAt(element_count() - 1); }
// Pop an element from the top of the expression stack. Returns a
// Result, which may be a constant or a register.
Result Pop();
// Pop and save an element from the top of the expression stack and
// emit a corresponding pop instruction.
void EmitPop(Register reg);
// Same but for multiple registers
void EmitMultiPop(RegList regs); // higher indexed registers popped first
void EmitMultiPopReversed(RegList regs); // lower first
// Push an element on top of the expression stack and emit a
// corresponding push instruction.
void EmitPush(Register reg);
// Same but for multiple registers.
void EmitMultiPush(RegList regs); // lower indexed registers are pushed first
void EmitMultiPushReversed(RegList regs); // higher first
// Push an element on the virtual frame.
void Push(Register reg);
void Push(Handle<Object> value);
void Push(Smi* value) { Push(Handle<Object>(value)); }
// Pushing a result invalidates it (its contents become owned by the frame).
void Push(Result* result) {
if (result->is_register()) {
Push(result->reg());
} else {
ASSERT(result->is_constant());
Push(result->handle());
}
result->Unuse();
}
// Nip removes zero or more elements from immediately below the top
// of the frame, leaving the previous top-of-frame value on top of
// the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
void Nip(int num_dropped);
// This pushes 4 argument slots on the stack and saves the requested 'a'
// registers; the 'a' registers are the argument registers a0 to a3.
void EmitArgumentSlots(RegList reglist);
private:
static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
static const int kContextOffset = StandardFrameConstants::kContextOffset;
static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
ZoneList<FrameElement> elements_;
// The index of the element that is at the processor's stack pointer
// (the sp register).
int stack_pointer_;
// The index of the register frame element using each register, or
// kIllegalIndex if a register is not on the frame.
int register_locations_[RegisterAllocator::kNumRegisters];
// The number of frame-allocated locals and parameters respectively.
int parameter_count() { return cgen()->scope()->num_parameters(); }
int local_count() { return cgen()->scope()->num_stack_slots(); }
// The index of the element that is at the processor's frame pointer
// (the fp register). The parameters, receiver, function, and context
// are below the frame pointer.
int frame_pointer() { return parameter_count() + 3; }
// The index of the first parameter. The receiver lies below the first
// parameter.
int param0_index() { return 1; }
// The index of the context slot in the frame. It is immediately
// below the frame pointer.
int context_index() { return frame_pointer() - 1; }
// The index of the function slot in the frame. It is below the frame
// pointer and context slot.
int function_index() { return frame_pointer() - 2; }
// The index of the first local. Between the frame pointer and the
// locals lies the return address.
int local0_index() { return frame_pointer() + 2; }
// The index of the base of the expression stack.
int expression_base_index() { return local0_index() + local_count(); }
// Convert a frame index into a frame pointer relative offset into the
// actual stack.
int fp_relative(int index) {
ASSERT(index < element_count());
ASSERT(frame_pointer() < element_count()); // FP is on the frame.
return (frame_pointer() - index) * kPointerSize;
}
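// Worked example (illustration only): with parameter_count() == 2 and
// local_count() == 3, frame_pointer() == 5, function_index() == 3,
// context_index() == 4, local0_index() == 7 and expression_base_index() == 10.
// A frame holding two expression stack values then has element_count() == 12,
// height() == 2, and fp_relative(local0_index()) == -2 * kPointerSize.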
// Record an occurrence of a register in the virtual frame. This has the
// effect of incrementing the register's external reference count and
// of updating the index of the register's location in the frame.
void Use(Register reg, int index) {
ASSERT(!is_used(reg));
set_register_location(reg, index);
cgen()->allocator()->Use(reg);
}
// Record that a register reference has been dropped from the frame. This
// decrements the register's external reference count and invalidates the
// index of the register's location in the frame.
void Unuse(Register reg) {
ASSERT(is_used(reg));
set_register_location(reg, kIllegalIndex);
cgen()->allocator()->Unuse(reg);
}
// Spill the element at a particular index---write it to memory if
// necessary, free any associated register, and forget its value if
// constant.
void SpillElementAt(int index);
// Sync the element at a particular index. If it is a register or
// constant that disagrees with the value on the stack, write it to memory.
// Keep the element type as register or constant, and clear the dirty bit.
void SyncElementAt(int index);
// Sync the range of elements in [begin, end] with memory.
void SyncRange(int begin, int end);
// Sync a single unsynced element that lies beneath or at the stack pointer.
void SyncElementBelowStackPointer(int index);
// Sync a single unsynced element that lies just above the stack pointer.
void SyncElementByPushing(int index);
// Push a copy of a frame slot (typically a local or parameter) on top of
// the frame.
void PushFrameSlotAt(int index);
// Push the value of a frame slot (typically a local or parameter) on
// top of the frame and invalidate the slot.
void TakeFrameSlotAt(int index);
// Store the value on top of the frame to a frame slot (typically a local
// or parameter).
void StoreToFrameSlotAt(int index);
// Spill all elements in registers. Spill the top spilled_args elements
// on the frame. Sync all other frame elements.
// Then drop dropped_args elements from the virtual frame, to match
// the effect of an upcoming call that will drop them from the stack.
void PrepareForCall(int spilled_args, int dropped_args);
// Move frame elements currently in registers or constants, that
// should be in memory in the expected frame, to memory.
void MergeMoveRegistersToMemory(VirtualFrame* expected);
// Make the register-to-register moves necessary to
// merge this frame with the expected frame.
// Register to memory moves must already have been made,
// and memory to register moves must follow this call.
// This is because some new memory-to-register moves are
// created in order to break cycles of register moves.
// Used in the implementation of MergeTo().
void MergeMoveRegistersToRegisters(VirtualFrame* expected);
// Make the memory-to-register and constant-to-register moves
// needed to make this frame equal the expected frame.
// Called after all register-to-memory and register-to-register
// moves have been made. After this function returns, the frames
// should be equal.
void MergeMoveMemoryToRegisters(VirtualFrame* expected);
// Invalidates a frame slot (puts an invalid frame element in it).
// Copies on the frame are correctly handled, and if this slot was
// the backing store of copies, the index of the new backing store
// is returned. Otherwise, returns kIllegalIndex.
// Register counts are correctly updated.
int InvalidateFrameSlotAt(int index);
// Call a code stub that has already been prepared for calling (via
// PrepareForCall).
void RawCallStub(CodeStub* stub);
// Calls a code object which has already been prepared for calling
// (via PrepareForCall).
void RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
bool Equals(VirtualFrame* other);
// Classes that need raw access to the elements_ array.
friend class DeferredCode;
friend class JumpTarget;
};
} } // namespace v8::internal
#endif // V8_MIPS_VIRTUAL_FRAME_MIPS_H_

View File

@ -34,6 +34,8 @@
#include "unicode-inl.h" #include "unicode-inl.h"
#if V8_TARGET_ARCH_ARM #if V8_TARGET_ARCH_ARM
#include "arm/constants-arm.h" #include "arm/constants-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/constants-mips.h"
#endif #endif
// //
@ -1101,7 +1103,6 @@ class HeapNumber: public HeapObject {
# define BIG_ENDIAN_FLOATING_POINT 1 # define BIG_ENDIAN_FLOATING_POINT 1
#endif #endif
static const int kSize = kValueOffset + kDoubleSize; static const int kSize = kValueOffset + kDoubleSize;
static const uint32_t kSignMask = 0x80000000u; static const uint32_t kSignMask = 0x80000000u;
static const uint32_t kExponentMask = 0x7ff00000u; static const uint32_t kExponentMask = 0x7ff00000u;
static const uint32_t kMantissaMask = 0xfffffu; static const uint32_t kMantissaMask = 0xfffffu;

View File

@ -151,11 +151,12 @@ int OS::ActivationFrameAlignment() {
// On EABI ARM targets this is required for fp correctness in the // On EABI ARM targets this is required for fp correctness in the
// runtime system. // runtime system.
return 8; return 8;
#else #elif V8_TARGET_ARCH_MIPS
return 8;
#endif
// With gcc 4.4 the tree vectorization optimiser can generate code // With gcc 4.4 the tree vectorization optimiser can generate code
// that requires 16 byte alignment such as movdqa on x86. // that requires 16 byte alignment such as movdqa on x86.
return 16; return 16;
#endif
} }
@ -262,6 +263,8 @@ void OS::DebugBreak() {
// which is the architecture of generated code). // which is the architecture of generated code).
#if defined(__arm__) || defined(__thumb__) #if defined(__arm__) || defined(__thumb__)
asm("bkpt 0"); asm("bkpt 0");
#elif defined(__mips__)
asm("break");
#else #else
asm("int $3"); asm("int $3");
#endif #endif
@ -713,6 +716,7 @@ static inline bool IsVmThread() {
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) { static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
#ifndef V8_HOST_ARCH_MIPS
USE(info); USE(info);
if (signal != SIGPROF) return; if (signal != SIGPROF) return;
if (active_sampler_ == NULL) return; if (active_sampler_ == NULL) return;
@ -743,6 +747,9 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
sample.sp = reinterpret_cast<Address>(mcontext.arm_sp); sample.sp = reinterpret_cast<Address>(mcontext.arm_sp);
sample.fp = reinterpret_cast<Address>(mcontext.arm_fp); sample.fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif #endif
#elif V8_HOST_ARCH_MIPS
// Implement this on MIPS.
UNIMPLEMENTED();
#endif #endif
if (IsVmThread()) if (IsVmThread())
active_sampler_->SampleStack(&sample); active_sampler_->SampleStack(&sample);
@ -752,6 +759,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
sample.state = Logger::state(); sample.state = Logger::state();
active_sampler_->Tick(&sample); active_sampler_->Tick(&sample);
#endif
} }

View File

@ -38,6 +38,8 @@
#include "x64/register-allocator-x64-inl.h" #include "x64/register-allocator-x64-inl.h"
#elif V8_TARGET_ARCH_ARM #elif V8_TARGET_ARCH_ARM
#include "arm/register-allocator-arm-inl.h" #include "arm/register-allocator-arm-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/register-allocator-mips-inl.h"
#else #else
#error Unsupported target architecture. #error Unsupported target architecture.
#endif #endif

View File

@ -36,6 +36,8 @@
#include "x64/register-allocator-x64.h" #include "x64/register-allocator-x64.h"
#elif V8_TARGET_ARCH_ARM #elif V8_TARGET_ARCH_ARM
#include "arm/register-allocator-arm.h" #include "arm/register-allocator-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/register-allocator-mips.h"
#else #else
#error Unsupported target architecture. #error Unsupported target architecture.
#endif #endif

View File

@ -34,6 +34,8 @@
#include "x64/simulator-x64.h" #include "x64/simulator-x64.h"
#elif V8_TARGET_ARCH_ARM #elif V8_TARGET_ARCH_ARM
#include "arm/simulator-arm.h" #include "arm/simulator-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/simulator-mips.h"
#else #else
#error Unsupported target architecture. #error Unsupported target architecture.
#endif #endif

View File

@ -37,6 +37,8 @@
#include "x64/virtual-frame-x64.h" #include "x64/virtual-frame-x64.h"
#elif V8_TARGET_ARCH_ARM #elif V8_TARGET_ARCH_ARM
#include "arm/virtual-frame-arm.h" #include "arm/virtual-frame-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/virtual-frame-mips.h"
#else #else
#error Unsupported target architecture. #error Unsupported target architecture.
#endif #endif

View File

@ -75,6 +75,7 @@ SOURCES = {
'arch:x64': ['test-assembler-x64.cc', 'arch:x64': ['test-assembler-x64.cc',
'test-macro-assembler-x64.cc', 'test-macro-assembler-x64.cc',
'test-log-stack-tracer.cc'], 'test-log-stack-tracer.cc'],
'arch:mips': ['test-assembler-mips.cc'],
'os:linux': ['test-platform-linux.cc'], 'os:linux': ['test-platform-linux.cc'],
'os:macos': ['test-platform-macos.cc'], 'os:macos': ['test-platform-macos.cc'],
'os:nullos': ['test-platform-nullos.cc'], 'os:nullos': ['test-platform-nullos.cc'],

View File

@ -52,3 +52,23 @@ test-api/OutOfMemoryNested: SKIP
# BUG(355): Test crashes on ARM. # BUG(355): Test crashes on ARM.
test-log/ProfLazyMode: SKIP test-log/ProfLazyMode: SKIP
[ $arch == mips ]
test-accessors: SKIP
test-alloc: SKIP
test-api: SKIP
test-compiler: SKIP
test-debug: SKIP
test-decls: SKIP
test-func-name-inference: SKIP
test-heap: SKIP
test-heap-profiler: SKIP
test-log: SKIP
test-log-utils: SKIP
test-mark-compact: SKIP
test-regexp: SKIP
test-serialize: SKIP
test-sockets: SKIP
test-strings: SKIP
test-threads: SKIP
test-thread-termination: SKIP

View File

@ -0,0 +1,257 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "disassembler.h"
#include "factory.h"
#include "macro-assembler.h"
#include "mips/macro-assembler-mips.h"
#include "mips/simulator-mips.h"
#include "cctest.h"
using namespace v8::internal;
// Define these function prototypes to match JSEntryFunction in execution.cc.
typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
typedef Object* (*F2)(int x, int y, int p2, int p3, int p4);
typedef Object* (*F3)(void* p, int p1, int p2, int p3, int p4);
static v8::Persistent<v8::Context> env;
// The test framework does not accept flags on the command line, so we set them.
static void InitializeVM() {
// Disable compilation of natives by specifying an empty natives file.
FLAG_natives_file = "";
// Enable generation of comments.
FLAG_debug_code = true;
if (env.IsEmpty()) {
env = v8::Context::New();
}
}
#define __ assm.
TEST(MIPS0) {
InitializeVM();
v8::HandleScope scope;
MacroAssembler assm(NULL, 0);
// Addition.
__ addu(v0, a0, a1);
__ jr(ra);
__ nop();
CodeDesc desc;
assm.GetCode(&desc);
Object* code = Heap::CreateCode(desc,
NULL,
Code::ComputeFlags(Code::STUB),
Handle<Object>(Heap::undefined_value()));
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
#endif
F2 f = FUNCTION_CAST<F2>(Code::cast(code)->entry());
int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(0xabc, res);
}
TEST(MIPS1) {
InitializeVM();
v8::HandleScope scope;
MacroAssembler assm(NULL, 0);
Label L, C;
__ mov(a1, a0);
__ li(v0, 0);
__ b(&C);
__ nop();
__ bind(&L);
__ add(v0, v0, a1);
__ addiu(a1, a1, -1);
__ bind(&C);
__ xori(v1, a1, 0);
__ Branch(ne, &L, v1, Operand(0));
__ nop();
__ jr(ra);
__ nop();
CodeDesc desc;
assm.GetCode(&desc);
Object* code = Heap::CreateCode(desc,
NULL,
Code::ComputeFlags(Code::STUB),
Handle<Object>(Heap::undefined_value()));
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
#endif
F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 50, 0, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(1275, res);
}
TEST(MIPS2) {
InitializeVM();
v8::HandleScope scope;
MacroAssembler assm(NULL, 0);
Label exit, error;
// ----- Test all instructions.
// Test lui, ori, and addiu, used in the li pseudo-instruction.
// This way we can then safely load registers with chosen values.
__ ori(t0, zero_reg, 0);
__ lui(t0, 0x1234);
__ ori(t0, t0, 0);
__ ori(t0, t0, 0x0f0f);
__ ori(t0, t0, 0xf0f0);
__ addiu(t1, t0, 1);
__ addiu(t2, t1, -0x10);
// Load values in temporary registers.
__ li(t0, 0x00000004);
__ li(t1, 0x00001234);
__ li(t2, 0x12345678);
__ li(t3, 0x7fffffff);
__ li(t4, 0xfffffffc);
__ li(t5, 0xffffedcc);
__ li(t6, 0xedcba988);
__ li(t7, 0x80000000);
// SPECIAL class.
__ srl(v0, t2, 8); // 0x00123456
__ sll(v0, v0, 11); // 0x91a2b000
__ sra(v0, v0, 3); // 0xf2345600
__ srav(v0, v0, t0); // 0xff234560
__ sllv(v0, v0, t0); // 0xf2345600
__ srlv(v0, v0, t0); // 0x0f234560
__ Branch(ne, &error, v0, Operand(0x0f234560));
__ nop();
__ add(v0, t0, t1); // 0x00001238
__ sub(v0, v0, t0); // 0x00001234
__ Branch(ne, &error, v0, Operand(0x00001234));
__ nop();
__ addu(v1, t3, t0);
__ Branch(ne, &error, v1, Operand(0x80000003));
__ nop();
__ subu(v1, t7, t0); // 0x7ffffffc
__ Branch(ne, &error, v1, Operand(0x7ffffffc));
__ nop();
__ and_(v0, t1, t2); // 0x00001230
__ or_(v0, v0, t1); // 0x00001234
__ xor_(v0, v0, t2); // 0x1234444c
__ nor(v0, v0, t2); // 0xedcba983
__ Branch(ne, &error, v0, Operand(0xedcba983));
__ nop();
__ slt(v0, t7, t3);
__ Branch(ne, &error, v0, Operand(0x1));
__ nop();
__ sltu(v0, t7, t3);
__ Branch(ne, &error, v0, Operand(0x0));
__ nop();
// End of SPECIAL class.
__ addi(v0, zero_reg, 0x7421); // 0x00007421
__ addi(v0, v0, -0x1); // 0x00007420
__ addiu(v0, v0, -0x20); // 0x00007400
__ Branch(ne, &error, v0, Operand(0x00007400));
__ nop();
__ addiu(v1, t3, 0x1); // 0x80000000
__ Branch(ne, &error, v1, Operand(0x80000000));
__ nop();
__ slti(v0, t1, 0x00002000); // 0x1
__ slti(v0, v0, 0xffff8000); // 0x0
__ Branch(ne, &error, v0, Operand(0x0));
__ nop();
__ sltiu(v0, t1, 0x00002000); // 0x1
__ sltiu(v0, v0, 0x00008000); // 0x1
__ Branch(ne, &error, v0, Operand(0x1));
__ nop();
__ andi(v0, t1, 0xf0f0); // 0x00001030
__ ori(v0, v0, 0x8a00); // 0x00009a30
__ xori(v0, v0, 0x83cc); // 0x000019fc
__ Branch(ne, &error, v0, Operand(0x000019fc));
__ nop();
__ lui(v1, 0x8123); // 0x81230000
__ Branch(ne, &error, v1, Operand(0x81230000));
__ nop();
// Everything was correctly executed. Load the expected result.
__ li(v0, 0x31415926);
__ b(&exit);
__ nop();
__ bind(&error);
// Got an error. Return a wrong result.
__ bind(&exit);
__ jr(ra);
__ nop();
CodeDesc desc;
assm.GetCode(&desc);
Object* code = Heap::CreateCode(desc,
NULL,
Code::ComputeFlags(Code::STUB),
Handle<Object>(Heap::undefined_value()));
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
#endif
F2 f = FUNCTION_CAST<F2>(Code::cast(code)->entry());
int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(0x31415926, res);
}
#undef __

View File

@ -653,6 +653,8 @@ typedef RegExpMacroAssemblerIA32 ArchRegExpMacroAssembler;
typedef RegExpMacroAssemblerX64 ArchRegExpMacroAssembler; typedef RegExpMacroAssemblerX64 ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_ARM #elif V8_TARGET_ARCH_ARM
typedef RegExpMacroAssemblerARM ArchRegExpMacroAssembler; typedef RegExpMacroAssemblerARM ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_MIPS
typedef RegExpMacroAssembler ArchRegExpMacroAssembler;
#endif #endif
class ContextInitializer { class ContextInitializer {

View File

@ -271,3 +271,8 @@ chapter15/15.7: UNIMPLEMENTED
chapter15/15.9: UNIMPLEMENTED chapter15/15.9: UNIMPLEMENTED
chapter15/15.10: UNIMPLEMENTED chapter15/15.10: UNIMPLEMENTED
chapter15/15.12: UNIMPLEMENTED chapter15/15.12: UNIMPLEMENTED
[ $arch == mips ]
# Skip all tests on MIPS.
*: SKIP

View File

@ -29,3 +29,8 @@ prefix message
# All tests in the bug directory are expected to fail. # All tests in the bug directory are expected to fail.
bugs: FAIL bugs: FAIL
[ $arch == mips ]
# Skip all tests on MIPS.
*: SKIP

View File

@ -64,3 +64,7 @@ array-splice: PASS || TIMEOUT
# Skip long running test in debug mode on ARM. # Skip long running test in debug mode on ARM.
string-indexof-2: PASS, SKIP if $mode == debug string-indexof-2: PASS, SKIP if $mode == debug
[ $arch == mips ]
# Skip all tests on MIPS.
*: SKIP

View File

@ -316,3 +316,8 @@ S15.9.5.9_A1_T2: FAIL_OK
S11.4.3_A3.6: FAIL_OK S11.4.3_A3.6: FAIL_OK
S15.10.7_A3_T2: FAIL_OK S15.10.7_A3_T2: FAIL_OK
S15.10.7_A3_T1: FAIL_OK S15.10.7_A3_T1: FAIL_OK
[ $arch == mips ]
# Skip all tests on MIPS.
*: SKIP