// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
#define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
#include "assembler.h"
#include "mips/assembler-mips.h"
#include "v8globals.h"
namespace v8 {
namespace internal {
// Forward declaration.
class JumpTarget;
// Reserved Register Usage Summary.
//
// Registers t8, t9, and at are reserved for use by the MacroAssembler.
//
// The programmer should know that the MacroAssembler may clobber these three,
// but won't touch other registers except in special cases.
//
// Per the MIPS ABI, register t9 must be used for indirect function call
// via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
// trying to update gp register for position-independent-code. Whenever
// MIPS generated code calls C code, it must be via t9 register.
// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
// No special flags.
NO_ALLOCATION_FLAGS = 0,
// Return the pointer to the allocated object, already tagged as a heap object.
TAG_OBJECT = 1 << 0,
// The content of the result register already contains the allocation top in
// new space.
RESULT_CONTAINS_TOP = 1 << 1,
// The requested size of the space to allocate is specified in
// words instead of bytes.
SIZE_IN_WORDS = 1 << 2
};
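// Illustrative sketch (not part of the original header; hypothetical
// registers and label, assuming the usual '#define __ ACCESS_MASM(masm)'
// convention): the flags combine bitwise, e.g. to allocate a size given in
// words and get back a tagged pointer:
//   __ AllocateInNewSpace(size_in_words, v0, t0, t1, &gc_required,
//       static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));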
// Flags used for the ObjectToDoubleFPURegister function.
enum ObjectToDoubleFlags {
// No special flags.
NO_OBJECT_TO_DOUBLE_FLAGS = 0,
// Object is known to be a non smi.
OBJECT_NOT_SMI = 1 << 0,
// Don't load NaNs or infinities; branch to the non-number case instead.
AVOID_NANS_AND_INFINITIES = 1 << 1
};
// Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
enum BranchDelaySlot {
USE_DELAY_SLOT,
PROTECT
};
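// Illustrative sketch (hypothetical registers): with USE_DELAY_SLOT the
// instruction emitted right after the branching macro lands in the delay
// slot and executes before the jump takes effect:
//   __ Ret(USE_DELAY_SLOT);
//   __ mov(v0, a0);  // Fills the delay slot of the 'jr ra'.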
// Flags used for the li macro-assembler function.
enum LiFlags {
// If the constant value can be represented in just 16 bits, then
// optimize the li to use a single instruction, rather than lui/ori pair.
OPTIMIZE_SIZE = 0,
// Always use 2 instructions (lui/ori pair), even if the constant could
// be loaded with just one, so that this value is patchable later.
CONSTANT_SIZE = 1
};
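// Illustrative sketch (hypothetical operands): CONSTANT_SIZE forces the
// two-instruction lui/ori form so the constant can later be patched in place:
//   __ li(t9, Operand(code_target), CONSTANT_SIZE);  // Patchable load.
//   __ li(t0, Operand(42));  // OPTIMIZE_SIZE default; may emit one instr.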
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
bool AreAliased(Register r1, Register r2, Register r3, Register r4);
// -----------------------------------------------------------------------------
// Static helper functions.
inline MemOperand ContextOperand(Register context, int index) {
return MemOperand(context, Context::SlotOffset(index));
}
inline MemOperand GlobalObjectOperand() {
return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
}
// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset) {
return MemOperand(object, offset - kHeapObjectTag);
}
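// Illustrative sketch: the typical use is loading a field of a tagged
// HeapObject, e.g. its map; FieldMemOperand subtracts the heap-object tag:
//   __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));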
// Generate a MemOperand for storing arguments 5..N on the stack
// when calling CallCFunction().
inline MemOperand CFunctionArgumentOperand(int index) {
ASSERT(index > kCArgSlotCount);
// Argument 5 takes the slot just past the four Arg-slots.
int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
return MemOperand(sp, offset);
}
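// Illustrative sketch: after PrepareCallCFunction has set up the frame,
// the fifth C argument is stored like this (see CallCFunction below):
//   __ sw(t0, CFunctionArgumentOperand(5));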
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
// The isolate parameter can be NULL if the macro assembler should
// not use isolate-dependent functionality. In this case, it's the
// responsibility of the caller to never invoke such a function on the
// macro assembler.
MacroAssembler(Isolate* isolate, void* buffer, int size);
// Arguments macros.
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
#define COND_ARGS cond, r1, r2
// Cases when relocation is not needed.
#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
void Name(target_type target, BranchDelaySlot bd = PROTECT); \
inline void Name(BranchDelaySlot bd, target_type target) { \
Name(target, bd); \
} \
void Name(target_type target, \
COND_TYPED_ARGS, \
BranchDelaySlot bd = PROTECT); \
inline void Name(BranchDelaySlot bd, \
target_type target, \
COND_TYPED_ARGS) { \
Name(target, COND_ARGS, bd); \
}
#define DECLARE_BRANCH_PROTOTYPES(Name) \
DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
DECLARE_NORELOC_PROTOTYPE(Name, int16_t)
DECLARE_BRANCH_PROTOTYPES(Branch)
DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
#undef DECLARE_BRANCH_PROTOTYPES
#undef COND_TYPED_ARGS
#undef COND_ARGS
// Jump, Call, and Ret pseudo instructions implementing inter-working.
#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void Jump(Register target, COND_ARGS);
void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
static int CallSize(Register target, COND_ARGS);
void Call(Register target, COND_ARGS);
static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
static int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId ast_id = TypeFeedbackId::None(),
COND_ARGS);
void Call(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId ast_id = TypeFeedbackId::None(),
COND_ARGS);
void Ret(COND_ARGS);
inline void Ret(BranchDelaySlot bd, Condition cond = al,
Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
Ret(cond, rs, rt, bd);
}
void Branch(Label* L,
Condition cond,
Register rs,
Heap::RootListIndex index,
BranchDelaySlot bdslot = PROTECT);
#undef COND_ARGS
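// Illustrative sketch (hypothetical register): the optional condition
// arguments make these pseudo-instructions conditional, e.g. return only
// when t0 equals zero:
//   __ Ret(eq, t0, Operand(zero_reg));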
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
void Drop(int count,
Condition cond = cc_always,
Register reg = no_reg,
const Operand& op = Operand(no_reg));
// Trivial case of DropAndRet that utilizes the delay slot and only emits
// 2 instructions.
void DropAndRet(int drop);
void DropAndRet(int drop,
Condition cond,
Register reg,
const Operand& op);
// Swap two registers. If the scratch register is omitted then a slightly
// less efficient form using xor instead of mov is emitted.
void Swap(Register reg1, Register reg2, Register scratch = no_reg);
void Call(Label* target);
inline void Move(Register dst, Register src) {
if (!dst.is(src)) {
mov(dst, src);
}
}
inline void Move(FPURegister dst, FPURegister src) {
if (!dst.is(src)) {
mov_d(dst, src);
}
}
inline void Move(Register dst_low, Register dst_high, FPURegister src) {
mfc1(dst_low, src);
mfc1(dst_high, FPURegister::from_code(src.code() + 1));
}
inline void Move(FPURegister dst, Register src_low, Register src_high) {
mtc1(src_low, dst);
mtc1(src_high, FPURegister::from_code(dst.code() + 1));
}
// Conditional move.
void Move(FPURegister dst, double imm);
void Movz(Register rd, Register rs, Register rt);
void Movn(Register rd, Register rs, Register rt);
void Movt(Register rd, Register rs, uint16_t cc = 0);
void Movf(Register rd, Register rs, uint16_t cc = 0);
void Clz(Register rd, Register rs);
// Jump unconditionally to given label.
// We NEED a nop in the branch delay slot, as it is used by v8, for example
// in CodeGenerator::ProcessDeferred().
// Currently the branch delay slot is filled by the MacroAssembler.
// Use b(Label) instead for code generation.
void jmp(Label* L) {
Branch(L);
}
// Load an object from the root table.
void LoadRoot(Register destination,
Heap::RootListIndex index);
void LoadRoot(Register destination,
Heap::RootListIndex index,
Condition cond, Register src1, const Operand& src2);
// Store an object to the root table.
void StoreRoot(Register source,
Heap::RootListIndex index);
void StoreRoot(Register source,
Heap::RootListIndex index,
Condition cond, Register src1, const Operand& src2);
void LoadHeapObject(Register dst, Handle<HeapObject> object);
void LoadObject(Register result, Handle<Object> object) {
if (object->IsHeapObject()) {
LoadHeapObject(result, Handle<HeapObject>::cast(object));
} else {
li(result, object);
}
}
// ---------------------------------------------------------------------------
// GC Support
void IncrementalMarkingRecordWriteHelper(Register object,
Register value,
Register address);
enum RememberedSetFinalAction {
kReturnAtEnd,
kFallThroughAtEnd
};
// Record in the remembered set the fact that we have a pointer to new space
// at the address pointed to by the addr register. Only works if addr is not
// in new space.
void RememberedSetHelper(Register object, // Used for debug code.
Register addr,
Register scratch,
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then);
void CheckPageFlag(Register object,
Register scratch,
int mask,
Condition cc,
Label* condition_met);
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but it will be clobbered.
void JumpIfNotInNewSpace(Register object,
Register scratch,
Label* branch) {
InNewSpace(object, scratch, ne, branch);
}
// Check if object is in new space. Jumps if the object is in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfInNewSpace(Register object,
Register scratch,
Label* branch) {
InNewSpace(object, scratch, eq, branch);
}
// Check if an object has a given incremental marking color.
void HasColor(Register object,
Register scratch0,
Register scratch1,
Label* has_color,
int first_bit,
int second_bit);
void JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
Label* on_black);
// Checks the color of an object. If the object is already grey or black
// then we just fall through, since it is already live. If it is white and
// we can determine that it doesn't need to be scanned, then we just mark it
// black and fall through. For the rest we jump to the label so the
// incremental marker can fix its assumptions.
void EnsureNotWhite(Register object,
Register scratch1,
Register scratch2,
Register scratch3,
Label* object_is_white_and_not_data);
// Conservatively detects whether an object is data-only, i.e. it does not
// need to be scanned by the garbage collector.
void JumpIfDataObject(Register value,
Register scratch,
Label* not_data_object);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
// The offset is the offset from the start of the object, not the offset from
// the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
void RecordWriteField(
Register object,
int offset,
Register value,
Register scratch,
RAStatus ra_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK);
// As above, but the offset has the tag presubtracted. For use with
// MemOperand(reg, off).
inline void RecordWriteContextSlot(
Register context,
int offset,
Register value,
Register scratch,
RAStatus ra_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK) {
RecordWriteField(context,
offset + kHeapObjectTag,
value,
scratch,
ra_status,
save_fp,
remembered_set_action,
smi_check);
}
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
// address registers are clobbered by the operation.
void RecordWrite(
Register object,
Register address,
Register value,
RAStatus ra_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK);
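// Illustrative sketch (hypothetical offset and registers): a field store
// into a heap object followed by its matching write barrier:
//   __ sw(value, FieldMemOperand(object, JSObject::kPropertiesOffset));
//   __ RecordWriteField(object, JSObject::kPropertiesOffset, value, scratch,
//                       kRAHasNotBeenSaved, kDontSaveFPRegs);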
// ---------------------------------------------------------------------------
// Inline caching support.
// Generate code for checking access rights - used for security checks
// on access to global objects across environments. The holder register
// is left untouched, whereas both scratch registers are clobbered.
void CheckAccessGlobalProxy(Register holder_reg,
Register scratch,
Label* miss);
void GetNumberHash(Register reg0, Register scratch);
void LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
Register result,
Register reg0,
Register reg1,
Register reg2);
inline void MarkCode(NopMarkerTypes type) {
nop(type);
}
// Check if the given instruction is a 'type' marker.
// i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
// nop(type)). These instructions are generated to mark special locations in
// the code, like some special IC code.
static inline bool IsMarkedCode(Instr instr, int type) {
ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
return IsNop(instr, type);
}
static inline int GetCodeMarker(Instr instr) {
uint32_t opcode = ((instr & kOpcodeMask));
uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);
// Return <n> if we have a sll zero_reg, zero_reg, n
// else return -1.
bool sllzz = (opcode == SLL &&
rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
rs == static_cast<uint32_t>(ToNumber(zero_reg)));
int type =
(sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1;
ASSERT((type == -1) ||
((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
return type;
}
// ---------------------------------------------------------------------------
// Allocation support.
// Allocate an object in new space. The object_size is specified
// either in bytes or in words if the allocation flag SIZE_IN_WORDS
// is passed. If the new space is exhausted control continues at the
// gc_required label. The allocated object is returned in result. If
// the TAG_OBJECT flag is passed, the result is tagged as
// a heap object. All registers are clobbered also when control
// continues at the gc_required label.
void AllocateInNewSpace(int object_size,
Register result,
Register scratch1,
Register scratch2,
Label* gc_required,
AllocationFlags flags);
void AllocateInNewSpace(Register object_size,
Register result,
Register scratch1,
Register scratch2,
Label* gc_required,
AllocationFlags flags);
// Undo allocation in new space. The object passed and objects allocated after
// it will no longer be allocated. The caller must make sure that no pointers
// are left to the object(s) no longer allocated as they would be invalid when
// allocation is undone.
void UndoAllocationInNewSpace(Register object, Register scratch);
void AllocateTwoByteString(Register result,
Register length,
Register scratch1,
Register scratch2,
Register scratch3,
Label* gc_required);
void AllocateAsciiString(Register result,
Register length,
Register scratch1,
Register scratch2,
Register scratch3,
Label* gc_required);
void AllocateTwoByteConsString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required);
void AllocateAsciiConsString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required);
void AllocateTwoByteSlicedString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required);
void AllocateAsciiSlicedString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required);
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed. All registers are clobbered also
// when control continues at the gc_required label.
void AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
Register heap_number_map,
Label* gc_required);
void AllocateHeapNumberWithValue(Register result,
FPURegister value,
Register scratch1,
Register scratch2,
Label* gc_required);
// ---------------------------------------------------------------------------
// Instruction macros.
#define DEFINE_INSTRUCTION(instr) \
void instr(Register rd, Register rs, const Operand& rt); \
void instr(Register rd, Register rs, Register rt) { \
instr(rd, rs, Operand(rt)); \
} \
void instr(Register rs, Register rt, int32_t j) { \
instr(rs, rt, Operand(j)); \
}
#define DEFINE_INSTRUCTION2(instr) \
void instr(Register rs, const Operand& rt); \
void instr(Register rs, Register rt) { \
instr(rs, Operand(rt)); \
} \
void instr(Register rs, int32_t j) { \
instr(rs, Operand(j)); \
}
DEFINE_INSTRUCTION(Addu);
DEFINE_INSTRUCTION(Subu);
DEFINE_INSTRUCTION(Mul);
DEFINE_INSTRUCTION2(Mult);
DEFINE_INSTRUCTION2(Multu);
DEFINE_INSTRUCTION2(Div);
DEFINE_INSTRUCTION2(Divu);
DEFINE_INSTRUCTION(And);
DEFINE_INSTRUCTION(Or);
DEFINE_INSTRUCTION(Xor);
DEFINE_INSTRUCTION(Nor);
DEFINE_INSTRUCTION2(Neg);
DEFINE_INSTRUCTION(Slt);
DEFINE_INSTRUCTION(Sltu);
// MIPS32 R2 instruction macro.
DEFINE_INSTRUCTION(Ror);
#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2
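// Illustrative sketch: each macro takes a register or an immediate; an
// immediate that does not fit the machine instruction is first materialized
// into the reserved 'at' register:
//   __ Addu(v0, a0, Operand(1));           // One addiu.
//   __ Addu(v0, a0, Operand(0x12345678));  // lui/ori into 'at', then addu.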
// ---------------------------------------------------------------------------
// Pseudo-instructions.
void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
// Load int32 in the rd register.
void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
li(rd, Operand(j), mode);
}
inline void li(Register dst, Handle<Object> value,
LiFlags mode = OPTIMIZE_SIZE) {
li(dst, Operand(value), mode);
}
// Push multiple registers on the stack.
// Registers are saved in numerical order, with higher numbered registers
// saved in higher memory addresses.
void MultiPush(RegList regs);
void MultiPushReversed(RegList regs);
void MultiPushFPU(RegList regs);
void MultiPushReversedFPU(RegList regs);
// Lower case push() for compatibility with arch-independent code.
void push(Register src) {
Addu(sp, sp, Operand(-kPointerSize));
sw(src, MemOperand(sp, 0));
}
// Push a handle.
void Push(Handle<Object> handle);
void Push(Smi* smi) { Push(Handle<Smi>(smi)); }
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
Subu(sp, sp, Operand(2 * kPointerSize));
sw(src1, MemOperand(sp, 1 * kPointerSize));
sw(src2, MemOperand(sp, 0 * kPointerSize));
}
// Push three registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3) {
Subu(sp, sp, Operand(3 * kPointerSize));
sw(src1, MemOperand(sp, 2 * kPointerSize));
sw(src2, MemOperand(sp, 1 * kPointerSize));
sw(src3, MemOperand(sp, 0 * kPointerSize));
}
// Push four registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4) {
Subu(sp, sp, Operand(4 * kPointerSize));
sw(src1, MemOperand(sp, 3 * kPointerSize));
sw(src2, MemOperand(sp, 2 * kPointerSize));
sw(src3, MemOperand(sp, 1 * kPointerSize));
sw(src4, MemOperand(sp, 0 * kPointerSize));
}
void Push(Register src, Condition cond, Register tst1, Register tst2) {
// Since we don't have conditional execution we use a Branch.
Branch(3, cond, tst1, Operand(tst2));
Subu(sp, sp, Operand(kPointerSize));
sw(src, MemOperand(sp, 0));
}
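// Illustrative sketch: a multi-register Push is equivalent to pushing the
// operands left to right, so the rightmost register ends up at the lowest
// address:
//   __ Push(a0, a1, a2);  // a0 at sp+8, a1 at sp+4, a2 at sp+0.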
// Pops multiple values from the stack and loads them into the
// registers specified in regs. Pop order is the opposite of MultiPush.
void MultiPop(RegList regs);
void MultiPopReversed(RegList regs);
void MultiPopFPU(RegList regs);
void MultiPopReversedFPU(RegList regs);
// Lower case pop() for compatibility with arch-independent code.
void pop(Register dst) {
lw(dst, MemOperand(sp, 0));
Addu(sp, sp, Operand(kPointerSize));
}
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2) {
ASSERT(!src1.is(src2));
lw(src2, MemOperand(sp, 0 * kPointerSize));
lw(src1, MemOperand(sp, 1 * kPointerSize));
Addu(sp, sp, 2 * kPointerSize);
}
// Pop three registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3) {
lw(src3, MemOperand(sp, 0 * kPointerSize));
lw(src2, MemOperand(sp, 1 * kPointerSize));
lw(src1, MemOperand(sp, 2 * kPointerSize));
Addu(sp, sp, 3 * kPointerSize);
}
void Pop(uint32_t count = 1) {
Addu(sp, sp, Operand(count * kPointerSize));
}
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
void PushSafepointRegisters();
void PopSafepointRegisters();
void PushSafepointRegistersAndDoubles();
void PopSafepointRegistersAndDoubles();
// Store value in register src in the safepoint stack slot for
// register dst.
void StoreToSafepointRegisterSlot(Register src, Register dst);
void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst);
// Load the value of the src register from its safepoint stack slot
// into register dst.
void LoadFromSafepointRegisterSlot(Register dst, Register src);
// Flush the I-cache from asm code. You should use CPU::FlushICache from C.
// Does not handle errors.
void FlushICache(Register address, unsigned instructions);
// MIPS32 R2 instruction macro.
void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
// ---------------------------------------------------------------------------
// FPU macros. These do not handle special cases like NaN or +- inf.
// Convert unsigned word to double.
void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
// Convert double to unsigned word.
void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
void Trunc_w_d(FPURegister fd, FPURegister fs);
void Round_w_d(FPURegister fd, FPURegister fs);
void Floor_w_d(FPURegister fd, FPURegister fs);
void Ceil_w_d(FPURegister fd, FPURegister fs);
// Wrapper function for the different cmp/branch types.
void BranchF(Label* target,
Label* nan,
Condition cc,
FPURegister cmp1,
FPURegister cmp2,
BranchDelaySlot bd = PROTECT);
// Alternate (inline) version for better readability with USE_DELAY_SLOT.
inline void BranchF(BranchDelaySlot bd,
Label* target,
Label* nan,
Condition cc,
FPURegister cmp1,
FPURegister cmp2) {
BranchF(target, nan, cc, cmp1, cmp2, bd);
}
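// Illustrative sketch (hypothetical labels): branch when f12 < f14, and
// divert to nan_case if either operand is NaN:
//   __ BranchF(&less_than, &nan_case, lt, f12, f14);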
// Convert the HeapNumber pointed to by source to a 32-bit signed integer
// dest. If the HeapNumber does not fit into a 32-bit signed integer, branch
// to the not_int32 label. If FPU is available, double_scratch is used but
// not scratch2.
void ConvertToInt32(Register source,
Register dest,
Register scratch,
Register scratch2,
FPURegister double_scratch,
Label *not_int32);
// Truncates a double using a specific rounding mode, and writes the value
// to the result register.
// The except_flag will contain any exceptions caused by the instruction.
// If check_inexact is kDontCheckForInexactConversion, then the inexact
// exception is masked.
void EmitFPUTruncate(FPURoundingMode rounding_mode,
Register result,
DoubleRegister double_input,
Register scratch,
DoubleRegister double_scratch,
Register except_flag,
CheckForInexactConversion check_inexact
= kDontCheckForInexactConversion);
// Helper for EmitECMATruncate.
// This will truncate a floating-point value outside of the signed 32-bit
// integer range to a 32-bit signed integer.
// Expects the double value loaded in input_high and input_low.
// Exits with the answer in 'result'.
// Note that this code does not work for values within the 32-bit range!
void EmitOutOfInt32RangeTruncate(Register result,
Register input_high,
Register input_low,
Register scratch);
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
// Exits with 'result' holding the answer and all other registers clobbered.
void EmitECMATruncate(Register result,
FPURegister double_input,
FPURegister single_scratch,
Register scratch,
Register scratch2,
Register scratch3);
// Enter exit frame.
// argc - argument count to be dropped by LeaveExitFrame.
// save_doubles - saves FPU registers on stack, currently disabled.
// stack_space - extra stack space.
void EnterExitFrame(bool save_doubles,
int stack_space = 0);
// Leave the current exit frame.
void LeaveExitFrame(bool save_doubles,
Register arg_count,
bool do_return = false);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
// Make sure the stack is aligned. Only emits code in debug mode.
void AssertStackIsAligned();
void LoadContext(Register dst, int context_chain_length);
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
// map_in_out is the cached Array map in the native context of
// expected_kind.
void LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match);
// Load the initial map for new Arrays from a JSFunction.
void LoadInitialArrayMap(Register function_in,
Register scratch,
Register map_out,
bool can_have_holes);
void LoadGlobalFunction(int index, Register function);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
void LoadGlobalFunctionInitialMap(Register function,
Register map,
Register scratch);
void InitializeRootRegister() {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
li(kRootRegister, Operand(roots_array_start));
}
// -------------------------------------------------------------------------
// JavaScript invokes.
// Set up call kind marking in t1. The method takes t1 as an
// explicit first parameter to make the code more readable at the
// call sites.
void SetCallKind(Register dst, CallKind kind);
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind);
void InvokeCode(Handle<Code> code,
const ParameterCount& expected,
const ParameterCount& actual,
RelocInfo::Mode rmode,
InvokeFlag flag,
CallKind call_kind);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind);
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind);
void IsObjectJSObjectType(Register heap_object,
Register map,
Register scratch,
Label* fail);
void IsInstanceJSObjectType(Register map,
Register scratch,
Label* fail);
void IsObjectJSStringType(Register object,
Register scratch,
Label* fail);
#ifdef ENABLE_DEBUGGER_SUPPORT
// -------------------------------------------------------------------------
// Debugger Support.
void DebugBreak();
#endif
// -------------------------------------------------------------------------
// Exception handling.
// Push a new try handler and link into try handler chain.
void PushTryHandler(StackHandler::Kind kind, int handler_index);
// Unlink the stack handler on top of the stack from the try handler chain.
// Must preserve the result register.
void PopTryHandler();
// Passes thrown value to the handler of top of the try handler chain.
void Throw(Register value);
// Propagates an uncatchable exception to the top of the current JS stack's
// handler chain.
void ThrowUncatchable(Register value);
// Copies a fixed number of fields of heap objects from src to dst.
void CopyFields(Register dst, Register src, RegList temps, int field_count);
// Copies a number of bytes from src to dst. All registers are clobbered. On
// exit src and dst will point to the place just after where the last byte was
// read or written and length will be zero.
void CopyBytes(Register src,
Register dst,
Register length,
Register scratch);
// Initialize fields with filler values. Fields starting at |start_offset|
// up to but not including |end_offset| are overwritten with the value in
// |filler|. At the end of the loop, |start_offset| takes the value of
// |end_offset|.
void InitializeFieldsWithFiller(Register start_offset,
Register end_offset,
Register filler);
// -------------------------------------------------------------------------
// Support functions.
// Try to get the prototype of a function and put the value in
// the result register. Checks that the function really is a
// function and jumps to the miss label if the fast checks fail. The
// function register will be untouched; the other registers may be
// clobbered.
void TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
Label* miss,
bool miss_on_bound_function = false);
void GetObjectType(Register function,
Register map,
Register type_reg);
// Check if a map for a JSObject indicates that the object has fast elements.
// Jump to the specified label if it does not.
void CheckFastElements(Register map,
Register scratch,
Label* fail);
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map,
Register scratch,
Label* fail);
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
void CheckFastSmiElements(Register map,
Register scratch,
Label* fail);
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
// the FastDoubleElements array elements. Otherwise jump to fail, in which
// case scratch2, scratch3 and scratch4 are unmodified.
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
// All regs below here overwritten.
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
Label* fail,
int elements_offset = 0);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
// "branch_to" if the result of the comparison is "cond". If multiple map
// compares are required, the compare sequence branches to early_success.
void CompareMapAndBranch(Register obj,
Register scratch,
Handle<Map> map,
Label* early_success,
Condition cond,
Label* branch_to,
CompareMapMode mode = REQUIRE_EXACT_MAP);
// As above, but the map of the object is already loaded into the register
// which is preserved by the code generated.
void CompareMapAndBranch(Register obj_map,
Handle<Map> map,
Label* early_success,
Condition cond,
Label* branch_to,
CompareMapMode mode = REQUIRE_EXACT_MAP);
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
// heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
// against maps that are ElementsKind transition maps of the specified map.
void CheckMap(Register obj,
Register scratch,
Handle<Map> map,
Label* fail,
SmiCheckType smi_check_type,
CompareMapMode mode = REQUIRE_EXACT_MAP);
void CheckMap(Register obj,
Register scratch,
Heap::RootListIndex index,
Label* fail,
SmiCheckType smi_check_type);
// Check if the map of an object is equal to a specified map and branch to a
// specified target if equal. Skip the smi check if not required (object is
// known to be a heap object)
void DispatchMap(Register obj,
Register scratch,
Handle<Map> map,
Handle<Code> success,
SmiCheckType smi_check_type);
// Generates code for reporting that an illegal operation has
// occurred.
void IllegalOperation(int num_arguments);
// Load and check the instance type of an object for being a string.
// Loads the type into the second argument register.
// Returns a condition that will be enabled if the object was a string.
Condition IsObjectStringType(Register obj,
Register type,
Register result) {
lw(type, FieldMemOperand(obj, HeapObject::kMapOffset));
lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
And(type, type, Operand(kIsNotStringMask));
ASSERT_EQ(0, kStringTag);
return eq;
}
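// Illustrative usage sketch (hypothetical registers and label): the
// returned condition is checked against the masked type register:
//   Condition is_string = __ IsObjectStringType(obj, t0, t0);
//   __ Branch(&on_string, is_string, t0, Operand(zero_reg));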
// Picks out an array index from the hash field.
// Register use:
// hash - holds the index's hash. Clobbered.
// index - holds the overwritten index on exit.
void IndexFromHash(Register hash, Register index);
// Get the number of least significant bits from a register.
void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
// Load the value of a number object into a FPU double register. If the
// object is not a number a jump to the label not_number is performed
// and the FPU double register is unchanged.
void ObjectToDoubleFPURegister(
Register object,
FPURegister value,
Register scratch1,
Register scratch2,
Register heap_number_map,
Label* not_number,
ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);
// Load the value of a smi object into a FPU double register. The register
// scratch1 can be the same register as smi in which case smi will hold the
// untagged value afterwards.
void SmiToDoubleFPURegister(Register smi,
FPURegister value,
Register scratch1);
// -------------------------------------------------------------------------
// Overflow handling functions.
// Usage: first call the appropriate arithmetic function, then call one of the
// jump functions with the overflow_dst register as the second parameter.
void AdduAndCheckForOverflow(Register dst,
Register left,
Register right,
Register overflow_dst,
Register scratch = at);
void SubuAndCheckForOverflow(Register dst,
Register left,
Register right,
Register overflow_dst,
Register scratch = at);
void BranchOnOverflow(Label* label,
Register overflow_check,
BranchDelaySlot bd = PROTECT) {
Branch(label, lt, overflow_check, Operand(zero_reg), bd);
}
void BranchOnNoOverflow(Label* label,
Register overflow_check,
BranchDelaySlot bd = PROTECT) {
Branch(label, ge, overflow_check, Operand(zero_reg), bd);
}
void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
Ret(lt, overflow_check, Operand(zero_reg), bd);
}
void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
Ret(ge, overflow_check, Operand(zero_reg), bd);
}
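// Illustrative sketch (hypothetical label): checked addition, then branch
// on the sign of the overflow register:
//   __ AdduAndCheckForOverflow(v0, a0, a1, t3);
//   __ BranchOnOverflow(&call_runtime, t3);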
// -------------------------------------------------------------------------
// Runtime calls.
// See comments at the beginning of CEntryStub::Generate.
inline void PrepareCEntryArgs(int num_args) {
li(s0, num_args);
li(s1, (num_args - 1) * kPointerSize);
}
inline void PrepareCEntryFunction(const ExternalReference& ref) {
li(s2, Operand(ref));
}
// Call a code stub.
void CallStub(CodeStub* stub,
Condition cond = cc_always,
Register r1 = zero_reg,
const Operand& r2 = Operand(zero_reg),
BranchDelaySlot bd = PROTECT);
// Tail call a code stub (jump).
void TailCallStub(CodeStub* stub);
void CallJSExitStub(CodeStub* stub);
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments);
void CallRuntimeSaveDoubles(Runtime::FunctionId id);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments);
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
int num_arguments,
BranchDelaySlot bd = PROTECT);
// Tail call of a runtime routine (jump).
// Like JumpToExternalReference, but also takes care of passing the number
// of parameters.
void TailCallExternalReference(const ExternalReference& ext,
int num_arguments,
int result_size);
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size);
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
// Before calling a C-function from generated code, align arguments on stack
// and add space for the four mips argument slots.
// After aligning the frame, non-register arguments must be stored on the
// stack, after the argument-slots using helper: CFunctionArgumentOperand().
// The argument count assumes all arguments are word sized.
// Some compilers/platforms require the stack to be aligned when calling
// C++ code.
// Needs a scratch register to do some arithmetic. This register will be
// trashed.
void PrepareCallCFunction(int num_reg_arguments,
int num_double_registers,
Register scratch);
void PrepareCallCFunction(int num_reg_arguments,
Register scratch);
// Arguments 1-4 are placed in registers a0 through a3, respectively.
// Arguments 5..n are stored to the stack using the following:
// sw(t0, CFunctionArgumentOperand(5));
// Calls a C function and cleans up the space for arguments allocated
// by PrepareCallCFunction. The called function is not allowed to trigger a
// garbage collection, since that might move the code and invalidate the
// return address (unless this is somehow accounted for by the called
// function).
void CallCFunction(ExternalReference function, int num_arguments);
void CallCFunction(Register function, int num_arguments);
void CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments);
void CallCFunction(Register function,
int num_reg_arguments,
int num_double_arguments);
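  // A sketch of a two-word-argument C call (first_arg, second_arg and ref
  // are placeholders, not part of this interface):
  //   PrepareCallCFunction(2, t0);  // t0 is the trashed scratch register.
  //   li(a0, Operand(first_arg));   // Arguments 1-4 travel in a0..a3.
  //   li(a1, Operand(second_arg));
  //   CallCFunction(ref, 2);        // ref is some ExternalReference.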
void GetCFunctionDoubleResult(const DoubleRegister dst);
  // There are two ways of passing double arguments on MIPS, depending on
  // whether a soft or hard floating point ABI is used. These functions
  // abstract parameter passing for the three different argument shapes we
  // use when calling C functions from generated code.
void SetCallCDoubleArguments(DoubleRegister dreg);
void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
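  // E.g., to call double f(double, double) with operands held in f4 and f6
  // (register choice illustrative; the soft-float case moves the values to
  // core registers internally):
  //   SetCallCDoubleArguments(f4, f6);
  //   CallCFunction(ref, 0, 2);  // No word arguments, two double arguments.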
  // Calls an API function. Allocates HandleScope, extracts returned value
  // from handle and propagates exceptions. Restores context. stack_space is
  // the space to be unwound on exit (it includes the space for the JS call
  // arguments and the additional space allocated for the fast call).
void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
// Jump to the builtin routine.
void JumpToExternalReference(const ExternalReference& builtin,
BranchDelaySlot bd = PROTECT);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
const CallWrapper& call_wrapper = NullCallWrapper());
  // Store the code object for the given builtin in the target register and
  // set up the function in a1.
void GetBuiltinEntry(Register target, Builtins::JavaScript id);
// Store the function for the given builtin in the target register.
void GetBuiltinFunction(Register target, Builtins::JavaScript id);
struct Unresolved {
int pc;
uint32_t flags; // See Bootstrapper::FixupFlags decoders/encoders.
const char* name;
};
Handle<Object> CodeObject() {
ASSERT(!code_object_.is_null());
return code_object_;
}
// -------------------------------------------------------------------------
// StatsCounter support.
void SetCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2);
void IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2);
void DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2);
// -------------------------------------------------------------------------
// Debugging.
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cc, const char* msg, Register rs, Operand rt);
void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
void AssertFastElements(Register elements);
// Like Assert(), but always enabled.
void Check(Condition cc, const char* msg, Register rs, Operand rt);
// Print a message to stdout and abort execution.
void Abort(const char* msg);
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
bool generating_stub() { return generating_stub_; }
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() { return has_frame_; }
inline bool AllowThisStubCall(CodeStub* stub);
// ---------------------------------------------------------------------------
// Number utilities.
  // Check whether the value of reg is a power of two and not zero. If not,
  // control continues at the label not_power_of_two_or_zero. If reg is a
  // power of two, the register scratch contains the value of (reg - 1) when
  // control falls through.
void JumpIfNotPowerOfTwoOrZero(Register reg,
Register scratch,
Label* not_power_of_two_or_zero);
// -------------------------------------------------------------------------
// Smi utilities.
  void SmiTag(Register reg) {
    Addu(reg, reg, reg);  // Tag by doubling: reg << kSmiTagSize.
  }
  // On overflow the overflow register is negative (< 0): test it with
  // BranchOnOverflow() or BranchOnNoOverflow().
  void SmiTagCheckOverflow(Register reg, Register overflow);
  void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
  void SmiTag(Register dst, Register src) {
    Addu(dst, src, src);  // Tag by doubling: src << kSmiTagSize.
  }
  void SmiUntag(Register reg) {
    sra(reg, reg, kSmiTagSize);  // Arithmetic shift right drops the tag bit.
  }
  void SmiUntag(Register dst, Register src) {
    sra(dst, src, kSmiTagSize);
  }
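  // Round-trip sketch: with kSmiTagSize == 1 and kSmiTag == 0, tagging
  // doubles the value and untagging arithmetic-shifts it back:
  //   li(t1, 42);
  //   SmiTag(t1);    // t1 == 84, the smi encoding of 42.
  //   SmiUntag(t1);  // t1 == 42 again.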
  // Untag the source value into destination and jump if source is a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
  // Untag the source value into destination and jump if source is not a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
  // Jump if the register contains a smi.
void JumpIfSmi(Register value,
Label* smi_label,
Register scratch = at,
BranchDelaySlot bd = PROTECT);
// Jump if the register contains a non-smi.
void JumpIfNotSmi(Register value,
Label* not_smi_label,
Register scratch = at,
BranchDelaySlot bd = PROTECT);
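  // A typical guard around a smi fast path (label name illustrative):
  //   Label not_smi;
  //   JumpIfNotSmi(a0, &not_smi);
  //   SmiUntag(a0);  // Safe here: a0 is known to be a smi.
  //   ...            // Smi fast path.
  //   bind(&not_smi);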
  // Jump if either of the registers contains a non-smi.
void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
  // Jump if either of the registers contains a smi.
void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);
void AssertSmi(Register object);
// Abort execution if argument is not a string, enabled via --debug-code.
void AssertString(Register object);
// Abort execution if argument is not the root value with the given index,
// enabled via --debug-code.
void AssertRootValue(Register src,
Heap::RootListIndex root_value_index,
const char* message);
// ---------------------------------------------------------------------------
// HeapNumber utilities.
void JumpIfNotHeapNumber(Register object,
Register heap_number_map,
Register scratch,
Label* on_not_heap_number);
// -------------------------------------------------------------------------
// String utilities.
// Checks if both instance types are sequential ASCII strings and jumps to
// label if either is not.
void JumpIfBothInstanceTypesAreNotSequentialAscii(
Register first_object_instance_type,
Register second_object_instance_type,
Register scratch1,
Register scratch2,
Label* failure);
// Check if instance type is sequential ASCII string and jump to label if
// it is not.
void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
Register scratch,
Label* failure);
// Test that both first and second are sequential ASCII strings.
// Assume that they are non-smis.
void JumpIfNonSmisNotBothSequentialAsciiStrings(Register first,
Register second,
Register scratch1,
Register scratch2,
Label* failure);
// Test that both first and second are sequential ASCII strings.
// Check that they are non-smis.
void JumpIfNotBothSequentialAsciiStrings(Register first,
Register second,
Register scratch1,
Register scratch2,
Label* failure);
void ClampUint8(Register output_reg, Register input_reg);
void ClampDoubleToUint8(Register result_reg,
DoubleRegister input_reg,
DoubleRegister temp_double_reg);
void LoadInstanceDescriptors(Register map, Register descriptors);
void EnumLength(Register dst, Register map);
void NumberOfOwnDescriptors(Register dst, Register map);
template<typename Field>
void DecodeField(Register reg) {
static const int shift = Field::kShift;
static const int mask = (Field::kMask >> shift) << kSmiTagSize;
srl(reg, reg, shift);
And(reg, reg, Operand(mask));
}
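  // Sketch of the intended pattern, with a hypothetical BitField class Foo
  // providing kShift and kMask (as v8's BitField template does):
  //   lw(t2, FieldMemOperand(obj, SomeClass::kSomeOffset));
  //   DecodeField<Foo>(t2);  // t2 now holds the shifted-and-masked field.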
// Activation support.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
// Patch the relocated value (lui/ori pair).
void PatchRelocatedValue(Register li_location,
Register scratch,
Register new_value);
  // Get the relocated value (loaded data) from the lui/ori pair.
void GetRelocatedValue(Register li_location,
Register value,
Register scratch);
// Expects object in a0 and returns map with validated enum cache
// in a0. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Register null_value, Label* call_runtime);
private:
void CallCFunctionHelper(Register function,
int num_reg_arguments,
int num_double_arguments);
void BranchShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
void BranchShort(int16_t offset, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot = PROTECT);
void BranchShort(Label* L, BranchDelaySlot bdslot = PROTECT);
void BranchShort(Label* L, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot = PROTECT);
void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot = PROTECT);
void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
void BranchAndLinkShort(Label* L, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot = PROTECT);
void J(Label* L, BranchDelaySlot bdslot);
void Jr(Label* L, BranchDelaySlot bdslot);
void Jalr(Label* L, BranchDelaySlot bdslot);
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Handle<Code> code_constant,
Register code_reg,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind);
  // Get the code for the given builtin. Returns, via the 'resolved' flag,
  // whether it was able to resolve the function.
Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
void InitializeNewString(Register string,
Register length,
Heap::RootListIndex map_index,
Register scratch1,
Register scratch2);
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object,
Register scratch,
Condition cond, // eq for new space, ne otherwise.
Label* branch);
  // Helper for finding the mark bits for an address. Afterwards, the
  // bitmap register points at the word with the mark bits and the mask
  // register holds the position of the first bit. Leaves addr_reg unchanged.
inline void GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg);
// Helper for throwing exceptions. Compute a handler address and jump to
// it. See the implementation for register usage.
void JumpToHandlerEntry();
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
bool generating_stub_;
bool allow_stub_calls_;
bool has_frame_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
// Needs access to SafepointRegisterStackIndex for optimized frame
// traversal.
friend class OptimizedFrame;
};
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion to fail.
class CodePatcher {
public:
CodePatcher(byte* address, int instructions);
virtual ~CodePatcher();
// Macro assembler to emit code.
MacroAssembler* masm() { return &masm_; }
// Emit an instruction directly.
void Emit(Instr instr);
// Emit an address directly.
void Emit(Address addr);
  // Change the condition part of an instruction, leaving the rest of the
  // instruction unchanged.
  void ChangeBranchCondition(Condition cond);
private:
  byte* address_;  // The address of the code being patched.
  int instructions_;  // Expected patch size in instructions.
  int size_;  // Expected patch size in bytes.
MacroAssembler masm_; // Macro assembler used to generate the code.
};
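// An illustrative patch of a single branch instruction (branch_address and
// the new condition are assumptions about the code being patched):
//   CodePatcher patcher(branch_address, 1);  // Expect exactly one instruction.
//   patcher.ChangeBranchCondition(ne);
// The destructor verifies the emitted size and flushes the instruction cache.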
#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
#else
#define ACCESS_MASM(masm) masm->
#endif
} } // namespace v8::internal
#endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_