2012-01-09 16:37:47 +00:00
|
|
|
// Copyright 2012 the V8 project authors. All rights reserved.
|
2008-07-03 15:10:15 +00:00
|
|
|
// Redistribution and use in source and binary forms, with or without
|
|
|
|
// modification, are permitted provided that the following conditions are
|
|
|
|
// met:
|
|
|
|
//
|
|
|
|
// * Redistributions of source code must retain the above copyright
|
|
|
|
// notice, this list of conditions and the following disclaimer.
|
|
|
|
// * Redistributions in binary form must reproduce the above
|
|
|
|
// copyright notice, this list of conditions and the following
|
|
|
|
// disclaimer in the documentation and/or other materials provided
|
|
|
|
// with the distribution.
|
|
|
|
// * Neither the name of Google Inc. nor the names of its
|
|
|
|
// contributors may be used to endorse or promote products derived
|
|
|
|
// from this software without specific prior written permission.
|
|
|
|
//
|
|
|
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
|
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
|
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
|
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
|
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
|
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
|
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
|
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
|
2009-05-04 13:36:43 +00:00
|
|
|
#ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
|
|
|
|
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_
|
2008-07-03 15:10:15 +00:00
|
|
|
|
|
|
|
#include "assembler.h"
|
2011-09-15 11:30:45 +00:00
|
|
|
#include "frames.h"
|
2011-05-17 12:05:06 +00:00
|
|
|
#include "v8globals.h"
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2009-05-25 10:05:56 +00:00
|
|
|
namespace v8 {
|
|
|
|
namespace internal {
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2010-02-04 09:11:43 +00:00
|
|
|
// ----------------------------------------------------------------------------
|
|
|
|
// Static helper functions
|
|
|
|
|
|
|
|
// Generate a MemOperand for loading a field from an object.
|
2011-11-29 10:56:11 +00:00
|
|
|
inline MemOperand FieldMemOperand(Register object, int offset) {
  // Heap object pointers are tagged; subtract the tag so the operand
  // addresses the raw field location.
  int untagged_offset = offset - kHeapObjectTag;
  return MemOperand(object, untagged_offset);
}
|
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2011-11-29 10:56:11 +00:00
|
|
|
inline Operand SmiUntagOperand(Register object) {
  // An arithmetic shift right by kSmiTagSize removes the smi tag.
  Operand untagged(object, ASR, kSmiTagSize);
  return untagged;
}
|
|
|
|
|
|
|
|
|
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
// Give alias names to registers
|
2009-06-10 15:08:25 +00:00
|
|
|
const Register cp = { 8 }; // JavaScript context pointer
|
2012-01-06 11:33:20 +00:00
|
|
|
const Register kRootRegister = { 10 }; // Roots array pointer.
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2010-05-07 14:06:55 +00:00
|
|
|
// Flags used for the AllocateInNewSpace functions.
|
|
|
|
enum AllocationFlags {
|
|
|
|
// No special flags.
|
|
|
|
NO_ALLOCATION_FLAGS = 0,
|
|
|
|
// Return the pointer to the allocated memory already tagged as a heap object.
|
|
|
|
TAG_OBJECT = 1 << 0,
|
|
|
|
// The content of the result register already contains the allocation top in
|
|
|
|
// new space.
|
|
|
|
RESULT_CONTAINS_TOP = 1 << 1,
|
|
|
|
// Specify that the requested size of the space to allocate is specified in
|
|
|
|
// words instead of bytes.
|
|
|
|
SIZE_IN_WORDS = 1 << 2
|
|
|
|
};
|
|
|
|
|
|
|
|
|
2010-06-29 09:40:36 +00:00
|
|
|
// Flags used for the ObjectToDoubleVFPRegister function.
|
|
|
|
enum ObjectToDoubleFlags {
|
|
|
|
// No special flags.
|
|
|
|
NO_OBJECT_TO_DOUBLE_FLAGS = 0,
|
|
|
|
// Object is known to be a non smi.
|
|
|
|
OBJECT_NOT_SMI = 1 << 0,
|
|
|
|
// Don't load NaNs or infinities, branch to the non number case instead.
|
|
|
|
AVOID_NANS_AND_INFINITIES = 1 << 1
|
|
|
|
};
|
|
|
|
|
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
|
|
|
|
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
|
|
|
|
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
|
|
|
|
|
|
|
|
|
|
|
|
bool AreAliased(Register r1, Register r2, Register r3, Register r4);
|
|
|
|
|
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
// MacroAssembler implements a collection of frequently used macros.
|
|
|
|
class MacroAssembler: public Assembler {
|
|
|
|
public:
|
2011-04-01 15:37:59 +00:00
|
|
|
// The isolate parameter can be NULL if the macro assembler should
|
|
|
|
// not use isolate-dependent functionality. In this case, it's the
|
|
|
|
// responsibility of the caller to never invoke such function on the
|
|
|
|
// macro assembler.
|
|
|
|
MacroAssembler(Isolate* isolate, void* buffer, int size);
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2010-02-04 21:32:02 +00:00
|
|
|
// Jump, Call, and Ret pseudo instructions implementing inter-working.
|
2008-07-03 15:10:15 +00:00
|
|
|
void Jump(Register target, Condition cond = al);
|
2011-06-30 11:26:15 +00:00
|
|
|
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
|
2008-09-22 13:57:03 +00:00
|
|
|
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
|
2011-08-10 08:03:22 +00:00
|
|
|
static int CallSize(Register target, Condition cond = al);
|
2008-07-03 15:10:15 +00:00
|
|
|
void Call(Register target, Condition cond = al);
|
2011-08-10 08:03:22 +00:00
|
|
|
static int CallSize(Address target,
|
|
|
|
RelocInfo::Mode rmode,
|
|
|
|
Condition cond = al);
|
2011-06-30 11:26:15 +00:00
|
|
|
void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
|
2011-08-10 08:03:22 +00:00
|
|
|
static int CallSize(Handle<Code> code,
|
|
|
|
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
|
|
|
|
unsigned ast_id = kNoASTId,
|
|
|
|
Condition cond = al);
|
2011-04-27 15:02:59 +00:00
|
|
|
void Call(Handle<Code> code,
|
2011-06-30 11:26:15 +00:00
|
|
|
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
|
|
|
|
unsigned ast_id = kNoASTId,
|
2011-04-27 15:02:59 +00:00
|
|
|
Condition cond = al);
|
2009-04-16 09:30:23 +00:00
|
|
|
void Ret(Condition cond = al);
|
2010-01-12 08:48:26 +00:00
|
|
|
|
|
|
|
// Emit code to discard a non-negative number of pointer-sized elements
|
|
|
|
// from the stack, clobbering only the sp register.
|
|
|
|
void Drop(int count, Condition cond = al);
|
|
|
|
|
2010-12-21 10:52:50 +00:00
|
|
|
void Ret(int drop, Condition cond = al);
|
2010-04-08 22:30:30 +00:00
|
|
|
|
|
|
|
// Swap two registers. If the scratch register is omitted then a slightly
|
|
|
|
// less efficient form using xor instead of mov is emitted.
|
2010-05-27 13:48:52 +00:00
|
|
|
void Swap(Register reg1,
|
|
|
|
Register reg2,
|
|
|
|
Register scratch = no_reg,
|
|
|
|
Condition cond = al);
|
2010-04-08 22:30:30 +00:00
|
|
|
|
2010-06-14 11:20:36 +00:00
|
|
|
|
|
|
|
void And(Register dst, Register src1, const Operand& src2,
|
|
|
|
Condition cond = al);
|
|
|
|
void Ubfx(Register dst, Register src, int lsb, int width,
|
|
|
|
Condition cond = al);
|
|
|
|
void Sbfx(Register dst, Register src, int lsb, int width,
|
|
|
|
Condition cond = al);
|
2011-03-02 09:31:42 +00:00
|
|
|
// The scratch register is not used for ARMv7.
|
|
|
|
// scratch can be the same register as src (in which case it is trashed), but
|
|
|
|
// not the same as dst.
|
|
|
|
void Bfi(Register dst,
|
|
|
|
Register src,
|
|
|
|
Register scratch,
|
|
|
|
int lsb,
|
|
|
|
int width,
|
|
|
|
Condition cond = al);
|
2010-06-17 10:45:37 +00:00
|
|
|
void Bfc(Register dst, int lsb, int width, Condition cond = al);
|
2010-07-21 07:42:51 +00:00
|
|
|
void Usat(Register dst, int satpos, const Operand& src,
|
|
|
|
Condition cond = al);
|
2010-06-14 11:20:36 +00:00
|
|
|
|
2009-12-10 14:06:08 +00:00
|
|
|
void Call(Label* target);
|
2011-04-27 14:29:25 +00:00
|
|
|
|
|
|
|
// Register move. May do nothing if the registers are identical.
|
2009-12-10 14:06:08 +00:00
|
|
|
void Move(Register dst, Handle<Object> value);
|
2011-06-29 10:51:06 +00:00
|
|
|
void Move(Register dst, Register src, Condition cond = al);
|
2011-04-27 14:29:25 +00:00
|
|
|
void Move(DoubleRegister dst, DoubleRegister src);
|
|
|
|
|
2009-08-24 11:57:57 +00:00
|
|
|
// Load an object from the root table.
|
|
|
|
void LoadRoot(Register destination,
|
|
|
|
Heap::RootListIndex index,
|
|
|
|
Condition cond = al);
|
2010-05-06 09:35:18 +00:00
|
|
|
// Store an object to the root table.
|
|
|
|
void StoreRoot(Register source,
|
|
|
|
Heap::RootListIndex index,
|
|
|
|
Condition cond = al);
|
2008-10-03 12:04:59 +00:00
|
|
|
|
2011-12-06 12:11:08 +00:00
|
|
|
void LoadHeapObject(Register dst, Handle<HeapObject> object);
|
|
|
|
|
2011-12-23 10:39:01 +00:00
|
|
|
// Load |object| into |result|, dispatching on whether it is a heap object
// (handled by LoadHeapObject) or a smi (loaded via Move).
void LoadObject(Register result, Handle<Object> object) {
  if (!object->IsHeapObject()) {
    Move(result, object);
  } else {
    LoadHeapObject(result, Handle<HeapObject>::cast(object));
  }
}
|
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
// ---------------------------------------------------------------------------
|
|
|
|
// GC Support
|
2010-05-04 11:06:59 +00:00
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
void IncrementalMarkingRecordWriteHelper(Register object,
|
|
|
|
Register value,
|
|
|
|
Register address);
|
2010-05-04 11:06:59 +00:00
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
enum RememberedSetFinalAction {
|
|
|
|
kReturnAtEnd,
|
|
|
|
kFallThroughAtEnd
|
|
|
|
};
|
2010-05-04 11:06:59 +00:00
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
// Record in the remembered set the fact that we have a pointer to new space
|
|
|
|
// at the address pointed to by the addr register. Only works if addr is not
|
|
|
|
// in new space.
|
2011-09-20 13:32:27 +00:00
|
|
|
void RememberedSetHelper(Register object, // Used for debug code.
|
|
|
|
Register addr,
|
2011-09-19 18:36:47 +00:00
|
|
|
Register scratch,
|
|
|
|
SaveFPRegsMode save_fp,
|
|
|
|
RememberedSetFinalAction and_then);
|
|
|
|
|
|
|
|
void CheckPageFlag(Register object,
|
|
|
|
Register scratch,
|
|
|
|
int mask,
|
|
|
|
Condition cc,
|
|
|
|
Label* condition_met);
|
|
|
|
|
|
|
|
// Check if object is in new space. Jumps if the object is not in new space.
|
2011-09-20 13:32:27 +00:00
|
|
|
// The register scratch can be object itself, but scratch will be clobbered.
|
2011-09-19 18:36:47 +00:00
|
|
|
void JumpIfNotInNewSpace(Register object,
                         Register scratch,
                         Label* branch) {
  // 'ne' makes InNewSpace branch when the object is NOT in new space.
  InNewSpace(object, scratch, ne, branch);
}
|
2010-06-30 12:27:49 +00:00
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
// Check if object is in new space. Jumps if the object is in new space.
|
|
|
|
// The register scratch can be object itself, but it will be clobbered.
|
|
|
|
void JumpIfInNewSpace(Register object,
                      Register scratch,
                      Label* branch) {
  // 'eq' makes InNewSpace branch when the object IS in new space.
  InNewSpace(object, scratch, eq, branch);
}
|
|
|
|
|
|
|
|
// Check if an object has a given incremental marking color.
|
|
|
|
void HasColor(Register object,
|
|
|
|
Register scratch0,
|
|
|
|
Register scratch1,
|
|
|
|
Label* has_color,
|
|
|
|
int first_bit,
|
|
|
|
int second_bit);
|
|
|
|
|
|
|
|
void JumpIfBlack(Register object,
|
2010-06-17 10:45:37 +00:00
|
|
|
Register scratch0,
|
2011-09-19 18:36:47 +00:00
|
|
|
Register scratch1,
|
|
|
|
Label* on_black);
|
|
|
|
|
|
|
|
// Checks the color of an object. If the object is already grey or black
|
|
|
|
// then we just fall through, since it is already live. If it is white and
|
|
|
|
// we can determine that it doesn't need to be scanned, then we just mark it
|
|
|
|
// black and fall through. For the rest we jump to the label so the
|
|
|
|
// incremental marker can fix its assumptions.
|
|
|
|
void EnsureNotWhite(Register object,
|
|
|
|
Register scratch1,
|
|
|
|
Register scratch2,
|
|
|
|
Register scratch3,
|
|
|
|
Label* object_is_white_and_not_data);
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2012-01-16 12:38:59 +00:00
|
|
|
// Detects conservatively whether an object is data-only, i.e. it does not
// need to be scanned by the garbage collector.
|
|
|
|
void JumpIfDataObject(Register value,
|
|
|
|
Register scratch,
|
|
|
|
Label* not_data_object);
|
|
|
|
|
|
|
|
// Notify the garbage collector that we wrote a pointer into an object.
|
|
|
|
// |object| is the object being stored into, |value| is the object being
|
|
|
|
// stored. value and scratch registers are clobbered by the operation.
|
|
|
|
// The offset is the offset from the start of the object, not the offset from
|
|
|
|
// the tagged HeapObject pointer. For use with FieldOperand(reg, off).
|
|
|
|
void RecordWriteField(
|
|
|
|
Register object,
|
|
|
|
int offset,
|
|
|
|
Register value,
|
|
|
|
Register scratch,
|
|
|
|
LinkRegisterStatus lr_status,
|
|
|
|
SaveFPRegsMode save_fp,
|
|
|
|
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
|
|
|
|
SmiCheck smi_check = INLINE_SMI_CHECK);
|
|
|
|
|
|
|
|
// As above, but the offset has the tag presubtracted. For use with
|
|
|
|
// MemOperand(reg, off).
|
|
|
|
inline void RecordWriteContextSlot(
    Register context,
    int offset,
    Register value,
    Register scratch,
    LinkRegisterStatus lr_status,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
    SmiCheck smi_check = INLINE_SMI_CHECK) {
  // Re-add the heap object tag and delegate to RecordWriteField, which
  // expects an untagged offset from the start of the object.
  RecordWriteField(context,
                   offset + kHeapObjectTag,
                   value,
                   scratch,
                   lr_status,
                   save_fp,
                   remembered_set_action,
                   smi_check);
}
|
|
|
|
|
|
|
|
// For a given |object| notify the garbage collector that the slot |address|
|
|
|
|
// has been written. |value| is the object being stored. The value and
|
|
|
|
// address registers are clobbered by the operation.
|
|
|
|
void RecordWrite(
|
|
|
|
Register object,
|
|
|
|
Register address,
|
|
|
|
Register value,
|
|
|
|
LinkRegisterStatus lr_status,
|
|
|
|
SaveFPRegsMode save_fp,
|
|
|
|
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
|
|
|
|
SmiCheck smi_check = INLINE_SMI_CHECK);
|
2010-06-30 12:27:49 +00:00
|
|
|
|
2011-06-08 13:55:33 +00:00
|
|
|
// Push a handle.
|
|
|
|
void Push(Handle<Object> handle);
|
|
|
|
|
2010-04-26 14:25:29 +00:00
|
|
|
// Push two registers. Pushes leftmost register first (to highest address).
|
|
|
|
void Push(Register src1, Register src2, Condition cond = al) {
  ASSERT(!src1.is(src2));
  if (src2.code() > src1.code()) {
    // stm stores the lowest-numbered register at the lowest address, so it
    // can only be used when src1 (highest address) has the higher code;
    // otherwise emit two single stores in order.
    str(src1, MemOperand(sp, 4, NegPreIndex), cond);
    str(src2, MemOperand(sp, 4, NegPreIndex), cond);
  } else {
    stm(db_w, sp, src1.bit() | src2.bit(), cond);
  }
}
|
|
|
|
|
|
|
|
// Push three registers. Pushes leftmost register first (to highest address).
|
|
|
|
void Push(Register src1, Register src2, Register src3, Condition cond = al) {
  ASSERT(!src1.is(src2));
  ASSERT(!src2.is(src3));
  ASSERT(!src1.is(src3));
  if (src1.code() > src2.code()) {
    if (src2.code() > src3.code()) {
      // Strictly descending register codes: a single stm pushes all three.
      stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
    } else {
      // src3 breaks the ordering: push the first pair together, then src3.
      stm(db_w, sp, src1.bit() | src2.bit(), cond);
      str(src3, MemOperand(sp, 4, NegPreIndex), cond);
    }
  } else {
    // src1 breaks the ordering: push it singly, then recurse for the rest.
    str(src1, MemOperand(sp, 4, NegPreIndex), cond);
    Push(src2, src3, cond);
  }
}
|
|
|
|
|
|
|
|
// Push four registers. Pushes leftmost register first (to highest address).
|
2011-10-19 12:15:15 +00:00
|
|
|
void Push(Register src1,
          Register src2,
          Register src3,
          Register src4,
          Condition cond = al) {
  ASSERT(!src1.is(src2));
  ASSERT(!src2.is(src3));
  ASSERT(!src1.is(src3));
  ASSERT(!src1.is(src4));
  ASSERT(!src2.is(src4));
  ASSERT(!src3.is(src4));
  if (src1.code() > src2.code()) {
    if (src2.code() > src3.code()) {
      if (src3.code() > src4.code()) {
        // Strictly descending register codes: a single stm pushes all four.
        stm(db_w,
            sp,
            src1.bit() | src2.bit() | src3.bit() | src4.bit(),
            cond);
      } else {
        // src4 breaks the ordering: push the first three together, then src4.
        stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
        str(src4, MemOperand(sp, 4, NegPreIndex), cond);
      }
    } else {
      // src3 breaks the ordering: push the first pair, recurse for the rest.
      stm(db_w, sp, src1.bit() | src2.bit(), cond);
      Push(src3, src4, cond);
    }
  } else {
    // src1 breaks the ordering: push it singly, recurse for the rest.
    str(src1, MemOperand(sp, 4, NegPreIndex), cond);
    Push(src2, src3, src4, cond);
  }
}
|
|
|
|
|
2011-03-02 09:31:42 +00:00
|
|
|
// Pop two registers. Pops rightmost register first (from lower address).
|
|
|
|
void Pop(Register src1, Register src2, Condition cond = al) {
  ASSERT(!src1.is(src2));
  if (src2.code() > src1.code()) {
    // ldm loads the lowest-numbered register from the lowest address, so it
    // can only be used when src1 (highest address) has the higher code;
    // otherwise emit two single loads in pop order.
    ldr(src2, MemOperand(sp, 4, PostIndex), cond);
    ldr(src1, MemOperand(sp, 4, PostIndex), cond);
  } else {
    ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
  }
}
|
|
|
|
|
2011-10-19 12:15:15 +00:00
|
|
|
// Pop three registers. Pops rightmost register first (from lower address).
|
|
|
|
void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
  ASSERT(!src1.is(src2));
  ASSERT(!src2.is(src3));
  ASSERT(!src1.is(src3));
  if (src1.code() > src2.code()) {
    if (src2.code() > src3.code()) {
      // Strictly descending register codes: a single ldm restores all three.
      ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
    } else {
      // src3 breaks the ordering: pop it singly, then the remaining pair.
      ldr(src3, MemOperand(sp, 4, PostIndex), cond);
      ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
    }
  } else {
    // Pop the two rightmost registers first (lower addresses), then src1.
    Pop(src2, src3, cond);
    // Bug fix: this was str(), which clobbered the stack slot with src1's
    // current value instead of loading the popped value into src1. The
    // 2- and 4-register Pop overloads correctly use ldr() here.
    ldr(src1, MemOperand(sp, 4, PostIndex), cond);
  }
}
|
|
|
|
|
|
|
|
// Pop four registers. Pops rightmost register first (from lower address).
|
|
|
|
void Pop(Register src1,
         Register src2,
         Register src3,
         Register src4,
         Condition cond = al) {
  ASSERT(!src1.is(src2));
  ASSERT(!src2.is(src3));
  ASSERT(!src1.is(src3));
  ASSERT(!src1.is(src4));
  ASSERT(!src2.is(src4));
  ASSERT(!src3.is(src4));
  if (src1.code() > src2.code()) {
    if (src2.code() > src3.code()) {
      if (src3.code() > src4.code()) {
        // Strictly descending register codes: a single ldm restores all four.
        ldm(ia_w,
            sp,
            src1.bit() | src2.bit() | src3.bit() | src4.bit(),
            cond);
      } else {
        // src4 breaks the ordering: pop it singly, then the first three.
        ldr(src4, MemOperand(sp, 4, PostIndex), cond);
        ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
      }
    } else {
      // src3 breaks the ordering: pop the last pair first, then the rest.
      Pop(src3, src4, cond);
      ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
    }
  } else {
    // src1 breaks the ordering: pop the rightmost three first, then src1.
    Pop(src2, src3, src4, cond);
    ldr(src1, MemOperand(sp, 4, PostIndex), cond);
  }
}
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
// Push and pop the registers that can hold pointers, as defined by the
|
|
|
|
// RegList constant kSafepointSavedRegisters.
|
|
|
|
void PushSafepointRegisters();
|
|
|
|
void PopSafepointRegisters();
|
2011-01-14 08:49:52 +00:00
|
|
|
void PushSafepointRegistersAndDoubles();
|
|
|
|
void PopSafepointRegistersAndDoubles();
|
2011-02-21 11:29:45 +00:00
|
|
|
// Store value in register src in the safepoint stack slot for
|
|
|
|
// register dst.
|
|
|
|
void StoreToSafepointRegisterSlot(Register src, Register dst);
|
|
|
|
void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst);
|
|
|
|
// Load the value of the src register from its safepoint stack slot
|
|
|
|
// into register dst.
|
|
|
|
void LoadFromSafepointRegisterSlot(Register dst, Register src);
|
2010-12-07 11:31:57 +00:00
|
|
|
|
2010-05-26 11:19:39 +00:00
|
|
|
// Load two consecutive registers with two consecutive memory locations.
|
|
|
|
void Ldrd(Register dst1,
|
|
|
|
Register dst2,
|
|
|
|
const MemOperand& src,
|
|
|
|
Condition cond = al);
|
|
|
|
|
|
|
|
// Store two consecutive registers to two consecutive memory locations.
|
|
|
|
void Strd(Register src1,
|
|
|
|
Register src2,
|
|
|
|
const MemOperand& dst,
|
|
|
|
Condition cond = al);
|
|
|
|
|
2011-01-11 12:45:25 +00:00
|
|
|
// Clear specified FPSCR bits.
|
|
|
|
void ClearFPSCRBits(const uint32_t bits_to_clear,
|
|
|
|
const Register scratch,
|
|
|
|
const Condition cond = al);
|
|
|
|
|
|
|
|
// Compare double values and move the result to the normal condition flags.
|
|
|
|
void VFPCompareAndSetFlags(const DwVfpRegister src1,
|
|
|
|
const DwVfpRegister src2,
|
|
|
|
const Condition cond = al);
|
|
|
|
void VFPCompareAndSetFlags(const DwVfpRegister src1,
|
|
|
|
const double src2,
|
|
|
|
const Condition cond = al);
|
|
|
|
|
|
|
|
// Compare double values and then load the fpscr flags to a register.
|
|
|
|
void VFPCompareAndLoadFlags(const DwVfpRegister src1,
|
|
|
|
const DwVfpRegister src2,
|
|
|
|
const Register fpscr_flags,
|
|
|
|
const Condition cond = al);
|
|
|
|
void VFPCompareAndLoadFlags(const DwVfpRegister src1,
|
|
|
|
const double src2,
|
|
|
|
const Register fpscr_flags,
|
|
|
|
const Condition cond = al);
|
|
|
|
|
2011-06-29 10:51:06 +00:00
|
|
|
void Vmov(const DwVfpRegister dst,
|
|
|
|
const double imm,
|
|
|
|
const Condition cond = al);
|
|
|
|
|
2010-08-27 07:08:03 +00:00
|
|
|
// Enter exit frame.
|
2011-02-04 13:43:38 +00:00
|
|
|
// stack_space - extra stack space, used for alignment before call to C.
|
|
|
|
void EnterExitFrame(bool save_doubles, int stack_space = 0);
|
2008-09-23 08:19:26 +00:00
|
|
|
|
|
|
|
// Leave the current exit frame. Expects the return value in r0.
|
2011-02-15 13:53:51 +00:00
|
|
|
// Expect the number of values, pushed prior to the exit frame, to
|
|
|
|
// remove in a register (or no_reg, if there is nothing to remove).
|
|
|
|
void LeaveExitFrame(bool save_doubles, Register argument_count);
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2010-04-15 09:34:47 +00:00
|
|
|
// Get the actual activation frame alignment for target environment.
|
|
|
|
static int ActivationFrameAlignment();
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2009-11-26 10:28:32 +00:00
|
|
|
void LoadContext(Register dst, int context_chain_length);
|
|
|
|
|
2012-02-02 11:22:26 +00:00
|
|
|
// Conditionally load the cached Array transitioned map of type
|
|
|
|
// transitioned_kind from the global context if the map in register
|
|
|
|
// map_in_out is the cached Array map in the global context of
|
|
|
|
// expected_kind.
|
|
|
|
void LoadTransitionedArrayMapConditional(
|
|
|
|
ElementsKind expected_kind,
|
|
|
|
ElementsKind transitioned_kind,
|
|
|
|
Register map_in_out,
|
|
|
|
Register scratch,
|
|
|
|
Label* no_map_match);
|
|
|
|
|
|
|
|
// Load the initial map for new Arrays from a JSFunction.
|
|
|
|
void LoadInitialArrayMap(Register function_in,
|
|
|
|
Register scratch,
|
|
|
|
Register map_out);
|
2012-01-26 21:47:57 +00:00
|
|
|
|
2010-11-26 08:43:34 +00:00
|
|
|
void LoadGlobalFunction(int index, Register function);
|
|
|
|
|
|
|
|
// Load the initial map from the global function. The registers
|
|
|
|
// function and map can be the same, function is then overwritten.
|
|
|
|
void LoadGlobalFunctionInitialMap(Register function,
|
|
|
|
Register map,
|
|
|
|
Register scratch);
|
|
|
|
|
2012-01-06 11:33:20 +00:00
|
|
|
void InitializeRootRegister() {
|
|
|
|
ExternalReference roots_array_start =
|
|
|
|
ExternalReference::roots_array_start(isolate());
|
|
|
|
mov(kRootRegister, Operand(roots_array_start));
|
|
|
|
}
|
|
|
|
|
2008-08-06 10:02:49 +00:00
|
|
|
// ---------------------------------------------------------------------------
|
|
|
|
// JavaScript invokes
|
|
|
|
|
2012-01-13 13:09:52 +00:00
|
|
|
// Set up call kind marking in a register. The method takes dst as an
|
2011-05-24 14:01:36 +00:00
|
|
|
// explicit first parameter to make the code more readable at the
|
|
|
|
// call sites.
|
|
|
|
void SetCallKind(Register dst, CallKind kind);
|
|
|
|
|
2008-08-06 10:02:49 +00:00
|
|
|
// Invoke the JavaScript function code by either calling or jumping.
|
|
|
|
void InvokeCode(Register code,
|
|
|
|
const ParameterCount& expected,
|
|
|
|
const ParameterCount& actual,
|
2011-01-11 14:11:03 +00:00
|
|
|
InvokeFlag flag,
|
2011-05-30 13:23:17 +00:00
|
|
|
const CallWrapper& call_wrapper,
|
|
|
|
CallKind call_kind);
|
2008-08-06 10:02:49 +00:00
|
|
|
|
|
|
|
void InvokeCode(Handle<Code> code,
|
|
|
|
const ParameterCount& expected,
|
|
|
|
const ParameterCount& actual,
|
2008-09-22 13:57:03 +00:00
|
|
|
RelocInfo::Mode rmode,
|
2011-05-24 14:01:36 +00:00
|
|
|
InvokeFlag flag,
|
2011-05-30 13:23:17 +00:00
|
|
|
CallKind call_kind);
|
2008-08-06 10:02:49 +00:00
|
|
|
|
|
|
|
// Invoke the JavaScript function in the given register. Changes the
|
|
|
|
// current context to the context in the function before invoking.
|
|
|
|
void InvokeFunction(Register function,
|
|
|
|
const ParameterCount& actual,
|
2011-01-11 14:11:03 +00:00
|
|
|
InvokeFlag flag,
|
2011-05-30 13:23:17 +00:00
|
|
|
const CallWrapper& call_wrapper,
|
|
|
|
CallKind call_kind);
|
2008-08-06 10:02:49 +00:00
|
|
|
|
2011-10-28 12:37:29 +00:00
|
|
|
void InvokeFunction(Handle<JSFunction> function,
|
2010-02-15 12:32:27 +00:00
|
|
|
const ParameterCount& actual,
|
2011-05-30 13:23:17 +00:00
|
|
|
InvokeFlag flag,
|
2012-01-17 15:53:58 +00:00
|
|
|
const CallWrapper& call_wrapper,
|
2011-05-30 13:23:17 +00:00
|
|
|
CallKind call_kind);
|
2010-02-15 12:32:27 +00:00
|
|
|
|
2010-12-21 10:52:50 +00:00
|
|
|
void IsObjectJSObjectType(Register heap_object,
|
|
|
|
Register map,
|
|
|
|
Register scratch,
|
|
|
|
Label* fail);
|
|
|
|
|
|
|
|
void IsInstanceJSObjectType(Register map,
|
|
|
|
Register scratch,
|
|
|
|
Label* fail);
|
|
|
|
|
|
|
|
void IsObjectJSStringType(Register object,
|
|
|
|
Register scratch,
|
|
|
|
Label* fail);
|
2008-09-12 03:29:06 +00:00
|
|
|
|
2009-04-20 16:36:13 +00:00
|
|
|
#ifdef ENABLE_DEBUGGER_SUPPORT
|
2008-07-03 15:10:15 +00:00
|
|
|
// ---------------------------------------------------------------------------
|
|
|
|
// Debugger Support
|
|
|
|
|
2010-02-08 13:44:49 +00:00
|
|
|
void DebugBreak();
|
2009-04-20 16:36:13 +00:00
|
|
|
#endif
|
2008-07-03 15:10:15 +00:00
|
|
|
|
|
|
|
// ---------------------------------------------------------------------------
|
|
|
|
// Exception handling
|
|
|
|
|
|
|
|
// Push a new try handler and link into try handler chain.
|
2012-02-09 09:43:37 +00:00
|
|
|
void PushTryHandler(StackHandler::Kind kind, int handler_index);
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2009-12-10 14:06:08 +00:00
|
|
|
// Unlink the stack handler on top of the stack from the try handler chain.
|
|
|
|
// Must preserve the result register.
|
|
|
|
void PopTryHandler();
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2012-02-10 08:47:35 +00:00
|
|
|
// Passes thrown value to the handler of top of the try handler chain.
|
2011-02-15 13:53:51 +00:00
|
|
|
void Throw(Register value);
|
|
|
|
|
|
|
|
// Propagates an uncatchable exception to the top of the current JS stack's
|
|
|
|
// handler chain.
|
2012-02-10 08:47:35 +00:00
|
|
|
void ThrowUncatchable(Register value);
|
2011-02-15 13:53:51 +00:00
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
// ---------------------------------------------------------------------------
|
|
|
|
// Inline caching support
|
|
|
|
|
|
|
|
// Generate code for checking access rights - used for security checks
|
|
|
|
// on access to global objects across environments. The holder register
|
|
|
|
// is left untouched, whereas both scratch registers are clobbered.
|
2008-10-21 20:11:50 +00:00
|
|
|
void CheckAccessGlobalProxy(Register holder_reg,
|
|
|
|
Register scratch,
|
|
|
|
Label* miss);
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2012-01-10 12:58:41 +00:00
|
|
|
void GetNumberHash(Register t0, Register scratch);
|
|
|
|
|
2011-07-08 10:46:10 +00:00
|
|
|
void LoadFromNumberDictionary(Label* miss,
|
|
|
|
Register elements,
|
|
|
|
Register key,
|
|
|
|
Register result,
|
|
|
|
Register t0,
|
|
|
|
Register t1,
|
|
|
|
Register t2);
|
|
|
|
|
|
|
|
|
2010-11-24 09:40:58 +00:00
|
|
|
// Emit a marker nop of the given type (recognized later by IsMarkedCode /
// GetCodeMarker) at the current position.
inline void MarkCode(NopMarkerTypes type) {
  nop(type);
}
|
|
|
|
|
|
|
|
// Check if the given instruction is a 'type' marker.
|
2012-01-16 12:38:59 +00:00
|
|
|
// i.e. check if it is a mov r<type>, r<type> (referenced as nop(type))
|
2010-11-24 09:40:58 +00:00
|
|
|
// These instructions are generated to mark special location in the code,
|
|
|
|
// like some special IC code.
|
|
|
|
static inline bool IsMarkedCode(Instr instr, int type) {
  // |type| must lie in the dedicated code-marker range.
  ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
  // A marker is a nop of the given type (see MarkCode).
  return IsNop(instr, type);
}
|
|
|
|
|
|
|
|
|
|
|
|
static inline int GetCodeMarker(Instr instr) {
  // Bit positions of the destination and source register fields in the
  // instruction word.
  int dst_reg_offset = 12;
  int dst_mask = 0xf << dst_reg_offset;
  int src_mask = 0xf;
  int dst_reg = (instr & dst_mask) >> dst_reg_offset;
  int src_reg = instr & src_mask;
  // All bits except the two register fields must match mov_mask: the 'al'
  // condition combined with opcode 13 at bit 21 (mov in the ARM
  // data-processing encoding).
  uint32_t non_register_mask = ~(dst_mask | src_mask);
  uint32_t mov_mask = al | 13 << 21;

  // Return <n> if we have a mov rn rn, else return -1.
  int type = ((instr & non_register_mask) == mov_mask) &&
             (dst_reg == src_reg) &&
             (FIRST_IC_MARKER <= dst_reg) && (dst_reg < LAST_CODE_MARKER)
                 ? src_reg
                 : -1;
  ASSERT((type == -1) ||
         ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
  return type;
}
|
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2009-09-01 07:36:46 +00:00
|
|
|
// ---------------------------------------------------------------------------
|
|
|
|
// Allocation support
|
|
|
|
|
2011-01-06 08:56:29 +00:00
|
|
|
// Allocate an object in new space. The object_size is specified
|
|
|
|
// either in bytes or in words if the allocation flag SIZE_IN_WORDS
|
|
|
|
// is passed. If the new space is exhausted control continues at the
|
|
|
|
// gc_required label. The allocated object is returned in result. If
|
|
|
|
// the flag tag_allocated_object is true the result is tagged as
|
|
|
|
// a heap object. All registers are clobbered also when control
|
|
|
|
// continues at the gc_required label.
|
2009-09-30 09:24:46 +00:00
|
|
|
void AllocateInNewSpace(int object_size,
|
|
|
|
Register result,
|
|
|
|
Register scratch1,
|
|
|
|
Register scratch2,
|
|
|
|
Label* gc_required,
|
|
|
|
AllocationFlags flags);
|
|
|
|
void AllocateInNewSpace(Register object_size,
|
|
|
|
Register result,
|
|
|
|
Register scratch1,
|
|
|
|
Register scratch2,
|
|
|
|
Label* gc_required,
|
|
|
|
AllocationFlags flags);
|
2009-09-02 11:13:44 +00:00
|
|
|
|
|
|
|
// Undo allocation in new space. The object passed and objects allocated after
|
|
|
|
// it will no longer be allocated. The caller must make sure that no pointers
|
|
|
|
// are left to the object(s) no longer allocated as they would be invalid when
|
|
|
|
// allocation is undone.
|
|
|
|
void UndoAllocationInNewSpace(Register object, Register scratch);
|
2009-09-01 07:36:46 +00:00
|
|
|
|
2010-02-04 09:11:43 +00:00
|
|
|
|
|
|
|
void AllocateTwoByteString(Register result,
|
|
|
|
Register length,
|
|
|
|
Register scratch1,
|
|
|
|
Register scratch2,
|
|
|
|
Register scratch3,
|
|
|
|
Label* gc_required);
|
|
|
|
void AllocateAsciiString(Register result,
|
|
|
|
Register length,
|
|
|
|
Register scratch1,
|
|
|
|
Register scratch2,
|
|
|
|
Register scratch3,
|
|
|
|
Label* gc_required);
|
2010-02-05 12:00:42 +00:00
|
|
|
void AllocateTwoByteConsString(Register result,
|
|
|
|
Register length,
|
|
|
|
Register scratch1,
|
|
|
|
Register scratch2,
|
|
|
|
Label* gc_required);
|
|
|
|
void AllocateAsciiConsString(Register result,
|
|
|
|
Register length,
|
|
|
|
Register scratch1,
|
|
|
|
Register scratch2,
|
|
|
|
Label* gc_required);
|
2011-09-01 15:24:26 +00:00
|
|
|
void AllocateTwoByteSlicedString(Register result,
|
|
|
|
Register length,
|
|
|
|
Register scratch1,
|
|
|
|
Register scratch2,
|
|
|
|
Label* gc_required);
|
|
|
|
void AllocateAsciiSlicedString(Register result,
|
|
|
|
Register length,
|
|
|
|
Register scratch1,
|
|
|
|
Register scratch2,
|
|
|
|
Label* gc_required);
|
2010-02-04 09:11:43 +00:00
|
|
|
|
2010-05-07 10:16:11 +00:00
|
|
|
// Allocates a heap number or jumps to the gc_required label if the young
|
|
|
|
// space is full and a scavenge is needed. All registers are clobbered also
|
|
|
|
// when control continues at the gc_required label.
|
2010-03-23 13:38:04 +00:00
|
|
|
void AllocateHeapNumber(Register result,
|
|
|
|
Register scratch1,
|
|
|
|
Register scratch2,
|
2010-06-17 21:51:51 +00:00
|
|
|
Register heap_number_map,
|
2010-03-23 13:38:04 +00:00
|
|
|
Label* gc_required);
|
2010-06-29 09:40:36 +00:00
|
|
|
void AllocateHeapNumberWithValue(Register result,
|
|
|
|
DwVfpRegister value,
|
|
|
|
Register scratch1,
|
|
|
|
Register scratch2,
|
|
|
|
Register heap_number_map,
|
|
|
|
Label* gc_required);
|
|
|
|
|
2010-08-06 13:04:27 +00:00
|
|
|
// Copies a fixed number of fields of heap objects from src to dst.
|
|
|
|
void CopyFields(Register dst, Register src, RegList temps, int field_count);
|
2010-02-04 09:11:43 +00:00
|
|
|
|
2011-03-01 14:09:23 +00:00
|
|
|
// Copies a number of bytes from src to dst. All registers are clobbered. On
|
|
|
|
// exit src and dst will point to the place just after where the last byte was
|
|
|
|
// read or written and length will be zero.
|
|
|
|
void CopyBytes(Register src,
|
|
|
|
Register dst,
|
|
|
|
Register length,
|
|
|
|
Register scratch);
|
|
|
|
|
2011-09-20 10:06:23 +00:00
|
|
|
// Initialize fields with filler values. Fields starting at |start_offset|
|
|
|
|
// not including end_offset are overwritten with the value in |filler|. At
|
|
|
|
// the end the loop, |start_offset| takes the value of |end_offset|.
|
|
|
|
void InitializeFieldsWithFiller(Register start_offset,
|
|
|
|
Register end_offset,
|
|
|
|
Register filler);
|
|
|
|
|
2008-10-06 06:41:10 +00:00
|
|
|
// ---------------------------------------------------------------------------
|
|
|
|
// Support functions.
|
|
|
|
|
2009-06-10 11:42:13 +00:00
|
|
|
// Try to get function prototype of a function and puts the value in
|
|
|
|
// the result register. Checks that the function really is a
|
|
|
|
// function and jumps to the miss label if the fast checks fail. The
|
|
|
|
// function register will be untouched; the other registers may be
|
|
|
|
// clobbered.
|
|
|
|
void TryGetFunctionPrototype(Register function,
|
|
|
|
Register result,
|
|
|
|
Register scratch,
|
2011-10-17 12:44:16 +00:00
|
|
|
Label* miss,
|
|
|
|
bool miss_on_bound_function = false);
|
2009-06-10 11:42:13 +00:00
|
|
|
|
|
|
|
// Compare object type for heap object. heap_object contains a non-Smi
|
|
|
|
// whose object type should be compared with the given type. This both
|
|
|
|
// sets the flags and leaves the object type in the type_reg register.
|
|
|
|
// It leaves the map in the map register (unless the type_reg and map register
|
|
|
|
// are the same register). It leaves the heap object in the heap_object
|
|
|
|
// register unless the heap_object register is the same register as one of the
|
2009-09-02 11:13:44 +00:00
|
|
|
// other registers.
|
2009-06-10 11:42:13 +00:00
|
|
|
void CompareObjectType(Register heap_object,
|
|
|
|
Register map,
|
|
|
|
Register type_reg,
|
|
|
|
InstanceType type);
|
|
|
|
|
2009-09-02 11:13:44 +00:00
|
|
|
// Compare instance type in a map. map contains a valid map object whose
|
|
|
|
// object type should be compared with the given type. This both
|
2011-09-13 11:42:57 +00:00
|
|
|
// sets the flags and leaves the object type in the type_reg register.
|
2009-09-02 11:13:44 +00:00
|
|
|
void CompareInstanceType(Register map,
|
|
|
|
Register type_reg,
|
|
|
|
InstanceType type);
|
|
|
|
|
2010-02-04 09:11:43 +00:00
|
|
|
|
2011-06-03 07:41:37 +00:00
|
|
|
// Check if a map for a JSObject indicates that the object has fast elements.
|
|
|
|
// Jump to the specified label if it does not.
|
|
|
|
void CheckFastElements(Register map,
|
|
|
|
Register scratch,
|
|
|
|
Label* fail);
|
|
|
|
|
2011-09-23 09:31:20 +00:00
|
|
|
// Check if a map for a JSObject indicates that the object can have both smi
|
|
|
|
// and HeapObject elements. Jump to the specified label if it does not.
|
|
|
|
void CheckFastObjectElements(Register map,
|
|
|
|
Register scratch,
|
|
|
|
Label* fail);
|
|
|
|
|
|
|
|
// Check if a map for a JSObject indicates that the object has fast smi only
|
|
|
|
// elements. Jump to the specified label if it does not.
|
|
|
|
void CheckFastSmiOnlyElements(Register map,
|
|
|
|
Register scratch,
|
|
|
|
Label* fail);
|
|
|
|
|
2011-10-05 11:39:34 +00:00
|
|
|
// Check to see if maybe_number can be stored as a double in
|
|
|
|
// FastDoubleElements. If it can, store it at the index specified by key in
|
2012-02-07 14:03:13 +00:00
|
|
|
// the FastDoubleElements array elements. Otherwise jump to fail, in which
|
|
|
|
// case scratch2, scratch3 and scratch4 are unmodified.
|
2011-10-05 11:39:34 +00:00
|
|
|
void StoreNumberToDoubleElements(Register value_reg,
|
|
|
|
Register key_reg,
|
|
|
|
Register receiver_reg,
|
|
|
|
Register elements_reg,
|
|
|
|
Register scratch1,
|
|
|
|
Register scratch2,
|
|
|
|
Register scratch3,
|
|
|
|
Register scratch4,
|
|
|
|
Label* fail);
|
|
|
|
|
2012-01-09 16:37:47 +00:00
|
|
|
// Compare an object's map with the specified map and its transitioned
|
|
|
|
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
|
|
|
|
// set with result of map compare. If multiple map compares are required, the
|
|
|
|
// compare sequences branches to early_success.
|
|
|
|
void CompareMap(Register obj,
|
|
|
|
Register scratch,
|
|
|
|
Handle<Map> map,
|
|
|
|
Label* early_success,
|
|
|
|
CompareMapMode mode = REQUIRE_EXACT_MAP);
|
|
|
|
|
|
|
|
// Check if the map of an object is equal to a specified map and branch to
|
|
|
|
// label if not. Skip the smi check if not required (object is known to be a
|
|
|
|
// heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
|
2012-01-16 12:38:59 +00:00
|
|
|
// against maps that are ElementsKind transition maps of the specified map.
|
2010-02-05 13:57:18 +00:00
|
|
|
void CheckMap(Register obj,
|
|
|
|
Register scratch,
|
|
|
|
Handle<Map> map,
|
|
|
|
Label* fail,
|
2012-01-09 16:37:47 +00:00
|
|
|
SmiCheckType smi_check_type,
|
|
|
|
CompareMapMode mode = REQUIRE_EXACT_MAP);
|
2010-02-05 13:57:18 +00:00
|
|
|
|
2011-05-18 13:17:29 +00:00
|
|
|
|
2010-06-11 10:25:34 +00:00
|
|
|
void CheckMap(Register obj,
|
|
|
|
Register scratch,
|
|
|
|
Heap::RootListIndex index,
|
|
|
|
Label* fail,
|
2011-05-17 12:05:06 +00:00
|
|
|
SmiCheckType smi_check_type);
|
2010-06-11 10:25:34 +00:00
|
|
|
|
|
|
|
|
2011-05-18 13:17:29 +00:00
|
|
|
// Check if the map of an object is equal to a specified map and branch to a
|
|
|
|
// specified target if equal. Skip the smi check if not required (object is
|
|
|
|
// known to be a heap object)
|
|
|
|
void DispatchMap(Register obj,
|
|
|
|
Register scratch,
|
|
|
|
Handle<Map> map,
|
|
|
|
Handle<Code> success,
|
|
|
|
SmiCheckType smi_check_type);
|
|
|
|
|
|
|
|
|
2011-03-03 12:21:37 +00:00
|
|
|
// Compare the object in a register to a value from the root list.
|
|
|
|
// Uses the ip register as scratch.
|
|
|
|
void CompareRoot(Register obj, Heap::RootListIndex index);
|
|
|
|
|
|
|
|
|
2010-02-04 09:11:43 +00:00
|
|
|
// Load and check the instance type of an object for being a string.
|
|
|
|
// Loads the type into the second argument register.
|
|
|
|
// Returns a condition that will be enabled if the object was a string.
|
|
|
|
Condition IsObjectStringType(Register obj,
                             Register type) {
  // Load the object's map, then the instance-type byte from the map.
  ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset));
  ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
  // String instance types have all kIsNotStringMask bits clear, so the
  // tst sets eq exactly when the object is a string.
  tst(type, Operand(kIsNotStringMask));
  // The check above relies on the string tag being zero.
  ASSERT_EQ(0, kStringTag);
  // On exit: obj is untouched, type holds the instance type, and the
  // returned condition (eq) is enabled iff the object was a string.
  return eq;
}
|
|
|
|
|
|
|
|
|
2008-10-06 06:41:10 +00:00
|
|
|
// Generates code for reporting that an illegal operation has
|
|
|
|
// occurred.
|
|
|
|
void IllegalOperation(int num_arguments);
|
|
|
|
|
2010-08-27 11:47:12 +00:00
|
|
|
// Picks out an array index from the hash field.
|
|
|
|
// Register use:
|
|
|
|
// hash - holds the index's hash. Clobbered.
|
|
|
|
// index - holds the overwritten index on exit.
|
|
|
|
void IndexFromHash(Register hash, Register index);
|
|
|
|
|
2010-02-05 08:46:41 +00:00
|
|
|
// Get the number of least significant bits from a register
|
|
|
|
void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
|
2011-02-10 20:04:54 +00:00
|
|
|
void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
|
2010-02-05 08:46:41 +00:00
|
|
|
|
2009-11-12 13:55:21 +00:00
|
|
|
// Uses VFP instructions to Convert a Smi to a double.
|
|
|
|
void IntegerToDoubleConversionWithVFP3(Register inReg,
|
|
|
|
Register outHighReg,
|
|
|
|
Register outLowReg);
|
|
|
|
|
2010-06-29 09:40:36 +00:00
|
|
|
// Load the value of a number object into a VFP double register. If the object
|
|
|
|
// is not a number a jump to the label not_number is performed and the VFP
|
|
|
|
// double register is unchanged.
|
|
|
|
void ObjectToDoubleVFPRegister(
|
|
|
|
Register object,
|
|
|
|
DwVfpRegister value,
|
|
|
|
Register scratch1,
|
|
|
|
Register scratch2,
|
|
|
|
Register heap_number_map,
|
|
|
|
SwVfpRegister scratch3,
|
|
|
|
Label* not_number,
|
|
|
|
ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);
|
|
|
|
|
|
|
|
// Load the value of a smi object into a VFP double register. The register
|
|
|
|
// scratch1 can be the same register as smi in which case smi will hold the
|
|
|
|
// untagged value afterwards.
|
|
|
|
void SmiToDoubleVFPRegister(Register smi,
|
|
|
|
DwVfpRegister value,
|
|
|
|
Register scratch1,
|
|
|
|
SwVfpRegister scratch2);
|
|
|
|
|
2010-09-02 08:30:52 +00:00
|
|
|
// Convert the HeapNumber pointed to by source to a 32bits signed integer
|
|
|
|
// dest. If the HeapNumber does not fit into a 32bits signed integer branch
|
2011-02-04 10:52:19 +00:00
|
|
|
// to not_int32 label. If VFP3 is available double_scratch is used but not
|
|
|
|
// scratch2.
|
2010-09-02 08:30:52 +00:00
|
|
|
void ConvertToInt32(Register source,
|
|
|
|
Register dest,
|
|
|
|
Register scratch,
|
|
|
|
Register scratch2,
|
2011-02-04 10:52:19 +00:00
|
|
|
DwVfpRegister double_scratch,
|
2010-09-02 08:30:52 +00:00
|
|
|
Label *not_int32);
|
|
|
|
|
2011-03-15 11:19:13 +00:00
|
|
|
// Truncates a double using a specific rounding mode.
|
|
|
|
// Clears the z flag (ne condition) if an overflow occurs.
|
|
|
|
// If exact_conversion is true, the z flag is also cleared if the conversion
|
2012-01-16 12:38:59 +00:00
|
|
|
// was inexact, i.e. if the double value could not be converted exactly
|
2011-03-15 11:19:13 +00:00
|
|
|
// to a 32bit integer.
|
2011-03-02 09:31:42 +00:00
|
|
|
void EmitVFPTruncate(VFPRoundingMode rounding_mode,
|
|
|
|
SwVfpRegister result,
|
|
|
|
DwVfpRegister double_input,
|
|
|
|
Register scratch1,
|
|
|
|
Register scratch2,
|
|
|
|
CheckForInexactConversion check
|
|
|
|
= kDontCheckForInexactConversion);
|
|
|
|
|
2011-03-15 11:19:13 +00:00
|
|
|
// Helper for EmitECMATruncate.
|
|
|
|
// This will truncate a floating-point value outside of the signed 32bit
|
|
|
|
// integer range to a 32bit signed integer.
|
|
|
|
// Expects the double value loaded in input_high and input_low.
|
|
|
|
// Exits with the answer in 'result'.
|
|
|
|
// Note that this code does not work for values in the 32bit range!
|
|
|
|
void EmitOutOfInt32RangeTruncate(Register result,
|
|
|
|
Register input_high,
|
|
|
|
Register input_low,
|
|
|
|
Register scratch);
|
|
|
|
|
|
|
|
// Performs a truncating conversion of a floating point number as used by
|
|
|
|
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
|
|
|
|
// Exits with 'result' holding the answer and all other registers clobbered.
|
|
|
|
void EmitECMATruncate(Register result,
|
|
|
|
DwVfpRegister double_input,
|
|
|
|
SwVfpRegister single_scratch,
|
|
|
|
Register scratch,
|
|
|
|
Register scratch2,
|
|
|
|
Register scratch3);
|
|
|
|
|
2010-03-23 13:38:04 +00:00
|
|
|
// Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
|
|
|
|
// instruction. On pre-ARM5 hardware this routine gives the wrong answer
|
2010-06-28 11:47:23 +00:00
|
|
|
// for 0 (31 instead of 32). Source and scratch can be the same in which case
|
|
|
|
// the source is clobbered. Source and zeros can also be the same in which
|
|
|
|
// case scratch should be a different register.
|
|
|
|
void CountLeadingZeros(Register zeros,
|
|
|
|
Register source,
|
|
|
|
Register scratch);
|
2008-10-06 06:41:10 +00:00
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
// ---------------------------------------------------------------------------
|
|
|
|
// Runtime calls
|
|
|
|
|
|
|
|
// Call a code stub.
|
2009-08-26 10:27:32 +00:00
|
|
|
void CallStub(CodeStub* stub, Condition cond = al);
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2010-02-05 12:00:42 +00:00
|
|
|
// Call a code stub.
|
|
|
|
void TailCallStub(CodeStub* stub, Condition cond = al);
|
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
// Call a runtime routine.
|
2011-03-18 20:35:07 +00:00
|
|
|
void CallRuntime(const Runtime::Function* f, int num_arguments);
|
2010-12-07 11:31:57 +00:00
|
|
|
void CallRuntimeSaveDoubles(Runtime::FunctionId id);
|
2008-07-03 15:10:15 +00:00
|
|
|
|
|
|
|
// Convenience function: Same as above, but takes the fid instead.
|
|
|
|
void CallRuntime(Runtime::FunctionId fid, int num_arguments);
|
|
|
|
|
2010-02-15 12:32:27 +00:00
|
|
|
// Convenience function: call an external reference.
|
|
|
|
void CallExternalReference(const ExternalReference& ext,
|
|
|
|
int num_arguments);
|
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
// Tail call of a runtime routine (jump).
|
2010-02-24 08:33:51 +00:00
|
|
|
// Like JumpToExternalReference, but also takes care of passing the number
|
2008-08-13 09:32:07 +00:00
|
|
|
// of parameters.
|
2010-02-24 08:33:51 +00:00
|
|
|
void TailCallExternalReference(const ExternalReference& ext,
|
|
|
|
int num_arguments,
|
|
|
|
int result_size);
|
|
|
|
|
|
|
|
// Convenience function: tail call a runtime routine (jump).
|
|
|
|
void TailCallRuntime(Runtime::FunctionId fid,
|
2009-09-08 11:52:05 +00:00
|
|
|
int num_arguments,
|
|
|
|
int result_size);
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2011-04-27 14:29:25 +00:00
|
|
|
int CalculateStackPassedWords(int num_reg_arguments,
|
|
|
|
int num_double_arguments);
|
|
|
|
|
2010-04-09 11:25:52 +00:00
|
|
|
// Before calling a C-function from generated code, align arguments on stack.
|
|
|
|
// After aligning the frame, non-register arguments must be stored in
|
|
|
|
// sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
|
2011-04-27 14:29:25 +00:00
|
|
|
// are word sized. If double arguments are used, this function assumes that
|
|
|
|
// all double arguments are stored before core registers; otherwise the
|
|
|
|
// correct alignment of the double values is not guaranteed.
|
2010-04-09 11:25:52 +00:00
|
|
|
// Some compilers/platforms require the stack to be aligned when calling
|
|
|
|
// C++ code.
|
|
|
|
// Needs a scratch register to do some arithmetic. This register will be
|
|
|
|
// trashed.
|
2011-04-27 14:29:25 +00:00
|
|
|
void PrepareCallCFunction(int num_reg_arguments,
|
|
|
|
int num_double_registers,
|
|
|
|
Register scratch);
|
|
|
|
void PrepareCallCFunction(int num_reg_arguments,
|
|
|
|
Register scratch);
|
|
|
|
|
|
|
|
// There are two ways of passing double arguments on ARM, depending on
|
|
|
|
// whether soft or hard floating point ABI is used. These functions
|
|
|
|
// abstract parameter passing for the three different ways we call
|
|
|
|
// C functions from generated code.
|
|
|
|
void SetCallCDoubleArguments(DoubleRegister dreg);
|
|
|
|
void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
|
|
|
|
void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
|
2010-04-09 11:25:52 +00:00
|
|
|
|
|
|
|
// Calls a C function and cleans up the space for arguments allocated
|
|
|
|
// by PrepareCallCFunction. The called function is not allowed to trigger a
|
|
|
|
// garbage collection, since that might move the code and invalidate the
|
|
|
|
// return address (unless this is somehow accounted for by the called
|
|
|
|
// function).
|
|
|
|
void CallCFunction(ExternalReference function, int num_arguments);
|
2011-10-04 09:07:50 +00:00
|
|
|
void CallCFunction(Register function, int num_arguments);
|
2011-04-27 14:29:25 +00:00
|
|
|
void CallCFunction(ExternalReference function,
|
|
|
|
int num_reg_arguments,
|
|
|
|
int num_double_arguments);
|
2011-10-04 09:07:50 +00:00
|
|
|
void CallCFunction(Register function,
|
2011-04-27 14:29:25 +00:00
|
|
|
int num_reg_arguments,
|
|
|
|
int num_double_arguments);
|
2010-04-09 11:25:52 +00:00
|
|
|
|
2011-02-23 10:41:13 +00:00
|
|
|
void GetCFunctionDoubleResult(const DoubleRegister dst);
|
|
|
|
|
2011-10-28 12:37:29 +00:00
|
|
|
// Calls an API function. Allocates HandleScope, extracts returned value
|
|
|
|
// from handle and propagates exceptions. Restores context. stack_space
|
2012-01-16 12:38:59 +00:00
|
|
|
// - space to be unwound on exit (includes the call JS arguments space and
|
2011-10-28 12:37:29 +00:00
|
|
|
// the additional space allocated for the fast call).
|
|
|
|
void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
|
2011-02-04 13:43:38 +00:00
|
|
|
|
2009-09-28 13:53:43 +00:00
|
|
|
// Jump to a runtime routine.
|
2010-02-24 08:33:51 +00:00
|
|
|
void JumpToExternalReference(const ExternalReference& builtin);
|
2008-07-03 15:10:15 +00:00
|
|
|
|
|
|
|
// Invoke specified builtin JavaScript function. Adds an entry to
|
|
|
|
// the unresolved list if the name does not resolve.
|
2011-01-10 12:24:19 +00:00
|
|
|
void InvokeBuiltin(Builtins::JavaScript id,
|
2011-04-29 20:07:41 +00:00
|
|
|
InvokeFlag flag,
|
2011-05-03 15:12:40 +00:00
|
|
|
const CallWrapper& call_wrapper = NullCallWrapper());
|
2008-09-12 03:29:06 +00:00
|
|
|
|
|
|
|
// Store the code object for the given builtin in the target register and
|
|
|
|
// setup the function in r1.
|
|
|
|
void GetBuiltinEntry(Register target, Builtins::JavaScript id);
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2010-08-20 07:10:18 +00:00
|
|
|
// Store the function for the given builtin in the target register.
|
|
|
|
void GetBuiltinFunction(Register target, Builtins::JavaScript id);
|
|
|
|
|
2011-04-01 15:37:59 +00:00
|
|
|
// Returns the handle that is patched with this code object on
// installation (see code_object_).  Must not be called before the
// handle has been created.
Handle<Object> CodeObject() {
  ASSERT(!code_object_.is_null());
  return code_object_;
}
|
2009-02-25 16:52:15 +00:00
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2008-11-20 16:59:00 +00:00
|
|
|
// ---------------------------------------------------------------------------
|
|
|
|
// StatsCounter support
|
|
|
|
|
|
|
|
void SetCounter(StatsCounter* counter, int value,
|
|
|
|
Register scratch1, Register scratch2);
|
|
|
|
void IncrementCounter(StatsCounter* counter, int value,
|
|
|
|
Register scratch1, Register scratch2);
|
|
|
|
void DecrementCounter(StatsCounter* counter, int value,
|
|
|
|
Register scratch1, Register scratch2);
|
|
|
|
|
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
// ---------------------------------------------------------------------------
|
|
|
|
// Debugging
|
|
|
|
|
2011-01-26 08:32:54 +00:00
|
|
|
// Calls Abort(msg) if the condition cond is not satisfied.
|
2008-07-03 15:10:15 +00:00
|
|
|
// Use --debug_code to enable.
|
2011-01-26 08:32:54 +00:00
|
|
|
void Assert(Condition cond, const char* msg);
|
2010-06-17 21:51:51 +00:00
|
|
|
void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
|
2010-08-16 16:06:46 +00:00
|
|
|
void AssertFastElements(Register elements);
|
2008-07-03 15:10:15 +00:00
|
|
|
|
|
|
|
// Like Assert(), but always enabled.
|
2011-01-26 08:32:54 +00:00
|
|
|
void Check(Condition cond, const char* msg);
|
2008-07-03 15:10:15 +00:00
|
|
|
|
|
|
|
// Print a message to stdout and abort execution.
|
|
|
|
void Abort(const char* msg);
|
|
|
|
|
|
|
|
// Verify restrictions about code generated in stubs.
|
|
|
|
void set_generating_stub(bool value) { generating_stub_ = value; }
|
|
|
|
bool generating_stub() { return generating_stub_; }
|
2008-07-30 08:49:36 +00:00
|
|
|
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
|
|
|
|
bool allow_stub_calls() { return allow_stub_calls_; }
|
2011-09-15 11:30:45 +00:00
|
|
|
void set_has_frame(bool value) { has_frame_ = value; }
|
|
|
|
bool has_frame() { return has_frame_; }
|
|
|
|
inline bool AllowThisStubCall(CodeStub* stub);
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2011-04-29 08:50:38 +00:00
|
|
|
// Whether the hard-float EABI variant (double arguments passed in VFP
// registers) is in use.  Compile-time constant selected by the
// USE_EABI_HARDFLOAT build flag.
bool use_eabi_hardfloat() {
#if USE_EABI_HARDFLOAT
  return true;
#else
  return false;
#endif
}
|
|
|
|
|
2011-02-01 16:38:25 +00:00
|
|
|
// ---------------------------------------------------------------------------
|
|
|
|
// Number utilities
|
|
|
|
|
|
|
|
// Check whether the value of reg is a power of two and not zero. If not
|
|
|
|
// control continues at the label not_power_of_two. If reg is a power of two
|
|
|
|
// the register scratch contains the value of (reg - 1) when control falls
|
|
|
|
// through.
|
|
|
|
void JumpIfNotPowerOfTwoOrZero(Register reg,
|
|
|
|
Register scratch,
|
|
|
|
Label* not_power_of_two_or_zero);
|
2011-03-22 10:00:43 +00:00
|
|
|
// Check whether the value of reg is a power of two and not zero.
|
|
|
|
// Control falls through if it is, with scratch containing the mask
|
|
|
|
// value (reg - 1).
|
|
|
|
// Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
|
|
|
|
// zero or negative, or jumps to the 'not_power_of_two' label if the value is
|
|
|
|
// strictly positive but not a power of two.
|
|
|
|
void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
|
|
|
|
Register scratch,
|
|
|
|
Label* zero_and_neg,
|
|
|
|
Label* not_power_of_two);
|
2011-02-01 16:38:25 +00:00
|
|
|
|
2010-02-05 12:00:42 +00:00
|
|
|
// ---------------------------------------------------------------------------
|
|
|
|
// Smi utilities
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
// Tag reg as a smi in place: the smi encoding is value << 1, computed
// here as reg + reg.  Pass SetCC to set the condition flags (overflow
// indicates the value does not fit in a smi).
void SmiTag(Register reg, SBit s = LeaveCC) {
  add(reg, reg, Operand(reg), s);
}
|
2011-01-25 14:52:35 +00:00
|
|
|
// Tag the value in src as a smi and place the result in dst (src is
// left unchanged unless dst.is(src)).  Pass SetCC to set the condition
// flags (overflow indicates the value does not fit in a smi).
void SmiTag(Register dst, Register src, SBit s = LeaveCC) {
  add(dst, src, Operand(src), s);
}
|
2010-12-07 11:31:57 +00:00
|
|
|
|
2011-01-14 08:49:52 +00:00
|
|
|
// Try to convert int32 to smi. If the value is too large, preserve
// the original value and jump to not_a_smi. Destroys scratch and
// sets flags.
void TrySmiTag(Register reg, Label* not_a_smi, Register scratch) {
  // Tag a copy so reg survives if the tagging overflows.
  mov(scratch, reg);
  SmiTag(scratch, SetCC);  // Overflow (vs) means the value doesn't fit.
  b(vs, not_a_smi);
  mov(reg, scratch);
}
|
|
|
|
|
2011-03-01 14:09:23 +00:00
|
|
|
// Untag the smi in reg in place: arithmetic shift right by the smi tag
// size recovers the signed value.  Pass SetCC to set condition flags.
void SmiUntag(Register reg, SBit s = LeaveCC) {
  mov(reg, Operand(reg, ASR, kSmiTagSize), s);
}
// Untag the smi in src into dst (src unchanged unless dst.is(src)).
// Pass SetCC to set condition flags.
void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
  mov(dst, Operand(src, ASR, kSmiTagSize), s);
}
|
2010-12-07 11:31:57 +00:00
|
|
|
|
2012-01-27 16:54:22 +00:00
|
|
|
// Untag the source value into destination and jump if source is a smi.
|
|
|
|
// Source and destination can be the same register.
|
|
|
|
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
|
|
|
|
|
|
|
|
// Untag the source value into destination and jump if source is not a smi.
|
|
|
|
// Source and destination can be the same register.
|
|
|
|
void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
|
|
|
|
|
2011-01-26 07:44:45 +00:00
|
|
|
// Jump if the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label) {
  // Smis have the tag bit(s) clear, so the tst sets eq for a smi.
  tst(value, Operand(kSmiTagMask));
  b(eq, smi_label);
}
|
|
|
|
// Jump if the register contains a non-smi.
inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
  // Non-smis have at least one tag bit set, so the tst sets ne.
  tst(value, Operand(kSmiTagMask));
  b(ne, not_smi_label);
}
|
2010-02-05 12:00:42 +00:00
|
|
|
// Jump if either of the registers contain a non-smi.
|
|
|
|
void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
|
|
|
|
// Jump if either of the registers contain a smi.
|
|
|
|
void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
|
|
|
|
|
2010-08-12 13:43:08 +00:00
|
|
|
// Abort execution if argument is a smi. Used in debug code.
|
|
|
|
void AbortIfSmi(Register object);
|
2011-01-20 14:20:54 +00:00
|
|
|
void AbortIfNotSmi(Register object);
|
2010-08-12 13:43:08 +00:00
|
|
|
|
2011-02-14 13:13:41 +00:00
|
|
|
// Abort execution if argument is a string. Used in debug code.
|
|
|
|
void AbortIfNotString(Register object);
|
|
|
|
|
2011-02-04 10:52:19 +00:00
|
|
|
// Abort execution if argument is not the root value with the given index.
|
|
|
|
void AbortIfNotRootValue(Register src,
|
|
|
|
Heap::RootListIndex root_value_index,
|
|
|
|
const char* message);
|
|
|
|
|
2011-01-25 14:52:35 +00:00
|
|
|
// ---------------------------------------------------------------------------
|
|
|
|
// HeapNumber utilities
|
|
|
|
|
|
|
|
void JumpIfNotHeapNumber(Register object,
|
|
|
|
Register heap_number_map,
|
|
|
|
Register scratch,
|
|
|
|
Label* on_not_heap_number);
|
|
|
|
|
2010-01-21 12:10:56 +00:00
|
|
|
// ---------------------------------------------------------------------------
|
|
|
|
// String utilities
|
|
|
|
|
|
|
|
// Checks if both objects are sequential ASCII strings and jumps to label
|
|
|
|
// if either is not. Assumes that neither object is a smi.
|
|
|
|
void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1,
|
|
|
|
Register object2,
|
|
|
|
Register scratch1,
|
|
|
|
Register scratch2,
|
2010-03-17 14:53:16 +00:00
|
|
|
Label* failure);
|
2010-01-21 12:10:56 +00:00
|
|
|
|
|
|
|
// Checks if both objects are sequential ASCII strings and jumps to label
|
|
|
|
// if either is not.
|
|
|
|
void JumpIfNotBothSequentialAsciiStrings(Register first,
|
|
|
|
Register second,
|
|
|
|
Register scratch1,
|
|
|
|
Register scratch2,
|
|
|
|
Label* not_flat_ascii_strings);
|
|
|
|
|
2010-03-08 11:58:33 +00:00
|
|
|
// Checks if both instance types are sequential ASCII strings and jumps to
|
|
|
|
// label if either is not.
|
|
|
|
void JumpIfBothInstanceTypesAreNotSequentialAscii(
|
|
|
|
Register first_object_instance_type,
|
|
|
|
Register second_object_instance_type,
|
|
|
|
Register scratch1,
|
|
|
|
Register scratch2,
|
|
|
|
Label* failure);
|
|
|
|
|
|
|
|
// Check if instance type is sequential ASCII string and jump to label if
|
|
|
|
// it is not.
|
|
|
|
void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
|
|
|
|
Register scratch,
|
|
|
|
Label* failure);
|
|
|
|
|
|
|
|
|
2011-01-19 14:53:38 +00:00
|
|
|
// ---------------------------------------------------------------------------
|
|
|
|
// Patching helpers.
|
|
|
|
|
|
|
|
// Get the location of a relocated constant (its address in the constant pool)
|
|
|
|
// from its load site.
|
|
|
|
void GetRelocatedValueLocation(Register ldr_location,
|
|
|
|
Register result);
|
|
|
|
|
|
|
|
|
2011-05-16 14:10:56 +00:00
|
|
|
void ClampUint8(Register output_reg, Register input_reg);
|
|
|
|
|
|
|
|
void ClampDoubleToUint8(Register result_reg,
|
|
|
|
DoubleRegister input_reg,
|
|
|
|
DoubleRegister temp_double_reg);
|
|
|
|
|
|
|
|
|
2011-05-23 15:59:38 +00:00
|
|
|
void LoadInstanceDescriptors(Register map, Register descriptors);
|
|
|
|
|
2011-09-15 11:30:45 +00:00
|
|
|
// Activation support.
|
|
|
|
void EnterFrame(StackFrame::Type type);
|
|
|
|
void LeaveFrame(StackFrame::Type type);
|
|
|
|
|
2012-02-22 12:47:42 +00:00
|
|
|
// Expects object in r0 and returns map with validated enum cache
|
|
|
|
// in r0. Assumes that any other register can be used as a scratch.
|
|
|
|
void CheckEnumCache(Register null_value, Label* call_runtime);
|
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
private:
|
2011-03-18 20:35:07 +00:00
|
|
|
void CallCFunctionHelper(Register function,
|
2011-04-27 14:29:25 +00:00
|
|
|
int num_reg_arguments,
|
|
|
|
int num_double_arguments);
|
2011-03-18 20:35:07 +00:00
|
|
|
|
2010-02-04 21:32:02 +00:00
|
|
|
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
|
2008-09-12 03:29:06 +00:00
|
|
|
|
|
|
|
// Helper functions for generating invokes.
|
|
|
|
void InvokePrologue(const ParameterCount& expected,
|
|
|
|
const ParameterCount& actual,
|
|
|
|
Handle<Code> code_constant,
|
|
|
|
Register code_reg,
|
|
|
|
Label* done,
|
2012-01-17 15:53:58 +00:00
|
|
|
bool* definitely_mismatches,
|
2011-01-11 14:11:03 +00:00
|
|
|
InvokeFlag flag,
|
2011-05-30 13:23:17 +00:00
|
|
|
const CallWrapper& call_wrapper,
|
|
|
|
CallKind call_kind);
|
2008-09-12 03:29:06 +00:00
|
|
|
|
2010-05-04 14:49:50 +00:00
|
|
|
void InitializeNewString(Register string,
|
|
|
|
Register length,
|
|
|
|
Heap::RootListIndex map_index,
|
|
|
|
Register scratch1,
|
|
|
|
Register scratch2);
|
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
|
|
|
|
void InNewSpace(Register object,
|
|
|
|
Register scratch,
|
|
|
|
Condition cond, // eq for new space, ne otherwise.
|
|
|
|
Label* branch);
|
|
|
|
|
|
|
|
// Helper for finding the mark bits for an address. Afterwards, the
|
|
|
|
// bitmap register points at the word with the mark bits and the mask
|
|
|
|
// the position of the first bit. Leaves addr_reg unchanged.
|
|
|
|
inline void GetMarkBits(Register addr_reg,
|
|
|
|
Register bitmap_reg,
|
|
|
|
Register mask_reg);
|
|
|
|
|
2011-11-11 13:48:14 +00:00
|
|
|
// Helper for throwing exceptions. Compute a handler address and jump to
|
|
|
|
// it. See the implementation for register usage.
|
|
|
|
void JumpToHandlerEntry();
|
|
|
|
|
2011-02-21 11:29:45 +00:00
|
|
|
// Compute memory operands for safepoint stack slots.
|
|
|
|
static int SafepointRegisterStackIndex(int reg_code);
|
|
|
|
MemOperand SafepointRegisterSlot(Register reg);
|
|
|
|
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
|
|
|
|
|
2010-02-04 21:32:02 +00:00
|
|
|
bool generating_stub_;
|
|
|
|
bool allow_stub_calls_;
|
2011-09-15 11:30:45 +00:00
|
|
|
bool has_frame_;
|
2010-02-04 21:32:02 +00:00
|
|
|
// This handle will be patched with the code object on installation.
|
|
|
|
Handle<Object> code_object_;
|
2011-02-21 11:29:45 +00:00
|
|
|
|
|
|
|
// Needs access to SafepointRegisterStackIndex for optimized frame
|
|
|
|
// traversal.
|
|
|
|
friend class OptimizedFrame;
|
2008-07-03 15:10:15 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
|
2009-09-14 06:57:24 +00:00
|
|
|
// The code patcher is used to patch (typically) small parts of code e.g. for
|
|
|
|
// debugging and other types of instrumentation. When using the code patcher
|
|
|
|
// the exact number of bytes specified must be emitted. It is not legal to emit
|
|
|
|
// relocation information. If any of these constraints are violated it causes
|
|
|
|
// an assertion to fail.
|
|
|
|
class CodePatcher {
 public:
  // Prepares to patch exactly |instructions| instructions starting at
  // |address|.
  CodePatcher(byte* address, int instructions);
  virtual ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

  // Emit an instruction directly.
  void Emit(Instr instr);

  // Emit an address directly.
  void Emit(Address addr);

  // Emit the condition part of an instruction leaving the rest of the current
  // instruction unchanged.
  void EmitCondition(Condition cond);

 private:
  byte* address_;  // The address of the code being patched.
  int instructions_;  // Number of instructions of the expected patch size.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
};
|
|
|
|
|
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
// -----------------------------------------------------------------------------
|
|
|
|
// Static helper functions.
|
|
|
|
|
2011-11-29 10:56:11 +00:00
|
|
|
// Returns a memory operand addressing slot |index| of the context held
// in register |context|.
inline MemOperand ContextOperand(Register context, int index) {
  return MemOperand(context, Context::SlotOffset(index));
}
|
|
|
|
|
|
|
|
|
2011-11-29 10:56:11 +00:00
|
|
|
// Returns a memory operand addressing the global-object slot of the
// current context (held in cp).
inline MemOperand GlobalObjectOperand() {
  return ContextOperand(cp, Context::GLOBAL_INDEX);
}
|
|
|
|
|
|
|
|
|
2009-04-21 13:42:12 +00:00
|
|
|
#ifdef GENERATED_CODE_COVERAGE
|
|
|
|
#define CODE_COVERAGE_STRINGIFY(x) #x
|
|
|
|
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
|
|
|
|
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
|
|
|
|
#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
|
|
|
|
#else
|
|
|
|
#define ACCESS_MASM(masm) masm->
|
|
|
|
#endif
|
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
|
|
|
|
} } // namespace v8::internal
|
|
|
|
|
2009-05-04 13:36:43 +00:00
|
|
|
#endif // V8_ARM_MACRO_ASSEMBLER_ARM_H_
|