Avoid a call to the runtime system when doing binary fp ops on ARM

(at the moment only if we do not need to allocate a heap number).
Find a few more opportunities to avoid heap number allocation on IA32.
Add some infrastructure to test coverage of generated ARM code in our
tests.
Review URL: http://codereview.chromium.org/67163

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@1720 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
erik.corry@gmail.com 2009-04-16 09:30:23 +00:00
parent b92740bf44
commit 4d18959446
24 changed files with 529 additions and 130 deletions
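
The heart of this change is the OverwriteMode introduced below: when one operand of a binary operation is a heap number that the result is allowed to clobber, the generated ARM code writes the double result straight into that operand's storage instead of allocating a fresh heap number (which would require calling the runtime). A minimal C++ sketch of the idea; HeapNumber and AllocateHeapNumber here are illustrative stand-ins, not V8's actual object layout or API:

struct HeapNumber { double value; };  // Stand-in for the real tagged object.

enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

HeapNumber* BinaryDoubleOp(HeapNumber* x, HeapNumber* y,
                           double (*op)(double, double),
                           OverwriteMode mode,
                           HeapNumber* (*AllocateHeapNumber)()) {
  HeapNumber* result;
  switch (mode) {
    case OVERWRITE_LEFT:  result = x; break;  // x is dead afterwards; reuse it.
    case OVERWRITE_RIGHT: result = y; break;  // y is dead afterwards; reuse it.
    default:              result = AllocateHeapNumber(); break;  // Needs runtime.
  }
  result->value = op(x->value, y->value);  // No allocation on overwrite paths.
  return result;
}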

View File

@@ -582,4 +582,40 @@ ExternalReference ExternalReference::debug_step_in_fp_address() {
return ExternalReference(Debug::step_in_fp_addr());
}
static double add_two_doubles(double x, double y) {
return x + y;
}
static double sub_two_doubles(double x, double y) {
return x - y;
}
static double mul_two_doubles(double x, double y) {
return x * y;
}
ExternalReference ExternalReference::double_fp_operation(
Token::Value operation) {
typedef double BinaryFPOperation(double x, double y);
BinaryFPOperation* function = NULL;
switch (operation) {
case Token::ADD:
function = &add_two_doubles;
break;
case Token::SUB:
function = &sub_two_doubles;
break;
case Token::MUL:
function = &mul_two_doubles;
break;
default:
UNREACHABLE();
}
return ExternalReference(FUNCTION_ADDR(function));
}
} } // namespace v8::internal
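
These static functions give the generated code a plain C entry point for each double operation, wrapped in an ExternalReference so the serializer can record the address. A hedged sketch of how the ARM stub consumes it (this mov/blx pair appears in HandleBinaryOpSlowCases below):

ExternalReference ref = ExternalReference::double_fp_operation(Token::ADD);
__ mov(r5, Operand(ref));  // Load add_two_doubles' address into r5.
__ blx(r5);                // Call it; the helper cannot allocate or GC.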

View File

@@ -38,6 +38,7 @@
#include "runtime.h"
#include "top.h"
#include "zone-inl.h"
#include "token.h"
namespace v8 { namespace internal {
@@ -417,6 +418,8 @@ class ExternalReference BASE_EMBEDDED {
// Used to check if single stepping is enabled in generated code.
static ExternalReference debug_step_in_fp_address();
static ExternalReference double_fp_operation(Token::Value operation);
Address address() const { return address_; }
private:

View File

@@ -34,7 +34,7 @@
namespace v8 { namespace internal {
#define __ masm->
#define __ DEFINE_MASM(masm)
void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
@@ -218,8 +218,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(r5, Operand(r4));
__ mov(r6, Operand(r4));
__ mov(r7, Operand(r4));
if (kR9Available == 1)
if (kR9Available == 1) {
__ mov(r9, Operand(r4));
}
// Invoke the code and pass argc as r0.
__ mov(r0, Operand(r3));

View File

@@ -38,7 +38,8 @@
namespace v8 { namespace internal {
#define __ masm_->
#define __ DEFINE_MASM(masm_)
// -------------------------------------------------------------------------
// CodeGenState implementation.
@@ -677,13 +678,25 @@ class SetPropertyStub : public CodeStub {
class GenericBinaryOpStub : public CodeStub {
public:
explicit GenericBinaryOpStub(Token::Value op) : op_(op) { }
GenericBinaryOpStub(Token::Value op,
OverwriteMode mode)
: op_(op), mode_(mode) { }
private:
Token::Value op_;
OverwriteMode mode_;
// Minor key encoding in 16 bits.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 14> {};
Major MajorKey() { return GenericBinaryOp; }
int MinorKey() { return static_cast<int>(op_); }
int MinorKey() {
// Encode the parameters in a unique 16 bit value.
return OpBits::encode(op_)
| ModeBits::encode(mode_);
}
void Generate(MacroAssembler* masm);
const char* GetName() {
@@ -708,7 +721,8 @@ class GenericBinaryOpStub : public CodeStub {
};
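
For reference, a sketch of the BitField helper the new MinorKey relies on, assuming the usual shift-and-mask layout (the real template lives elsewhere in V8): ModeBits occupies bits 0-1 and OpBits bits 2-15, so every (op, mode) pair yields a distinct 16-bit stub key.

template <class T, int shift, int size>
class BitFieldSketch {
 public:
  static int encode(T value) { return static_cast<int>(value) << shift; }
  static T decode(int bits) {
    return static_cast<T>((bits >> shift) & ((1 << size) - 1));
  }
};
// OpBits::encode(op) | ModeBits::encode(mode) packs both stub parameters
// into one minor key, and decode() recovers them.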
void CodeGenerator::GenericBinaryOperation(Token::Value op) {
void CodeGenerator::GenericBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode) {
VirtualFrame::SpilledScope spilled_scope(this);
// sp[0] : y
// sp[1] : x
@@ -727,7 +741,7 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op) {
case Token::SAR: {
frame_->EmitPop(r0); // r0 : y
frame_->EmitPop(r1); // r1 : x
GenericBinaryOpStub stub(op);
GenericBinaryOpStub stub(op, overwrite_mode);
frame_->CallStub(&stub, 0);
break;
}
@@ -767,11 +781,13 @@ class DeferredInlineSmiOperation: public DeferredCode {
DeferredInlineSmiOperation(CodeGenerator* generator,
Token::Value op,
int value,
bool reversed)
bool reversed,
OverwriteMode overwrite_mode)
: DeferredCode(generator),
op_(op),
value_(value),
reversed_(reversed) {
reversed_(reversed),
overwrite_mode_(overwrite_mode) {
set_comment("[ DeferredInlineSmiOperation");
}
@@ -781,6 +797,7 @@ class DeferredInlineSmiOperation: public DeferredCode {
Token::Value op_;
int value_;
bool reversed_;
OverwriteMode overwrite_mode_;
};
@@ -844,7 +861,7 @@ void DeferredInlineSmiOperation::Generate() {
break;
}
GenericBinaryOpStub igostub(op_);
GenericBinaryOpStub igostub(op_, overwrite_mode_);
Result arg0 = generator()->allocator()->Allocate(r1);
ASSERT(arg0.is_valid());
Result arg1 = generator()->allocator()->Allocate(r0);
@@ -856,7 +873,8 @@
void CodeGenerator::SmiOperation(Token::Value op,
Handle<Object> value,
bool reversed) {
bool reversed,
OverwriteMode mode) {
VirtualFrame::SpilledScope spilled_scope(this);
// NOTE: This is an attempt to inline (a bit) more of the code for
// some possible smi operations (like + and -) when (at least) one
@@ -875,7 +893,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
switch (op) {
case Token::ADD: {
DeferredCode* deferred =
new DeferredInlineSmiOperation(this, op, int_value, reversed);
new DeferredInlineSmiOperation(this, op, int_value, reversed, mode);
__ add(r0, r0, Operand(value), SetCC);
deferred->enter()->Branch(vs);
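
This inline fast path leans on the Smi representation: with kSmiTag == 0 and a one-bit tag, a Smi is just the integer shifted left by one, so tagged values add directly and the SetCC/vs pair above catches any result outside the 31-bit range. A small sketch of the invariant:

#include <stdint.h>

// 31-bit Smi tagging on 32-bit ARM (tag bit 0 means Smi).
static inline int32_t SmiTag(int32_t value) { return value << 1; }
static inline int32_t SmiUntag(int32_t smi) { return smi >> 1; }
// SmiTag(a) + SmiTag(b) == SmiTag(a + b) whenever a + b fits in 31 bits;
// the CPU's signed-overflow flag catches exactly the cases that do not.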
@@ -887,7 +905,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
case Token::SUB: {
DeferredCode* deferred =
new DeferredInlineSmiOperation(this, op, int_value, reversed);
new DeferredInlineSmiOperation(this, op, int_value, reversed, mode);
if (!reversed) {
__ sub(r0, r0, Operand(value), SetCC);
@@ -905,7 +923,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
case Token::BIT_XOR:
case Token::BIT_AND: {
DeferredCode* deferred =
new DeferredInlineSmiOperation(this, op, int_value, reversed);
new DeferredInlineSmiOperation(this, op, int_value, reversed, mode);
__ tst(r0, Operand(kSmiTagMask));
deferred->enter()->Branch(ne);
switch (op) {
@@ -925,12 +943,12 @@ void CodeGenerator::SmiOperation(Token::Value op,
__ mov(ip, Operand(value));
frame_->EmitPush(ip);
frame_->EmitPush(r0);
GenericBinaryOperation(op);
GenericBinaryOperation(op, mode);
} else {
int shift_value = int_value & 0x1f; // least significant 5 bits
DeferredCode* deferred =
new DeferredInlineSmiOperation(this, op, shift_value, false);
new DeferredInlineSmiOperation(this, op, shift_value, false, mode);
__ tst(r0, Operand(kSmiTagMask));
deferred->enter()->Branch(ne);
__ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags
@@ -982,7 +1000,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
frame_->EmitPush(ip);
frame_->EmitPush(r0);
}
GenericBinaryOperation(op);
GenericBinaryOperation(op, mode);
break;
}
@@ -1487,8 +1505,8 @@ void CodeGenerator::GenerateFastCaseSwitchJumpTable(
// Test for a Smi value in a HeapNumber.
__ tst(r0, Operand(kSmiTagMask));
is_smi.Branch(eq);
__ ldr(r1, MemOperand(r0, HeapObject::kMapOffset - kHeapObjectTag));
__ ldrb(r1, MemOperand(r1, Map::kInstanceTypeOffset - kHeapObjectTag));
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
__ cmp(r1, Operand(HEAP_NUMBER_TYPE));
default_target->Branch(ne);
frame_->EmitPush(r0);
@@ -2523,7 +2541,9 @@ void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
if (s->is_eval_scope()) {
Label next, fast;
if (!context.is(tmp)) __ mov(tmp, Operand(context));
if (!context.is(tmp)) {
__ mov(tmp, Operand(context));
}
__ bind(&next);
// Terminate at global context.
__ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
@@ -2934,15 +2954,24 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
LoadAndSpill(node->value());
} else {
// +=, *= and similar binary assignments.
// Get the old value of the lhs.
target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
Literal* literal = node->value()->AsLiteral();
bool overwrite =
(node->value()->AsBinaryOperation() != NULL &&
node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
if (literal != NULL && literal->handle()->IsSmi()) {
SmiOperation(node->binary_op(), literal->handle(), false);
SmiOperation(node->binary_op(),
literal->handle(),
false,
overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
frame_->EmitPush(r0);
} else {
LoadAndSpill(node->value());
GenericBinaryOperation(node->binary_op());
GenericBinaryOperation(node->binary_op(),
overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
frame_->EmitPush(r0);
}
}
@@ -3822,19 +3851,39 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
// is a literal small integer.
Literal* lliteral = node->left()->AsLiteral();
Literal* rliteral = node->right()->AsLiteral();
// NOTE: The code below assumes that the slow cases (calls to runtime)
// never return a constant/immutable object.
bool overwrite_left =
(node->left()->AsBinaryOperation() != NULL &&
node->left()->AsBinaryOperation()->ResultOverwriteAllowed());
bool overwrite_right =
(node->right()->AsBinaryOperation() != NULL &&
node->right()->AsBinaryOperation()->ResultOverwriteAllowed());
if (rliteral != NULL && rliteral->handle()->IsSmi()) {
LoadAndSpill(node->left());
SmiOperation(node->op(), rliteral->handle(), false);
SmiOperation(node->op(),
rliteral->handle(),
false,
overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
LoadAndSpill(node->right());
SmiOperation(node->op(), lliteral->handle(), true);
SmiOperation(node->op(),
lliteral->handle(),
true,
overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
} else {
OverwriteMode overwrite_mode = NO_OVERWRITE;
if (overwrite_left) {
overwrite_mode = OVERWRITE_LEFT;
} else if (overwrite_right) {
overwrite_mode = OVERWRITE_RIGHT;
}
LoadAndSpill(node->left());
LoadAndSpill(node->right());
GenericBinaryOperation(node->op());
GenericBinaryOperation(node->op(), overwrite_mode);
}
frame_->EmitPush(r0);
}
@@ -4067,7 +4116,8 @@ bool CodeGenerator::HasValidEntryRegisters() { return true; }
#undef __
#define __ masm->
#define __ DEFINE_MASM(masm)
Handle<String> Reference::GetName() {
ASSERT(type_ == NAMED);
@@ -4469,94 +4519,157 @@ void SetPropertyStub::Generate(MacroAssembler* masm) {
}
static void HandleBinaryOpSlowCases(MacroAssembler* masm,
Label* not_smi,
const Builtins::JavaScript& builtin,
Token::Value operation,
int swi_number,
OverwriteMode mode) {
Label slow;
if (mode == NO_OVERWRITE) {
__ bind(not_smi);
}
__ bind(&slow);
__ push(r1);
__ push(r0);
__ mov(r0, Operand(1)); // Set number of arguments.
__ InvokeBuiltin(builtin, JUMP_JS); // Tail call.
// Could it be a double-double op? If we already have a place to put
// the answer then we can do the op and skip the builtin and runtime call.
if (mode != NO_OVERWRITE) {
__ bind(not_smi);
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &slow); // We can't handle a Smi-double combination yet.
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &slow); // We can't handle a Smi-double combination yet.
// Get map of r0 into r2.
__ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
// Get type of r0 into r3.
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceTypeOffset));
__ cmp(r3, Operand(HEAP_NUMBER_TYPE));
__ b(ne, &slow);
// Get type of r1 into r3.
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
// Check they are both the same map (heap number map).
__ cmp(r2, r3);
__ b(ne, &slow);
// Both are doubles.
// Calling convention says that second double is in r2 and r3.
__ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
__ push(lr);
if (mode == OVERWRITE_LEFT) {
__ push(r1);
} else {
__ push(r0);
}
// Calling convention says that first double is in r0 and r1.
__ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
__ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
// Call C routine that may not cause GC or other trouble.
__ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
#if !defined(__arm__)
// Notify the simulator that we are calling a C routine (add, sub or mul).
__ swi(swi_number);
#else
// Actually call the C routine.
__ blx(r5);
#endif
// Store answer in the overwritable heap number.
__ pop(r4);
__ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset));
__ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + kPointerSize));
__ mov(r0, Operand(r4));
// And we are done.
__ pop(pc);
}
}
void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// r1 : x
// r0 : y
// result : r0
// All ops need to know whether we are dealing with two Smis. Set up r2 to
// tell us that.
__ orr(r2, r1, Operand(r0)); // r2 = x | y;
switch (op_) {
case Token::ADD: {
Label slow, exit;
// fast path
__ orr(r2, r1, Operand(r0)); // r2 = x | y;
__ add(r0, r1, Operand(r0), SetCC); // add y optimistically
// go slow-path in case of overflow
__ b(vs, &slow);
// go slow-path in case of non-smi operands
ASSERT(kSmiTag == 0); // adjust code below
Label not_smi;
// Fast path.
ASSERT(kSmiTag == 0); // Adjust code below.
__ tst(r2, Operand(kSmiTagMask));
__ b(eq, &exit);
// slow path
__ bind(&slow);
__ sub(r0, r0, Operand(r1)); // revert optimistic add
__ push(r1);
__ push(r0);
__ mov(r0, Operand(1)); // set number of arguments
__ InvokeBuiltin(Builtins::ADD, JUMP_JS);
// done
__ bind(&exit);
__ b(ne, &not_smi);
__ add(r0, r1, Operand(r0), SetCC); // Add y optimistically.
// Return if no overflow.
__ Ret(vc);
__ sub(r0, r0, Operand(r1)); // Revert optimistic add.
HandleBinaryOpSlowCases(masm,
&not_smi,
Builtins::ADD,
Token::ADD,
assembler::arm::simulator_fp_add,
mode_);
break;
}
case Token::SUB: {
Label slow, exit;
// fast path
__ orr(r2, r1, Operand(r0)); // r2 = x | y;
__ sub(r3, r1, Operand(r0), SetCC); // subtract y optimistically
// go slow-path in case of overflow
__ b(vs, &slow);
// go slow-path in case of non-smi operands
ASSERT(kSmiTag == 0); // adjust code below
Label not_smi;
// Fast path.
ASSERT(kSmiTag == 0); // Adjust code below.
__ tst(r2, Operand(kSmiTagMask));
__ mov(r0, Operand(r3), LeaveCC, eq); // conditionally set r0 to result
__ b(eq, &exit);
// slow path
__ bind(&slow);
__ push(r1);
__ push(r0);
__ mov(r0, Operand(1)); // set number of arguments
__ InvokeBuiltin(Builtins::SUB, JUMP_JS);
// done
__ bind(&exit);
__ b(ne, &not_smi);
__ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically.
// Return if no overflow.
__ Ret(vc);
__ sub(r0, r1, Operand(r0)); // Revert optimistic subtract.
HandleBinaryOpSlowCases(masm,
&not_smi,
Builtins::SUB,
Token::SUB,
assembler::arm::simulator_fp_sub,
mode_);
break;
}
case Token::MUL: {
Label slow, exit;
// tag check
__ orr(r2, r1, Operand(r0)); // r2 = x | y;
Label not_smi, slow;
ASSERT(kSmiTag == 0); // adjust code below
__ tst(r2, Operand(kSmiTagMask));
__ b(ne, &slow);
// remove tag from one operand (but keep sign), so that result is smi
__ b(ne, &not_smi);
// Remove tag from one operand (but keep sign), so that result is Smi.
__ mov(ip, Operand(r0, ASR, kSmiTagSize));
// do multiplication
__ smull(r3, r2, r1, ip); // r3 = lower 32 bits of ip*r1
// go slow on overflows (overflow bit is not set)
// Do multiplication
__ smull(r3, r2, r1, ip); // r3 = lower 32 bits of ip*r1.
// Go slow on overflows (overflow bit is not set).
__ mov(ip, Operand(r3, ASR, 31));
__ cmp(ip, Operand(r2)); // no overflow if higher 33 bits are identical
__ b(ne, &slow);
// go slow on zero result to handle -0
// Go slow on zero result to handle -0.
__ tst(r3, Operand(r3));
__ mov(r0, Operand(r3), LeaveCC, ne);
__ b(ne, &exit);
// slow case
__ Ret(ne);
// Slow case.
__ bind(&slow);
__ push(r1);
__ push(r0);
__ mov(r0, Operand(1)); // set number of arguments
__ InvokeBuiltin(Builtins::MUL, JUMP_JS);
// done
__ bind(&exit);
HandleBinaryOpSlowCases(masm,
&not_smi,
Builtins::MUL,
Token::MUL,
assembler::arm::simulator_fp_mul,
mode_);
break;
}
case Token::BIT_OR:
case Token::BIT_AND:
case Token::BIT_XOR: {
Label slow, exit;
// tag check
__ orr(r2, r1, Operand(r0)); // r2 = x | y;
Label slow;
ASSERT(kSmiTag == 0); // adjust code below
__ tst(r2, Operand(kSmiTagMask));
__ b(ne, &slow);
@@ -4566,7 +4679,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break;
default: UNREACHABLE();
}
__ b(&exit);
__ Ret();
__ bind(&slow);
__ push(r1); // restore stack
__ push(r0);
@@ -4584,16 +4697,13 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
default:
UNREACHABLE();
}
__ bind(&exit);
break;
}
case Token::SHL:
case Token::SHR:
case Token::SAR: {
Label slow, exit;
// tag check
__ orr(r2, r1, Operand(r0)); // r2 = x | y;
Label slow;
ASSERT(kSmiTag == 0); // adjust code below
__ tst(r2, Operand(kSmiTagMask));
__ b(ne, &slow);
@@ -4633,7 +4743,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// tag result and store it in r0
ASSERT(kSmiTag == 0); // adjust code below
__ mov(r0, Operand(r3, LSL, kSmiTagSize));
__ b(&exit);
__ Ret();
// slow case
__ bind(&slow);
__ push(r1); // restore stack
@@ -4645,13 +4755,13 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::SHL: __ InvokeBuiltin(Builtins::SHL, JUMP_JS); break;
default: UNREACHABLE();
}
__ bind(&exit);
break;
}
default: UNREACHABLE();
}
__ Ret();
// This code should be unreachable.
__ stop("Unreachable");
}
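
A note on the multiply fast path above: after the 32x32->64 smull, the product fits in 32 bits exactly when the high word equals the sign extension of the low word, which is what the ASR-31/cmp pair tests. The equivalent check in C++, as a sketch:

#include <stdint.h>

// Mirrors: smull r3, r2, r1, ip;  mov ip, Operand(r3, ASR, 31);  cmp ip, r2.
static bool MultiplyFitsInt32(int32_t a, int32_t b, int32_t* result) {
  int64_t product = static_cast<int64_t>(a) * b;
  int32_t lo = static_cast<int32_t>(product);
  int32_t hi = static_cast<int32_t>(product >> 32);
  if (hi != (lo >> 31)) return false;  // High 33 bits differ: overflow.
  *result = lo;
  return true;
}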
@@ -4721,7 +4831,9 @@ void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
__ mov(cp, Operand(0), LeaveCC, eq);
// Restore cp otherwise.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
if (kDebug && FLAG_debug_code) __ mov(lr, Operand(pc));
if (kDebug && FLAG_debug_code) {
__ mov(lr, Operand(pc));
}
__ pop(pc);
}
@@ -4784,7 +4896,9 @@ void CEntryStub::GenerateThrowOutOfMemory(MacroAssembler* masm) {
__ mov(cp, Operand(0), LeaveCC, eq);
// Restore cp otherwise.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
if (kDebug && FLAG_debug_code) __ mov(lr, Operand(pc));
if (kDebug && FLAG_debug_code) {
__ mov(lr, Operand(pc));
}
__ pop(pc);
}
@@ -5043,9 +5157,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
}
__ ldr(ip, MemOperand(ip)); // deref address
// Branch and link to JSEntryTrampoline
// Branch and link to JSEntryTrampoline. We don't use the double underscore
// macro for the add instruction because we don't want the coverage tool
// inserting instructions here after we read the pc.
__ mov(lr, Operand(pc));
__ add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
// Unlink this frame from the handler chain. When reading the
// address of the next handler, there is no need to use the address
@@ -5057,6 +5173,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// No need to restore registers
__ add(sp, sp, Operand(StackHandlerConstants::kSize));
__ bind(&exit); // r0 holds result
// Restore the top frame descriptors from the stack.
__ pop(r3);
@@ -5068,7 +5185,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Restore callee-saved registers and return.
#ifdef DEBUG
if (FLAG_debug_code) __ mov(lr, Operand(pc));
if (FLAG_debug_code) {
__ mov(lr, Operand(pc));
}
#endif
__ ldm(ia_w, sp, kCalleeSaved | pc.bit());
}
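
The raw masm->add earlier in this stub (instead of the __ macro) matters because of an ARM quirk: reading pc yields the address of the current instruction plus 8, so the mov lr, pc / branch pair only produces a correct return address when the branch is the very next instruction. Schematically (annotation only, not new code in the commit):

// mov lr, pc            ; lr = (address of this mov) + 8, i.e. the
//                       ; instruction right after the add below.
// add pc, ip, #offset   ; the jump itself; nothing may sit in between.

A coverage stop() injected between the two would shift the return address onto the stop instruction, so the add must bypass the instrumentation macro.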

View File

@@ -35,9 +35,6 @@ class DeferredCode;
class RegisterAllocator;
class RegisterFile;
// Mode to overwrite BinaryExpression values.
enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
@@ -292,10 +289,13 @@ class CodeGenerator: public AstVisitor {
void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
void GenericBinaryOperation(Token::Value op);
void GenericBinaryOperation(Token::Value op, OverwriteMode overwrite_mode);
void Comparison(Condition cc, bool strict = false);
void SmiOperation(Token::Value op, Handle<Object> value, bool reversed);
void SmiOperation(Token::Value op,
Handle<Object> value,
bool reversed,
OverwriteMode mode);
void CallWithArguments(ZoneList<Expression*>* arguments, int position);

View File

@@ -3933,6 +3933,9 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
} else {
Literal* literal = node->value()->AsLiteral();
bool overwrite_value =
(node->value()->AsBinaryOperation() != NULL &&
node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
// There are two cases where the target is not read in the right hand
// side, that are easy to test for: the right hand side is a literal,
@@ -3945,7 +3948,9 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
target.GetValue(NOT_INSIDE_TYPEOF);
}
Load(node->value());
GenericBinaryOperation(node->binary_op(), node->type());
GenericBinaryOperation(node->binary_op(),
node->type(),
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
}
if (var != NULL &&

View File

@@ -35,9 +35,6 @@ class DeferredCode;
class RegisterAllocator;
class RegisterFile;
// Mode to overwrite BinaryExpression values.
enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
@@ -435,7 +432,7 @@ class CodeGenerator: public AstVisitor {
void GenericBinaryOperation(
Token::Value op,
SmiAnalysis* type,
const OverwriteMode overwrite_mode = NO_OVERWRITE);
OverwriteMode overwrite_mode);
// If possible, combine two constant smi values using op to produce
// a smi result, and push it on the virtual frame, all at compile time.

View File

@@ -71,6 +71,11 @@
// CodeForStatementPosition
// CodeForSourcePosition
// Mode to overwrite BinaryExpression values.
enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
#ifdef ARM
#include "codegen-arm.h"
#else

View File

@@ -106,7 +106,12 @@ enum SoftwareInterruptCodes {
call_rt_r5 = 0x10,
call_rt_r2 = 0x11,
// break point
break_point = 0x20
break_point = 0x20,
// FP operations. These simulate calling into C for a moment to do fp ops.
// They should trash all caller-save registers.
simulator_fp_add = 0x21,
simulator_fp_sub = 0x22,
simulator_fp_mul = 0x23
};

View File

@@ -58,7 +58,7 @@ bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
}
#define __ masm->
#define __ DEFINE_MASM(masm)
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,

View File

@@ -261,6 +261,15 @@ void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes swi) {
case break_point:
Print("break_point");
return;
case simulator_fp_add:
Print("simulator_fp_add");
return;
case simulator_fp_mul:
Print("simulator_fp_mul");
return;
case simulator_fp_sub:
Print("simulator_fp_sub");
return;
default:
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d",

View File

@@ -514,6 +514,16 @@ inline Dest bit_cast(const Source& source) {
}
#ifdef ARM_GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define DEFINE_MASM(masm) masm->stop(__FILE_LINE__); masm->
#else
#define DEFINE_MASM(masm) masm->
#endif
} } // namespace v8::internal
#endif // V8_GLOBALS_H_
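
The double indirection (CODE_COVERAGE_TOSTRING wrapping CODE_COVERAGE_STRINGIFY) forces __LINE__ to expand before # stringizes it; with that, every __-emitted instruction is preceded by a stop() carrying its source position, which the simulator logs and then patches to a nop. A self-contained illustration of the macro trick (names are local to this example):

#include <stdio.h>

#define STRINGIFY(x) #x
#define TOSTRING(x) STRINGIFY(x)   // Extra level expands __LINE__ first.
#define FILE_LINE __FILE__ ":" TOSTRING(__LINE__)

int main() {
  printf("%s\n", FILE_LINE);  // Prints e.g. "demo.cc:9", like __FILE_LINE__.
  return 0;
}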

View File

@@ -39,7 +39,7 @@ namespace v8 { namespace internal {
// Static IC stub generators.
//
#define __ masm->
#define __ DEFINE_MASM(masm)
// Helper function used from LoadIC/CallIC GenerateNormal.
@@ -96,7 +96,9 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// Compute the masked index: (hash + i + i * i) & mask.
__ ldr(t1, FieldMemOperand(r2, String::kLengthOffset));
__ mov(t1, Operand(t1, LSR, String::kHashShift));
if (i > 0) __ add(t1, t1, Operand(Dictionary::GetProbeOffset(i)));
if (i > 0) {
__ add(t1, t1, Operand(Dictionary::GetProbeOffset(i)));
}
__ and_(t1, t1, Operand(r3));
// Scale the index by multiplying by the element size.

View File

@@ -168,11 +168,11 @@ void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
}
void MacroAssembler::Ret() {
void MacroAssembler::Ret(Condition cond) {
#if USE_BX
bx(lr);
bx(lr, cond);
#else
mov(pc, Operand(lr));
mov(pc, Operand(lr), LeaveCC, cond);
#endif
}
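
Giving Ret a condition lets the stubs fold a branch and a return into one predicated instruction; the add fast path above, for example, replaces a conditional branch over an exit label with a single predicated return:

// Before: __ b(vs, &slow); ... __ bind(&exit); __ Ret();
// After:  __ Ret(vc);  // bx lr executed only when the overflow flag is clear.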

View File

@@ -86,7 +86,7 @@ class MacroAssembler: public Assembler {
void Call(Register target, Condition cond = al);
void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
void Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Ret();
void Ret(Condition cond = al);
// Jumps to the label at the index given by the Smi in "index".
void SmiJumpTable(Register index, Vector<Label*> targets);

View File

@@ -3482,6 +3482,7 @@ static Object* Runtime_NumberToSmi(Arguments args) {
return Heap::nan_value();
}
static Object* Runtime_NumberAdd(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);

View File

@@ -683,6 +683,18 @@ void ExternalReferenceTable::PopulateTable() {
UNCLASSIFIED,
10,
"Debug::step_in_fp_addr()");
Add(ExternalReference::double_fp_operation(Token::ADD).address(),
UNCLASSIFIED,
11,
"add_two_doubles");
Add(ExternalReference::double_fp_operation(Token::SUB).address(),
UNCLASSIFIED,
12,
"sub_two_doubles");
Add(ExternalReference::double_fp_operation(Token::MUL).address(),
UNCLASSIFIED,
13,
"mul_two_doubles");
}

View File

@@ -90,12 +90,44 @@ Debugger::~Debugger() {
}
#ifdef ARM_GENERATED_CODE_COVERAGE
static FILE* coverage_log = NULL;
static void InitializeCoverage() {
char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
if (file_name != NULL) {
coverage_log = fopen(file_name, "a");
}
}
void Debugger::Stop(Instr* instr) {
char* str = reinterpret_cast<char*>(instr->InstructionBits() & 0x0fffffff);
if (strlen(str) > 0) {
if (coverage_log != NULL) {
fprintf(coverage_log, "Simulator hit %s\n", str);
fflush(coverage_log);
}
instr->SetInstructionBits(0xe1a00000); // Overwrite with nop.
}
sim_->set_pc(sim_->get_pc() + Instr::kInstrSize);
}
#else // ndef ARM_GENERATED_CODE_COVERAGE
static void InitializeCoverage() {
}
void Debugger::Stop(Instr* instr) {
const char* str = (const char*)(instr->InstructionBits() & 0x0fffffff);
PrintF("Simulator hit %s\n", str);
sim_->set_pc(sim_->get_pc() + Instr::kInstrSize);
Debug();
}
#endif
static const char* reg_names[] = { "r0", "r1", "r2", "r3",
@@ -375,6 +407,7 @@ Simulator::Simulator() {
// access violation if the simulator ever tries to execute it.
registers_[pc] = bad_lr;
registers_[lr] = bad_lr;
InitializeCoverage();
}
@@ -427,6 +460,37 @@ int32_t Simulator::get_pc() const {
}
// For use in calls that take two double values, constructed from r0, r1, r2
// and r3.
void Simulator::GetFpArgs(double* x, double* y) {
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
char buffer[2 * sizeof(registers_[0])];
// Registers 0 and 1 -> x.
memcpy(buffer, registers_, sizeof(buffer));
memcpy(x, buffer, sizeof(buffer));
// Registers 2 and 3 -> y.
memcpy(buffer, registers_ + 2, sizeof(buffer));
memcpy(y, buffer, sizeof(buffer));
}
void Simulator::SetFpResult(const double& result) {
char buffer[2 * sizeof(registers_[0])];
memcpy(buffer, &result, sizeof(buffer));
// result -> registers 0 and 1.
memcpy(registers_, buffer, sizeof(buffer));
}
void Simulator::TrashCallerSaveRegisters() {
// We don't trash the registers with the return value.
registers_[2] = 0x50Bad4U;
registers_[3] = 0x50Bad4U;
registers_[12] = 0x50Bad4U;
}
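
GetFpArgs and SetFpResult encode the soft-float calling convention the stubs rely on: the first double lives in r0:r1, the second in r2:r3, and the result goes back in r0:r1. The byte-buffer memcpy is the portable way to reinterpret a register pair as a double. A standalone sketch of the same idiom (assumes little-endian word order, as on the simulated ARM):

#include <stdint.h>
#include <string.h>

static double DoubleFromRegisterPair(int32_t lo, int32_t hi) {
  int32_t pair[2] = { lo, hi };
  double result;
  memcpy(&result, pair, sizeof(result));  // Defined behavior, unlike a cast.
  return result;
}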
// The ARM cannot do unaligned reads and writes. On some ARM platforms an
// interrupt is caused. On others it does a funky rotation thing. For now we
// simply disallow unaligned reads, but at some point we may want to move to
@@ -862,7 +926,8 @@ typedef int64_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1);
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime.
void Simulator::SoftwareInterrupt(Instr* instr) {
switch (instr->SwiField()) {
int swi = instr->SwiField();
switch (swi) {
case call_rt_r5: {
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(get_register(r5));
@@ -894,6 +959,30 @@ void Simulator::SoftwareInterrupt(Instr* instr) {
dbg.Debug();
break;
}
{
double x, y, z;
case simulator_fp_add:
GetFpArgs(&x, &y);
z = x + y;
SetFpResult(z);
TrashCallerSaveRegisters();
set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
break;
case simulator_fp_sub:
GetFpArgs(&x, &y);
z = x - y;
SetFpResult(z);
TrashCallerSaveRegisters();
set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
break;
case simulator_fp_mul:
GetFpArgs(&x, &y);
z = x * y;
SetFpResult(z);
TrashCallerSaveRegisters();
set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
break;
}
default: {
UNREACHABLE();
break;

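The brace enclosing the fp cases above is deliberate: C++ forbids jumping into the scope of an initialized variable, but plain declarations like double x, y, z; may be jumped past, so the shared locals live in a block whose scope all three case labels share. A compilable miniature of the same pattern:

#include <stdio.h>

static void Dispatch(int swi) {
  switch (swi) {
    {
      double x, y, z;  // No initializers, so jumping into scope is legal.
      case 0:
        x = 1; y = 2; z = x + y;
        printf("add: %g\n", z);
        break;
      case 1:
        x = 5; y = 3; z = x - y;
        printf("sub: %g\n", z);
        break;
    }
    default:
      break;
  }
}

int main() {
  Dispatch(0);  // add: 3
  Dispatch(1);  // sub: 2
  return 0;
}
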
View File

@@ -174,6 +174,12 @@ class Simulator {
// Executes one instruction.
void InstructionDecode(Instr* instr);
// For use in calls that take two double values, constructed from r0, r1, r2
// and r3.
void GetFpArgs(double* x, double* y);
void SetFpResult(const double& result);
void TrashCallerSaveRegisters();
// architecture state
int32_t registers_[16];
bool n_flag_;

View File

@@ -33,7 +33,7 @@
namespace v8 { namespace internal {
#define __ masm->
#define __ DEFINE_MASM(masm)
static void ProbeTable(MacroAssembler* masm,
@@ -183,7 +183,7 @@ void StubCompiler::GenerateLoadField(MacroAssembler* masm,
// Check that the maps haven't changed.
Register reg =
__ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
GenerateFastPropertyLoad(masm, r0, reg, holder, index);
__ Ret();
}
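
The switch from __ CheckMaps to masm->CheckMaps throughout this file follows from the coverage macro in globals.h above: with instrumentation on, __ expands to masm->stop(__FILE_LINE__); masm->, i.e. two statements, which is fine in statement position but cannot sit in the middle of an initializer:

// Statement position: fine.
//   __ Ret();   // expands to masm->stop("..."); masm->Ret();
// Initializer position: would not compile with coverage enabled.
//   Register reg = __ CheckMaps(...);
// Hence the explicit masm->CheckMaps(...) calls for value-returning uses.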
@@ -203,7 +203,7 @@ void StubCompiler::GenerateLoadConstant(MacroAssembler* masm,
// Check that the maps haven't changed.
Register reg =
__ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
// Return the constant value.
__ mov(r0, Operand(Handle<Object>(value)));
@@ -226,7 +226,7 @@ void StubCompiler::GenerateLoadCallback(MacroAssembler* masm,
// Check that the maps haven't changed.
Register reg =
__ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
// Push the arguments on the JS stack of the caller.
__ push(receiver); // receiver
@@ -256,7 +256,7 @@ void StubCompiler::GenerateLoadInterceptor(MacroAssembler* masm,
// Check that the maps haven't changed.
Register reg =
__ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
// Push the arguments on the JS stack of the caller.
__ push(receiver); // receiver
@@ -456,8 +456,7 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
#undef __
#define __ masm()->
#define __ DEFINE_MASM(masm())
Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
@@ -511,7 +510,7 @@ Object* CallStubCompiler::CompileCallField(Object* object,
// Do the right check and compute the holder register.
Register reg =
__ CheckMaps(JSObject::cast(object), r0, holder, r3, r2, &miss);
masm()->CheckMaps(JSObject::cast(object), r0, holder, r3, r2, &miss);
GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
// Check that the function really is a function.

View File

@@ -36,7 +36,8 @@ namespace v8 { namespace internal {
// -------------------------------------------------------------------------
// VirtualFrame implementation.
#define __ masm_->
#define __ DEFINE_MASM(masm_)
// On entry to a function, the virtual frame already contains the
// receiver and the parameters. All initial frame elements are in

View File

@@ -30,9 +30,9 @@ const SMI_MIN = -(1 << 30);
function testmulneg(a, b) {
var base = a * b;
assertEquals(-base, a * -b);
assertEquals(-base, -a * b);
assertEquals(base, -a * -b);
assertEquals(-base, a * -b, "a * -b where a = " + a + ", b = " + b);
assertEquals(-base, -a * b, "-a * b where a = " + a + ", b = " + b);
assertEquals(base, -a * -b, "-a * -b where a = " + a + ", b = " + b);
}
testmulneg(2, 3);

View File

@@ -33,10 +33,14 @@ function testLimits() {
var addAboveMax = Number.MAX_VALUE + 1/eps;
var mulBelowMin = Number.MIN_VALUE * (1 - eps);
var addBelowMin = Number.MIN_VALUE - eps;
assertTrue(mulAboveMax == Number.MAX_VALUE || mulAboveMax == Infinity);
assertTrue(addAboveMax == Number.MAX_VALUE || addAboveMax == Infinity);
assertTrue(mulBelowMin == Number.MIN_VALUE || mulBelowMin <= 0);
assertTrue(addBelowMin == Number.MIN_VALUE || addBelowMin <= 0);
assertTrue(mulAboveMax == Number.MAX_VALUE ||
mulAboveMax == Infinity, "mul" + i);
assertTrue(addAboveMax == Number.MAX_VALUE ||
addAboveMax == Infinity, "add" + i);
assertTrue(mulBelowMin == Number.MIN_VALUE ||
mulBelowMin <= 0, "mul2" + i);
assertTrue(addBelowMin == Number.MIN_VALUE ||
addBelowMin <= 0, "add2" + i);
}
}

View File

@@ -100,3 +100,98 @@ assertEquals(SMI_MIN - ONE_HUNDRED, Sub100(SMI_MIN)); // overflow
assertEquals(ONE_HUNDRED - SMI_MIN, Sub100Reversed(SMI_MIN)); // overflow
assertEquals(42 - ONE_HUNDRED, Sub100(OBJ_42)); // non-smi
assertEquals(ONE_HUNDRED - 42, Sub100Reversed(OBJ_42)); // non-smi
function Shr1(x) {
return x >>> 1;
}
function Shr100(x) {
return x >>> 100;
}
function Shr1Reversed(x) {
return 1 >>> x;
}
function Shr100Reversed(x) {
return 100 >>> x;
}
function Sar1(x) {
return x >> 1;
}
function Sar100(x) {
return x >> 100;
}
function Sar1Reversed(x) {
return 1 >> x;
}
function Sar100Reversed(x) {
return 100 >> x;
}
assertEquals(0, Shr1(1));
assertEquals(0, Sar1(1));
assertEquals(0, Shr1Reversed(2));
assertEquals(0, Sar1Reversed(2));
assertEquals(1610612736, Shr1(SMI_MIN));
assertEquals(-536870912, Sar1(SMI_MIN));
assertEquals(1, Shr1Reversed(SMI_MIN));
assertEquals(1, Sar1Reversed(SMI_MIN));
assertEquals(21, Shr1(OBJ_42));
assertEquals(21, Sar1(OBJ_42));
assertEquals(0, Shr1Reversed(OBJ_42));
assertEquals(0, Sar1Reversed(OBJ_42));
assertEquals(6, Shr100(100));
assertEquals(6, Sar100(100));
assertEquals(12, Shr100Reversed(99));
assertEquals(12, Sar100Reversed(99));
assertEquals(201326592, Shr100(SMI_MIN));
assertEquals(-67108864, Sar100(SMI_MIN));
assertEquals(100, Shr100Reversed(SMI_MIN));
assertEquals(100, Sar100Reversed(SMI_MIN));
assertEquals(2, Shr100(OBJ_42));
assertEquals(2, Sar100(OBJ_42));
assertEquals(0, Shr100Reversed(OBJ_42));
assertEquals(0, Sar100Reversed(OBJ_42));
function Xor1(x) {
return x ^ 1;
}
function Xor100(x) {
return x ^ 100;
}
function Xor1Reversed(x) {
return 1 ^ x;
}
function Xor100Reversed(x) {
return 100 ^ x;
}
assertEquals(0, Xor1(1));
assertEquals(3, Xor1Reversed(2));
assertEquals(SMI_MIN + 1, Xor1(SMI_MIN));
assertEquals(SMI_MIN + 1, Xor1Reversed(SMI_MIN));
assertEquals(43, Xor1(OBJ_42));
assertEquals(43, Xor1Reversed(OBJ_42));
assertEquals(0, Xor100(100));
assertEquals(7, Xor100Reversed(99));
assertEquals(-1073741724, Xor100(SMI_MIN));
assertEquals(-1073741724, Xor100Reversed(SMI_MIN));
assertEquals(78, Xor100(OBJ_42));
assertEquals(78, Xor100Reversed(OBJ_42));
var x = 0x23; var y = 0x35;
assertEquals(0x16, x ^ y);