2012-01-09 16:37:47 +00:00
|
|
|
|
// Copyright 2012 the V8 project authors. All rights reserved.
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Redistribution and use in source and binary forms, with or without
|
|
|
|
|
// modification, are permitted provided that the following conditions are
|
|
|
|
|
// met:
|
|
|
|
|
//
|
|
|
|
|
// * Redistributions of source code must retain the above copyright
|
|
|
|
|
// notice, this list of conditions and the following disclaimer.
|
|
|
|
|
// * Redistributions in binary form must reproduce the above
|
|
|
|
|
// copyright notice, this list of conditions and the following
|
|
|
|
|
// disclaimer in the documentation and/or other materials provided
|
|
|
|
|
// with the distribution.
|
|
|
|
|
// * Neither the name of Google Inc. nor the names of its
|
|
|
|
|
// contributors may be used to endorse or promote products derived
|
|
|
|
|
// from this software without specific prior written permission.
|
|
|
|
|
//
|
|
|
|
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
|
|
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
|
|
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
|
|
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
|
|
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
|
|
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
|
|
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
|
|
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
|
|
2011-03-18 20:35:07 +00:00
|
|
|
|
#include "v8.h"
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
#include "arm/lithium-codegen-arm.h"
|
2011-02-17 15:25:38 +00:00
|
|
|
|
#include "arm/lithium-gap-resolver-arm.h"
|
2010-12-07 11:31:57 +00:00
|
|
|
|
#include "code-stubs.h"
|
|
|
|
|
#include "stub-cache.h"
|
2013-09-09 16:34:40 +00:00
|
|
|
|
#include "hydrogen-osr.h"
|
2010-12-07 11:31:57 +00:00
|
|
|
|
|
|
|
|
|
namespace v8 {
|
|
|
|
|
namespace internal {
|
|
|
|
|
|
|
|
|
|
|
2013-08-20 11:10:24 +00:00
|
|
|
|
// Call wrapper that records a safepoint immediately after a call emitted
// through the macro assembler (e.g. InvokeFunction).  BeforeCall is a no-op;
// AfterCall records the safepoint for the supplied pointer map so the GC can
// find tagged values live across the call.
class SafepointGenerator V8_FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() {}

  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}

  virtual void AfterCall() const V8_OVERRIDE {
    // Record the safepoint at the current pc, i.e. just after the call.
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;       // Owning code generator (not owned here).
  LPointerMap* pointers_;   // Pointer map describing live tagged values.
  Safepoint::DeoptMode deopt_mode_;  // Lazy-deopt recording mode.
};
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#define __ masm()->
|
|
|
|
|
|
|
|
|
|
// Top-level driver: emits prologue, instruction bodies, deferred code, the
// deopt jump table and the safepoint table.  Returns false if any phase
// aborted (see Abort()).
bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  // Phases short-circuit: a failed (aborted) phase stops code generation.
  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateDeoptJumpTable() &&
      GenerateSafepointTable();
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Finalize the generated Code object: record stack-slot usage and the
// safepoint table offset, hook up embedded-map dependencies, and attach the
// deoptimization data.
void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (FLAG_weak_embedded_maps_in_optimized_code) {
    // Let embedded maps deoptimize this code when they die.
    RegisterDependentCodeForEmbeddedMaps(code);
  }
  PopulateDeoptimizationData(code);
  info()->CommitDependencies(code);
}
|
|
|
|
|
|
|
|
|
|
|
2013-08-02 09:53:11 +00:00
|
|
|
|
// Record the bailout reason and mark code generation as aborted; the phase
// chain in GenerateCode() then stops via is_aborted().
void LCodeGen::Abort(BailoutReason reason) {
  info()->set_bailout_reason(reason);
  status_ = ABORTED;
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Emit the function prologue: optional entry hook and receiver fix-up (for
// optimized JS functions), frame construction, stack-slot reservation,
// callee-saved double spilling, local context allocation, and call tracing.
// NOTE(review): the code-aging instruction sequence below must not be
// reordered or resized — the runtime patches it in place.
bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    // Support --stop-at for debugging: break on entry of the named function.
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ stop("stop_at");
    }
#endif

    // r1: Callee's JS function.
    // cp: Callee's context.
    // fp: Caller's frame pointer.
    // lr: Caller's pc.

    // Strict mode functions and builtins need to replace the receiver
    // with undefined when called as functions (without an explicit
    // receiver object). r5 is zero for method calls and non-zero for
    // function calls.
    if (!info_->is_classic_mode() || info_->is_native()) {
      Label ok;
      __ cmp(r5, Operand::Zero());
      __ b(eq, &ok);
      // The receiver sits just above the parameters on the caller's stack.
      int receiver_offset = scope()->num_parameters() * kPointerSize;
      __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
      __ str(r2, MemOperand(sp, receiver_offset));
      __ bind(&ok);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      // Stub frames carry a STUB marker instead of a function pointer.
      __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
      __ Push(Smi::FromInt(StackFrame::STUB));
      // Adjust FP to point to saved FP.
      __ add(fp, sp, Operand(2 * kPointerSize));
    } else {
      PredictableCodeSizeScope predictible_code_size_scope(
          masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
      // The following three instructions must remain together and unmodified
      // for code aging to work properly.
      __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
      __ nop(ip.code());
      // Adjust FP to point to saved FP.
      __ add(fp, sp, Operand(2 * kPointerSize));
    }
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      // Fill the reserved slots with the zap value so stale reads are
      // recognizable in the debugger.
      __ sub(sp, sp, Operand(slots * kPointerSize));
      __ push(r0);
      __ push(r1);
      __ add(r0, sp, Operand(slots * kPointerSize));
      __ mov(r1, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ sub(r0, r0, Operand(kPointerSize));
      __ str(r1, MemOperand(r0, 2 * kPointerSize));
      __ cmp(r0, sp);
      __ b(ne, &loop);
      __ pop(r1);
      __ pop(r0);
    } else {
      __ sub(sp, sp, Operand(slots * kPointerSize));
    }
  }

  if (info()->saves_caller_doubles()) {
    Comment(";;; Save clobbered callee double registers");
    int count = 0;
    BitVector* doubles = chunk()->allocated_double_registers();
    BitVector::Iterator save_iterator(doubles);
    while (!save_iterator.Done()) {
      __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
              MemOperand(sp, count * kDoubleSize));
      save_iterator.Advance();
      count++;
    }
  }

  // Possibly allocate a local context.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is in r1.
    __ push(r1);
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both r0 and cp.  It replaces the context
    // passed to us.  It's saved in the stack and kept live in cp.
    __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ ldr(r0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextOperand(cp, var->index());
        __ str(r0, target);
        // Update the write barrier. This clobbers r3 and r0.
        __ RecordWriteContextSlot(
            cp,
            target.offset(),
            r0,
            r3,
            GetLinkRegisterState(),
            kSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}
|
|
|
|
|
|
|
|
|
|
|
2013-09-09 16:34:40 +00:00
|
|
|
|
// Emit the on-stack-replacement entry prologue.  Idempotent: only the first
// call emits code; subsequent calls return immediately.
void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  ASSERT(slots >= 0);
  __ sub(sp, sp, Operand(slots * kPointerSize));
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Emit all deferred code sections (slow paths branched to from the main
// instruction stream).  Stub code may need a frame built around each
// deferred section since the main path runs frameless.
bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      // Attribute the deferred code to the source position of the
      // instruction that spawned it.
      int pos = instructions_->at(code->instruction_index())->position();
      RecordAndUpdatePosition(pos);

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        // Build a minimal STUB frame for the duration of the deferred code.
        Comment(";;; Build frame");
        ASSERT(!frame_is_built_);
        ASSERT(info()->IsStub());
        frame_is_built_ = true;
        __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
        __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ add(fp, sp, Operand(2 * kPointerSize));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        // Tear the temporary frame back down before rejoining the main path.
        Comment(";;; Destroy frame");
        ASSERT(frame_is_built_);
        __ pop(ip);
        __ ldm(ia_w, sp, cp.bit() | fp.bit() | lr.bit());
        frame_is_built_ = false;
      }
      __ jmp(code->exit());
    }
  }

  // Force constant pool emission at the end of the deferred code to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  return !is_aborted();
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Emit the deoptimization jump table: one entry per recorded bailout,
// branching to the corresponding deoptimizer entry.  Entries that need a
// frame share a single frame-building stub bound at the first such entry.
bool LCodeGen::GenerateDeoptJumpTable() {
  // Check that the jump table is accessible from everywhere in the function
  // code, i.e. that offsets to the table can be encoded in the 24bit signed
  // immediate of a branch instruction.
  // To simplify we consider the code size from the first instruction to the
  // end of the jump table. We also don't consider the pc load delta.
  // Each entry in the jump table generates one instruction and inlines one
  // 32bit data after it.
  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
      deopt_jump_table_.length() * 7)) {
    Abort(kGeneratedCodeIsTooLarge);
  }

  if (deopt_jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  Label table_start;
  __ bind(&table_start);
  Label needs_frame;
  for (int i = 0; i < deopt_jump_table_.length(); i++) {
    __ bind(&deopt_jump_table_[i].label);
    Address entry = deopt_jump_table_[i].address;
    Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
    if (id == Deoptimizer::kNotDeoptimizationEntry) {
      Comment(";;; jump table entry %d.", i);
    } else {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    if (deopt_jump_table_[i].needs_frame) {
      __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
      if (needs_frame.is_bound()) {
        // Reuse the frame-building sequence emitted for an earlier entry.
        __ b(&needs_frame);
      } else {
        __ bind(&needs_frame);
        __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        ASSERT(info()->IsStub());
        __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ add(fp, sp, Operand(2 * kPointerSize));
        // Simulate a call: save the return address in lr, then jump.
        __ mov(lr, Operand(pc), LeaveCC, al);
        __ mov(pc, ip);
      }
    } else {
      __ mov(lr, Operand(pc), LeaveCC, al);
      __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
    }
    masm()->CheckConstPool(false, false);
  }

  // Force constant pool emission at the end of the deopt jump table to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Emit the accumulated safepoint table after all code has been generated.
bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Map a register-allocator index to the corresponding ARM core register.
Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}
|
|
|
|
|
|
|
|
|
|
|
2012-12-18 16:25:45 +00:00
|
|
|
|
// Map a register-allocator index to the corresponding VFP double register.
DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
  return DwVfpRegister::FromAllocationIndex(index);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Resolve a register-kind Lithium operand to its ARM core register.
Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  const int allocation_index = op->index();
  return ToRegister(allocation_index);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Materialize an operand's value in a core register.  Register operands are
// returned directly; constants and stack slots are loaded into |scratch|.
// Double immediates are not supported here and abort.
Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      // Load the raw 32-bit integer value as an immediate.
      __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      ASSERT(r.IsSmiOrTagged());
      __ LoadObject(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot() || op->IsArgument()) {
    __ ldr(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}
|
|
|
|
|
|
|
|
|
|
|
2012-12-18 16:25:45 +00:00
|
|
|
|
// Resolve a double-register-kind Lithium operand to its VFP register.
DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}
|
|
|
|
|
|
|
|
|
|
|
2012-12-18 16:25:45 +00:00
|
|
|
|
// Materialize an operand's value in a VFP double register.  Double-register
// operands are returned directly; integer constants are converted via the
// float scratch register; stack slots are loaded into |dbl_scratch|.
// Double and tagged immediates are unsupported and abort.
DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                               SwVfpRegister flt_scratch,
                                               DwVfpRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      // int32 immediate -> core reg -> single float -> converted double.
      __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
      __ vmov(flt_scratch, ip);
      __ vcvt_f64_s32(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort(kUnsupportedDoubleImmediate);
    } else if (r.IsTagged()) {
      Abort(kUnsupportedTaggedImmediate);
    }
  } else if (op->IsStackSlot() || op->IsArgument()) {
    // TODO(regis): Why is vldr not taking a MemOperand?
    // __ vldr(dbl_scratch, ToMemOperand(op));
    MemOperand mem_op = ToMemOperand(op);
    __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}
|
|
|
|
|
|
|
|
|
|
|
2011-12-23 10:39:01 +00:00
|
|
|
|
// Return the heap object handle for a smi-or-tagged constant operand.
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  HConstant* constant = chunk_->LookupConstant(op);
  return constant->handle(isolate());
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// True if the constant operand is represented as a smi or int32.
bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  const Representation rep = chunk_->LookupLiteralRepresentation(op);
  return rep.IsSmiOrInteger32();
}
|
|
|
|
|
|
|
|
|
|
|
2013-05-23 08:32:07 +00:00
|
|
|
|
// True if the constant operand is represented as a smi.
bool LCodeGen::IsSmi(LConstantOperand* op) const {
  const Representation rep = chunk_->LookupLiteralRepresentation(op);
  return rep.IsSmi();
}
|
|
|
|
|
|
|
|
|
|
|
2013-07-25 11:53:38 +00:00
|
|
|
|
// Return the constant's value as a raw (untagged) 32-bit integer.
int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Return the constant's 32-bit value encoded for the requested
// representation: raw for Integer32, smi-tagged otherwise.
int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  const int32_t value = constant->Integer32Value();
  if (!r.IsInteger32()) {
    ASSERT(r.IsSmiOrTagged());
    // Tagged/smi representations carry the value as a smi bit pattern.
    return reinterpret_cast<int32_t>(Smi::FromInt(value));
  }
  return value;
}
|
|
|
|
|
|
|
|
|
|
|
2013-05-28 12:37:29 +00:00
|
|
|
|
// Return the constant operand's value as a smi.
Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  const int32_t value = constant->Integer32Value();
  return Smi::FromInt(value);
}
|
|
|
|
|
|
|
|
|
|
|
2011-10-20 10:26:45 +00:00
|
|
|
|
// Return the constant operand's value as a C++ double.
double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  const double value = constant->DoubleValue();
  return value;
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Convert a Lithium operand to an assembler Operand (immediate or register).
// Double values and stack slots cannot be expressed as an Operand; doubles
// abort, stack slots must go through ToMemOperand instead.
Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      ASSERT(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      ASSERT(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    ASSERT(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand::Zero();
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand::Zero();
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Return the fp-relative memory operand for a (double) stack slot.
MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  ASSERT(!op->IsRegister());
  ASSERT(!op->IsDoubleRegister());
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  const int offset = StackSlotOffset(op->index());
  return MemOperand(fp, offset);
}
|
|
|
|
|
|
|
|
|
|
|
2011-02-17 15:25:38 +00:00
|
|
|
|
// Return the memory operand for the high (second) word of a double
// stack slot.
MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  ASSERT(op->IsDoubleStackSlot());
  const int high_word_offset = StackSlotOffset(op->index()) + kPointerSize;
  return MemOperand(fp, high_word_offset);
}
|
|
|
|
|
|
|
|
|
|
|
2011-01-11 15:51:08 +00:00
|
|
|
|
// Serialize a deoptimization environment (and, recursively, its outer
// environments) into |translation|: frame header first, then one entry per
// environment value via AddToTranslation.
void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  // Outer frames are written first so the translation reads outside-in.
  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      // Getter stub frames hold only the receiver.
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      // Setter stub frames hold the receiver and the stored value.
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}
|
|
|
|
|
|
|
|
|
|
|
2013-08-07 11:24:14 +00:00
|
|
|
|
// Append one environment value to the translation.  Materialization markers
// expand recursively into captured/arguments objects; other operands are
// recorded by location kind (stack slot, register, double, constant) and
// value kind (tagged / uint32 / int32).
void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      // Re-reference an object already described in this translation.
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    // The object's field values live past the regular translation values.
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    // Arguments live above the spill slots in the frame.
    int src_index = GetStackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
void LCodeGen::CallCode(Handle<Code> code,
|
|
|
|
|
RelocInfo::Mode mode,
|
2012-10-18 12:21:42 +00:00
|
|
|
|
LInstruction* instr,
|
|
|
|
|
TargetAddressStorageMode storage_mode) {
|
|
|
|
|
CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
|
2011-04-07 13:32:45 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
void LCodeGen::CallCodeGeneric(Handle<Code> code,
|
|
|
|
|
RelocInfo::Mode mode,
|
|
|
|
|
LInstruction* instr,
|
2012-10-18 12:21:42 +00:00
|
|
|
|
SafepointMode safepoint_mode,
|
|
|
|
|
TargetAddressStorageMode storage_mode) {
|
2013-10-02 11:43:41 +00:00
|
|
|
|
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
|
2011-02-02 13:55:29 +00:00
|
|
|
|
ASSERT(instr != NULL);
|
2012-06-12 17:26:28 +00:00
|
|
|
|
// Block literal pool emission to ensure nop indicating no inlined smi code
|
|
|
|
|
// is in the correct position.
|
|
|
|
|
Assembler::BlockConstPoolScope block_const_pool(masm());
|
2011-02-02 13:55:29 +00:00
|
|
|
|
LPointerMap* pointers = instr->pointer_map();
|
|
|
|
|
RecordPosition(pointers->position());
|
2012-10-18 12:21:42 +00:00
|
|
|
|
__ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
|
2011-11-16 08:44:30 +00:00
|
|
|
|
RecordSafepointWithLazyDeopt(instr, safepoint_mode);
|
Avoid patching code after the call to binary operation stub in optimized code
This patch just adds a nop after the call to the binary operation stub in optimized code to avoid the patching for the inlined smi case used in the full code generator to kick in if the next instruction generated by the lithium code generator should accidentially enable that. For calls generated by CallCodeGeneric this was already handled on Intel platforms, but missing on ARM.
On IA-32 I did also try to check for whether the code containing the call was optimized (patch below), but that caused regressions on some benchmarks.
diff --git src/ia32/ic-ia32.cc src/ia32/ic-ia32.cc
index 5f143b1..f70e208 100644
--- src/ia32/ic-ia32.cc
+++ src/ia32/ic-ia32.cc
@@ -1603,12 +1603,18 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
// Activate inlined smi code.
if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address());
+ PatchInlinedSmiCode(address(), isolate());
}
}
-void PatchInlinedSmiCode(Address address) {
+void PatchInlinedSmiCode(Address address, Isolate* isolate) {
+ // Never patch in optimized code.
+ Code* code = isolate->pc_to_code_cache()->GetCacheEntry(address)->code;
+ if (code->kind() == Code::OPTIMIZED_FUNCTION) {
+ return;
+ }
+
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
diff --git src/ic.cc src/ic.cc
index f70f75a..62e79da 100644
--- src/ic.cc
+++ src/ic.cc
@@ -2384,7 +2384,7 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
// Activate inlined smi code.
if (previous_type == BinaryOpIC::UNINITIALIZED) {
- PatchInlinedSmiCode(ic.address());
+ PatchInlinedSmiCode(ic.address(), isolate);
}
}
diff --git src/ic.h src/ic.h
index 11c2e3a..9ef4b20 100644
--- src/ic.h
+++ src/ic.h
@@ -721,7 +721,7 @@ class CompareIC: public IC {
};
// Helper for BinaryOpIC and CompareIC.
-void PatchInlinedSmiCode(Address address);
+void PatchInlinedSmiCode(Address address, Isolate* isolate);
} } // namespace v8::internal
R=danno@chromium.org
BUG=none
TEST=none
Review URL: http://codereview.chromium.org//7350015
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@8623 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2011-07-13 09:31:17 +00:00
|
|
|
|
|
|
|
|
|
// Signal that we don't inline smi code before these stubs in the
|
|
|
|
|
// optimizing code generator.
|
|
|
|
|
if (code->kind() == Code::BINARY_OP_IC ||
|
|
|
|
|
code->kind() == Code::COMPARE_IC) {
|
|
|
|
|
__ nop();
|
|
|
|
|
}
|
2010-12-07 11:31:57 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2011-03-18 20:35:07 +00:00
|
|
|
|
// Calls the given runtime function and records a simple lazy-deopt safepoint
// using the instruction's pointer map.
void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  ASSERT(instr != NULL);
  LPointerMap* pointer_map = instr->pointer_map();
  ASSERT(pointer_map != NULL);
  RecordPosition(pointer_map->position());

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
|
|
|
|
|
|
|
|
|
|
|
2013-09-27 13:59:28 +00:00
|
|
|
|
// Materializes the context operand into cp for deferred code. The operand may
// live in a register, on the stack, or be a constant.
void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ ldr(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadObject(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}
|
|
|
|
|
|
|
|
|
|
|
2011-04-07 13:32:45 +00:00
|
|
|
|
void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
|
|
|
|
|
int argc,
|
2013-09-27 13:59:28 +00:00
|
|
|
|
LInstruction* instr,
|
|
|
|
|
LOperand* context) {
|
|
|
|
|
LoadContextFromDeferred(context);
|
2011-04-07 13:32:45 +00:00
|
|
|
|
__ CallRuntimeSaveDoubles(id);
|
|
|
|
|
RecordSafepointWithRegisters(
|
2011-11-16 08:44:30 +00:00
|
|
|
|
instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
|
2011-04-07 13:32:45 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2011-11-16 08:44:30 +00:00
|
|
|
|
// Writes the deoptimization translation for |environment| (once) and
// registers it in deoptimizations_. Registering twice is a no-op.
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  if (environment->HasBeenRegistered()) return;

  // Physical stack frame layout:
  // -x ............. -4 0 ..................................... y
  // [incoming arguments] [spill slots] [pushed outgoing arguments]

  // Layout of the environment:
  // 0 ..................................................... size-1
  // [parameters] [locals] [expression stack including arguments]

  // Layout of the translation:
  // 0 ........................................................ size - 1 + 4
  // [expression stack including arguments] [locals] [4 words] [parameters]
  // |>------------ translation_size ------------<|

  // Count total frames and JS frames along the outer chain.
  int frame_count = 0;
  int jsframe_count = 0;
  for (LEnvironment* cur = environment; cur != NULL; cur = cur->outer()) {
    ++frame_count;
    if (cur->frame_type() == JS_FUNCTION) {
      ++jsframe_count;
    }
  }
  Translation translation(&translations_, frame_count, jsframe_count, zone());
  WriteTranslation(environment, &translation);
  int deoptimization_index = deoptimizations_.length();
  int pc_offset = masm()->pc_offset();
  // Only lazy deopts need the pc offset; eager deopts pass -1.
  environment->Register(deoptimization_index,
                        translation.index(),
                        (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
  deoptimizations_.Add(environment, zone());
}
|
|
|
|
|
|
|
|
|
|
|
2013-08-14 08:54:27 +00:00
|
|
|
|
void LCodeGen::DeoptimizeIf(Condition condition,
|
2013-05-14 11:45:33 +00:00
|
|
|
|
LEnvironment* environment,
|
|
|
|
|
Deoptimizer::BailoutType bailout_type) {
|
2011-11-16 08:44:30 +00:00
|
|
|
|
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
|
2010-12-07 11:31:57 +00:00
|
|
|
|
ASSERT(environment->HasBeenRegistered());
|
|
|
|
|
int id = environment->deoptimization_index();
|
2013-02-08 17:32:47 +00:00
|
|
|
|
ASSERT(info()->IsOptimizing() || info()->IsStub());
|
2013-02-27 14:45:59 +00:00
|
|
|
|
Address entry =
|
|
|
|
|
Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
|
2010-12-07 11:31:57 +00:00
|
|
|
|
if (entry == NULL) {
|
2013-08-02 09:53:11 +00:00
|
|
|
|
Abort(kBailoutWasNotPrepared);
|
2010-12-07 11:31:57 +00:00
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM.
|
2013-04-08 07:51:32 +00:00
|
|
|
|
if (FLAG_deopt_every_n_times == 1 &&
|
|
|
|
|
!info()->IsStub() &&
|
|
|
|
|
info()->opt_count() == id) {
|
2013-07-15 13:36:36 +00:00
|
|
|
|
ASSERT(frame_is_built_);
|
|
|
|
|
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
|
2010-12-07 11:31:57 +00:00
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2013-08-09 08:24:29 +00:00
|
|
|
|
if (info()->ShouldTrapOnDeopt()) {
|
2013-08-14 08:54:27 +00:00
|
|
|
|
__ stop("trap_on_deopt", condition);
|
2013-02-08 17:32:47 +00:00
|
|
|
|
}
|
2011-05-23 12:48:17 +00:00
|
|
|
|
|
2012-12-18 16:25:45 +00:00
|
|
|
|
ASSERT(info()->IsStub() || frame_is_built_);
|
2013-08-14 08:54:27 +00:00
|
|
|
|
if (condition == al && frame_is_built_) {
|
2013-07-12 07:26:00 +00:00
|
|
|
|
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
|
2010-12-07 11:31:57 +00:00
|
|
|
|
} else {
|
2011-05-23 12:48:17 +00:00
|
|
|
|
// We often have several deopts to the same entry, reuse the last
|
|
|
|
|
// jump entry if this is the case.
|
|
|
|
|
if (deopt_jump_table_.is_empty() ||
|
2012-12-18 16:25:45 +00:00
|
|
|
|
(deopt_jump_table_.last().address != entry) ||
|
2013-05-14 11:45:33 +00:00
|
|
|
|
(deopt_jump_table_.last().bailout_type != bailout_type) ||
|
2012-12-18 16:25:45 +00:00
|
|
|
|
(deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
|
2013-05-14 11:45:33 +00:00
|
|
|
|
Deoptimizer::JumpTableEntry table_entry(entry,
|
|
|
|
|
bailout_type,
|
|
|
|
|
!frame_is_built_);
|
2012-12-18 16:25:45 +00:00
|
|
|
|
deopt_jump_table_.Add(table_entry, zone());
|
2010-12-07 11:31:57 +00:00
|
|
|
|
}
|
2013-08-14 08:54:27 +00:00
|
|
|
|
__ b(condition, &deopt_jump_table_.last().label);
|
2010-12-07 11:31:57 +00:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2013-08-14 08:54:27 +00:00
|
|
|
|
void LCodeGen::DeoptimizeIf(Condition condition,
|
2013-05-14 11:45:33 +00:00
|
|
|
|
LEnvironment* environment) {
|
|
|
|
|
Deoptimizer::BailoutType bailout_type = info()->IsStub()
|
|
|
|
|
? Deoptimizer::LAZY
|
|
|
|
|
: Deoptimizer::EAGER;
|
2013-08-14 08:54:27 +00:00
|
|
|
|
DeoptimizeIf(condition, environment, bailout_type);
|
2013-05-14 11:45:33 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2013-02-14 13:48:20 +00:00
|
|
|
|
// Scans |code| for weakly embedded objects and registers |code| as dependent
// on each embedded map / JS object, so the code can be deoptimized when they
// die or change.
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
  ZoneList<Handle<Map> > maps(1, zone());
  ZoneList<Handle<JSObject> > objects(1, zone());
  int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
  // Collect all weakly embedded maps and JS objects first; registering a
  // dependency may GC, which must not happen while iterating reloc info.
  for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
    if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
      if (it.rinfo()->target_object()->IsMap()) {
        Handle<Map> map(Map::cast(it.rinfo()->target_object()));
        maps.Add(map, zone());
      } else if (it.rinfo()->target_object()->IsJSObject()) {
        Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
        objects.Add(object, zone());
      }
    }
  }
#ifdef VERIFY_HEAP
  // This disables verification of weak embedded objects after full GC.
  // AddDependentCode can cause a GC, which would observe the state where
  // this code is not yet in the depended code lists of the embedded maps.
  NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
#endif
  for (int i = 0; i < maps.length(); i++) {
    maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
  }
  for (int i = 0; i < objects.length(); i++) {
    AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
  }
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Builds the DeoptimizationInputData for |code| from the environments
// collected in deoptimizations_: translations, literals, OSR info, and one
// entry per registered environment.
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int entry_count = deoptimizations_.length();
  if (entry_count == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(entry_count, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < entry_count; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Returns the index of |literal| in the deoptimization literal table,
// appending it if it is not already present (linear dedup scan).
int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int next_index = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return next_index;
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
|
|
|
|
|
ASSERT(deoptimization_literals_.length() == 0);
|
|
|
|
|
|
|
|
|
|
const ZoneList<Handle<JSFunction> >* inlined_closures =
|
|
|
|
|
chunk()->inlined_closures();
|
|
|
|
|
|
|
|
|
|
for (int i = 0, length = inlined_closures->length();
|
|
|
|
|
i < length;
|
|
|
|
|
i++) {
|
|
|
|
|
DefineDeoptimizationLiteral(inlined_closures->at(i));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
inlined_function_count_ = deoptimization_literals_.length();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2011-11-16 08:44:30 +00:00
|
|
|
|
// Records a lazy-deopt safepoint for |instr|, either a plain one or one that
// also captures registers, depending on |safepoint_mode|.
void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}
|
|
|
|
|
|
|
|
|
|
|
2011-01-26 20:48:48 +00:00
|
|
|
|
// Defines a safepoint at the current pc and records every pointer-valued
// operand: stack slots always, registers only when the safepoint kind
// captures registers.
void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer_op = operands->at(i);
    if (pointer_op->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer_op->index(), zone());
    } else if (pointer_op->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer_op), zone());
    }
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
|
2011-11-16 08:44:30 +00:00
|
|
|
|
Safepoint::DeoptMode deopt_mode) {
|
|
|
|
|
RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
|
2010-12-07 11:31:57 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2011-11-16 08:44:30 +00:00
|
|
|
|
// Records a safepoint with an empty pointer map.
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
|
|
|
|
|
int arguments,
|
2011-11-16 08:44:30 +00:00
|
|
|
|
Safepoint::DeoptMode deopt_mode) {
|
|
|
|
|
RecordSafepoint(
|
|
|
|
|
pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
|
2010-12-07 11:31:57 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2011-01-14 08:49:52 +00:00
|
|
|
|
void LCodeGen::RecordSafepointWithRegistersAndDoubles(
|
|
|
|
|
LPointerMap* pointers,
|
|
|
|
|
int arguments,
|
2011-11-16 08:44:30 +00:00
|
|
|
|
Safepoint::DeoptMode deopt_mode) {
|
|
|
|
|
RecordSafepoint(
|
|
|
|
|
pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
|
2011-01-14 08:49:52 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
void LCodeGen::RecordPosition(int position) {
|
2011-06-10 07:15:46 +00:00
|
|
|
|
if (position == RelocInfo::kNoPosition) return;
|
2010-12-07 11:31:57 +00:00
|
|
|
|
masm()->positions_recorder()->RecordPosition(position);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2013-08-08 02:16:12 +00:00
|
|
|
|
void LCodeGen::RecordAndUpdatePosition(int position) {
|
|
|
|
|
if (position >= 0 && position != old_position_) {
|
|
|
|
|
masm()->positions_recorder()->RecordPosition(position);
|
|
|
|
|
old_position_ = position;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2013-04-22 09:48:35 +00:00
|
|
|
|
// Returns a human-readable suffix for block comments in the disassembly.
static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Binds the label starting a basic block, emits a block-boundary comment,
// and processes the label's gap moves.
void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Delegates a parallel move to the gap resolver.
void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Resolves every non-empty parallel move attached to the gap, in inner
// position order.
void LCodeGen::DoGap(LGap* gap) {
  for (int pos = LGap::FIRST_INNER_POSITION;
       pos <= LGap::LAST_INNER_POSITION;
       pos++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(pos);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}
|
|
|
|
|
|
|
|
|
|
|
2011-04-27 11:41:42 +00:00
|
|
|
|
// An instruction gap is handled exactly like a plain gap.
void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Parameters are already in place when the function is entered.
void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Dispatches an LCallStub to the matching code stub. The context must be in
// cp and the result lands in r0.
void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->result()).is(r0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::TranscendentalCache: {
      // The argument is passed on the stack; load it into r0 for the stub.
      __ ldr(r0, MemOperand(sp, 0));
      TranscendentalCacheStub stub(instr->transcendental_type(),
                                   TranscendentalCacheStub::TAGGED);
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// OSR values are set up by the OSR prologue; just make sure it is emitted.
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
void LCodeGen::DoModI(LModI* instr) {
|
Improve code for integral modulus calculation.
Depending on what we know about the right operand, we basically do 3
different things (and the code is actually structured this way):
* If we statically know that the right operand is a power of 2, we do
some bit fiddling instead of doing a "real" modulus calculation.
This should actually be done on the Hydrogen level, not on the
Lithium level, but this will be a separate CL.
* If type feedback tells us that the right operand is a power of 2, we
do the same as above, but guarded by conditional deoptimization to
make sure that the assumption is still valid. In the long run, we
should make this guard visible on the Hydrogen level to make it
visible for GVN and other optimizations.
* In the general case we only do the minimum steps necessary and don't
try to be too clever, because cleverness actually slows us down on
real-world code.
If we look at the code gerators for LModI, we actually see that we
basically have 3 (4 on ARM) fundamentally different translations. I
don't really like lumping them together, they should probably be
different Lithium instructions. For the time being, I restructured the
generators to make this crystal-clear, at the cost of some duplication
regarding the power-of-2 cases. This will go away when we do the
strength reduction on the Hydrogen level, so I'd like to keep it as it
is for now.
Note that the MIPS part was only slightly restructured, there is still
some work to do there.
R=jkummerow@chromium.org
Review URL: https://codereview.chromium.org/15769010
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15034 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2013-06-10 12:05:54 +00:00
|
|
|
|
HMod* hmod = instr->hydrogen();
|
|
|
|
|
HValue* left = hmod->left();
|
|
|
|
|
HValue* right = hmod->right();
|
|
|
|
|
if (hmod->HasPowerOf2Divisor()) {
|
|
|
|
|
// TODO(svenpanne) We should really do the strength reduction on the
|
|
|
|
|
// Hydrogen level.
|
|
|
|
|
Register left_reg = ToRegister(instr->left());
|
|
|
|
|
Register result_reg = ToRegister(instr->result());
|
|
|
|
|
|
|
|
|
|
// Note: The code below even works when right contains kMinInt.
|
|
|
|
|
int32_t divisor = Abs(right->GetInteger32Constant());
|
|
|
|
|
|
|
|
|
|
Label left_is_not_negative, done;
|
|
|
|
|
if (left->CanBeNegative()) {
|
|
|
|
|
__ cmp(left_reg, Operand::Zero());
|
|
|
|
|
__ b(pl, &left_is_not_negative);
|
|
|
|
|
__ rsb(result_reg, left_reg, Operand::Zero());
|
|
|
|
|
__ and_(result_reg, result_reg, Operand(divisor - 1));
|
|
|
|
|
__ rsb(result_reg, result_reg, Operand::Zero(), SetCC);
|
|
|
|
|
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
|
|
|
|
DeoptimizeIf(eq, instr->environment());
|
|
|
|
|
}
|
|
|
|
|
__ b(&done);
|
|
|
|
|
}
|
2011-03-14 14:42:14 +00:00
|
|
|
|
|
Improve code for integral modulus calculation.
Depending on what we know about the right operand, we basically do 3
different things (and the code is actually structured this way):
* If we statically know that the right operand is a power of 2, we do
some bit fiddling instead of doing a "real" modulus calculation.
This should actually be done on the Hydrogen level, not on the
Lithium level, but this will be a separate CL.
* If type feedback tells us that the right operand is a power of 2, we
do the same as above, but guarded by conditional deoptimization to
make sure that the assumption is still valid. In the long run, we
should make this guard visible on the Hydrogen level to make it
visible for GVN and other optimizations.
* In the general case we only do the minimum steps necessary and don't
try to be too clever, because cleverness actually slows us down on
real-world code.
If we look at the code gerators for LModI, we actually see that we
basically have 3 (4 on ARM) fundamentally different translations. I
don't really like lumping them together, they should probably be
different Lithium instructions. For the time being, I restructured the
generators to make this crystal-clear, at the cost of some duplication
regarding the power-of-2 cases. This will go away when we do the
strength reduction on the Hydrogen level, so I'd like to keep it as it
is for now.
Note that the MIPS part was only slightly restructured, there is still
some work to do there.
R=jkummerow@chromium.org
Review URL: https://codereview.chromium.org/15769010
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15034 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2013-06-10 12:05:54 +00:00
|
|
|
|
__ bind(&left_is_not_negative);
|
|
|
|
|
__ and_(result_reg, left_reg, Operand(divisor - 1));
|
|
|
|
|
__ bind(&done);
|
2011-03-14 14:42:14 +00:00
|
|
|
|
|
2013-06-21 11:10:06 +00:00
|
|
|
|
} else if (hmod->fixed_right_arg().has_value) {
|
Improve code for integral modulus calculation.
Depending on what we know about the right operand, we basically do 3
different things (and the code is actually structured this way):
* If we statically know that the right operand is a power of 2, we do
some bit fiddling instead of doing a "real" modulus calculation.
This should actually be done on the Hydrogen level, not on the
Lithium level, but this will be a separate CL.
* If type feedback tells us that the right operand is a power of 2, we
do the same as above, but guarded by conditional deoptimization to
make sure that the assumption is still valid. In the long run, we
should make this guard visible on the Hydrogen level to make it
visible for GVN and other optimizations.
* In the general case we only do the minimum steps necessary and don't
try to be too clever, because cleverness actually slows us down on
real-world code.
If we look at the code gerators for LModI, we actually see that we
basically have 3 (4 on ARM) fundamentally different translations. I
don't really like lumping them together, they should probably be
different Lithium instructions. For the time being, I restructured the
generators to make this crystal-clear, at the cost of some duplication
regarding the power-of-2 cases. This will go away when we do the
strength reduction on the Hydrogen level, so I'd like to keep it as it
is for now.
Note that the MIPS part was only slightly restructured, there is still
some work to do there.
R=jkummerow@chromium.org
Review URL: https://codereview.chromium.org/15769010
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15034 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2013-06-10 12:05:54 +00:00
|
|
|
|
Register left_reg = ToRegister(instr->left());
|
|
|
|
|
Register right_reg = ToRegister(instr->right());
|
|
|
|
|
Register result_reg = ToRegister(instr->result());
|
|
|
|
|
|
2013-06-21 11:10:06 +00:00
|
|
|
|
int32_t divisor = hmod->fixed_right_arg().value;
|
Improve code for integral modulus calculation.
Depending on what we know about the right operand, we basically do 3
different things (and the code is actually structured this way):
* If we statically know that the right operand is a power of 2, we do
some bit fiddling instead of doing a "real" modulus calculation.
This should actually be done on the Hydrogen level, not on the
Lithium level, but this will be a separate CL.
* If type feedback tells us that the right operand is a power of 2, we
do the same as above, but guarded by conditional deoptimization to
make sure that the assumption is still valid. In the long run, we
should make this guard visible on the Hydrogen level to make it
visible for GVN and other optimizations.
* In the general case we only do the minimum steps necessary and don't
try to be too clever, because cleverness actually slows us down on
real-world code.
If we look at the code gerators for LModI, we actually see that we
basically have 3 (4 on ARM) fundamentally different translations. I
don't really like lumping them together, they should probably be
different Lithium instructions. For the time being, I restructured the
generators to make this crystal-clear, at the cost of some duplication
regarding the power-of-2 cases. This will go away when we do the
strength reduction on the Hydrogen level, so I'd like to keep it as it
is for now.
Note that the MIPS part was only slightly restructured, there is still
some work to do there.
R=jkummerow@chromium.org
Review URL: https://codereview.chromium.org/15769010
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15034 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2013-06-10 12:05:54 +00:00
|
|
|
|
ASSERT(IsPowerOf2(divisor));
|
|
|
|
|
|
|
|
|
|
// Check if our assumption of a fixed right operand still holds.
|
|
|
|
|
__ cmp(right_reg, Operand(divisor));
|
|
|
|
|
DeoptimizeIf(ne, instr->environment());
|
|
|
|
|
|
|
|
|
|
Label left_is_not_negative, done;
|
|
|
|
|
if (left->CanBeNegative()) {
|
|
|
|
|
__ cmp(left_reg, Operand::Zero());
|
|
|
|
|
__ b(pl, &left_is_not_negative);
|
|
|
|
|
__ rsb(result_reg, left_reg, Operand::Zero());
|
|
|
|
|
__ and_(result_reg, result_reg, Operand(divisor - 1));
|
|
|
|
|
__ rsb(result_reg, result_reg, Operand::Zero(), SetCC);
|
|
|
|
|
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
|
|
|
|
DeoptimizeIf(eq, instr->environment());
|
|
|
|
|
}
|
|
|
|
|
__ b(&done);
|
2011-03-14 14:42:14 +00:00
|
|
|
|
}
|
Improve code for integral modulus calculation.
Depending on what we know about the right operand, we basically do 3
different things (and the code is actually structured this way):
* If we statically know that the right operand is a power of 2, we do
some bit fiddling instead of doing a "real" modulus calculation.
This should actually be done on the Hydrogen level, not on the
Lithium level, but this will be a separate CL.
* If type feedback tells us that the right operand is a power of 2, we
do the same as above, but guarded by conditional deoptimization to
make sure that the assumption is still valid. In the long run, we
should make this guard visible on the Hydrogen level to make it
visible for GVN and other optimizations.
* In the general case we only do the minimum steps necessary and don't
try to be too clever, because cleverness actually slows us down on
real-world code.
If we look at the code generators for LModI, we actually see that we
basically have 3 (4 on ARM) fundamentally different translations. I
don't really like lumping them together, they should probably be
different Lithium instructions. For the time being, I restructured the
generators to make this crystal-clear, at the cost of some duplication
regarding the power-of-2 cases. This will go away when we do the
strength reduction on the Hydrogen level, so I'd like to keep it as it
is for now.
Note that the MIPS part was only slightly restructured, there is still
some work to do there.
R=jkummerow@chromium.org
Review URL: https://codereview.chromium.org/15769010
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15034 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2013-06-10 12:05:54 +00:00
|
|
|
|
|
|
|
|
|
__ bind(&left_is_not_negative);
|
|
|
|
|
__ and_(result_reg, left_reg, Operand(divisor - 1));
|
2011-03-14 14:42:14 +00:00
|
|
|
|
__ bind(&done);
|
|
|
|
|
|
Improve code for integral modulus calculation.
Depending on what we know about the right operand, we basically do 3
different things (and the code is actually structured this way):
* If we statically know that the right operand is a power of 2, we do
some bit fiddling instead of doing a "real" modulus calculation.
This should actually be done on the Hydrogen level, not on the
Lithium level, but this will be a separate CL.
* If type feedback tells us that the right operand is a power of 2, we
do the same as above, but guarded by conditional deoptimization to
make sure that the assumption is still valid. In the long run, we
should make this guard visible on the Hydrogen level to make it
visible for GVN and other optimizations.
* In the general case we only do the minimum steps necessary and don't
try to be too clever, because cleverness actually slows us down on
real-world code.
If we look at the code generators for LModI, we actually see that we
basically have 3 (4 on ARM) fundamentally different translations. I
don't really like lumping them together, they should probably be
different Lithium instructions. For the time being, I restructured the
generators to make this crystal-clear, at the cost of some duplication
regarding the power-of-2 cases. This will go away when we do the
strength reduction on the Hydrogen level, so I'd like to keep it as it
is for now.
Note that the MIPS part was only slightly restructured, there is still
some work to do there.
R=jkummerow@chromium.org
Review URL: https://codereview.chromium.org/15769010
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15034 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2013-06-10 12:05:54 +00:00
|
|
|
|
} else if (CpuFeatures::IsSupported(SUDIV)) {
|
|
|
|
|
CpuFeatureScope scope(masm(), SUDIV);
|
2011-03-22 10:00:43 +00:00
|
|
|
|
|
Improve code for integral modulus calculation.
Depending on what we know about the right operand, we basically do 3
different things (and the code is actually structured this way):
* If we statically know that the right operand is a power of 2, we do
some bit fiddling instead of doing a "real" modulus calculation.
This should actually be done on the Hydrogen level, not on the
Lithium level, but this will be a separate CL.
* If type feedback tells us that the right operand is a power of 2, we
do the same as above, but guarded by conditional deoptimization to
make sure that the assumption is still valid. In the long run, we
should make this guard visible on the Hydrogen level to make it
visible for GVN and other optimizations.
* In the general case we only do the minimum steps necessary and don't
try to be too clever, because cleverness actually slows us down on
real-world code.
If we look at the code generators for LModI, we actually see that we
basically have 3 (4 on ARM) fundamentally different translations. I
don't really like lumping them together, they should probably be
different Lithium instructions. For the time being, I restructured the
generators to make this crystal-clear, at the cost of some duplication
regarding the power-of-2 cases. This will go away when we do the
strength reduction on the Hydrogen level, so I'd like to keep it as it
is for now.
Note that the MIPS part was only slightly restructured, there is still
some work to do there.
R=jkummerow@chromium.org
Review URL: https://codereview.chromium.org/15769010
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15034 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2013-06-10 12:05:54 +00:00
|
|
|
|
Register left_reg = ToRegister(instr->left());
|
|
|
|
|
Register right_reg = ToRegister(instr->right());
|
|
|
|
|
Register result_reg = ToRegister(instr->result());
|
2011-01-14 11:48:43 +00:00
|
|
|
|
|
Improve code for integral modulus calculation.
Depending on what we know about the right operand, we basically do 3
different things (and the code is actually structured this way):
* If we statically know that the right operand is a power of 2, we do
some bit fiddling instead of doing a "real" modulus calculation.
This should actually be done on the Hydrogen level, not on the
Lithium level, but this will be a separate CL.
* If type feedback tells us that the right operand is a power of 2, we
do the same as above, but guarded by conditional deoptimization to
make sure that the assumption is still valid. In the long run, we
should make this guard visible on the Hydrogen level to make it
visible for GVN and other optimizations.
* In the general case we only do the minimum steps necessary and don't
try to be too clever, because cleverness actually slows us down on
real-world code.
If we look at the code generators for LModI, we actually see that we
basically have 3 (4 on ARM) fundamentally different translations. I
don't really like lumping them together, they should probably be
different Lithium instructions. For the time being, I restructured the
generators to make this crystal-clear, at the cost of some duplication
regarding the power-of-2 cases. This will go away when we do the
strength reduction on the Hydrogen level, so I'd like to keep it as it
is for now.
Note that the MIPS part was only slightly restructured, there is still
some work to do there.
R=jkummerow@chromium.org
Review URL: https://codereview.chromium.org/15769010
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15034 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2013-06-10 12:05:54 +00:00
|
|
|
|
Label done;
|
|
|
|
|
// Check for x % 0, sdiv might signal an exception. We have to deopt in this
|
|
|
|
|
// case because we can't return a NaN.
|
|
|
|
|
if (right->CanBeZero()) {
|
|
|
|
|
__ cmp(right_reg, Operand::Zero());
|
2012-12-20 09:07:05 +00:00
|
|
|
|
DeoptimizeIf(eq, instr->environment());
|
|
|
|
|
}
|
|
|
|
|
|
Improve code for integral modulus calculation.
Depending on what we know about the right operand, we basically do 3
different things (and the code is actually structured this way):
* If we statically know that the right operand is a power of 2, we do
some bit fiddling instead of doing a "real" modulus calculation.
This should actually be done on the Hydrogen level, not on the
Lithium level, but this will be a separate CL.
* If type feedback tells us that the right operand is a power of 2, we
do the same as above, but guarded by conditional deoptimization to
make sure that the assumption is still valid. In the long run, we
should make this guard visible on the Hydrogen level to make it
visible for GVN and other optimizations.
* In the general case we only do the minimum steps necessary and don't
try to be too clever, because cleverness actually slows us down on
real-world code.
If we look at the code generators for LModI, we actually see that we
basically have 3 (4 on ARM) fundamentally different translations. I
don't really like lumping them together, they should probably be
different Lithium instructions. For the time being, I restructured the
generators to make this crystal-clear, at the cost of some duplication
regarding the power-of-2 cases. This will go away when we do the
strength reduction on the Hydrogen level, so I'd like to keep it as it
is for now.
Note that the MIPS part was only slightly restructured, there is still
some work to do there.
R=jkummerow@chromium.org
Review URL: https://codereview.chromium.org/15769010
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15034 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2013-06-10 12:05:54 +00:00
|
|
|
|
// Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
|
|
|
|
|
// want. We have to deopt if we care about -0, because we can't return that.
|
|
|
|
|
if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
|
|
|
|
|
Label no_overflow_possible;
|
|
|
|
|
__ cmp(left_reg, Operand(kMinInt));
|
|
|
|
|
__ b(ne, &no_overflow_possible);
|
|
|
|
|
__ cmp(right_reg, Operand(-1));
|
|
|
|
|
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
|
|
|
|
DeoptimizeIf(eq, instr->environment());
|
|
|
|
|
} else {
|
|
|
|
|
__ b(ne, &no_overflow_possible);
|
|
|
|
|
__ mov(result_reg, Operand::Zero());
|
|
|
|
|
__ jmp(&done);
|
|
|
|
|
}
|
|
|
|
|
__ bind(&no_overflow_possible);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// For 'r3 = r1 % r2' we can have the following ARM code:
|
|
|
|
|
// sdiv r3, r1, r2
|
|
|
|
|
// mls r3, r3, r2, r1
|
2012-10-01 21:27:33 +00:00
|
|
|
|
|
Improve code for integral modulus calculation.
Depending on what we know about the right operand, we basically do 3
different things (and the code is actually structured this way):
* If we statically know that the right operand is a power of 2, we do
some bit fiddling instead of doing a "real" modulus calculation.
This should actually be done on the Hydrogen level, not on the
Lithium level, but this will be a separate CL.
* If type feedback tells us that the right operand is a power of 2, we
do the same as above, but guarded by conditional deoptimization to
make sure that the assumption is still valid. In the long run, we
should make this guard visible on the Hydrogen level to make it
visible for GVN and other optimizations.
* In the general case we only do the minimum steps necessary and don't
try to be too clever, because cleverness actually slows us down on
real-world code.
If we look at the code generators for LModI, we actually see that we
basically have 3 (4 on ARM) fundamentally different translations. I
don't really like lumping them together, they should probably be
different Lithium instructions. For the time being, I restructured the
generators to make this crystal-clear, at the cost of some duplication
regarding the power-of-2 cases. This will go away when we do the
strength reduction on the Hydrogen level, so I'd like to keep it as it
is for now.
Note that the MIPS part was only slightly restructured, there is still
some work to do there.
R=jkummerow@chromium.org
Review URL: https://codereview.chromium.org/15769010
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15034 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2013-06-10 12:05:54 +00:00
|
|
|
|
__ sdiv(result_reg, left_reg, right_reg);
|
|
|
|
|
__ mls(result_reg, result_reg, right_reg, left_reg);
|
2011-01-20 17:42:29 +00:00
|
|
|
|
|
Improve code for integral modulus calculation.
Depending on what we know about the right operand, we basically do 3
different things (and the code is actually structured this way):
* If we statically know that the right operand is a power of 2, we do
some bit fiddling instead of doing a "real" modulus calculation.
This should actually be done on the Hydrogen level, not on the
Lithium level, but this will be a separate CL.
* If type feedback tells us that the right operand is a power of 2, we
do the same as above, but guarded by conditional deoptimization to
make sure that the assumption is still valid. In the long run, we
should make this guard visible on the Hydrogen level to make it
visible for GVN and other optimizations.
* In the general case we only do the minimum steps necessary and don't
try to be too clever, because cleverness actually slows us down on
real-world code.
If we look at the code generators for LModI, we actually see that we
basically have 3 (4 on ARM) fundamentally different translations. I
don't really like lumping them together, they should probably be
different Lithium instructions. For the time being, I restructured the
generators to make this crystal-clear, at the cost of some duplication
regarding the power-of-2 cases. This will go away when we do the
strength reduction on the Hydrogen level, so I'd like to keep it as it
is for now.
Note that the MIPS part was only slightly restructured, there is still
some work to do there.
R=jkummerow@chromium.org
Review URL: https://codereview.chromium.org/15769010
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15034 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2013-06-10 12:05:54 +00:00
|
|
|
|
// If we care about -0, test if the dividend is <0 and the result is 0.
|
|
|
|
|
if (left->CanBeNegative() &&
|
|
|
|
|
hmod->CanBeZero() &&
|
|
|
|
|
hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
|
|
|
|
__ cmp(result_reg, Operand::Zero());
|
2013-04-29 13:45:34 +00:00
|
|
|
|
__ b(ne, &done);
|
Improve code for integral modulus calculation.
Depending on what we know about the right operand, we basically do 3
different things (and the code is actually structured this way):
* If we statically know that the right operand is a power of 2, we do
some bit fiddling instead of doing a "real" modulus calculation.
This should actually be done on the Hydrogen level, not on the
Lithium level, but this will be a separate CL.
* If type feedback tells us that the right operand is a power of 2, we
do the same as above, but guarded by conditional deoptimization to
make sure that the assumption is still valid. In the long run, we
should make this guard visible on the Hydrogen level to make it
visible for GVN and other optimizations.
* In the general case we only do the minimum steps necessary and don't
try to be too clever, because cleverness actually slows us down on
real-world code.
If we look at the code generators for LModI, we actually see that we
basically have 3 (4 on ARM) fundamentally different translations. I
don't really like lumping them together, they should probably be
different Lithium instructions. For the time being, I restructured the
generators to make this crystal-clear, at the cost of some duplication
regarding the power-of-2 cases. This will go away when we do the
strength reduction on the Hydrogen level, so I'd like to keep it as it
is for now.
Note that the MIPS part was only slightly restructured, there is still
some work to do there.
R=jkummerow@chromium.org
Review URL: https://codereview.chromium.org/15769010
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15034 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2013-06-10 12:05:54 +00:00
|
|
|
|
__ cmp(left_reg, Operand::Zero());
|
2013-04-29 13:45:34 +00:00
|
|
|
|
DeoptimizeIf(lt, instr->environment());
|
2012-10-01 21:27:33 +00:00
|
|
|
|
}
|
Improve code for integral modulus calculation.
Depending on what we know about the right operand, we basically do 3
different things (and the code is actually structured this way):
* If we statically know that the right operand is a power of 2, we do
some bit fiddling instead of doing a "real" modulus calculation.
This should actually be done on the Hydrogen level, not on the
Lithium level, but this will be a separate CL.
* If type feedback tells us that the right operand is a power of 2, we
do the same as above, but guarded by conditional deoptimization to
make sure that the assumption is still valid. In the long run, we
should make this guard visible on the Hydrogen level to make it
visible for GVN and other optimizations.
* In the general case we only do the minimum steps necessary and don't
try to be too clever, because cleverness actually slows us down on
real-world code.
If we look at the code generators for LModI, we actually see that we
basically have 3 (4 on ARM) fundamentally different translations. I
don't really like lumping them together, they should probably be
different Lithium instructions. For the time being, I restructured the
generators to make this crystal-clear, at the cost of some duplication
regarding the power-of-2 cases. This will go away when we do the
strength reduction on the Hydrogen level, so I'd like to keep it as it
is for now.
Note that the MIPS part was only slightly restructured, there is still
some work to do there.
R=jkummerow@chromium.org
Review URL: https://codereview.chromium.org/15769010
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15034 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2013-06-10 12:05:54 +00:00
|
|
|
|
__ bind(&done);
|
|
|
|
|
|
2011-03-22 10:00:43 +00:00
|
|
|
|
} else {
|
Improve code for integral modulus calculation.
Depending on what we know about the right operand, we basically do 3
different things (and the code is actually structured this way):
* If we statically know that the right operand is a power of 2, we do
some bit fiddling instead of doing a "real" modulus calculation.
This should actually be done on the Hydrogen level, not on the
Lithium level, but this will be a separate CL.
* If type feedback tells us that the right operand is a power of 2, we
do the same as above, but guarded by conditional deoptimization to
make sure that the assumption is still valid. In the long run, we
should make this guard visible on the Hydrogen level to make it
visible for GVN and other optimizations.
* In the general case we only do the minimum steps necessary and don't
try to be too clever, because cleverness actually slows us down on
real-world code.
If we look at the code generators for LModI, we actually see that we
basically have 3 (4 on ARM) fundamentally different translations. I
don't really like lumping them together, they should probably be
different Lithium instructions. For the time being, I restructured the
generators to make this crystal-clear, at the cost of some duplication
regarding the power-of-2 cases. This will go away when we do the
strength reduction on the Hydrogen level, so I'd like to keep it as it
is for now.
Note that the MIPS part was only slightly restructured, there is still
some work to do there.
R=jkummerow@chromium.org
Review URL: https://codereview.chromium.org/15769010
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15034 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2013-06-10 12:05:54 +00:00
|
|
|
|
// General case, without any SDIV support.
|
|
|
|
|
Register left_reg = ToRegister(instr->left());
|
|
|
|
|
Register right_reg = ToRegister(instr->right());
|
|
|
|
|
Register result_reg = ToRegister(instr->result());
|
2012-10-01 21:27:33 +00:00
|
|
|
|
Register scratch = scratch0();
|
Improve code for integral modulus calculation.
Depending on what we know about the right operand, we basically do 3
different things (and the code is actually structured this way):
* If we statically know that the right operand is a power of 2, we do
some bit fiddling instead of doing a "real" modulus calculation.
This should actually be done on the Hydrogen level, not on the
Lithium level, but this will be a separate CL.
* If type feedback tells us that the right operand is a power of 2, we
do the same as above, but guarded by conditional deoptimization to
make sure that the assumption is still valid. In the long run, we
should make this guard visible on the Hydrogen level to make it
visible for GVN and other optimizations.
* In the general case we only do the minimum steps necessary and don't
try to be too clever, because cleverness actually slows us down on
real-world code.
If we look at the code generators for LModI, we actually see that we
basically have 3 (4 on ARM) fundamentally different translations. I
don't really like lumping them together, they should probably be
different Lithium instructions. For the time being, I restructured the
generators to make this crystal-clear, at the cost of some duplication
regarding the power-of-2 cases. This will go away when we do the
strength reduction on the Hydrogen level, so I'd like to keep it as it
is for now.
Note that the MIPS part was only slightly restructured, there is still
some work to do there.
R=jkummerow@chromium.org
Review URL: https://codereview.chromium.org/15769010
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15034 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2013-06-10 12:05:54 +00:00
|
|
|
|
ASSERT(!scratch.is(left_reg));
|
|
|
|
|
ASSERT(!scratch.is(right_reg));
|
|
|
|
|
ASSERT(!scratch.is(result_reg));
|
|
|
|
|
DwVfpRegister dividend = ToDoubleRegister(instr->temp());
|
|
|
|
|
DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
|
|
|
|
|
ASSERT(!divisor.is(dividend));
|
2013-07-25 15:04:38 +00:00
|
|
|
|
LowDwVfpRegister quotient = double_scratch0();
|
Improve code for integral modulus calculation.
Depending on what we know about the right operand, we basically do 3
different things (and the code is actually structured this way):
* If we statically know that the right operand is a power of 2, we do
some bit fiddling instead of doing a "real" modulus calculation.
This should actually be done on the Hydrogen level, not on the
Lithium level, but this will be a separate CL.
* If type feedback tells us that the right operand is a power of 2, we
do the same as above, but guarded by conditional deoptimization to
make sure that the assumption is still valid. In the long run, we
should make this guard visible on the Hydrogen level to make it
visible for GVN and other optimizations.
* In the general case we only do the minimum steps necessary and don't
try to be too clever, because cleverness actually slows us down on
real-world code.
If we look at the code generators for LModI, we actually see that we
basically have 3 (4 on ARM) fundamentally different translations. I
don't really like lumping them together, they should probably be
different Lithium instructions. For the time being, I restructured the
generators to make this crystal-clear, at the cost of some duplication
regarding the power-of-2 cases. This will go away when we do the
strength reduction on the Hydrogen level, so I'd like to keep it as it
is for now.
Note that the MIPS part was only slightly restructured, there is still
some work to do there.
R=jkummerow@chromium.org
Review URL: https://codereview.chromium.org/15769010
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15034 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2013-06-10 12:05:54 +00:00
|
|
|
|
ASSERT(!quotient.is(dividend));
|
|
|
|
|
ASSERT(!quotient.is(divisor));
|
|
|
|
|
|
|
|
|
|
Label done;
|
|
|
|
|
// Check for x % 0, we have to deopt in this case because we can't return a
|
|
|
|
|
// NaN.
|
|
|
|
|
if (right->CanBeZero()) {
|
|
|
|
|
__ cmp(right_reg, Operand::Zero());
|
|
|
|
|
DeoptimizeIf(eq, instr->environment());
|
|
|
|
|
}
|
2012-10-01 21:27:33 +00:00
|
|
|
|
|
Improve code for integral modulus calculation.
Depending on what we know about the right operand, we basically do 3
different things (and the code is actually structured this way):
* If we statically know that the right operand is a power of 2, we do
some bit fiddling instead of doing a "real" modulus calculation.
This should actually be done on the Hydrogen level, not on the
Lithium level, but this will be a separate CL.
* If type feedback tells us that the right operand is a power of 2, we
do the same as above, but guarded by conditional deoptimization to
make sure that the assumption is still valid. In the long run, we
should make this guard visible on the Hydrogen level to make it
visible for GVN and other optimizations.
* In the general case we only do the minimum steps necessary and don't
try to be too clever, because cleverness actually slows us down on
real-world code.
If we look at the code generators for LModI, we actually see that we
basically have 3 (4 on ARM) fundamentally different translations. I
don't really like lumping them together, they should probably be
different Lithium instructions. For the time being, I restructured the
generators to make this crystal-clear, at the cost of some duplication
regarding the power-of-2 cases. This will go away when we do the
strength reduction on the Hydrogen level, so I'd like to keep it as it
is for now.
Note that the MIPS part was only slightly restructured, there is still
some work to do there.
R=jkummerow@chromium.org
Review URL: https://codereview.chromium.org/15769010
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15034 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2013-06-10 12:05:54 +00:00
|
|
|
|
__ Move(result_reg, left_reg);
|
|
|
|
|
// Load the arguments in VFP registers. The divisor value is preloaded
|
|
|
|
|
// before. Be careful that 'right_reg' is only live on entry.
|
|
|
|
|
// TODO(svenpanne) The last comments seems to be wrong nowadays.
|
2013-07-25 15:04:38 +00:00
|
|
|
|
__ vmov(double_scratch0().low(), left_reg);
|
|
|
|
|
__ vcvt_f64_s32(dividend, double_scratch0().low());
|
|
|
|
|
__ vmov(double_scratch0().low(), right_reg);
|
|
|
|
|
__ vcvt_f64_s32(divisor, double_scratch0().low());
|
2012-10-01 21:27:33 +00:00
|
|
|
|
|
Improve code for integral modulus calculation.
Depending on what we know about the right operand, we basically do 3
different things (and the code is actually structured this way):
* If we statically know that the right operand is a power of 2, we do
some bit fiddling instead of doing a "real" modulus calculation.
This should actually be done on the Hydrogen level, not on the
Lithium level, but this will be a separate CL.
* If type feedback tells us that the right operand is a power of 2, we
do the same as above, but guarded by conditional deoptimization to
make sure that the assumption is still valid. In the long run, we
should make this guard visible on the Hydrogen level to make it
visible for GVN and other optimizations.
* In the general case we only do the minimum steps necessary and don't
try to be too clever, because cleverness actually slows us down on
real-world code.
If we look at the code generators for LModI, we actually see that we
basically have 3 (4 on ARM) fundamentally different translations. I
don't really like lumping them together, they should probably be
different Lithium instructions. For the time being, I restructured the
generators to make this crystal-clear, at the cost of some duplication
regarding the power-of-2 cases. This will go away when we do the
strength reduction on the Hydrogen level, so I'd like to keep it as it
is for now.
Note that the MIPS part was only slightly restructured, there is still
some work to do there.
R=jkummerow@chromium.org
Review URL: https://codereview.chromium.org/15769010
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15034 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2013-06-10 12:05:54 +00:00
|
|
|
|
// We do not care about the sign of the divisor. Note that we still handle
|
|
|
|
|
// the kMinInt % -1 case correctly, though.
|
2012-10-01 21:27:33 +00:00
|
|
|
|
__ vabs(divisor, divisor);
|
|
|
|
|
// Compute the quotient and round it to a 32bit integer.
|
|
|
|
|
__ vdiv(quotient, dividend, divisor);
|
|
|
|
|
__ vcvt_s32_f64(quotient.low(), quotient);
|
|
|
|
|
__ vcvt_f64_s32(quotient, quotient.low());
|
|
|
|
|
|
|
|
|
|
// Compute the remainder in result.
|
2013-07-25 15:04:38 +00:00
|
|
|
|
__ vmul(double_scratch0(), divisor, quotient);
|
|
|
|
|
__ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
|
|
|
|
|
__ vmov(scratch, double_scratch0().low());
|
Improve code for integral modulus calculation.
Depending on what we know about the right operand, we basically do 3
different things (and the code is actually structured this way):
* If we statically know that the right operand is a power of 2, we do
some bit fiddling instead of doing a "real" modulus calculation.
This should actually be done on the Hydrogen level, not on the
Lithium level, but this will be a separate CL.
* If type feedback tells us that the right operand is a power of 2, we
do the same as above, but guarded by conditional deoptimization to
make sure that the assumption is still valid. In the long run, we
should make this guard visible on the Hydrogen level to make it
visible for GVN and other optimizations.
* In the general case we only do the minimum steps necessary and don't
try to be too clever, because cleverness actually slows us down on
real-world code.
If we look at the code generators for LModI, we actually see that we
basically have 3 (4 on ARM) fundamentally different translations. I
don't really like lumping them together, they should probably be
different Lithium instructions. For the time being, I restructured the
generators to make this crystal-clear, at the cost of some duplication
regarding the power-of-2 cases. This will go away when we do the
strength reduction on the Hydrogen level, so I'd like to keep it as it
is for now.
Note that the MIPS part was only slightly restructured, there is still
some work to do there.
R=jkummerow@chromium.org
Review URL: https://codereview.chromium.org/15769010
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15034 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2013-06-10 12:05:54 +00:00
|
|
|
|
__ sub(result_reg, left_reg, scratch, SetCC);
|
2012-10-01 21:27:33 +00:00
|
|
|
|
|
Improve code for integral modulus calculation.
Depending on what we know about the right operand, we basically do 3
different things (and the code is actually structured this way):
* If we statically know that the right operand is a power of 2, we do
some bit fiddling instead of doing a "real" modulus calculation.
This should actually be done on the Hydrogen level, not on the
Lithium level, but this will be a separate CL.
* If type feedback tells us that the right operand is a power of 2, we
do the same as above, but guarded by conditional deoptimization to
make sure that the assumption is still valid. In the long run, we
should make this guard visible on the Hydrogen level to make it
visible for GVN and other optimizations.
* In the general case we only do the minimum steps necessary and don't
try to be too clever, because cleverness actually slows us down on
real-world code.
If we look at the code generators for LModI, we actually see that we
basically have 3 (4 on ARM) fundamentally different translations. I
don't really like lumping them together, they should probably be
different Lithium instructions. For the time being, I restructured the
generators to make this crystal-clear, at the cost of some duplication
regarding the power-of-2 cases. This will go away when we do the
strength reduction on the Hydrogen level, so I'd like to keep it as it
is for now.
Note that the MIPS part was only slightly restructured, there is still
some work to do there.
R=jkummerow@chromium.org
Review URL: https://codereview.chromium.org/15769010
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15034 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2013-06-10 12:05:54 +00:00
|
|
|
|
// If we care about -0, test if the dividend is <0 and the result is 0.
|
|
|
|
|
if (left->CanBeNegative() &&
|
|
|
|
|
hmod->CanBeZero() &&
|
|
|
|
|
hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
|
|
|
|
__ b(ne, &done);
|
|
|
|
|
__ cmp(left_reg, Operand::Zero());
|
2012-10-01 21:27:33 +00:00
|
|
|
|
DeoptimizeIf(mi, instr->environment());
|
|
|
|
|
}
|
Improve code for integral modulus calculation.
Depending on what we know about the right operand, we basically do 3
different things (and the code is actually structured this way):
* If we statically know that the right operand is a power of 2, we do
some bit fiddling instead of doing a "real" modulus calculation.
This should actually be done on the Hydrogen level, not on the
Lithium level, but this will be a separate CL.
* If type feedback tells us that the right operand is a power of 2, we
do the same as above, but guarded by conditional deoptimization to
make sure that the assumption is still valid. In the long run, we
should make this guard visible on the Hydrogen level to make it
visible for GVN and other optimizations.
* In the general case we only do the minimum steps necessary and don't
try to be too clever, because cleverness actually slows us down on
real-world code.
If we look at the code generators for LModI, we actually see that we
basically have 3 (4 on ARM) fundamentally different translations. I
don't really like lumping them together, they should probably be
different Lithium instructions. For the time being, I restructured the
generators to make this crystal-clear, at the cost of some duplication
regarding the power-of-2 cases. This will go away when we do the
strength reduction on the Hydrogen level, so I'd like to keep it as it
is for now.
Note that the MIPS part was only slightly restructured, there is still
some work to do there.
R=jkummerow@chromium.org
Review URL: https://codereview.chromium.org/15769010
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15034 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2013-06-10 12:05:54 +00:00
|
|
|
|
__ bind(&done);
|
2012-10-01 21:27:33 +00:00
|
|
|
|
}
|
2010-12-07 11:31:57 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2012-04-24 15:59:07 +00:00
|
|
|
|
// Emits a truncating signed integer division of |dividend| by the
// compile-time constant |divisor|, leaving the quotient in |result| and the
// remainder in |remainder|. Divisors of 0, +/-1, powers of two, and
// "magic number" divisors are each handled with a dedicated instruction
// sequence; a divisor of 0 deoptimizes unconditionally. |scratch| and ip are
// clobbered; |dividend| must not alias |scratch| or ip.
void LCodeGen::EmitSignedIntegerDivisionByConstant(
    Register result,
    Register dividend,
    int32_t divisor,
    Register remainder,
    Register scratch,
    LEnvironment* environment) {
  ASSERT(!AreAliased(dividend, scratch, ip));
  ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));

  uint32_t divisor_abs = abs(divisor);

  // Number of times 2 divides |divisor|; used to split the divisor into
  // <odd factor> * <power of 2> for the magic-number path below.
  int32_t power_of_2_factor =
      CompilerIntrinsics::CountTrailingZeros(divisor_abs);

  switch (divisor_abs) {
    case 0:
      // Division by zero always deoptimizes.
      DeoptimizeIf(al, environment);
      return;

    case 1:
      if (divisor > 0) {
        __ Move(result, dividend);
      } else {
        // divisor == -1: negate, deoptimizing on overflow (kMinInt / -1).
        __ rsb(result, dividend, Operand::Zero(), SetCC);
        DeoptimizeIf(vs, environment);
      }
      // Compute the remainder.
      __ mov(remainder, Operand::Zero());
      return;

    default:
      if (IsPowerOf2(divisor_abs)) {
        // Branch and condition free code for integer division by a power
        // of two.
        int32_t power = WhichPowerOf2(divisor_abs);
        if (power > 1) {
          __ mov(scratch, Operand(dividend, ASR, power - 1));
        }
        // Add (2^power - 1) to negative dividends so the arithmetic shift
        // rounds toward zero instead of toward -infinity.
        __ add(scratch, dividend, Operand(scratch, LSR, 32 - power));
        __ mov(result, Operand(scratch, ASR, power));
        // Negate if necessary.
        // We don't need to check for overflow because the case '-1' is
        // handled separately.
        if (divisor < 0) {
          ASSERT(divisor != -1);
          __ rsb(result, result, Operand::Zero());
        }
        // Compute the remainder.
        if (divisor > 0) {
          __ sub(remainder, dividend, Operand(result, LSL, power));
        } else {
          __ add(remainder, dividend, Operand(result, LSL, power));
        }
        return;
      } else {
        // Use magic numbers for a few specific divisors.
        // Details and proofs can be found in:
        // - Hacker's Delight, Henry S. Warren, Jr.
        // - The PowerPC Compiler Writer’s Guide
        // and probably many others.
        //
        // We handle
        //   <divisor with magic numbers> * <power of 2>
        // but not
        //   <divisor with magic numbers> * <other divisor with magic numbers>
        DivMagicNumbers magic_numbers =
            DivMagicNumberFor(divisor_abs >> power_of_2_factor);
        // Branch-free division via a high-half multiply with the magic
        // constant M followed by an arithmetic shift by s.
        const int32_t M = magic_numbers.M;
        const int32_t s = magic_numbers.s + power_of_2_factor;

        __ mov(ip, Operand(M));
        __ smull(ip, scratch, dividend, ip);
        if (M < 0) {
          // Negative M requires adding the dividend back to the high half.
          __ add(scratch, scratch, Operand(dividend));
        }
        if (s > 0) {
          __ mov(scratch, Operand(scratch, ASR, s));
        }
        // Adding the dividend's sign bit corrects rounding for negative
        // dividends (truncation toward zero).
        __ add(result, scratch, Operand(dividend, LSR, 31));
        if (divisor < 0) __ rsb(result, result, Operand::Zero());
        // Compute the remainder.
        __ mov(ip, Operand(divisor));
        // This sequence could be replaced with 'mls' when
        // it gets implemented.
        __ mul(scratch, result, ip);
        __ sub(remainder, dividend, scratch);
      }
  }
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Emits code for LDivI (32-bit signed division). Three strategies:
// a power-of-two constant divisor (shifts), hardware SDIV when available,
// and a VFP double-division fallback otherwise. Non-truncating divisions
// deoptimize when the result would not be an exact integer; additional
// deopts guard division by zero, -0 results, and kMinInt / -1, depending on
// the hydrogen instruction's flags.
void LCodeGen::DoDivI(LDivI* instr) {
  if (instr->hydrogen()->HasPowerOf2Divisor()) {
    const Register dividend = ToRegister(instr->left());
    const Register result = ToRegister(instr->result());
    int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
    // test_value masks the bits that must be zero for the division to be
    // exact; power is the shift amount for the quotient.
    int32_t test_value = 0;
    int32_t power = 0;

    if (divisor > 0) {
      test_value = divisor - 1;
      power = WhichPowerOf2(divisor);
    } else {
      // Check for (0 / -x) that will produce negative zero.
      if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
        __ cmp(dividend, Operand::Zero());
        DeoptimizeIf(eq, instr->environment());
      }
      // Check for (kMinInt / -1).
      if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
        __ cmp(dividend, Operand(kMinInt));
        DeoptimizeIf(eq, instr->environment());
      }
      test_value = - divisor - 1;
      power = WhichPowerOf2(-divisor);
    }

    if (test_value != 0) {
      if (instr->hydrogen()->CheckFlag(
          HInstruction::kAllUsesTruncatingToInt32)) {
        // Truncating: compute abs(dividend), shift, then restore the sign
        // of the quotient with conditional rsb instructions.
        __ sub(result, dividend, Operand::Zero(), SetCC);
        __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
        __ mov(result, Operand(result, ASR, power));
        if (divisor > 0) __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
        if (divisor < 0) __ rsb(result, result, Operand::Zero(), LeaveCC, gt);
        return;  // Don't fall through to "__ rsb" below.
      } else {
        // Deoptimize if remainder is not 0.
        __ tst(dividend, Operand(test_value));
        DeoptimizeIf(ne, instr->environment());
        __ mov(result, Operand(dividend, ASR, power));
        if (divisor < 0) __ rsb(result, result, Operand(0));
      }
    } else {
      // |divisor| == 1: quotient is the (possibly negated) dividend.
      if (divisor < 0) {
        __ rsb(result, dividend, Operand(0));
      } else {
        __ Move(result, dividend);
      }
    }

    return;
  }

  const Register left = ToRegister(instr->left());
  const Register right = ToRegister(instr->right());
  const Register result = ToRegister(instr->result());

  // Check for x / 0.
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmp(right, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label positive;
    if (!instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
      // Do the test only if it hadn't been done above.
      __ cmp(right, Operand::Zero());
    }
    __ b(pl, &positive);
    __ cmp(left, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());
    __ bind(&positive);
  }

  // Check for (kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    Label left_not_min_int;
    __ cmp(left, Operand(kMinInt));
    __ b(ne, &left_not_min_int);
    __ cmp(right, Operand(-1));
    DeoptimizeIf(eq, instr->environment());
    __ bind(&left_not_min_int);
  }

  if (CpuFeatures::IsSupported(SUDIV)) {
    CpuFeatureScope scope(masm(), SUDIV);
    __ sdiv(result, left, right);

    if (!instr->hydrogen()->CheckFlag(
        HInstruction::kAllUsesTruncatingToInt32)) {
      // Compute remainder and deopt if it's not zero.
      const Register remainder = scratch0();
      __ mls(remainder, result, right, left);
      __ cmp(remainder, Operand::Zero());
      DeoptimizeIf(ne, instr->environment());
    }
  } else {
    // No hardware divide: convert both operands to doubles, divide in VFP,
    // and convert back.
    const DoubleRegister vleft = ToDoubleRegister(instr->temp());
    const DoubleRegister vright = double_scratch0();
    __ vmov(double_scratch0().low(), left);
    __ vcvt_f64_s32(vleft, double_scratch0().low());
    __ vmov(double_scratch0().low(), right);
    __ vcvt_f64_s32(vright, double_scratch0().low());
    __ vdiv(vleft, vleft, vright);  // vleft now contains the result.
    __ vcvt_s32_f64(double_scratch0().low(), vleft);
    __ vmov(result, double_scratch0().low());

    if (!instr->hydrogen()->CheckFlag(
        HInstruction::kAllUsesTruncatingToInt32)) {
      // Deopt if exact conversion to integer was not possible.
      // Use vright as scratch register.
      __ vcvt_f64_s32(double_scratch0(), double_scratch0().low());
      __ VFPCompareAndSetFlags(vleft, double_scratch0());
      DeoptimizeIf(ne, instr->environment());
    }
  }
}
|
|
|
|
|
|
|
|
|
|
|
2012-11-14 11:01:18 +00:00
|
|
|
|
void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
|
|
|
|
|
DwVfpRegister addend = ToDoubleRegister(instr->addend());
|
|
|
|
|
DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
|
|
|
|
|
DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
|
|
|
|
|
|
|
|
|
|
// This is computed in-place.
|
|
|
|
|
ASSERT(addend.is(ToDoubleRegister(instr->result())));
|
|
|
|
|
|
|
|
|
|
__ vmla(addend, multiplier, multiplicand);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2013-02-27 10:24:40 +00:00
|
|
|
|
// Fused multiply-subtract on doubles: minuend -= multiplier * multiplicand.
// Computed in place, so the minuend register must be the result register.
void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
  DwVfpRegister acc = ToDoubleRegister(instr->minuend());
  DwVfpRegister lhs = ToDoubleRegister(instr->multiplier());
  DwVfpRegister rhs = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  ASSERT(acc.is(ToDoubleRegister(instr->result())));

  __ vmls(acc, lhs, rhs);
}
|
|
|
|
|
|
|
|
|
|
|
2012-04-24 15:59:07 +00:00
|
|
|
|
// Emits code for LMathFloorOfDiv: integer division rounding toward
// -infinity (JavaScript Math.floor(a / b)). With SDIV support the hardware
// quotient (which truncates toward zero) is corrected by one when the signs
// differ and the remainder is non-zero; without SDIV, a magic-number
// constant division is used and corrected the same way.
void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
  const Register result = ToRegister(instr->result());
  const Register left = ToRegister(instr->left());
  const Register remainder = ToRegister(instr->temp());
  const Register scratch = scratch0();

  if (!CpuFeatures::IsSupported(SUDIV)) {
    // If the CPU doesn't support sdiv instruction, we only optimize when we
    // have magic numbers for the divisor. The standard integer division
    // routine is usually slower than transitioning to VFP.
    ASSERT(instr->right()->IsConstantOperand());
    int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
    ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
    if (divisor < 0) {
      // 0 / negative would produce -0; deoptimize instead.
      __ cmp(left, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
    }
    EmitSignedIntegerDivisionByConstant(result,
                                        left,
                                        divisor,
                                        remainder,
                                        scratch,
                                        instr->environment());
    // We performed a truncating division. Correct the result if necessary.
    // Result needs -1 iff remainder != 0 and remainder/divisor signs differ.
    __ cmp(remainder, Operand::Zero());
    __ teq(remainder, Operand(divisor), ne);
    __ sub(result, result, Operand(1), LeaveCC, mi);
  } else {
    CpuFeatureScope scope(masm(), SUDIV);
    const Register right = ToRegister(instr->right());

    // Check for x / 0.
    __ cmp(right, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());

    // Check for (kMinInt / -1).
    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      Label left_not_min_int;
      __ cmp(left, Operand(kMinInt));
      __ b(ne, &left_not_min_int);
      __ cmp(right, Operand(-1));
      DeoptimizeIf(eq, instr->environment());
      __ bind(&left_not_min_int);
    }

    // Check for (0 / -x) that will produce negative zero.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ cmp(right, Operand::Zero());
      __ cmp(left, Operand::Zero(), mi);
      // "right" can't be null because the code would have already been
      // deoptimized. The Z flag is set only if (right < 0) and (left == 0).
      // In this case we need to deoptimize to produce a -0.
      DeoptimizeIf(eq, instr->environment());
    }

    Label done;
    __ sdiv(result, left, right);
    // If both operands have the same sign then we are done.
    __ eor(remainder, left, Operand(right), SetCC);
    __ b(pl, &done);

    // Check if the result needs to be corrected.
    __ mls(remainder, result, right, left);
    __ cmp(remainder, Operand::Zero());
    __ sub(result, result, Operand(1), LeaveCC, ne);

    __ bind(&done);
  }
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Emits code for LMulI (32-bit integer multiply). Constant right operands
// use strength-reduced sequences (negate, zero, move, shifts for powers of
// two and powers of two +/- 1); register operands use mul/smull. Deopts
// cover overflow (via smull high-word check) and -0 results when the
// corresponding hydrogen flags are set.
void LCodeGen::DoMulI(LMulI* instr) {
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a null constant will be handled separately.
      // If constant is negative and left is null, the result should be -0.
      __ cmp(left, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
    }

    switch (constant) {
      case -1:
        // Negation; rsb sets flags so kMinInt * -1 overflow can deopt.
        if (overflow) {
          __ rsb(result, left, Operand::Zero(), SetCC);
          DeoptimizeIf(vs, instr->environment());
        } else {
          __ rsb(result, left, Operand::Zero());
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is null, the
          // result is -0. Deoptimize if required, otherwise return 0.
          __ cmp(left, Operand::Zero());
          DeoptimizeIf(mi, instr->environment());
        }
        __ mov(result, Operand::Zero());
        break;
      case 1:
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        // Branch-free absolute value: (x + mask) ^ mask.
        uint32_t constant_abs = (constant + mask) ^ mask;

        if (IsPowerOf2(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ mov(result, Operand(left, LSL, shift));
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ rsb(result, result, Operand::Zero());
        } else if (IsPowerOf2(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ add(result, left, Operand(left, LSL, shift));
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ rsb(result, result, Operand::Zero());
        } else if (IsPowerOf2(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ rsb(result, left, Operand(left, LSL, shift));
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ rsb(result, result, Operand::Zero());
        } else {
          // Generate standard code.
          __ mov(ip, Operand(constant));
          __ mul(result, left, ip);
        }
    }

  } else {
    ASSERT(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (overflow) {
      Register scratch = scratch0();
      // scratch:result = left * right.
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ smull(result, scratch, result, right);
      } else {
        __ smull(result, scratch, left, right);
      }
      // Overflow iff the high word is not the sign extension of the low
      // word.
      __ cmp(scratch, Operand(result, ASR, 31));
      DeoptimizeIf(ne, instr->environment());
    } else {
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ mul(result, result, right);
      } else {
        __ mul(result, left, right);
      }
    }

    if (bailout_on_minus_zero) {
      Label done;
      // If the operands have the same sign (N clear after teq), the result
      // cannot be -0.
      __ teq(left, Operand(right));
      __ b(pl, &done);
      // Bail out if the result is minus zero.
      __ cmp(result, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
      __ bind(&done);
    }
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Emits code for LBitI (bitwise AND/OR/XOR). The right operand may be a
// register, a constant, or a stack slot (loaded into ip first). XOR with
// all-ones is strength-reduced to a single mvn (bitwise NOT).
void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  ASSERT(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  // Default-constructed with no_reg; reassigned below.
  Operand right(no_reg);

  if (right_op->IsStackSlot() || right_op->IsArgument()) {
    // Memory operand: load it into the ip scratch register.
    right = Operand(EmitLoadRegister(right_op, ip));
  } else {
    ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ and_(result, left, right);
      break;
    case Token::BIT_OR:
      __ orr(result, left, right);
      break;
    case Token::BIT_XOR:
      // x ^ ~0 == ~x: emit a single mvn instead of an eor.
      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
        __ mvn(result, Operand(left));
      } else {
        __ eor(result, left, right);
      }
      break;
    default:
      UNREACHABLE();
      break;
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Emits code for LShiftI (ROR/SAR/SHR/SHL). Register shift amounts are
// masked to 5 bits, matching JavaScript shift semantics. SHR deoptimizes
// when an unsigned result would not fit in a signed int32; smi-representation
// SHL deoptimizes on overflow via the tagging step.
void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  if (right_op->IsRegister()) {
    // Mask the right_op operand.
    __ and_(scratch, ToRegister(right_op), Operand(0x1F));
    switch (instr->op()) {
      case Token::ROR:
        __ mov(result, Operand(left, ROR, scratch));
        break;
      case Token::SAR:
        __ mov(result, Operand(left, ASR, scratch));
        break;
      case Token::SHR:
        if (instr->can_deopt()) {
          // A negative (sign-bit set) logical-shift result means the
          // unsigned value doesn't fit in int32; deoptimize.
          __ mov(result, Operand(left, LSR, scratch), SetCC);
          DeoptimizeIf(mi, instr->environment());
        } else {
          __ mov(result, Operand(left, LSR, scratch));
        }
        break;
      case Token::SHL:
        __ mov(result, Operand(left, LSL, scratch));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ mov(result, Operand(left, ROR, shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ mov(result, Operand(left, ASR, shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ mov(result, Operand(left, LSR, shift_count));
        } else {
          // Zero shift: result equals input, but a set sign bit means the
          // value is out of unsigned-int32-as-int32 range.
          if (instr->can_deopt()) {
            __ tst(left, Operand(0x80000000));
            DeoptimizeIf(ne, instr->environment());
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
            // Shift by (count - 1), then let SmiTag perform the final
            // shift-by-one with flag setting so overflow can deopt.
            if (shift_count != 1) {
              __ mov(result, Operand(left, LSL, shift_count - 1));
              __ SmiTag(result, result, SetCC);
            } else {
              __ SmiTag(result, left, SetCC);
            }
            DeoptimizeIf(vs, instr->environment());
          } else {
            __ mov(result, Operand(left, LSL, shift_count));
          }
        } else {
          __ Move(result, left);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Emits code for LSubI: result = left - right. When overflow can occur the
// subtraction sets the condition flags and the V flag triggers a deopt.
void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  LOperand* result_op = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  SBit flag_mode = can_overflow ? SetCC : LeaveCC;

  if (right_op->IsStackSlot() || right_op->IsArgument()) {
    // The right operand lives in memory: load it into ip first.
    Register rhs = EmitLoadRegister(right_op, ip);
    __ sub(ToRegister(result_op), ToRegister(left_op), Operand(rhs),
           flag_mode);
  } else {
    ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
    __ sub(ToRegister(result_op), ToRegister(left_op), ToOperand(right_op),
           flag_mode);
  }

  if (can_overflow) {
    DeoptimizeIf(vs, instr->environment());
  }
}
|
|
|
|
|
|
|
|
|
|
|
2012-11-20 14:46:16 +00:00
|
|
|
|
// Emits code for LRSubI (reverse subtract): result = right - left. Mirrors
// DoSubI but uses rsb; overflow deopts via the V flag when tracked.
void LCodeGen::DoRSubI(LRSubI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  LOperand* result_op = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  SBit flag_mode = can_overflow ? SetCC : LeaveCC;

  if (right_op->IsStackSlot() || right_op->IsArgument()) {
    // The right operand lives in memory: load it into ip first.
    Register rhs = EmitLoadRegister(right_op, ip);
    __ rsb(ToRegister(result_op), ToRegister(left_op), Operand(rhs),
           flag_mode);
  } else {
    ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
    __ rsb(ToRegister(result_op), ToRegister(left_op), ToOperand(right_op),
           flag_mode);
  }

  if (can_overflow) {
    DeoptimizeIf(vs, instr->environment());
  }
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Materializes an integer constant into the result register.
void LCodeGen::DoConstantI(LConstantI* instr) {
  Register dst = ToRegister(instr->result());
  __ mov(dst, Operand(instr->value()));
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Materializes the instruction's constant value into the result register.
void LCodeGen::DoConstantS(LConstantS* instr) {
  Register dst = ToRegister(instr->result());
  __ mov(dst, Operand(instr->value()));
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Loads a double constant into a VFP register; the core scratch register is
// used by Vmov to build the bit pattern when needed.
void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  DwVfpRegister dst = ToDoubleRegister(instr->result());
  __ Vmov(dst, instr->value(), scratch0());
}
|
|
|
|
|
|
|
|
|
|
|
2013-07-29 13:56:51 +00:00
|
|
|
|
// Materializes the instruction's constant value into the result register.
void LCodeGen::DoConstantE(LConstantE* instr) {
  Register dst = ToRegister(instr->result());
  __ mov(dst, Operand(instr->value()));
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Loads a tagged (handle-valued) constant into the result register. The
// scope object permits dereferencing deferred handles for the duration of
// the load.
void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> constant = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ LoadObject(ToRegister(instr->result()), constant);
}
|
|
|
|
|
|
|
|
|
|
|
2012-08-28 14:20:50 +00:00
|
|
|
|
// Reads the enum-cache length field of the given map into the result
// register via the EnumLength macro.
void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  __ EnumLength(ToRegister(instr->result()), ToRegister(instr->value()));
}
|
|
|
|
|
|
|
|
|
|
|
2011-06-20 10:19:00 +00:00
|
|
|
|
// Extracts the elements-kind bits from an object's map into the result
// register.
void LCodeGen::DoElementsKind(LElementsKind* instr) {
  Register out = ToRegister(instr->result());
  Register object = ToRegister(instr->value());

  // out <- the object's map.
  __ ldr(out, FieldMemOperand(object, HeapObject::kMapOffset));
  // out <- the map's "bit field 2". Only the first byte is needed, but the
  // bit-field extraction below takes care of that anyway.
  __ ldr(out, FieldMemOperand(out, Map::kBitField2Offset));
  // Isolate the elements-kind bits from bit field 2.
  __ ubfx(out, out, Map::kElementsKindShift, Map::kElementsKindBitCount);
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Emits code for LValueOf: for JSValue wrapper objects, loads the wrapped
// primitive; smis and all other object types pass through unchanged.
void LCodeGen::DoValueOf(LValueOf* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->temp());
  Label done;

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    // If the object is a smi return the object.
    __ SmiTst(input);
    __ Move(result, input, eq);
    __ b(eq, &done);
  }

  // If the object is not a value type, return the object.
  __ CompareObjectType(input, map, map, JS_VALUE_TYPE);
  __ Move(result, input, ne);
  __ b(ne, &done);
  // JSValue wrapper: unwrap the boxed primitive.
  __ ldr(result, FieldMemOperand(input, JSValue::kValueOffset));

  __ bind(&done);
}
|
|
|
|
|
|
|
|
|
|
|
2012-03-09 11:11:55 +00:00
|
|
|
|
// Loads a field of a JSDate object. Field 0 (the time value) is read
// directly. Cached fields are read inline when the date-cache stamp is
// current; otherwise (or for uncached fields) a C function is called.
// Deopts if the input is not a JSDate.
void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp());
  Smi* index = instr->index();
  Label runtime, done;
  // The register allocator fixed object == result == r0 (r0 is also the
  // C-call argument/return register used by the runtime path below).
  ASSERT(object.is(result));
  ASSERT(object.is(r0));
  ASSERT(!scratch.is(scratch0()));
  ASSERT(!scratch.is(object));

  // Deopt on smi or on any heap object that is not a JSDate.
  __ SmiTst(object);
  DeoptimizeIf(eq, instr->environment());
  __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
  DeoptimizeIf(ne, instr->environment());

  if (index->value() == 0) {
    // Field 0 is the date's time value and is always valid.
    __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      // Cached field: valid only if the object's cache stamp matches the
      // isolate-wide date cache stamp.
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ mov(scratch, Operand(stamp));
      __ ldr(scratch, MemOperand(scratch));
      __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
      __ cmp(scratch, scratch0());
      __ b(ne, &runtime);
      __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
                                             kPointerSize * index->value()));
      __ jmp(&done);
    }
    // Slow path: recompute the field in C++ (r0 = date, r1 = field index).
    __ bind(&runtime);
    __ PrepareCallCFunction(2, scratch);
    __ mov(r1, Operand(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}
|
|
|
|
|
|
|
|
|
|
|
2012-12-05 15:49:22 +00:00
|
|
|
|
// Stores a character into a sequential string at a given index, using a
// byte store for one-byte strings and a halfword store for two-byte
// strings. In debug builds, first verifies that the string's instance
// type matches the encoding the instruction was compiled for.
void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  Register string = ToRegister(instr->string());
  LOperand* index_op = instr->index();
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  String::Encoding encoding = instr->encoding();

  if (FLAG_debug_code) {
    // Check that the string really is sequential with the expected encoding.
    __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
    __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

    __ and_(scratch, scratch,
            Operand(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING
                            ? one_byte_seq_type : two_byte_seq_type));
    __ Check(eq, kUnexpectedStringType);
  }

  if (index_op->IsConstantOperand()) {
    // Constant index: fold the offset into the addressing mode.
    int constant_index = ToInteger32(LConstantOperand::cast(index_op));
    if (encoding == String::ONE_BYTE_ENCODING) {
      __ strb(value,
              FieldMemOperand(string, SeqString::kHeaderSize + constant_index));
    } else {
      __ strh(value,
          FieldMemOperand(string, SeqString::kHeaderSize + constant_index * 2));
    }
  } else {
    // Dynamic index: compute string + index (scaled by 2 for two-byte
    // strings) into scratch, then store past the header.
    Register index = ToRegister(index_op);
    if (encoding == String::ONE_BYTE_ENCODING) {
      __ add(scratch, string, Operand(index));
      __ strb(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
    } else {
      __ add(scratch, string, Operand(index, LSL, 1));
      __ strh(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
    }
  }
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Throws the instruction's value by pushing it and calling the kThrow
// runtime function. Control never returns here.
void LCodeGen::DoThrow(LThrow* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Register exception = EmitLoadRegister(instr->value(), ip);
  __ push(exception);
  CallRuntime(Runtime::kThrow, 1, instr);

  // The runtime call unwinds; anything emitted after it is dead.
  if (FLAG_debug_code) {
    __ stop("Unreachable code.");
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Integer addition. Sets the condition flags and deoptimizes on signed
// overflow when the hydrogen instruction can overflow.
void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* lhs = instr->left();
  LOperand* rhs = instr->right();
  LOperand* dst = instr->result();
  bool overflow_possible = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  SBit flag_mode = overflow_possible ? SetCC : LeaveCC;

  if (rhs->IsStackSlot() || rhs->IsArgument()) {
    // Memory operand: materialize it in ip first.
    Register rhs_reg = EmitLoadRegister(rhs, ip);
    __ add(ToRegister(dst), ToRegister(lhs), Operand(rhs_reg), flag_mode);
  } else {
    ASSERT(rhs->IsRegister() || rhs->IsConstantOperand());
    __ add(ToRegister(dst), ToRegister(lhs), ToOperand(rhs), flag_mode);
  }

  if (overflow_possible) {
    // The V flag was set by the flag-setting add above.
    DeoptimizeIf(vs, instr->environment());
  }
}
|
|
|
|
|
|
|
|
|
|
|
2012-08-06 14:28:27 +00:00
|
|
|
|
// Math.min / Math.max. The integer/smi case uses a compare plus two
// conditional moves. The double case handles the tricky IEEE corners:
// NaN propagation and distinguishing +0 from -0 via sign arithmetic.
// The VFP flag conventions (mi/gt/vs after VFPCompareAndSetFlags) drive
// the branch structure, so ordering is significant throughout.
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    // min: keep left if left <= right; max: keep left if left >= right.
    Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
    Register left_reg = ToRegister(left);
    Operand right_op = (right->IsRegister() || right->IsConstantOperand())
        ? ToOperand(right)
        : Operand(EmitLoadRegister(right, ip));
    Register result_reg = ToRegister(instr->result());
    __ cmp(left_reg, right_op);
    // Select left or right with complementary conditional moves.
    __ Move(result_reg, left_reg, condition);
    __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
  } else {
    ASSERT(instr->hydrogen()->representation().IsDouble());
    DwVfpRegister left_reg = ToDoubleRegister(left);
    DwVfpRegister right_reg = ToDoubleRegister(right);
    DwVfpRegister result_reg = ToDoubleRegister(instr->result());
    Label result_is_nan, return_left, return_right, check_zero, done;
    __ VFPCompareAndSetFlags(left_reg, right_reg);
    // After a VFP compare: mi => left < right, gt => left > right,
    // vs => unordered (at least one NaN).
    if (operation == HMathMinMax::kMathMin) {
      __ b(mi, &return_left);
      __ b(gt, &return_right);
    } else {
      __ b(mi, &return_right);
      __ b(gt, &return_left);
    }
    __ b(vs, &result_is_nan);
    // Left equals right => check for -0.
    __ VFPCompareAndSetFlags(left_reg, 0.0);
    if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
      __ b(ne, &done);  // left == right != 0.
    } else {
      __ b(ne, &return_left);  // left == right != 0.
    }
    // At this point, both left and right are either 0 or -0.
    if (operation == HMathMinMax::kMathMin) {
      // min(+0, -0) must be -0: -(-left - right) gives the sign-or effect.
      // We could use a single 'vorr' instruction here if we had NEON support.
      __ vneg(left_reg, left_reg);
      __ vsub(result_reg, left_reg, right_reg);
      __ vneg(result_reg, result_reg);
    } else {
      // max(+0, -0) must be +0: adding the signed zeros yields +0.
      // Since we operate on +0 and/or -0, vadd and vand have the same effect;
      // the decision for vadd is easy because vand is a NEON instruction.
      __ vadd(result_reg, left_reg, right_reg);
    }
    __ b(&done);

    // NaN involved: vadd propagates the NaN into the result.
    __ bind(&result_is_nan);
    __ vadd(result_reg, left_reg, right_reg);
    __ b(&done);

    __ bind(&return_right);
    __ Move(result_reg, right_reg);
    if (!left_reg.is(result_reg)) {
      __ b(&done);
    }

    __ bind(&return_left);
    __ Move(result_reg, left_reg);

    __ bind(&done);
  }
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Double-precision arithmetic. ADD/SUB/MUL/DIV map directly to single
// VFP instructions; MOD calls out to a C function, preserving r0-r3
// around the call because the register allocator does not treat this
// instruction as a call site for those registers.
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DwVfpRegister left = ToDoubleRegister(instr->left());
  DwVfpRegister right = ToDoubleRegister(instr->right());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  switch (instr->op()) {
    case Token::ADD:
      __ vadd(result, left, right);
      break;
    case Token::SUB:
      __ vsub(result, left, right);
      break;
    case Token::MUL:
      __ vmul(result, left, right);
      break;
    case Token::DIV:
      __ vdiv(result, left, right);
      break;
    case Token::MOD: {
      // Save r0-r3 on the stack.
      __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());

      // Call the fmod-style helper with both operands in double registers
      // (0 integer args, 2 double args).
      __ PrepareCallCFunction(0, 2, scratch0());
      __ SetCallCDoubleArguments(left, right);
      __ CallCFunction(
          ExternalReference::double_fp_operation(Token::MOD, isolate()),
          0, 2);
      // Move the result in the double result register.
      __ GetCFunctionDoubleResult(result);

      // Restore r0-r3.
      __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Generic (tagged) binary arithmetic via the BinaryOpStub. The stub's
// calling convention fixes left in r1, right in r0, and the result in
// r0. The trailing nop must immediately follow the call so the inlined
// smi-code patcher sees "no inlined smi code" and does not patch
// optimized code; the const-pool scope keeps the assembler from
// inserting a literal pool between call and nop.
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->left()).is(r1));
  ASSERT(ToRegister(instr->right()).is(r0));
  ASSERT(ToRegister(instr->result()).is(r0));

  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
  // Block literal pool emission to ensure nop indicating no inlined smi code
  // is in the correct position.
  Assembler::BlockConstPoolScope block_const_pool(masm());
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  __ nop();  // Signals no inlined code.
}
|
|
|
|
|
|
|
|
|
|
|
2013-06-20 11:50:50 +00:00
|
|
|
|
// Emits a two-way branch for a branching instruction, on the given
// condition. Branches that would fall through to the next emitted block
// are elided; when neither successor is the fall-through block, a
// conditional branch plus an unconditional branch are emitted.
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

  if (right_block == left_block || condition == al) {
    // Both arms agree (or the branch is unconditional): a plain goto.
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    // True target falls through; branch to the false target on the
    // negated condition.
    __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    // False target falls through; branch to the true target.
    __ b(condition, chunk_->GetAssemblyLabel(left_block));
  } else {
    // Neither target falls through.
    __ b(condition, chunk_->GetAssemblyLabel(left_block));
    __ b(chunk_->GetAssemblyLabel(right_block));
  }
}
|
|
|
|
|
|
|
|
|
|
|
2013-08-14 08:54:27 +00:00
|
|
|
|
template<class InstrType>
|
|
|
|
|
void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) {
|
|
|
|
|
int false_block = instr->FalseDestination(chunk_);
|
|
|
|
|
__ b(condition, chunk_->GetAssemblyLabel(false_block));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2013-05-15 14:24:47 +00:00
|
|
|
|
// Emits a stop instruction for LDebugBreak; execution halts here when
// the instruction is reached (debugger breakpoint support).
void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ stop("LBreak");
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Branches on the boolean value of the input, implementing ECMAScript
// ToBoolean. When the hydrogen type is statically known, a specialized
// short sequence is emitted; otherwise the set of types observed so far
// (expected_input_types) drives a chain of inline checks, and anything
// outside that set triggers a deopt so the set can be widened.
void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32() || r.IsSmi()) {
    ASSERT(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    // Zero is false; everything else is true.
    __ cmp(reg, Operand::Zero());
    EmitBranch(instr, ne);
  } else if (r.IsDouble()) {
    ASSERT(!info()->IsStub());
    DwVfpRegister reg = ToDoubleRegister(instr->value());
    // Test the double value. Zero and NaN are false.
    __ VFPCompareAndSetFlags(reg, 0.0);
    __ cmp(r0, r0, vs);  // If NaN, set the Z flag. (NaN -> false)
    EmitBranch(instr, ne);
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      ASSERT(!info()->IsStub());
      // true is the only truthy boolean.
      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
      EmitBranch(instr, eq);
    } else if (type.IsSmi()) {
      ASSERT(!info()->IsStub());
      // Smi zero has the same bit pattern as integer zero.
      __ cmp(reg, Operand::Zero());
      EmitBranch(instr, ne);
    } else if (type.IsJSArray()) {
      ASSERT(!info()->IsStub());
      // A JSArray is always truthy.
      EmitBranch(instr, al);
    } else if (type.IsHeapNumber()) {
      ASSERT(!info()->IsStub());
      DwVfpRegister dbl_scratch = double_scratch0();
      __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
      // Test the double value. Zero and NaN are false.
      __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
      __ cmp(r0, r0, vs);  // If NaN, set the Z flag. (NaN)
      EmitBranch(instr, ne);
    } else if (type.IsString()) {
      ASSERT(!info()->IsStub());
      // A string is truthy iff its length is nonzero.
      __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
      __ cmp(ip, Operand::Zero());
      EmitBranch(instr, ne);
    } else {
      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
        __ b(eq, instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // Boolean -> its value.
        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
        __ b(eq, instr->TrueLabel(chunk_));
        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
        __ b(eq, instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ CompareRoot(reg, Heap::kNullValueRootIndex);
        __ b(eq, instr->FalseLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ cmp(reg, Operand::Zero());
        __ b(eq, instr->FalseLabel(chunk_));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ SmiTst(reg);
        DeoptimizeIf(eq, instr->environment());
      }

      const Register map = scratch0();
      if (expected.NeedsMap()) {
        __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));

        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
          __ tst(ip, Operand(1 << Map::kIsUndetectable));
          __ b(ne, instr->FalseLabel(chunk_));
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
        __ b(ge, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
        __ b(ge, &not_string);
        __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
        __ cmp(ip, Operand::Zero());
        __ b(ne, instr->TrueLabel(chunk_));
        __ b(instr->FalseLabel(chunk_));
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::SYMBOL)) {
        // Symbol value -> true.
        __ CompareInstanceType(map, ip, SYMBOL_TYPE);
        __ b(eq, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        DwVfpRegister dbl_scratch = double_scratch0();
        Label not_heap_number;
        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
        __ b(ne, &not_heap_number);
        __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
        __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
        __ cmp(r0, r0, vs);  // NaN -> false.
        __ b(eq, instr->FalseLabel(chunk_));  // +0, -0 -> false.
        __ b(instr->TrueLabel(chunk_));
        __ bind(&not_heap_number);
      }

      if (!expected.IsGeneric()) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        DeoptimizeIf(al, instr->environment());
      }
    }
  }
}
|
|
|
|
|
|
|
|
|
|
|
2011-06-27 12:12:27 +00:00
|
|
|
|
void LCodeGen::EmitGoto(int block) {
|
2013-04-22 09:48:35 +00:00
|
|
|
|
if (!IsNextEmittedBlock(block)) {
|
2013-06-20 11:50:50 +00:00
|
|
|
|
__ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
|
2010-12-07 11:31:57 +00:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Unconditional control transfer to the instruction's target block.
void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Maps a comparison token to the ARM condition code that makes the
// comparison true after a cmp, choosing the unsigned variant (lo/hi/
// ls/hs) when |is_unsigned| is set. Unsupported tokens are a bug.
Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      return eq;
    case Token::LT:
      return is_unsigned ? lo : lt;
    case Token::GT:
      return is_unsigned ? hi : gt;
    case Token::LTE:
      return is_unsigned ? ls : le;
    case Token::GTE:
      return is_unsigned ? hs : ge;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return kNoCondition;
}
|
|
|
|
|
|
|
|
|
|
|
2013-07-05 10:40:14 +00:00
|
|
|
|
// Branches on a numeric comparison. Two constant operands are folded at
// compile time into a plain goto. Doubles are compared with VFP flags
// (unordered results go to the false block). Integer/smi comparisons
// keep a constant on the right of cmp, reversing the condition when the
// operands had to be transposed to achieve that.
void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  Condition cond = TokenToCondition(instr->op(), false);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Compare left and right operands as doubles and load the
      // resulting flags into the normal status register.
      __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
      // If a NaN is involved, i.e. the result is unordered (V set),
      // jump to false block label.
      __ b(vs, instr->FalseLabel(chunk_));
    } else {
      if (right->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(right));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          // Smi representation: compare against the tagged smi value.
          __ cmp(ToRegister(left), Operand(Smi::FromInt(value)));
        } else {
          __ cmp(ToRegister(left), Operand(value));
        }
      } else if (left->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(left));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          __ cmp(ToRegister(right), Operand(Smi::FromInt(value)));
        } else {
          __ cmp(ToRegister(right), Operand(value));
        }
        // We transposed the operands. Reverse the condition.
        cond = ReverseCondition(cond);
      } else {
        __ cmp(ToRegister(left), ToRegister(right));
      }
    }
    EmitBranch(instr, cond);
  }
}
|
|
|
|
|
|
|
|
|
|
|
2011-06-21 11:18:15 +00:00
|
|
|
|
// Branches on raw pointer identity of two tagged values.
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register lhs = ToRegister(instr->left());
  Register rhs = ToRegister(instr->right());

  __ cmp(lhs, Operand(rhs));
  EmitBranch(instr, eq);
}
|
|
|
|
|
|
|
|
|
|
|
2013-08-14 08:54:27 +00:00
|
|
|
|
// Branches to the true block when the input is the hole value. Tagged
// inputs are compared against the hole object directly; double inputs
// are first checked for NaN (a non-NaN can't be the hole NaN) and then
// the upper 32 bits are compared against the hole-NaN bit pattern.
void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ mov(ip, Operand(factory()->the_hole_value()));
    __ cmp(input_reg, ip);
    EmitBranch(instr, eq);
    return;
  }

  // Double case: the hole is encoded as a specific NaN bit pattern.
  DwVfpRegister input_reg = ToDoubleRegister(instr->object());
  __ VFPCompareAndSetFlags(input_reg, input_reg);
  // vc (V clear) => ordered => not NaN => cannot be the hole.
  EmitFalseBranch(instr, vc);

  Register scratch = scratch0();
  __ VmovHigh(scratch, input_reg);
  __ cmp(scratch, Operand(kHoleNanUpper32));
  EmitBranch(instr, eq);
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-15 12:32:19 +00:00
|
|
|
|
Condition LCodeGen::EmitIsObject(Register input,
|
|
|
|
|
Register temp1,
|
|
|
|
|
Label* is_not_object,
|
|
|
|
|
Label* is_object) {
|
2011-06-28 14:21:55 +00:00
|
|
|
|
Register temp2 = scratch0();
|
2011-01-26 07:44:45 +00:00
|
|
|
|
__ JumpIfSmi(input, is_not_object);
|
2011-01-24 12:28:38 +00:00
|
|
|
|
|
2011-06-28 14:21:55 +00:00
|
|
|
|
__ LoadRoot(temp2, Heap::kNullValueRootIndex);
|
|
|
|
|
__ cmp(input, temp2);
|
2011-01-24 12:28:38 +00:00
|
|
|
|
__ b(eq, is_object);
|
|
|
|
|
|
|
|
|
|
// Load map.
|
|
|
|
|
__ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
|
|
|
|
|
// Undetectable objects behave like undefined.
|
|
|
|
|
__ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
|
|
|
|
|
__ tst(temp2, Operand(1 << Map::kIsUndetectable));
|
|
|
|
|
__ b(ne, is_not_object);
|
|
|
|
|
|
|
|
|
|
// Load instance type and check that it is in object type range.
|
|
|
|
|
__ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
|
Implement set trap for proxies, and revamp class hierarchy in preparation:
- Introduce a class JSReceiver, that is a common superclass of JSObject and
JSProxy. Use JSReceiver where appropriate (probably lots of places that we
still have to migrate, but we will find those later with proxy test suite).
- Move appropriate methods to JSReceiver class (SetProperty,
GetPropertyAttribute, Get/SetPrototype, Lookup, and so on).
- Introduce new JSFunctionProxy subclass of JSProxy. Currently only a stub.
- Overhaul enum InstanceType:
* Introduce FIRST/LAST_SPEC_OBJECT_TYPE that ranges over all types that
represent JS objects, and use that consistently to check language types.
* Rename FIRST/LAST_JS_OBJECT_TYPE and FIRST/LAST_FUNCTION_CLASS_TYPE
to FIRST/LAST_[NON]CALLABLE_SPEC_OBJECT_TYPE for clarity.
* Eliminate the overlap over JS_REGEXP_TYPE.
* Also replace FIRST_JS_OBJECT with FIRST_JS_RECEIVER, but only use it where
we exclusively talk about the internal representation type.
* Insert JS_PROXY and JS_FUNCTION_PROXY in the appropriate places.
- Fix all checks concerning classification, especially for functions, to
use the CALLABLE_SPEC_OBJECT range (that includes funciton proxies).
- Handle proxies in SetProperty (that was the easiest part :) ).
- A few simple test cases.
R=kmillikin@chromium.org
Review URL: http://codereview.chromium.org/6992072
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@8126 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2011-05-31 16:38:40 +00:00
|
|
|
|
__ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
|
2011-01-24 12:28:38 +00:00
|
|
|
|
__ b(lt, is_not_object);
|
Implement set trap for proxies, and revamp class hierarchy in preparation:
- Introduce a class JSReceiver, that is a common superclass of JSObject and
JSProxy. Use JSReceiver where appropriate (probably lots of places that we
still have to migrate, but we will find those later with proxy test suite).
- Move appropriate methods to JSReceiver class (SetProperty,
GetPropertyAttribute, Get/SetPrototype, Lookup, and so on).
- Introduce new JSFunctionProxy subclass of JSProxy. Currently only a stub.
- Overhaul enum InstanceType:
* Introduce FIRST/LAST_SPEC_OBJECT_TYPE that ranges over all types that
represent JS objects, and use that consistently to check language types.
* Rename FIRST/LAST_JS_OBJECT_TYPE and FIRST/LAST_FUNCTION_CLASS_TYPE
to FIRST/LAST_[NON]CALLABLE_SPEC_OBJECT_TYPE for clarity.
* Eliminate the overlap over JS_REGEXP_TYPE.
* Also replace FIRST_JS_OBJECT with FIRST_JS_RECEIVER, but only use it where
we exclusively talk about the internal representation type.
* Insert JS_PROXY and JS_FUNCTION_PROXY in the appropriate places.
- Fix all checks concerning classification, especially for functions, to
use the CALLABLE_SPEC_OBJECT range (that includes function proxies).
- Handle proxies in SetProperty (that was the easiest part :) ).
- A few simple test cases.
R=kmillikin@chromium.org
Review URL: http://codereview.chromium.org/6992072
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@8126 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2011-05-31 16:38:40 +00:00
|
|
|
|
__ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
|
2011-01-24 12:28:38 +00:00
|
|
|
|
return le;
|
2010-12-15 12:32:19 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Branches on whether the value is a JS object, delegating the actual
// type-range check to EmitIsObject (which branches directly to the
// true/false labels or falls through with the answer in the flags).
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp());

  Condition true_cond =
      EmitIsObject(reg, temp1,
          instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));

  EmitBranch(instr, true_cond);
}
|
|
|
|
|
|
|
|
|
|
|
2011-11-17 13:57:55 +00:00
|
|
|
|
// Tests whether |input| is a string.  Jumps to |is_not_string| if the value
// is a smi (when the inline smi check is requested); otherwise falls through
// with the comparison result in the flags and returns the condition that
// holds when the value is a string.  Clobbers |temp1|.
Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }
  // All string instance types are below FIRST_NONSTRING_TYPE, so "lt" means
  // the object is a string.
  __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);

  return lt;
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Branches on whether the value is a string.
void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp());

  // Skip the inline smi check when hydrogen has already proven the value
  // is a heap object.
  SmiCheck check_needed =
      instr->hydrogen()->value()->IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  Condition true_cond =
      EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond);
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Branches on whether the value is a smi (smi tag bit clear => eq).
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Register input_reg = EmitLoadRegister(instr->value(), ip);
  __ SmiTst(input_reg);
  EmitBranch(instr, eq);
}
|
|
|
|
|
|
|
|
|
|
|
2011-05-11 11:53:43 +00:00
|
|
|
|
// Branches on whether the value is an undetectable object (e.g. the
// "undetectable" document.all-style objects), by testing the
// kIsUndetectable bit in the object's map.
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
  __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
  __ tst(temp, Operand(1 << Map::kIsUndetectable));
  EmitBranch(instr, ne);
}
|
|
|
|
|
|
|
|
|
|
|
2011-11-17 13:57:55 +00:00
|
|
|
|
// Maps a JS comparison token onto the ARM condition code that holds when
// the comparison is true (after the compare IC has set the flags).
static Condition ComputeCompareCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:  return lt;
    case Token::GT:  return gt;
    case Token::LTE: return le;
    case Token::GTE: return ge;
    default:
      // All comparison tokens are covered above.
      UNREACHABLE();
      return kNoCondition;
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Branches on the result of a generic string comparison, performed by
// calling the (uninitialized) compare IC.
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // This instruction also signals no smi code inlined.
  __ cmp(r0, Operand::Zero());

  Condition condition = ComputeCompareCondition(op);

  EmitBranch(instr, condition);
}
|
|
|
|
|
|
|
|
|
|
|
2011-06-30 14:19:52 +00:00
|
|
|
|
// Returns the single instance type to compare against for an instance-type
// range check.  An open lower bound (FIRST_TYPE) degenerates the check into
// an upper-bound test; otherwise the range must be a single type or open at
// the top.
static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  const InstanceType lower = instr->from();
  const InstanceType upper = instr->to();
  if (lower == FIRST_TYPE) return upper;
  ASSERT(lower == upper || upper == LAST_TYPE);
  return lower;
}
|
|
|
|
|
|
|
|
|
|
|
2011-06-30 14:19:52 +00:00
|
|
|
|
// Returns the condition that, combined with the compare emitted against
// TestType(instr), implements the instance-type range check:
//   exact type -> eq, open-at-top range -> hs, open-at-bottom range -> ls.
static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  const InstanceType lower = instr->from();
  const InstanceType upper = instr->to();
  if (lower == upper) return eq;
  if (upper == LAST_TYPE) return hs;
  if (lower == FIRST_TYPE) return ls;
  UNREACHABLE();
  return eq;
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Branches on whether the value's instance type falls in the range
// described by the hydrogen instruction (see TestType/BranchCondition).
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
  EmitBranch(instr, BranchCondition(instr->hydrogen()));
}
|
|
|
|
|
|
|
|
|
|
|
2011-02-16 08:21:45 +00:00
|
|
|
|
// Extracts the array index cached in a string's hash field into the
// result register.
void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Branches on whether a string's hash field contains a cached array index
// (the "contains" mask bits are all clear => eq => index present).
void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ ldr(scratch,
         FieldMemOperand(input, String::kHashFieldOffset));
  __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
  EmitBranch(instr, eq);
}
|
|
|
|
|
|
|
|
|
|
|
2011-01-11 14:01:53 +00:00
|
|
|
|
// Emits the %_ClassOf test for |input| against |class_name|.
// Branches to a label or falls through with the answer in flags.  Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!input.is(temp2));
  ASSERT(!temp.is(temp2));

  __ JumpIfSmi(input, is_false);

  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
    // Assuming the following assertions, we can use the same compares to test
    // for both being a function type and being in the object type range.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1);
    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
    __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
    __ b(lt, is_false);
    __ b(eq, is_true);
    __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
    __ b(eq, is_true);
  } else {
    // Faster code path to avoid two compares: subtract lower bound from the
    // actual type and do a signed compare with the width of the type range.
    __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
    __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
    __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
                          FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ b(gt, is_false);
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));

  // Objects with a non-function constructor have class 'Object'.
  __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
    __ b(ne, is_true);
  } else {
    __ b(ne, is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(temp, FieldMemOperand(temp,
                               SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized since it's a literal.
  // The name in the constructor is internalized because of the way the context
  // is booted. This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax. Since both sides are internalized it is sufficient to use an
  // identity comparison.
  __ cmp(temp, Operand(class_name));
  // End with the answer in flags.
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Branches on whether the value's class (as tested by EmitClassOfTest)
// matches the literal class name.
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = scratch0();
  Register temp2 = ToRegister(instr->temp());
  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
      class_name, input, temp, temp2);

  // EmitClassOfTest either branched already or left the answer in the
  // flags (eq on match).
  EmitBranch(instr, eq);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Branches on whether the object's map is identical to the expected map.
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
  __ cmp(temp, Operand(instr->map()));
  EmitBranch(instr, eq);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Implements the generic "instanceof" operator by calling InstanceofStub
// and materializing a boolean in r0 from the stub's result (0 => true).
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->left()).is(r0));  // Object is in r0.
  ASSERT(ToRegister(instr->right()).is(r1));  // Function is in r1.

  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);

  // Select the boolean with conditional moves: stub result 0 means true.
  __ cmp(r0, Operand::Zero());
  __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
  __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
}
|
|
|
|
|
|
|
|
|
|
|
2011-01-05 12:01:53 +00:00
|
|
|
|
// Implements "instanceof" against a known global function using an inlined
// call-site cache: two hole values emitted here are later patched by the
// stub to the last map/result pair.  Instruction positions inside the
// const-pool-blocked scope are patch offsets and must not change.
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
    Label* map_check() { return &map_check_; }
   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };

  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());

  ASSERT(object.is(r0));
  ASSERT(result.is(r0));

  // A Smi is not instance of anything.
  __ JumpIfSmi(object, &false_result);

  // This is the inlined call site instanceof cache. The two occurrences of
  // the hole value will be patched to the last map/result pair generated by
  // the instanceof stub.
  Label cache_miss;
  Register map = temp;
  __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
  {
    // Block constant pool emission to ensure the positions of instructions
    // are as expected by the patcher. See InstanceofStub::Generate().
    Assembler::BlockConstPoolScope block_const_pool(masm());
    __ bind(deferred->map_check());  // Label for calculating code patching.
    // We use Factory::the_hole_value() on purpose instead of loading from the
    // root array to force relocation to be able to later patch with
    // the cached map.
    PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
    Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
    __ mov(ip, Operand(Handle<Object>(cell)));
    __ ldr(ip, FieldMemOperand(ip, PropertyCell::kValueOffset));
    __ cmp(map, Operand(ip));
    __ b(ne, &cache_miss);
    // We use Factory::the_hole_value() on purpose instead of loading from the
    // root array to force relocation to be able to later patch
    // with true or false.
    __ mov(result, Operand(factory()->the_hole_value()));
  }
  __ b(&done);

  // The inlined call site cache did not match. Check null and string before
  // calling the deferred code.
  __ bind(&cache_miss);
  // Null is not instance of anything.
  __ LoadRoot(ip, Heap::kNullValueRootIndex);
  __ cmp(object, Operand(ip));
  __ b(eq, &false_result);

  // String values are not instances of anything.
  Condition is_string = masm_->IsObjectStringType(object, temp);
  __ b(is_string, &false_result);

  // Go to the deferred code.
  __ b(deferred->entry());

  __ bind(&false_result);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);

  // Here result has either true or false. Deferred code also produces true or
  // false object.
  __ bind(deferred->exit());
  __ bind(&done);
}
|
|
|
|
|
|
|
|
|
|
|
2011-11-16 08:44:30 +00:00
|
|
|
|
// Deferred (slow) path for DoInstanceOfKnownGlobal: calls InstanceofStub
// with the inline-check flags, passing the offset back to the map-check
// site through r4's safepoint slot so the stub can patch the call site.
void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                               Label* map_check) {
  Register result = ToRegister(instr->result());
  ASSERT(result.is(r0));

  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kArgsInRegisters);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kCallSiteInlineCheck);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kReturnTrueFalseObject);
  InstanceofStub stub(flags);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  LoadContextFromDeferred(instr->context());

  // Get the temp register reserved by the instruction. This needs to be r4 as
  // its slot of the pushing of safepoint registers is used to communicate the
  // offset to the location of the map check.
  Register temp = ToRegister(instr->temp());
  ASSERT(temp.is(r4));
  __ LoadHeapObject(InstanceofStub::right(), instr->function());
  static const int kAdditionalDelta = 5;
  // Make sure that code size is predictable, since we use specific constant
  // offsets in the code to find embedded values.
  PredictableCodeSizeScope predictable(masm_, 6 * Assembler::kInstrSize);
  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
  Label before_push_delta;
  __ bind(&before_push_delta);
  __ BlockConstPoolFor(kAdditionalDelta);
  __ mov(temp, Operand(delta * kPointerSize));
  // The mov above can generate one or two instructions. The delta was computed
  // for two instructions, so we need to pad here in case of one instruction.
  if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
    ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
    __ nop();
  }
  __ StoreToSafepointRegisterSlot(temp, temp);
  CallCodeGeneric(stub.GetCode(isolate()),
                  RelocInfo::CODE_TARGET,
                  instr,
                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  // Put the result value into the result register slot and
  // restore all registers.
  __ StoreToSafepointRegisterSlot(result, result);
}
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
|
2013-09-23 18:57:32 +00:00
|
|
|
|
// Generic (tagged) comparison: calls the compare IC, then materializes
// true/false in the result register via conditional LoadRoot.
void LCodeGen::DoCmpT(LCmpT* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // This instruction also signals no smi code inlined.
  __ cmp(r0, Operand::Zero());

  Condition condition = ComputeCompareCondition(op);
  __ LoadRoot(ToRegister(instr->result()),
              Heap::kTrueValueRootIndex,
              condition);
  __ LoadRoot(ToRegister(instr->result()),
              Heap::kFalseValueRootIndex,
              NegateCondition(condition));
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Emits the function epilogue: optional trace-exit call, restore of
// caller-saved double registers, frame teardown, stack adjustment for the
// (constant or dynamic) argument count, and the return jump.
void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in r0.  We're leaving the code
    // managed by the register allocator and tearing down the frame, it's
    // safe to write to the context register.
    __ push(r0);
    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  if (info()->saves_caller_doubles()) {
    ASSERT(NeedsEagerFrame());
    BitVector* doubles = chunk()->allocated_double_registers();
    BitVector::Iterator save_iterator(doubles);
    int count = 0;
    while (!save_iterator.Done()) {
      __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
              MemOperand(sp, count * kDoubleSize));
      save_iterator.Advance();
      count++;
    }
  }
  int no_frame_start = -1;
  if (NeedsEagerFrame()) {
    __ mov(sp, fp);
    no_frame_start = masm_->pc_offset();
    __ ldm(ia_w, sp, fp.bit() | lr.bit());
  }
  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    int32_t sp_delta = (parameter_count + 1) * kPointerSize;
    if (sp_delta != 0) {
      __ add(sp, sp, Operand(sp_delta));
    }
  } else {
    Register reg = ToRegister(instr->parameter_count());
    // The argument count parameter is a smi.
    __ SmiUntag(reg);
    __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
  }

  __ Jump(lr);

  if (no_frame_start != -1) {
    info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
  }
}
|
|
|
|
|
|
|
|
|
|
|
2011-04-01 11:54:04 +00:00
|
|
|
|
// Loads the value stored in a global property cell, deoptimizing if the
// cell holds the hole (the property was deleted) and a hole check is
// required.
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
  __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    __ cmp(result, ip);
    DeoptimizeIf(eq, instr->environment());
  }
}
|
|
|
|
|
|
|
|
|
|
|
2011-04-01 11:54:04 +00:00
|
|
|
|
// Loads a global property through the generic LoadIC (name in r2,
// receiver in r0, result in r0).
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->global_object()).is(r0));
  ASSERT(ToRegister(instr->result()).is(r0));

  __ mov(r2, Operand(instr->name()));
  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
                                             : RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, mode, instr);
}
|
|
|
|
|
|
|
|
|
|
|
2011-04-04 15:03:34 +00:00
|
|
|
|
// Stores a value into a global property cell, deoptimizing if the cell
// currently holds the hole (the property was deleted from the dictionary).
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Register cell = scratch0();

  // Load the cell.
  __ mov(cell, Operand(instr->hydrogen()->cell().handle()));

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    // We use a temp to check the payload (CompareRoot might clobber ip).
    Register payload = ToRegister(instr->temp());
    __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
    __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr->environment());
  }

  // Store the value.
  __ str(value, FieldMemOperand(cell, Cell::kValueOffset));
  // Cells are always rescanned, so no write barrier here.
}
|
|
|
|
|
|
|
|
|
|
|
2011-04-04 15:03:34 +00:00
|
|
|
|
// Stores a global property through the generic StoreIC (name in r2,
// receiver in r1, value in r0), honoring strict mode.
void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->global_object()).is(r1));
  ASSERT(ToRegister(instr->value()).is(r0));

  __ mov(r2, Operand(instr->name()));
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
}
|
|
|
|
|
|
|
|
|
|
|
2011-01-17 08:11:03 +00:00
|
|
|
|
// Loads a slot from a context object.  On a hole, either deoptimizes or
// substitutes undefined, depending on the hydrogen instruction's mode.
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ ldr(result, ContextOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    __ cmp(result, ip);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr->environment());
    } else {
      // Conditionally replace the hole with undefined.
      __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
    }
  }
}
|
|
|
|
|
|
|
|
|
|
|
2011-02-03 13:10:28 +00:00
|
|
|
|
// Stores a value into a context slot, with optional hole check (deopt or
// skip the assignment) and a write barrier when the stored value may be a
// heap object.
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  MemOperand target = ContextOperand(context, instr->slot_index());

  Label skip_assignment;

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ ldr(scratch, target);
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    __ cmp(scratch, ip);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr->environment());
    } else {
      __ b(ne, &skip_assignment);
    }
  }

  __ str(value, target);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    // Skip the smi check in the barrier when the value is known to be a
    // heap object.
    SmiCheck check_needed =
        instr->hydrogen()->value()->IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    __ RecordWriteContextSlot(context,
                              target.offset(),
                              value,
                              scratch,
                              GetLinkRegisterState(),
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Loads a named field described by an HObjectAccess: external memory,
// in-object double, or a (possibly out-of-object, i.e. properties-array)
// tagged/byte field.
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();
  Register object = ToRegister(instr->object());

  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    MemOperand operand = MemOperand(object, offset);
    if (access.representation().IsByte()) {
      __ ldrb(result, operand);
    } else {
      __ ldr(result, operand);
    }
    return;
  }

  if (instr->hydrogen()->representation().IsDouble()) {
    DwVfpRegister result = ToDoubleRegister(instr->result());
    __ vldr(result, FieldMemOperand(object, offset));
    return;
  }

  Register result = ToRegister(instr->result());
  if (!access.IsInobject()) {
    // Out-of-object fields live in the properties backing store.
    __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
    object = result;
  }
  MemOperand operand = FieldMemOperand(object, offset);
  if (access.representation().IsByte()) {
    __ ldrb(result, operand);
  } else {
    __ ldr(result, operand);
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Generic named load: delegates to the LoadIC stub. The IC calling
// convention fixes the receiver in r0, the name in r2, and the result in r0.
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).is(r0));
  ASSERT(ToRegister(instr->result()).is(r0));

  // Name is always in r2.
  __ mov(r2, Operand(instr->name()));
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-22 15:43:32 +00:00
|
|
|
|
// Loads the prototype of a JSFunction into |result|, deoptimizing if the
// input is not a function or has no (usable) prototype. Handles both the
// direct-prototype case and the initial-map indirection.
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register scratch = scratch0();
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Check that the function really is a function. Load map into the
  // result register (CompareObjectType leaves the map in |result|).
  __ CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
  DeoptimizeIf(ne, instr->environment());

  // Make sure that the function has an instance prototype; otherwise the
  // prototype lives in the constructor field of the initial map.
  Label non_instance;
  __ ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
  __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
  __ b(ne, &non_instance);

  // Get the prototype or initial map from the function.
  __ ldr(result,
         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map; the hole
  // marks a function without one.
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ cmp(result, ip);
  DeoptimizeIf(eq, instr->environment());

  // If the function does not have an initial map, we're done: |result|
  // already holds the prototype.
  Label done;
  __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
  __ b(ne, &done);

  // Get the prototype from the initial map.
  __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
  __ jmp(&done);

  // Non-instance prototype: Fetch prototype from constructor field
  // in initial map (|result| holds the function's map here).
  __ bind(&non_instance);
  __ ldr(result, FieldMemOperand(result, Map::kConstructorOffset));

  // All done.
  __ bind(&done);
}
|
|
|
|
|
|
|
|
|
|
|
2013-09-19 06:08:13 +00:00
|
|
|
|
// Loads a value from the root list (indexed by instr->index()) into the
// result register.
void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());
}
|
|
|
|
|
|
|
|
|
|
|
2011-03-09 15:01:16 +00:00
|
|
|
|
// Loads the backing-store pointer of an external array object into the
// result register.
void LCodeGen::DoLoadExternalArrayPointer(
    LLoadExternalArrayPointer* instr) {
  Register to_reg = ToRegister(instr->result());
  Register from_reg = ToRegister(instr->object());
  __ ldr(to_reg, FieldMemOperand(from_reg,
                                 ExternalArray::kExternalPointerOffset));
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Loads an argument from the arguments area pointed to by |arguments|.
// Arguments are addressed relative to the frame, so the element index is
// computed as (length - index) + 1 (see the two-words-between comment below).
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());
  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    // Both operands constant: fold the offset at compile time.
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
    int index = (const_length - const_index) + 1;
    __ ldr(result, MemOperand(arguments, index * kPointerSize));
  } else {
    Register length = ToRegister(instr->length());
    Register index = ToRegister(instr->index());
    // There are two words between the frame pointer and the last argument.
    // Subtracting from length accounts for one of them add one more.
    // NOTE: |length| is clobbered here to hold the computed element index.
    __ sub(length, length, index);
    __ add(length, length, Operand(1));
    __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
  }
}
|
|
|
|
|
|
|
|
|
|
|
2012-11-02 09:18:53 +00:00
|
|
|
|
// Keyed load from an external (typed) array. Floating-point kinds go
// through VFP registers; integer kinds use width/sign-specific ARM loads.
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    // Reject constant keys whose shifted offset could overflow the
    // addressing computation.
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  // Smi keys carry a tag bit, so the scaling shift is reduced accordingly.
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int additional_offset = instr->additional_index() << element_size_shift;

  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    // Floating-point case: compute the element address into scratch0()
    // and load via vldr. Floats are widened to double after the load.
    DwVfpRegister result = ToDoubleRegister(instr->result());
    Operand operand = key_is_constant
        ? Operand(constant_key << element_size_shift)
        : Operand(key, LSL, shift_size);
    __ add(scratch0(), external_pointer, operand);
    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
      __ vldr(double_scratch0().low(), scratch0(), additional_offset);
      __ vcvt_f64_f32(result, double_scratch0().low());
    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
      __ vldr(result, scratch0(), additional_offset);
    }
  } else {
    Register result = ToRegister(instr->result());
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size,
        instr->additional_index(), additional_offset);
    switch (elements_kind) {
      case EXTERNAL_BYTE_ELEMENTS:
        __ ldrsb(result, mem_operand);  // sign-extending byte load
        break;
      case EXTERNAL_PIXEL_ELEMENTS:
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
        __ ldrb(result, mem_operand);   // zero-extending byte load
        break;
      case EXTERNAL_SHORT_ELEMENTS:
        __ ldrsh(result, mem_operand);  // sign-extending halfword load
        break;
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
        __ ldrh(result, mem_operand);   // zero-extending halfword load
        break;
      case EXTERNAL_INT_ELEMENTS:
        __ ldr(result, mem_operand);
        break;
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
        __ ldr(result, mem_operand);
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          // Values >= 2^31 cannot be represented as a signed int32
          // result, so deoptimize unless uint32 results are allowed.
          __ cmp(result, Operand(0x80000000));
          DeoptimizeIf(cs, instr->environment());
        }
        break;
      case EXTERNAL_FLOAT_ELEMENTS:
      case EXTERNAL_DOUBLE_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        // FP kinds are handled above; non-external kinds never reach
        // this function (see DoLoadKeyed dispatch).
        UNREACHABLE();
        break;
    }
  }
}
|
|
|
|
|
|
|
|
|
|
|
2012-11-02 09:18:53 +00:00
|
|
|
|
// Keyed load from a FixedDoubleArray into a VFP register, with an optional
// hole-NaN check that deoptimizes when the slot holds the hole sentinel.
void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  bool key_is_constant = instr->key()->IsConstantOperand();
  Register key = no_reg;
  DwVfpRegister result = ToDoubleRegister(instr->result());
  Register scratch = scratch0();

  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);

  // Static part of the element offset: header, untagging, and any
  // additional index known at compile time.
  int base_offset =
      FixedDoubleArray::kHeaderSize - kHeapObjectTag +
      (instr->additional_index() << element_size_shift);
  if (key_is_constant) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    base_offset += constant_key << element_size_shift;
  }
  __ add(scratch, elements, Operand(base_offset));

  if (!key_is_constant) {
    // Dynamic key: add the scaled key on top of the static base.
    key = ToRegister(instr->key());
    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
        ? (element_size_shift - kSmiTagSize) : element_size_shift;
    __ add(scratch, scratch, Operand(key, LSL, shift_size));
  }

  // scratch now points at the double element.
  __ vldr(result, scratch, 0);

  if (instr->hydrogen()->RequiresHoleCheck()) {
    // The hole is encoded as a specific NaN; checking the upper word
    // (at offset sizeof(kHoleNanLower32)) is sufficient.
    __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
    __ cmp(scratch, Operand(kHoleNanUpper32));
    DeoptimizeIf(eq, instr->environment());
  }
}
|
|
|
|
|
|
|
|
|
|
|
2012-11-02 09:18:53 +00:00
|
|
|
|
// Keyed load from a tagged FixedArray. Constant keys are folded into the
// field offset; dynamic keys are scaled into a scratch base register.
// Optionally deoptimizes on the hole (or on a non-smi, for smi-only kinds).
void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = 0;

  if (instr->key()->IsConstantOperand()) {
    // Whole offset known at compile time; load straight off |elements|.
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
                                           instr->additional_index());
    store_base = elements;
  } else {
    Register key = ToRegister(instr->key());
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
    } else {
      __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
    }
    offset = FixedArray::OffsetOfElementAt(instr->additional_index());
  }
  __ ldr(result, FieldMemOperand(store_base, offset));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      // Smi-only array: anything with a non-smi tag is treated as a hole.
      __ SmiTst(result);
      DeoptimizeIf(ne, instr->environment());
    } else {
      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
      __ cmp(result, scratch);
      DeoptimizeIf(eq, instr->environment());
    }
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Dispatches a keyed load to the helper matching the backing-store kind:
// external (typed) arrays, fixed double arrays, or tagged fixed arrays.
void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
  if (instr->is_external()) {
    DoLoadKeyedExternalArray(instr);
    return;
  }
  if (instr->hydrogen()->representation().IsDouble()) {
    DoLoadKeyedFixedDoubleArray(instr);
    return;
  }
  DoLoadKeyedFixedArray(instr);
}
|
|
|
|
|
|
|
|
|
|
|
2012-07-20 11:00:33 +00:00
|
|
|
|
// Builds the MemOperand for a keyed access. When a non-zero additional
// index accompanies a register key, the combined index is first folded
// into scratch0(); the returned operand then uses base + (index << shift)
// (or LSR 1 for tagged keys with shift_size == -1).
MemOperand LCodeGen::PrepareKeyedOperand(Register key,
                                         Register base,
                                         bool key_is_constant,
                                         int constant_key,
                                         int element_size,
                                         int shift_size,
                                         int additional_index,
                                         int additional_offset) {
  if (!key_is_constant && additional_index != 0) {
    // Pre-scale the additional index so that the final shifted operand
    // covers both key and additional index.
    additional_index *= 1 << (element_size - shift_size);
    __ add(scratch0(), key, Operand(additional_index));
  }

  if (key_is_constant) {
    // Fully static offset.
    return MemOperand(base,
                      (constant_key << element_size) + additional_offset);
  }

  // Pick the index register: the raw key, or the combined value computed
  // above when there was an additional index.
  Register index = (additional_index == 0) ? key : scratch0();
  if (shift_size >= 0) {
    return MemOperand(base, index, LSL, shift_size);
  }
  // shift_size == -1 encodes a smi key for byte-sized elements: shift
  // right by one to strip the tag.
  ASSERT_EQ(-1, shift_size);
  return MemOperand(base, index, LSR, 1);
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Generic keyed load: delegates to the KeyedLoadIC stub. The IC calling
// convention fixes the receiver in r1 and the key in r0.
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).is(r1));
  ASSERT(ToRegister(instr->key()).is(r0));

  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Computes the frame pointer to the arguments area into |result|: sp-based
// for inlined frames, otherwise fp or (if the caller is an arguments
// adaptor) the adaptor frame's pointer.
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    __ sub(result, sp, Operand(2 * kPointerSize));
  } else {
    // Check if the calling frame is an arguments adaptor frame.
    Label done, adapted;
    __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
    __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

    // Result is the frame pointer for the frame if not adapted and for the real
    // frame below the adaptor frame if adapted.
    // Both movs are predicated on the cmp above, so exactly one executes.
    __ mov(result, fp, LeaveCC, ne);
    __ mov(result, scratch, LeaveCC, eq);
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Computes the (untagged) argument count into |result|: the static
// parameter count when no adaptor frame is present, otherwise the length
// stored in the arguments adaptor frame.
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elem = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());

  Label done;

  // If no arguments adaptor frame the number of arguments is fixed.
  // (elem == fp means DoArgumentsElements found no adaptor frame.)
  __ cmp(fp, elem);
  __ mov(result, Operand(scope()->num_parameters()));
  __ b(eq, &done);

  // Arguments adaptor frame present. Get argument length from there.
  __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(result,
         MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Argument length is in result register.
  __ bind(&done);
}
|
|
|
|
|
|
|
|
|
|
|
2012-03-12 12:49:41 +00:00
|
|
|
|
// Wraps the call receiver in place: for normal (non-strict, non-builtin)
// functions, null/undefined receivers are replaced with the global
// receiver, and non-object receivers cause a deopt.
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register scratch = scratch0();

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, receiver_ok;

  // Do not transform the receiver to object for strict mode
  // functions.
  __ ldr(scratch,
         FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(scratch,
         FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
  // The hints field is a smi, hence the extra kSmiTagSize shift on the bit.
  __ tst(scratch,
         Operand(1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize)));
  __ b(ne, &receiver_ok);

  // Do not transform the receiver to object for builtins.
  __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
  __ b(ne, &receiver_ok);

  // Normal function. Replace undefined or null with global receiver.
  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
  __ cmp(receiver, scratch);
  __ b(eq, &global_object);
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ cmp(receiver, scratch);
  __ b(eq, &global_object);

  // Deoptimize if the receiver is not a JS object.
  __ SmiTst(receiver);
  DeoptimizeIf(eq, instr->environment());
  __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
  DeoptimizeIf(lt, instr->environment());
  __ jmp(&receiver_ok);

  // Null/undefined receiver: substitute the global receiver object.
  __ bind(&global_object);
  __ ldr(receiver, GlobalObjectOperand());
  __ ldr(receiver,
         FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
  __ bind(&receiver_ok);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Implements Function.prototype.apply-style invocation: pushes up to
// kArgumentsLimit arguments from the arguments area onto the stack
// (deoptimizing above the limit) and invokes |function| via a lazy-deopt
// safepoint.
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  ASSERT(receiver.is(r0));  // Used for parameter count.
  ASSERT(function.is(r1));  // Required by InvokeFunction.
  ASSERT(ToRegister(instr->result()).is(r0));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ cmp(length, Operand(kArgumentsLimit));
  DeoptimizeIf(hi, instr->environment());

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ push(receiver);
  __ mov(receiver, length);
  // The arguments are at a one pointer size offset from elements.
  __ add(elements, elements, Operand(1 * kPointerSize));

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ cmp(length, Operand::Zero());
  __ b(eq, &invoke);
  __ bind(&loop);
  // Arguments are pushed from highest index down; |length| doubles as the
  // loop counter.
  __ ldr(scratch, MemOperand(elements, length, LSL, 2));
  __ push(scratch);
  __ sub(length, length, Operand(1), SetCC);
  __ b(ne, &loop);

  __ bind(&invoke);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in receiver which is r0, as expected
  // by InvokeFunction.
  ParameterCount actual(receiver);
  __ InvokeFunction(function, actual, CALL_FUNCTION,
                    safepoint_generator, CALL_AS_METHOD);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Pushes a single call argument onto the stack. Double operands are not
// supported by this instruction and abort code generation.
void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  bool is_double_operand =
      argument->IsDoubleRegister() || argument->IsDoubleStackSlot();
  if (is_double_operand) {
    Abort(kDoPushArgumentNotImplementedForDoubleType);
    return;
  }
  // Materialize the operand into a register (ip as a fallback) and push it.
  Register argument_reg = EmitLoadRegister(argument, ip);
  __ push(argument_reg);
}
|
|
|
|
|
|
|
|
|
|
|
2012-04-11 13:40:55 +00:00
|
|
|
|
// Drops instr->count() entries from the top of the stack.
void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}
|
|
|
|
|
|
|
|
|
|
|
2011-05-31 11:54:46 +00:00
|
|
|
|
// Loads the current JSFunction from the standard frame slot into |result|.
void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
|
|
|
|
|
|
|
|
|
|
|
2011-02-03 13:10:28 +00:00
|
|
|
|
// Materializes the current context into the result register.
void LCodeGen::DoContext(LContext* instr) {
  // If there is a non-return use, the context must be moved to a register.
  Register result = ToRegister(instr->result());
  if (!info()->IsOptimizing()) {
    // If there is no frame, the context must be in cp.
    ASSERT(result.is(cp));
    return;
  }
  __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Loads the enclosing (previous) context of |context| into |result|.
void LCodeGen::DoOuterContext(LOuterContext* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ ldr(result,
         MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
|
|
|
|
|
|
|
|
|
|
|
2012-02-14 14:14:51 +00:00
|
|
|
|
// Declares global variables/functions by calling Runtime::kDeclareGlobals
// with (context, declaration pairs, flags) pushed as arguments.
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  __ push(cp);  // The context is the first argument.
  __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
  __ push(scratch0());
  __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
  __ push(scratch0());
  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Loads the global object from the given context into |result|.
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ ldr(result, ContextOperand(context, Context::GLOBAL_OBJECT_INDEX));
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Loads the global receiver object from the given global object into
// |result|.
void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
  Register global = ToRegister(instr->global_object());
  Register result = ToRegister(instr->result());
  __ ldr(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Calls a statically-known JSFunction. When the arity matches the formal
// parameter count (or the function opts out of adaption), the call is made
// directly through the code entry; otherwise it goes through the full
// InvokeFunction path, which can run the arguments adaptor.
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count,
                                 int arity,
                                 LInstruction* instr,
                                 CallKind call_kind,
                                 R1State r1_state) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());

  if (can_invoke_directly) {
    // Callers may have preloaded the function into r1 (R1_CONTAINS_TARGET);
    // only load it here when it is still uninitialized.
    if (r1_state == R1_UNINITIALIZED) {
      __ LoadHeapObject(r1, function);
    }

    // Change context.
    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

    // Set r0 to arguments count if adaption is not needed. Assumes that r0
    // is available to write to at this point.
    if (dont_adapt_arguments) {
      __ mov(r0, Operand(arity));
    }

    // Invoke function.
    __ SetCallKind(r5, call_kind);
    __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
    __ Call(ip);

    // Set up deoptimization.
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  } else {
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    ParameterCount expected(formal_parameter_count);
    __ InvokeFunction(
        function, expected, count, CALL_FUNCTION, generator, call_kind);
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Calls a function known at compile time; thin wrapper around
// CallKnownFunction with r1 left for the helper to initialize.
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
  ASSERT(ToRegister(instr->result()).is(r0));
  CallKnownFunction(instr->hydrogen()->function(),
                    instr->hydrogen()->formal_parameter_count(),
                    instr->arity(),
                    instr,
                    CALL_AS_METHOD,
                    R1_UNINITIALIZED);
}
|
|
|
|
|
|
|
|
|
|
|
2013-04-11 13:27:06 +00:00
|
|
|
|
// Deferred slow path of Math.abs for a tagged input: deoptimizes unless the
// input is a heap number; positive numbers are returned as-is, negative
// ones get a freshly allocated heap number with the sign bit cleared.
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  ASSERT(instr->context() != NULL);
  ASSERT(ToRegister(instr->context()).is(cp));
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Deoptimize if not a heap number.
  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
  __ cmp(scratch, Operand(ip));
  DeoptimizeIf(ne, instr->environment());

  Label done;
  // Rebind scratch0() as |exponent| and retire |scratch| to avoid
  // accidental reuse under the old name.
  Register exponent = scratch0();
  scratch = no_reg;
  __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it.
  __ tst(exponent, Operand(HeapNumber::kSignMask));
  // Move the input to the result if necessary.
  __ Move(result, input);
  __ b(eq, &done);

  // Input is negative. Reverse its sign.
  // Preserve the value of all registers.
  {
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

    // Registers were saved at the safepoint, so we can use
    // many scratch registers. Each tmp avoids aliasing |input|.
    Register tmp1 = input.is(r1) ? r0 : r1;
    Register tmp2 = input.is(r2) ? r0 : r2;
    Register tmp3 = input.is(r3) ? r0 : r3;
    Register tmp4 = input.is(r4) ? r0 : r4;

    // exponent: floating point exponent value.

    Label allocated, slow;
    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
    __ b(&allocated);

    // Slow case: Call the runtime system to do the number allocation.
    __ bind(&slow);

    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
                            instr->context());
    // Set the pointer to the new heap number in tmp.
    if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
    // Restore input_reg after call to runtime.
    __ LoadFromSafepointRegisterSlot(input, input);
    __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));

    __ bind(&allocated);
    // exponent: floating point exponent value.
    // tmp1: allocated heap number.
    // Clear the sign bit and copy both words into the new heap number.
    __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
    __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
    __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
    __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));

    // Publish the new heap number as the result in the safepoint frame.
    __ StoreToSafepointRegisterSlot(tmp1, result);
  }

  __ bind(&done);
}
|
|
|
|
|
|
|
|
|
|
|
2013-04-11 13:27:06 +00:00
|
|
|
|
// Emits code computing the absolute value of an integer (smi or int32)
// input, deoptimizing on overflow (abs(kMinInt) is not representable).
void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  // Non-negative input (N flag clear): the result is the input itself.
  __ cmp(input, Operand::Zero());
  __ Move(result, input, pl);
  // We can make rsb conditional because the previous cmp instruction
  // will clear the V (overflow) flag and rsb won't set this flag
  // if input is positive.
  __ rsb(result, input, Operand::Zero(), SetCC, mi);
  // Deoptimize on overflow.
  DeoptimizeIf(vs, instr->environment());
}
|
|
|
|
|
|
|
|
|
|
|
2013-04-11 13:27:06 +00:00
|
|
|
|
// Generates code for Math.abs. Dispatches on the value's representation:
// doubles use a single vabs, smis/int32s use the integer path, and tagged
// values take a fast smi path with a deferred heap-number fallback.
void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case: handles a tagged input that is not a smi.
  class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LMathAbs* instr_;
  };

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    DwVfpRegister input = ToDoubleRegister(instr->value());
    DwVfpRegister result = ToDoubleRegister(instr->result());
    __ vabs(result, input);
  } else if (r.IsSmiOrInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {
    // Representation is tagged.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input = ToRegister(instr->value());
    // Smi check: non-smis (heap numbers) go to the deferred code.
    __ JumpIfNotSmi(input, deferred->entry());
    // If smi, handle it directly.
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}
|
|
|
|
|
|
|
|
|
|
|
2013-04-11 13:27:06 +00:00
|
|
|
|
// Generates code for Math.floor with an int32 result. Deoptimizes when the
// floor does not fit in an int32, and (optionally) on a -0 result.
void LCodeGen::DoMathFloor(LMathFloor* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register input_high = scratch0();
  Label done, exact;

  // TryInt32Floor jumps to &done/&exact on success; falling through means
  // the value cannot be represented, so deoptimize unconditionally.
  __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
  DeoptimizeIf(al, instr->environment());

  __ bind(&exact);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0: a zero result with a set sign bit in the upper input
    // word means the input was -0.
    __ cmp(result, Operand::Zero());
    __ b(ne, &done);
    __ cmp(input_high, Operand::Zero());
    DeoptimizeIf(mi, instr->environment());
  }
  __ bind(&done);
}
|
|
|
|
|
|
|
|
|
|
|
2013-04-11 13:27:06 +00:00
|
|
|
|
// Generates code for Math.round with an int32 result. Small magnitudes
// (|x| <= 0.5) are handled inline; larger values are converted via
// floor(x + 0.5). Deoptimizes on unrepresentable results and -0.
void LCodeGen::DoMathRound(LMathRound* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
  DwVfpRegister input_plus_dot_five = double_scratch1;
  Register input_high = scratch0();
  DwVfpRegister dot_five = double_scratch0();
  Label convert, done;

  __ Vmov(dot_five, 0.5, scratch0());
  __ vabs(double_scratch1, input);
  __ VFPCompareAndSetFlags(double_scratch1, dot_five);
  // If input is in [-0.5, -0], the result is -0.
  // If input is in [+0, +0.5[, the result is +0.
  // If the input is +0.5, the result is 1.
  __ b(hi, &convert);  // Out of [-0.5, +0.5].
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Sign bit of the upper input word distinguishes the negative range.
    __ VmovHigh(input_high, input);
    __ cmp(input_high, Operand::Zero());
    DeoptimizeIf(mi, instr->environment());  // [-0.5, -0].
  }
  __ VFPCompareAndSetFlags(input, dot_five);
  __ mov(result, Operand(1), LeaveCC, eq);  // +0.5.
  // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
  // flag kBailoutOnMinusZero.
  __ mov(result, Operand::Zero(), LeaveCC, ne);
  __ b(&done);

  __ bind(&convert);
  __ vadd(input_plus_dot_five, input, dot_five);
  // Reuse dot_five (double_scratch0) as we no longer need this value.
  // Both exits of TryInt32Floor target &done; falling through means the
  // result cannot be represented as int32, so deoptimize.
  __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
                   &done, &done);
  DeoptimizeIf(al, instr->environment());
  __ bind(&done);
}
|
|
|
|
|
|
|
|
|
|
|
2013-04-11 13:27:06 +00:00
|
|
|
|
// Generates code for Math.sqrt using the VFP square-root instruction.
void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  DwVfpRegister value_reg = ToDoubleRegister(instr->value());
  DwVfpRegister result_reg = ToDoubleRegister(instr->result());
  __ vsqrt(result_reg, value_reg);
}
|
|
|
|
|
|
|
|
|
|
|
2013-04-11 13:27:06 +00:00
|
|
|
|
// Generates code for Math.pow(x, 0.5). This is not the same as Math.sqrt
// for -Infinity and -0, which are special-cased below.
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  DwVfpRegister temp = ToDoubleRegister(instr->temp());

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done;
  __ vmov(temp, -V8_INFINITY, scratch0());
  __ VFPCompareAndSetFlags(input, temp);
  // input == -Infinity: result is -(-Infinity) == +Infinity.
  __ vneg(result, temp, eq);
  __ b(&done, eq);

  // Add +0 to convert -0 to +0.
  __ vadd(result, input, kDoubleRegZero);
  __ vsqrt(result, result);
  __ bind(&done);
}
|
|
|
|
|
|
|
|
|
|
|
2011-02-23 10:41:13 +00:00
|
|
|
|
// Generates code for Math.pow by calling MathPowStub, selecting the stub
// variant from the exponent's representation. A tagged exponent must be a
// smi or heap number; anything else deoptimizes.
void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  ASSERT(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(d2));
  ASSERT(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(r2));
  ASSERT(ToDoubleRegister(instr->left()).is(d1));
  ASSERT(ToDoubleRegister(instr->result()).is(d3));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    // Tagged exponent: skip the map check for smis; non-smis must be heap
    // numbers, otherwise deoptimize.
    Label no_deopt;
    __ JumpIfSmi(r2, &no_deopt);
    __ ldr(r6, FieldMemOperand(r2, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
    __ cmp(r6, Operand(ip));
    DeoptimizeIf(ne, instr->environment());
    __ bind(&no_deopt);
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    ASSERT(exponent_type.IsDouble());
    MathPowStub stub(MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}
|
|
|
|
|
|
|
|
|
|
|
2012-01-11 10:35:37 +00:00
|
|
|
|
// Generates code for Math.random using two 16-bit multiply-with-carry
// generators whose seeds live in the native context, producing a double
// in [0, 1).
void LCodeGen::DoRandom(LRandom* instr) {
  // Assert that the register size is indeed the size of each seed.
  static const int kSeedSize = sizeof(uint32_t);
  STATIC_ASSERT(kPointerSize == kSeedSize);

  // Load native context (reusing the global object register).
  Register global_object = ToRegister(instr->global_object());
  Register native_context = global_object;
  __ ldr(native_context, FieldMemOperand(
      global_object, GlobalObject::kNativeContextOffset));

  // Load state (FixedArray of the native context's random seeds).
  static const int kRandomSeedOffset =
      FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
  Register state = native_context;
  __ ldr(state, FieldMemOperand(native_context, kRandomSeedOffset));

  // Load state[0].
  Register state0 = ToRegister(instr->scratch());
  __ ldr(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
  // Load state[1].
  Register state1 = ToRegister(instr->scratch2());
  __ ldr(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));

  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
  Register scratch3 = ToRegister(instr->scratch3());
  Register scratch4 = scratch0();
  __ and_(scratch3, state0, Operand(0xFFFF));
  __ mov(scratch4, Operand(18273));
  __ mul(scratch3, scratch3, scratch4);
  __ add(state0, scratch3, Operand(state0, LSR, 16));
  // Save state[0].
  __ str(state0, FieldMemOperand(state, ByteArray::kHeaderSize));

  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
  __ and_(scratch3, state1, Operand(0xFFFF));
  __ mov(scratch4, Operand(36969));
  __ mul(scratch3, scratch3, scratch4);
  __ add(state1, scratch3, Operand(state1, LSR, 16));
  // Save state[1].
  __ str(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));

  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
  Register random = scratch4;
  __ and_(random, state1, Operand(0x3FFFF));
  __ add(random, random, Operand(state0, LSL, 14));

  // 0x41300000 is the top half of 1.0 x 2^20 as a double.
  // Create this constant using mov/orr to avoid PC relative load.
  __ mov(scratch3, Operand(0x41000000));
  __ orr(scratch3, scratch3, Operand(0x300000));
  // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
  DwVfpRegister result = ToDoubleRegister(instr->result());
  __ vmov(result, random, scratch3);
  // Move 0x4130000000000000 to VFP.
  __ mov(scratch4, Operand::Zero());
  DwVfpRegister scratch5 = double_scratch0();
  __ vmov(scratch5, scratch4, scratch3);
  // Subtract to get a double in [0, 1) from the raw mantissa bits.
  __ vsub(result, result, scratch5);
}
|
|
|
|
|
|
|
|
|
|
|
2012-11-26 13:12:35 +00:00
|
|
|
|
// Generates code for Math.exp by delegating to MathExpGenerator.
void LCodeGen::DoMathExp(LMathExp* instr) {
  DwVfpRegister value_reg = ToDoubleRegister(instr->value());
  DwVfpRegister result_reg = ToDoubleRegister(instr->result());
  DwVfpRegister dbl_temp1 = ToDoubleRegister(instr->double_temp());
  DwVfpRegister dbl_temp2 = double_scratch0();
  Register int_temp1 = ToRegister(instr->temp1());
  Register int_temp2 = ToRegister(instr->temp2());

  MathExpGenerator::EmitMathExp(
      masm(), value_reg, result_reg, dbl_temp1, dbl_temp2,
      int_temp1, int_temp2, scratch0());
}
|
|
|
|
|
|
|
|
|
|
|
2013-04-11 13:27:06 +00:00
|
|
|
|
// Generates code for Math.log via the transcendental cache stub.
void LCodeGen::DoMathLog(LMathLog* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(d2));
  // Set the context register to a GC-safe fake value. Clobbering it is
  // OK because this instruction is marked as a call.
  __ mov(cp, Operand::Zero());
  TranscendentalCacheStub cache_stub(TranscendentalCache::LOG,
                                     TranscendentalCacheStub::UNTAGGED);
  CallCode(cache_stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
|
|
|
|
|
|
|
|
|
|
|
2013-04-11 13:27:06 +00:00
|
|
|
|
// Generates code for Math.tan via the transcendental cache stub.
void LCodeGen::DoMathTan(LMathTan* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(d2));
  // Set the context register to a GC-safe fake value. Clobbering it is
  // OK because this instruction is marked as a call.
  __ mov(cp, Operand::Zero());
  TranscendentalCacheStub cache_stub(TranscendentalCache::TAN,
                                     TranscendentalCacheStub::UNTAGGED);
  CallCode(cache_stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
|
|
|
|
|
|
|
|
|
|
|
2013-04-11 13:27:06 +00:00
|
|
|
|
// Generates code for Math.cos via the transcendental cache stub.
void LCodeGen::DoMathCos(LMathCos* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(d2));
  // Set the context register to a GC-safe fake value. Clobbering it is
  // OK because this instruction is marked as a call.
  __ mov(cp, Operand::Zero());
  TranscendentalCacheStub cache_stub(TranscendentalCache::COS,
                                     TranscendentalCacheStub::UNTAGGED);
  CallCode(cache_stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
|
|
|
|
|
|
|
|
|
|
|
2013-04-11 13:27:06 +00:00
|
|
|
|
// Generates code for Math.sin via the transcendental cache stub.
void LCodeGen::DoMathSin(LMathSin* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(d2));
  // Set the context register to a GC-safe fake value. Clobbering it is
  // OK because this instruction is marked as a call.
  __ mov(cp, Operand::Zero());
  TranscendentalCacheStub cache_stub(TranscendentalCache::SIN,
                                     TranscendentalCacheStub::UNTAGGED);
  CallCode(cache_stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
|
|
|
|
|
|
|
|
|
|
|
2011-04-15 07:58:22 +00:00
|
|
|
|
// Generates code for invoking a function in r1. If the callee is known at
// compile time, a direct call is emitted; otherwise the generic invoke
// sequence (with argument-count adaptation) is used.
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->function()).is(r1));
  ASSERT(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    // Unknown target: generic invoke with a lazy-deopt safepoint.
    LPointerMap* pointers = instr->pointer_map();
    RecordPosition(pointers->position());
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
  } else {
    // Known target: r1 already holds the function.
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(),
                      instr,
                      CALL_AS_METHOD,
                      R1_CONTAINS_TARGET);
  }
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Generates code for a keyed call (e.g. obj[expr]()), dispatching through
// the keyed call initialize IC.
void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->result()).is(r0));

  int argc = instr->arity();
  Handle<Code> call_ic =
      isolate()->stub_cache()->ComputeKeyedCallInitialize(argc);
  CallCode(call_ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Generates code for a named property call (e.g. obj.foo()), dispatching
// through the call initialize IC with the name in r2.
void LCodeGen::DoCallNamed(LCallNamed* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->result()).is(r0));

  int argc = instr->arity();
  RelocInfo::Mode reloc_mode = RelocInfo::CODE_TARGET;
  Handle<Code> call_ic =
      isolate()->stub_cache()->ComputeCallInitialize(argc, reloc_mode);
  __ mov(r2, Operand(instr->name()));
  CallCode(call_ic, reloc_mode, instr, NEVER_INLINE_TARGET_ADDRESS);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Generates code calling the function value in r1 via CallFunctionStub.
void LCodeGen::DoCallFunction(LCallFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->function()).is(r1));
  ASSERT(ToRegister(instr->result()).is(r0));

  CallFunctionStub call_stub(instr->arity(), NO_CALL_FUNCTION_FLAGS);
  CallCode(call_stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Generates code for a call to a global variable, dispatching through the
// call initialize IC in context mode with the name in r2.
void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->result()).is(r0));

  int argc = instr->arity();
  RelocInfo::Mode reloc_mode = RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> call_ic =
      isolate()->stub_cache()->ComputeCallInitialize(argc, reloc_mode);
  __ mov(r2, Operand(instr->name()));
  CallCode(call_ic, reloc_mode, instr, NEVER_INLINE_TARGET_ADDRESS);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Generates a direct call to a global function whose target is known at
// compile time. r1 is not preset; CallKnownFunction loads the target.
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(r0));
  CallKnownFunction(instr->hydrogen()->target(),
                    instr->hydrogen()->formal_parameter_count(),
                    instr->arity(),
                    instr,
                    CALL_AS_FUNCTION,
                    R1_UNINITIALIZED);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Generates code for a `new` expression with the constructor in r1,
// calling CallConstructStub with the arity in r0.
void LCodeGen::DoCallNew(LCallNew* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->constructor()).is(r1));
  ASSERT(ToRegister(instr->result()).is(r0));

  __ mov(r0, Operand(instr->arity()));
  // No cell in r2 for construct type feedback in optimized code; pass
  // undefined instead.
  Handle<Object> undefined_value(isolate()->factory()->undefined_value());
  __ mov(r2, Operand(undefined_value));
  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
|
|
|
|
|
|
|
|
|
|
|
2013-03-01 16:06:34 +00:00
|
|
|
|
// Generates code for `new Array(...)`, selecting the array constructor
// stub by argument count and elements kind. The allocation-site feedback
// cell goes in r2, the arity in r0.
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->constructor()).is(r1));
  ASSERT(ToRegister(instr->result()).is(r0));

  __ mov(r0, Operand(instr->arity()));
  __ mov(r2, Operand(instr->hydrogen()->property_cell()));
  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;
  ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // A single non-zero argument is a length, which produces holes, so
      // the holey variant of the elements kind may be needed: inspect the
      // first (only) argument on the stack.
      __ ldr(r5, MemOperand(sp, 0));
      __ cmp(r5, Operand::Zero());
      __ b(eq, &packed_case);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
                                              override_mode);
      CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
      __ jmp(&done);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  }
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Generates a call to the given C++ runtime function.
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}
|
|
|
|
|
|
|
|
|
|
|
2013-08-27 11:55:08 +00:00
|
|
|
|
// Stores a code object's entry address (past the Code header) into a
// JSFunction's code-entry field. Clobbers the code_object register.
void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register target_function = ToRegister(instr->function());
  Register entry = ToRegister(instr->code_object());
  // Convert the tagged Code pointer into the raw instruction start.
  __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ str(entry,
         FieldMemOperand(target_function, JSFunction::kCodeEntryOffset));
}
|
|
|
|
|
|
|
|
|
|
|
2013-03-13 11:05:48 +00:00
|
|
|
|
// Computes the address of an object allocated inside another allocation:
// result = base_object + constant offset.
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register dst = ToRegister(instr->result());
  Register base_obj = ToRegister(instr->base_object());
  __ add(dst, base_obj, Operand(instr->offset()));
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Generates code for a monomorphic named-field store. Handles external
// memory stores, heap-object/double field-representation checks, optional
// map transitions, and in-object vs. properties-backing-store slots, with
// write barriers where needed.
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  Register scratch = scratch0();
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    // Raw (untagged) store outside the JS heap; no write barrier.
    Register value = ToRegister(instr->value());
    MemOperand operand = MemOperand(object, offset);
    if (representation.IsByte()) {
      __ strb(value, operand);
    } else {
      __ str(value, operand);
    }
    return;
  }

  Handle<Map> transition = instr->transition();

  if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
    // Field is tracked as heap-object-only: deoptimize on a smi value
    // unless the value is statically known to be a heap object.
    Register value = ToRegister(instr->value());
    if (!instr->hydrogen()->value()->type().IsHeapObject()) {
      __ SmiTst(value);
      DeoptimizeIf(eq, instr->environment());
    }
  } else if (FLAG_track_double_fields && representation.IsDouble()) {
    // Unboxed double field: store the raw double in place and return.
    ASSERT(transition.is_null());
    ASSERT(access.IsInobject());
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    DwVfpRegister value = ToDoubleRegister(instr->value());
    __ vstr(value, FieldMemOperand(object, offset));
    return;
  }

  if (!transition.is_null()) {
    // Install the new map before the field store.
    __ mov(scratch, Operand(transition));
    __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      // Update the write barrier for the map field.
      __ RecordWriteField(object,
                          HeapObject::kMapOffset,
                          scratch,
                          temp,
                          GetLinkRegisterState(),
                          kSaveFPRegs,
                          OMIT_REMEMBERED_SET,
                          OMIT_SMI_CHECK);
    }
  }

  // Do the store.
  Register value = ToRegister(instr->value());
  ASSERT(!object.is(value));
  // The barrier's smi check can be omitted when the value is statically
  // known to be a heap object.
  SmiCheck check_needed =
      instr->hydrogen()->value()->IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  if (access.IsInobject()) {
    MemOperand operand = FieldMemOperand(object, offset);
    if (representation.IsByte()) {
      __ strb(value, operand);
    } else {
      __ str(value, operand);
    }
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the object for in-object properties.
      __ RecordWriteField(object,
                          offset,
                          value,
                          scratch,
                          GetLinkRegisterState(),
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
  } else {
    // Field lives in the out-of-object properties backing store.
    __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
    MemOperand operand = FieldMemOperand(scratch, offset);
    if (representation.IsByte()) {
      __ strb(value, operand);
    } else {
      __ str(value, operand);
    }
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the properties array.
      // object is used as a scratch register.
      __ RecordWriteField(scratch,
                          offset,
                          value,
                          object,
                          GetLinkRegisterState(),
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Generates a generic (megamorphic) named store through the StoreIC, with
// the receiver in r1, the value in r0, and the property name in r2.
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).is(r1));
  ASSERT(ToRegister(instr->value()).is(r0));

  // Name is always in r2.
  __ mov(r2, Operand(instr->name()));
  Handle<Code> store_ic;
  if (instr->strict_mode_flag() == kStrictMode) {
    store_ic = isolate()->builtins()->StoreIC_Initialize_Strict();
  } else {
    store_ic = isolate()->builtins()->StoreIC_Initialize();
  }
  CallCode(store_ic, RelocInfo::CODE_TARGET, instr,
           NEVER_INLINE_TARGET_ADDRESS);
}
|
|
|
|
|
|
|
|
|
|
|
2013-08-14 08:54:27 +00:00
|
|
|
|
void LCodeGen::ApplyCheckIf(Condition condition, LBoundsCheck* check) {
  // For bounds checks that were statically eliminated (skip_check()), debug
  // builds verify the elimination at runtime: trap instead of deoptimizing
  // if the supposedly-impossible condition fires. Otherwise, deoptimize on
  // the given condition as usual.
  const bool verify_eliminated_check =
      FLAG_debug_code && check->hydrogen()->skip_check();
  if (!verify_eliminated_check) {
    DeoptimizeIf(condition, check->environment());
    return;
  }
  Label check_passed;
  __ b(NegateCondition(condition), &check_passed);
  __ stop("eliminated bounds check failed");
  __ bind(&check_passed);
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Emits an index-vs-length comparison for a keyed access and applies the
// resulting check (deopt, or a debug-mode trap for eliminated checks) via
// ApplyCheckIf.
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  // Statically proven in-range: emit nothing.
  if (instr->hydrogen()->skip_check()) return;

  if (instr->index()->IsConstantOperand()) {
    int constant_index =
        ToInteger32(LConstantOperand::cast(instr->index()));
    // Materialize the constant index in ip, matching the length's
    // representation (smi-tagged or raw int32) so the compare is valid.
    if (instr->hydrogen()->length()->representation().IsSmi()) {
      __ mov(ip, Operand(Smi::FromInt(constant_index)));
    } else {
      __ mov(ip, Operand(constant_index));
    }
    __ cmp(ip, ToRegister(instr->length()));
  } else {
    __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
  }
  // Unsigned compare: hs (index >= length) fails; with allow_equality,
  // only hi (index > length) fails.
  Condition condition = instr->hydrogen()->allow_equality() ? hi : hs;
  ApplyCheckIf(condition, instr);
}
|
|
|
|
|
|
|
|
|
|
|
2012-11-02 09:18:53 +00:00
|
|
|
|
// Stores a keyed element into an external (typed) array backing store.
// Dispatches on the elements kind: float/double kinds go through VFP
// registers; integer kinds use byte/halfword/word stores.
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    // Reject constant indices whose scaled offset could not be encoded;
    // high nibble set means the shifted value would overflow.
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  // A smi key carries a tag bit, so shift one less to compensate.
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int additional_offset = instr->additional_index() << element_size_shift;

  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    Register address = scratch0();
    DwVfpRegister value(ToDoubleRegister(instr->value()));
    // Compute the element address; reuse external_pointer directly when the
    // constant key is zero to save an add.
    if (key_is_constant) {
      if (constant_key != 0) {
        __ add(address, external_pointer,
               Operand(constant_key << element_size_shift));
      } else {
        address = external_pointer;
      }
    } else {
      __ add(address, external_pointer, Operand(key, LSL, shift_size));
    }
    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
      // Narrow the double value to single precision before storing.
      __ vcvt_f32_f64(double_scratch0().low(), value);
      __ vstr(double_scratch0().low(), address, additional_offset);
    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
      __ vstr(value, address, additional_offset);
    }
  } else {
    Register value(ToRegister(instr->value()));
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size,
        instr->additional_index(), additional_offset);
    switch (elements_kind) {
      case EXTERNAL_PIXEL_ELEMENTS:
      case EXTERNAL_BYTE_ELEMENTS:
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
        __ strb(value, mem_operand);
        break;
      case EXTERNAL_SHORT_ELEMENTS:
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
        __ strh(value, mem_operand);
        break;
      case EXTERNAL_INT_ELEMENTS:
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
        __ str(value, mem_operand);
        break;
      // Float/double kinds were handled above; the remaining kinds are not
      // external arrays and cannot reach this helper.
      case EXTERNAL_FLOAT_ELEMENTS:
      case EXTERNAL_DOUBLE_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}
|
|
|
|
|
|
|
|
|
|
|
2012-11-02 09:18:53 +00:00
|
|
|
|
// Stores a double value into a FixedDoubleArray element, optionally
// canonicalizing NaNs so the array never contains signalling/alternative
// NaN bit patterns.
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  DwVfpRegister value = ToDoubleRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  DwVfpRegister double_scratch = double_scratch0();
  bool key_is_constant = instr->key()->IsConstantOperand();

  // Calculate the effective address of the slot in the array to store the
  // double value.
  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  if (key_is_constant) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    // Guard against constant offsets too large to scale safely.
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    __ add(scratch, elements,
           Operand((constant_key << element_size_shift) +
                   FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  } else {
    // A smi key carries a tag bit, so shift one less to compensate.
    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
        ? (element_size_shift - kSmiTagSize) : element_size_shift;
    __ add(scratch, elements,
           Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
    __ add(scratch, scratch,
           Operand(ToRegister(instr->key()), LSL, shift_size));
  }

  if (instr->NeedsCanonicalization()) {
    // Force a canonical NaN.
    if (masm()->emit_debug_code()) {
      // Debug check: the VFP default-NaN mode must be enabled for
      // canonicalization to behave as expected.
      __ vmrs(ip);
      __ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
      __ Assert(ne, kDefaultNaNModeNotSet);
    }
    __ VFPCanonicalizeNaN(double_scratch, value);
    __ vstr(double_scratch, scratch,
            instr->additional_index() << element_size_shift);
  } else {
    __ vstr(value, scratch, instr->additional_index() << element_size_shift);
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Stores a tagged value into a FixedArray element and emits the write
// barrier when the stored value may be a heap object.
void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
      : no_reg;
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = 0;

  // Do the store.
  if (instr->key()->IsConstantOperand()) {
    // The write-barrier path below requires a register key (it clobbers
    // 'key'), so a constant key implies no barrier is needed.
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
                                           instr->additional_index());
    store_base = elements;
  } else {
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
    } else {
      __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
    }
    offset = FixedArray::OffsetOfElementAt(instr->additional_index());
  }
  __ str(value, FieldMemOperand(store_base, offset));

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    // Skip the inline smi check when the value is statically known to be a
    // heap object.
    SmiCheck check_needed =
        instr->hydrogen()->value()->IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ add(key, store_base, Operand(offset - kHeapObjectTag));
    __ RecordWrite(elements,
                   key,
                   value,
                   GetLinkRegisterState(),
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed);
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // Dispatch a keyed store to the specialized emitter for its backing
  // store: external (typed) array, fixed double array, or plain fixed
  // array of tagged values.
  if (instr->is_external()) {
    DoStoreKeyedExternalArray(instr);
    return;
  }
  if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
    return;
  }
  DoStoreKeyedFixedArray(instr);
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  // Generic keyed store: delegate to the KeyedStoreIC stub. The IC calling
  // convention fixes the receiver in r2, the key in r1 and the value in
  // r0; the context must already be in cp.
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).is(r2));
  ASSERT(ToRegister(instr->key()).is(r1));
  ASSERT(ToRegister(instr->value()).is(r0));

  // Pick the strict- or sloppy-mode flavour of the uninitialized IC.
  Handle<Code> stub_code;
  if (instr->strict_mode_flag() == kStrictMode) {
    stub_code = isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
  } else {
    stub_code = isolate()->builtins()->KeyedStoreIC_Initialize();
  }
  CallCode(stub_code, RelocInfo::CODE_TARGET, instr,
           NEVER_INLINE_TARGET_ADDRESS);
}
|
|
|
|
|
|
|
|
|
|
|
2011-10-19 12:10:18 +00:00
|
|
|
|
// Transitions an object's elements kind. If the object's current map does
// not match the expected source map, nothing happens. Simple transitions
// just swap the map pointer; others call the TransitionElementsKindStub.
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
  Register scratch = scratch0();

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  // Only transition when the object still has the expected source map.
  Label not_applicable;
  __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
  __ cmp(scratch, Operand(from_map));
  __ b(ne, &not_applicable);

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    // The backing store layout is compatible: only the map changes.
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ mov(new_map_reg, Operand(to_map));
    __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                        scratch, GetLinkRegisterState(), kDontSaveFPRegs);
  } else {
    // The elements must be converted: call the stub with registers (and
    // doubles) preserved across the call.
    ASSERT(ToRegister(instr->context()).is(cp));
    PushSafepointRegistersScope scope(
        this, Safepoint::kWithRegistersAndDoubles);
    __ Move(r0, object_reg);
    __ Move(r1, to_map);
    TransitionElementsKindStub stub(from_kind, to_kind);
    __ CallStub(&stub);
    RecordSafepointWithRegistersAndDoubles(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  }
  __ bind(&not_applicable);
}
|
|
|
|
|
|
|
|
|
|
|
2013-02-04 12:01:59 +00:00
|
|
|
|
// Deoptimizes if the given JSArray is followed by an AllocationMemento.
// The eq condition is produced by TestJSArrayForAllocationMemento when a
// memento is found.
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  __ TestJSArrayForAllocationMemento(object, temp);
  DeoptimizeIf(eq, instr->environment());
}
|
|
|
|
|
|
|
|
|
|
|
2011-04-15 06:39:36 +00:00
|
|
|
|
// String concatenation: pushes both operands (left first, matching the
// StringAddStub's expected stack layout) and calls the stub.
void LCodeGen::DoStringAdd(LStringAdd* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  __ push(ToRegister(instr->left()));
  __ push(ToRegister(instr->right()));
  StringAddStub stub(instr->hydrogen()->flags());
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
|
|
|
|
|
|
|
|
|
|
|
2011-01-20 14:20:54 +00:00
|
|
|
|
// Loads the character code at a string index. The fast path is generated
// inline by StringCharLoadGenerator; complex string shapes fall through to
// the deferred runtime call (DoDeferredStringCharCodeAt).
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  // Deferred code object invoking the runtime slow path.
  class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStringCharCodeAt(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Slow path for DoStringCharCodeAt: calls Runtime::kStringCharCodeAt with
// registers preserved and untags the smi result into the result register.
void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, Operand::Zero());

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ mov(scratch, Operand(Smi::FromInt(const_index)));
    __ push(scratch);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
                          instr->context());
  // The runtime returns the char code as a smi in r0.
  __ AssertSmi(r0);
  __ SmiUntag(r0);
  __ StoreToSafepointRegisterSlot(r0, result);
}
|
|
|
|
|
|
|
|
|
|
|
2011-03-14 15:36:00 +00:00
|
|
|
|
// Converts a character code to a one-character string. The fast path looks
// up the single-character string cache; codes above the one-byte range or
// cache misses (undefined entries) go to the deferred runtime call.
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  // Deferred code object invoking the runtime slow path.
  class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  ASSERT(!char_code.is(result));

  // Codes beyond the one-byte range are not cached: take the slow path.
  __ cmp(char_code, Operand(String::kMaxOneByteCharCode));
  __ b(hi, deferred->entry());
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
  __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  // An undefined cache entry means the string is not cached yet.
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(result, ip);
  __ b(eq, deferred->entry());
  __ bind(deferred->exit());
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Slow path for DoStringCharFromCode: calls Runtime::kCharFromCode with
// registers preserved and stores the resulting string into the result slot.
void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, Operand::Zero());

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  // The runtime expects the char code as a smi argument.
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(r0, result);
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  // Converts a signed 32-bit integer (in a core register or spilled to a
  // stack slot) to a double in a VFP register, via a single-precision
  // scratch register.
  LOperand* src = instr->value();
  ASSERT(src->IsRegister() || src->IsStackSlot());
  LOperand* dst = instr->result();
  ASSERT(dst->IsDoubleRegister());
  SwVfpRegister conversion_scratch = double_scratch0().low();
  if (src->IsStackSlot()) {
    // Reload the spilled integer before transferring it to the VFP unit.
    Register tmp = scratch0();
    __ ldr(tmp, ToMemOperand(src));
    __ vmov(conversion_scratch, tmp);
  } else {
    __ vmov(conversion_scratch, ToRegister(src));
  }
  __ vcvt_f64_s32(ToDoubleRegister(dst), conversion_scratch);
}
|
|
|
|
|
|
|
|
|
|
|
2013-05-23 08:32:07 +00:00
|
|
|
|
// Tags an int32 as a smi, deoptimizing on overflow unless value-range
// analysis proved the input fits in a smi.
void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* output = instr->result();
  ASSERT(output->IsRegister());
  // SetCC makes the tagging shift set the overflow flag.
  __ SmiTag(ToRegister(output), ToRegister(input), SetCC);
  if (!instr->hydrogen()->value()->HasRange() ||
      !instr->hydrogen()->value()->range()->IsInSmiRange()) {
    DeoptimizeIf(vs, instr->environment());
  }
}
|
|
|
|
|
|
|
|
|
|
|
2012-08-22 15:44:17 +00:00
|
|
|
|
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  // Converts an unsigned 32-bit integer to a double in a VFP register via
  // a single-precision scratch register.
  LOperand* src = instr->value();
  LOperand* dst = instr->result();

  SwVfpRegister conversion_scratch = double_scratch0().low();
  __ vmov(conversion_scratch, ToRegister(src));
  __ vcvt_f64_u32(ToDoubleRegister(dst), conversion_scratch);
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Tags a signed int32 as a number: the fast path smi-tags it; on overflow
// the deferred path (DoDeferredNumberTagI) boxes it in a heap number.
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  // Deferred code object invoking the heap-number allocation slow path.
  class DeferredNumberTagI V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagI(instr_,
                                      instr_->value(),
                                      SIGNED_INT32);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagI* instr_;
  };

  Register src = ToRegister(instr->value());
  Register dst = ToRegister(instr->result());

  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
  // SetCC: the tagging shift sets the overflow flag if the value does not
  // fit in a smi.
  __ SmiTag(dst, src, SetCC);
  __ b(vs, deferred->entry());
  __ bind(deferred->exit());
}
|
|
|
|
|
|
|
|
|
|
|
2012-08-22 15:44:17 +00:00
|
|
|
|
// Tags an unsigned int32 as a number: values up to Smi::kMaxValue are
// smi-tagged inline; larger values go through the deferred heap-number
// allocation path (DoDeferredNumberTagI with UNSIGNED_INT32).
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  // Deferred code object invoking the heap-number allocation slow path.
  class DeferredNumberTagU V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagI(instr_,
                                      instr_->value(),
                                      UNSIGNED_INT32);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  // Unsigned compare: values above Smi::kMaxValue need a heap number.
  __ cmp(reg, Operand(Smi::kMaxValue));
  __ b(hi, deferred->entry());
  __ SmiTag(reg, reg);
  __ bind(deferred->exit());
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
|
2012-08-27 09:39:05 +00:00
|
|
|
|
LOperand* value,
|
2012-08-22 15:44:17 +00:00
|
|
|
|
IntegerSignedness signedness) {
|
2010-12-07 11:31:57 +00:00
|
|
|
|
Label slow;
|
2012-08-27 09:39:05 +00:00
|
|
|
|
Register src = ToRegister(value);
|
2012-01-27 14:55:20 +00:00
|
|
|
|
Register dst = ToRegister(instr->result());
|
2013-07-25 15:04:38 +00:00
|
|
|
|
LowDwVfpRegister dbl_scratch = double_scratch0();
|
2010-12-07 11:31:57 +00:00
|
|
|
|
|
|
|
|
|
// Preserve the value of all registers.
|
2011-04-07 13:32:45 +00:00
|
|
|
|
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
|
2010-12-07 11:31:57 +00:00
|
|
|
|
|
|
|
|
|
Label done;
|
2012-08-22 15:44:17 +00:00
|
|
|
|
if (signedness == SIGNED_INT32) {
|
|
|
|
|
// There was overflow, so bits 30 and 31 of the original integer
|
|
|
|
|
// disagree. Try to allocate a heap number in new space and store
|
|
|
|
|
// the value in there. If that fails, call the runtime system.
|
|
|
|
|
if (dst.is(src)) {
|
|
|
|
|
__ SmiUntag(src, dst);
|
|
|
|
|
__ eor(src, src, Operand(0x80000000));
|
|
|
|
|
}
|
2013-07-25 15:04:38 +00:00
|
|
|
|
__ vmov(dbl_scratch.low(), src);
|
|
|
|
|
__ vcvt_f64_s32(dbl_scratch, dbl_scratch.low());
|
2012-08-22 15:44:17 +00:00
|
|
|
|
} else {
|
2013-07-25 15:04:38 +00:00
|
|
|
|
__ vmov(dbl_scratch.low(), src);
|
|
|
|
|
__ vcvt_f64_u32(dbl_scratch, dbl_scratch.low());
|
2012-01-27 14:55:20 +00:00
|
|
|
|
}
|
2012-08-22 15:44:17 +00:00
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
if (FLAG_inline_new) {
|
2012-12-18 16:25:45 +00:00
|
|
|
|
__ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
|
|
|
|
|
__ AllocateHeapNumber(r5, r3, r4, scratch0(), &slow, DONT_TAG_RESULT);
|
2012-01-27 14:55:20 +00:00
|
|
|
|
__ Move(dst, r5);
|
2010-12-07 11:31:57 +00:00
|
|
|
|
__ b(&done);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Slow case: Call the runtime system to do the number allocation.
|
|
|
|
|
__ bind(&slow);
|
|
|
|
|
|
|
|
|
|
// TODO(3095996): Put a valid pointer value in the stack slot where the result
|
|
|
|
|
// register is stored, as this register is in the pointer map, but contains an
|
|
|
|
|
// integer value.
|
2013-01-07 09:43:12 +00:00
|
|
|
|
__ mov(ip, Operand::Zero());
|
2012-01-27 14:55:20 +00:00
|
|
|
|
__ StoreToSafepointRegisterSlot(ip, dst);
|
2013-09-27 13:59:28 +00:00
|
|
|
|
// NumberTagI and NumberTagD use the context from the frame, rather than
|
|
|
|
|
// the environment's HContext or HInlinedContext value.
|
|
|
|
|
// They only call Runtime::kAllocateHeapNumber.
|
|
|
|
|
// The corresponding HChange instructions are added in a phase that does
|
|
|
|
|
// not have easy access to the local context.
|
|
|
|
|
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
|
|
|
|
|
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
|
|
|
|
|
RecordSafepointWithRegisters(
|
|
|
|
|
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
|
2012-01-27 14:55:20 +00:00
|
|
|
|
__ Move(dst, r0);
|
2012-10-12 10:58:25 +00:00
|
|
|
|
__ sub(dst, dst, Operand(kHeapObjectTag));
|
2010-12-07 11:31:57 +00:00
|
|
|
|
|
|
|
|
|
// Done. Put the value in dbl_scratch into the value of the allocated heap
|
|
|
|
|
// number.
|
|
|
|
|
__ bind(&done);
|
2013-04-07 04:34:20 +00:00
|
|
|
|
__ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
|
2012-10-12 10:58:25 +00:00
|
|
|
|
__ add(dst, dst, Operand(kHeapObjectTag));
|
2012-01-27 14:55:20 +00:00
|
|
|
|
__ StoreToSafepointRegisterSlot(dst, dst);
|
2010-12-07 11:31:57 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Boxes a double value in a newly allocated heap number. Inline allocation
// when enabled; otherwise (or on allocation failure) the deferred path
// calls the runtime allocator.
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  // Deferred code object invoking the runtime allocation slow path.
  class DeferredNumberTagD V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagD(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  DwVfpRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = scratch0();
  Register reg = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    // We want the untagged address first for performance
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
                          DONT_TAG_RESULT);
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  // Both paths leave an untagged heap-number address in reg (the deferred
  // path strips the tag before returning here).
  __ vstr(input_reg, reg, HeapNumber::kValueOffset);
  // Now that we have finished with the object's real address tag it
  __ add(reg, reg, Operand(kHeapObjectTag));
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Deferred path for DoNumberTagD: allocates a heap number via the runtime,
// with registers preserved, and stores its UNTAGGED address into the result
// slot (the fast path re-tags after writing the double payload).
void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ mov(reg, Operand::Zero());

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  // Strip the tag to match the fast path's untagged-address convention.
  __ sub(r0, r0, Operand(kHeapObjectTag));
  __ StoreToSafepointRegisterSlot(r0, reg);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Tags an integer as a Smi. Overflow must have been ruled out by the
// hydrogen graph (asserted below), so no overflow check is emitted.
void LCodeGen::DoSmiTag(LSmiTag* instr) {
  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
  __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Untags a Smi into an integer. When the input is not statically known to
// be a Smi, SmiUntag's shift-out sets the carry flag for heap objects and
// we deoptimize on that condition.
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  if (instr->needs_check()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    // If the input is a HeapObject, SmiUntag will set the carry flag.
    __ SmiUntag(result, input, SetCC);
    DeoptimizeIf(cs, instr->environment());
  } else {
    __ SmiUntag(result, input);
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Loads a tagged value (Smi or heap number, per |mode|) into |result_reg|
// as a double. Depending on flags, undefined may convert to NaN and -0.0
// may trigger deoptimization. |env| is the deopt environment to use.
void LCodeGen::EmitNumberUntagD(Register input_reg,
                                DwVfpRegister result_reg,
                                bool can_convert_undefined_to_nan,
                                bool deoptimize_on_minus_zero,
                                LEnvironment* env,
                                NumberUntagDMode mode) {
  Register scratch = scratch0();
  SwVfpRegister flt_scratch = double_scratch0().low();
  ASSERT(!result_reg.is(double_scratch0()));
  Label convert, load_smi, done;
  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
    // Heap number map check.
    __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
    __ cmp(scratch, Operand(ip));
    if (can_convert_undefined_to_nan) {
      __ b(ne, &convert);
    } else {
      // Only heap numbers are acceptable here.
      DeoptimizeIf(ne, env);
    }
    // load heap number
    __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
    if (deoptimize_on_minus_zero) {
      // -0.0 has a zero low word and the sign bit set in the high word.
      __ VmovLow(scratch, result_reg);
      __ cmp(scratch, Operand::Zero());
      __ b(ne, &done);
      __ VmovHigh(scratch, result_reg);
      __ cmp(scratch, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(eq, env);
    }
    __ jmp(&done);
    if (can_convert_undefined_to_nan) {
      __ bind(&convert);
      // Convert undefined (and hole) to NaN.
      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
      __ cmp(input_reg, Operand(ip));
      DeoptimizeIf(ne, env);
      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
      __ jmp(&done);
    }
  } else {
    __ SmiUntag(scratch, input_reg);
    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
  }
  // Smi to double register conversion
  __ bind(&load_smi);
  // scratch: untagged value of input_reg
  __ vmov(flt_scratch, scratch);
  __ vcvt_f64_s32(result_reg, flt_scratch);
  __ bind(&done);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Deferred path for TaggedToI: reached only when the fast path's optimistic
// SmiUntag found a heap object (carry set). Recovers the original tagged
// value, then converts a heap number (or, for truncating conversions,
// undefined/true/false) to an int32 in |input_reg|; deoptimizes otherwise.
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Register input_reg = ToRegister(instr->value());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  LowDwVfpRegister double_scratch = double_scratch0();
  DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2());

  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));

  Label done;

  // The input was optimistically untagged; revert it.
  // The carry flag is set when we reach this deferred code as we just executed
  // SmiUntag(heap_object, SetCC)
  STATIC_ASSERT(kHeapObjectTag == 1);
  // adc computes input_reg * 2 + 1 (carry), restoring the tagged pointer
  // into scratch2 without clobbering input_reg.
  __ adc(scratch2, input_reg, Operand(input_reg));

  // Heap number map check.
  __ ldr(scratch1, FieldMemOperand(scratch2, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
  __ cmp(scratch1, Operand(ip));

  if (instr->truncating()) {
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    Label no_heap_number, check_bools, check_false;
    __ b(ne, &no_heap_number);
    __ TruncateHeapNumberToI(input_reg, scratch2);
    __ b(&done);

    // Check for Oddballs. Undefined/False is converted to zero and True to one
    // for truncating conversions.
    __ bind(&no_heap_number);
    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
    __ cmp(scratch2, Operand(ip));
    __ b(ne, &check_bools);
    __ mov(input_reg, Operand::Zero());
    __ b(&done);

    __ bind(&check_bools);
    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
    __ cmp(scratch2, Operand(ip));
    __ b(ne, &check_false);
    __ mov(input_reg, Operand(1));
    __ b(&done);

    __ bind(&check_false);
    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
    __ cmp(scratch2, Operand(ip));
    // Not a heap number and not one of the accepted oddballs: deopt.
    DeoptimizeIf(ne, instr->environment());
    __ mov(input_reg, Operand::Zero());
    __ b(&done);
  } else {
    // Deoptimize if we don't have a heap number.
    DeoptimizeIf(ne, instr->environment());

    __ sub(ip, scratch2, Operand(kHeapObjectTag));
    __ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
    __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
    // Deoptimize if the double was not exactly representable as int32.
    DeoptimizeIf(ne, instr->environment());

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Result 0 may have come from -0.0; check the sign bit and deopt.
      __ cmp(input_reg, Operand::Zero());
      __ b(ne, &done);
      __ VmovHigh(scratch1, double_scratch2);
      __ tst(scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment());
    }
  }
  __ bind(&done);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Converts a tagged value to an int32 in place. Smis take the inline fast
// path; heap objects branch to DoDeferredTaggedToI via the carry flag set
// by the optimistic SmiUntag.
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI V8_FINAL : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredTaggedToI(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  ASSERT(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    // Statically known Smi: no check needed.
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

    // Optimistically untag the input.
    // If the input is a HeapObject, SmiUntag will set the carry flag.
    __ SmiUntag(input_reg, SetCC);
    // Branch to deferred code if the input was tagged.
    // The deferred code will take care of restoring the tag.
    __ b(cs, deferred->entry());
    __ bind(deferred->exit());
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Converts a tagged number to a double register by delegating to
// EmitNumberUntagD, choosing the untag mode from the hydrogen
// representation of the input value.
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DwVfpRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(input_reg, result_reg,
                   instr->hydrogen()->can_convert_undefined_to_nan(),
                   instr->hydrogen()->deoptimize_on_minus_zero(),
                   instr->environment(),
                   mode);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Converts a double register to int32. Truncating conversions use the
// macro-assembler helper; exact conversions deoptimize when the double is
// not an exact int32 (and optionally on -0.0).
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DwVfpRegister double_input = ToDoubleRegister(instr->value());
  LowDwVfpRegister double_scratch = double_scratch0();

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
    // Deoptimize if the input wasn't a int32 (inside a double).
    DeoptimizeIf(ne, instr->environment());
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // A zero result may stem from -0.0; inspect the sign bit.
      Label done;
      __ cmp(result_reg, Operand::Zero());
      __ b(ne, &done);
      __ VmovHigh(scratch1, double_input);
      __ tst(scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment());
      __ bind(&done);
    }
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Converts a double register to a Smi. Same conversion logic as
// DoDoubleToI, plus a final SmiTag that deoptimizes on overflow (vs set).
void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DwVfpRegister double_input = ToDoubleRegister(instr->value());
  LowDwVfpRegister double_scratch = double_scratch0();

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
    // Deoptimize if the input wasn't a int32 (inside a double).
    DeoptimizeIf(ne, instr->environment());
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // A zero result may stem from -0.0; inspect the sign bit.
      Label done;
      __ cmp(result_reg, Operand::Zero());
      __ b(ne, &done);
      __ VmovHigh(scratch1, double_input);
      __ tst(scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment());
      __ bind(&done);
    }
  }
  // Tag the result; overflow (value outside Smi range) sets V.
  __ SmiTag(result_reg, SetCC);
  DeoptimizeIf(vs, instr->environment());
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Deoptimizes unless the input is a Smi (tag bit clear).
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ SmiTst(ToRegister(input));
  DeoptimizeIf(ne, instr->environment());
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Deoptimizes if the input is a Smi. Skipped entirely when hydrogen already
// proved the value is a heap object.
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->IsHeapObject()) {
    LOperand* input = instr->value();
    __ SmiTst(ToRegister(input));
    DeoptimizeIf(eq, instr->environment());
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Deoptimizes unless the input object's instance type satisfies the check:
// either an [first, last] interval test or a mask/tag bit test, as encoded
// by the hydrogen instruction.
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmp(scratch, Operand(first));

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr->environment());
    } else {
      DeoptimizeIf(lo, instr->environment());
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmp(scratch, Operand(last));
        DeoptimizeIf(hi, instr->environment());
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (IsPowerOf2(mask)) {
      // Single-bit mask: a tst against the mask suffices; the expected
      // condition depends on whether the tag bit should be set or clear.
      ASSERT(tag == 0 || IsPowerOf2(tag));
      __ tst(scratch, Operand(mask));
      DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
    } else {
      __ and_(scratch, scratch, Operand(mask));
      __ cmp(scratch, Operand(tag));
      DeoptimizeIf(ne, instr->environment());
    }
  }
}
|
|
|
|
|
|
|
|
|
|
|
2013-08-28 14:16:57 +00:00
|
|
|
|
// Deoptimizes unless the input equals the expected heap object. Objects in
// new space may move, so they are compared through a Cell that the GC keeps
// up to date; old-space objects are compared against the handle directly.
void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  AllowDeferredHandleDereference smi_check;
  if (isolate()->heap()->InNewSpace(*object)) {
    // Note: the redundant inner re-declaration of |reg| that shadowed the
    // identical outer one has been removed; the value is unchanged.
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ mov(ip, Operand(Handle<Object>(cell)));
    __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
    __ cmp(reg, ip);
  } else {
    __ cmp(reg, Operand(object));
  }
  DeoptimizeIf(ne, instr->environment());
}
|
|
|
|
|
|
|
|
|
|
|
2013-08-05 16:42:39 +00:00
|
|
|
|
// Deferred path for CheckMaps with a migration target: calls the runtime to
// migrate |object| to its up-to-date map, then deoptimizes if migration
// failed (runtime returned a Smi instead of the migrated object).
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
    __ push(object);
    // kMigrateInstance does not use the context; clear cp for the call.
    __ mov(cp, Operand::Zero());
    __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(r0, scratch0());
  }
  // A Smi result signals migration failure.
  __ tst(scratch0(), Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment());
}
|
|
|
|
|
|
|
|
|
|
|
2012-03-23 16:37:54 +00:00
|
|
|
|
// Deoptimizes unless the input object's map is one of the maps in the
// hydrogen map set. If any map has a migration target, failures first try
// instance migration (deferred code loops back to check_maps) before
// deoptimizing.
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps V8_FINAL : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      // Re-enter at the map check after migration, not after the check.
      SetExit(check_maps());
    }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->CanOmitMapChecks()) return;
  Register map_reg = scratch0();

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);

  __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->has_migration_target()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  // NOTE(review): the loop below assumes map_set is non-empty — confirm this
  // invariant is guaranteed by the hydrogen instruction.
  UniqueSet<Map> map_set = instr->hydrogen()->map_set();
  Label success;
  for (int i = 0; i < map_set.size() - 1; i++) {
    Handle<Map> map = map_set.at(i).handle();
    __ CompareMap(map_reg, map, &success);
    __ b(eq, &success);
  }

  // The last map gets special handling: mismatch either enters the
  // migration path or deoptimizes directly.
  Handle<Map> map = map_set.at(map_set.size() - 1).handle();
  __ CompareMap(map_reg, map, &success);
  if (instr->hydrogen()->has_migration_target()) {
    __ b(ne, deferred->entry());
  } else {
    DeoptimizeIf(ne, instr->environment());
  }

  __ bind(&success);
}
|
|
|
|
|
|
|
|
|
|
|
2011-05-17 07:22:01 +00:00
|
|
|
|
// Clamps a double value to the uint8 range [0, 255].
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Clamps an integer value to the uint8 range [0, 255].
void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register unclamped_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampUint8(result_reg, unclamped_reg);
}
|
|
|
|
|
|
|
|
|
|
|
2011-05-17 07:22:01 +00:00
|
|
|
|
// Clamps a tagged value to the uint8 range. Smis and heap numbers are
// clamped; undefined becomes zero; anything else deoptimizes.
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);

  // Check for heap number
  __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ cmp(scratch, Operand(factory()->heap_number_map()));
  __ b(eq, &heap_number);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ cmp(input_reg, Operand(factory()->undefined_value()));
  DeoptimizeIf(ne, instr->environment());
  __ mov(result_reg, Operand::Zero());
  __ jmp(&done);

  // Heap number
  __ bind(&heap_number);
  __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
  __ jmp(&done);

  // smi
  __ bind(&is_smi);
  __ ClampUint8(result_reg, result_reg);

  __ bind(&done);
}
|
|
|
|
|
|
|
|
|
|
|
2013-02-04 12:01:59 +00:00
|
|
|
|
// Inline-allocates an object of the requested size (constant or register),
// falling back to DoDeferredAllocate via deferred code on failure.
// Optionally pre-fills the allocation with one-pointer filler maps.
void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate V8_FINAL : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredAllocate(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
  }

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size,
                result,
                scratch,
                scratch2,
                deferred->entry(),
                flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ mov(scratch, Operand(size));
    } else {
      scratch = ToRegister(instr->size());
    }
    // Walk backwards from the last word, storing the filler map at each
    // slot; the object is temporarily untagged for the store addressing.
    __ sub(scratch, scratch, Operand(kPointerSize));
    __ sub(result, result, Operand(kHeapObjectTag));
    Label loop;
    __ bind(&loop);
    __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    __ str(scratch2, MemOperand(result, scratch));
    __ sub(scratch, scratch, Operand(kPointerSize));
    // Use Operand::Zero() for consistency with the rest of this file.
    __ cmp(scratch, Operand::Zero());
    __ b(ge, &loop);
    __ add(result, result, Operand(kHeapObjectTag));
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Deferred (slow) path for DoAllocate: calls the appropriate runtime
// allocation function for the target space with the Smi-tagged size, and
// stores the result into the safepoint slot of the result register.
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, Operand(Smi::FromInt(0)));

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    ASSERT(!size.is(result));
    // Runtime expects the size as a Smi on the stack.
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ Push(Smi::FromInt(size));
  }

  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr,
                            instr->context());
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr,
                            instr->context());
  } else {
    CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr,
                            instr->context());
  }
  __ StoreToSafepointRegisterSlot(r0, result);
}
|
|
|
|
|
|
|
|
|
|
|
2011-03-21 12:25:31 +00:00
|
|
|
|
// Converts the object's properties to fast mode via the runtime; the input
// is expected in r0 and the result comes back in r0.
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->value()).is(r0));
  __ push(r0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Materializes a regexp literal: looks up the cached literal in the
// literals array, creating it through the runtime on first use, then makes
// a shallow copy of the JSRegExp (inline-allocated when possible).
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // r6 = literals array.
  // r1 = regexp literal.
  // r0 = regexp literal clone.
  // r2-5 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ LoadHeapObject(r6, instr->hydrogen()->literals());
  __ ldr(r1, FieldMemOperand(r6, literal_offset));
  // Undefined in the literal slot means not yet materialized.
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r1, ip);
  __ b(ne, &materialized);

  // Create regexp literal using runtime function
  // Result will be in r0.
  __ mov(r5, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ mov(r4, Operand(instr->hydrogen()->pattern()));
  __ mov(r3, Operand(instr->hydrogen()->flags()));
  __ Push(r6, r5, r4, r3);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(r1, r0);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ mov(r0, Operand(Smi::FromInt(size)));
  __ Push(r1, r0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(r1);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Creates a closure for a function literal: the FastNewClosureStub handles
// the common non-pretenured, no-literals case; otherwise the runtime is
// called with the pretenure flag.
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(instr->hydrogen()->language_mode(),
                            instr->hydrogen()->is_generator());
    __ mov(r2, Operand(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  } else {
    __ mov(r2, Operand(instr->hydrogen()->shared_info()));
    __ mov(r1, Operand(pretenure ? factory()->true_value()
                                 : factory()->false_value()));
    __ Push(cp, r2, r1);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Implements the generic typeof operator by delegating to the runtime:
// the operand is pushed and Runtime::kTypeof produces the type string.
void LCodeGen::DoTypeof(LTypeof* instr) {
  Register value = ToRegister(instr->value());
  __ push(value);
  CallRuntime(Runtime::kTypeof, 1, instr);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Branches on whether typeof(input) equals a literal type name.
// EmitTypeofIs emits the comparison and may branch directly to the
// true/false labels; if it returns a condition, the final branch is
// emitted here.
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
                                                  instr->FalseLabel(chunk_),
                                                  input,
                                                  instr->type_literal());
  // kNoCondition means EmitTypeofIs already jumped unconditionally
  // (unknown type name case), so no branch is needed here.
  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition);
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Emits the comparison code for "typeof input == type_name". Returns the
// condition on which the caller should branch to true_label, or
// kNoCondition if this function has already branched unconditionally.
// May also branch directly to true_label/false_label for early-decided
// cases. NOTE: clobbers the input register (it is reused to hold the
// map in several branches) as well as scratch0() and ip.
Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name) {
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  if (type_name->Equals(heap()->number_string())) {
    // Smis and heap numbers are both "number".
    __ JumpIfSmi(input, true_label);
    __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
    __ cmp(input, Operand(ip));
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, input, scratch, FIRST_NONSTRING_TYPE);
    __ b(ge, false_label);
    // Undetectable strings are not "string".
    __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
    __ tst(ip, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, input, scratch, SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->boolean_string())) {
    // Only the true and false oddballs are "boolean".
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ b(eq, true_label);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = eq;

  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
    // With harmony typeof semantics, typeof null == "null".
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->undefined_string())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ b(eq, true_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
    __ tst(ip, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = ne;

  } else if (type_name->Equals(heap()->function_string())) {
    // Both JS functions and function proxies report "function".
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE);
    __ b(eq, true_label);
    __ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE));
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->object_string())) {
    __ JumpIfSmi(input, false_label);
    if (!FLAG_harmony_typeof) {
      // Classic semantics: typeof null == "object".
      __ CompareRoot(input, Heap::kNullValueRootIndex);
      __ b(eq, true_label);
    }
    // Non-callable spec objects are "object"; callables were handled above.
    __ CompareObjectType(input, input, scratch,
                         FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ b(lt, false_label);
    __ CompareInstanceType(input, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ b(gt, false_label);
    // Check for undetectable objects => false.
    __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
    __ tst(ip, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = eq;

  } else {
    // Unknown type name: never matches.
    __ b(false_label);
  }

  return final_branch_condition;
}
|
|
|
|
|
|
|
|
|
|
|
2011-02-08 10:08:47 +00:00
|
|
|
|
// Branches on whether the current function was invoked as a constructor
// (i.e. via 'new'). EmitIsConstructCall leaves the comparison flags set
// so that eq means "is a construct call".
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->temp());

  EmitIsConstructCall(temp1, scratch0());
  EmitBranch(instr, eq);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Sets the condition flags so that eq holds iff the calling frame is a
// construct frame. Walks up past an arguments adaptor frame if present,
// then compares the frame marker against StackFrame::CONSTRUCT.
// Clobbers both temp registers.
void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  ASSERT(!temp1.is(temp2));
  // Get the frame pointer for the calling frame.
  __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(ne, &check_frame_marker);
  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
  __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}
|
|
|
|
|
|
|
|
|
|
|
2013-10-02 11:43:41 +00:00
|
|
|
|
// Pads the instruction stream with nops so that at least space_needed
// bytes separate this point from the last lazy-deopt patch site,
// guaranteeing the deoptimizer can later patch a call here without
// overwriting the previous patch site. No-op for stubs (they don't
// lazy-deopt this way).
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (info()->IsStub()) return;
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
  int current_pc = masm()->pc_offset();
  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
    // Block literal pool emission for duration of padding.
    // A constant pool in the middle of the padding would break the
    // fixed-size patchable region.
    Assembler::BlockConstPoolScope block_const_pool(masm());
    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
    ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
    while (padding_size > 0) {
      __ nop();
      padding_size -= Assembler::kInstrSize;
    }
  }
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Records a lazy-deoptimization point: reserves patchable space, records
// the pc, and registers the environment so the deoptimizer can
// reconstruct the frame if this point is patched.
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  last_lazy_deopt_pc_ = masm()->pc_offset();
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Emits an unconditional deoptimization of the given bailout type.
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  // Record the reason in the generated code comments for debugging.
  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
  // 'al' = always: deoptimize unconditionally.
  DeoptimizeIf(al, instr->environment(), type);
}
|
|
|
|
|
|
|
|
|
|
|
2013-01-17 14:07:47 +00:00
|
|
|
|
// LDummyUse only keeps its operand alive in the register allocator;
// it generates no code.
void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Intentionally empty.
}
|
|
|
|
|
|
|
|
|
|
|
2011-06-27 12:12:27 +00:00
|
|
|
|
// Deferred (out-of-line) part of a stack check: calls the StackGuard
// runtime function with all registers saved, then records the safepoint
// and lazy-deopt index so execution can resume or deoptimize correctly.
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
// Emits a stack-overflow check. At function entry the check calls the
// StackCheck builtin inline; at backward branches (loops) the slow path
// is moved to deferred code so the hot path is a compare and branch.
void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck V8_FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStackCheck(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmp(sp, Operand(ip));
    __ b(hs, &done);
    // The call sequence must have a fixed size so it can be patched.
    PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
    ASSERT(instr->context()->IsRegister());
    ASSERT(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    last_lazy_deopt_pc_ = masm()->pc_offset();
    __ bind(&done);
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmp(sp, Operand(ip));
    __ b(lo, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    last_lazy_deopt_pc_ = masm()->pc_offset();
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// On-stack-replacement entry point.
void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}
|
|
|
|
|
|
|
|
|
|
|
2012-02-22 12:47:42 +00:00
|
|
|
|
// Prepares the map/enum-cache for a for-in loop over the object in r0.
// Deoptimizes for the cases the optimized for-in cannot handle
// (undefined/null receiver, smis, proxies, or an unusable enum cache),
// and leaves the map (or the runtime-produced descriptor) in r0.
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  // Deopt if the receiver is undefined.
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r0, ip);
  DeoptimizeIf(eq, instr->environment());

  // Deopt if the receiver is null.
  Register null_value = r5;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ cmp(r0, null_value);
  DeoptimizeIf(eq, instr->environment());

  // Deopt on smis; they have no properties to enumerate.
  __ SmiTst(r0);
  DeoptimizeIf(eq, instr->environment());

  // Deopt on JS proxies; their enumeration goes through traps.
  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
  DeoptimizeIf(le, instr->environment());

  Label use_cache, call_runtime;
  // Fast path: the whole prototype chain has a valid enum cache.
  __ CheckEnumCache(null_value, &call_runtime);

  __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ b(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(r0);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  // The runtime must have returned a map (its map is the meta map);
  // otherwise deoptimize.
  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
  __ cmp(r1, ip);
  DeoptimizeIf(ne, instr->environment());
  __ bind(&use_cache);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Loads the enum-cache array for a for-in loop from the given map.
// If the map's EnumLength is zero the empty fixed array is used;
// otherwise the cache is fetched from the descriptor array, and a null
// cache entry causes a deopt.
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ cmp(result, Operand(Smi::FromInt(0)));
  __ b(ne, &load_cache);
  // No enumerable properties: use the canonical empty fixed array.
  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ ldr(result,
         FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ ldr(result,
         FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  // A zero (null) cache means the cache is invalid => deoptimize.
  __ cmp(result, Operand::Zero());
  DeoptimizeIf(eq, instr->environment());

  __ bind(&done);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Deoptimizes unless the object's current map equals the expected map.
// Used by for-in to detect that the receiver's shape changed mid-loop.
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register obj = ToRegister(instr->value());
  Register expected_map = ToRegister(instr->map());
  __ ldr(scratch0(), FieldMemOperand(obj, HeapObject::kMapOffset));
  __ cmp(expected_map, scratch0());
  DeoptimizeIf(ne, instr->environment());
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Loads a property by its field index. Non-negative (smi) indices are
// in-object fields; negative indices address the out-of-object
// properties backing store.
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  Label out_of_object, done;
  __ cmp(index, Operand::Zero());
  __ b(lt, &out_of_object);

  // In-object: field lives at object + header + index * kPointerSize.
  __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index));
  __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ b(&done);

  __ bind(&out_of_object);
  __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to negated out of object property index plus 1.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
  __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
  __ ldr(result, FieldMemOperand(scratch,
                                 FixedArray::kHeaderSize - kPointerSize));
  __ bind(&done);
}
|
2011-04-26 15:22:44 +00:00
|
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
|
#undef __
|
|
|
|
|
|
|
|
|
|
} } // namespace v8::internal
|