// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_IA32)

#include "ia32/lithium-codegen-ia32.h"
#include "code-stubs.h"
#include "deoptimizer.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {


// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     int deoptimization_index)
      : codegen_(codegen),
        pointers_(pointers),
        deoptimization_index_(deoptimization_index) {}
  virtual ~SafepointGenerator() { }

  virtual void BeforeCall(int call_size) const {}

  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deoptimization_index_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  int deoptimization_index_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  HPhase phase("Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;
  CpuFeatures::Scope scope(SSE2);
  return GeneratePrologue() &&
         GenerateBody() &&
         GenerateDeferredCode() &&
         GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
  Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
}


void LCodeGen::Abort(const char* format, ...) {
  if (FLAG_trace_bailout) {
    SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
    PrintF("Aborting LCodeGen in @\"%s\": ", *name);
    va_list arguments;
    va_start(arguments, format);
    OS::VPrint(format, arguments);
    va_end(arguments);
    PrintF("\n");
  }
  status_ = ABORTED;
}


void LCodeGen::Comment(const char* format, ...) {
  if (!FLAG_code_comments) return;
  char buffer[4 * KB];
  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
  va_list arguments;
  va_start(arguments, format);
  builder.AddFormattedList(format, arguments);
  va_end(arguments);

  // Copy the string before recording it in the assembler to avoid
  // issues when the stack allocated buffer goes out of scope.
  size_t length = builder.position();
  Vector<char> copy = Vector<char>::New(length + 1);
  memcpy(copy.start(), builder.Finalize(), copy.length());
  masm()->RecordComment(copy.start());
}


bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

#ifdef DEBUG
  if (strlen(FLAG_stop_at) > 0 &&
      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
    __ int3();
  }
#endif

  // Strict mode functions and builtins need to replace the receiver
  // with undefined when called as functions (without an explicit
  // receiver object). ecx is zero for method calls and non-zero for
  // function calls.
  if (info_->is_strict_mode() || info_->is_native()) {
    Label ok;
    __ test(ecx, Operand(ecx));
    __ j(zero, &ok, Label::kNear);
    // +1 for return address.
    int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
    __ mov(Operand(esp, receiver_offset),
           Immediate(isolate()->factory()->undefined_value()));
    __ bind(&ok);
  }

  __ push(ebp);  // Caller's frame pointer.
  __ mov(ebp, esp);
  __ push(esi);  // Callee's context.
  __ push(edi);  // Callee's JS function.

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ mov(Operand(eax), Immediate(slots));
      Label loop;
      __ bind(&loop);
      __ push(Immediate(kSlotsZapValue));
      __ dec(eax);
      __ j(not_zero, &loop);
    } else {
      __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      // On windows, you may not access the stack more than one page below
      // the most recently mapped page. To make the allocated area randomly
      // accessible, we write to each page in turn (the value is irrelevant).
      const int kPageSize = 4 * KB;
      for (int offset = slots * kPointerSize - kPageSize;
           offset > 0;
           offset -= kPageSize) {
        __ mov(Operand(esp, offset), eax);
      }
#endif
    }
  }

  // Possibly allocate a local context.
  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is still in edi.
    __ push(edi);
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
    // Context is returned in both eax and esi. It replaces the context
    // passed to us. It's saved in the stack and kept live in esi.
    __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);

    // Copy parameters into context if necessary.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Slot* slot = scope()->parameter(i)->AsSlot();
      if (slot != NULL && slot->type() == Slot::CONTEXT) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ mov(eax, Operand(ebp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(slot->index());
        __ mov(Operand(esi, context_offset), eax);
        // Update the write barrier. This clobbers all involved
        // registers, so we have to use a third register to avoid
        // clobbering esi.
        __ mov(ecx, esi);
        __ RecordWrite(ecx, context_offset, eax, ebx);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace) {
    // We have not executed any compiled code yet, so esi still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


bool LCodeGen::GenerateBody() {
  ASSERT(is_generating());
  bool emit_instructions = true;
  for (current_instruction_ = 0;
       !is_aborted() && current_instruction_ < instructions_->length();
       current_instruction_++) {
    LInstruction* instr = instructions_->at(current_instruction_);
    if (instr->IsLabel()) {
      LLabel* label = LLabel::cast(instr);
      emit_instructions = !label->HasReplacement();
    }

    if (emit_instructions) {
      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
      instr->CompileToNative(this);
    }
  }
  return !is_aborted();
}


LInstruction* LCodeGen::GetNextInstruction() {
  if (current_instruction_ < instructions_->length() - 1) {
    return instructions_->at(current_instruction_ + 1);
  } else {
    return NULL;
  }
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];
      __ bind(code->entry());
      code->Generate();
      __ jmp(code->exit());
    }

    // Pad code to ensure that the last piece of deferred code has
    // room for lazy bailout.
    while ((masm()->pc_offset() - LastSafepointEnd())
           < Deoptimizer::patch_size()) {
      __ nop();
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


int LCodeGen::ToInteger32(LConstantOperand* op) const {
  Handle<Object> value = chunk_->LookupLiteral(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
      value->Number());
  return static_cast<int32_t>(value->Number());
}


Immediate LCodeGen::ToImmediate(LOperand* op) {
  LConstantOperand* const_op = LConstantOperand::cast(op);
  Handle<Object> literal = chunk_->LookupLiteral(const_op);
  Representation r = chunk_->LookupLiteralRepresentation(const_op);
  if (r.IsInteger32()) {
    ASSERT(literal->IsNumber());
    return Immediate(static_cast<int32_t>(literal->Number()));
  } else if (r.IsDouble()) {
    Abort("unsupported double immediate");
  }
  ASSERT(r.IsTagged());
  return Immediate(literal);
}


Operand LCodeGen::ToOperand(LOperand* op) const {
  if (op->IsRegister()) return Operand(ToRegister(op));
  if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  int index = op->index();
  if (index >= 0) {
    // Local or spill slot. Skip the frame pointer, function, and
    // context in the fixed part of the frame.
    return Operand(ebp, -(index + 3) * kPointerSize);
  } else {
    // Incoming parameter. Skip the return address.
    return Operand(ebp, -(index - 1) * kPointerSize);
  }
}


Operand LCodeGen::HighOperand(LOperand* op) {
  ASSERT(op->IsDoubleStackSlot());
  int index = op->index();
  int offset = (index >= 0) ? index + 3 : index - 1;
  return Operand(ebp, -offset * kPointerSize);
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
|
|
Translation* translation) {
|
|
if (environment == NULL) return;
|
|
|
|
// The translation includes one command per value in the environment.
|
|
int translation_size = environment->values()->length();
|
|
// The output frame height does not include the parameters.
|
|
int height = translation_size - environment->parameter_count();
|
|
|
|
WriteTranslation(environment->outer(), translation);
|
|
int closure_id = DefineDeoptimizationLiteral(environment->closure());
|
|
translation->BeginFrame(environment->ast_id(), closure_id, height);
|
|
for (int i = 0; i < translation_size; ++i) {
|
|
LOperand* value = environment->values()->at(i);
|
|
// spilled_registers_ and spilled_double_registers_ are either
|
|
// both NULL or both set.
|
|
if (environment->spilled_registers() != NULL && value != NULL) {
|
|
if (value->IsRegister() &&
|
|
environment->spilled_registers()[value->index()] != NULL) {
|
|
translation->MarkDuplicate();
|
|
AddToTranslation(translation,
|
|
environment->spilled_registers()[value->index()],
|
|
environment->HasTaggedValueAt(i));
|
|
} else if (
|
|
value->IsDoubleRegister() &&
|
|
environment->spilled_double_registers()[value->index()] != NULL) {
|
|
translation->MarkDuplicate();
|
|
AddToTranslation(
|
|
translation,
|
|
environment->spilled_double_registers()[value->index()],
|
|
false);
|
|
}
|
|
}
|
|
|
|
AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::AddToTranslation(Translation* translation,
|
|
LOperand* op,
|
|
bool is_tagged) {
|
|
if (op == NULL) {
|
|
// TODO(twuerthinger): Introduce marker operands to indicate that this value
|
|
// is not present and must be reconstructed from the deoptimizer. Currently
|
|
// this is only used for the arguments object.
|
|
translation->StoreArgumentsObject();
|
|
} else if (op->IsStackSlot()) {
|
|
if (is_tagged) {
|
|
translation->StoreStackSlot(op->index());
|
|
} else {
|
|
translation->StoreInt32StackSlot(op->index());
|
|
}
|
|
} else if (op->IsDoubleStackSlot()) {
|
|
translation->StoreDoubleStackSlot(op->index());
|
|
} else if (op->IsArgument()) {
|
|
ASSERT(is_tagged);
|
|
int src_index = GetStackSlotCount() + op->index();
|
|
translation->StoreStackSlot(src_index);
|
|
} else if (op->IsRegister()) {
|
|
Register reg = ToRegister(op);
|
|
if (is_tagged) {
|
|
translation->StoreRegister(reg);
|
|
} else {
|
|
translation->StoreInt32Register(reg);
|
|
}
|
|
} else if (op->IsDoubleRegister()) {
|
|
XMMRegister reg = ToDoubleRegister(op);
|
|
translation->StoreDoubleRegister(reg);
|
|
} else if (op->IsConstantOperand()) {
|
|
Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
|
|
int src_index = DefineDeoptimizationLiteral(literal);
|
|
translation->StoreLiteral(src_index);
|
|
} else {
|
|
UNREACHABLE();
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::CallCodeGeneric(Handle<Code> code,
|
|
RelocInfo::Mode mode,
|
|
LInstruction* instr,
|
|
SafepointMode safepoint_mode) {
|
|
ASSERT(instr != NULL);
|
|
LPointerMap* pointers = instr->pointer_map();
|
|
RecordPosition(pointers->position());
|
|
|
|
__ call(code, mode);
|
|
|
|
RegisterLazyDeoptimization(instr, safepoint_mode);
|
|
|
|
// Signal that we don't inline smi code before these stubs in the
|
|
// optimizing code generator.
|
|
if (code->kind() == Code::BINARY_OP_IC ||
|
|
code->kind() == Code::COMPARE_IC) {
|
|
__ nop();
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::CallCode(Handle<Code> code,
|
|
RelocInfo::Mode mode,
|
|
LInstruction* instr) {
|
|
CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
|
|
}
|
|
|
|
|
|
void LCodeGen::CallRuntime(const Runtime::Function* fun,
|
|
int argc,
|
|
LInstruction* instr) {
|
|
ASSERT(instr != NULL);
|
|
ASSERT(instr->HasPointerMap());
|
|
LPointerMap* pointers = instr->pointer_map();
|
|
RecordPosition(pointers->position());
|
|
|
|
__ CallRuntime(fun, argc);
|
|
|
|
RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
|
|
}
|
|
|
|
|
|
void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
|
|
int argc,
|
|
LInstruction* instr,
|
|
LOperand* context) {
|
|
ASSERT(context->IsRegister() || context->IsStackSlot());
|
|
if (context->IsRegister()) {
|
|
if (!ToRegister(context).is(esi)) {
|
|
__ mov(esi, ToRegister(context));
|
|
}
|
|
} else {
|
|
// Context is stack slot.
|
|
__ mov(esi, ToOperand(context));
|
|
}
|
|
|
|
__ CallRuntimeSaveDoubles(id);
|
|
RecordSafepointWithRegisters(
|
|
instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
|
|
}
|
|
|
|
|
|
void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
|
|
SafepointMode safepoint_mode) {
|
|
// Create the environment to bail out to. If the call has side effects,
// execution has to continue after the call; otherwise execution could
// resume from a previous bailout point and repeat the call.
|
|
LEnvironment* deoptimization_environment;
|
|
if (instr->HasDeoptimizationEnvironment()) {
|
|
deoptimization_environment = instr->deoptimization_environment();
|
|
} else {
|
|
deoptimization_environment = instr->environment();
|
|
}
|
|
|
|
RegisterEnvironmentForDeoptimization(deoptimization_environment);
|
|
if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
|
|
RecordSafepoint(instr->pointer_map(),
|
|
deoptimization_environment->deoptimization_index());
|
|
} else {
|
|
ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
|
|
RecordSafepointWithRegisters(
|
|
instr->pointer_map(),
|
|
0,
|
|
deoptimization_environment->deoptimization_index());
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
|
|
if (!environment->HasBeenRegistered()) {
|
|
// Physical stack frame layout:
|
|
// -x ............. -4 0 ..................................... y
|
|
// [incoming arguments] [spill slots] [pushed outgoing arguments]
|
|
|
|
// Layout of the environment:
|
|
// 0 ..................................................... size-1
|
|
// [parameters] [locals] [expression stack including arguments]
|
|
|
|
// Layout of the translation:
|
|
// 0 ........................................................ size - 1 + 4
|
|
// [expression stack including arguments] [locals] [4 words] [parameters]
|
|
// |>------------ translation_size ------------<|
|
|
|
|
int frame_count = 0;
|
|
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
|
|
++frame_count;
|
|
}
|
|
Translation translation(&translations_, frame_count);
|
|
WriteTranslation(environment, &translation);
|
|
int deoptimization_index = deoptimizations_.length();
|
|
environment->Register(deoptimization_index, translation.index());
|
|
deoptimizations_.Add(environment);
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
|
|
RegisterEnvironmentForDeoptimization(environment);
|
|
ASSERT(environment->HasBeenRegistered());
|
|
int id = environment->deoptimization_index();
|
|
Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
|
|
ASSERT(entry != NULL);
|
|
if (entry == NULL) {
|
|
Abort("bailout was not prepared");
|
|
return;
|
|
}
|
|
|
|
if (FLAG_deopt_every_n_times != 0) {
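// Stress mode: decrement the function's deopt counter each time this check
// is reached; when it hits zero, reset it and deoptimize unconditionally.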
|
|
Handle<SharedFunctionInfo> shared(info_->shared_info());
|
|
Label no_deopt;
|
|
__ pushfd();
|
|
__ push(eax);
|
|
__ push(ebx);
|
|
__ mov(ebx, shared);
|
|
__ mov(eax, FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset));
|
|
__ sub(Operand(eax), Immediate(Smi::FromInt(1)));
|
|
__ j(not_zero, &no_deopt, Label::kNear);
|
|
if (FLAG_trap_on_deopt) __ int3();
|
|
__ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
|
|
__ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
|
|
__ pop(ebx);
|
|
__ pop(eax);
|
|
__ popfd();
|
|
__ jmp(entry, RelocInfo::RUNTIME_ENTRY);
|
|
|
|
__ bind(&no_deopt);
|
|
__ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
|
|
__ pop(ebx);
|
|
__ pop(eax);
|
|
__ popfd();
|
|
}
|
|
|
|
if (cc == no_condition) {
|
|
if (FLAG_trap_on_deopt) __ int3();
|
|
__ jmp(entry, RelocInfo::RUNTIME_ENTRY);
|
|
} else {
|
|
if (FLAG_trap_on_deopt) {
|
|
Label done;
|
|
__ j(NegateCondition(cc), &done, Label::kNear);
|
|
__ int3();
|
|
__ jmp(entry, RelocInfo::RUNTIME_ENTRY);
|
|
__ bind(&done);
|
|
} else {
|
|
__ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
|
|
}
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
|
|
int length = deoptimizations_.length();
|
|
if (length == 0) return;
|
|
ASSERT(FLAG_deopt);
|
|
Handle<DeoptimizationInputData> data =
|
|
factory()->NewDeoptimizationInputData(length, TENURED);
|
|
|
|
Handle<ByteArray> translations = translations_.CreateByteArray();
|
|
data->SetTranslationByteArray(*translations);
|
|
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
|
|
|
|
Handle<FixedArray> literals =
|
|
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
|
|
for (int i = 0; i < deoptimization_literals_.length(); i++) {
|
|
literals->set(i, *deoptimization_literals_[i]);
|
|
}
|
|
data->SetLiteralArray(*literals);
|
|
|
|
data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
|
|
data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
|
|
|
|
// Populate the deoptimization entries.
|
|
for (int i = 0; i < length; i++) {
|
|
LEnvironment* env = deoptimizations_[i];
|
|
data->SetAstId(i, Smi::FromInt(env->ast_id()));
|
|
data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
|
|
data->SetArgumentsStackHeight(i,
|
|
Smi::FromInt(env->arguments_stack_height()));
|
|
}
|
|
code->set_deoptimization_data(*data);
|
|
}
|
|
|
|
|
|
int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
|
|
int result = deoptimization_literals_.length();
|
|
for (int i = 0; i < deoptimization_literals_.length(); ++i) {
|
|
if (deoptimization_literals_[i].is_identical_to(literal)) return i;
|
|
}
|
|
deoptimization_literals_.Add(literal);
|
|
return result;
|
|
}
|
|
|
|
|
|
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
|
|
ASSERT(deoptimization_literals_.length() == 0);
|
|
|
|
const ZoneList<Handle<JSFunction> >* inlined_closures =
|
|
chunk()->inlined_closures();
|
|
|
|
for (int i = 0, length = inlined_closures->length();
|
|
i < length;
|
|
i++) {
|
|
DefineDeoptimizationLiteral(inlined_closures->at(i));
|
|
}
|
|
|
|
inlined_function_count_ = deoptimization_literals_.length();
|
|
}
|
|
|
|
|
|
void LCodeGen::RecordSafepoint(
|
|
LPointerMap* pointers,
|
|
Safepoint::Kind kind,
|
|
int arguments,
|
|
int deoptimization_index) {
|
|
ASSERT(kind == expected_safepoint_kind_);
|
|
const ZoneList<LOperand*>* operands = pointers->operands();
|
|
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
|
|
kind, arguments, deoptimization_index);
|
|
for (int i = 0; i < operands->length(); i++) {
|
|
LOperand* pointer = operands->at(i);
|
|
if (pointer->IsStackSlot()) {
|
|
safepoint.DefinePointerSlot(pointer->index());
|
|
} else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
|
|
safepoint.DefinePointerRegister(ToRegister(pointer));
|
|
}
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
|
|
int deoptimization_index) {
|
|
RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
|
|
}
|
|
|
|
|
|
void LCodeGen::RecordSafepoint(int deoptimization_index) {
|
|
LPointerMap empty_pointers(RelocInfo::kNoPosition);
|
|
RecordSafepoint(&empty_pointers, deoptimization_index);
|
|
}
|
|
|
|
|
|
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
|
|
int arguments,
|
|
int deoptimization_index) {
|
|
RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
|
|
deoptimization_index);
|
|
}
|
|
|
|
|
|
void LCodeGen::RecordPosition(int position) {
|
|
if (position == RelocInfo::kNoPosition) return;
|
|
masm()->positions_recorder()->RecordPosition(position);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoLabel(LLabel* label) {
|
|
if (label->is_loop_header()) {
|
|
Comment(";;; B%d - LOOP entry", label->block_id());
|
|
} else {
|
|
Comment(";;; B%d", label->block_id());
|
|
}
|
|
__ bind(label->label());
|
|
current_block_ = label->block_id();
|
|
DoGap(label);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoParallelMove(LParallelMove* move) {
|
|
resolver_.Resolve(move);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoGap(LGap* gap) {
|
|
for (int i = LGap::FIRST_INNER_POSITION;
|
|
i <= LGap::LAST_INNER_POSITION;
|
|
i++) {
|
|
LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
|
|
LParallelMove* move = gap->GetParallelMove(inner_pos);
|
|
if (move != NULL) DoParallelMove(move);
|
|
}
|
|
|
|
LInstruction* next = GetNextInstruction();
|
|
if (next != NULL && next->IsLazyBailout()) {
|
|
int pc = masm()->pc_offset();
|
|
safepoints_.SetPcAfterGap(pc);
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
|
|
DoGap(instr);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoParameter(LParameter* instr) {
|
|
// Nothing to do.
|
|
}
|
|
|
|
|
|
void LCodeGen::DoCallStub(LCallStub* instr) {
|
|
ASSERT(ToRegister(instr->context()).is(esi));
|
|
ASSERT(ToRegister(instr->result()).is(eax));
|
|
switch (instr->hydrogen()->major_key()) {
|
|
case CodeStub::RegExpConstructResult: {
|
|
RegExpConstructResultStub stub;
|
|
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
|
break;
|
|
}
|
|
case CodeStub::RegExpExec: {
|
|
RegExpExecStub stub;
|
|
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
|
break;
|
|
}
|
|
case CodeStub::SubString: {
|
|
SubStringStub stub;
|
|
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
|
break;
|
|
}
|
|
case CodeStub::NumberToString: {
|
|
NumberToStringStub stub;
|
|
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
|
break;
|
|
}
|
|
case CodeStub::StringAdd: {
|
|
StringAddStub stub(NO_STRING_ADD_FLAGS);
|
|
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
|
break;
|
|
}
|
|
case CodeStub::StringCompare: {
|
|
StringCompareStub stub;
|
|
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
|
break;
|
|
}
|
|
case CodeStub::TranscendentalCache: {
|
|
TranscendentalCacheStub stub(instr->transcendental_type(),
|
|
TranscendentalCacheStub::TAGGED);
|
|
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
|
break;
|
|
}
|
|
default:
|
|
UNREACHABLE();
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
|
|
// Nothing to do.
|
|
}
|
|
|
|
|
|
void LCodeGen::DoModI(LModI* instr) {
|
|
if (instr->hydrogen()->HasPowerOf2Divisor()) {
|
|
Register dividend = ToRegister(instr->InputAt(0));
|
|
|
|
int32_t divisor =
|
|
HConstant::cast(instr->hydrogen()->right())->Integer32Value();
|
|
|
|
if (divisor < 0) divisor = -divisor;
|
|
|
|
Label positive_dividend, done;
|
|
__ test(dividend, Operand(dividend));
|
|
__ j(not_sign, &positive_dividend, Label::kNear);
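// Negative dividend: compute |dividend| & (divisor - 1), then negate the
// result so the remainder keeps the dividend's sign.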
|
|
__ neg(dividend);
|
|
__ and_(dividend, divisor - 1);
|
|
__ neg(dividend);
|
|
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
|
__ j(not_zero, &done, Label::kNear);
|
|
DeoptimizeIf(no_condition, instr->environment());
|
|
} else {
|
|
__ jmp(&done, Label::kNear);
|
|
}
|
|
__ bind(&positive_dividend);
|
|
__ and_(dividend, divisor - 1);
|
|
__ bind(&done);
|
|
} else {
|
|
Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
|
|
Register left_reg = ToRegister(instr->InputAt(0));
|
|
Register right_reg = ToRegister(instr->InputAt(1));
|
|
Register result_reg = ToRegister(instr->result());
|
|
|
|
ASSERT(left_reg.is(eax));
|
|
ASSERT(result_reg.is(edx));
|
|
ASSERT(!right_reg.is(eax));
|
|
ASSERT(!right_reg.is(edx));
|
|
|
|
// Check for x % 0.
|
|
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
|
|
__ test(right_reg, Operand(right_reg));
|
|
DeoptimizeIf(zero, instr->environment());
|
|
}
|
|
|
|
__ test(left_reg, Operand(left_reg));
|
|
__ j(zero, &remainder_eq_dividend, Label::kNear);
|
|
__ j(sign, &slow, Label::kNear);
|
|
|
|
__ test(right_reg, Operand(right_reg));
|
|
__ j(not_sign, &both_positive, Label::kNear);
|
|
// The sign of the divisor doesn't matter.
|
|
__ neg(right_reg);
|
|
|
|
__ bind(&both_positive);
|
|
// If the dividend is smaller than the nonnegative
|
|
// divisor, the dividend is the result.
|
|
__ cmp(left_reg, Operand(right_reg));
|
|
__ j(less, &remainder_eq_dividend, Label::kNear);
|
|
|
|
// Check if the divisor is a PowerOfTwo integer.
|
|
Register scratch = ToRegister(instr->TempAt(0));
|
|
__ mov(scratch, right_reg);
|
|
__ sub(Operand(scratch), Immediate(1));
|
|
__ test(scratch, Operand(right_reg));
|
|
__ j(not_zero, &do_subtraction, Label::kNear);
|
|
__ and_(left_reg, Operand(scratch));
|
|
__ jmp(&remainder_eq_dividend, Label::kNear);
|
|
|
|
__ bind(&do_subtraction);
|
|
const int kUnfolds = 3;
|
|
// Try a few subtractions of the dividend.
|
|
__ mov(scratch, left_reg);
|
|
for (int i = 0; i < kUnfolds; i++) {
|
|
// Reduce the dividend by the divisor.
|
|
__ sub(left_reg, Operand(right_reg));
|
|
// Check if the dividend is less than the divisor.
|
|
__ cmp(left_reg, Operand(right_reg));
|
|
__ j(less, &remainder_eq_dividend, Label::kNear);
|
|
}
|
|
__ mov(left_reg, scratch);
|
|
|
|
// Slow case, using idiv instruction.
|
|
__ bind(&slow);
|
|
// Sign extend to edx.
|
|
__ cdq();
|
|
|
|
// Check for (0 % -x) that will produce negative zero.
|
|
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
|
Label positive_left;
|
|
Label done;
|
|
__ test(left_reg, Operand(left_reg));
|
|
__ j(not_sign, &positive_left, Label::kNear);
|
|
__ idiv(right_reg);
|
|
|
|
// Test the remainder for 0, because then the result would be -0.
|
|
__ test(result_reg, Operand(result_reg));
|
|
__ j(not_zero, &done, Label::kNear);
|
|
|
|
DeoptimizeIf(no_condition, instr->environment());
|
|
__ bind(&positive_left);
|
|
__ idiv(right_reg);
|
|
__ bind(&done);
|
|
} else {
|
|
__ idiv(right_reg);
|
|
}
|
|
__ jmp(&done, Label::kNear);
|
|
|
|
__ bind(&remainder_eq_dividend);
|
|
__ mov(result_reg, left_reg);
|
|
|
|
__ bind(&done);
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoDivI(LDivI* instr) {
|
|
LOperand* right = instr->InputAt(1);
|
|
ASSERT(ToRegister(instr->result()).is(eax));
|
|
ASSERT(ToRegister(instr->InputAt(0)).is(eax));
|
|
ASSERT(!ToRegister(instr->InputAt(1)).is(eax));
|
|
ASSERT(!ToRegister(instr->InputAt(1)).is(edx));
|
|
|
|
Register left_reg = eax;
|
|
|
|
// Check for x / 0.
|
|
Register right_reg = ToRegister(right);
|
|
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
|
|
__ test(right_reg, ToOperand(right));
|
|
DeoptimizeIf(zero, instr->environment());
|
|
}
|
|
|
|
// Check for (0 / -x) that will produce negative zero.
|
|
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
|
Label left_not_zero;
|
|
__ test(left_reg, Operand(left_reg));
|
|
__ j(not_zero, &left_not_zero, Label::kNear);
|
|
__ test(right_reg, ToOperand(right));
|
|
DeoptimizeIf(sign, instr->environment());
|
|
__ bind(&left_not_zero);
|
|
}
|
|
|
|
// Check for (-kMinInt / -1).
|
|
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
|
|
Label left_not_min_int;
|
|
__ cmp(left_reg, kMinInt);
|
|
__ j(not_zero, &left_not_min_int, Label::kNear);
|
|
__ cmp(right_reg, -1);
|
|
DeoptimizeIf(zero, instr->environment());
|
|
__ bind(&left_not_min_int);
|
|
}
|
|
|
|
// Sign extend to edx.
|
|
__ cdq();
|
|
__ idiv(right_reg);
|
|
|
|
// Deoptimize if remainder is not 0.
|
|
__ test(edx, Operand(edx));
|
|
DeoptimizeIf(not_zero, instr->environment());
|
|
}
|
|
|
|
|
|
void LCodeGen::DoMulI(LMulI* instr) {
|
|
Register left = ToRegister(instr->InputAt(0));
|
|
LOperand* right = instr->InputAt(1);
|
|
|
|
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
|
__ mov(ToRegister(instr->TempAt(0)), left);
|
|
}
|
|
|
|
if (right->IsConstantOperand()) {
|
|
// Try strength reductions on the multiplication.
|
|
// All replacement instructions are at most as long as the imul
|
|
// and have better latency.
|
|
int constant = ToInteger32(LConstantOperand::cast(right));
|
|
if (constant == -1) {
|
|
__ neg(left);
|
|
} else if (constant == 0) {
|
|
__ xor_(left, Operand(left));
|
|
} else if (constant == 2) {
|
|
__ add(left, Operand(left));
|
|
} else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
|
|
// If we know that the multiplication can't overflow, it's safe to
|
|
// use instructions that don't set the overflow flag for the
|
|
// multiplication.
|
|
switch (constant) {
|
|
case 1:
|
|
// Do nothing.
|
|
break;
|
|
case 3:
|
|
__ lea(left, Operand(left, left, times_2, 0));
|
|
break;
|
|
case 4:
|
|
__ shl(left, 2);
|
|
break;
|
|
case 5:
|
|
__ lea(left, Operand(left, left, times_4, 0));
|
|
break;
|
|
case 8:
|
|
__ shl(left, 3);
|
|
break;
|
|
case 9:
|
|
__ lea(left, Operand(left, left, times_8, 0));
|
|
break;
|
|
case 16:
|
|
__ shl(left, 4);
|
|
break;
|
|
default:
|
|
__ imul(left, left, constant);
|
|
break;
|
|
}
|
|
} else {
|
|
__ imul(left, left, constant);
|
|
}
|
|
} else {
|
|
__ imul(left, ToOperand(right));
|
|
}
|
|
|
|
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
|
|
DeoptimizeIf(overflow, instr->environment());
|
|
}
|
|
|
|
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
|
// Bail out if the result is supposed to be negative zero.
|
|
Label done;
|
|
__ test(left, Operand(left));
|
|
__ j(not_zero, &done, Label::kNear);
|
|
if (right->IsConstantOperand()) {
|
|
if (ToInteger32(LConstantOperand::cast(right)) <= 0) {
|
|
DeoptimizeIf(no_condition, instr->environment());
|
|
}
|
|
} else {
|
|
// Test the non-zero operand for negative sign.
|
|
__ or_(ToRegister(instr->TempAt(0)), ToOperand(right));
|
|
DeoptimizeIf(sign, instr->environment());
|
|
}
|
|
__ bind(&done);
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoBitI(LBitI* instr) {
|
|
LOperand* left = instr->InputAt(0);
|
|
LOperand* right = instr->InputAt(1);
|
|
ASSERT(left->Equals(instr->result()));
|
|
ASSERT(left->IsRegister());
|
|
|
|
if (right->IsConstantOperand()) {
|
|
int right_operand = ToInteger32(LConstantOperand::cast(right));
|
|
switch (instr->op()) {
|
|
case Token::BIT_AND:
|
|
__ and_(ToRegister(left), right_operand);
|
|
break;
|
|
case Token::BIT_OR:
|
|
__ or_(ToRegister(left), right_operand);
|
|
break;
|
|
case Token::BIT_XOR:
|
|
__ xor_(ToRegister(left), right_operand);
|
|
break;
|
|
default:
|
|
UNREACHABLE();
|
|
break;
|
|
}
|
|
} else {
|
|
switch (instr->op()) {
|
|
case Token::BIT_AND:
|
|
__ and_(ToRegister(left), ToOperand(right));
|
|
break;
|
|
case Token::BIT_OR:
|
|
__ or_(ToRegister(left), ToOperand(right));
|
|
break;
|
|
case Token::BIT_XOR:
|
|
__ xor_(ToRegister(left), ToOperand(right));
|
|
break;
|
|
default:
|
|
UNREACHABLE();
|
|
break;
|
|
}
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoShiftI(LShiftI* instr) {
|
|
LOperand* left = instr->InputAt(0);
|
|
LOperand* right = instr->InputAt(1);
|
|
ASSERT(left->Equals(instr->result()));
|
|
ASSERT(left->IsRegister());
|
|
if (right->IsRegister()) {
|
|
ASSERT(ToRegister(right).is(ecx));
|
|
|
|
switch (instr->op()) {
|
|
case Token::SAR:
|
|
__ sar_cl(ToRegister(left));
|
|
break;
|
|
case Token::SHR:
|
|
__ shr_cl(ToRegister(left));
|
|
if (instr->can_deopt()) {
|
|
__ test(ToRegister(left), Immediate(0x80000000));
|
|
DeoptimizeIf(not_zero, instr->environment());
|
|
}
|
|
break;
|
|
case Token::SHL:
|
|
__ shl_cl(ToRegister(left));
|
|
break;
|
|
default:
|
|
UNREACHABLE();
|
|
break;
|
|
}
|
|
} else {
|
|
int value = ToInteger32(LConstantOperand::cast(right));
|
|
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
|
|
switch (instr->op()) {
|
|
case Token::SAR:
|
|
if (shift_count != 0) {
|
|
__ sar(ToRegister(left), shift_count);
|
|
}
|
|
break;
|
|
case Token::SHR:
|
|
if (shift_count == 0 && instr->can_deopt()) {
|
|
__ test(ToRegister(left), Immediate(0x80000000));
|
|
DeoptimizeIf(not_zero, instr->environment());
|
|
} else {
|
|
__ shr(ToRegister(left), shift_count);
|
|
}
|
|
break;
|
|
case Token::SHL:
|
|
if (shift_count != 0) {
|
|
__ shl(ToRegister(left), shift_count);
|
|
}
|
|
break;
|
|
default:
|
|
UNREACHABLE();
|
|
break;
|
|
}
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoSubI(LSubI* instr) {
|
|
LOperand* left = instr->InputAt(0);
|
|
LOperand* right = instr->InputAt(1);
|
|
ASSERT(left->Equals(instr->result()));
|
|
|
|
if (right->IsConstantOperand()) {
|
|
__ sub(ToOperand(left), ToImmediate(right));
|
|
} else {
|
|
__ sub(ToRegister(left), ToOperand(right));
|
|
}
|
|
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
|
|
DeoptimizeIf(overflow, instr->environment());
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoConstantI(LConstantI* instr) {
|
|
ASSERT(instr->result()->IsRegister());
|
|
__ Set(ToRegister(instr->result()), Immediate(instr->value()));
|
|
}
|
|
|
|
|
|
void LCodeGen::DoConstantD(LConstantD* instr) {
|
|
ASSERT(instr->result()->IsDoubleRegister());
|
|
XMMRegister res = ToDoubleRegister(instr->result());
|
|
double v = instr->value();
|
|
// Use xor to produce +0.0 in a fast and compact way, but avoid to
|
|
// do so if the constant is -0.0.
|
|
if (BitCast<uint64_t, double>(v) == 0) {
|
|
__ xorps(res, res);
|
|
} else {
|
|
Register temp = ToRegister(instr->TempAt(0));
|
|
uint64_t int_val = BitCast<uint64_t, double>(v);
|
|
int32_t lower = static_cast<int32_t>(int_val);
|
|
int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
|
|
if (CpuFeatures::IsSupported(SSE4_1)) {
|
|
CpuFeatures::Scope scope(SSE4_1);
|
|
if (lower != 0) {
|
|
__ Set(temp, Immediate(lower));
|
|
__ movd(res, Operand(temp));
|
|
__ Set(temp, Immediate(upper));
|
|
__ pinsrd(res, Operand(temp), 1);
|
|
} else {
|
|
__ xorps(res, res);
|
|
__ Set(temp, Immediate(upper));
|
|
__ pinsrd(res, Operand(temp), 1);
|
|
}
|
|
} else {
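// Without SSE4.1: load the upper half, shift it into the high 32 bits,
// then OR in the lower half via xmm0 if it is non-zero.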
|
|
__ Set(temp, Immediate(upper));
|
|
__ movd(res, Operand(temp));
|
|
__ psllq(res, 32);
|
|
if (lower != 0) {
|
|
__ Set(temp, Immediate(lower));
|
|
__ movd(xmm0, Operand(temp));
|
|
__ por(res, xmm0);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoConstantT(LConstantT* instr) {
|
|
ASSERT(instr->result()->IsRegister());
|
|
__ Set(ToRegister(instr->result()), Immediate(instr->value()));
|
|
}
|
|
|
|
|
|
void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
|
|
Register result = ToRegister(instr->result());
|
|
Register array = ToRegister(instr->InputAt(0));
|
|
__ mov(result, FieldOperand(array, JSArray::kLengthOffset));
|
|
}
|
|
|
|
|
|
void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
|
|
Register result = ToRegister(instr->result());
|
|
Register array = ToRegister(instr->InputAt(0));
|
|
__ mov(result, FieldOperand(array, FixedArray::kLengthOffset));
|
|
}
|
|
|
|
|
|
void LCodeGen::DoExternalArrayLength(LExternalArrayLength* instr) {
|
|
Register result = ToRegister(instr->result());
|
|
Register array = ToRegister(instr->InputAt(0));
|
|
__ mov(result, FieldOperand(array, ExternalArray::kLengthOffset));
|
|
}
|
|
|
|
|
|
void LCodeGen::DoElementsKind(LElementsKind* instr) {
|
|
Register result = ToRegister(instr->result());
|
|
Register input = ToRegister(instr->InputAt(0));
|
|
|
|
// Load map into |result|.
|
|
__ mov(result, FieldOperand(input, HeapObject::kMapOffset));
|
|
// Load the map's "bit field 2" into |result|. We only need the first byte,
|
|
// but the following masking takes care of that anyway.
|
|
__ mov(result, FieldOperand(result, Map::kBitField2Offset));
|
|
// Retrieve elements_kind from bit field 2.
|
|
__ and_(result, Map::kElementsKindMask);
|
|
__ shr(result, Map::kElementsKindShift);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoValueOf(LValueOf* instr) {
|
|
Register input = ToRegister(instr->InputAt(0));
|
|
Register result = ToRegister(instr->result());
|
|
Register map = ToRegister(instr->TempAt(0));
|
|
ASSERT(input.is(result));
|
|
Label done;
|
|
// If the object is a smi return the object.
|
|
__ JumpIfSmi(input, &done, Label::kNear);
|
|
|
|
// If the object is not a value type, return the object.
|
|
__ CmpObjectType(input, JS_VALUE_TYPE, map);
|
|
__ j(not_equal, &done, Label::kNear);
|
|
__ mov(result, FieldOperand(input, JSValue::kValueOffset));
|
|
|
|
__ bind(&done);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoBitNotI(LBitNotI* instr) {
|
|
LOperand* input = instr->InputAt(0);
|
|
ASSERT(input->Equals(instr->result()));
|
|
__ not_(ToRegister(input));
|
|
}
|
|
|
|
|
|
void LCodeGen::DoThrow(LThrow* instr) {
|
|
__ push(ToOperand(instr->value()));
|
|
ASSERT(ToRegister(instr->context()).is(esi));
|
|
CallRuntime(Runtime::kThrow, 1, instr);
|
|
|
|
if (FLAG_debug_code) {
|
|
Comment("Unreachable code.");
|
|
__ int3();
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoAddI(LAddI* instr) {
|
|
LOperand* left = instr->InputAt(0);
|
|
LOperand* right = instr->InputAt(1);
|
|
ASSERT(left->Equals(instr->result()));
|
|
|
|
if (right->IsConstantOperand()) {
|
|
__ add(ToOperand(left), ToImmediate(right));
|
|
} else {
|
|
__ add(ToRegister(left), ToOperand(right));
|
|
}
|
|
|
|
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
|
|
DeoptimizeIf(overflow, instr->environment());
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
|
|
XMMRegister left = ToDoubleRegister(instr->InputAt(0));
|
|
XMMRegister right = ToDoubleRegister(instr->InputAt(1));
|
|
XMMRegister result = ToDoubleRegister(instr->result());
|
|
// Modulo uses a fixed result register.
|
|
ASSERT(instr->op() == Token::MOD || left.is(result));
|
|
switch (instr->op()) {
|
|
case Token::ADD:
|
|
__ addsd(left, right);
|
|
break;
|
|
case Token::SUB:
|
|
__ subsd(left, right);
|
|
break;
|
|
case Token::MUL:
|
|
__ mulsd(left, right);
|
|
break;
|
|
case Token::DIV:
|
|
__ divsd(left, right);
|
|
break;
|
|
case Token::MOD: {
|
|
// Pass two doubles as arguments on the stack.
|
|
__ PrepareCallCFunction(4, eax);
|
|
__ movdbl(Operand(esp, 0 * kDoubleSize), left);
|
|
__ movdbl(Operand(esp, 1 * kDoubleSize), right);
|
|
__ CallCFunction(
|
|
ExternalReference::double_fp_operation(Token::MOD, isolate()),
|
|
4);
|
|
|
|
// Return value is in st(0) on ia32.
|
|
// Store it into the (fixed) result register.
|
|
__ sub(Operand(esp), Immediate(kDoubleSize));
|
|
__ fstp_d(Operand(esp, 0));
|
|
__ movdbl(result, Operand(esp, 0));
|
|
__ add(Operand(esp), Immediate(kDoubleSize));
|
|
break;
|
|
}
|
|
default:
|
|
UNREACHABLE();
|
|
break;
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
|
|
ASSERT(ToRegister(instr->context()).is(esi));
|
|
ASSERT(ToRegister(instr->left()).is(edx));
|
|
ASSERT(ToRegister(instr->right()).is(eax));
|
|
ASSERT(ToRegister(instr->result()).is(eax));
|
|
|
|
BinaryOpStub stub(instr->op(), NO_OVERWRITE);
|
|
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
|
__ nop(); // Signals no inlined code.
|
|
}
|
|
|
|
|
|
int LCodeGen::GetNextEmittedBlock(int block) {
|
|
for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
|
|
LLabel* label = chunk_->GetLabel(i);
|
|
if (!label->HasReplacement()) return i;
|
|
}
|
|
return -1;
|
|
}
|
|
|
|
|
|
void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
|
|
int next_block = GetNextEmittedBlock(current_block_);
|
|
right_block = chunk_->LookupDestination(right_block);
|
|
left_block = chunk_->LookupDestination(left_block);
|
|
|
|
if (right_block == left_block) {
|
|
EmitGoto(left_block);
|
|
} else if (left_block == next_block) {
|
|
__ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
|
|
} else if (right_block == next_block) {
|
|
__ j(cc, chunk_->GetAssemblyLabel(left_block));
|
|
} else {
|
|
__ j(cc, chunk_->GetAssemblyLabel(left_block));
|
|
__ jmp(chunk_->GetAssemblyLabel(right_block));
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoBranch(LBranch* instr) {
|
|
int true_block = chunk_->LookupDestination(instr->true_block_id());
|
|
int false_block = chunk_->LookupDestination(instr->false_block_id());
|
|
|
|
Representation r = instr->hydrogen()->value()->representation();
|
|
if (r.IsInteger32()) {
|
|
Register reg = ToRegister(instr->InputAt(0));
|
|
__ test(reg, Operand(reg));
|
|
EmitBranch(true_block, false_block, not_zero);
|
|
} else if (r.IsDouble()) {
|
|
XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
|
|
__ xorps(xmm0, xmm0);
|
|
__ ucomisd(reg, xmm0);
|
|
EmitBranch(true_block, false_block, not_equal);
|
|
} else {
|
|
ASSERT(r.IsTagged());
|
|
Register reg = ToRegister(instr->InputAt(0));
|
|
if (instr->hydrogen()->value()->type().IsBoolean()) {
|
|
__ cmp(reg, factory()->true_value());
|
|
EmitBranch(true_block, false_block, equal);
|
|
} else {
|
|
Label* true_label = chunk_->GetAssemblyLabel(true_block);
|
|
Label* false_label = chunk_->GetAssemblyLabel(false_block);
|
|
|
|
__ cmp(reg, factory()->undefined_value());
|
|
__ j(equal, false_label);
|
|
__ cmp(reg, factory()->true_value());
|
|
__ j(equal, true_label);
|
|
__ cmp(reg, factory()->false_value());
|
|
__ j(equal, false_label);
|
|
__ test(reg, Operand(reg));
|
|
__ j(equal, false_label);
|
|
__ JumpIfSmi(reg, true_label);
|
|
|
|
// Test for double values. Zero is false.
|
|
Label call_stub;
|
|
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
|
|
factory()->heap_number_map());
|
|
__ j(not_equal, &call_stub, Label::kNear);
|
|
__ fldz();
|
|
__ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
|
|
__ FCmp();
|
|
__ j(zero, false_label);
|
|
__ jmp(true_label);
|
|
|
|
// The conversion stub doesn't cause garbage collections so it's
|
|
// safe to not record a safepoint after the call.
|
|
__ bind(&call_stub);
|
|
ToBooleanStub stub(eax);
|
|
__ pushad();
|
|
__ push(reg);
|
|
__ CallStub(&stub);
|
|
__ test(eax, Operand(eax));
|
|
__ popad();
|
|
EmitBranch(true_block, false_block, not_zero);
|
|
}
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::EmitGoto(int block) {
|
|
block = chunk_->LookupDestination(block);
|
|
int next_block = GetNextEmittedBlock(current_block_);
|
|
if (block != next_block) {
|
|
__ jmp(chunk_->GetAssemblyLabel(block));
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoGoto(LGoto* instr) {
|
|
EmitGoto(instr->block_id());
|
|
}
|
|
|
|
|
|
Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
|
|
Condition cond = no_condition;
|
|
switch (op) {
|
|
case Token::EQ:
|
|
case Token::EQ_STRICT:
|
|
cond = equal;
|
|
break;
|
|
case Token::LT:
|
|
cond = is_unsigned ? below : less;
|
|
break;
|
|
case Token::GT:
|
|
cond = is_unsigned ? above : greater;
|
|
break;
|
|
case Token::LTE:
|
|
cond = is_unsigned ? below_equal : less_equal;
|
|
break;
|
|
case Token::GTE:
|
|
cond = is_unsigned ? above_equal : greater_equal;
|
|
break;
|
|
case Token::IN:
|
|
case Token::INSTANCEOF:
|
|
default:
|
|
UNREACHABLE();
|
|
}
|
|
return cond;
|
|
}
|
|
|
|
|
|
void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
|
|
if (right->IsConstantOperand()) {
|
|
__ cmp(ToOperand(left), ToImmediate(right));
|
|
} else {
|
|
__ cmp(ToRegister(left), ToOperand(right));
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
|
|
LOperand* left = instr->InputAt(0);
|
|
LOperand* right = instr->InputAt(1);
|
|
int false_block = chunk_->LookupDestination(instr->false_block_id());
|
|
int true_block = chunk_->LookupDestination(instr->true_block_id());
|
|
|
|
if (instr->is_double()) {
|
|
// Don't base result on EFLAGS when a NaN is involved. Instead
|
|
// jump to the false block.
|
|
__ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
|
|
__ j(parity_even, chunk_->GetAssemblyLabel(false_block));
|
|
} else {
|
|
EmitCmpI(left, right);
|
|
}
|
|
|
|
Condition cc = TokenToCondition(instr->op(), instr->is_double());
|
|
EmitBranch(true_block, false_block, cc);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
|
|
Register left = ToRegister(instr->InputAt(0));
|
|
Operand right = ToOperand(instr->InputAt(1));
|
|
int false_block = chunk_->LookupDestination(instr->false_block_id());
|
|
int true_block = chunk_->LookupDestination(instr->true_block_id());
|
|
|
|
__ cmp(left, Operand(right));
|
|
EmitBranch(true_block, false_block, equal);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
|
|
Register left = ToRegister(instr->InputAt(0));
|
|
int true_block = chunk_->LookupDestination(instr->true_block_id());
|
|
int false_block = chunk_->LookupDestination(instr->false_block_id());
|
|
|
|
__ cmp(left, instr->hydrogen()->right());
|
|
EmitBranch(true_block, false_block, equal);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
|
|
Register reg = ToRegister(instr->InputAt(0));
|
|
|
|
// TODO(fsc): If the expression is known to be a smi, then it's
|
|
// definitely not null. Jump to the false block.
|
|
|
|
int true_block = chunk_->LookupDestination(instr->true_block_id());
|
|
int false_block = chunk_->LookupDestination(instr->false_block_id());
|
|
|
|
__ cmp(reg, factory()->null_value());
|
|
if (instr->is_strict()) {
|
|
EmitBranch(true_block, false_block, equal);
|
|
} else {
|
|
Label* true_label = chunk_->GetAssemblyLabel(true_block);
|
|
Label* false_label = chunk_->GetAssemblyLabel(false_block);
|
|
__ j(equal, true_label);
|
|
__ cmp(reg, factory()->undefined_value());
|
|
__ j(equal, true_label);
|
|
__ JumpIfSmi(reg, false_label);
|
|
// Check for undetectable objects by looking in the bit field in
|
|
// the map. The object has already been smi checked.
|
|
Register scratch = ToRegister(instr->TempAt(0));
|
|
__ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
|
|
__ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
|
|
__ test(scratch, Immediate(1 << Map::kIsUndetectable));
|
|
EmitBranch(true_block, false_block, not_zero);
|
|
}
|
|
}
|
|
|
|
|
|
Condition LCodeGen::EmitIsObject(Register input,
|
|
Register temp1,
|
|
Label* is_not_object,
|
|
Label* is_object) {
|
|
__ JumpIfSmi(input, is_not_object);
|
|
|
|
__ cmp(input, isolate()->factory()->null_value());
|
|
__ j(equal, is_object);
|
|
|
|
__ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
|
|
// Undetectable objects behave like undefined.
|
|
__ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
|
|
1 << Map::kIsUndetectable);
|
|
__ j(not_zero, is_not_object);
|
|
|
|
__ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
|
|
__ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
|
|
__ j(below, is_not_object);
|
|
__ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
|
|
return below_equal;
|
|
}
|
|
|
|
|
|
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
|
|
Register reg = ToRegister(instr->InputAt(0));
|
|
Register temp = ToRegister(instr->TempAt(0));
|
|
|
|
int true_block = chunk_->LookupDestination(instr->true_block_id());
|
|
int false_block = chunk_->LookupDestination(instr->false_block_id());
|
|
Label* true_label = chunk_->GetAssemblyLabel(true_block);
|
|
Label* false_label = chunk_->GetAssemblyLabel(false_block);
|
|
|
|
Condition true_cond = EmitIsObject(reg, temp, false_label, true_label);
|
|
|
|
EmitBranch(true_block, false_block, true_cond);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
|
|
Operand input = ToOperand(instr->InputAt(0));
|
|
|
|
int true_block = chunk_->LookupDestination(instr->true_block_id());
|
|
int false_block = chunk_->LookupDestination(instr->false_block_id());
|
|
|
|
__ test(input, Immediate(kSmiTagMask));
|
|
EmitBranch(true_block, false_block, zero);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
|
|
Register input = ToRegister(instr->InputAt(0));
|
|
Register temp = ToRegister(instr->TempAt(0));
|
|
|
|
int true_block = chunk_->LookupDestination(instr->true_block_id());
|
|
int false_block = chunk_->LookupDestination(instr->false_block_id());
|
|
|
|
STATIC_ASSERT(kSmiTag == 0);
|
|
__ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
|
|
__ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
|
|
__ test_b(FieldOperand(temp, Map::kBitFieldOffset),
|
|
1 << Map::kIsUndetectable);
|
|
EmitBranch(true_block, false_block, not_zero);
|
|
}
|
|
|
|
|
|
static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
|
|
InstanceType from = instr->from();
|
|
InstanceType to = instr->to();
|
|
if (from == FIRST_TYPE) return to;
|
|
ASSERT(from == to || to == LAST_TYPE);
|
|
return from;
|
|
}
|
|
|
|
|
|
static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
|
|
InstanceType from = instr->from();
|
|
InstanceType to = instr->to();
|
|
if (from == to) return equal;
|
|
if (to == LAST_TYPE) return above_equal;
|
|
if (from == FIRST_TYPE) return below_equal;
|
|
UNREACHABLE();
|
|
return equal;
|
|
}
|
|
|
|
|
|
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
|
|
Register input = ToRegister(instr->InputAt(0));
|
|
Register temp = ToRegister(instr->TempAt(0));
|
|
|
|
int true_block = chunk_->LookupDestination(instr->true_block_id());
|
|
int false_block = chunk_->LookupDestination(instr->false_block_id());
|
|
|
|
Label* false_label = chunk_->GetAssemblyLabel(false_block);
|
|
|
|
__ JumpIfSmi(input, false_label);
|
|
|
|
__ CmpObjectType(input, TestType(instr->hydrogen()), temp);
|
|
EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
|
|
}
|
|
|
|
|
|
void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
|
|
Register input = ToRegister(instr->InputAt(0));
|
|
Register result = ToRegister(instr->result());
|
|
|
|
if (FLAG_debug_code) {
|
|
__ AbortIfNotString(input);
|
|
}
|
|
|
|
__ mov(result, FieldOperand(input, String::kHashFieldOffset));
|
|
__ IndexFromHash(result, result);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoHasCachedArrayIndexAndBranch(
|
|
LHasCachedArrayIndexAndBranch* instr) {
|
|
Register input = ToRegister(instr->InputAt(0));
|
|
|
|
int true_block = chunk_->LookupDestination(instr->true_block_id());
|
|
int false_block = chunk_->LookupDestination(instr->false_block_id());
|
|
|
|
__ test(FieldOperand(input, String::kHashFieldOffset),
|
|
Immediate(String::kContainsCachedArrayIndexMask));
|
|
EmitBranch(true_block, false_block, equal);
|
|
}
|
|
|
|
|
|
// Branches to a label or falls through with the answer in the z flag. Trashes
|
|
// the temp registers, but not the input. Only input and temp2 may alias.
|
|
void LCodeGen::EmitClassOfTest(Label* is_true,
|
|
Label* is_false,
|
|
Handle<String> class_name,
|
|
Register input,
|
|
Register temp,
|
|
Register temp2) {
|
|
ASSERT(!input.is(temp));
|
|
ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
|
|
__ JumpIfSmi(input, is_false);
|
|
__ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
|
|
__ j(below, is_false);
|
|
|
|
// Map is now in temp.
|
|
// Functions have class 'Function'.
|
|
__ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
|
|
if (class_name->IsEqualTo(CStrVector("Function"))) {
|
|
__ j(above_equal, is_true);
|
|
} else {
|
|
__ j(above_equal, is_false);
|
|
}
|
|
|
|
// Check if the constructor in the map is a function.
|
|
__ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
|
|
|
|
// As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
|
|
// FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
|
|
// LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
|
|
STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
|
|
STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
|
|
LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
|
|
|
|
// Objects with a non-function constructor have class 'Object'.
|
|
__ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
|
|
if (class_name->IsEqualTo(CStrVector("Object"))) {
|
|
__ j(not_equal, is_true);
|
|
} else {
|
|
__ j(not_equal, is_false);
|
|
}
|
|
|
|
// temp now contains the constructor function. Grab the
|
|
// instance class name from there.
|
|
__ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
|
|
__ mov(temp, FieldOperand(temp,
|
|
SharedFunctionInfo::kInstanceClassNameOffset));
|
|
// The class name we are testing against is a symbol because it's a literal.
|
|
// The name in the constructor is a symbol because of the way the context is
|
|
// booted. This routine isn't expected to work for random API-created
|
|
// classes and it doesn't have to because you can't access it with natives
|
|
// syntax. Since both sides are symbols it is sufficient to use an identity
|
|
// comparison.
|
|
__ cmp(temp, class_name);
|
|
// End with the answer in the z flag.
|
|
}
|
|
|
|
|
|
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
|
|
Register input = ToRegister(instr->InputAt(0));
|
|
Register temp = ToRegister(instr->TempAt(0));
|
|
Register temp2 = ToRegister(instr->TempAt(1));
|
|
if (input.is(temp)) {
|
|
// Swap.
|
|
Register swapper = temp;
|
|
temp = temp2;
|
|
temp2 = swapper;
|
|
}
|
|
Handle<String> class_name = instr->hydrogen()->class_name();
|
|
|
|
int true_block = chunk_->LookupDestination(instr->true_block_id());
|
|
int false_block = chunk_->LookupDestination(instr->false_block_id());
|
|
|
|
Label* true_label = chunk_->GetAssemblyLabel(true_block);
|
|
Label* false_label = chunk_->GetAssemblyLabel(false_block);
|
|
|
|
EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
|
|
|
|
EmitBranch(true_block, false_block, equal);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
|
|
Register reg = ToRegister(instr->InputAt(0));
|
|
int true_block = instr->true_block_id();
|
|
int false_block = instr->false_block_id();
|
|
|
|
__ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
|
|
EmitBranch(true_block, false_block, equal);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
|
|
// Object and function are in fixed registers defined by the stub.
|
|
ASSERT(ToRegister(instr->context()).is(esi));
|
|
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
|
|
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
|
|
|
Label true_value, done;
|
|
__ test(eax, Operand(eax));
|
|
__ j(zero, &true_value, Label::kNear);
|
|
__ mov(ToRegister(instr->result()), factory()->false_value());
|
|
__ jmp(&done, Label::kNear);
|
|
__ bind(&true_value);
|
|
__ mov(ToRegister(instr->result()), factory()->true_value());
|
|
__ bind(&done);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
|
|
class DeferredInstanceOfKnownGlobal: public LDeferredCode {
|
|
public:
|
|
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
|
|
LInstanceOfKnownGlobal* instr)
|
|
: LDeferredCode(codegen), instr_(instr) { }
|
|
virtual void Generate() {
|
|
codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
|
|
}
|
|
|
|
Label* map_check() { return &map_check_; }
|
|
|
|
private:
|
|
LInstanceOfKnownGlobal* instr_;
|
|
Label map_check_;
|
|
};
|
|
|
|
DeferredInstanceOfKnownGlobal* deferred;
|
|
deferred = new DeferredInstanceOfKnownGlobal(this, instr);
|
|
|
|
Label done, false_result;
|
|
Register object = ToRegister(instr->InputAt(1));
|
|
Register temp = ToRegister(instr->TempAt(0));
|
|
|
|
// A Smi is not an instance of anything.
|
|
__ JumpIfSmi(object, &false_result);
|
|
|
|
// This is the inlined call site instanceof cache. The two occurences of the
|
|
// hole value will be patched to the last map/result pair generated by the
|
|
// instanceof stub.
|
|
Label cache_miss;
|
|
Register map = ToRegister(instr->TempAt(0));
|
|
__ mov(map, FieldOperand(object, HeapObject::kMapOffset));
|
|
__ bind(deferred->map_check()); // Label for calculating code patching.
|
|
__ cmp(map, factory()->the_hole_value()); // Patched to cached map.
|
|
__ j(not_equal, &cache_miss, Label::kNear);
|
|
__ mov(eax, factory()->the_hole_value()); // Patched to either true or false.
|
|
__ jmp(&done);
|
|
|
|
// The inlined call site cache did not match. Check for null and string
|
|
// before calling the deferred code.
|
|
__ bind(&cache_miss);
|
|
// Null is not an instance of anything.
|
|
__ cmp(object, factory()->null_value());
|
|
__ j(equal, &false_result);
|
|
|
|
// String values are not instances of anything.
|
|
Condition is_string = masm_->IsObjectStringType(object, temp, temp);
|
|
__ j(is_string, &false_result);
|
|
|
|
// Go to the deferred code.
|
|
__ jmp(deferred->entry());
|
|
|
|
__ bind(&false_result);
|
|
__ mov(ToRegister(instr->result()), factory()->false_value());
|
|
|
|
// Here result has either true or false. Deferred code also produces true or
|
|
// false object.
|
|
__ bind(deferred->exit());
|
|
__ bind(&done);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
|
|
Label* map_check) {
|
|
PushSafepointRegistersScope scope(this);
|
|
|
|
InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
|
|
flags = static_cast<InstanceofStub::Flags>(
|
|
flags | InstanceofStub::kArgsInRegisters);
|
|
flags = static_cast<InstanceofStub::Flags>(
|
|
flags | InstanceofStub::kCallSiteInlineCheck);
|
|
flags = static_cast<InstanceofStub::Flags>(
|
|
flags | InstanceofStub::kReturnTrueFalseObject);
|
|
InstanceofStub stub(flags);
|
|
|
|
// Get the temp register reserved by the instruction. This needs to be a
|
|
// register which is pushed last by PushSafepointRegisters as top of the
|
|
// stack is used to pass the offset to the location of the map check to
|
|
// the stub.
|
|
Register temp = ToRegister(instr->TempAt(0));
|
|
ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
|
|
__ mov(InstanceofStub::right(), Immediate(instr->function()));
|
|
static const int kAdditionalDelta = 13;
|
|
int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
|
|
__ mov(temp, Immediate(delta));
|
|
__ StoreToSafepointRegisterSlot(temp, temp);
|
|
CallCodeGeneric(stub.GetCode(),
|
|
RelocInfo::CODE_TARGET,
|
|
instr,
|
|
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
|
|
// Put the result value into the eax slot and restore all registers.
|
|
__ StoreToSafepointRegisterSlot(eax, eax);
|
|
}
|
|
|
|
|
|
static Condition ComputeCompareCondition(Token::Value op) {
|
|
switch (op) {
|
|
case Token::EQ_STRICT:
|
|
case Token::EQ:
|
|
return equal;
|
|
case Token::LT:
|
|
return less;
|
|
case Token::GT:
|
|
return greater;
|
|
case Token::LTE:
|
|
return less_equal;
|
|
case Token::GTE:
|
|
return greater_equal;
|
|
default:
|
|
UNREACHABLE();
|
|
return no_condition;
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoCmpT(LCmpT* instr) {
|
|
Token::Value op = instr->op();
|
|
|
|
Handle<Code> ic = CompareIC::GetUninitialized(op);
|
|
CallCode(ic, RelocInfo::CODE_TARGET, instr);
|
|
|
|
Condition condition = ComputeCompareCondition(op);
|
|
if (op == Token::GT || op == Token::LTE) {
|
|
condition = ReverseCondition(condition);
|
|
}
|
|
Label true_value, done;
|
|
__ test(eax, Operand(eax));
|
|
__ j(condition, &true_value, Label::kNear);
|
|
__ mov(ToRegister(instr->result()), factory()->false_value());
|
|
__ jmp(&done, Label::kNear);
|
|
__ bind(&true_value);
|
|
__ mov(ToRegister(instr->result()), factory()->true_value());
|
|
__ bind(&done);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoReturn(LReturn* instr) {
|
|
if (FLAG_trace) {
|
|
// Preserve the return value on the stack and rely on the runtime call
|
|
// to return the value in the same register. We're leaving the code
|
|
// managed by the register allocator and tearing down the frame, it's
|
|
// safe to write to the context register.
|
|
__ push(eax);
|
|
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
|
|
__ CallRuntime(Runtime::kTraceExit, 1);
|
|
}
|
|
__ mov(esp, ebp);
|
|
__ pop(ebp);
|
|
__ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
|
|
Register result = ToRegister(instr->result());
|
|
__ mov(result, Operand::Cell(instr->hydrogen()->cell()));
|
|
if (instr->hydrogen()->check_hole_value()) {
|
|
__ cmp(result, factory()->the_hole_value());
|
|
DeoptimizeIf(equal, instr->environment());
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
|
|
ASSERT(ToRegister(instr->context()).is(esi));
|
|
ASSERT(ToRegister(instr->global_object()).is(eax));
|
|
ASSERT(ToRegister(instr->result()).is(eax));
|
|
|
|
__ mov(ecx, instr->name());
|
|
RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
|
|
RelocInfo::CODE_TARGET_CONTEXT;
|
|
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
|
|
CallCode(ic, mode, instr);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
|
|
Register value = ToRegister(instr->InputAt(0));
|
|
Operand cell_operand = Operand::Cell(instr->hydrogen()->cell());
|
|
|
|
// If the cell we are storing to contains the hole it could have
|
|
// been deleted from the property dictionary. In that case, we need
|
|
// to update the property details in the property dictionary to mark
|
|
// it as no longer deleted. We deoptimize in that case.
|
|
if (instr->hydrogen()->check_hole_value()) {
|
|
__ cmp(cell_operand, factory()->the_hole_value());
|
|
DeoptimizeIf(equal, instr->environment());
|
|
}
|
|
|
|
// Store the value.
|
|
__ mov(cell_operand, value);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
|
|
ASSERT(ToRegister(instr->context()).is(esi));
|
|
ASSERT(ToRegister(instr->global_object()).is(edx));
|
|
ASSERT(ToRegister(instr->value()).is(eax));
|
|
|
|
__ mov(ecx, instr->name());
|
|
Handle<Code> ic = instr->strict_mode()
|
|
? isolate()->builtins()->StoreIC_Initialize_Strict()
|
|
: isolate()->builtins()->StoreIC_Initialize();
|
|
CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
|
|
Register context = ToRegister(instr->context());
|
|
Register result = ToRegister(instr->result());
|
|
__ mov(result, ContextOperand(context, instr->slot_index()));
|
|
}
|
|
|
|
|
|
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
|
|
Register context = ToRegister(instr->context());
|
|
Register value = ToRegister(instr->value());
|
|
__ mov(ContextOperand(context, instr->slot_index()), value);
|
|
if (instr->needs_write_barrier()) {
|
|
Register temp = ToRegister(instr->TempAt(0));
|
|
int offset = Context::SlotOffset(instr->slot_index());
|
|
__ RecordWrite(context, offset, value, temp);
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
|
|
Register object = ToRegister(instr->object());
|
|
Register result = ToRegister(instr->result());
|
|
if (instr->hydrogen()->is_in_object()) {
|
|
__ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
|
|
} else {
|
|
__ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
|
|
__ mov(result, FieldOperand(result, instr->hydrogen()->offset()));
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
|
|
Register object,
|
|
Handle<Map> type,
|
|
Handle<String> name) {
|
|
LookupResult lookup;
|
|
type->LookupInDescriptors(NULL, *name, &lookup);
|
|
ASSERT(lookup.IsProperty() &&
|
|
(lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
|
|
if (lookup.type() == FIELD) {
|
|
int index = lookup.GetLocalFieldIndexFromMap(*type);
|
|
int offset = index * kPointerSize;
|
|
if (index < 0) {
|
|
// Negative property indices are in-object properties, indexed
|
|
// from the end of the fixed part of the object.
|
|
__ mov(result, FieldOperand(object, offset + type->instance_size()));
|
|
} else {
|
|
// Non-negative property indices are in the properties array.
|
|
__ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
|
|
__ mov(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
|
|
}
|
|
} else {
|
|
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
|
|
LoadHeapObject(result, Handle<HeapObject>::cast(function));
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
|
|
Register object = ToRegister(instr->object());
|
|
Register result = ToRegister(instr->result());
|
|
|
|
int map_count = instr->hydrogen()->types()->length();
|
|
Handle<String> name = instr->hydrogen()->name();
|
|
if (map_count == 0) {
|
|
ASSERT(instr->hydrogen()->need_generic());
|
|
__ mov(ecx, name);
|
|
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
|
|
CallCode(ic, RelocInfo::CODE_TARGET, instr);
|
|
} else {
|
|
Label done;
|
|
for (int i = 0; i < map_count - 1; ++i) {
|
|
Handle<Map> map = instr->hydrogen()->types()->at(i);
|
|
Label next;
|
|
__ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
|
|
__ j(not_equal, &next, Label::kNear);
|
|
EmitLoadFieldOrConstantFunction(result, object, map, name);
|
|
__ jmp(&done, Label::kNear);
|
|
__ bind(&next);
|
|
}
|
|
Handle<Map> map = instr->hydrogen()->types()->last();
|
|
__ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
|
|
if (instr->hydrogen()->need_generic()) {
|
|
Label generic;
|
|
__ j(not_equal, &generic, Label::kNear);
|
|
EmitLoadFieldOrConstantFunction(result, object, map, name);
|
|
__ jmp(&done, Label::kNear);
|
|
__ bind(&generic);
|
|
__ mov(ecx, name);
|
|
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
|
|
CallCode(ic, RelocInfo::CODE_TARGET, instr);
|
|
} else {
|
|
DeoptimizeIf(not_equal, instr->environment());
|
|
EmitLoadFieldOrConstantFunction(result, object, map, name);
|
|
}
|
|
__ bind(&done);
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
|
|
ASSERT(ToRegister(instr->context()).is(esi));
|
|
ASSERT(ToRegister(instr->object()).is(eax));
|
|
ASSERT(ToRegister(instr->result()).is(eax));
|
|
|
|
__ mov(ecx, instr->name());
|
|
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
|
|
CallCode(ic, RelocInfo::CODE_TARGET, instr);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
|
|
Register function = ToRegister(instr->function());
|
|
Register temp = ToRegister(instr->TempAt(0));
|
|
Register result = ToRegister(instr->result());
|
|
|
|
// Check that the function really is a function.
|
|
__ CmpObjectType(function, JS_FUNCTION_TYPE, result);
|
|
DeoptimizeIf(not_equal, instr->environment());
|
|
|
|
// Check whether the function has an instance prototype.
|
|
Label non_instance;
|
|
__ test_b(FieldOperand(result, Map::kBitFieldOffset),
|
|
1 << Map::kHasNonInstancePrototype);
|
|
__ j(not_zero, &non_instance, Label::kNear);
|
|
|
|
// Get the prototype or initial map from the function.
|
|
__ mov(result,
|
|
FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
|
|
|
|
// Check that the function has a prototype or an initial map.
|
|
__ cmp(Operand(result), Immediate(factory()->the_hole_value()));
|
|
DeoptimizeIf(equal, instr->environment());
|
|
|
|
// If the function does not have an initial map, we're done.
|
|
Label done;
|
|
__ CmpObjectType(result, MAP_TYPE, temp);
|
|
__ j(not_equal, &done, Label::kNear);
|
|
|
|
// Get the prototype from the initial map.
|
|
__ mov(result, FieldOperand(result, Map::kPrototypeOffset));
|
|
__ jmp(&done, Label::kNear);
|
|
|
|
// Non-instance prototype: Fetch prototype from constructor field
|
|
// in the function's map.
|
|
__ bind(&non_instance);
|
|
__ mov(result, FieldOperand(result, Map::kConstructorOffset));
|
|
|
|
// All done.
|
|
__ bind(&done);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoLoadElements(LLoadElements* instr) {
|
|
Register result = ToRegister(instr->result());
|
|
Register input = ToRegister(instr->InputAt(0));
|
|
__ mov(result, FieldOperand(input, JSObject::kElementsOffset));
|
|
if (FLAG_debug_code) {
|
|
Label done, ok, fail;
|
|
__ cmp(FieldOperand(result, HeapObject::kMapOffset),
|
|
Immediate(factory()->fixed_array_map()));
|
|
__ j(equal, &done, Label::kNear);
|
|
__ cmp(FieldOperand(result, HeapObject::kMapOffset),
|
|
Immediate(factory()->fixed_cow_array_map()));
|
|
__ j(equal, &done, Label::kNear);
|
|
Register temp((result.is(eax)) ? ebx : eax);
|
|
__ push(temp);
|
|
__ mov(temp, FieldOperand(result, HeapObject::kMapOffset));
|
|
__ movzx_b(temp, FieldOperand(temp, Map::kBitField2Offset));
|
|
__ and_(temp, Map::kElementsKindMask);
|
|
__ shr(temp, Map::kElementsKindShift);
|
|
__ cmp(temp, JSObject::FAST_ELEMENTS);
|
|
__ j(equal, &ok, Label::kNear);
|
|
__ cmp(temp, JSObject::FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
|
|
__ j(less, &fail, Label::kNear);
|
|
__ cmp(temp, JSObject::LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
|
|
__ j(less_equal, &ok, Label::kNear);
|
|
__ bind(&fail);
|
|
__ Abort("Check for fast or external elements failed.");
|
|
__ bind(&ok);
|
|
__ pop(temp);
|
|
__ bind(&done);
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoLoadExternalArrayPointer(
|
|
LLoadExternalArrayPointer* instr) {
|
|
Register result = ToRegister(instr->result());
|
|
Register input = ToRegister(instr->InputAt(0));
|
|
__ mov(result, FieldOperand(input,
|
|
ExternalArray::kExternalPointerOffset));
|
|
}
|
|
|
|
|
|
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
|
|
Register arguments = ToRegister(instr->arguments());
|
|
Register length = ToRegister(instr->length());
|
|
Operand index = ToOperand(instr->index());
|
|
Register result = ToRegister(instr->result());
|
|
|
|
__ sub(length, index);
|
|
DeoptimizeIf(below_equal, instr->environment());
|
|
|
|
// There are two words between the frame pointer and the last argument.
|
|
// Subtracting from length accounts for one of them add one more.
|
|
__ mov(result, Operand(arguments, length, times_4, kPointerSize));
|
|
}
|
|
|
|
|
|
void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
|
|
Register elements = ToRegister(instr->elements());
|
|
Register key = ToRegister(instr->key());
|
|
Register result = ToRegister(instr->result());
|
|
ASSERT(result.is(elements));
|
|
|
|
// Load the result.
|
|
__ mov(result, FieldOperand(elements,
|
|
key,
|
|
times_pointer_size,
|
|
FixedArray::kHeaderSize));
|
|
|
|
// Check for the hole value.
|
|
if (instr->hydrogen()->RequiresHoleCheck()) {
|
|
__ cmp(result, factory()->the_hole_value());
|
|
DeoptimizeIf(equal, instr->environment());
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoLoadKeyedFastDoubleElement(
|
|
LLoadKeyedFastDoubleElement* instr) {
|
|
Register elements = ToRegister(instr->elements());
|
|
XMMRegister result = ToDoubleRegister(instr->result());
|
|
|
|
if (instr->hydrogen()->RequiresHoleCheck()) {
|
|
int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
|
|
sizeof(kHoleNanLower32);
|
|
Operand hole_check_operand = BuildFastArrayOperand(
|
|
instr->elements(), instr->key(),
|
|
JSObject::FAST_DOUBLE_ELEMENTS,
|
|
offset);
|
|
__ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
|
|
DeoptimizeIf(equal, instr->environment());
|
|
}
|
|
|
|
Operand double_load_operand = BuildFastArrayOperand(
|
|
instr->elements(), instr->key(), JSObject::FAST_DOUBLE_ELEMENTS,
|
|
FixedDoubleArray::kHeaderSize - kHeapObjectTag);
|
|
__ movdbl(result, double_load_operand);
|
|
}
|
|
|
|
|
|
Operand LCodeGen::BuildFastArrayOperand(
|
|
LOperand* external_pointer,
|
|
LOperand* key,
|
|
JSObject::ElementsKind elements_kind,
|
|
uint32_t offset) {
|
|
Register external_pointer_reg = ToRegister(external_pointer);
|
|
int shift_size = ElementsKindToShiftSize(elements_kind);
|
|
if (key->IsConstantOperand()) {
|
|
int constant_value = ToInteger32(LConstantOperand::cast(key));
|
|
if (constant_value & 0xF0000000) {
|
|
Abort("array index constant value too big");
|
|
}
|
|
return Operand(external_pointer_reg,
|
|
constant_value * (1 << shift_size) + offset);
|
|
} else {
|
|
ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
|
|
return Operand(external_pointer_reg, ToRegister(key), scale_factor, offset);
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoLoadKeyedSpecializedArrayElement(
|
|
LLoadKeyedSpecializedArrayElement* instr) {
|
|
JSObject::ElementsKind elements_kind = instr->elements_kind();
|
|
Operand operand(BuildFastArrayOperand(instr->external_pointer(),
|
|
instr->key(), elements_kind, 0));
|
|
if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
|
|
XMMRegister result(ToDoubleRegister(instr->result()));
|
|
__ movss(result, operand);
|
|
__ cvtss2sd(result, result);
|
|
} else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
|
|
__ movdbl(ToDoubleRegister(instr->result()), operand);
|
|
} else {
|
|
Register result(ToRegister(instr->result()));
|
|
switch (elements_kind) {
|
|
case JSObject::EXTERNAL_BYTE_ELEMENTS:
|
|
__ movsx_b(result, operand);
|
|
break;
|
|
case JSObject::EXTERNAL_PIXEL_ELEMENTS:
|
|
case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
|
|
__ movzx_b(result, operand);
|
|
break;
|
|
case JSObject::EXTERNAL_SHORT_ELEMENTS:
|
|
__ movsx_w(result, operand);
|
|
break;
|
|
case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
|
|
__ movzx_w(result, operand);
|
|
break;
|
|
case JSObject::EXTERNAL_INT_ELEMENTS:
|
|
__ mov(result, operand);
|
|
break;
|
|
case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
|
|
__ mov(result, operand);
|
|
__ test(result, Operand(result));
|
|
// TODO(danno): we could be more clever here, perhaps having a special
|
|
// version of the stub that detects if the overflow case actually
|
|
// happens, and generate code that returns a double rather than int.
|
|
DeoptimizeIf(negative, instr->environment());
|
|
break;
|
|
case JSObject::EXTERNAL_FLOAT_ELEMENTS:
|
|
case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
|
|
case JSObject::FAST_ELEMENTS:
|
|
case JSObject::FAST_DOUBLE_ELEMENTS:
|
|
case JSObject::DICTIONARY_ELEMENTS:
|
|
case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
|
|
UNREACHABLE();
|
|
break;
|
|
}
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
|
|
ASSERT(ToRegister(instr->context()).is(esi));
|
|
ASSERT(ToRegister(instr->object()).is(edx));
|
|
ASSERT(ToRegister(instr->key()).is(eax));
|
|
|
|
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
|
|
CallCode(ic, RelocInfo::CODE_TARGET, instr);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
|
|
Register result = ToRegister(instr->result());
|
|
|
|
// Check for arguments adapter frame.
|
|
Label done, adapted;
|
|
__ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
|
|
__ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
|
|
__ cmp(Operand(result),
|
|
Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
|
|
__ j(equal, &adapted, Label::kNear);
|
|
|
|
// No arguments adaptor frame.
|
|
__ mov(result, Operand(ebp));
|
|
__ jmp(&done, Label::kNear);
|
|
|
|
// Arguments adaptor frame present.
|
|
__ bind(&adapted);
|
|
__ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
|
|
|
|
// Result is the frame pointer for the frame if not adapted and for the real
|
|
// frame below the adaptor frame if adapted.
|
|
__ bind(&done);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
|
|
Operand elem = ToOperand(instr->InputAt(0));
|
|
Register result = ToRegister(instr->result());
|
|
|
|
Label done;
|
|
|
|
// If no arguments adaptor frame the number of arguments is fixed.
|
|
__ cmp(ebp, elem);
|
|
__ mov(result, Immediate(scope()->num_parameters()));
|
|
__ j(equal, &done, Label::kNear);
|
|
|
|
// Arguments adaptor frame present. Get argument length from there.
|
|
__ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
|
|
__ mov(result, Operand(result,
|
|
ArgumentsAdaptorFrameConstants::kLengthOffset));
|
|
__ SmiUntag(result);
|
|
|
|
// Argument length is in result register.
|
|
__ bind(&done);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
|
|
Register receiver = ToRegister(instr->receiver());
|
|
Register function = ToRegister(instr->function());
|
|
Register length = ToRegister(instr->length());
|
|
Register elements = ToRegister(instr->elements());
|
|
Register scratch = ToRegister(instr->TempAt(0));
|
|
ASSERT(receiver.is(eax)); // Used for parameter count.
|
|
ASSERT(function.is(edi)); // Required by InvokeFunction.
|
|
ASSERT(ToRegister(instr->result()).is(eax));
|
|
|
|
// If the receiver is null or undefined, we have to pass the global
|
|
// object as a receiver to normal functions. Values have to be
|
|
// passed unchanged to builtins and strict-mode functions.
|
|
Label global_object, receiver_ok;
|
|
|
|
// Do not transform the receiver to object for strict mode
|
|
// functions.
|
|
__ mov(scratch,
|
|
FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
|
|
__ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
|
|
1 << SharedFunctionInfo::kStrictModeBitWithinByte);
|
|
__ j(not_equal, &receiver_ok, Label::kNear);
|
|
|
|
// Do not transform the receiver to object for builtins.
|
|
__ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
|
|
1 << SharedFunctionInfo::kNativeBitWithinByte);
|
|
__ j(not_equal, &receiver_ok, Label::kNear);
|
|
|
|
// Normal function. Replace undefined or null with global receiver.
|
|
__ cmp(receiver, factory()->null_value());
|
|
__ j(equal, &global_object, Label::kNear);
|
|
__ cmp(receiver, factory()->undefined_value());
|
|
__ j(equal, &global_object, Label::kNear);
|
|
|
|
// The receiver should be a JS object.
|
|
__ test(receiver, Immediate(kSmiTagMask));
|
|
DeoptimizeIf(equal, instr->environment());
|
|
__ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
|
|
DeoptimizeIf(below, instr->environment());
|
|
__ jmp(&receiver_ok, Label::kNear);
|
|
|
|
__ bind(&global_object);
|
|
// TODO(kmillikin): We have a hydrogen value for the global object. See
|
|
// if it's better to use it than to explicitly fetch it from the context
|
|
// here.
|
|
__ mov(receiver, Operand(ebp, StandardFrameConstants::kContextOffset));
|
|
__ mov(receiver, ContextOperand(receiver, Context::GLOBAL_INDEX));
|
|
__ mov(receiver,
|
|
FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
|
|
__ bind(&receiver_ok);
|
|
|
|
// Copy the arguments to this function possibly from the
|
|
// adaptor frame below it.
|
|
const uint32_t kArgumentsLimit = 1 * KB;
|
|
__ cmp(length, kArgumentsLimit);
|
|
DeoptimizeIf(above, instr->environment());
|
|
|
|
__ push(receiver);
|
|
__ mov(receiver, length);
|
|
|
|
// Loop through the arguments pushing them onto the execution
|
|
// stack.
|
|
Label invoke, loop;
|
|
// length is a small non-negative integer, due to the test above.
|
|
__ test(length, Operand(length));
|
|
__ j(zero, &invoke, Label::kNear);
|
|
__ bind(&loop);
|
|
__ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
|
|
__ dec(length);
|
|
__ j(not_zero, &loop);
|
|
|
|
// Invoke the function.
|
|
__ bind(&invoke);
|
|
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
|
|
LPointerMap* pointers = instr->pointer_map();
|
|
LEnvironment* env = instr->deoptimization_environment();
|
|
RecordPosition(pointers->position());
|
|
RegisterEnvironmentForDeoptimization(env);
|
|
SafepointGenerator safepoint_generator(this,
|
|
pointers,
|
|
env->deoptimization_index());
|
|
ParameterCount actual(eax);
|
|
__ InvokeFunction(function, actual, CALL_FUNCTION,
|
|
safepoint_generator, CALL_AS_METHOD);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoPushArgument(LPushArgument* instr) {
|
|
LOperand* argument = instr->InputAt(0);
|
|
if (argument->IsConstantOperand()) {
|
|
__ push(ToImmediate(argument));
|
|
} else {
|
|
__ push(ToOperand(argument));
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoThisFunction(LThisFunction* instr) {
|
|
Register result = ToRegister(instr->result());
|
|
__ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
|
|
}
|
|
|
|
|
|
void LCodeGen::DoContext(LContext* instr) {
|
|
Register result = ToRegister(instr->result());
|
|
__ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
|
|
}
|
|
|
|
|
|
void LCodeGen::DoOuterContext(LOuterContext* instr) {
|
|
Register context = ToRegister(instr->context());
|
|
Register result = ToRegister(instr->result());
|
|
__ mov(result,
|
|
Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
|
|
}
|
|
|
|
|
|
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
|
|
Register context = ToRegister(instr->context());
|
|
Register result = ToRegister(instr->result());
|
|
__ mov(result, Operand(context, Context::SlotOffset(Context::GLOBAL_INDEX)));
|
|
}
|
|
|
|
|
|
void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
|
|
Register global = ToRegister(instr->global());
|
|
Register result = ToRegister(instr->result());
|
|
__ mov(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
|
|
}
|
|
|
|
|
|
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
|
|
int arity,
|
|
LInstruction* instr,
|
|
CallKind call_kind) {
|
|
// Change context if needed.
|
|
bool change_context =
|
|
(info()->closure()->context() != function->context()) ||
|
|
scope()->contains_with() ||
|
|
(scope()->num_heap_slots() > 0);
|
|
if (change_context) {
|
|
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
|
|
} else {
|
|
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
|
|
}
|
|
|
|
// Set eax to arguments count if adaption is not needed. Assumes that eax
|
|
// is available to write to at this point.
|
|
if (!function->NeedsArgumentsAdaption()) {
|
|
__ mov(eax, arity);
|
|
}
|
|
|
|
LPointerMap* pointers = instr->pointer_map();
|
|
RecordPosition(pointers->position());
|
|
|
|
// Invoke function.
|
|
__ SetCallKind(ecx, call_kind);
|
|
if (*function == *info()->closure()) {
|
|
__ CallSelf();
|
|
} else {
|
|
__ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
|
|
}
|
|
|
|
// Setup deoptimization.
|
|
RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
|
|
ASSERT(ToRegister(instr->result()).is(eax));
|
|
__ mov(edi, instr->function());
|
|
CallKnownFunction(instr->function(),
|
|
instr->arity(),
|
|
instr,
|
|
CALL_AS_METHOD);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
|
|
Register input_reg = ToRegister(instr->value());
|
|
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
|
|
factory()->heap_number_map());
|
|
DeoptimizeIf(not_equal, instr->environment());
|
|
|
|
Label done;
|
|
Register tmp = input_reg.is(eax) ? ecx : eax;
|
|
Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
|
|
|
|
// Preserve the value of all registers.
|
|
PushSafepointRegistersScope scope(this);
|
|
|
|
Label negative;
|
|
__ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
|
|
// Check the sign of the argument. If the argument is positive, just
|
|
// return it. We do not need to patch the stack since |input| and
|
|
// |result| are the same register and |input| will be restored
|
|
// unchanged by popping safepoint registers.
|
|
__ test(tmp, Immediate(HeapNumber::kSignMask));
|
|
__ j(not_zero, &negative);
|
|
__ jmp(&done);
|
|
|
|
__ bind(&negative);
|
|
|
|
Label allocated, slow;
|
|
__ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
|
|
__ jmp(&allocated);
|
|
|
|
// Slow case: Call the runtime system to do the number allocation.
|
|
__ bind(&slow);
|
|
|
|
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
|
|
instr, instr->context());
|
|
|
|
// Set the pointer to the new heap number in tmp.
|
|
if (!tmp.is(eax)) __ mov(tmp, eax);
|
|
|
|
// Restore input_reg after call to runtime.
|
|
__ LoadFromSafepointRegisterSlot(input_reg, input_reg);
|
|
|
|
__ bind(&allocated);
|
|
__ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
|
|
__ and_(tmp2, ~HeapNumber::kSignMask);
|
|
__ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
|
|
__ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
|
|
__ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
|
|
__ StoreToSafepointRegisterSlot(input_reg, tmp);
|
|
|
|
__ bind(&done);
|
|
}
|
|
|
|
|
|
void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
|
|
Register input_reg = ToRegister(instr->value());
|
|
__ test(input_reg, Operand(input_reg));
|
|
Label is_positive;
|
|
__ j(not_sign, &is_positive);
|
|
__ neg(input_reg);
|
|
__ test(input_reg, Operand(input_reg));
|
|
DeoptimizeIf(negative, instr->environment());
|
|
__ bind(&is_positive);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
|
|
// Class for deferred case.
|
|
class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
|
|
public:
|
|
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
|
|
LUnaryMathOperation* instr)
|
|
: LDeferredCode(codegen), instr_(instr) { }
|
|
virtual void Generate() {
|
|
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
|
|
}
|
|
private:
|
|
LUnaryMathOperation* instr_;
|
|
};
|
|
|
|
ASSERT(instr->value()->Equals(instr->result()));
|
|
Representation r = instr->hydrogen()->value()->representation();
|
|
|
|
if (r.IsDouble()) {
|
|
XMMRegister scratch = xmm0;
|
|
XMMRegister input_reg = ToDoubleRegister(instr->value());
|
|
__ xorps(scratch, scratch);
|
|
__ subsd(scratch, input_reg);
|
|
__ pand(input_reg, scratch);
|
|
} else if (r.IsInteger32()) {
|
|
EmitIntegerMathAbs(instr);
|
|
} else { // Tagged case.
|
|
DeferredMathAbsTaggedHeapNumber* deferred =
|
|
new DeferredMathAbsTaggedHeapNumber(this, instr);
|
|
Register input_reg = ToRegister(instr->value());
|
|
// Smi check.
|
|
__ JumpIfNotSmi(input_reg, deferred->entry());
|
|
EmitIntegerMathAbs(instr);
|
|
__ bind(deferred->exit());
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
|
|
XMMRegister xmm_scratch = xmm0;
|
|
Register output_reg = ToRegister(instr->result());
|
|
XMMRegister input_reg = ToDoubleRegister(instr->value());
|
|
__ xorps(xmm_scratch, xmm_scratch); // Zero the register.
|
|
__ ucomisd(input_reg, xmm_scratch);
|
|
|
|
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
|
DeoptimizeIf(below_equal, instr->environment());
|
|
} else {
|
|
DeoptimizeIf(below, instr->environment());
|
|
}
|
|
|
|
// Use truncating instruction (OK because input is positive).
|
|
__ cvttsd2si(output_reg, Operand(input_reg));
|
|
|
|
// Overflow is signalled with minint.
|
|
__ cmp(output_reg, 0x80000000u);
|
|
DeoptimizeIf(equal, instr->environment());
|
|
}
|
|
|
|
|
|
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
|
|
XMMRegister xmm_scratch = xmm0;
|
|
Register output_reg = ToRegister(instr->result());
|
|
XMMRegister input_reg = ToDoubleRegister(instr->value());
|
|
|
|
Label below_half, done;
|
|
// xmm_scratch = 0.5
|
|
ExternalReference one_half = ExternalReference::address_of_one_half();
|
|
__ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
|
|
|
|
__ ucomisd(xmm_scratch, input_reg);
|
|
__ j(above, &below_half);
|
|
// input = input + 0.5
|
|
__ addsd(input_reg, xmm_scratch);
|
|
|
|
|
|
// Compute Math.floor(value + 0.5).
|
|
// Use truncating instruction (OK because input is positive).
|
|
__ cvttsd2si(output_reg, Operand(input_reg));
|
|
|
|
// Overflow is signalled with minint.
|
|
__ cmp(output_reg, 0x80000000u);
|
|
DeoptimizeIf(equal, instr->environment());
|
|
__ jmp(&done);
|
|
|
|
__ bind(&below_half);
|
|
|
|
// We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
|
|
// we can ignore the difference between a result of -0 and +0.
|
|
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
|
// If the sign is positive, we return +0.
|
|
__ movmskpd(output_reg, input_reg);
|
|
__ test(output_reg, Immediate(1));
|
|
DeoptimizeIf(not_zero, instr->environment());
|
|
} else {
|
|
// If the input is >= -0.5, we return +0.
|
|
__ mov(output_reg, Immediate(0xBF000000));
|
|
__ movd(xmm_scratch, Operand(output_reg));
|
|
__ cvtss2sd(xmm_scratch, xmm_scratch);
|
|
__ ucomisd(input_reg, xmm_scratch);
|
|
DeoptimizeIf(below, instr->environment());
|
|
}
|
|
__ Set(output_reg, Immediate(0));
|
|
__ bind(&done);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
|
|
XMMRegister input_reg = ToDoubleRegister(instr->value());
|
|
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
|
|
__ sqrtsd(input_reg, input_reg);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
|
|
XMMRegister xmm_scratch = xmm0;
|
|
XMMRegister input_reg = ToDoubleRegister(instr->value());
|
|
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
|
|
__ xorps(xmm_scratch, xmm_scratch);
|
|
__ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
|
|
__ sqrtsd(input_reg, input_reg);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoPower(LPower* instr) {
|
|
LOperand* left = instr->InputAt(0);
|
|
LOperand* right = instr->InputAt(1);
|
|
DoubleRegister result_reg = ToDoubleRegister(instr->result());
|
|
Representation exponent_type = instr->hydrogen()->right()->representation();
|
|
|
|
if (exponent_type.IsDouble()) {
|
|
// It is safe to use ebx directly since the instruction is marked
|
|
// as a call.
|
|
__ PrepareCallCFunction(4, ebx);
|
|
__ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
|
|
__ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
|
|
__ CallCFunction(ExternalReference::power_double_double_function(isolate()),
|
|
4);
|
|
} else if (exponent_type.IsInteger32()) {
|
|
// It is safe to use ebx directly since the instruction is marked
|
|
// as a call.
|
|
ASSERT(!ToRegister(right).is(ebx));
|
|
__ PrepareCallCFunction(4, ebx);
|
|
__ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
|
|
__ mov(Operand(esp, 1 * kDoubleSize), ToRegister(right));
|
|
__ CallCFunction(ExternalReference::power_double_int_function(isolate()),
|
|
4);
|
|
} else {
|
|
ASSERT(exponent_type.IsTagged());
|
|
CpuFeatures::Scope scope(SSE2);
|
|
Register right_reg = ToRegister(right);
|
|
|
|
Label non_smi, call;
|
|
__ JumpIfNotSmi(right_reg, &non_smi);
|
|
__ SmiUntag(right_reg);
|
|
__ cvtsi2sd(result_reg, Operand(right_reg));
|
|
__ jmp(&call);
|
|
|
|
__ bind(&non_smi);
|
|
// It is safe to use ebx directly since the instruction is marked
|
|
// as a call.
|
|
ASSERT(!right_reg.is(ebx));
|
|
__ CmpObjectType(right_reg, HEAP_NUMBER_TYPE , ebx);
|
|
DeoptimizeIf(not_equal, instr->environment());
|
|
__ movdbl(result_reg, FieldOperand(right_reg, HeapNumber::kValueOffset));
|
|
|
|
__ bind(&call);
|
|
__ PrepareCallCFunction(4, ebx);
|
|
__ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
|
|
__ movdbl(Operand(esp, 1 * kDoubleSize), result_reg);
|
|
__ CallCFunction(ExternalReference::power_double_double_function(isolate()),
|
|
4);
|
|
}
|
|
|
|
// Return value is in st(0) on ia32.
|
|
// Store it into the (fixed) result register.
|
|
__ sub(Operand(esp), Immediate(kDoubleSize));
|
|
__ fstp_d(Operand(esp, 0));
|
|
__ movdbl(result_reg, Operand(esp, 0));
|
|
__ add(Operand(esp), Immediate(kDoubleSize));
|
|
}
|
|
|
|
|
|
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
|
|
ASSERT(instr->value()->Equals(instr->result()));
|
|
XMMRegister input_reg = ToDoubleRegister(instr->value());
|
|
Label positive, done, zero;
|
|
__ xorps(xmm0, xmm0);
|
|
__ ucomisd(input_reg, xmm0);
|
|
__ j(above, &positive, Label::kNear);
|
|
__ j(equal, &zero, Label::kNear);
|
|
ExternalReference nan =
|
|
ExternalReference::address_of_canonical_non_hole_nan();
|
|
__ movdbl(input_reg, Operand::StaticVariable(nan));
|
|
__ jmp(&done, Label::kNear);
|
|
__ bind(&zero);
|
|
__ push(Immediate(0xFFF00000));
|
|
__ push(Immediate(0));
|
|
__ movdbl(input_reg, Operand(esp, 0));
|
|
__ add(Operand(esp), Immediate(kDoubleSize));
|
|
__ jmp(&done, Label::kNear);
|
|
__ bind(&positive);
|
|
__ fldln2();
|
|
__ sub(Operand(esp), Immediate(kDoubleSize));
|
|
__ movdbl(Operand(esp, 0), input_reg);
|
|
__ fld_d(Operand(esp, 0));
|
|
__ fyl2x();
|
|
__ fstp_d(Operand(esp, 0));
|
|
__ movdbl(input_reg, Operand(esp, 0));
|
|
__ add(Operand(esp), Immediate(kDoubleSize));
|
|
__ bind(&done);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
|
|
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
|
|
TranscendentalCacheStub stub(TranscendentalCache::COS,
|
|
TranscendentalCacheStub::UNTAGGED);
|
|
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
|
|
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
|
|
TranscendentalCacheStub stub(TranscendentalCache::SIN,
|
|
TranscendentalCacheStub::UNTAGGED);
|
|
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
|
|
switch (instr->op()) {
|
|
case kMathAbs:
|
|
DoMathAbs(instr);
|
|
break;
|
|
case kMathFloor:
|
|
DoMathFloor(instr);
|
|
break;
|
|
case kMathRound:
|
|
DoMathRound(instr);
|
|
break;
|
|
case kMathSqrt:
|
|
DoMathSqrt(instr);
|
|
break;
|
|
case kMathPowHalf:
|
|
DoMathPowHalf(instr);
|
|
break;
|
|
case kMathCos:
|
|
DoMathCos(instr);
|
|
break;
|
|
case kMathSin:
|
|
DoMathSin(instr);
|
|
break;
|
|
case kMathLog:
|
|
DoMathLog(instr);
|
|
break;
|
|
|
|
default:
|
|
UNREACHABLE();
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
|
|
ASSERT(ToRegister(instr->context()).is(esi));
|
|
ASSERT(ToRegister(instr->function()).is(edi));
|
|
ASSERT(instr->HasPointerMap());
|
|
ASSERT(instr->HasDeoptimizationEnvironment());
|
|
LPointerMap* pointers = instr->pointer_map();
|
|
LEnvironment* env = instr->deoptimization_environment();
|
|
RecordPosition(pointers->position());
|
|
RegisterEnvironmentForDeoptimization(env);
|
|
SafepointGenerator generator(this, pointers, env->deoptimization_index());
|
|
ParameterCount count(instr->arity());
|
|
__ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
|
|
ASSERT(ToRegister(instr->context()).is(esi));
|
|
ASSERT(ToRegister(instr->key()).is(ecx));
|
|
ASSERT(ToRegister(instr->result()).is(eax));
|
|
|
|
int arity = instr->arity();
|
|
Handle<Code> ic = isolate()->stub_cache()->
|
|
ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
|
|
CallCode(ic, RelocInfo::CODE_TARGET, instr);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoCallNamed(LCallNamed* instr) {
|
|
ASSERT(ToRegister(instr->context()).is(esi));
|
|
ASSERT(ToRegister(instr->result()).is(eax));
|
|
|
|
int arity = instr->arity();
|
|
RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
|
|
Handle<Code> ic =
|
|
isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode);
|
|
__ mov(ecx, instr->name());
|
|
CallCode(ic, mode, instr);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoCallFunction(LCallFunction* instr) {
|
|
ASSERT(ToRegister(instr->context()).is(esi));
|
|
ASSERT(ToRegister(instr->result()).is(eax));
|
|
|
|
int arity = instr->arity();
|
|
CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_IMPLICIT);
|
|
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
|
__ Drop(1);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
|
|
ASSERT(ToRegister(instr->context()).is(esi));
|
|
ASSERT(ToRegister(instr->result()).is(eax));
|
|
|
|
int arity = instr->arity();
|
|
RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
|
|
Handle<Code> ic =
|
|
isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode);
|
|
__ mov(ecx, instr->name());
|
|
CallCode(ic, mode, instr);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
|
|
ASSERT(ToRegister(instr->result()).is(eax));
|
|
__ mov(edi, instr->target());
|
|
CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoCallNew(LCallNew* instr) {
|
|
ASSERT(ToRegister(instr->context()).is(esi));
|
|
ASSERT(ToRegister(instr->constructor()).is(edi));
|
|
ASSERT(ToRegister(instr->result()).is(eax));
|
|
|
|
Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
|
|
__ Set(eax, Immediate(instr->arity()));
|
|
CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
|
|
CallRuntime(instr->function(), instr->arity(), instr);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
|
|
Register object = ToRegister(instr->object());
|
|
Register value = ToRegister(instr->value());
|
|
int offset = instr->offset();
|
|
|
|
if (!instr->transition().is_null()) {
|
|
__ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
|
|
}
|
|
|
|
// Do the store.
|
|
if (instr->is_in_object()) {
|
|
__ mov(FieldOperand(object, offset), value);
|
|
if (instr->needs_write_barrier()) {
|
|
Register temp = ToRegister(instr->TempAt(0));
|
|
// Update the write barrier for the object for in-object properties.
|
|
__ RecordWrite(object, offset, value, temp);
|
|
}
|
|
} else {
|
|
Register temp = ToRegister(instr->TempAt(0));
|
|
__ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
|
|
__ mov(FieldOperand(temp, offset), value);
|
|
if (instr->needs_write_barrier()) {
|
|
// Update the write barrier for the properties array.
|
|
// object is used as a scratch register.
|
|
__ RecordWrite(temp, offset, value, object);
|
|
}
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
|
|
ASSERT(ToRegister(instr->context()).is(esi));
|
|
ASSERT(ToRegister(instr->object()).is(edx));
|
|
ASSERT(ToRegister(instr->value()).is(eax));
|
|
|
|
__ mov(ecx, instr->name());
|
|
Handle<Code> ic = instr->strict_mode()
|
|
? isolate()->builtins()->StoreIC_Initialize_Strict()
|
|
: isolate()->builtins()->StoreIC_Initialize();
|
|
CallCode(ic, RelocInfo::CODE_TARGET, instr);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
|
|
__ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
|
|
DeoptimizeIf(above_equal, instr->environment());
|
|
}
|
|
|
|
|
|
void LCodeGen::DoStoreKeyedSpecializedArrayElement(
|
|
LStoreKeyedSpecializedArrayElement* instr) {
|
|
JSObject::ElementsKind elements_kind = instr->elements_kind();
|
|
Operand operand(BuildFastArrayOperand(instr->external_pointer(),
|
|
instr->key(), elements_kind, 0));
|
|
if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
|
|
__ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
|
|
__ movss(operand, xmm0);
|
|
} else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
|
|
__ movdbl(operand, ToDoubleRegister(instr->value()));
|
|
} else {
|
|
Register value = ToRegister(instr->value());
|
|
switch (elements_kind) {
|
|
case JSObject::EXTERNAL_PIXEL_ELEMENTS:
|
|
case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
|
|
case JSObject::EXTERNAL_BYTE_ELEMENTS:
|
|
__ mov_b(operand, value);
|
|
break;
|
|
case JSObject::EXTERNAL_SHORT_ELEMENTS:
|
|
case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
|
|
__ mov_w(operand, value);
|
|
break;
|
|
case JSObject::EXTERNAL_INT_ELEMENTS:
|
|
case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
|
|
__ mov(operand, value);
|
|
break;
|
|
case JSObject::EXTERNAL_FLOAT_ELEMENTS:
|
|
case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
|
|
case JSObject::FAST_ELEMENTS:
|
|
case JSObject::FAST_DOUBLE_ELEMENTS:
|
|
case JSObject::DICTIONARY_ELEMENTS:
|
|
case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
|
|
UNREACHABLE();
|
|
break;
|
|
}
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
|
|
Register value = ToRegister(instr->value());
|
|
Register elements = ToRegister(instr->object());
|
|
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
|
|
|
|
// Do the store.
|
|
if (instr->key()->IsConstantOperand()) {
|
|
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
|
|
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
|
|
int offset =
|
|
ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
|
|
__ mov(FieldOperand(elements, offset), value);
|
|
} else {
|
|
__ mov(FieldOperand(elements,
|
|
key,
|
|
times_pointer_size,
|
|
FixedArray::kHeaderSize),
|
|
value);
|
|
}
|
|
|
|
if (instr->hydrogen()->NeedsWriteBarrier()) {
|
|
// Compute address of modified element and store it into key register.
|
|
__ lea(key,
|
|
FieldOperand(elements,
|
|
key,
|
|
times_pointer_size,
|
|
FixedArray::kHeaderSize));
|
|
__ RecordWrite(elements, key, value);
|
|
}
|
|
}
|
|
|
|
|
|
void LCodeGen::DoStoreKeyedFastDoubleElement(
|
|
LStoreKeyedFastDoubleElement* instr) {
|
|
XMMRegister value = ToDoubleRegister(instr->value());
|
|
Register elements = ToRegister(instr->elements());
|
|
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
|
|
Label have_value;
|
|
|
|
__ ucomisd(value, value);
|
|
__ j(parity_odd, &have_value); // NaN.
|
|
|
|
ExternalReference canonical_nan_reference =
|
|
ExternalReference::address_of_canonical_non_hole_nan();
|
|
__ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
|
|
__ bind(&have_value);
|
|
|
|
Operand double_store_operand = BuildFastArrayOperand(
|
|
instr->elements(), instr->key(), JSObject::FAST_DOUBLE_ELEMENTS,
|
|
FixedDoubleArray::kHeaderSize - kHeapObjectTag);
|
|
__ movdbl(double_store_operand, value);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
|
|
ASSERT(ToRegister(instr->context()).is(esi));
|
|
ASSERT(ToRegister(instr->object()).is(edx));
|
|
ASSERT(ToRegister(instr->key()).is(ecx));
|
|
ASSERT(ToRegister(instr->value()).is(eax));
|
|
|
|
Handle<Code> ic = instr->strict_mode()
|
|
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
|
|
: isolate()->builtins()->KeyedStoreIC_Initialize();
|
|
CallCode(ic, RelocInfo::CODE_TARGET, instr);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
|
|
class DeferredStringCharCodeAt: public LDeferredCode {
|
|
public:
|
|
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
|
|
: LDeferredCode(codegen), instr_(instr) { }
|
|
virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
|
|
private:
|
|
LStringCharCodeAt* instr_;
|
|
};
|
|
|
|
Register string = ToRegister(instr->string());
|
|
Register index = no_reg;
|
|
int const_index = -1;
|
|
if (instr->index()->IsConstantOperand()) {
|
|
const_index = ToInteger32(LConstantOperand::cast(instr->index()));
|
|
STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
|
|
if (!Smi::IsValid(const_index)) {
|
|
// Guaranteed to be out of bounds because of the assert above.
|
|
// So the bounds check that must dominate this instruction must
|
|
// have deoptimized already.
|
|
if (FLAG_debug_code) {
|
|
__ Abort("StringCharCodeAt: out of bounds index.");
|
|
}
|
|
// No code needs to be generated.
|
|
return;
|
|
}
|
|
} else {
|
|
index = ToRegister(instr->index());
|
|
}
|
|
Register result = ToRegister(instr->result());
|
|
|
|
DeferredStringCharCodeAt* deferred =
|
|
new DeferredStringCharCodeAt(this, instr);
|
|
|
|
Label flat_string, ascii_string, done;
|
|
|
|
// Fetch the instance type of the receiver into result register.
|
|
__ mov(result, FieldOperand(string, HeapObject::kMapOffset));
|
|
__ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
|
|
|
|
// We need special handling for non-flat strings.
|
|
STATIC_ASSERT(kSeqStringTag == 0);
|
|
__ test(result, Immediate(kStringRepresentationMask));
|
|
__ j(zero, &flat_string, Label::kNear);
|
|
|
|
// Handle non-flat strings.
|
|
__ test(result, Immediate(kIsConsStringMask));
|
|
__ j(zero, deferred->entry());
|
|
|
|
// ConsString.
|
|
// Check whether the right hand side is the empty string (i.e. if
|
|
// this is really a flat string in a cons string). If that is not
|
|
// the case we would rather go to the runtime system now to flatten
|
|
// the string.
|
|
__ cmp(FieldOperand(string, ConsString::kSecondOffset),
|
|
Immediate(factory()->empty_string()));
|
|
__ j(not_equal, deferred->entry());
|
|
// Get the first of the two strings and load its instance type.
|
|
__ mov(string, FieldOperand(string, ConsString::kFirstOffset));
|
|
__ mov(result, FieldOperand(string, HeapObject::kMapOffset));
|
|
__ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
|
|
// If the first cons component is also non-flat, then go to runtime.
|
|
STATIC_ASSERT(kSeqStringTag == 0);
|
|
__ test(result, Immediate(kStringRepresentationMask));
|
|
__ j(not_zero, deferred->entry());
|
|
|
|
// Check for ASCII or two-byte string.
|
|
__ bind(&flat_string);
|
|
STATIC_ASSERT(kAsciiStringTag != 0);
|
|
__ test(result, Immediate(kStringEncodingMask));
|
|
__ j(not_zero, &ascii_string, Label::kNear);
|
|
|
|
// Two-byte string.
|
|
// Load the two-byte character code into the result register.
|
|
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
|
|
if (instr->index()->IsConstantOperand()) {
|
|
__ movzx_w(result,
|
|
FieldOperand(string,
|
|
SeqTwoByteString::kHeaderSize +
|
|
(kUC16Size * const_index)));
|
|
} else {
|
|
__ movzx_w(result, FieldOperand(string,
|
|
index,
|
|
times_2,
|
|
SeqTwoByteString::kHeaderSize));
|
|
}
|
|
__ jmp(&done, Label::kNear);
|
|
|
|
// ASCII string.
|
|
// Load the byte into the result register.
|
|
__ bind(&ascii_string);
|
|
if (instr->index()->IsConstantOperand()) {
|
|
__ movzx_b(result, FieldOperand(string,
|
|
SeqAsciiString::kHeaderSize + const_index));
|
|
} else {
|
|
__ movzx_b(result, FieldOperand(string,
|
|
index,
|
|
times_1,
|
|
SeqAsciiString::kHeaderSize));
|
|
}
|
|
__ bind(&done);
|
|
__ bind(deferred->exit());
|
|
}
|
|
|
|
|
|
void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
|
|
Register string = ToRegister(instr->string());
|
|
Register result = ToRegister(instr->result());
|
|
|
|
// TODO(3095996): Get rid of this. For now, we need to make the
|
|
// result register contain a valid pointer because it is already
|
|
// contained in the register pointer map.
|
|
__ Set(result, Immediate(0));
|
|
|
|
PushSafepointRegistersScope scope(this);
|
|
__ push(string);
|
|
// Push the index as a smi. This is safe because of the checks in
|
|
// DoStringCharCodeAt above.
|
|
STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
|
|
if (instr->index()->IsConstantOperand()) {
|
|
int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
|
|
__ push(Immediate(Smi::FromInt(const_index)));
|
|
} else {
|
|
Register index = ToRegister(instr->index());
|
|
__ SmiTag(index);
|
|
__ push(index);
|
|
}
|
|
CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2,
|
|
instr, instr->context());
|
|
if (FLAG_debug_code) {
|
|
__ AbortIfNotSmi(eax);
|
|
}
|
|
__ SmiUntag(eax);
|
|
__ StoreToSafepointRegisterSlot(result, eax);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
|
|
class DeferredStringCharFromCode: public LDeferredCode {
|
|
public:
|
|
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
|
|
: LDeferredCode(codegen), instr_(instr) { }
|
|
virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
|
|
private:
|
|
LStringCharFromCode* instr_;
|
|
};
|
|
|
|
DeferredStringCharFromCode* deferred =
|
|
new DeferredStringCharFromCode(this, instr);
|
|
|
|
ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
|
|
Register char_code = ToRegister(instr->char_code());
|
|
Register result = ToRegister(instr->result());
|
|
ASSERT(!char_code.is(result));
|
|
|
|
__ cmp(char_code, String::kMaxAsciiCharCode);
|
|
__ j(above, deferred->entry());
|
|
__ Set(result, Immediate(factory()->single_character_string_cache()));
|
|
__ mov(result, FieldOperand(result,
|
|
char_code, times_pointer_size,
|
|
FixedArray::kHeaderSize));
|
|
__ cmp(result, factory()->undefined_value());
|
|
__ j(equal, deferred->entry());
|
|
__ bind(deferred->exit());
|
|
}
|
|
|
|
|
|
void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
|
|
Register char_code = ToRegister(instr->char_code());
|
|
Register result = ToRegister(instr->result());
|
|
|
|
// TODO(3095996): Get rid of this. For now, we need to make the
|
|
// result register contain a valid pointer because it is already
|
|
// contained in the register pointer map.
|
|
__ Set(result, Immediate(0));
|
|
|
|
PushSafepointRegistersScope scope(this);
|
|
__ SmiTag(char_code);
|
|
__ push(char_code);
|
|
CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
|
|
__ StoreToSafepointRegisterSlot(result, eax);
|
|
}
|
|
|
|
|
|
void LCodeGen::DoStringLength(LStringLength* instr) {
|
|
Register string = ToRegister(instr->string());
|
|
Register result = ToRegister(instr->result());
|
|
__ mov(result, FieldOperand(string, String::kLengthOffset));
|
|
}
|
|
|
|
|
|
void LCodeGen::DoStringAdd(LStringAdd* instr) {
|
|
if (instr->left()->IsConstantOperand()) {
|
|
__ push(ToImmediate(instr->left()));
|
|
} else {
|
|
__ push(ToOperand(instr->left()));
|
|
}
|
|
if (instr->right()->IsConstantOperand()) {
|
|
__ push(ToImmediate(instr->right()));
|
|
} else {
|
|
__ push(ToOperand(instr->right()));
|
|
}
|
|
  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  ASSERT(output->IsDoubleRegister());
  __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
}


void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI: public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
   private:
    LNumberTagI* instr_;
  };

  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
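  // Tagging is a left shift by one; signed overflow means the value does
  // not fit in a smi, so the deferred code boxes it in a heap number.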
  __ SmiTag(reg);
  __ j(overflow, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
  Label slow;
  Register reg = ToRegister(instr->InputAt(0));
  Register tmp = reg.is(eax) ? ecx : eax;

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this);

  // There was overflow, so bits 30 and 31 of the original integer
  // disagree. Try to allocate a heap number in new space and store
  // the value in there. If that fails, call the runtime system.
  Label done;
  __ SmiUntag(reg);
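  // The tagged value overflowed, so after untagging only the sign bit is
  // wrong; flip it to recover the original integer.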
  __ xor_(reg, 0x80000000);
  __ cvtsi2sd(xmm0, Operand(reg));
  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
    __ jmp(&done, Label::kNear);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);

  // TODO(3095996): Put a valid pointer value in the stack slot where the result
  // register is stored, as this register is in the pointer map, but contains an
  // integer value.
  __ StoreToSafepointRegisterSlot(reg, Immediate(0));
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
  if (!reg.is(eax)) __ mov(reg, eax);

  // Done. Put the value in xmm0 into the value of the allocated heap
  // number.
  __ bind(&done);
  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
  __ StoreToSafepointRegisterSlot(reg, reg);
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD: public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
   private:
    LNumberTagD* instr_;
  };

  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
  Register reg = ToRegister(instr->result());
  Register tmp = ToRegister(instr->TempAt(0));

  DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
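  // Allocate the heap number inline when possible; otherwise (or if the
  // inline allocation fails) the deferred code calls the runtime.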
  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ Set(reg, Immediate(0));

  PushSafepointRegistersScope scope(this);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(instr->pointer_map(), 0,
                               Safepoint::kNoDeoptimizationIndex);
  __ StoreToSafepointRegisterSlot(reg, eax);
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
  __ SmiTag(ToRegister(input));
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
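  // If the input is not known to be a smi, deoptimize when the tag bit is
  // set.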
  if (instr->needs_check()) {
    __ test(ToRegister(input), Immediate(kSmiTagMask));
    DeoptimizeIf(not_zero, instr->environment());
  }
  __ SmiUntag(ToRegister(input));
}


void LCodeGen::EmitNumberUntagD(Register input_reg,
                                XMMRegister result_reg,
                                bool deoptimize_on_undefined,
                                LEnvironment* env) {
  Label load_smi, done;

  // Smi check.
  __ JumpIfSmi(input_reg, &load_smi, Label::kNear);

  // Heap number map check.
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
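  // A non-heap-number either deoptimizes or, when undefined is permitted,
  // is converted to NaN.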
  if (deoptimize_on_undefined) {
    DeoptimizeIf(not_equal, env);
  } else {
    Label heap_number;
    __ j(equal, &heap_number, Label::kNear);

    __ cmp(input_reg, factory()->undefined_value());
    DeoptimizeIf(not_equal, env);

    // Convert undefined to NaN.
    ExternalReference nan =
        ExternalReference::address_of_canonical_non_hole_nan();
    __ movdbl(result_reg, Operand::StaticVariable(nan));
    __ jmp(&done, Label::kNear);

    __ bind(&heap_number);
  }
  // Heap number to XMM conversion.
  __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ jmp(&done, Label::kNear);

  // Smi to XMM conversion
  __ bind(&load_smi);
  __ SmiUntag(input_reg);  // Untag smi before converting to float.
  __ cvtsi2sd(result_reg, Operand(input_reg));
  __ SmiTag(input_reg);  // Retag smi.
  __ bind(&done);
}


class DeferredTaggedToI: public LDeferredCode {
 public:
  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
      : LDeferredCode(codegen), instr_(instr) { }
  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
 private:
  LTaggedToI* instr_;
};


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Label done, heap_number;
  Register input_reg = ToRegister(instr->InputAt(0));

  // Heap number map check.
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());

  if (instr->truncating()) {
    __ j(equal, &heap_number, Label::kNear);
    // Check for undefined. Undefined is converted to zero for truncating
    // conversions.
    __ cmp(input_reg, factory()->undefined_value());
    DeoptimizeIf(not_equal, instr->environment());
    __ mov(input_reg, 0);
    __ jmp(&done, Label::kNear);

    __ bind(&heap_number);
    if (CpuFeatures::IsSupported(SSE3)) {
      CpuFeatures::Scope scope(SSE3);
      Label convert;
      // Use more powerful conversion when sse3 is available.
      // Load x87 register with heap number.
      __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
      // Get exponent alone and check for too-big exponent.
      __ mov(input_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
      __ and_(input_reg, HeapNumber::kExponentMask);
      const uint32_t kTooBigExponent =
          (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
      __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
      __ j(less, &convert, Label::kNear);
      // Pop FPU stack before deoptimizing.
      __ ffree(0);
      __ fincstp();
      DeoptimizeIf(no_condition, instr->environment());

      // Reserve space for 64 bit answer.
      __ bind(&convert);
      __ sub(Operand(esp), Immediate(kDoubleSize));
      // Do conversion, which cannot fail because we checked the exponent.
      __ fisttp_d(Operand(esp, 0));
      __ mov(input_reg, Operand(esp, 0));  // Low word of answer is the result.
      __ add(Operand(esp), Immediate(kDoubleSize));
    } else {
      XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
      __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
      __ cvttsd2si(input_reg, Operand(xmm0));
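      // cvttsd2si yields 0x80000000 when the conversion fails. That is
      // only a valid result if the input really was kMinInt; otherwise
      // the value overflowed (or was NaN) and we deoptimize below.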
      __ cmp(input_reg, 0x80000000u);
      __ j(not_equal, &done);
      // Check if the input was 0x80000000 (kMinInt).
      // If not, then we got an overflow and we deoptimize.
      ExternalReference min_int = ExternalReference::address_of_min_int();
      __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
      __ ucomisd(xmm_temp, xmm0);
      DeoptimizeIf(not_equal, instr->environment());
      DeoptimizeIf(parity_even, instr->environment());  // NaN.
    }
  } else {
    // Deoptimize if we don't have a heap number.
    DeoptimizeIf(not_equal, instr->environment());

    XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
    __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
    __ cvttsd2si(input_reg, Operand(xmm0));
    __ cvtsi2sd(xmm_temp, Operand(input_reg));
    __ ucomisd(xmm0, xmm_temp);
    DeoptimizeIf(not_equal, instr->environment());
    DeoptimizeIf(parity_even, instr->environment());  // NaN.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ test(input_reg, Operand(input_reg));
      __ j(not_zero, &done);
      __ movmskpd(input_reg, xmm0);
      __ and_(input_reg, 1);
      DeoptimizeIf(not_zero, instr->environment());
    }
  }
  __ bind(&done);
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  ASSERT(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);

  // Smi check.
  __ JumpIfNotSmi(input_reg, deferred->entry());

  // Smi to int32 conversion
  __ SmiUntag(input_reg);  // Untag smi.

  __ bind(deferred->exit());
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  XMMRegister result_reg = ToDoubleRegister(result);

  EmitNumberUntagD(input_reg, result_reg,
                   instr->hydrogen()->deoptimize_on_undefined(),
                   instr->environment());
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsDoubleRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsRegister());

  XMMRegister input_reg = ToDoubleRegister(input);
  Register result_reg = ToRegister(result);

  if (instr->truncating()) {
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    __ cvttsd2si(result_reg, Operand(input_reg));
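    // cvttsd2si produces 0x80000000 when the input cannot be represented
    // as an int32; in that case fall through to the slow conversion below.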
    __ cmp(result_reg, 0x80000000u);
    if (CpuFeatures::IsSupported(SSE3)) {
      // This will deoptimize if the exponent of the input is out of range.
      CpuFeatures::Scope scope(SSE3);
      Label convert, done;
      __ j(not_equal, &done, Label::kNear);
      __ sub(Operand(esp), Immediate(kDoubleSize));
      __ movdbl(Operand(esp, 0), input_reg);
      // Get exponent alone and check for too-big exponent.
      __ mov(result_reg, Operand(esp, sizeof(int32_t)));
      __ and_(result_reg, HeapNumber::kExponentMask);
      const uint32_t kTooBigExponent =
          (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
      __ cmp(Operand(result_reg), Immediate(kTooBigExponent));
      __ j(less, &convert, Label::kNear);
      __ add(Operand(esp), Immediate(kDoubleSize));
      DeoptimizeIf(no_condition, instr->environment());
      __ bind(&convert);
      // Do conversion, which cannot fail because we checked the exponent.
      __ fld_d(Operand(esp, 0));
      __ fisttp_d(Operand(esp, 0));
      __ mov(result_reg, Operand(esp, 0));  // Low word of answer is the result.
      __ add(Operand(esp), Immediate(kDoubleSize));
      __ bind(&done);
    } else {
      Label done;
      Register temp_reg = ToRegister(instr->TempAt(0));
      XMMRegister xmm_scratch = xmm0;

      // If cvttsd2si succeeded, we're done. Otherwise, we attempt
      // manual conversion.
      __ j(not_equal, &done, Label::kNear);

      // Get high 32 bits of the input in result_reg and temp_reg.
      __ pshufd(xmm_scratch, input_reg, 1);
      __ movd(Operand(temp_reg), xmm_scratch);
      __ mov(result_reg, temp_reg);

      // Prepare negation mask in temp_reg.
      __ sar(temp_reg, kBitsPerInt - 1);

      // Extract the exponent from result_reg and subtract adjusted
      // bias from it. The adjustment is selected in a way such that
      // when the difference is zero, the answer is in the low 32 bits
      // of the input, otherwise a shift has to be performed.
      __ shr(result_reg, HeapNumber::kExponentShift);
      __ and_(result_reg,
              HeapNumber::kExponentMask >> HeapNumber::kExponentShift);
      __ sub(Operand(result_reg),
             Immediate(HeapNumber::kExponentBias +
                       HeapNumber::kExponentBits +
                       HeapNumber::kMantissaBits));
      // Don't handle big (> kMantissaBits + kExponentBits == 63) or
      // special exponents.
      DeoptimizeIf(greater, instr->environment());

      // Zero out the sign and the exponent in the input (by shifting
      // it to the left) and restore the implicit mantissa bit,
      // i.e. convert the input to unsigned int64 shifted left by
      // kExponentBits.
      ExternalReference minus_zero = ExternalReference::address_of_minus_zero();
      // Minus zero has the most significant bit set and the other
      // bits cleared.
      __ movdbl(xmm_scratch, Operand::StaticVariable(minus_zero));
      __ psllq(input_reg, HeapNumber::kExponentBits);
      __ por(input_reg, xmm_scratch);

      // Get the amount to shift the input right in xmm_scratch.
      __ neg(result_reg);
      __ movd(xmm_scratch, Operand(result_reg));

      // Shift the input right and extract low 32 bits.
      __ psrlq(input_reg, xmm_scratch);
      __ movd(Operand(result_reg), input_reg);

      // Use the prepared mask in temp_reg to negate the result if necessary.
      __ xor_(result_reg, Operand(temp_reg));
      __ sub(result_reg, Operand(temp_reg));
      __ bind(&done);
    }
  } else {
    Label done;
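    // Convert to int32 and back; deoptimize unless the round trip
    // reproduces the input exactly (and the input is not NaN).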
    __ cvttsd2si(result_reg, Operand(input_reg));
    __ cvtsi2sd(xmm0, Operand(result_reg));
    __ ucomisd(xmm0, input_reg);
    DeoptimizeIf(not_equal, instr->environment());
    DeoptimizeIf(parity_even, instr->environment());  // NaN.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // The integer converted back is equal to the original. We
      // only have to test if we got -0 as an input.
      __ test(result_reg, Operand(result_reg));
      __ j(not_zero, &done, Label::kNear);
      __ movmskpd(result_reg, input_reg);
      // Bit 0 contains the sign of the double in input_reg.
      // If input was positive, we are ok and return 0, otherwise
      // deoptimize.
      __ and_(result_reg, 1);
      DeoptimizeIf(not_zero, instr->environment());
    }
    __ bind(&done);
  }
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->InputAt(0);
  __ test(ToOperand(input), Immediate(kSmiTagMask));
  DeoptimizeIf(not_zero, instr->environment());
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  LOperand* input = instr->InputAt(0);
  __ test(ToOperand(input), Immediate(kSmiTagMask));
  DeoptimizeIf(zero, instr->environment());
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));

  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
            static_cast<int8_t>(first));

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(not_equal, instr->environment());
    } else {
      DeoptimizeIf(below, instr->environment());
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
                static_cast<int8_t>(last));
        DeoptimizeIf(above, instr->environment());
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

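    // A power-of-two mask selects a single bit, so a test instruction
    // suffices; otherwise mask the instance type and compare it with the
    // tag.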
    if (IsPowerOf2(mask)) {
      ASSERT(tag == 0 || IsPowerOf2(tag));
      __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
      DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
    } else {
      __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
      __ and_(temp, mask);
      __ cmpb(Operand(temp), tag);
      DeoptimizeIf(not_equal, instr->environment());
    }
  }
}


void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
  ASSERT(instr->InputAt(0)->IsRegister());
  Operand operand = ToOperand(instr->InputAt(0));
  __ cmp(operand, instr->hydrogen()->target());
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoCheckMap(LCheckMap* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);
  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
         instr->hydrogen()->map());
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register value_reg = ToRegister(instr->result());
  __ ClampUint8(value_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register input_reg = ToRegister(instr->unclamped());
  Label is_smi, done, heap_number;

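  // The tagged input is a smi, a heap number, or undefined; anything else
  // deoptimizes. Each case is clamped to the 0..255 range below.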
  __ JumpIfSmi(input_reg, &is_smi);

  // Check for heap number
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  __ j(equal, &heap_number, Label::kNear);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ cmp(input_reg, factory()->undefined_value());
  DeoptimizeIf(not_equal, instr->environment());
  __ mov(input_reg, 0);
  __ jmp(&done, Label::kNear);

  // Heap number
  __ bind(&heap_number);
  __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(xmm0, xmm1, input_reg);
  __ jmp(&done, Label::kNear);

  // smi
  __ bind(&is_smi);
  __ SmiUntag(input_reg);
  __ ClampUint8(input_reg);

  __ bind(&done);
}


void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
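  // Objects in new space may move during GC, so reference them through a
  // global property cell instead of embedding the pointer in the code.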
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<JSGlobalPropertyCell> cell =
        isolate()->factory()->NewJSGlobalPropertyCell(object);
    __ mov(result, Operand::Cell(cell));
  } else {
    __ mov(result, object);
  }
}


void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
  Register reg = ToRegister(instr->TempAt(0));

  Handle<JSObject> holder = instr->holder();
  Handle<JSObject> current_prototype = instr->prototype();

  // Load prototype object.
  LoadHeapObject(reg, current_prototype);

  // Check prototype maps up to the holder.
  while (!current_prototype.is_identical_to(holder)) {
    __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
           Handle<Map>(current_prototype->map()));
    DeoptimizeIf(not_equal, instr->environment());
    current_prototype =
        Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
    // Load next prototype object.
    LoadHeapObject(reg, current_prototype);
  }

  // Check the holder map.
  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
         Handle<Map>(current_prototype->map()));
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  // Set up the parameters to the stub/runtime call.
  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ push(Immediate(instr->hydrogen()->constant_elements()));

  // Pick the right runtime function or stub to call.
  int length = instr->hydrogen()->length();
  if (instr->hydrogen()->IsCopyOnWrite()) {
    ASSERT(instr->hydrogen()->depth() == 1);
    FastCloneShallowArrayStub::Mode mode =
        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
  } else {
    FastCloneShallowArrayStub::Mode mode =
        FastCloneShallowArrayStub::CLONE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}


void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  // Set up the parameters to the stub/runtime call.
  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ push(Immediate(instr->hydrogen()->constant_properties()));
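  // Encode the literal flags (fast elements, has function) as a smi
  // argument for the runtime call.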
  int flags = instr->hydrogen()->fast_elements()
      ? ObjectLiteral::kFastElements
      : ObjectLiteral::kNoFlags;
  flags |= instr->hydrogen()->has_function()
      ? ObjectLiteral::kHasFunction
      : ObjectLiteral::kNoFlags;
  __ push(Immediate(Smi::FromInt(flags)));

  // Pick the right runtime function to call.
  if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
  } else {
    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
  }
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(eax));
  __ push(eax);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  Label materialized;
  // Registers will be used as follows:
  // edi = JS function.
  // ecx = literals array.
  // ebx = regexp literal.
  // eax = regexp literal clone.
  // esi = context.
  __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
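  // Load the regexp literal from the literals array; undefined means it
  // has not been materialized yet.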
  int literal_offset = FixedArray::kHeaderSize +
      instr->hydrogen()->literal_index() * kPointerSize;
  __ mov(ebx, FieldOperand(ecx, literal_offset));
  __ cmp(ebx, factory()->undefined_value());
  __ j(not_equal, &materialized, Label::kNear);

  // Create regexp literal using runtime function
  // Result will be in eax.
  __ push(ecx);
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ push(Immediate(instr->hydrogen()->pattern()));
  __ push(Immediate(instr->hydrogen()->flags()));
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(ebx, eax);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ push(ebx);
  __ push(Immediate(Smi::FromInt(size)));
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(ebx);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll copy loop once for better throughput).
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ mov(edx, FieldOperand(ebx, i));
    __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
    __ mov(FieldOperand(eax, i), edx);
    __ mov(FieldOperand(eax, i + kPointerSize), ecx);
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ mov(edx, FieldOperand(ebx, size - kPointerSize));
    __ mov(FieldOperand(eax, size - kPointerSize), edx);
  }
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && shared_info->num_literals() == 0) {
    FastNewClosureStub stub(
        shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
    __ push(Immediate(shared_info));
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
    __ push(Immediate(shared_info));
    __ push(Immediate(pretenure
                      ? factory()->true_value()
                      : factory()->false_value()));
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  LOperand* input = instr->InputAt(1);
  if (input->IsConstantOperand()) {
    __ push(ToImmediate(input));
  } else {
    __ push(ToOperand(input));
  }
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition final_branch_condition = EmitTypeofIs(true_label,
                                                  false_label,
                                                  input,
                                                  instr->type_literal());

  EmitBranch(true_block, false_block, final_branch_condition);
}


Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name) {
  Condition final_branch_condition = no_condition;
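  // Compare the input against the requested typeof string; jumps to the
  // true and false labels are emitted directly, and the caller branches on
  // the returned condition.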
  if (type_name->Equals(heap()->number_symbol())) {
    __ JumpIfSmi(input, true_label);
    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
           factory()->heap_number_map());
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->string_symbol())) {
    __ JumpIfSmi(input, false_label);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    __ j(above_equal, false_label);
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = zero;

  } else if (type_name->Equals(heap()->boolean_symbol())) {
    __ cmp(input, factory()->true_value());
    __ j(equal, true_label);
    __ cmp(input, factory()->false_value());
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->undefined_symbol())) {
    __ cmp(input, factory()->undefined_value());
    __ j(equal, true_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = not_zero;

  } else if (type_name->Equals(heap()->function_symbol())) {
    STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
    __ JumpIfSmi(input, false_label);
    __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input);
    final_branch_condition = above_equal;

  } else if (type_name->Equals(heap()->object_symbol())) {
    __ JumpIfSmi(input, false_label);
    __ cmp(input, factory()->null_value());
    __ j(equal, true_label);
    __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
    __ j(below, false_label);
    __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ j(above, false_label);
    // Check for undetectable objects => false.
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = zero;

  } else {
    final_branch_condition = not_equal;
    __ jmp(false_label);
    // A dead branch instruction will be generated after this point.
  }

  return final_branch_condition;
}

void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp = ToRegister(instr->TempAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  EmitIsConstructCall(temp);
  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::EmitIsConstructCall(Register temp) {
  // Get the frame pointer for the calling frame.
  __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(not_equal, &check_frame_marker, Label::kNear);
  __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
         Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  // No code for lazy bailout instruction. Used to capture environment after a
  // call for populating the safepoint data with deoptimization data.
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  DeoptimizeIf(no_condition, instr->environment());
}


void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
  LOperand* obj = instr->object();
  LOperand* key = instr->key();
  __ push(ToOperand(obj));
  if (key->IsConstantOperand()) {
    __ push(ToImmediate(key));
  } else {
    __ push(ToOperand(key));
  }
  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  LEnvironment* env = instr->deoptimization_environment();
  RecordPosition(pointers->position());
  RegisterEnvironmentForDeoptimization(env);
  // Create safepoint generator that will also ensure enough space in the
  // reloc info for patching in deoptimization (since this is invoking a
  // builtin)
  SafepointGenerator safepoint_generator(this,
                                         pointers,
                                         env->deoptimization_index());
  __ push(Immediate(Smi::FromInt(strict_mode_flag())));
  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
}

void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  {
    PushSafepointRegistersScope scope(this);
    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
    RegisterLazyDeoptimization(
        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  }

  // The gap code includes the restoring of the safepoint registers.
  int pc = masm()->pc_offset();
  safepoints_.SetPcAfterGap(pc);
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck: public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
   private:
    LStackCheck* instr_;
  };

  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(above_equal, &done, Label::kNear);

    ASSERT(instr->context()->IsRegister());
    ASSERT(ToRegister(instr->context()).is(esi));
    StackCheckStub stub;
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    __ bind(&done);
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new DeferredStackCheck(this, instr);
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(below, deferred_stack_check->entry());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
  }
}

void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();
  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
                                   instr->SpilledDoubleRegisterArray());

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment);
  ASSERT(osr_pc_offset_ == -1);
  osr_pc_offset_ = masm()->pc_offset();
}


void LCodeGen::DoIn(LIn* instr) {
  LOperand* obj = instr->object();
  LOperand* key = instr->key();
  if (key->IsConstantOperand()) {
    __ push(ToImmediate(key));
  } else {
    __ push(ToOperand(key));
  }
  if (obj->IsConstantOperand()) {
    __ push(ToImmediate(obj));
  } else {
    __ push(ToOperand(obj));
  }
  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  LEnvironment* env = instr->deoptimization_environment();
  RecordPosition(pointers->position());
  RegisterEnvironmentForDeoptimization(env);
  // Create safepoint generator that will also ensure enough space in the
  // reloc info for patching in deoptimization (since this is invoking a
  // builtin)
  SafepointGenerator safepoint_generator(this,
                                         pointers,
                                         env->deoptimization_index());
  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32