v8/src/ia32/lithium-codegen-ia32.cc
svenpanne@chromium.org 0af76e96ea Simplified LCodeGen::GetNextEmittedBlock and LCodeGen::EmitGoto a bit.
GetNextEmittedBlock is always called with the same argument (an instance
variable), so let's remove it. In EmitGoto, avoid assignment to an argument.

This CL is split off another CL for easier reviewing.

Review URL: https://codereview.chromium.org/14246031

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@14349 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2013-04-19 12:02:12 +00:00


// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if defined(V8_TARGET_ARCH_IA32)
#include "ia32/lithium-codegen-ia32.h"
#include "ic.h"
#include "code-stubs.h"
#include "deoptimizer.h"
#include "stub-cache.h"
#include "codegen.h"
namespace v8 {
namespace internal {
static SaveFPRegsMode GetSaveFPRegsMode() {
// We don't need to save floating-point registers when generating the snapshot.
return CpuFeatures::IsSafeForSnapshot(SSE2) ? kSaveFPRegs : kDontSaveFPRegs;
}
// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
Safepoint::DeoptMode mode)
: codegen_(codegen),
pointers_(pointers),
deopt_mode_(mode) {}
virtual ~SafepointGenerator() { }
virtual void BeforeCall(int call_size) const {}
virtual void AfterCall() const {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
private:
LCodeGen* codegen_;
LPointerMap* pointers_;
Safepoint::DeoptMode deopt_mode_;
};
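// Convention used throughout V8 code generators: `__` abbreviates
// masm()-> so the emitted assembly reads as a sequence of mnemonics.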
#define __ masm()->
bool LCodeGen::GenerateCode() {
HPhase phase("Z_Code generation", chunk());
ASSERT(is_unused());
status_ = GENERATING;
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
support_aligned_spilled_doubles_ = info()->IsOptimizing();
dynamic_frame_alignment_ = info()->IsOptimizing() &&
((chunk()->num_double_slots() > 2 &&
!chunk()->graph()->is_recursive()) ||
!info()->osr_ast_id().IsNone());
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
GenerateJumpTable() &&
GenerateSafepointTable();
}
void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
if (FLAG_weak_embedded_maps_in_optimized_code) {
RegisterDependentCodeForEmbeddedMaps(code);
}
PopulateDeoptimizationData(code);
if (!info()->IsStub()) {
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
}
for (int i = 0 ; i < prototype_maps_.length(); i++) {
prototype_maps_.at(i)->AddDependentCode(
DependentCode::kPrototypeCheckGroup, code);
}
}
void LCodeGen::Abort(const char* reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
void LCodeGen::Comment(const char* format, ...) {
if (!FLAG_code_comments) return;
char buffer[4 * KB];
StringBuilder builder(buffer, ARRAY_SIZE(buffer));
va_list arguments;
va_start(arguments, format);
builder.AddFormattedList(format, arguments);
va_end(arguments);
// Copy the string before recording it in the assembler to avoid
// issues when the stack-allocated buffer goes out of scope.
size_t length = builder.position();
Vector<char> copy = Vector<char>::New(length + 1);
OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
masm()->RecordComment(copy.start());
}
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
if (info()->IsOptimizing()) {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ int3();
}
#endif
// Strict mode functions and builtins need to replace the receiver
// with undefined when called as functions (without an explicit
// receiver object). ecx is zero for method calls and non-zero for
// function calls.
if (!info_->is_classic_mode() || info_->is_native()) {
Label ok;
__ test(ecx, Operand(ecx));
__ j(zero, &ok, Label::kNear);
// +1 for return address.
int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
__ mov(Operand(esp, receiver_offset),
Immediate(isolate()->factory()->undefined_value()));
__ bind(&ok);
}
if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
// Move state of dynamic frame alignment into edx.
__ mov(edx, Immediate(kNoAlignmentPadding));
Label do_not_pad, align_loop;
STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
// Align esp + 4 to a multiple of 2 * kPointerSize.
__ test(esp, Immediate(kPointerSize));
__ j(not_zero, &do_not_pad, Label::kNear);
__ push(Immediate(0));
__ mov(ebx, esp);
__ mov(edx, Immediate(kAlignmentPaddingPushed));
// Copy arguments, receiver, and return address.
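// The loop below shifts the return address, receiver, and arguments down
// into the freshly pushed slot; the duplicate word left above them becomes
// the alignment padding and is marked with kAlignmentZapValue.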
__ mov(ecx, Immediate(scope()->num_parameters() + 2));
__ bind(&align_loop);
__ mov(eax, Operand(ebx, 1 * kPointerSize));
__ mov(Operand(ebx, 0), eax);
__ add(Operand(ebx), Immediate(kPointerSize));
__ dec(ecx);
__ j(not_zero, &align_loop, Label::kNear);
__ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
__ bind(&do_not_pad);
}
}
info()->set_prologue_offset(masm_->pc_offset());
if (NeedsEagerFrame()) {
ASSERT(!frame_is_built_);
frame_is_built_ = true;
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
__ push(esi); // Callee's context.
if (info()->IsStub()) {
__ push(Immediate(Smi::FromInt(StackFrame::STUB)));
} else {
__ push(edi); // Callee's JS function.
}
}
if (info()->IsOptimizing() &&
dynamic_frame_alignment_ &&
FLAG_debug_code) {
__ test(esp, Immediate(kPointerSize));
__ Assert(zero, "frame is expected to be aligned");
}
// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
ASSERT(slots != 0 || !info()->IsOptimizing());
if (slots > 0) {
if (slots == 1) {
if (dynamic_frame_alignment_) {
__ push(edx);
} else {
__ push(Immediate(kNoAlignmentPadding));
}
} else {
if (FLAG_debug_code) {
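// In debug builds, fill every reserved slot with kSlotsZapValue so that
// reads of uninitialized spill slots are easy to spot.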
__ sub(Operand(esp), Immediate(slots * kPointerSize));
__ push(eax);
__ mov(Operand(eax), Immediate(slots));
Label loop;
__ bind(&loop);
__ mov(MemOperand(esp, eax, times_4, 0),
Immediate(kSlotsZapValue));
__ dec(eax);
__ j(not_zero, &loop);
__ pop(eax);
} else {
__ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
// On Windows, you may not access the stack more than one page below
// the most recently mapped page. To make the allocated area randomly
// accessible, we write to each page in turn (the value is irrelevant).
const int kPageSize = 4 * KB;
for (int offset = slots * kPointerSize - kPageSize;
offset > 0;
offset -= kPageSize) {
__ mov(Operand(esp, offset), eax);
}
#endif
}
if (support_aligned_spilled_doubles_) {
Comment(";;; Store dynamic frame alignment tag for spilled doubles");
// Store dynamic frame alignment state in the first local.
int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
if (dynamic_frame_alignment_) {
__ mov(Operand(ebp, offset), edx);
} else {
__ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
}
}
}
if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
Comment(";;; Save clobbered callee double registers");
CpuFeatureScope scope(masm(), SSE2);
int count = 0;
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
while (!save_iterator.Done()) {
__ movdbl(MemOperand(esp, count * kDoubleSize),
XMMRegister::FromAllocationIndex(save_iterator.Current()));
save_iterator.Advance();
count++;
}
}
}
// Possibly allocate a local context.
int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is still in edi.
__ push(edi);
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoLazyDeopt);
// Context is returned in both eax and esi. It replaces the context
// passed to us. It's saved in the stack and kept live in esi.
__ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
// Copy parameters into context if necessary.
int num_parameters = scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
Variable* var = scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
// Load parameter from stack.
__ mov(eax, Operand(ebp, parameter_offset));
// Store it in the context.
int context_offset = Context::SlotOffset(var->index());
__ mov(Operand(esi, context_offset), eax);
// Update the write barrier. This clobbers eax and ebx.
__ RecordWriteContextSlot(esi,
context_offset,
eax,
ebx,
kDontSaveFPRegs);
}
}
Comment(";;; End allocate local context");
}
// Trace the call.
if (FLAG_trace && info()->IsOptimizing()) {
// We have not executed any compiled code yet, so esi still holds the
// incoming context.
__ CallRuntime(Runtime::kTraceEnter, 0);
}
return !is_aborted();
}
bool LCodeGen::GenerateBody() {
ASSERT(is_generating());
bool emit_instructions = true;
for (current_instruction_ = 0;
!is_aborted() && current_instruction_ < instructions_->length();
current_instruction_++) {
LInstruction* instr = instructions_->at(current_instruction_);
if (instr->IsLabel()) {
LLabel* label = LLabel::cast(instr);
emit_instructions = !label->HasReplacement();
}
if (emit_instructions) {
if (FLAG_code_comments) {
HValue* hydrogen = instr->hydrogen_value();
if (hydrogen != NULL) {
if (hydrogen->IsChange()) {
HValue* changed_value = HChange::cast(hydrogen)->value();
int use_id = 0;
const char* use_mnemo = "dead";
if (hydrogen->UseCount() >= 1) {
HValue* use_value = hydrogen->uses().value();
use_id = use_value->id();
use_mnemo = use_value->Mnemonic();
}
Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
current_instruction_, instr->Mnemonic(),
changed_value->id(), changed_value->Mnemonic(),
use_id, use_mnemo);
} else {
Comment(";;; @%d: %s. <#%d>", current_instruction_,
instr->Mnemonic(), hydrogen->id());
}
} else {
Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
}
}
if (!CpuFeatures::IsSupported(SSE2)) {
FlushX87StackIfNecessary(instr);
}
instr->CompileToNative(this);
if (!CpuFeatures::IsSupported(SSE2)) {
ASSERT(!instr->HasDoubleRegisterResult() || x87_stack_depth_ == 1);
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ VerifyX87StackDepth(x87_stack_depth_);
}
}
}
}
EnsureSpaceForLazyDeopt();
return !is_aborted();
}
bool LCodeGen::GenerateJumpTable() {
Label needs_frame_not_call;
Label needs_frame_is_call;
for (int i = 0; i < jump_table_.length(); i++) {
__ bind(&jump_table_[i].label);
Address entry = jump_table_[i].address;
bool is_lazy_deopt = jump_table_[i].is_lazy_deopt;
Deoptimizer::BailoutType type =
is_lazy_deopt ? Deoptimizer::LAZY : Deoptimizer::EAGER;
int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
if (id == Deoptimizer::kNotDeoptimizationEntry) {
Comment(";;; jump table entry %d.", i);
} else {
Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
}
if (jump_table_[i].needs_frame) {
__ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
if (is_lazy_deopt) {
if (needs_frame_is_call.is_bound()) {
__ jmp(&needs_frame_is_call);
} else {
__ bind(&needs_frame_is_call);
__ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
ASSERT(info()->IsStub());
__ push(Immediate(Smi::FromInt(StackFrame::STUB)));
// Push a PC inside the function so that the deopt code can find where
// the deopt comes from. It doesn't have to be the precise return
// address of a "calling" LAZY deopt, it only has to be somewhere
// inside the code body.
Label push_approx_pc;
__ call(&push_approx_pc);
__ bind(&push_approx_pc);
// Push the continuation which was stashed where the ebp should
// be. Replace it with the saved ebp.
__ push(MemOperand(esp, 3 * kPointerSize));
__ mov(MemOperand(esp, 4 * kPointerSize), ebp);
__ lea(ebp, MemOperand(esp, 4 * kPointerSize));
__ ret(0); // Call the continuation without clobbering registers.
}
} else {
if (needs_frame_not_call.is_bound()) {
__ jmp(&needs_frame_not_call);
} else {
__ bind(&needs_frame_not_call);
__ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
ASSERT(info()->IsStub());
__ push(Immediate(Smi::FromInt(StackFrame::STUB)));
// Push the continuation which was stashed where the ebp should
// be. Replace it with the saved ebp.
__ push(MemOperand(esp, 2 * kPointerSize));
__ mov(MemOperand(esp, 3 * kPointerSize), ebp);
__ lea(ebp, MemOperand(esp, 3 * kPointerSize));
__ ret(0); // Call the continuation without clobbering registers.
}
}
} else {
if (is_lazy_deopt) {
__ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
__ jmp(entry, RelocInfo::RUNTIME_ENTRY);
}
}
}
return !is_aborted();
}
bool LCodeGen::GenerateDeferredCode() {
ASSERT(is_generating());
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
if (NeedsDeferredFrame()) {
Comment(";;; Deferred build frame @%d: %s.",
code->instruction_index(),
code->instr()->Mnemonic());
ASSERT(!frame_is_built_);
ASSERT(info()->IsStub());
frame_is_built_ = true;
// Build the frame in such a way that esi isn't trashed.
__ push(ebp); // Caller's frame pointer.
__ push(Operand(ebp, StandardFrameConstants::kContextOffset));
__ push(Immediate(Smi::FromInt(StackFrame::STUB)));
__ lea(ebp, Operand(esp, 2 * kPointerSize));
}
Comment(";;; Deferred code @%d: %s.",
code->instruction_index(),
code->instr()->Mnemonic());
code->Generate();
if (NeedsDeferredFrame()) {
Comment(";;; Deferred destroy frame @%d: %s.",
code->instruction_index(),
code->instr()->Mnemonic());
ASSERT(frame_is_built_);
frame_is_built_ = false;
__ mov(esp, ebp);
__ pop(ebp);
}
__ jmp(code->exit());
}
}
// Deferred code is the last part of the instruction sequence. Mark
// the generated code as done unless we bailed out.
if (!is_aborted()) status_ = DONE;
return !is_aborted();
}
bool LCodeGen::GenerateSafepointTable() {
ASSERT(is_done());
if (!info()->IsStub()) {
// For lazy deoptimization we need space to patch a call after every call.
// Ensure there is always space for such patching, even if the code ends
// in a call.
int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
while (masm()->pc_offset() < target_offset) {
masm()->nop();
}
}
safepoints_.Emit(masm(), GetStackSlotCount());
return !is_aborted();
}
Register LCodeGen::ToRegister(int index) const {
return Register::FromAllocationIndex(index);
}
XMMRegister LCodeGen::ToDoubleRegister(int index) const {
return XMMRegister::FromAllocationIndex(index);
}
bool LCodeGen::IsX87TopOfStack(LOperand* op) const {
return op->IsDoubleRegister();
}
void LCodeGen::ReadX87Operand(Operand dst) {
ASSERT(x87_stack_depth_ == 1);
__ fst_d(dst);
}
void LCodeGen::PushX87DoubleOperand(Operand src) {
ASSERT(x87_stack_depth_ == 0);
x87_stack_depth_++;
__ fld_d(src);
}
void LCodeGen::PushX87FloatOperand(Operand src) {
ASSERT(x87_stack_depth_ == 0);
x87_stack_depth_++;
__ fld_s(src);
}
void LCodeGen::PopX87() {
ASSERT(x87_stack_depth_ == 1);
x87_stack_depth_--;
__ fstp(0);
}
void LCodeGen::CurrentInstructionReturnsX87Result() {
ASSERT(x87_stack_depth_ <= 1);
if (x87_stack_depth_ == 0) {
x87_stack_depth_ = 1;
}
}
void LCodeGen::FlushX87StackIfNecessary(LInstruction* instr) {
if (x87_stack_depth_ > 0) {
if ((instr->ClobbersDoubleRegisters() ||
instr->HasDoubleRegisterResult()) &&
!instr->HasDoubleRegisterInput()) {
PopX87();
}
}
}
Register LCodeGen::ToRegister(LOperand* op) const {
ASSERT(op->IsRegister());
return ToRegister(op->index());
}
XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
ASSERT(op->IsDoubleRegister());
return ToDoubleRegister(op->index());
}
int LCodeGen::ToInteger32(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
return constant->Integer32Value();
}
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
return constant->handle();
}
double LCodeGen::ToDouble(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
ASSERT(constant->HasDoubleValue());
return constant->DoubleValue();
}
bool LCodeGen::IsInteger32(LConstantOperand* op) const {
return chunk_->LookupLiteralRepresentation(op).IsInteger32();
}
Operand LCodeGen::ToOperand(LOperand* op) const {
if (op->IsRegister()) return Operand(ToRegister(op));
if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
return Operand(ebp, StackSlotOffset(op->index()));
}
Operand LCodeGen::HighOperand(LOperand* op) {
ASSERT(op->IsDoubleStackSlot());
return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
}
void LCodeGen::WriteTranslation(LEnvironment* environment,
Translation* translation,
int* pushed_arguments_index,
int* pushed_arguments_count) {
if (environment == NULL) return;
// The translation includes one command per value in the environment.
int translation_size = environment->values()->length();
// The output frame height does not include the parameters.
int height = translation_size - environment->parameter_count();
// Function parameters are arguments to the outermost environment. The
// arguments index points to the first element of a sequence of tagged
// values on the stack that represent the arguments. This needs to be
// kept in sync with the LArgumentsElements implementation.
*pushed_arguments_index = -environment->parameter_count();
*pushed_arguments_count = environment->parameter_count();
WriteTranslation(environment->outer(),
translation,
pushed_arguments_index,
pushed_arguments_count);
bool has_closure_id = !info()->closure().is_null() &&
*info()->closure() != *environment->closure();
int closure_id = has_closure_id
? DefineDeoptimizationLiteral(environment->closure())
: Translation::kSelfLiteralId;
switch (environment->frame_type()) {
case JS_FUNCTION:
translation->BeginJSFrame(environment->ast_id(), closure_id, height);
break;
case JS_CONSTRUCT:
translation->BeginConstructStubFrame(closure_id, translation_size);
break;
case JS_GETTER:
ASSERT(translation_size == 1);
ASSERT(height == 0);
translation->BeginGetterStubFrame(closure_id);
break;
case JS_SETTER:
ASSERT(translation_size == 2);
ASSERT(height == 0);
translation->BeginSetterStubFrame(closure_id);
break;
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
case STUB:
translation->BeginCompiledStubFrame();
break;
default:
UNREACHABLE();
}
// Inlined frames which push their arguments cause the index to be
// bumped and another stack area to be used for materialization;
// otherwise the actual argument values are unknown for inlined frames.
bool arguments_known = true;
int arguments_index = *pushed_arguments_index;
int arguments_count = *pushed_arguments_count;
if (environment->entry() != NULL) {
arguments_known = environment->entry()->arguments_pushed();
arguments_index = arguments_index < 0
? GetStackSlotCount() : arguments_index + arguments_count;
arguments_count = environment->entry()->arguments_count() + 1;
if (environment->entry()->arguments_pushed()) {
*pushed_arguments_index = arguments_index;
*pushed_arguments_count = arguments_count;
}
}
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
// spilled_registers_ and spilled_double_registers_ are either
// both NULL or both set.
if (environment->spilled_registers() != NULL && value != NULL) {
if (value->IsRegister() &&
environment->spilled_registers()[value->index()] != NULL) {
translation->MarkDuplicate();
AddToTranslation(translation,
environment->spilled_registers()[value->index()],
environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i),
arguments_known,
arguments_index,
arguments_count);
} else if (
value->IsDoubleRegister() &&
environment->spilled_double_registers()[value->index()] != NULL) {
translation->MarkDuplicate();
AddToTranslation(
translation,
environment->spilled_double_registers()[value->index()],
false,
false,
arguments_known,
arguments_index,
arguments_count);
}
}
AddToTranslation(translation,
value,
environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i),
arguments_known,
arguments_index,
arguments_count);
}
}
void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32,
bool arguments_known,
int arguments_index,
int arguments_count) {
if (op == NULL) {
// TODO(twuerthinger): Introduce marker operands to indicate that this value
// is not present and must be reconstructed from the deoptimizer. Currently
// this is only used for the arguments object.
translation->StoreArgumentsObject(
arguments_known, arguments_index, arguments_count);
} else if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
} else if (is_uint32) {
translation->StoreUint32StackSlot(op->index());
} else {
translation->StoreInt32StackSlot(op->index());
}
} else if (op->IsDoubleStackSlot()) {
translation->StoreDoubleStackSlot(op->index());
} else if (op->IsArgument()) {
ASSERT(is_tagged);
int src_index = GetStackSlotCount() + op->index();
translation->StoreStackSlot(src_index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
translation->StoreRegister(reg);
} else if (is_uint32) {
translation->StoreUint32Register(reg);
} else {
translation->StoreInt32Register(reg);
}
} else if (op->IsDoubleRegister()) {
XMMRegister reg = ToDoubleRegister(op);
translation->StoreDoubleRegister(reg);
} else if (op->IsConstantOperand()) {
HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
int src_index = DefineDeoptimizationLiteral(constant->handle());
translation->StoreLiteral(src_index);
} else {
UNREACHABLE();
}
}
void LCodeGen::CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
SafepointMode safepoint_mode) {
ASSERT(instr != NULL);
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
__ call(code, mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode);
// Signal that we don't inline smi code before these stubs in the
// optimizing code generator.
if (code->kind() == Code::BINARY_OP_IC ||
code->kind() == Code::COMPARE_IC) {
__ nop();
}
}
void LCodeGen::CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr) {
CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}
void LCodeGen::CallRuntime(const Runtime::Function* fun,
int argc,
LInstruction* instr) {
ASSERT(instr != NULL);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
__ CallRuntime(fun, argc);
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
ASSERT(info()->is_calling());
}
void LCodeGen::LoadContextFromDeferred(LOperand* context) {
if (context->IsRegister()) {
if (!ToRegister(context).is(esi)) {
__ mov(esi, ToRegister(context));
}
} else if (context->IsStackSlot()) {
__ mov(esi, ToOperand(context));
} else if (context->IsConstantOperand()) {
HConstant* constant =
chunk_->LookupConstant(LConstantOperand::cast(context));
__ LoadHeapObject(esi, Handle<Context>::cast(constant->handle()));
} else {
UNREACHABLE();
}
}
void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr,
LOperand* context) {
LoadContextFromDeferred(context);
__ CallRuntimeSaveDoubles(id);
RecordSafepointWithRegisters(
instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
ASSERT(info()->is_calling());
}
void LCodeGen::RegisterEnvironmentForDeoptimization(
LEnvironment* environment, Safepoint::DeoptMode mode) {
if (!environment->HasBeenRegistered()) {
// Physical stack frame layout:
// -x ............. -4 0 ..................................... y
// [incoming arguments] [spill slots] [pushed outgoing arguments]
// Layout of the environment:
// 0 ..................................................... size-1
// [parameters] [locals] [expression stack including arguments]
// Layout of the translation:
// 0 ........................................................ size - 1 + 4
// [expression stack including arguments] [locals] [4 words] [parameters]
// |>------------ translation_size ------------<|
int frame_count = 0;
int jsframe_count = 0;
int args_index = 0;
int args_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
if (e->frame_type() == JS_FUNCTION) {
++jsframe_count;
}
}
Translation translation(&translations_, frame_count, jsframe_count, zone());
WriteTranslation(environment, &translation, &args_index, &args_count);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();
environment->Register(deoptimization_index,
translation.index(),
(mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
deoptimizations_.Add(environment, zone());
}
}
void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
// It's an error to deoptimize with the x87 fp stack in use.
ASSERT(x87_stack_depth_ == 0);
int id = environment->deoptimization_index();
ASSERT(info()->IsOptimizing() || info()->IsStub());
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
Address entry =
Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
Abort("bailout was not prepared");
return;
}
if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
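// Stress mode: a per-function counter (a Smi on the SharedFunctionInfo)
// is decremented at each potential deopt site; when it reaches zero it is
// reset to FLAG_deopt_every_n_times and the deoptimization is forced
// unconditionally.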
Handle<SharedFunctionInfo> shared(info()->shared_info());
Label no_deopt;
__ pushfd();
__ push(eax);
__ push(ebx);
__ mov(ebx, shared);
__ mov(eax,
FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset));
__ sub(Operand(eax), Immediate(Smi::FromInt(1)));
__ j(not_zero, &no_deopt, Label::kNear);
if (FLAG_trap_on_deopt) __ int3();
__ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
__ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset),
eax);
__ pop(ebx);
__ pop(eax);
__ popfd();
__ jmp(entry, RelocInfo::RUNTIME_ENTRY);
__ bind(&no_deopt);
__ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset),
eax);
__ pop(ebx);
__ pop(eax);
__ popfd();
}
if (FLAG_trap_on_deopt) {
Label done;
if (cc != no_condition) {
__ j(NegateCondition(cc), &done, Label::kNear);
}
__ int3();
__ bind(&done);
}
ASSERT(info()->IsStub() || frame_is_built_);
bool needs_lazy_deopt = info()->IsStub();
if (cc == no_condition && frame_is_built_) {
if (needs_lazy_deopt) {
__ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
__ jmp(entry, RelocInfo::RUNTIME_ENTRY);
}
} else {
// We often have several deopts to the same entry; reuse the last
// jump table entry if this is the case.
if (jump_table_.is_empty() ||
jump_table_.last().address != entry ||
jump_table_.last().needs_frame != !frame_is_built_ ||
jump_table_.last().is_lazy_deopt != needs_lazy_deopt) {
JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
jump_table_.Add(table_entry, zone());
}
if (cc == no_condition) {
__ jmp(&jump_table_.last().label);
} else {
__ j(cc, &jump_table_.last().label);
}
}
}
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
ZoneList<Handle<Map> > maps(1, zone());
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT &&
it.rinfo()->target_object()->IsMap()) {
Handle<Map> map(Map::cast(it.rinfo()->target_object()));
if (map->CanTransition()) {
maps.Add(map, zone());
}
}
}
#ifdef VERIFY_HEAP
// This disables verification of weak embedded maps after full GC.
// AddDependentCode can cause a GC, which would observe the state where
// this code is not yet in the dependent code lists of the embedded maps.
NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
#endif
for (int i = 0; i < maps.length(); i++) {
maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
}
}
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
int length = deoptimizations_.length();
if (length == 0) return;
Handle<DeoptimizationInputData> data =
factory()->NewDeoptimizationInputData(length, TENURED);
Handle<ByteArray> translations =
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
for (int i = 0; i < deoptimization_literals_.length(); i++) {
literals->set(i, *deoptimization_literals_[i]);
}
data->SetLiteralArray(*literals);
data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
// Populate the deoptimization entries.
for (int i = 0; i < length; i++) {
LEnvironment* env = deoptimizations_[i];
data->SetAstId(i, env->ast_id());
data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
data->SetArgumentsStackHeight(i,
Smi::FromInt(env->arguments_stack_height()));
data->SetPc(i, Smi::FromInt(env->pc_offset()));
}
code->set_deoptimization_data(*data);
}
int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
int result = deoptimization_literals_.length();
for (int i = 0; i < deoptimization_literals_.length(); ++i) {
if (deoptimization_literals_[i].is_identical_to(literal)) return i;
}
deoptimization_literals_.Add(literal, zone());
return result;
}
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
ASSERT(deoptimization_literals_.length() == 0);
const ZoneList<Handle<JSFunction> >* inlined_closures =
chunk()->inlined_closures();
for (int i = 0, length = inlined_closures->length();
i < length;
i++) {
DefineDeoptimizationLiteral(inlined_closures->at(i));
}
inlined_function_count_ = deoptimization_literals_.length();
}
void LCodeGen::RecordSafepointWithLazyDeopt(
LInstruction* instr, SafepointMode safepoint_mode) {
if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
} else {
ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kLazyDeopt);
}
}
void LCodeGen::RecordSafepoint(
LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
Safepoint::DeoptMode deopt_mode) {
ASSERT(kind == expected_safepoint_kind_);
const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
Safepoint safepoint =
safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
for (int i = 0; i < operands->length(); i++) {
LOperand* pointer = operands->at(i);
if (pointer->IsStackSlot()) {
safepoint.DefinePointerSlot(pointer->index(), zone());
} else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
}
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
Safepoint::DeoptMode mode) {
RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
}
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
RecordSafepoint(&empty_pointers, mode);
}
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode) {
RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
}
void LCodeGen::RecordPosition(int position) {
if (position == RelocInfo::kNoPosition) return;
masm()->positions_recorder()->RecordPosition(position);
}
void LCodeGen::DoLabel(LLabel* label) {
Comment(";;; -------------------- B%d%s --------------------",
label->block_id(),
label->is_loop_header() ? " (loop header)" : "");
__ bind(label->label());
current_block_ = label->block_id();
DoGap(label);
}
void LCodeGen::DoParallelMove(LParallelMove* move) {
resolver_.Resolve(move);
}
void LCodeGen::DoGap(LGap* gap) {
for (int i = LGap::FIRST_INNER_POSITION;
i <= LGap::LAST_INNER_POSITION;
i++) {
LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
LParallelMove* move = gap->GetParallelMove(inner_pos);
if (move != NULL) DoParallelMove(move);
}
}
void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
DoGap(instr);
}
void LCodeGen::DoParameter(LParameter* instr) {
// Nothing to do.
}
void LCodeGen::DoCallStub(LCallStub* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->result()).is(eax));
switch (instr->hydrogen()->major_key()) {
case CodeStub::RegExpConstructResult: {
RegExpConstructResultStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::RegExpExec: {
RegExpExecStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::SubString: {
SubStringStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::NumberToString: {
NumberToStringStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringAdd: {
StringAddStub stub(NO_STRING_ADD_FLAGS);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringCompare: {
StringCompareStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::TranscendentalCache: {
TranscendentalCacheStub stub(instr->transcendental_type(),
TranscendentalCacheStub::TAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
default:
UNREACHABLE();
}
}
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
// Nothing to do.
}
void LCodeGen::DoModI(LModI* instr) {
if (instr->hydrogen()->HasPowerOf2Divisor()) {
Register dividend = ToRegister(instr->left());
int32_t divisor =
HConstant::cast(instr->hydrogen()->right())->Integer32Value();
if (divisor < 0) divisor = -divisor;
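// The remainder takes the sign of the dividend, so the divisor's sign is
// irrelevant. For a power-of-two |divisor| the remainder is
// sign(x) * (|x| & (|divisor| - 1)); e.g. -7 % 4 == -(7 & 3) == -3.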
Label positive_dividend, done;
__ test(dividend, Operand(dividend));
__ j(not_sign, &positive_dividend, Label::kNear);
__ neg(dividend);
__ and_(dividend, divisor - 1);
__ neg(dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ j(not_zero, &done, Label::kNear);
DeoptimizeIf(no_condition, instr->environment());
} else {
__ jmp(&done, Label::kNear);
}
__ bind(&positive_dividend);
__ and_(dividend, divisor - 1);
__ bind(&done);
} else {
Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
Register left_reg = ToRegister(instr->left());
Register right_reg = ToRegister(instr->right());
Register result_reg = ToRegister(instr->result());
ASSERT(left_reg.is(eax));
ASSERT(result_reg.is(edx));
ASSERT(!right_reg.is(eax));
ASSERT(!right_reg.is(edx));
// Check for x % 0.
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(right_reg, Operand(right_reg));
DeoptimizeIf(zero, instr->environment());
}
__ test(left_reg, Operand(left_reg));
__ j(zero, &remainder_eq_dividend, Label::kNear);
__ j(sign, &slow, Label::kNear);
__ test(right_reg, Operand(right_reg));
__ j(not_sign, &both_positive, Label::kNear);
// The sign of the divisor doesn't matter.
__ neg(right_reg);
__ bind(&both_positive);
// If the dividend is smaller than the nonnegative
// divisor, the dividend is the result.
__ cmp(left_reg, Operand(right_reg));
__ j(less, &remainder_eq_dividend, Label::kNear);
// Check if the divisor is a power-of-two integer.
Register scratch = ToRegister(instr->temp());
__ mov(scratch, right_reg);
__ sub(Operand(scratch), Immediate(1));
__ test(scratch, Operand(right_reg));
__ j(not_zero, &do_subtraction, Label::kNear);
__ and_(left_reg, Operand(scratch));
__ jmp(&remainder_eq_dividend, Label::kNear);
__ bind(&do_subtraction);
const int kUnfolds = 3;
// Try a few subtractions of the dividend.
__ mov(scratch, left_reg);
for (int i = 0; i < kUnfolds; i++) {
// Reduce the dividend by the divisor.
__ sub(left_reg, Operand(right_reg));
// Check if the dividend is less than the divisor.
__ cmp(left_reg, Operand(right_reg));
__ j(less, &remainder_eq_dividend, Label::kNear);
}
__ mov(left_reg, scratch);
// Slow case, using idiv instruction.
__ bind(&slow);
// Check for (kMinInt % -1).
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
Label left_not_min_int;
__ cmp(left_reg, kMinInt);
__ j(not_zero, &left_not_min_int, Label::kNear);
__ cmp(right_reg, -1);
DeoptimizeIf(zero, instr->environment());
__ bind(&left_not_min_int);
}
// Sign extend to edx.
__ cdq();
// Check for (0 % -x) that will produce negative zero.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label positive_left;
Label done;
__ test(left_reg, Operand(left_reg));
__ j(not_sign, &positive_left, Label::kNear);
__ idiv(right_reg);
// Test the remainder for 0, because then the result would be -0.
__ test(result_reg, Operand(result_reg));
__ j(not_zero, &done, Label::kNear);
DeoptimizeIf(no_condition, instr->environment());
__ bind(&positive_left);
__ idiv(right_reg);
__ bind(&done);
} else {
__ idiv(right_reg);
}
__ jmp(&done, Label::kNear);
__ bind(&remainder_eq_dividend);
__ mov(result_reg, left_reg);
__ bind(&done);
}
}
void LCodeGen::DoDivI(LDivI* instr) {
if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) {
Register dividend = ToRegister(instr->left());
int32_t divisor =
HConstant::cast(instr->hydrogen()->right())->Integer32Value();
int32_t test_value = 0;
int32_t power = 0;
if (divisor > 0) {
test_value = divisor - 1;
power = WhichPowerOf2(divisor);
} else {
// Check for (0 / -x) that will produce negative zero.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ test(dividend, Operand(dividend));
DeoptimizeIf(zero, instr->environment());
}
// Check for (kMinInt / -1).
if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
__ cmp(dividend, kMinInt);
DeoptimizeIf(zero, instr->environment());
}
test_value = -divisor - 1;
power = WhichPowerOf2(-divisor);
}
if (test_value != 0) {
// Deoptimize if remainder is not 0.
__ test(dividend, Immediate(test_value));
DeoptimizeIf(not_zero, instr->environment());
__ sar(dividend, power);
}
if (divisor < 0) __ neg(dividend);
return;
}
LOperand* right = instr->right();
ASSERT(ToRegister(instr->result()).is(eax));
ASSERT(ToRegister(instr->left()).is(eax));
ASSERT(!ToRegister(instr->right()).is(eax));
ASSERT(!ToRegister(instr->right()).is(edx));
Register left_reg = eax;
// Check for x / 0.
Register right_reg = ToRegister(right);
if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(right_reg, ToOperand(right));
DeoptimizeIf(zero, instr->environment());
}
// Check for (0 / -x) that will produce negative zero.
if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ test(left_reg, Operand(left_reg));
__ j(not_zero, &left_not_zero, Label::kNear);
__ test(right_reg, ToOperand(right));
DeoptimizeIf(sign, instr->environment());
__ bind(&left_not_zero);
}
// Check for (kMinInt / -1).
if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) {
Label left_not_min_int;
__ cmp(left_reg, kMinInt);
__ j(not_zero, &left_not_min_int, Label::kNear);
__ cmp(right_reg, -1);
DeoptimizeIf(zero, instr->environment());
__ bind(&left_not_min_int);
}
// Sign extend to edx.
__ cdq();
__ idiv(right_reg);
if (!instr->is_flooring()) {
// Deoptimize if remainder is not 0.
__ test(edx, Operand(edx));
DeoptimizeIf(not_zero, instr->environment());
} else {
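// Floor the quotient: idiv truncates toward zero, so when the remainder
// is non-zero and its sign differs from the divisor's, the quotient must
// be decremented. edx ^ right_reg is negative exactly in that case, and
// the arithmetic shift by 31 turns it into -1 (or 0), which is added to
// eax. E.g. -7 / 2: idiv gives eax == -3, edx == -1; the fixup yields -4.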
Label done;
__ test(edx, edx);
__ j(zero, &done, Label::kNear);
__ xor_(edx, right_reg);
__ sar(edx, 31);
__ add(eax, edx);
__ bind(&done);
}
}
void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
ASSERT(instr->right()->IsConstantOperand());
Register dividend = ToRegister(instr->left());
int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
Register result = ToRegister(instr->result());
switch (divisor) {
case 0:
DeoptimizeIf(no_condition, instr->environment());
return;
case 1:
__ Move(result, dividend);
return;
case -1:
__ Move(result, dividend);
__ neg(result);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(zero, instr->environment());
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
DeoptimizeIf(overflow, instr->environment());
}
return;
}
uint32_t divisor_abs = abs(divisor);
if (IsPowerOf2(divisor_abs)) {
int32_t power = WhichPowerOf2(divisor_abs);
if (divisor < 0) {
// Input[dividend] is clobbered.
// The sequence is tedious because neg(dividend) might overflow.
__ mov(result, dividend);
__ sar(dividend, 31);
__ neg(result);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(zero, instr->environment());
}
__ shl(dividend, 32 - power);
__ sar(result, power);
__ not_(dividend);
// Clear result.sign if dividend.sign is set.
__ and_(result, dividend);
} else {
__ Move(result, dividend);
__ sar(result, power);
}
} else {
ASSERT(ToRegister(instr->left()).is(eax));
ASSERT(ToRegister(instr->result()).is(edx));
Register scratch = ToRegister(instr->temp());
// Find b such that 2^b < divisor_abs < 2^(b+1).
unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
unsigned shift = 32 + b; // Effectively one extra bit of precision.
double multiplier_f =
static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
int64_t multiplier;
if (multiplier_f - floor(multiplier_f) < 0.5) {
multiplier = static_cast<int64_t>(floor(multiplier_f));
} else {
multiplier = static_cast<int64_t>(floor(multiplier_f)) + 1;
}
// The multiplier is a uint32.
ASSERT(multiplier > 0 &&
multiplier < (static_cast<int64_t>(1) << 32));
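// Illustration: for divisor_abs == 3 we get b == 1, shift == 33, and
// multiplier == round(2^33 / 3) == 0xAAAAAAAB, so the quotient is
// (dividend * multiplier) >> 33, up to the sign corrections below.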
__ mov(scratch, dividend);
if (divisor < 0 &&
instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ test(dividend, dividend);
DeoptimizeIf(zero, instr->environment());
}
__ mov(edx, static_cast<int32_t>(multiplier));
__ imul(edx);
if (static_cast<int32_t>(multiplier) < 0) {
__ add(edx, scratch);
}
Register reg_lo = eax;
Register reg_byte_scratch = scratch;
if (!reg_byte_scratch.is_byte_register()) {
__ xchg(reg_lo, reg_byte_scratch);
reg_lo = scratch;
reg_byte_scratch = eax;
}
if (divisor < 0) {
__ xor_(reg_byte_scratch, reg_byte_scratch);
__ cmp(reg_lo, 0x40000000);
__ setcc(above, reg_byte_scratch);
__ neg(edx);
__ sub(edx, reg_byte_scratch);
} else {
__ xor_(reg_byte_scratch, reg_byte_scratch);
__ cmp(reg_lo, 0xC0000000);
__ setcc(above_equal, reg_byte_scratch);
__ add(edx, reg_byte_scratch);
}
__ sar(edx, shift - 32);
}
}
void LCodeGen::DoMulI(LMulI* instr) {
Register left = ToRegister(instr->left());
LOperand* right = instr->right();
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ mov(ToRegister(instr->temp()), left);
}
if (right->IsConstantOperand()) {
// Try strength reductions on the multiplication.
// All replacement instructions are at most as long as the imul
// and have better latency.
int constant = ToInteger32(LConstantOperand::cast(right));
if (constant == -1) {
__ neg(left);
} else if (constant == 0) {
__ xor_(left, Operand(left));
} else if (constant == 2) {
__ add(left, Operand(left));
} else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
// If we know that the multiplication can't overflow, it's safe to
// use instructions that don't set the overflow flag for the
// multiplication.
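// lea computes base + index * scale without touching the flags, so
// Operand(left, left, times_2, 0) yields left * 3, times_4 yields
// left * 5, and times_8 yields left * 9.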
switch (constant) {
case 1:
// Do nothing.
break;
case 3:
__ lea(left, Operand(left, left, times_2, 0));
break;
case 4:
__ shl(left, 2);
break;
case 5:
__ lea(left, Operand(left, left, times_4, 0));
break;
case 8:
__ shl(left, 3);
break;
case 9:
__ lea(left, Operand(left, left, times_8, 0));
break;
case 16:
__ shl(left, 4);
break;
default:
__ imul(left, left, constant);
break;
}
} else {
__ imul(left, left, constant);
}
} else {
__ imul(left, ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
DeoptimizeIf(overflow, instr->environment());
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Bail out if the result is supposed to be negative zero.
Label done;
__ test(left, Operand(left));
__ j(not_zero, &done, Label::kNear);
if (right->IsConstantOperand()) {
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
DeoptimizeIf(no_condition, instr->environment());
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
__ cmp(ToRegister(instr->temp()), Immediate(0));
DeoptimizeIf(less, instr->environment());
}
} else {
// Test the non-zero operand for negative sign.
__ or_(ToRegister(instr->temp()), ToOperand(right));
DeoptimizeIf(sign, instr->environment());
}
__ bind(&done);
}
}
void LCodeGen::DoBitI(LBitI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
ASSERT(left->IsRegister());
if (right->IsConstantOperand()) {
int right_operand = ToInteger32(LConstantOperand::cast(right));
switch (instr->op()) {
case Token::BIT_AND:
__ and_(ToRegister(left), right_operand);
break;
case Token::BIT_OR:
__ or_(ToRegister(left), right_operand);
break;
case Token::BIT_XOR:
__ xor_(ToRegister(left), right_operand);
break;
default:
UNREACHABLE();
break;
}
} else {
switch (instr->op()) {
case Token::BIT_AND:
__ and_(ToRegister(left), ToOperand(right));
break;
case Token::BIT_OR:
__ or_(ToRegister(left), ToOperand(right));
break;
case Token::BIT_XOR:
__ xor_(ToRegister(left), ToOperand(right));
break;
default:
UNREACHABLE();
break;
}
}
}
void LCodeGen::DoShiftI(LShiftI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
ASSERT(left->IsRegister());
if (right->IsRegister()) {
ASSERT(ToRegister(right).is(ecx));
switch (instr->op()) {
case Token::ROR:
__ ror_cl(ToRegister(left));
if (instr->can_deopt()) {
__ test(ToRegister(left), Immediate(0x80000000));
DeoptimizeIf(not_zero, instr->environment());
}
break;
case Token::SAR:
__ sar_cl(ToRegister(left));
break;
case Token::SHR:
__ shr_cl(ToRegister(left));
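// After a shift by cl >= 1 the sign bit is always clear; only a zero
// shift can leave it set, in which case the unsigned result does not
// fit in an int32 and we must deoptimize.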
if (instr->can_deopt()) {
__ test(ToRegister(left), Immediate(0x80000000));
DeoptimizeIf(not_zero, instr->environment());
}
break;
case Token::SHL:
__ shl_cl(ToRegister(left));
break;
default:
UNREACHABLE();
break;
}
} else {
int value = ToInteger32(LConstantOperand::cast(right));
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
switch (instr->op()) {
case Token::ROR:
if (shift_count == 0 && instr->can_deopt()) {
__ test(ToRegister(left), Immediate(0x80000000));
DeoptimizeIf(not_zero, instr->environment());
} else {
__ ror(ToRegister(left), shift_count);
}
break;
case Token::SAR:
if (shift_count != 0) {
__ sar(ToRegister(left), shift_count);
}
break;
case Token::SHR:
if (shift_count == 0 && instr->can_deopt()) {
__ test(ToRegister(left), Immediate(0x80000000));
DeoptimizeIf(not_zero, instr->environment());
} else {
__ shr(ToRegister(left), shift_count);
}
break;
case Token::SHL:
if (shift_count != 0) {
__ shl(ToRegister(left), shift_count);
}
break;
default:
UNREACHABLE();
break;
}
}
}
void LCodeGen::DoSubI(LSubI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
if (right->IsConstantOperand()) {
__ sub(ToOperand(left), ToInteger32Immediate(right));
} else {
__ sub(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
DeoptimizeIf(overflow, instr->environment());
}
}
void LCodeGen::DoConstantI(LConstantI* instr) {
ASSERT(instr->result()->IsRegister());
__ Set(ToRegister(instr->result()), Immediate(instr->value()));
}
void LCodeGen::DoConstantD(LConstantD* instr) {
double v = instr->value();
uint64_t int_val = BitCast<uint64_t, double>(v);
int32_t lower = static_cast<int32_t>(int_val);
int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
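// Materialize the double from the two 32-bit halves of its IEEE-754 bit
// pattern: 'lower' is the low word of the mantissa, 'upper' carries the
// sign, exponent, and high mantissa bits.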
if (!CpuFeatures::IsSafeForSnapshot(SSE2)) {
__ push(Immediate(lower));
__ push(Immediate(upper));
PushX87DoubleOperand(Operand(esp, 0));
__ add(Operand(esp), Immediate(kDoubleSize));
CurrentInstructionReturnsX87Result();
} else {
CpuFeatureScope scope1(masm(), SSE2);
ASSERT(instr->result()->IsDoubleRegister());
XMMRegister res = ToDoubleRegister(instr->result());
if (int_val == 0) {
__ xorps(res, res);
} else {
Register temp = ToRegister(instr->temp());
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope scope2(masm(), SSE4_1);
if (lower != 0) {
__ Set(temp, Immediate(lower));
__ movd(res, Operand(temp));
__ Set(temp, Immediate(upper));
__ pinsrd(res, Operand(temp), 1);
} else {
__ xorps(res, res);
__ Set(temp, Immediate(upper));
__ pinsrd(res, Operand(temp), 1);
}
} else {
__ Set(temp, Immediate(upper));
__ movd(res, Operand(temp));
__ psllq(res, 32);
if (lower != 0) {
__ Set(temp, Immediate(lower));
__ movd(xmm0, Operand(temp));
__ por(res, xmm0);
}
}
}
}
}
void LCodeGen::DoConstantT(LConstantT* instr) {
Register reg = ToRegister(instr->result());
Handle<Object> handle = instr->value();
if (handle->IsHeapObject()) {
__ LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
} else {
__ Set(reg, Immediate(handle));
}
}
void LCodeGen::DoFixedArrayBaseLength(
LFixedArrayBaseLength* instr) {
Register result = ToRegister(instr->result());
Register array = ToRegister(instr->value());
__ mov(result, FieldOperand(array, FixedArrayBase::kLengthOffset));
}
void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
Register result = ToRegister(instr->result());
Register map = ToRegister(instr->value());
__ EnumLength(result, map);
}
void LCodeGen::DoElementsKind(LElementsKind* instr) {
Register result = ToRegister(instr->result());
Register input = ToRegister(instr->value());
// Load map into |result|.
__ mov(result, FieldOperand(input, HeapObject::kMapOffset));
// Load the map's "bit field 2" into |result|. We only need the first byte,
// but the following masking takes care of that anyway.
__ mov(result, FieldOperand(result, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
__ and_(result, Map::kElementsKindMask);
__ shr(result, Map::kElementsKindShift);
}
void LCodeGen::DoValueOf(LValueOf* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
Register map = ToRegister(instr->temp());
ASSERT(input.is(result));
Label done;
// If the object is a smi return the object.
__ JumpIfSmi(input, &done, Label::kNear);
// If the object is not a value type, return the object.
__ CmpObjectType(input, JS_VALUE_TYPE, map);
__ j(not_equal, &done, Label::kNear);
__ mov(result, FieldOperand(input, JSValue::kValueOffset));
__ bind(&done);
}
void LCodeGen::DoDateField(LDateField* instr) {
Register object = ToRegister(instr->date());
Register result = ToRegister(instr->result());
Register scratch = ToRegister(instr->temp());
Smi* index = instr->index();
Label runtime, done;
ASSERT(object.is(result));
ASSERT(object.is(eax));
__ test(object, Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr->environment());
__ CmpObjectType(object, JS_DATE_TYPE, scratch);
DeoptimizeIf(not_equal, instr->environment());
if (index->value() == 0) {
__ mov(result, FieldOperand(object, JSDate::kValueOffset));
} else {
if (index->value() < JSDate::kFirstUncachedField) {
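// Fields below kFirstUncachedField are cached on the JSDate object and
// are valid only while the global date cache stamp matches; otherwise
// fall through to the runtime call.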
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
__ mov(scratch, Operand::StaticVariable(stamp));
__ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
__ j(not_equal, &runtime, Label::kNear);
__ mov(result, FieldOperand(object, JSDate::kValueOffset +
kPointerSize * index->value()));
__ jmp(&done);
}
__ bind(&runtime);
__ PrepareCallCFunction(2, scratch);
__ mov(Operand(esp, 0), object);
__ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
__ bind(&done);
}
}
void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
SeqStringSetCharGenerator::Generate(masm(),
instr->encoding(),
ToRegister(instr->string()),
ToRegister(instr->index()),
ToRegister(instr->value()));
}
void LCodeGen::DoBitNotI(LBitNotI* instr) {
LOperand* input = instr->value();
ASSERT(input->Equals(instr->result()));
__ not_(ToRegister(input));
}
void LCodeGen::DoThrow(LThrow* instr) {
__ push(ToOperand(instr->value()));
ASSERT(ToRegister(instr->context()).is(esi));
CallRuntime(Runtime::kThrow, 1, instr);
if (FLAG_debug_code) {
Comment("Unreachable code.");
__ int3();
}
}
void LCodeGen::DoAddI(LAddI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
if (right->IsConstantOperand()) {
__ add(ToOperand(left), ToInteger32Immediate(right));
} else {
__ add(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
DeoptimizeIf(overflow, instr->environment());
}
}
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
CpuFeatureScope scope(masm(), SSE2);
LOperand* left = instr->left();
LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
HMathMinMax::Operation operation = instr->hydrogen()->operation();
if (instr->hydrogen()->representation().IsInteger32()) {
Label return_left;
Condition condition = (operation == HMathMinMax::kMathMin)
? less_equal
: greater_equal;
if (right->IsConstantOperand()) {
Operand left_op = ToOperand(left);
Immediate right_imm = ToInteger32Immediate(right);
__ cmp(left_op, right_imm);
__ j(condition, &return_left, Label::kNear);
__ mov(left_op, right_imm);
} else {
Register left_reg = ToRegister(left);
Operand right_op = ToOperand(right);
__ cmp(left_reg, right_op);
__ j(condition, &return_left, Label::kNear);
__ mov(left_reg, right_op);
}
__ bind(&return_left);
} else {
ASSERT(instr->hydrogen()->representation().IsDouble());
Label check_nan_left, check_zero, return_left, return_right;
Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
XMMRegister left_reg = ToDoubleRegister(left);
XMMRegister right_reg = ToDoubleRegister(right);
__ ucomisd(left_reg, right_reg);
__ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
__ j(equal, &check_zero, Label::kNear); // left == right.
__ j(condition, &return_left, Label::kNear);
__ jmp(&return_right, Label::kNear);
__ bind(&check_zero);
XMMRegister xmm_scratch = xmm0;
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(left_reg, xmm_scratch);
__ j(not_equal, &return_left, Label::kNear); // left == right != 0.
// At this point, both left and right are either 0 or -0.
if (operation == HMathMinMax::kMathMin) {
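// Both operands are +0 and/or -0 here, so ORing the bit patterns
// produces -0 exactly when either input is -0, which is the minimum.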
__ orpd(left_reg, right_reg);
} else {
// Since we operate on +0 and/or -0, addsd and andpd have the same effect.
__ addsd(left_reg, right_reg);
}
__ jmp(&return_left, Label::kNear);
__ bind(&check_nan_left);
__ ucomisd(left_reg, left_reg); // NaN check.
__ j(parity_even, &return_left, Label::kNear); // left == NaN.
__ bind(&return_right);
__ movsd(left_reg, right_reg);
__ bind(&return_left);
}
}
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister left = ToDoubleRegister(instr->left());
XMMRegister right = ToDoubleRegister(instr->right());
XMMRegister result = ToDoubleRegister(instr->result());
// Modulo uses a fixed result register.
ASSERT(instr->op() == Token::MOD || left.is(result));
switch (instr->op()) {
case Token::ADD:
__ addsd(left, right);
break;
case Token::SUB:
__ subsd(left, right);
break;
case Token::MUL:
__ mulsd(left, right);
break;
case Token::DIV:
__ divsd(left, right);
      // Don't delete this mov. It may improve performance on some CPUs
      // when there is a mulsd depending on the result.
__ movaps(left, left);
break;
case Token::MOD: {
// Pass two doubles as arguments on the stack.
__ PrepareCallCFunction(4, eax);
__ movdbl(Operand(esp, 0 * kDoubleSize), left);
__ movdbl(Operand(esp, 1 * kDoubleSize), right);
__ CallCFunction(
ExternalReference::double_fp_operation(Token::MOD, isolate()),
4);
// Return value is in st(0) on ia32.
// Store it into the (fixed) result register.
__ sub(Operand(esp), Immediate(kDoubleSize));
__ fstp_d(Operand(esp, 0));
__ movdbl(result, Operand(esp, 0));
__ add(Operand(esp), Immediate(kDoubleSize));
break;
}
default:
UNREACHABLE();
break;
}
}
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->left()).is(edx));
ASSERT(ToRegister(instr->right()).is(eax));
ASSERT(ToRegister(instr->result()).is(eax));
BinaryOpStub stub(instr->op(), NO_OVERWRITE);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
__ nop(); // Signals no inlined code.
}
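

// Returns the id of the next block (after the current one) that will
// actually emit code, i.e. whose label has not been replaced; returns -1
// if no such block remains.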
int LCodeGen::GetNextEmittedBlock() {
for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
if (!chunk_->GetLabel(i)->HasReplacement()) return i;
}
return -1;
}
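

// Emits a conditional branch on |cc| to |left_block| and an unconditional
// jump to |right_block|, omitting any jump whose target is the next block
// to be emitted so that control simply falls through.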
void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
int next_block = GetNextEmittedBlock();
right_block = chunk_->LookupDestination(right_block);
left_block = chunk_->LookupDestination(left_block);
if (right_block == left_block) {
EmitGoto(left_block);
} else if (left_block == next_block) {
__ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
} else if (right_block == next_block) {
__ j(cc, chunk_->GetAssemblyLabel(left_block));
} else {
__ j(cc, chunk_->GetAssemblyLabel(left_block));
__ jmp(chunk_->GetAssemblyLabel(right_block));
}
}
void LCodeGen::DoBranch(LBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
CpuFeatureScope scope(masm(), SSE2);
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
Register reg = ToRegister(instr->value());
__ test(reg, Operand(reg));
EmitBranch(true_block, false_block, not_zero);
} else if (r.IsDouble()) {
XMMRegister reg = ToDoubleRegister(instr->value());
__ xorps(xmm0, xmm0);
__ ucomisd(reg, xmm0);
EmitBranch(true_block, false_block, not_equal);
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->value());
HType type = instr->hydrogen()->value()->type();
if (type.IsBoolean()) {
__ cmp(reg, factory()->true_value());
EmitBranch(true_block, false_block, equal);
} else if (type.IsSmi()) {
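      // The smi zero is the only falsy smi, and its bit pattern is all
      // zeros, so a plain register test suffices.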
__ test(reg, Operand(reg));
EmitBranch(true_block, false_block, not_equal);
} else {
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
if (expected.Contains(ToBooleanStub::UNDEFINED)) {
// undefined -> false.
__ cmp(reg, factory()->undefined_value());
__ j(equal, false_label);
}
if (expected.Contains(ToBooleanStub::BOOLEAN)) {
// true -> true.
__ cmp(reg, factory()->true_value());
__ j(equal, true_label);
// false -> false.
__ cmp(reg, factory()->false_value());
__ j(equal, false_label);
}
if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
// 'null' -> false.
__ cmp(reg, factory()->null_value());
__ j(equal, false_label);
}
if (expected.Contains(ToBooleanStub::SMI)) {
// Smis: 0 -> false, all other -> true.
__ test(reg, Operand(reg));
__ j(equal, false_label);
__ JumpIfSmi(reg, true_label);
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ test(reg, Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr->environment());
}
Register map = no_reg; // Keep the compiler happy.
if (expected.NeedsMap()) {
map = ToRegister(instr->temp());
ASSERT(!map.is(reg));
__ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
if (expected.CanBeUndetectable()) {
// Undetectable -> false.
__ test_b(FieldOperand(map, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
__ j(not_zero, false_label);
}
}
if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
// spec object -> true.
__ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
__ j(above_equal, true_label);
}
if (expected.Contains(ToBooleanStub::STRING)) {
// String value -> false iff empty.
Label not_string;
__ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
__ j(above_equal, &not_string, Label::kNear);
__ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
__ j(not_zero, true_label);
__ jmp(false_label);
__ bind(&not_string);
}
if (expected.Contains(ToBooleanStub::SYMBOL)) {
// Symbol value -> true.
__ CmpInstanceType(map, SYMBOL_TYPE);
__ j(equal, true_label);
}
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
factory()->heap_number_map());
__ j(not_equal, &not_heap_number, Label::kNear);
__ xorps(xmm0, xmm0);
__ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
__ j(zero, false_label);
__ jmp(true_label);
__ bind(&not_heap_number);
}
// We've seen something for the first time -> deopt.
DeoptimizeIf(no_condition, instr->environment());
}
}
}
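

// Jumps to |block|'s destination unless it is the next emitted block,
// in which case control falls through.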
void LCodeGen::EmitGoto(int block) {
int destination = chunk_->LookupDestination(block);
if (destination != GetNextEmittedBlock()) {
__ jmp(chunk_->GetAssemblyLabel(destination));
}
}
void LCodeGen::DoGoto(LGoto* instr) {
EmitGoto(instr->block_id());
}
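

// Maps a comparison token to an ia32 condition code. The unsigned variants
// are requested for double comparisons, since ucomisd sets the flags like
// an unsigned integer compare.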
Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
Condition cond = no_condition;
switch (op) {
case Token::EQ:
case Token::EQ_STRICT:
cond = equal;
break;
case Token::LT:
cond = is_unsigned ? below : less;
break;
case Token::GT:
cond = is_unsigned ? above : greater;
break;
case Token::LTE:
cond = is_unsigned ? below_equal : less_equal;
break;
case Token::GTE:
cond = is_unsigned ? above_equal : greater_equal;
break;
case Token::IN:
case Token::INSTANCEOF:
default:
UNREACHABLE();
}
return cond;
}
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
Condition cc = TokenToCondition(instr->op(), instr->is_double());
if (left->IsConstantOperand() && right->IsConstantOperand()) {
// We can statically evaluate the comparison.
double left_val = ToDouble(LConstantOperand::cast(left));
double right_val = ToDouble(LConstantOperand::cast(right));
int next_block =
EvalComparison(instr->op(), left_val, right_val) ? true_block
: false_block;
EmitGoto(next_block);
} else {
if (instr->is_double()) {
CpuFeatureScope scope(masm(), SSE2);
// Don't base result on EFLAGS when a NaN is involved. Instead
// jump to the false block.
__ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
__ j(parity_even, chunk_->GetAssemblyLabel(false_block));
} else {
if (right->IsConstantOperand()) {
__ cmp(ToRegister(left), ToInteger32Immediate(right));
} else if (left->IsConstantOperand()) {
__ cmp(ToOperand(right), ToInteger32Immediate(left));
// We transposed the operands. Reverse the condition.
cc = ReverseCondition(cc);
} else {
__ cmp(ToRegister(left), ToOperand(right));
}
}
EmitBranch(true_block, false_block, cc);
}
}
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->left());
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
if (instr->right()->IsConstantOperand()) {
__ cmp(left, ToHandle(LConstantOperand::cast(instr->right())));
} else {
Operand right = ToOperand(instr->right());
__ cmp(left, Operand(right));
}
EmitBranch(true_block, false_block, equal);
}
void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
Register left = ToRegister(instr->left());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
__ cmp(left, instr->hydrogen()->right());
EmitBranch(true_block, false_block, equal);
}
void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
Register reg = ToRegister(instr->value());
int false_block = chunk_->LookupDestination(instr->false_block_id());
// If the expression is known to be untagged or a smi, then it's definitely
  // not null, and it can't be an undetectable object.
if (instr->hydrogen()->representation().IsSpecialization() ||
instr->hydrogen()->type().IsSmi()) {
EmitGoto(false_block);
return;
}
int true_block = chunk_->LookupDestination(instr->true_block_id());
Handle<Object> nil_value = instr->nil() == kNullValue ?
factory()->null_value() :
factory()->undefined_value();
__ cmp(reg, nil_value);
if (instr->kind() == kStrictEquality) {
EmitBranch(true_block, false_block, equal);
} else {
Handle<Object> other_nil_value = instr->nil() == kNullValue ?
factory()->undefined_value() :
factory()->null_value();
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
__ j(equal, true_label);
__ cmp(reg, other_nil_value);
__ j(equal, true_label);
__ JumpIfSmi(reg, false_label);
// Check for undetectable objects by looking in the bit field in
// the map. The object has already been smi checked.
Register scratch = ToRegister(instr->temp());
__ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
__ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
__ test(scratch, Immediate(1 << Map::kIsUndetectable));
EmitBranch(true_block, false_block, not_zero);
}
}
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
Label* is_not_object,
Label* is_object) {
__ JumpIfSmi(input, is_not_object);
__ cmp(input, isolate()->factory()->null_value());
__ j(equal, is_object);
__ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
// Undetectable objects behave like undefined.
__ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
__ j(not_zero, is_not_object);
__ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
__ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
__ j(below, is_not_object);
__ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
return below_equal;
}
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Register reg = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
Condition true_cond = EmitIsObject(reg, temp, false_label, true_label);
EmitBranch(true_block, false_block, true_cond);
}
Condition LCodeGen::EmitIsString(Register input,
Register temp1,
Label* is_not_string) {
__ JumpIfSmi(input, is_not_string);
Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
return cond;
}
void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
Register reg = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Label* false_label = chunk_->GetAssemblyLabel(false_block);
Condition true_cond = EmitIsString(reg, temp, false_label);
EmitBranch(true_block, false_block, true_cond);
}
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
Operand input = ToOperand(instr->value());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
__ test(input, Immediate(kSmiTagMask));
EmitBranch(true_block, false_block, zero);
}
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
__ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
__ test_b(FieldOperand(temp, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
EmitBranch(true_block, false_block, not_zero);
}
static Condition ComputeCompareCondition(Token::Value op) {
switch (op) {
case Token::EQ_STRICT:
case Token::EQ:
return equal;
case Token::LT:
return less;
case Token::GT:
return greater;
case Token::LTE:
return less_equal;
case Token::GTE:
return greater_equal;
default:
UNREACHABLE();
return no_condition;
}
}
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
Token::Value op = instr->op();
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
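  // The compare stub returns a value that is negative, zero, or positive
  // depending on the result, so testing it against itself sets the flags
  // expected by |condition|.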
__ test(eax, Operand(eax));
EmitBranch(true_block, false_block, condition);
}
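

// Collapses the instance type interval [from, to] of the instruction into
// the single boundary type that must be compared against.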
static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == FIRST_TYPE) return to;
ASSERT(from == to || to == LAST_TYPE);
return from;
}
static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == to) return equal;
if (to == LAST_TYPE) return above_equal;
if (from == FIRST_TYPE) return below_equal;
UNREACHABLE();
return equal;
}
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register input = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Label* false_label = chunk_->GetAssemblyLabel(false_block);
__ JumpIfSmi(input, false_label);
__ CmpObjectType(input, TestType(instr->hydrogen()), temp);
EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
}
void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
__ AssertString(input);
__ mov(result, FieldOperand(input, String::kHashFieldOffset));
__ IndexFromHash(result, result);
}
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
Register input = ToRegister(instr->value());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
__ test(FieldOperand(input, String::kHashFieldOffset),
Immediate(String::kContainsCachedArrayIndexMask));
EmitBranch(true_block, false_block, equal);
}
// Branches to a label or falls through with the answer in the z flag. Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
Label* is_false,
                               Handle<String> class_name,
Register input,
Register temp,
Register temp2) {
ASSERT(!input.is(temp));
ASSERT(!input.is(temp2));
ASSERT(!temp.is(temp2));
__ JumpIfSmi(input, is_false);
if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
    // Assuming the following assertions hold, we can use the same compares
    // to test for both being a function type and being in the object type
    // range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
FIRST_SPEC_OBJECT_TYPE + 1);
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
LAST_SPEC_OBJECT_TYPE - 1);
STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
__ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
__ j(below, is_false);
__ j(equal, is_true);
__ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
__ j(equal, is_true);
} else {
// Faster code path to avoid two compares: subtract lower bound from the
// actual type and do a signed compare with the width of the type range.
__ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
__ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
__ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(above, is_false);
}
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
__ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
// Objects with a non-function constructor have class 'Object'.
__ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
__ j(not_equal, is_true);
} else {
__ j(not_equal, is_false);
}
// temp now contains the constructor function. Grab the
// instance class name from there.
__ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
__ mov(temp, FieldOperand(temp,
SharedFunctionInfo::kInstanceClassNameOffset));
// The class name we are testing against is internalized since it's a literal.
// The name in the constructor is internalized because of the way the context
// is booted. This routine isn't expected to work for random API-created
// classes and it doesn't have to because you can't access it with natives
// syntax. Since both sides are internalized it is sufficient to use an
// identity comparison.
__ cmp(temp, class_name);
// End with the answer in the z flag.
}
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Register input = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
Register temp2 = ToRegister(instr->temp2());
Handle<String> class_name = instr->hydrogen()->class_name();
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
EmitBranch(true_block, false_block, equal);
}
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
Register reg = ToRegister(instr->value());
int true_block = instr->true_block_id();
int false_block = instr->false_block_id();
__ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
EmitBranch(true_block, false_block, equal);
}
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
// Object and function are in fixed registers defined by the stub.
ASSERT(ToRegister(instr->context()).is(esi));
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
Label true_value, done;
__ test(eax, Operand(eax));
__ j(zero, &true_value, Label::kNear);
__ mov(ToRegister(instr->result()), factory()->false_value());
__ jmp(&done, Label::kNear);
__ bind(&true_value);
__ mov(ToRegister(instr->result()), factory()->true_value());
__ bind(&done);
}
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
class DeferredInstanceOfKnownGlobal: public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
virtual LInstruction* instr() { return instr_; }
Label* map_check() { return &map_check_; }
private:
LInstanceOfKnownGlobal* instr_;
Label map_check_;
};
DeferredInstanceOfKnownGlobal* deferred;
deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
Label done, false_result;
Register object = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
// A Smi is not an instance of anything.
__ JumpIfSmi(object, &false_result);
  // This is the inlined call site instanceof cache. The two occurrences of the
// hole value will be patched to the last map/result pair generated by the
// instanceof stub.
Label cache_miss;
Register map = ToRegister(instr->temp());
__ mov(map, FieldOperand(object, HeapObject::kMapOffset));
__ bind(deferred->map_check()); // Label for calculating code patching.
Handle<JSGlobalPropertyCell> cache_cell =
factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
__ cmp(map, Operand::Cell(cache_cell)); // Patched to cached map.
__ j(not_equal, &cache_miss, Label::kNear);
__ mov(eax, factory()->the_hole_value()); // Patched to either true or false.
__ jmp(&done);
// The inlined call site cache did not match. Check for null and string
// before calling the deferred code.
__ bind(&cache_miss);
// Null is not an instance of anything.
__ cmp(object, factory()->null_value());
__ j(equal, &false_result);
// String values are not instances of anything.
Condition is_string = masm_->IsObjectStringType(object, temp, temp);
__ j(is_string, &false_result);
// Go to the deferred code.
__ jmp(deferred->entry());
__ bind(&false_result);
__ mov(ToRegister(instr->result()), factory()->false_value());
// Here result has either true or false. Deferred code also produces true or
// false object.
__ bind(deferred->exit());
__ bind(&done);
}
void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check) {
PushSafepointRegistersScope scope(this);
InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
flags = static_cast<InstanceofStub::Flags>(
flags | InstanceofStub::kArgsInRegisters);
flags = static_cast<InstanceofStub::Flags>(
flags | InstanceofStub::kCallSiteInlineCheck);
flags = static_cast<InstanceofStub::Flags>(
flags | InstanceofStub::kReturnTrueFalseObject);
InstanceofStub stub(flags);
  // Get the temp register reserved by the instruction. This needs to be the
  // register pushed last by PushSafepointRegisters, as the top of the stack
  // is used to pass the offset of the map check site to the stub.
Register temp = ToRegister(instr->temp());
ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
__ LoadHeapObject(InstanceofStub::right(), instr->function());
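  // The stub expects the distance from the map check site to the call on
  // top of the stack; kAdditionalDelta accounts for the code emitted
  // between this point and the call instruction itself.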
static const int kAdditionalDelta = 13;
int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
__ mov(temp, Immediate(delta));
__ StoreToSafepointRegisterSlot(temp, temp);
CallCodeGeneric(stub.GetCode(isolate()),
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
// Get the deoptimization index of the LLazyBailout-environment that
// corresponds to this instruction.
LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
// Put the result value into the eax slot and restore all registers.
__ StoreToSafepointRegisterSlot(eax, eax);
}
void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
__ mov(result, FieldOperand(object, HeapObject::kMapOffset));
__ movzx_b(result, FieldOperand(result, Map::kInstanceSizeOffset));
}
void LCodeGen::DoCmpT(LCmpT* instr) {
Token::Value op = instr->op();
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
Label true_value, done;
__ test(eax, Operand(eax));
__ j(condition, &true_value, Label::kNear);
__ mov(ToRegister(instr->result()), factory()->false_value());
__ jmp(&done, Label::kNear);
__ bind(&true_value);
__ mov(ToRegister(instr->result()), factory()->true_value());
__ bind(&done);
}
void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
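  // Besides the parameters, the return address must be dropped; with
  // dynamic frame alignment there is additionally one padding slot holding
  // the alignment marker.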
int extra_value_count = dynamic_frame_alignment ? 2 : 1;
if (instr->has_constant_parameter_count()) {
int parameter_count = ToInteger32(instr->constant_parameter_count());
if (dynamic_frame_alignment && FLAG_debug_code) {
__ cmp(Operand(esp,
(parameter_count + extra_value_count) * kPointerSize),
Immediate(kAlignmentZapValue));
__ Assert(equal, "expected alignment marker");
}
__ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
} else {
Register reg = ToRegister(instr->parameter_count());
Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
if (dynamic_frame_alignment && FLAG_debug_code) {
ASSERT(extra_value_count == 2);
__ cmp(Operand(esp, reg, times_pointer_size,
extra_value_count * kPointerSize),
Immediate(kAlignmentZapValue));
__ Assert(equal, "expected alignment marker");
}
    // Emit code to restore the stack based on instr->parameter_count().
    __ pop(return_addr_reg);  // Save the return address.
if (dynamic_frame_alignment) {
      __ inc(reg);  // One more slot for the alignment padding.
}
__ shl(reg, kPointerSizeLog2);
__ add(esp, reg);
__ jmp(return_addr_reg);
}
}
void LCodeGen::DoReturn(LReturn* instr) {
if (FLAG_trace && info()->IsOptimizing()) {
// Preserve the return value on the stack and rely on the runtime call
// to return the value in the same register. We're leaving the code
    // managed by the register allocator and tearing down the frame, so it's
// safe to write to the context register.
__ push(eax);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
ASSERT(NeedsEagerFrame());
CpuFeatureScope scope(masm(), SSE2);
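    // Restore the double registers that were spilled to the stack in the
    // prologue.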
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
int count = 0;
while (!save_iterator.Done()) {
__ movdbl(XMMRegister::FromAllocationIndex(save_iterator.Current()),
MemOperand(esp, count * kDoubleSize));
save_iterator.Advance();
count++;
}
}
if (dynamic_frame_alignment_) {
// Fetch the state of the dynamic frame alignment.
__ mov(edx, Operand(ebp,
JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
}
if (NeedsEagerFrame()) {
__ mov(esp, ebp);
__ pop(ebp);
}
if (dynamic_frame_alignment_) {
Label no_padding;
__ cmp(edx, Immediate(kNoAlignmentPadding));
__ j(equal, &no_padding);
EmitReturn(instr, true);
__ bind(&no_padding);
}
EmitReturn(instr, false);
}
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
__ mov(result, Operand::Cell(instr->hydrogen()->cell()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
}
}
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->global_object()).is(edx));
ASSERT(ToRegister(instr->result()).is(eax));
__ mov(ecx, instr->name());
RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
RelocInfo::CODE_TARGET_CONTEXT;
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallCode(ic, mode, instr);
}
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->value());
Handle<JSGlobalPropertyCell> cell_handle = instr->hydrogen()->cell();
  // If the cell we are storing to contains the hole, it could have
// been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark
// it as no longer deleted. We deoptimize in that case.
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(Operand::Cell(cell_handle), factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
}
// Store the value.
__ mov(Operand::Cell(cell_handle), value);
// Cells are always rescanned, so no write barrier here.
}
void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->global_object()).is(edx));
ASSERT(ToRegister(instr->value()).is(eax));
__ mov(ecx, instr->name());
Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
}
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ mov(result, ContextOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
if (instr->hydrogen()->DeoptimizesOnHole()) {
DeoptimizeIf(equal, instr->environment());
} else {
Label is_not_hole;
__ j(not_equal, &is_not_hole, Label::kNear);
__ mov(result, factory()->undefined_value());
__ bind(&is_not_hole);
}
}
}
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
Label skip_assignment;
Operand target = ContextOperand(context, instr->slot_index());
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(target, factory()->the_hole_value());
if (instr->hydrogen()->DeoptimizesOnHole()) {
DeoptimizeIf(equal, instr->environment());
} else {
__ j(not_equal, &skip_assignment, Label::kNear);
}
}
__ mov(target, value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
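    // The write barrier can omit its smi check when the stored value is
    // statically known to be a heap object.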
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
Register temp = ToRegister(instr->temp());
int offset = Context::SlotOffset(instr->slot_index());
__ RecordWriteContextSlot(context,
offset,
value,
temp,
GetSaveFPRegsMode(),
EMIT_REMEMBERED_SET,
check_needed);
}
__ bind(&skip_assignment);
}
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
if (instr->hydrogen()->is_in_object()) {
__ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
} else {
__ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
__ mov(result, FieldOperand(result, instr->hydrogen()->offset()));
}
}
void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
Handle<String> name,
LEnvironment* env) {
LookupResult lookup(isolate());
type->LookupDescriptor(NULL, *name, &lookup);
ASSERT(lookup.IsFound() || lookup.IsCacheable());
if (lookup.IsField()) {
int index = lookup.GetLocalFieldIndexFromMap(*type);
int offset = index * kPointerSize;
if (index < 0) {
// Negative property indices are in-object properties, indexed
// from the end of the fixed part of the object.
__ mov(result, FieldOperand(object, offset + type->instance_size()));
} else {
// Non-negative property indices are in the properties array.
__ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
__ mov(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
}
} else if (lookup.IsConstantFunction()) {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
__ LoadHeapObject(result, function);
} else {
// Negative lookup.
// Check prototypes.
Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
Heap* heap = type->GetHeap();
while (*current != heap->null_value()) {
__ LoadHeapObject(result, current);
__ cmp(FieldOperand(result, HeapObject::kMapOffset),
Handle<Map>(current->map()));
DeoptimizeIf(not_equal, env);
current =
Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
}
__ mov(result, factory()->undefined_value());
}
}
void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
ASSERT(!operand->IsDoubleRegister());
if (operand->IsConstantOperand()) {
Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
if (object->IsSmi()) {
__ Push(Handle<Smi>::cast(object));
} else {
__ PushHeapObject(Handle<HeapObject>::cast(object));
}
} else if (operand->IsRegister()) {
__ push(ToRegister(operand));
} else {
__ push(ToOperand(operand));
}
}
// Check for cases where EmitLoadFieldOrConstantFunction needs to walk the
// prototype chain, which causes unbounded code generation.
static bool CompactEmit(SmallMapList* list,
Handle<String> name,
int i,
Isolate* isolate) {
Handle<Map> map = list->at(i);
// If the map has ElementsKind transitions, we will generate map checks
  // for each kind in __ CompareMap(..., ALLOW_ELEMENT_TRANSITION_MAPS).
if (map->HasElementsTransition()) return false;
LookupResult lookup(isolate);
map->LookupDescriptor(NULL, *name, &lookup);
return lookup.IsField() || lookup.IsConstantFunction();
}
void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
int map_count = instr->hydrogen()->types()->length();
bool need_generic = instr->hydrogen()->need_generic();
if (map_count == 0 && !need_generic) {
DeoptimizeIf(no_condition, instr->environment());
return;
}
Handle<String> name = instr->hydrogen()->name();
Label done;
bool all_are_compact = true;
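  // Determine up front whether every map allows compact code so that near
  // jumps can be used between the per-map checks below.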
for (int i = 0; i < map_count; ++i) {
if (!CompactEmit(instr->hydrogen()->types(), name, i, isolate())) {
all_are_compact = false;
break;
}
}
for (int i = 0; i < map_count; ++i) {
bool last = (i == map_count - 1);
Handle<Map> map = instr->hydrogen()->types()->at(i);
Label check_passed;
__ CompareMap(object, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
if (last && !need_generic) {
DeoptimizeIf(not_equal, instr->environment());
__ bind(&check_passed);
EmitLoadFieldOrConstantFunction(
result, object, map, name, instr->environment());
} else {
Label next;
bool compact = all_are_compact ? true :
CompactEmit(instr->hydrogen()->types(), name, i, isolate());
__ j(not_equal, &next, compact ? Label::kNear : Label::kFar);
__ bind(&check_passed);
EmitLoadFieldOrConstantFunction(
result, object, map, name, instr->environment());
__ jmp(&done, all_are_compact ? Label::kNear : Label::kFar);
__ bind(&next);
}
}
if (need_generic) {
__ mov(ecx, name);
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
__ bind(&done);
}
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->object()).is(edx));
ASSERT(ToRegister(instr->result()).is(eax));
__ mov(ecx, instr->name());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
Register function = ToRegister(instr->function());
Register temp = ToRegister(instr->temp());
Register result = ToRegister(instr->result());
// Check that the function really is a function.
__ CmpObjectType(function, JS_FUNCTION_TYPE, result);
DeoptimizeIf(not_equal, instr->environment());
// Check whether the function has an instance prototype.
Label non_instance;
__ test_b(FieldOperand(result, Map::kBitFieldOffset),
1 << Map::kHasNonInstancePrototype);
__ j(not_zero, &non_instance, Label::kNear);
// Get the prototype or initial map from the function.
__ mov(result,
FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
// Check that the function has a prototype or an initial map.
__ cmp(Operand(result), Immediate(factory()->the_hole_value()));
DeoptimizeIf(equal, instr->environment());
// If the function does not have an initial map, we're done.
Label done;
__ CmpObjectType(result, MAP_TYPE, temp);
__ j(not_equal, &done, Label::kNear);
// Get the prototype from the initial map.
__ mov(result, FieldOperand(result, Map::kPrototypeOffset));
__ jmp(&done, Label::kNear);
// Non-instance prototype: Fetch prototype from constructor field
// in the function's map.
__ bind(&non_instance);
__ mov(result, FieldOperand(result, Map::kConstructorOffset));
// All done.
__ bind(&done);
}
void LCodeGen::DoLoadElements(LLoadElements* instr) {
Register result = ToRegister(instr->result());
Register input = ToRegister(instr->object());
__ mov(result, FieldOperand(input, JSObject::kElementsOffset));
if (FLAG_debug_code) {
Label done, ok, fail;
__ cmp(FieldOperand(result, HeapObject::kMapOffset),
Immediate(factory()->fixed_array_map()));
__ j(equal, &done, Label::kNear);
__ cmp(FieldOperand(result, HeapObject::kMapOffset),
Immediate(factory()->fixed_cow_array_map()));
__ j(equal, &done, Label::kNear);
Register temp((result.is(eax)) ? ebx : eax);
__ push(temp);
__ mov(temp, FieldOperand(result, HeapObject::kMapOffset));
__ movzx_b(temp, FieldOperand(temp, Map::kBitField2Offset));
__ and_(temp, Map::kElementsKindMask);
__ shr(temp, Map::kElementsKindShift);
__ cmp(temp, GetInitialFastElementsKind());
__ j(less, &fail, Label::kNear);
__ cmp(temp, TERMINAL_FAST_ELEMENTS_KIND);
__ j(less_equal, &ok, Label::kNear);
__ cmp(temp, FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
__ j(less, &fail, Label::kNear);
__ cmp(temp, LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
__ j(less_equal, &ok, Label::kNear);
__ bind(&fail);
__ Abort("Check for fast or external elements failed.");
__ bind(&ok);
__ pop(temp);
__ bind(&done);
}
}
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register result = ToRegister(instr->result());
Register input = ToRegister(instr->object());
__ mov(result, FieldOperand(input,
ExternalArray::kExternalPointerOffset));
}
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
Register length = ToRegister(instr->length());
Operand index = ToOperand(instr->index());
Register result = ToRegister(instr->result());
// There are two words between the frame pointer and the last argument.
  // Subtracting the index from the length accounts for one of them; the
  // kPointerSize displacement in the operand below accounts for the other.
__ sub(length, index);
__ mov(result, Operand(arguments, length, times_4, kPointerSize));
}
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
if (!key->IsConstantOperand() &&
ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
elements_kind)) {
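    // The smi tag of a byte-sized element index cannot be absorbed into the
    // operand's scale factor, so untag the key in place and index with the
    // raw integer.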
__ SmiUntag(ToRegister(key));
}
Operand operand(BuildFastArrayOperand(
instr->elements(),
key,
instr->hydrogen()->key()->representation(),
elements_kind,
0,
instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister result(ToDoubleRegister(instr->result()));
__ movss(result, operand);
__ cvtss2sd(result, result);
} else {
PushX87FloatOperand(operand);
CurrentInstructionReturnsX87Result();
}
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
__ movdbl(ToDoubleRegister(instr->result()), operand);
} else {
PushX87DoubleOperand(operand);
CurrentInstructionReturnsX87Result();
}
} else {
Register result(ToRegister(instr->result()));
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
__ movsx_b(result, operand);
break;
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ movzx_b(result, operand);
break;
case EXTERNAL_SHORT_ELEMENTS:
__ movsx_w(result, operand);
break;
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ movzx_w(result, operand);
break;
case EXTERNAL_INT_ELEMENTS:
__ mov(result, operand);
break;
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ mov(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ test(result, Operand(result));
DeoptimizeIf(negative, instr->environment());
}
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
}
}
void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
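    // The hole is a NaN with a distinctive upper 32 bits, so comparing just
    // the upper word of the double against kHoleNanUpper32 suffices.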
int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
sizeof(kHoleNanLower32);
Operand hole_check_operand = BuildFastArrayOperand(
instr->elements(), instr->key(),
instr->hydrogen()->key()->representation(),
FAST_DOUBLE_ELEMENTS,
offset,
instr->additional_index());
__ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
DeoptimizeIf(equal, instr->environment());
}
Operand double_load_operand = BuildFastArrayOperand(
instr->elements(),
instr->key(),
instr->hydrogen()->key()->representation(),
FAST_DOUBLE_ELEMENTS,
FixedDoubleArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister result = ToDoubleRegister(instr->result());
__ movdbl(result, double_load_operand);
} else {
PushX87DoubleOperand(double_load_operand);
CurrentInstructionReturnsX87Result();
}
}
void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
Register result = ToRegister(instr->result());
// Load the result.
__ mov(result,
BuildFastArrayOperand(instr->elements(),
instr->key(),
instr->hydrogen()->key()->representation(),
FAST_ELEMENTS,
FixedArray::kHeaderSize - kHeapObjectTag,
instr->additional_index()));
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ test(result, Immediate(kSmiTagMask));
DeoptimizeIf(not_equal, instr->environment());
} else {
__ cmp(result, factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
}
}
}
void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
if (instr->is_external()) {
DoLoadKeyedExternalArray(instr);
} else if (instr->hydrogen()->representation().IsDouble()) {
DoLoadKeyedFixedDoubleArray(instr);
} else {
DoLoadKeyedFixedArray(instr);
}
}
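

// Builds a memory operand addressing element |key| + |additional_index| of
// the given array. Constant keys are folded into the displacement; register
// keys use a scale factor derived from the elements kind.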
Operand LCodeGen::BuildFastArrayOperand(
LOperand* elements_pointer,
LOperand* key,
Representation key_representation,
ElementsKind elements_kind,
uint32_t offset,
uint32_t additional_index) {
Register elements_pointer_reg = ToRegister(elements_pointer);
int shift_size = ElementsKindToShiftSize(elements_kind);
if (key->IsConstantOperand()) {
int constant_value = ToInteger32(LConstantOperand::cast(key));
if (constant_value & 0xF0000000) {
Abort("array index constant value too big");
}
return Operand(elements_pointer_reg,
((constant_value + additional_index) << shift_size)
+ offset);
} else {
// Take the tag bit into account while computing the shift size.
if (key_representation.IsTagged() && (shift_size >= 1)) {
shift_size -= kSmiTagSize;
}
ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
return Operand(elements_pointer_reg,
ToRegister(key),
scale_factor,
offset + (additional_index << shift_size));
}
}
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->object()).is(edx));
ASSERT(ToRegister(instr->key()).is(ecx));
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register result = ToRegister(instr->result());
if (instr->hydrogen()->from_inlined()) {
__ lea(result, Operand(esp, -2 * kPointerSize));
} else {
// Check for arguments adapter frame.
Label done, adapted;
__ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
__ cmp(Operand(result),
Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adapted, Label::kNear);
// No arguments adaptor frame.
__ mov(result, Operand(ebp));
__ jmp(&done, Label::kNear);
// Arguments adaptor frame present.
__ bind(&adapted);
__ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
__ bind(&done);
}
}
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
Operand elem = ToOperand(instr->elements());
Register result = ToRegister(instr->result());
Label done;
  // If there is no arguments adaptor frame, the number of arguments is fixed.
__ cmp(ebp, elem);
__ mov(result, Immediate(scope()->num_parameters()));
__ j(equal, &done, Label::kNear);
// Arguments adaptor frame present. Get argument length from there.
__ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(result, Operand(result,
ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(result);
// Argument length is in result register.
__ bind(&done);
}
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
Register scratch = ToRegister(instr->temp());
// If the receiver is null or undefined, we have to pass the global
// object as a receiver to normal functions. Values have to be
// passed unchanged to builtins and strict-mode functions.
Label global_object, receiver_ok;
// Do not transform the receiver to object for strict mode
// functions.
__ mov(scratch,
FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
__ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
1 << SharedFunctionInfo::kStrictModeBitWithinByte);
__ j(not_equal, &receiver_ok); // A near jump is not sufficient here!
// Do not transform the receiver to object for builtins.
__ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
1 << SharedFunctionInfo::kNativeBitWithinByte);
__ j(not_equal, &receiver_ok);
// Normal function. Replace undefined or null with global receiver.
__ cmp(receiver, factory()->null_value());
__ j(equal, &global_object, Label::kNear);
__ cmp(receiver, factory()->undefined_value());
__ j(equal, &global_object, Label::kNear);
// The receiver should be a JS object.
__ test(receiver, Immediate(kSmiTagMask));
DeoptimizeIf(equal, instr->environment());
__ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
DeoptimizeIf(below, instr->environment());
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
// TODO(kmillikin): We have a hydrogen value for the global object. See
// if it's better to use it than to explicitly fetch it from the context
// here.
__ mov(receiver, Operand(ebp, StandardFrameConstants::kContextOffset));
__ mov(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
__ mov(receiver,
FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
__ bind(&receiver_ok);
}
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
Register length = ToRegister(instr->length());
Register elements = ToRegister(instr->elements());
ASSERT(receiver.is(eax)); // Used for parameter count.
ASSERT(function.is(edi)); // Required by InvokeFunction.
ASSERT(ToRegister(instr->result()).is(eax));
// Copy the arguments to this function possibly from the
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmp(length, kArgumentsLimit);
DeoptimizeIf(above, instr->environment());
__ push(receiver);
__ mov(receiver, length);
// Loop through the arguments pushing them onto the execution
// stack.
Label invoke, loop;
// length is a small non-negative integer, due to the test above.
__ test(length, Operand(length));
__ j(zero, &invoke, Label::kNear);
__ bind(&loop);
__ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
__ dec(length);
__ j(not_zero, &loop);
// Invoke the function.
__ bind(&invoke);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(eax);
__ InvokeFunction(function, actual, CALL_FUNCTION,
safepoint_generator, CALL_AS_METHOD);
}
void LCodeGen::DoPushArgument(LPushArgument* instr) {
LOperand* argument = instr->value();
EmitPushTaggedOperand(argument);
}
void LCodeGen::DoDrop(LDrop* instr) {
__ Drop(instr->count());
}
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
__ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}
void LCodeGen::DoContext(LContext* instr) {
Register result = ToRegister(instr->result());
if (info()->IsOptimizing()) {
__ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
} else {
// If there is no frame, the context must be in esi.
ASSERT(result.is(esi));
}
}
void LCodeGen::DoOuterContext(LOuterContext* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ mov(result,
Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
__ push(esi); // The context is the first argument.
__ push(Immediate(instr->hydrogen()->pairs()));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ mov(result,
Operand(context, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
}
void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
Register global = ToRegister(instr->global());
Register result = ToRegister(instr->result());
__ mov(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
}
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int arity,
LInstruction* instr,
CallKind call_kind,
EDIState edi_state) {
bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
function->shared()->formal_parameter_count() == arity;
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
if (can_invoke_directly) {
if (edi_state == EDI_UNINITIALIZED) {
__ LoadHeapObject(edi, function);
}
// Change context.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
    // Set eax to arguments count if adaptation is not needed. Assumes that eax
// is available to write to at this point.
if (!function->NeedsArgumentsAdaption()) {
__ mov(eax, arity);
}
// Invoke function directly.
__ SetCallKind(ecx, call_kind);
if (*function == *info()->closure()) {
__ CallSelf();
} else {
__ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
}
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
} else {
// We need to adapt arguments.
SafepointGenerator generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
__ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
}
}
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
CallKnownFunction(instr->function(),
instr->arity(),
instr,
CALL_AS_METHOD,
EDI_UNINITIALIZED);
}
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
DeoptimizeIf(not_equal, instr->environment());
Label done;
Register tmp = input_reg.is(eax) ? ecx : eax;
Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
Label negative;
__ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
// Check the sign of the argument. If the argument is positive, just
// return it. We do not need to patch the stack since |input| and
// |result| are the same register and |input| will be restored
// unchanged by popping safepoint registers.
__ test(tmp, Immediate(HeapNumber::kSignMask));
__ j(not_zero, &negative);
__ jmp(&done);
__ bind(&negative);
Label allocated, slow;
__ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
__ jmp(&allocated);
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
instr, instr->context());
// Set the pointer to the new heap number in tmp.
if (!tmp.is(eax)) __ mov(tmp, eax);
// Restore input_reg after call to runtime.
__ LoadFromSafepointRegisterSlot(input_reg, input_reg);
__ bind(&allocated);
__ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
__ and_(tmp2, ~HeapNumber::kSignMask);
__ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
__ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
__ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
__ StoreToSafepointRegisterSlot(input_reg, tmp);
__ bind(&done);
}
void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ test(input_reg, Operand(input_reg));
Label is_positive;
__ j(not_sign, &is_positive);
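  // neg leaves kMinInt unchanged and still negative, so deoptimize in that
  // case: the absolute value of kMinInt is not a representable int32.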
__ neg(input_reg);
__ test(input_reg, Operand(input_reg));
DeoptimizeIf(negative, instr->environment());
__ bind(&is_positive);
}
void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
public:
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
virtual LInstruction* instr() { return instr_; }
private:
LMathAbs* instr_;
};
ASSERT(instr->value()->Equals(instr->result()));
Representation r = instr->hydrogen()->value()->representation();
CpuFeatureScope scope(masm(), SSE2);
if (r.IsDouble()) {
XMMRegister scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ xorps(scratch, scratch);
__ subsd(scratch, input_reg);
__ pand(input_reg, scratch);
} else if (r.IsInteger32()) {
EmitIntegerMathAbs(instr);
} else { // Tagged case.
DeferredMathAbsTaggedHeapNumber* deferred =
new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
Register input_reg = ToRegister(instr->value());
// Smi check.
__ JumpIfNotSmi(input_reg, deferred->entry());
EmitIntegerMathAbs(instr);
__ bind(deferred->exit());
}
}
void LCodeGen::DoMathFloor(LMathFloor* instr) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope scope(masm(), SSE4_1);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Deoptimize on negative zero.
Label non_zero;
__ xorps(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
__ j(not_equal, &non_zero, Label::kNear);
__ movmskpd(output_reg, input_reg);
__ test(output_reg, Immediate(1));
DeoptimizeIf(not_zero, instr->environment());
__ bind(&non_zero);
}
__ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
__ cvttsd2si(output_reg, Operand(xmm_scratch));
// Overflow is signalled with minint.
__ cmp(output_reg, 0x80000000u);
DeoptimizeIf(equal, instr->environment());
} else {
Label negative_sign, done;
// Deoptimize on unordered.
__ xorps(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
DeoptimizeIf(parity_even, instr->environment());
__ j(below, &negative_sign, Label::kNear);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Check for negative zero.
Label positive_sign;
__ j(above, &positive_sign, Label::kNear);
__ movmskpd(output_reg, input_reg);
__ test(output_reg, Immediate(1));
DeoptimizeIf(not_zero, instr->environment());
__ Set(output_reg, Immediate(0));
__ jmp(&done, Label::kNear);
__ bind(&positive_sign);
}
// Use truncating instruction (OK because input is positive).
__ cvttsd2si(output_reg, Operand(input_reg));
// Overflow is signalled with minint.
__ cmp(output_reg, 0x80000000u);
DeoptimizeIf(equal, instr->environment());
__ jmp(&done, Label::kNear);
// Non-zero negative reaches here.
__ bind(&negative_sign);
// Truncate, then compare and compensate.
__ cvttsd2si(output_reg, Operand(input_reg));
__ cvtsi2sd(xmm_scratch, output_reg);
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ sub(output_reg, Immediate(1));
DeoptimizeIf(overflow, instr->environment());
__ bind(&done);
}
}
void LCodeGen::DoMathRound(LMathRound* instr) {
CpuFeatureScope scope(masm(), SSE2);
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
XMMRegister xmm_scratch = xmm0;
XMMRegister input_temp = ToDoubleRegister(instr->temp());
ExternalReference one_half = ExternalReference::address_of_one_half();
ExternalReference minus_one_half =
ExternalReference::address_of_minus_one_half();
Label done, round_to_zero, below_one_half, do_not_compensate;
__ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
__ ucomisd(xmm_scratch, input_reg);
__ j(above, &below_one_half);
  // CVTTSD2SI rounds towards zero; since 0.5 <= x, we use floor(0.5 + x).
__ addsd(xmm_scratch, input_reg);
__ cvttsd2si(output_reg, Operand(xmm_scratch));
// Overflow is signalled with minint.
__ cmp(output_reg, 0x80000000u);
__ RecordComment("D2I conversion overflow");
DeoptimizeIf(equal, instr->environment());
__ jmp(&done);
__ bind(&below_one_half);
__ movdbl(xmm_scratch, Operand::StaticVariable(minus_one_half));
__ ucomisd(xmm_scratch, input_reg);
__ j(below_equal, &round_to_zero);
  // CVTTSD2SI rounds towards zero, so we use ceil(x - (-0.5)) and then
  // compare and compensate.
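  // E.g. for x = -2.6: x - (-0.5) = -2.1 truncates to -2; since -2.0 != -2.1
  // we compensate below by subtracting 1, giving -3 == Math.round(-2.6).
  // For x = -2.5 the truncation of -2.0 is exact, so the result stays -2,
  // matching Math.round's round-half-towards-positive-infinity semantics.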
__ movsd(input_temp, input_reg); // Do not alter input_reg.
__ subsd(input_temp, xmm_scratch);
__ cvttsd2si(output_reg, Operand(input_temp));
// Catch minint due to overflow, and to prevent overflow when compensating.
__ cmp(output_reg, 0x80000000u);
__ RecordComment("D2I conversion overflow");
DeoptimizeIf(equal, instr->environment());
__ cvtsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp);
__ j(equal, &done);
__ sub(output_reg, Immediate(1));
// No overflow because we already ruled out minint.
__ jmp(&done);
__ bind(&round_to_zero);
// We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
// we can ignore the difference between a result of -0 and +0.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// If the sign is positive, we return +0.
__ movmskpd(output_reg, input_reg);
__ test(output_reg, Immediate(1));
__ RecordComment("Minus zero");
DeoptimizeIf(not_zero, instr->environment());
}
__ Set(output_reg, Immediate(0));
__ bind(&done);
}
void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
__ sqrtsd(input_reg, input_reg);
}
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister xmm_scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->value());
Register scratch = ToRegister(instr->temp());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
// Note that according to ECMA-262 15.8.2.13:
// Math.pow(-Infinity, 0.5) == Infinity
// Math.sqrt(-Infinity) == NaN
Label done, sqrt;
// Check base for -Infinity. According to IEEE-754, single-precision
// -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
__ mov(scratch, 0xFF800000);
__ movd(xmm_scratch, scratch);
__ cvtss2sd(xmm_scratch, xmm_scratch);
__ ucomisd(input_reg, xmm_scratch);
// Comparing -Infinity with NaN results in "unordered", which sets the
// zero flag as if both were equal. However, it also sets the carry flag.
__ j(not_equal, &sqrt, Label::kNear);
__ j(carry, &sqrt, Label::kNear);
// If input is -Infinity, return Infinity.
__ xorps(input_reg, input_reg);
__ subsd(input_reg, xmm_scratch);
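  // 0.0 - (-Infinity) = +Infinity, the result required for
  // Math.pow(-Infinity, 0.5).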
__ jmp(&done, Label::kNear);
// Square root.
__ bind(&sqrt);
__ xorps(xmm_scratch, xmm_scratch);
__ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
__ sqrtsd(input_reg, input_reg);
__ bind(&done);
}
void LCodeGen::DoPower(LPower* instr) {
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
ASSERT(!instr->right()->IsDoubleRegister() ||
ToDoubleRegister(instr->right()).is(xmm1));
ASSERT(!instr->right()->IsRegister() ||
ToRegister(instr->right()).is(eax));
ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
if (exponent_type.IsTagged()) {
Label no_deopt;
__ JumpIfSmi(eax, &no_deopt);
__ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
DeoptimizeIf(not_equal, instr->environment());
__ bind(&no_deopt);
MathPowStub stub(MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsInteger32()) {
MathPowStub stub(MathPowStub::INTEGER);
__ CallStub(&stub);
} else {
ASSERT(exponent_type.IsDouble());
MathPowStub stub(MathPowStub::DOUBLE);
__ CallStub(&stub);
}
}
void LCodeGen::DoRandom(LRandom* instr) {
class DeferredDoRandom: public LDeferredCode {
public:
DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LRandom* instr_;
};
DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
CpuFeatureScope scope(masm(), SSE2);
  // Having marked this instruction as a call, we can use any
  // registers.
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
ASSERT(ToRegister(instr->global_object()).is(eax));
// Assert that the register size is indeed the size of each seed.
static const int kSeedSize = sizeof(uint32_t);
STATIC_ASSERT(kPointerSize == kSeedSize);
__ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
static const int kRandomSeedOffset =
FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
__ mov(ebx, FieldOperand(eax, kRandomSeedOffset));
// ebx: FixedArray of the native context's random seeds
// Load state[0].
__ mov(ecx, FieldOperand(ebx, ByteArray::kHeaderSize));
// If state[0] == 0, call runtime to initialize seeds.
__ test(ecx, ecx);
__ j(zero, deferred->entry());
// Load state[1].
__ mov(eax, FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize));
// ecx: state[0]
// eax: state[1]
// state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
__ movzx_w(edx, ecx);
__ imul(edx, edx, 18273);
__ shr(ecx, 16);
__ add(ecx, edx);
// Save state[0].
__ mov(FieldOperand(ebx, ByteArray::kHeaderSize), ecx);
// state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
__ movzx_w(edx, eax);
__ imul(edx, edx, 36969);
__ shr(eax, 16);
__ add(eax, edx);
// Save state[1].
__ mov(FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize), eax);
// Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
__ shl(ecx, 14);
__ and_(eax, Immediate(0x3FFFF));
__ add(eax, ecx);
__ bind(deferred->exit());
// Convert 32 random bits in eax to 0.(32 random bits) in a double
// by computing:
  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
__ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
__ movd(xmm2, ebx);
__ movd(xmm1, eax);
__ cvtss2sd(xmm2, xmm2);
__ xorps(xmm1, xmm2);
__ subsd(xmm1, xmm2);
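  // After the xorps, xmm1 holds a double whose high word is 0x41300000 and
  // whose low word is the 32 random bits r, i.e. the value 2^20 + r * 2^-32;
  // subtracting 2^20 (xmm2) leaves r * 2^-32, a uniform value in [0, 1).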
}
void LCodeGen::DoDeferredRandom(LRandom* instr) {
__ PrepareCallCFunction(1, ebx);
__ mov(Operand(esp, 0), eax);
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
// Return value is in eax.
}
void LCodeGen::DoMathLog(LMathLog* instr) {
CpuFeatureScope scope(masm(), SSE2);
ASSERT(instr->value()->Equals(instr->result()));
XMMRegister input_reg = ToDoubleRegister(instr->value());
Label positive, done, zero;
__ xorps(xmm0, xmm0);
__ ucomisd(input_reg, xmm0);
__ j(above, &positive, Label::kNear);
__ j(equal, &zero, Label::kNear);
ExternalReference nan =
ExternalReference::address_of_canonical_non_hole_nan();
__ movdbl(input_reg, Operand::StaticVariable(nan));
__ jmp(&done, Label::kNear);
__ bind(&zero);
__ push(Immediate(0xFFF00000));
__ push(Immediate(0));
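  // The two pushes above form the double with bit pattern
  // 0xFFF00000 00000000 on the stack, which is -Infinity: the correct
  // result, since log(0) is -Infinity.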
__ movdbl(input_reg, Operand(esp, 0));
__ add(Operand(esp), Immediate(kDoubleSize));
__ jmp(&done, Label::kNear);
__ bind(&positive);
__ fldln2();
__ sub(Operand(esp), Immediate(kDoubleSize));
__ movdbl(Operand(esp, 0), input_reg);
__ fld_d(Operand(esp, 0));
__ fyl2x();
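  // fyl2x computes ST(1) * log2(ST(0)); with ST(1) = ln(2) loaded by fldln2
  // above, this yields ln(2) * log2(x) == ln(x).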
__ fstp_d(Operand(esp, 0));
__ movdbl(input_reg, Operand(esp, 0));
__ add(Operand(esp), Immediate(kDoubleSize));
__ bind(&done);
}
void LCodeGen::DoMathExp(LMathExp* instr) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input = ToDoubleRegister(instr->value());
XMMRegister result = ToDoubleRegister(instr->result());
Register temp1 = ToRegister(instr->temp1());
Register temp2 = ToRegister(instr->temp2());
MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
}
void LCodeGen::DoMathTan(LMathTan* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::TAN,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoMathCos(LMathCos* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoMathSin(LMathSin* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->function()).is(edi));
ASSERT(instr->HasPointerMap());
if (instr->known_function().is_null()) {
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
SafepointGenerator generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
__ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
} else {
CallKnownFunction(instr->known_function(),
instr->arity(),
instr,
CALL_AS_METHOD,
EDI_CONTAINS_TARGET);
}
}
void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->key()).is(ecx));
ASSERT(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoCallNamed(LCallNamed* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
__ mov(ecx, instr->name());
CallCode(ic, mode, instr);
}
void LCodeGen::DoCallFunction(LCallFunction* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->function()).is(edi));
ASSERT(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
__ mov(ecx, instr->name());
CallCode(ic, mode, instr);
}
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
CallKnownFunction(instr->target(),
instr->arity(),
instr,
CALL_AS_FUNCTION,
EDI_UNINITIALIZED);
}
void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->constructor()).is(edi));
ASSERT(ToRegister(instr->result()).is(eax));
if (FLAG_optimize_constructed_arrays) {
    // No cell in ebx for construct type feedback in optimized code.
Handle<Object> undefined_value(isolate()->heap()->undefined_value(),
isolate());
__ mov(ebx, Immediate(undefined_value));
}
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ Set(eax, Immediate(instr->arity()));
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->constructor()).is(edi));
ASSERT(ToRegister(instr->result()).is(eax));
ASSERT(FLAG_optimize_constructed_arrays);
__ mov(ebx, instr->hydrogen()->property_cell());
Handle<Code> array_construct_code =
isolate()->builtins()->ArrayConstructCode();
__ Set(eax, Immediate(instr->arity()));
CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr);
}
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
CallRuntime(instr->function(), instr->arity(), instr);
}
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register result = ToRegister(instr->result());
Register base = ToRegister(instr->base_object());
__ lea(result, Operand(base, instr->offset()));
}
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register object = ToRegister(instr->object());
Register value = ToRegister(instr->value());
int offset = instr->offset();
if (!instr->transition().is_null()) {
if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
__ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
} else {
Register temp = ToRegister(instr->temp());
Register temp_map = ToRegister(instr->temp_map());
__ mov(temp_map, instr->transition());
__ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
// Update the write barrier for the map field.
__ RecordWriteField(object,
HeapObject::kMapOffset,
temp_map,
temp,
GetSaveFPRegsMode(),
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
}
// Do the store.
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (instr->is_in_object()) {
__ mov(FieldOperand(object, offset), value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
Register temp = ToRegister(instr->temp());
// Update the write barrier for the object for in-object properties.
__ RecordWriteField(object,
offset,
value,
temp,
GetSaveFPRegsMode(),
EMIT_REMEMBERED_SET,
check_needed);
}
} else {
Register temp = ToRegister(instr->temp());
__ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
__ mov(FieldOperand(temp, offset), value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
__ RecordWriteField(temp,
offset,
value,
object,
GetSaveFPRegsMode(),
EMIT_REMEMBERED_SET,
check_needed);
}
}
}
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->object()).is(edx));
ASSERT(ToRegister(instr->value()).is(eax));
__ mov(ecx, instr->name());
Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
if (instr->hydrogen()->skip_check()) return;
if (instr->index()->IsConstantOperand()) {
int constant_index =
ToInteger32(LConstantOperand::cast(instr->index()));
if (instr->hydrogen()->length()->representation().IsTagged()) {
__ cmp(ToOperand(instr->length()),
Immediate(Smi::FromInt(constant_index)));
} else {
__ cmp(ToOperand(instr->length()), Immediate(constant_index));
}
DeoptimizeIf(below_equal, instr->environment());
} else {
__ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
DeoptimizeIf(above_equal, instr->environment());
}
}
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
if (!key->IsConstantOperand() &&
ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
elements_kind)) {
__ SmiUntag(ToRegister(key));
}
Operand operand(BuildFastArrayOperand(
instr->elements(),
key,
instr->hydrogen()->key()->representation(),
elements_kind,
0,
instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
__ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
__ movss(operand, xmm0);
} else {
__ fld(0);
__ fstp_s(operand);
}
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
__ movdbl(operand, ToDoubleRegister(instr->value()));
} else {
__ fst_d(operand);
}
} else {
Register value = ToRegister(instr->value());
switch (elements_kind) {
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
__ mov_b(operand, value);
break;
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ mov_w(operand, value);
break;
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ mov(operand, value);
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
}
}
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
ExternalReference canonical_nan_reference =
ExternalReference::address_of_canonical_non_hole_nan();
Operand double_store_operand = BuildFastArrayOperand(
instr->elements(),
instr->key(),
instr->hydrogen()->key()->representation(),
FAST_DOUBLE_ELEMENTS,
FixedDoubleArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister value = ToDoubleRegister(instr->value());
if (instr->NeedsCanonicalization()) {
Label have_value;
__ ucomisd(value, value);
      __ j(parity_odd, &have_value);  // Jump when the value is not NaN.
__ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
__ bind(&have_value);
}
__ movdbl(double_store_operand, value);
} else {
    // Can't use SSE2 in the serializer.
if (instr->hydrogen()->IsConstantHoleStore()) {
// This means we should store the (double) hole. No floating point
// registers required.
double nan_double = FixedDoubleArray::hole_nan_as_double();
uint64_t int_val = BitCast<uint64_t, double>(nan_double);
int32_t lower = static_cast<int32_t>(int_val);
int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
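      // Store the 64-bit hole pattern as two 32-bit halves: 'lower' at the
      // element's offset and 'upper' one kPointerSize above it, matching the
      // little-endian layout of a double in memory.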
__ mov(double_store_operand, Immediate(lower));
Operand double_store_operand2 = BuildFastArrayOperand(
instr->elements(),
instr->key(),
instr->hydrogen()->key()->representation(),
FAST_DOUBLE_ELEMENTS,
FixedDoubleArray::kHeaderSize - kHeapObjectTag + kPointerSize,
instr->additional_index());
__ mov(double_store_operand2, Immediate(upper));
} else {
Label no_special_nan_handling;
ASSERT(x87_stack_depth_ > 0);
if (instr->NeedsCanonicalization()) {
__ fld(0);
__ fld(0);
__ FCmp();
__ j(parity_odd, &no_special_nan_handling);
__ sub(esp, Immediate(kDoubleSize));
__ fst_d(MemOperand(esp, 0));
__ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
Immediate(kHoleNanUpper32));
__ add(esp, Immediate(kDoubleSize));
Label canonicalize;
__ j(not_equal, &canonicalize);
__ jmp(&no_special_nan_handling);
__ bind(&canonicalize);
__ fstp(0);
__ fld_d(Operand::StaticVariable(canonical_nan_reference));
}
__ bind(&no_special_nan_handling);
__ fst_d(double_store_operand);
}
}
}
void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
Register value = ToRegister(instr->value());
Register elements = ToRegister(instr->elements());
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
Operand operand = BuildFastArrayOperand(
instr->elements(),
instr->key(),
instr->hydrogen()->key()->representation(),
FAST_ELEMENTS,
FixedArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
__ mov(operand, value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
ASSERT(!instr->key()->IsConstantOperand());
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
__ lea(key, operand);
__ RecordWrite(elements,
key,
value,
GetSaveFPRegsMode(),
EMIT_REMEMBERED_SET,
check_needed);
}
}
void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // Dispatch by cases: external, fast double, then fast (tagged) elements.
if (instr->is_external()) {
DoStoreKeyedExternalArray(instr);
} else if (instr->hydrogen()->value()->representation().IsDouble()) {
DoStoreKeyedFixedDoubleArray(instr);
} else {
DoStoreKeyedFixedArray(instr);
}
}
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->object()).is(edx));
ASSERT(ToRegister(instr->key()).is(ecx));
ASSERT(ToRegister(instr->value()).is(eax));
Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
Register temp = ToRegister(instr->temp());
__ TestJSArrayForAllocationSiteInfo(object, temp);
DeoptimizeIf(equal, instr->environment());
}
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
Handle<Map> from_map = instr->original_map();
Handle<Map> to_map = instr->transitioned_map();
ElementsKind from_kind = instr->from_kind();
ElementsKind to_kind = instr->to_kind();
Label not_applicable;
bool is_simple_map_transition =
IsSimpleMapChangeTransition(from_kind, to_kind);
Label::Distance branch_distance =
is_simple_map_transition ? Label::kNear : Label::kFar;
__ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
__ j(not_equal, &not_applicable, branch_distance);
if (is_simple_map_transition) {
Register new_map_reg = ToRegister(instr->new_map_temp());
Handle<Map> map = instr->hydrogen()->transitioned_map();
__ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
Immediate(map));
// Write barrier.
ASSERT_NE(instr->temp(), NULL);
__ RecordWriteForMap(object_reg, to_map, new_map_reg,
ToRegister(instr->temp()),
kDontSaveFPRegs);
} else if (FLAG_compiled_transitions) {
PushSafepointRegistersScope scope(this);
if (!object_reg.is(eax)) {
__ push(object_reg);
}
LoadContextFromDeferred(instr->context());
if (!object_reg.is(eax)) {
__ pop(eax);
}
__ mov(ebx, to_map);
TransitionElementsKindStub stub(from_kind, to_kind);
__ CallStub(&stub);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
} else if (IsFastSmiElementsKind(from_kind) &&
IsFastDoubleElementsKind(to_kind)) {
Register new_map_reg = ToRegister(instr->new_map_temp());
__ mov(new_map_reg, to_map);
Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(edx));
ASSERT(new_map_reg.is(ebx));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
RelocInfo::CODE_TARGET, instr);
} else if (IsFastDoubleElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
Register new_map_reg = ToRegister(instr->new_map_temp());
__ mov(new_map_reg, to_map);
Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(edx));
ASSERT(new_map_reg.is(ebx));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
RelocInfo::CODE_TARGET, instr);
} else {
UNREACHABLE();
}
__ bind(&not_applicable);
}
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
class DeferredStringCharCodeAt: public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LStringCharCodeAt* instr_;
};
DeferredStringCharCodeAt* deferred =
new(zone()) DeferredStringCharCodeAt(this, instr);
StringCharLoadGenerator::Generate(masm(),
factory(),
ToRegister(instr->string()),
ToRegister(instr->index()),
ToRegister(instr->result()),
deferred->entry());
__ bind(deferred->exit());
}
void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
Register string = ToRegister(instr->string());
Register result = ToRegister(instr->result());
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
__ Set(result, Immediate(0));
PushSafepointRegistersScope scope(this);
__ push(string);
// Push the index as a smi. This is safe because of the checks in
// DoStringCharCodeAt above.
STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
if (instr->index()->IsConstantOperand()) {
int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
__ push(Immediate(Smi::FromInt(const_index)));
} else {
Register index = ToRegister(instr->index());
__ SmiTag(index);
__ push(index);
}
CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2,
instr, instr->context());
__ AssertSmi(eax);
__ SmiUntag(eax);
__ StoreToSafepointRegisterSlot(result, eax);
}
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
class DeferredStringCharFromCode: public LDeferredCode {
public:
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LStringCharFromCode* instr_;
};
DeferredStringCharFromCode* deferred =
new(zone()) DeferredStringCharFromCode(this, instr);
ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
Register char_code = ToRegister(instr->char_code());
Register result = ToRegister(instr->result());
ASSERT(!char_code.is(result));
__ cmp(char_code, String::kMaxOneByteCharCode);
__ j(above, deferred->entry());
__ Set(result, Immediate(factory()->single_character_string_cache()));
__ mov(result, FieldOperand(result,
char_code, times_pointer_size,
FixedArray::kHeaderSize));
__ cmp(result, factory()->undefined_value());
__ j(equal, deferred->entry());
__ bind(deferred->exit());
}
void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
Register char_code = ToRegister(instr->char_code());
Register result = ToRegister(instr->result());
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
__ Set(result, Immediate(0));
PushSafepointRegistersScope scope(this);
__ SmiTag(char_code);
__ push(char_code);
CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, eax);
}
void LCodeGen::DoStringLength(LStringLength* instr) {
Register string = ToRegister(instr->string());
Register result = ToRegister(instr->result());
__ mov(result, FieldOperand(string, String::kLengthOffset));
}
void LCodeGen::DoStringAdd(LStringAdd* instr) {
EmitPushTaggedOperand(instr->left());
EmitPushTaggedOperand(instr->right());
StringAddStub stub(NO_STRING_CHECK_IN_STUB);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
LOperand* input = instr->value();
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
ASSERT(output->IsDoubleRegister());
__ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
} else {
UNREACHABLE();
}
}
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
CpuFeatureScope scope(masm(), SSE2);
LOperand* input = instr->value();
LOperand* output = instr->result();
LOperand* temp = instr->temp();
__ LoadUint32(ToDoubleRegister(output),
ToRegister(input),
ToDoubleRegister(temp));
}
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
class DeferredNumberTagI: public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
codegen()->DoDeferredNumberTagI(instr_, instr_->value(), SIGNED_INT32);
}
virtual LInstruction* instr() { return instr_; }
private:
LNumberTagI* instr_;
};
LOperand* input = instr->value();
ASSERT(input->IsRegister() && input->Equals(instr->result()));
Register reg = ToRegister(input);
DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
__ SmiTag(reg);
__ j(overflow, deferred->entry());
__ bind(deferred->exit());
}
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
class DeferredNumberTagU: public LDeferredCode {
public:
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
codegen()->DoDeferredNumberTagI(instr_, instr_->value(), UNSIGNED_INT32);
}
virtual LInstruction* instr() { return instr_; }
private:
LNumberTagU* instr_;
};
LOperand* input = instr->value();
ASSERT(input->IsRegister() && input->Equals(instr->result()));
Register reg = ToRegister(input);
DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
__ cmp(reg, Immediate(Smi::kMaxValue));
__ j(above, deferred->entry());
__ SmiTag(reg);
__ bind(deferred->exit());
}
void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
LOperand* value,
IntegerSignedness signedness) {
Label slow;
Register reg = ToRegister(value);
Register tmp = reg.is(eax) ? ecx : eax;
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
Label done;
if (signedness == SIGNED_INT32) {
// There was overflow, so bits 30 and 31 of the original integer
// disagree. Try to allocate a heap number in new space and store
// the value in there. If that fails, call the runtime system.
__ SmiUntag(reg);
__ xor_(reg, 0x80000000);
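    // Example: for the input 0x40000000, SmiTag produced 0x80000000 (with
    // overflow), SmiUntag's arithmetic shift gave 0xC0000000, and flipping
    // bit 31 restores the original 0x40000000.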
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope feature_scope(masm(), SSE2);
__ cvtsi2sd(xmm0, Operand(reg));
} else {
__ push(reg);
__ fild_s(Operand(esp, 0));
__ pop(reg);
}
} else {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope feature_scope(masm(), SSE2);
__ LoadUint32(xmm0, reg, xmm1);
} else {
// There's no fild variant for unsigned values, so zero-extend to a 64-bit
// int manually.
__ push(Immediate(0));
__ push(reg);
__ fild_d(Operand(esp, 0));
__ pop(reg);
__ pop(reg);
}
}
if (FLAG_inline_new) {
__ AllocateHeapNumber(reg, tmp, no_reg, &slow);
__ jmp(&done, Label::kNear);
}
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
// TODO(3095996): Put a valid pointer value in the stack slot where the result
// register is stored, as this register is in the pointer map, but contains an
// integer value.
__ StoreToSafepointRegisterSlot(reg, Immediate(0));
// NumberTagI and NumberTagD use the context from the frame, rather than
// the environment's HContext or HInlinedContext value.
// They only call Runtime::kAllocateHeapNumber.
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
if (!reg.is(eax)) __ mov(reg, eax);
  // Done. Store the double (in xmm0 or on the x87 stack) into the value
  // field of the allocated heap number.
__ bind(&done);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope feature_scope(masm(), SSE2);
__ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
} else {
__ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
}
__ StoreToSafepointRegisterSlot(reg, reg);
}
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
class DeferredNumberTagD: public LDeferredCode {
public:
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LNumberTagD* instr_;
};
Register reg = ToRegister(instr->result());
bool convert_hole = false;
HValue* change_input = instr->hydrogen()->value();
if (change_input->IsLoadKeyed()) {
HLoadKeyed* load = HLoadKeyed::cast(change_input);
convert_hole = load->UsesMustHandleHole();
}
Label no_special_nan_handling;
Label done;
if (convert_hole) {
bool use_sse2 = CpuFeatures::IsSupported(SSE2);
if (use_sse2) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ ucomisd(input_reg, input_reg);
} else {
__ fld(0);
__ fld(0);
__ FCmp();
}
__ j(parity_odd, &no_special_nan_handling);
__ sub(esp, Immediate(kDoubleSize));
if (use_sse2) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ movdbl(MemOperand(esp, 0), input_reg);
} else {
__ fld(0);
__ fstp_d(MemOperand(esp, 0));
}
__ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
Immediate(kHoleNanUpper32));
Label canonicalize;
__ j(not_equal, &canonicalize);
__ add(esp, Immediate(kDoubleSize));
__ mov(reg, factory()->the_hole_value());
if (!use_sse2) {
__ fstp(0);
}
__ jmp(&done);
__ bind(&canonicalize);
__ add(esp, Immediate(kDoubleSize));
ExternalReference nan =
ExternalReference::address_of_canonical_non_hole_nan();
if (use_sse2) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ movdbl(input_reg, Operand::StaticVariable(nan));
} else {
__ fstp(0);
__ fld_d(Operand::StaticVariable(nan));
}
}
__ bind(&no_special_nan_handling);
DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
Register tmp = ToRegister(instr->temp());
__ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
} else {
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
} else {
__ fst_d(FieldOperand(reg, HeapNumber::kValueOffset));
}
__ bind(&done);
}
void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
Register reg = ToRegister(instr->result());
__ Set(reg, Immediate(0));
PushSafepointRegistersScope scope(this);
// NumberTagI and NumberTagD use the context from the frame, rather than
// the environment's HContext or HInlinedContext value.
// They only call Runtime::kAllocateHeapNumber.
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(reg, eax);
}
void LCodeGen::DoSmiTag(LSmiTag* instr) {
LOperand* input = instr->value();
ASSERT(input->IsRegister() && input->Equals(instr->result()));
ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
__ SmiTag(ToRegister(input));
}
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
LOperand* input = instr->value();
ASSERT(input->IsRegister() && input->Equals(instr->result()));
if (instr->needs_check()) {
__ test(ToRegister(input), Immediate(kSmiTagMask));
DeoptimizeIf(not_zero, instr->environment());
} else {
__ AssertSmi(ToRegister(input));
}
__ SmiUntag(ToRegister(input));
}
void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
Register temp_reg,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
Label load_smi, done;
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
__ JumpIfSmi(input_reg, &load_smi, Label::kNear);
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
if (deoptimize_on_undefined) {
DeoptimizeIf(not_equal, env);
} else {
Label heap_number;
__ j(equal, &heap_number, Label::kNear);
__ cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, env);
// Convert undefined to NaN.
ExternalReference nan =
ExternalReference::address_of_canonical_non_hole_nan();
__ fld_d(Operand::StaticVariable(nan));
__ jmp(&done, Label::kNear);
__ bind(&heap_number);
}
// Heap number to x87 conversion.
__ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
if (deoptimize_on_minus_zero) {
__ fldz();
__ FCmp();
__ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
__ j(not_zero, &done, Label::kNear);
// Use general purpose registers to check if we have -0.0
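      // (The x87 comparison above treats -0.0 and +0.0 as equal, so the sign
      // bit must be read directly from the heap number's exponent word.)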
__ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
__ test(temp_reg, Immediate(HeapNumber::kSignMask));
__ j(zero, &done, Label::kNear);
// Pop FPU stack before deoptimizing.
__ fstp(0);
DeoptimizeIf(not_zero, env);
}
__ jmp(&done, Label::kNear);
} else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
__ test(input_reg, Immediate(kSmiTagMask));
DeoptimizeIf(not_equal, env);
} else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
__ test(input_reg, Immediate(kSmiTagMask));
__ j(zero, &load_smi);
ExternalReference hole_nan_reference =
ExternalReference::address_of_the_hole_nan();
__ fld_d(Operand::StaticVariable(hole_nan_reference));
__ jmp(&done, Label::kNear);
} else {
ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
__ bind(&load_smi);
__ SmiUntag(input_reg); // Untag smi before converting to float.
__ push(input_reg);
__ fild_s(Operand(esp, 0));
__ pop(input_reg);
__ SmiTag(input_reg); // Retag smi.
__ bind(&done);
}
void LCodeGen::EmitNumberUntagD(Register input_reg,
Register temp_reg,
XMMRegister result_reg,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
Label load_smi, done;
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
__ JumpIfSmi(input_reg, &load_smi, Label::kNear);
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
if (deoptimize_on_undefined) {
DeoptimizeIf(not_equal, env);
} else {
Label heap_number;
__ j(equal, &heap_number, Label::kNear);
__ cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, env);
// Convert undefined to NaN.
ExternalReference nan =
ExternalReference::address_of_canonical_non_hole_nan();
__ movdbl(result_reg, Operand::StaticVariable(nan));
__ jmp(&done, Label::kNear);
__ bind(&heap_number);
}
// Heap number to XMM conversion.
__ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
if (deoptimize_on_minus_zero) {
XMMRegister xmm_scratch = xmm0;
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(result_reg, xmm_scratch);
__ j(not_zero, &done, Label::kNear);
__ movmskpd(temp_reg, result_reg);
__ test_b(temp_reg, 1);
DeoptimizeIf(not_zero, env);
}
__ jmp(&done, Label::kNear);
} else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
__ test(input_reg, Immediate(kSmiTagMask));
DeoptimizeIf(not_equal, env);
} else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
__ test(input_reg, Immediate(kSmiTagMask));
__ j(zero, &load_smi);
ExternalReference hole_nan_reference =
ExternalReference::address_of_the_hole_nan();
__ movdbl(result_reg, Operand::StaticVariable(hole_nan_reference));
__ jmp(&done, Label::kNear);
} else {
ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
// Smi to XMM conversion
__ bind(&load_smi);
__ SmiUntag(input_reg); // Untag smi before converting to float.
__ cvtsi2sd(result_reg, Operand(input_reg));
__ SmiTag(input_reg); // Retag smi.
__ bind(&done);
}
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Label done, heap_number;
Register input_reg = ToRegister(instr->value());
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
if (instr->truncating()) {
__ j(equal, &heap_number, Label::kNear);
// Check for undefined. Undefined is converted to zero for truncating
// conversions.
__ cmp(input_reg, factory()->undefined_value());
__ RecordComment("Deferred TaggedToI: cannot truncate");
DeoptimizeIf(not_equal, instr->environment());
__ mov(input_reg, 0);
__ jmp(&done, Label::kNear);
__ bind(&heap_number);
if (CpuFeatures::IsSupported(SSE3)) {
CpuFeatureScope scope(masm(), SSE3);
Label convert;
      // Use the more powerful conversion when SSE3 is available.
// Load x87 register with heap number.
__ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
// Get exponent alone and check for too-big exponent.
__ mov(input_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
__ and_(input_reg, HeapNumber::kExponentMask);
const uint32_t kTooBigExponent =
(HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
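      // An unbiased exponent of 63 or more means the value cannot be
      // represented in an int64, so the fisttp_d below would overflow;
      // such inputs deoptimize instead.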
__ cmp(Operand(input_reg), Immediate(kTooBigExponent));
__ j(less, &convert, Label::kNear);
// Pop FPU stack before deoptimizing.
__ fstp(0);
__ RecordComment("Deferred TaggedToI: exponent too big");
DeoptimizeIf(no_condition, instr->environment());
      __ bind(&convert);
      // Reserve space for the 64 bit answer.
      __ sub(Operand(esp), Immediate(kDoubleSize));
// Do conversion, which cannot fail because we checked the exponent.
__ fisttp_d(Operand(esp, 0));
__ mov(input_reg, Operand(esp, 0)); // Low word of answer is the result.
__ add(Operand(esp), Immediate(kDoubleSize));
} else if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
__ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ cvttsd2si(input_reg, Operand(xmm0));
__ cmp(input_reg, 0x80000000u);
__ j(not_equal, &done);
      // Check if the input was 0x80000000 (kMinInt).
      // If not, the conversion overflowed and we deoptimize.
ExternalReference min_int = ExternalReference::address_of_min_int();
__ movdbl(xmm_temp, Operand::StaticVariable(min_int));
__ ucomisd(xmm_temp, xmm0);
DeoptimizeIf(not_equal, instr->environment());
DeoptimizeIf(parity_even, instr->environment()); // NaN.
} else {
UNREACHABLE();
}
} else if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
// Deoptimize if we don't have a heap number.
__ RecordComment("Deferred TaggedToI: not a heap number");
DeoptimizeIf(not_equal, instr->environment());
XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
__ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ cvttsd2si(input_reg, Operand(xmm0));
__ cvtsi2sd(xmm_temp, Operand(input_reg));
__ ucomisd(xmm0, xmm_temp);
__ RecordComment("Deferred TaggedToI: lost precision");
DeoptimizeIf(not_equal, instr->environment());
__ RecordComment("Deferred TaggedToI: NaN");
DeoptimizeIf(parity_even, instr->environment()); // NaN.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ test(input_reg, Operand(input_reg));
__ j(not_zero, &done);
__ movmskpd(input_reg, xmm0);
__ and_(input_reg, 1);
__ RecordComment("Deferred TaggedToI: minus zero");
DeoptimizeIf(not_zero, instr->environment());
}
} else {
UNREACHABLE();
}
__ bind(&done);
}
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
class DeferredTaggedToI: public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LTaggedToI* instr_;
};
LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register input_reg = ToRegister(input);
ASSERT(input_reg.is(ToRegister(instr->result())));
DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
__ JumpIfNotSmi(input_reg, deferred->entry());
__ SmiUntag(input_reg);
__ bind(deferred->exit());
}
void LCodeGen::DoDeferredTaggedToINoSSE2(LTaggedToINoSSE2* instr) {
Label done, heap_number;
Register result_reg = ToRegister(instr->result());
Register input_reg = ToRegister(instr->value());
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
__ j(equal, &heap_number, Label::kNear);
// Check for undefined. Undefined is converted to zero for truncating
// conversions.
__ cmp(input_reg, factory()->undefined_value());
__ RecordComment("Deferred TaggedToI: cannot truncate");
DeoptimizeIf(not_equal, instr->environment());
__ xor_(result_reg, result_reg);
__ jmp(&done, Label::kFar);
__ bind(&heap_number);
// Surprisingly, all of this crazy bit manipulation is considerably
// faster than using the built-in x86 CPU conversion functions (about 6x).
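  // Roughly, in C (a sketch only; deopt checks and corner cases elided,
  // with hi/lo denoting the two 32-bit words of the double):
  //   int e = ((hi & kExponentMask) >> kExponentShift) - kExponentBias;
  //   uint32_t m = (hi & kMantissaMask) | (1 << kExponentShift);  // 1.f
  //   uint32_t magnitude = ((m << 11) | (lo >> 21)) >> (31 - e);
  // followed by a negation when the sign bit of 'hi' is set.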
Label right_exponent, adjust_bias, zero_result;
Register scratch = ToRegister(instr->scratch());
Register scratch2 = ToRegister(instr->scratch2());
// Get exponent word.
__ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
// Get exponent alone in scratch2.
__ mov(scratch2, scratch);
__ and_(scratch2, HeapNumber::kExponentMask);
__ shr(scratch2, HeapNumber::kExponentShift);
if (instr->truncating()) {
__ j(zero, &zero_result);
} else {
__ j(not_zero, &adjust_bias);
__ test(scratch, Immediate(HeapNumber::kMantissaMask));
DeoptimizeIf(not_zero, instr->environment());
__ cmp(FieldOperand(input_reg, HeapNumber::kMantissaOffset), Immediate(0));
DeoptimizeIf(not_equal, instr->environment());
__ bind(&adjust_bias);
}
__ sub(scratch2, Immediate(HeapNumber::kExponentBias));
if (!instr->truncating()) {
DeoptimizeIf(negative, instr->environment());
} else {
__ j(negative, &zero_result);
}
// Get the second half of the double. For some exponents we don't
// actually need this because the bits get shifted out again, but
// it's probably slower to test than just to do it.
Register scratch3 = ToRegister(instr->scratch3());
__ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
__ xor_(result_reg, result_reg);
const uint32_t non_int32_exponent = 31;
__ cmp(scratch2, Immediate(non_int32_exponent));
// If we have a match of the int32 exponent then skip some logic.
__ j(equal, &right_exponent, Label::kNear);
  // If the number doesn't fit in an int32, deopt.
DeoptimizeIf(greater, instr->environment());
// Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent
// < 31.
__ mov(result_reg, Immediate(31));
__ sub(result_reg, scratch2);
__ bind(&right_exponent);
  // Save off the exponent word for the negative check later.
__ mov(scratch2, scratch);
// Here result_reg is the shift, scratch is the exponent word.
// Get the top bits of the mantissa.
__ and_(scratch, HeapNumber::kMantissaMask);
// Put back the implicit 1.
__ or_(scratch, 1 << HeapNumber::kExponentShift);
// Shift up the mantissa bits to take up the space the exponent used to
  // take. We have kExponentShift + 1 significant bits in the low end of the
// word. Shift them to the top bits.
const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
__ shl(scratch, shift_distance);
if (!instr->truncating()) {
    // If not truncating, a non-zero value in the bottom (32 - shift_distance)
    // bits means a non-integral value --> trigger a deopt.
__ test(scratch3, Immediate((1 << (32 - shift_distance)) - 1));
DeoptimizeIf(not_equal, instr->environment());
}
  // Shift down (32 - shift_distance) bits to get the most significant bits
  // of the low mantissa word.
__ shr(scratch3, 32 - shift_distance);
__ or_(scratch3, scratch);
if (!instr->truncating()) {
    // If not truncating, a non-zero value in the bits that will be shifted
    // away when adjusting the exponent means rounding --> deopt.
__ mov(scratch, 0x1);
ASSERT(result_reg.is(ecx));
__ shl_cl(scratch);
__ dec(scratch);
__ test(scratch3, scratch);
DeoptimizeIf(not_equal, instr->environment());
}
// Move down according to the exponent.
ASSERT(result_reg.is(ecx));
__ shr_cl(scratch3);
// Now the unsigned 32-bit answer is in scratch3. We need to move it to
// result_reg and we may need to fix the sign.
Label negative_result;
__ xor_(result_reg, result_reg);
__ cmp(scratch2, result_reg);
__ j(less, &negative_result, Label::kNear);
__ cmp(scratch3, result_reg);
__ mov(result_reg, scratch3);
// If the result is > MAX_INT, result doesn't fit in signed 32-bit --> deopt.
DeoptimizeIf(less, instr->environment());
__ jmp(&done, Label::kNear);
__ bind(&zero_result);
__ xor_(result_reg, result_reg);
__ jmp(&done, Label::kNear);
__ bind(&negative_result);
__ sub(result_reg, scratch3);
if (!instr->truncating()) {
// -0.0 triggers a deopt.
DeoptimizeIf(zero, instr->environment());
}
// If the negative subtraction overflows into a positive number, there was an
// overflow --> deopt.
DeoptimizeIf(positive, instr->environment());
__ bind(&done);
}
void LCodeGen::DoTaggedToINoSSE2(LTaggedToINoSSE2* instr) {
class DeferredTaggedToINoSSE2: public LDeferredCode {
public:
DeferredTaggedToINoSSE2(LCodeGen* codegen, LTaggedToINoSSE2* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredTaggedToINoSSE2(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LTaggedToINoSSE2* instr_;
};
LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register input_reg = ToRegister(input);
ASSERT(input_reg.is(ToRegister(instr->result())));
DeferredTaggedToINoSSE2* deferred =
new(zone()) DeferredTaggedToINoSSE2(this, instr);
// Smi check.
__ JumpIfNotSmi(input_reg, deferred->entry());
__ SmiUntag(input_reg); // Untag smi.
__ bind(deferred->exit());
}
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
LOperand* input = instr->value();
ASSERT(input->IsRegister());
LOperand* temp = instr->temp();
ASSERT(temp == NULL || temp->IsRegister());
LOperand* result = instr->result();
ASSERT(result->IsDoubleRegister());
Register input_reg = ToRegister(input);
bool deoptimize_on_minus_zero =
instr->hydrogen()->deoptimize_on_minus_zero();
Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
HValue* value = instr->hydrogen()->value();
if (value->type().IsSmi()) {
if (value->IsLoadKeyed()) {
HLoadKeyed* load = HLoadKeyed::cast(value);
if (load->UsesMustHandleHole()) {
if (load->hole_mode() == ALLOW_RETURN_HOLE) {
mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
} else {
mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
}
} else {
mode = NUMBER_CANDIDATE_IS_SMI;
}
}
}
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister result_reg = ToDoubleRegister(result);
EmitNumberUntagD(input_reg,
temp_reg,
result_reg,
instr->hydrogen()->deoptimize_on_undefined(),
deoptimize_on_minus_zero,
instr->environment(),
mode);
} else {
EmitNumberUntagDNoSSE2(input_reg,
temp_reg,
instr->hydrogen()->deoptimize_on_undefined(),
deoptimize_on_minus_zero,
instr->environment(),
mode);
CurrentInstructionReturnsX87Result();
}
}
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
LOperand* input = instr->value();
ASSERT(input->IsDoubleRegister());
LOperand* result = instr->result();
ASSERT(result->IsRegister());
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(input);
Register result_reg = ToRegister(result);
if (instr->truncating()) {
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations.
__ cvttsd2si(result_reg, Operand(input_reg));
__ cmp(result_reg, 0x80000000u);
if (CpuFeatures::IsSupported(SSE3)) {
      // This will deoptimize if the exponent of the input is out of range.
CpuFeatureScope scope(masm(), SSE3);
Label convert, done;
__ j(not_equal, &done, Label::kNear);
__ sub(Operand(esp), Immediate(kDoubleSize));
__ movdbl(Operand(esp, 0), input_reg);
// Get exponent alone and check for too-big exponent.
__ mov(result_reg, Operand(esp, sizeof(int32_t)));
__ and_(result_reg, HeapNumber::kExponentMask);
const uint32_t kTooBigExponent =
(HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
__ cmp(Operand(result_reg), Immediate(kTooBigExponent));
__ j(less, &convert, Label::kNear);
__ add(Operand(esp), Immediate(kDoubleSize));
DeoptimizeIf(no_condition, instr->environment());
__ bind(&convert);
// Do conversion, which cannot fail because we checked the exponent.
__ fld_d(Operand(esp, 0));
__ fisttp_d(Operand(esp, 0));
__ mov(result_reg, Operand(esp, 0)); // Low word of answer is the result.
__ add(Operand(esp), Immediate(kDoubleSize));
__ bind(&done);
} else {
Label done;
Register temp_reg = ToRegister(instr->temp());
XMMRegister xmm_scratch = xmm0;
// If cvttsd2si succeeded, we're done. Otherwise, we attempt
// manual conversion.
__ j(not_equal, &done, Label::kNear);
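      // Roughly, the manual path below computes (a sketch; deopt checks and
      // sign handling elided, with 'bits' the raw 64-bit pattern of the
      // input):
      //   uint64_t frac = (bits << kExponentBits) | (1ULL << 63);
      //   uint32_t magnitude = (uint32_t)(frac >> (63 - unbiased_exponent));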
// Get high 32 bits of the input in result_reg and temp_reg.
__ pshufd(xmm_scratch, input_reg, 1);
__ movd(Operand(temp_reg), xmm_scratch);
__ mov(result_reg, temp_reg);
// Prepare negation mask in temp_reg.
__ sar(temp_reg, kBitsPerInt - 1);
// Extract the exponent from result_reg and subtract adjusted
// bias from it. The adjustment is selected in a way such that
// when the difference is zero, the answer is in the low 32 bits
// of the input, otherwise a shift has to be performed.
__ shr(result_reg, HeapNumber::kExponentShift);
__ and_(result_reg,
HeapNumber::kExponentMask >> HeapNumber::kExponentShift);
__ sub(Operand(result_reg),
Immediate(HeapNumber::kExponentBias +
HeapNumber::kExponentBits +
HeapNumber::kMantissaBits));
// Don't handle big (> kMantissaBits + kExponentBits == 63) or
// special exponents.
DeoptimizeIf(greater, instr->environment());
// Zero out the sign and the exponent in the input (by shifting
// it to the left) and restore the implicit mantissa bit,
// i.e. convert the input to unsigned int64 shifted left by
// kExponentBits.
ExternalReference minus_zero = ExternalReference::address_of_minus_zero();
// Minus zero has the most significant bit set and the other
// bits cleared.
__ movdbl(xmm_scratch, Operand::StaticVariable(minus_zero));
__ psllq(input_reg, HeapNumber::kExponentBits);
__ por(input_reg, xmm_scratch);
// Get the amount to shift the input right in xmm_scratch.
__ neg(result_reg);
__ movd(xmm_scratch, Operand(result_reg));
// Shift the input right and extract low 32 bits.
__ psrlq(input_reg, xmm_scratch);
__ movd(Operand(result_reg), input_reg);
// Use the prepared mask in temp_reg to negate the result if necessary.
__ xor_(result_reg, Operand(temp_reg));
__ sub(result_reg, Operand(temp_reg));
__ bind(&done);
}
} else {
Label done;
__ cvttsd2si(result_reg, Operand(input_reg));
__ cvtsi2sd(xmm0, Operand(result_reg));
__ ucomisd(xmm0, input_reg);
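    // Converting back and comparing detects inputs that were not exact
    // integers: a fractional value converts back unequal, and a NaN
    // compares unordered (parity flag set).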
DeoptimizeIf(not_equal, instr->environment());
DeoptimizeIf(parity_even, instr->environment()); // NaN.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// The integer converted back is equal to the original. We
// only have to test if we got -0 as an input.
__ test(result_reg, Operand(result_reg));
__ j(not_zero, &done, Label::kNear);
__ movmskpd(result_reg, input_reg);
// Bit 0 contains the sign of the double in input_reg.
// If input was positive, we are ok and return 0, otherwise
// deoptimize.
__ and_(result_reg, 1);
DeoptimizeIf(not_zero, instr->environment());
}
__ bind(&done);
}
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
DeoptimizeIf(not_zero, instr->environment());
}
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr->environment());
}
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
Register input = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
__ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
if (instr->hydrogen()->is_interval_check()) {
InstanceType first;
InstanceType last;
instr->hydrogen()->GetCheckInterval(&first, &last);
__ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
static_cast<int8_t>(first));
    // If there is only one type in the interval, check for equality.
if (first == last) {
DeoptimizeIf(not_equal, instr->environment());
} else {
DeoptimizeIf(below, instr->environment());
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
static_cast<int8_t>(last));
DeoptimizeIf(above, instr->environment());
}
}
} else {
uint8_t mask;
uint8_t tag;
instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
if (IsPowerOf2(mask)) {
ASSERT(tag == 0 || IsPowerOf2(tag));
__ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
} else {
__ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
__ and_(temp, mask);
__ cmp(temp, tag);
DeoptimizeIf(not_equal, instr->environment());
}
}
}
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
Handle<JSFunction> target = instr->hydrogen()->target();
if (instr->hydrogen()->target_in_new_space()) {
Register reg = ToRegister(instr->value());
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(target);
__ cmp(reg, Operand::Cell(cell));
} else {
Operand operand = ToOperand(instr->value());
__ cmp(operand, target);
}
DeoptimizeIf(not_equal, instr->environment());
}
void LCodeGen::DoCheckMapCommon(Register reg,
Handle<Map> map,
CompareMapMode mode,
LInstruction* instr) {
Label success;
__ CompareMap(reg, map, &success, mode);
DeoptimizeIf(not_equal, instr->environment());
__ bind(&success);
}
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
Label success;
SmallMapList* map_set = instr->hydrogen()->map_set();
for (int i = 0; i < map_set->length() - 1; i++) {
Handle<Map> map = map_set->at(i);
__ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP);
__ j(equal, &success);
}
Handle<Map> map = map_set->last();
DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr);
__ bind(&success);
}


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
__ ClampDoubleToUint8(value_reg, xmm0, result_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
ASSERT(instr->unclamped()->Equals(instr->result()));
Register value_reg = ToRegister(instr->result());
__ ClampUint8(value_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
CpuFeatureScope scope(masm(), SSE2);
ASSERT(instr->unclamped()->Equals(instr->result()));
Register input_reg = ToRegister(instr->unclamped());
Label is_smi, done, heap_number;
__ JumpIfSmi(input_reg, &is_smi);
// Check for heap number
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
__ j(equal, &heap_number, Label::kNear);
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, instr->environment());
__ mov(input_reg, 0);
__ jmp(&done, Label::kNear);
// Heap number
__ bind(&heap_number);
__ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ ClampDoubleToUint8(xmm0, xmm1, input_reg);
__ jmp(&done, Label::kNear);
// smi
__ bind(&is_smi);
__ SmiUntag(input_reg);
__ ClampUint8(input_reg);
__ bind(&done);
}


void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
Register input_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
Register scratch = ToRegister(instr->scratch());
Register scratch2 = ToRegister(instr->scratch2());
Register scratch3 = ToRegister(instr->scratch3());
Label is_smi, done, heap_number, valid_exponent,
largest_value, zero_result, maybe_nan_or_infinity;
__ JumpIfSmi(input_reg, &is_smi);
// Check for heap number
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
__ j(equal, &heap_number, Label::kFar);
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, instr->environment());
__ jmp(&zero_result);
// Heap number
__ bind(&heap_number);
  // Surprisingly, all of the hand-crafted bit manipulations below are much
  // faster than the x86 FPU built-in instruction, especially since "banker's
  // rounding" would additionally be very expensive.
// Get exponent word.
__ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
__ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
// Test for negative values --> clamp to zero
__ test(scratch, scratch);
__ j(negative, &zero_result);
// Get exponent alone in scratch2.
__ mov(scratch2, scratch);
__ and_(scratch2, HeapNumber::kExponentMask);
__ shr(scratch2, HeapNumber::kExponentShift);
__ j(zero, &zero_result);
__ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
__ j(negative, &zero_result);
const uint32_t non_int8_exponent = 7;
__ cmp(scratch2, Immediate(non_int8_exponent + 1));
// If the exponent is too big, check for special values.
__ j(greater, &maybe_nan_or_infinity, Label::kNear);
__ bind(&valid_exponent);
  // Exponent word is in scratch, exponent + 1 in scratch2. We know that
  // -1 <= exponent <= 7. The shift bias is the number of bits to shift the
  // mantissa such that with an exponent of 7 the top-most one ends up in
  // bit 30, allowing detection of the rounding overflow from 255.5 to 256
  // (bit 31 goes from 0 to 1).
int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1;
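  // Worked example, assuming the standard IEEE-754 layout in which
  // HeapNumber::kExponentShift is 20 in the upper word: shift_bias is then
  // (30 - 20) - 7 - 1 == 2, and since scratch2 holds exponent + 1 the shift
  // amount below is exponent + 3. The implicit one sits at bit 20, so for
  // the largest valid exponent (7) it lands at 20 + 7 + 3 == 30, as
  // described above.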
__ lea(result_reg, MemOperand(scratch2, shift_bias));
// Here result_reg (ecx) is the shift, scratch is the exponent word. Get the
// top bits of the mantissa.
__ and_(scratch, HeapNumber::kMantissaMask);
// Put back the implicit 1 of the mantissa
__ or_(scratch, 1 << HeapNumber::kExponentShift);
// Shift up to round
__ shl_cl(scratch);
  // Use "banker's rounding" as the spec requires: if the fractional part of
  // the number is exactly 0.5, take the bit in the "ones" place and add it
  // to the "halves" place, which has the effect of rounding to even.
__ mov(scratch2, scratch);
const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8;
const uint32_t one_bit_shift = one_half_bit_shift + 1;
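  // After the shift above, the integer part occupies bits 30..23; bit 22
  // (one_half_bit_shift) is the "halves" bit and bit 23 the "ones" bit, and
  // the final shr by 23 (== one_bit_shift) extracts the rounded 8-bit value.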
__ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
__ cmp(scratch2, Immediate(1 << one_half_bit_shift));
Label no_round;
__ j(less, &no_round);
Label round_up;
__ mov(scratch2, Immediate(1 << one_half_bit_shift));
__ j(greater, &round_up);
__ test(scratch3, scratch3);
__ j(not_zero, &round_up);
__ mov(scratch2, scratch);
__ and_(scratch2, Immediate(1 << one_bit_shift));
__ shr(scratch2, 1);
__ bind(&round_up);
__ add(scratch, scratch2);
__ j(overflow, &largest_value);
__ bind(&no_round);
__ shr(scratch, 23);
__ mov(result_reg, scratch);
__ jmp(&done, Label::kNear);
__ bind(&maybe_nan_or_infinity);
  // Check for NaN/Infinity; all other values this large clamp to 255.
__ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1));
__ j(not_equal, &largest_value, Label::kNear);
// Check for NaN, which differs from Infinity in that at least one mantissa
// bit is set.
__ and_(scratch, HeapNumber::kMantissaMask);
__ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
__ j(not_zero, &zero_result); // M!=0 --> NaN
// Infinity -> Fall through to map to 255.
__ bind(&largest_value);
__ mov(result_reg, Immediate(255));
__ jmp(&done, Label::kNear);
__ bind(&zero_result);
__ xor_(result_reg, result_reg);
__ jmp(&done);
// smi
__ bind(&is_smi);
if (!input_reg.is(result_reg)) {
__ mov(result_reg, input_reg);
}
__ SmiUntag(result_reg);
__ ClampUint8(result_reg);
__ bind(&done);
}


void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
Register reg = ToRegister(instr->temp());
ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
ZoneList<Handle<Map> >* maps = instr->maps();
ASSERT(prototypes->length() == maps->length());
if (instr->hydrogen()->CanOmitPrototypeChecks()) {
for (int i = 0; i < maps->length(); i++) {
prototype_maps_.Add(maps->at(i), info()->zone());
}
} else {
for (int i = 0; i < prototypes->length(); i++) {
__ LoadHeapObject(reg, prototypes->at(i));
DoCheckMapCommon(reg, maps->at(i), ALLOW_ELEMENT_TRANSITION_MAPS, instr);
}
}
}


void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
class DeferredAllocateObject: public LDeferredCode {
public:
DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LAllocateObject* instr_;
};
DeferredAllocateObject* deferred =
new(zone()) DeferredAllocateObject(this, instr);
Register result = ToRegister(instr->result());
Register scratch = ToRegister(instr->temp());
Handle<JSFunction> constructor = instr->hydrogen()->constructor();
Handle<Map> initial_map(constructor->initial_map());
int instance_size = initial_map->instance_size();
ASSERT(initial_map->pre_allocated_property_fields() +
initial_map->unused_property_fields() -
initial_map->inobject_properties() == 0);
// Allocate memory for the object. The initial map might change when
// the constructor's prototype changes, but instance size and property
// counts remain unchanged (if slack tracking finished).
ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
__ Allocate(instance_size, result, no_reg, scratch, deferred->entry(),
TAG_OBJECT);
__ bind(deferred->exit());
if (FLAG_debug_code) {
Label is_in_new_space;
__ JumpIfInNewSpace(result, scratch, &is_in_new_space);
__ Abort("Allocated object is not in new-space");
__ bind(&is_in_new_space);
}
// Load the initial map.
Register map = scratch;
__ LoadHeapObject(scratch, constructor);
__ mov(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
if (FLAG_debug_code) {
__ AssertNotSmi(map);
__ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
instance_size >> kPointerSizeLog2);
__ Assert(equal, "Unexpected instance size");
__ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
initial_map->pre_allocated_property_fields());
__ Assert(equal, "Unexpected pre-allocated property fields count");
__ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
initial_map->unused_property_fields());
__ Assert(equal, "Unexpected unused property fields count");
__ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
initial_map->inobject_properties());
__ Assert(equal, "Unexpected in-object property fields count");
}
// Initialize map and fields of the newly allocated object.
ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
__ mov(FieldOperand(result, JSObject::kMapOffset), map);
__ mov(scratch, factory()->empty_fixed_array());
__ mov(FieldOperand(result, JSObject::kElementsOffset), scratch);
__ mov(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
if (initial_map->inobject_properties() != 0) {
__ mov(scratch, factory()->undefined_value());
for (int i = 0; i < initial_map->inobject_properties(); i++) {
int property_offset = JSObject::kHeaderSize + i * kPointerSize;
__ mov(FieldOperand(result, property_offset), scratch);
}
}
}


void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
Register result = ToRegister(instr->result());
Handle<JSFunction> constructor = instr->hydrogen()->constructor();
Handle<Map> initial_map(constructor->initial_map());
int instance_size = initial_map->instance_size();
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
__ Set(result, Immediate(0));
PushSafepointRegistersScope scope(this);
__ push(Immediate(Smi::FromInt(instance_size)));
CallRuntimeFromDeferred(
Runtime::kAllocateInNewSpace, 1, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, eax);
}


void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate: public LDeferredCode {
public:
DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LAllocate* instr_;
};
DeferredAllocate* deferred =
new(zone()) DeferredAllocate(this, instr);
Register result = ToRegister(instr->result());
Register temp = ToRegister(instr->temp());
// Allocate memory for the object.
AllocationFlags flags = TAG_OBJECT;
if (instr->hydrogen()->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
__ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
__ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
}
__ bind(deferred->exit());
}


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
Register size = ToRegister(instr->size());
Register result = ToRegister(instr->result());
__ SmiTag(size);
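  // SmiTag shifts the byte count left by one, producing the tagged smi that
  // the allocation runtime function expects as its argument.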
PushSafepointRegistersScope scope(this);
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
if (!size.is(result)) {
__ StoreToSafepointRegisterSlot(result, size);
}
__ push(size);
if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
CallRuntimeFromDeferred(
Runtime::kAllocateInOldPointerSpace, 1, instr, instr->context());
} else {
CallRuntimeFromDeferred(
Runtime::kAllocateInNewSpace, 1, instr, instr->context());
}
__ StoreToSafepointRegisterSlot(result, eax);
}


void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
Handle<FixedArray> literals(instr->environment()->closure()->literals());
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate_elements_kind();
AllocationSiteMode allocation_site_mode =
instr->hydrogen()->allocation_site_mode();
  // Deopt if the array literal boilerplate ElementsKind is of a type
  // different from the expected one. The check isn't necessary if the
  // boilerplate has already been converted to TERMINAL_FAST_ELEMENTS_KIND.
if (CanTransitionToMoreGeneralFastElementsKind(
boilerplate_elements_kind, true)) {
__ LoadHeapObject(eax, instr->hydrogen()->boilerplate_object());
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
// Load the map's "bit field 2". We only need the first byte,
// but the following masking takes care of that anyway.
__ mov(ebx, FieldOperand(ebx, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
__ and_(ebx, Map::kElementsKindMask);
__ cmp(ebx, boilerplate_elements_kind << Map::kElementsKindShift);
DeoptimizeIf(not_equal, instr->environment());
}
// Set up the parameters to the stub/runtime call and pick the right
// runtime function or stub to call. Boilerplate already exists,
// constant elements are never accessed, pass an empty fixed array.
int length = instr->hydrogen()->length();
if (instr->hydrogen()->IsCopyOnWrite()) {
ASSERT(instr->hydrogen()->depth() == 1);
__ LoadHeapObject(eax, literals);
__ mov(ebx, Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
__ mov(ecx, Immediate(isolate()->factory()->empty_fixed_array()));
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else if (instr->hydrogen()->depth() > 1) {
__ PushHeapObject(literals);
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
__ push(Immediate(isolate()->factory()->empty_fixed_array()));
CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ PushHeapObject(literals);
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
__ push(Immediate(isolate()->factory()->empty_fixed_array()));
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
__ LoadHeapObject(eax, literals);
__ mov(ebx, Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
__ mov(ecx, Immediate(isolate()->factory()->empty_fixed_array()));
FastCloneShallowArrayStub::Mode mode =
boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
: FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
}


void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
Handle<FixedArray> literals(instr->environment()->closure()->literals());
Handle<FixedArray> constant_properties =
instr->hydrogen()->constant_properties();
int flags = instr->hydrogen()->fast_elements()
? ObjectLiteral::kFastElements
: ObjectLiteral::kNoFlags;
flags |= instr->hydrogen()->has_function()
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
// Set up the parameters to the stub/runtime call and pick the right
// runtime function or stub to call.
int properties_count = constant_properties->length() / 2;
if (instr->hydrogen()->depth() > 1) {
__ PushHeapObject(literals);
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
__ push(Immediate(constant_properties));
__ push(Immediate(Smi::FromInt(flags)));
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
} else if (flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ PushHeapObject(literals);
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
__ push(Immediate(constant_properties));
__ push(Immediate(Smi::FromInt(flags)));
CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
} else {
__ LoadHeapObject(eax, literals);
__ mov(ebx, Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
__ mov(ecx, Immediate(constant_properties));
__ mov(edx, Immediate(Smi::FromInt(flags)));
FastCloneShallowObjectStub stub(properties_count);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
ASSERT(ToRegister(instr->value()).is(eax));
__ push(eax);
CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
Label materialized;
// Registers will be used as follows:
// ecx = literals array.
// ebx = regexp literal.
// eax = regexp literal clone.
// esi = context.
int literal_offset =
FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
__ LoadHeapObject(ecx, instr->hydrogen()->literals());
__ mov(ebx, FieldOperand(ecx, literal_offset));
__ cmp(ebx, factory()->undefined_value());
__ j(not_equal, &materialized, Label::kNear);
  // Create the regexp literal using the runtime function.
  // The result will be in eax.
__ push(ecx);
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
__ push(Immediate(instr->hydrogen()->pattern()));
__ push(Immediate(instr->hydrogen()->flags()));
CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
__ mov(ebx, eax);
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
__ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&runtime_allocate);
__ push(ebx);
__ push(Immediate(Smi::FromInt(size)));
CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
__ pop(ebx);
__ bind(&allocated);
// Copy the content into the newly allocated memory.
// (Unroll copy loop once for better throughput).
for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
__ mov(edx, FieldOperand(ebx, i));
__ mov(ecx, FieldOperand(ebx, i + kPointerSize));
__ mov(FieldOperand(eax, i), edx);
__ mov(FieldOperand(eax, i + kPointerSize), ecx);
}
if ((size % (2 * kPointerSize)) != 0) {
__ mov(edx, FieldOperand(ebx, size - kPointerSize));
__ mov(FieldOperand(eax, size - kPointerSize), edx);
}
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
  // Use the fast-case closure allocation code that allocates in new
  // space for nested functions that don't need literal cloning.
Handle<SharedFunctionInfo> shared_info = instr->shared_info();
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && shared_info->num_literals() == 0) {
FastNewClosureStub stub(shared_info->language_mode(),
shared_info->is_generator());
__ push(Immediate(shared_info));
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
__ push(esi);
__ push(Immediate(shared_info));
__ push(Immediate(pretenure
? factory()->true_value()
: factory()->false_value()));
CallRuntime(Runtime::kNewClosure, 3, instr);
}
}


void LCodeGen::DoTypeof(LTypeof* instr) {
LOperand* input = instr->value();
EmitPushTaggedOperand(input);
CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Register input = ToRegister(instr->value());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
Condition final_branch_condition =
EmitTypeofIs(true_label, false_label, input, instr->type_literal());
if (final_branch_condition != no_condition) {
EmitBranch(true_block, false_block, final_branch_condition);
}
}


Condition LCodeGen::EmitTypeofIs(Label* true_label,
Label* false_label,
Register input,
Handle<String> type_name) {
Condition final_branch_condition = no_condition;
if (type_name->Equals(heap()->number_string())) {
__ JumpIfSmi(input, true_label);
__ cmp(FieldOperand(input, HeapObject::kMapOffset),
factory()->heap_number_map());
final_branch_condition = equal;
} else if (type_name->Equals(heap()->string_string())) {
__ JumpIfSmi(input, false_label);
__ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
__ j(above_equal, false_label);
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
final_branch_condition = zero;
} else if (type_name->Equals(heap()->symbol_string())) {
__ JumpIfSmi(input, false_label);
__ CmpObjectType(input, SYMBOL_TYPE, input);
final_branch_condition = equal;
} else if (type_name->Equals(heap()->boolean_string())) {
__ cmp(input, factory()->true_value());
__ j(equal, true_label);
__ cmp(input, factory()->false_value());
final_branch_condition = equal;
} else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
__ cmp(input, factory()->null_value());
final_branch_condition = equal;
} else if (type_name->Equals(heap()->undefined_string())) {
__ cmp(input, factory()->undefined_value());
__ j(equal, true_label);
__ JumpIfSmi(input, false_label);
// Check for undetectable objects => true.
__ mov(input, FieldOperand(input, HeapObject::kMapOffset));
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
final_branch_condition = not_zero;
} else if (type_name->Equals(heap()->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label);
__ CmpObjectType(input, JS_FUNCTION_TYPE, input);
__ j(equal, true_label);
__ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
final_branch_condition = equal;
} else if (type_name->Equals(heap()->object_string())) {
__ JumpIfSmi(input, false_label);
if (!FLAG_harmony_typeof) {
__ cmp(input, factory()->null_value());
__ j(equal, true_label);
}
__ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
__ j(below, false_label);
__ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
__ j(above, false_label);
// Check for undetectable objects => false.
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
final_branch_condition = zero;
} else {
__ jmp(false_label);
}
return final_branch_condition;
}


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
Register temp = ToRegister(instr->temp());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
EmitIsConstructCall(temp);
EmitBranch(true_block, false_block, equal);
}


void LCodeGen::EmitIsConstructCall(Register temp) {
// Get the frame pointer for the calling frame.
__ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
// Skip the arguments adaptor frame if it exists.
Label check_frame_marker;
__ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &check_frame_marker, Label::kNear);
__ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
// Check the marker in the calling frame.
__ bind(&check_frame_marker);
__ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
}


void LCodeGen::EnsureSpaceForLazyDeopt() {
if (!info()->IsStub()) {
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
int patch_size = Deoptimizer::patch_size();
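    // Lazy deoptimization overwrites patch_size() bytes starting at the
    // recorded pc with a call sequence; padding with nops guarantees the
    // patch cannot clobber code emitted after the previous bailout point.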
if (current_pc < last_lazy_deopt_pc_ + patch_size) {
int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
__ Nop(padding_size);
}
}
last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
EnsureSpaceForLazyDeopt();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
DeoptimizeIf(no_condition, instr->environment());
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
// Nothing to see here, move on!
}


void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
LOperand* obj = instr->object();
LOperand* key = instr->key();
__ push(ToOperand(obj));
EmitPushTaggedOperand(key);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
  // Create a safepoint generator that also ensures enough space in the
  // reloc info for patching in deoptimization (since this invokes a
  // builtin).
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
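  // The DELETE builtin consumes three stack values: the object, the key, and
  // the language-mode flag pushed below.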
__ push(Immediate(Smi::FromInt(strict_mode_flag())));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntimeSaveDoubles(Runtime::kStackGuard);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
class DeferredStackCheck: public LDeferredCode {
public:
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LStackCheck* instr_;
};
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
// There is no LLazyBailout instruction for stack-checks. We have to
// prepare for lazy deoptimization explicitly here.
if (instr->hydrogen()->is_function_entry()) {
// Perform stack overflow check.
Label done;
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &done, Label::kNear);
ASSERT(instr->context()->IsRegister());
ASSERT(ToRegister(instr->context()).is(esi));
StackCheckStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
EnsureSpaceForLazyDeopt();
__ bind(&done);
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
} else {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
DeferredStackCheck* deferred_stack_check =
new(zone()) DeferredStackCheck(this, instr);
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(below, deferred_stack_check->entry());
EnsureSpaceForLazyDeopt();
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
}
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
// This is a pseudo-instruction that ensures that the environment here is
// properly registered for deoptimization and records the assembler's PC
// offset.
LEnvironment* environment = instr->environment();
environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
instr->SpilledDoubleRegisterArray());
// If the environment were already registered, we would have no way of
// backpatching it with the spill slot operands.
ASSERT(!environment->HasBeenRegistered());
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(osr_pc_offset_ == -1);
osr_pc_offset_ = masm()->pc_offset();
}


void LCodeGen::DoIn(LIn* instr) {
LOperand* obj = instr->object();
LOperand* key = instr->key();
EmitPushTaggedOperand(key);
EmitPushTaggedOperand(obj);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ cmp(eax, isolate()->factory()->undefined_value());
DeoptimizeIf(equal, instr->environment());
__ cmp(eax, isolate()->factory()->null_value());
DeoptimizeIf(equal, instr->environment());
__ test(eax, Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr->environment());
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
DeoptimizeIf(below_equal, instr->environment());
Label use_cache, call_runtime;
__ CheckEnumCache(&call_runtime);
__ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
__ jmp(&use_cache, Label::kNear);
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(eax);
CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->meta_map());
DeoptimizeIf(not_equal, instr->environment());
__ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Register map = ToRegister(instr->map());
Register result = ToRegister(instr->result());
Label load_cache, done;
__ EnumLength(result, map);
__ cmp(result, Immediate(Smi::FromInt(0)));
__ j(not_equal, &load_cache);
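  // An EnumLength of zero means the map caches no enumerable own properties,
  // so the empty fixed array can stand in for the enum cache.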
__ mov(result, isolate()->factory()->empty_fixed_array());
__ jmp(&done);
__ bind(&load_cache);
__ LoadInstanceDescriptors(map, result);
__ mov(result,
FieldOperand(result, DescriptorArray::kEnumCacheOffset));
__ mov(result,
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
__ test(result, result);
DeoptimizeIf(equal, instr->environment());
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
__ cmp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
Register object = ToRegister(instr->object());
Register index = ToRegister(instr->index());
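  // The index register holds a smi (value << 1), so scaling by
  // times_half_pointer_size yields index * kPointerSize. Non-negative indices
  // select in-object fields, negative ones the out-of-object properties
  // array.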
Label out_of_object, done;
__ cmp(index, Immediate(0));
__ j(less, &out_of_object);
__ mov(object, FieldOperand(object,
index,
times_half_pointer_size,
JSObject::kHeaderSize));
__ jmp(&done, Label::kNear);
__ bind(&out_of_object);
__ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
__ neg(index);
  // Index is now equal to the out-of-object property index plus 1.
__ mov(object, FieldOperand(object,
index,
times_half_pointer_size,
FixedArray::kHeaderSize - kPointerSize));
__ bind(&done);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32