diff --git a/BUILD.gn b/BUILD.gn index 9e7bec746d..6f1cee7091 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -1455,63 +1455,6 @@ v8_source_set("v8_base") { "src/counters-inl.h", "src/counters.cc", "src/counters.h", - "src/crankshaft/compilation-phase.cc", - "src/crankshaft/compilation-phase.h", - "src/crankshaft/hydrogen-alias-analysis.h", - "src/crankshaft/hydrogen-bce.cc", - "src/crankshaft/hydrogen-bce.h", - "src/crankshaft/hydrogen-canonicalize.cc", - "src/crankshaft/hydrogen-canonicalize.h", - "src/crankshaft/hydrogen-check-elimination.cc", - "src/crankshaft/hydrogen-check-elimination.h", - "src/crankshaft/hydrogen-dce.cc", - "src/crankshaft/hydrogen-dce.h", - "src/crankshaft/hydrogen-dehoist.cc", - "src/crankshaft/hydrogen-dehoist.h", - "src/crankshaft/hydrogen-environment-liveness.cc", - "src/crankshaft/hydrogen-environment-liveness.h", - "src/crankshaft/hydrogen-escape-analysis.cc", - "src/crankshaft/hydrogen-escape-analysis.h", - "src/crankshaft/hydrogen-flow-engine.h", - "src/crankshaft/hydrogen-gvn.cc", - "src/crankshaft/hydrogen-gvn.h", - "src/crankshaft/hydrogen-infer-representation.cc", - "src/crankshaft/hydrogen-infer-representation.h", - "src/crankshaft/hydrogen-infer-types.cc", - "src/crankshaft/hydrogen-infer-types.h", - "src/crankshaft/hydrogen-instructions.cc", - "src/crankshaft/hydrogen-instructions.h", - "src/crankshaft/hydrogen-load-elimination.cc", - "src/crankshaft/hydrogen-load-elimination.h", - "src/crankshaft/hydrogen-mark-unreachable.cc", - "src/crankshaft/hydrogen-mark-unreachable.h", - "src/crankshaft/hydrogen-range-analysis.cc", - "src/crankshaft/hydrogen-range-analysis.h", - "src/crankshaft/hydrogen-redundant-phi.cc", - "src/crankshaft/hydrogen-redundant-phi.h", - "src/crankshaft/hydrogen-removable-simulates.cc", - "src/crankshaft/hydrogen-removable-simulates.h", - "src/crankshaft/hydrogen-representation-changes.cc", - "src/crankshaft/hydrogen-representation-changes.h", - "src/crankshaft/hydrogen-sce.cc", - "src/crankshaft/hydrogen-sce.h", - "src/crankshaft/hydrogen-store-elimination.cc", - "src/crankshaft/hydrogen-store-elimination.h", - "src/crankshaft/hydrogen-types.cc", - "src/crankshaft/hydrogen-types.h", - "src/crankshaft/hydrogen-uint32-analysis.cc", - "src/crankshaft/hydrogen-uint32-analysis.h", - "src/crankshaft/hydrogen.cc", - "src/crankshaft/hydrogen.h", - "src/crankshaft/lithium-allocator-inl.h", - "src/crankshaft/lithium-allocator.cc", - "src/crankshaft/lithium-allocator.h", - "src/crankshaft/lithium-codegen.cc", - "src/crankshaft/lithium-codegen.h", - "src/crankshaft/lithium-inl.h", - "src/crankshaft/lithium.cc", - "src/crankshaft/lithium.h", - "src/crankshaft/unique.h", "src/date.cc", "src/date.h", "src/dateparser-inl.h", @@ -2066,12 +2009,6 @@ v8_source_set("v8_base") { "src/compiler/ia32/instruction-codes-ia32.h", "src/compiler/ia32/instruction-scheduler-ia32.cc", "src/compiler/ia32/instruction-selector-ia32.cc", - "src/crankshaft/ia32/lithium-codegen-ia32.cc", - "src/crankshaft/ia32/lithium-codegen-ia32.h", - "src/crankshaft/ia32/lithium-gap-resolver-ia32.cc", - "src/crankshaft/ia32/lithium-gap-resolver-ia32.h", - "src/crankshaft/ia32/lithium-ia32.cc", - "src/crankshaft/ia32/lithium-ia32.h", "src/debug/ia32/debug-ia32.cc", "src/full-codegen/ia32/full-codegen-ia32.cc", "src/ia32/assembler-ia32-inl.h", @@ -2106,12 +2043,6 @@ v8_source_set("v8_base") { "src/compiler/x64/instruction-selector-x64.cc", "src/compiler/x64/unwinding-info-writer-x64.cc", "src/compiler/x64/unwinding-info-writer-x64.h", - "src/crankshaft/x64/lithium-codegen-x64.cc", - 
"src/crankshaft/x64/lithium-codegen-x64.h", - "src/crankshaft/x64/lithium-gap-resolver-x64.cc", - "src/crankshaft/x64/lithium-gap-resolver-x64.h", - "src/crankshaft/x64/lithium-x64.cc", - "src/crankshaft/x64/lithium-x64.h", "src/debug/x64/debug-x64.cc", "src/full-codegen/x64/full-codegen-x64.cc", "src/ic/x64/access-compiler-x64.cc", @@ -2172,12 +2103,6 @@ v8_source_set("v8_base") { "src/compiler/arm/instruction-selector-arm.cc", "src/compiler/arm/unwinding-info-writer-arm.cc", "src/compiler/arm/unwinding-info-writer-arm.h", - "src/crankshaft/arm/lithium-arm.cc", - "src/crankshaft/arm/lithium-arm.h", - "src/crankshaft/arm/lithium-codegen-arm.cc", - "src/crankshaft/arm/lithium-codegen-arm.h", - "src/crankshaft/arm/lithium-gap-resolver-arm.cc", - "src/crankshaft/arm/lithium-gap-resolver-arm.h", "src/debug/arm/debug-arm.cc", "src/full-codegen/arm/full-codegen-arm.cc", "src/ic/arm/access-compiler-arm.cc", @@ -2226,15 +2151,6 @@ v8_source_set("v8_base") { "src/compiler/arm64/instruction-selector-arm64.cc", "src/compiler/arm64/unwinding-info-writer-arm64.cc", "src/compiler/arm64/unwinding-info-writer-arm64.h", - "src/crankshaft/arm64/delayed-masm-arm64-inl.h", - "src/crankshaft/arm64/delayed-masm-arm64.cc", - "src/crankshaft/arm64/delayed-masm-arm64.h", - "src/crankshaft/arm64/lithium-arm64.cc", - "src/crankshaft/arm64/lithium-arm64.h", - "src/crankshaft/arm64/lithium-codegen-arm64.cc", - "src/crankshaft/arm64/lithium-codegen-arm64.h", - "src/crankshaft/arm64/lithium-gap-resolver-arm64.cc", - "src/crankshaft/arm64/lithium-gap-resolver-arm64.h", "src/debug/arm64/debug-arm64.cc", "src/full-codegen/arm64/full-codegen-arm64.cc", "src/ic/arm64/access-compiler-arm64.cc", @@ -2249,12 +2165,6 @@ v8_source_set("v8_base") { "src/compiler/mips/instruction-codes-mips.h", "src/compiler/mips/instruction-scheduler-mips.cc", "src/compiler/mips/instruction-selector-mips.cc", - "src/crankshaft/mips/lithium-codegen-mips.cc", - "src/crankshaft/mips/lithium-codegen-mips.h", - "src/crankshaft/mips/lithium-gap-resolver-mips.cc", - "src/crankshaft/mips/lithium-gap-resolver-mips.h", - "src/crankshaft/mips/lithium-mips.cc", - "src/crankshaft/mips/lithium-mips.h", "src/debug/mips/debug-mips.cc", "src/full-codegen/mips/full-codegen-mips.cc", "src/ic/mips/access-compiler-mips.cc", @@ -2288,12 +2198,6 @@ v8_source_set("v8_base") { "src/compiler/mips64/instruction-codes-mips64.h", "src/compiler/mips64/instruction-scheduler-mips64.cc", "src/compiler/mips64/instruction-selector-mips64.cc", - "src/crankshaft/mips64/lithium-codegen-mips64.cc", - "src/crankshaft/mips64/lithium-codegen-mips64.h", - "src/crankshaft/mips64/lithium-gap-resolver-mips64.cc", - "src/crankshaft/mips64/lithium-gap-resolver-mips64.h", - "src/crankshaft/mips64/lithium-mips64.cc", - "src/crankshaft/mips64/lithium-mips64.h", "src/debug/mips64/debug-mips64.cc", "src/full-codegen/mips64/full-codegen-mips64.cc", "src/ic/mips64/access-compiler-mips64.cc", @@ -2327,12 +2231,6 @@ v8_source_set("v8_base") { "src/compiler/ppc/instruction-codes-ppc.h", "src/compiler/ppc/instruction-scheduler-ppc.cc", "src/compiler/ppc/instruction-selector-ppc.cc", - "src/crankshaft/ppc/lithium-codegen-ppc.cc", - "src/crankshaft/ppc/lithium-codegen-ppc.h", - "src/crankshaft/ppc/lithium-gap-resolver-ppc.cc", - "src/crankshaft/ppc/lithium-gap-resolver-ppc.h", - "src/crankshaft/ppc/lithium-ppc.cc", - "src/crankshaft/ppc/lithium-ppc.h", "src/debug/ppc/debug-ppc.cc", "src/full-codegen/ppc/full-codegen-ppc.cc", "src/ic/ppc/access-compiler-ppc.cc", @@ -2366,12 +2264,6 @@ 
v8_source_set("v8_base") { "src/compiler/s390/instruction-codes-s390.h", "src/compiler/s390/instruction-scheduler-s390.cc", "src/compiler/s390/instruction-selector-s390.cc", - "src/crankshaft/s390/lithium-codegen-s390.cc", - "src/crankshaft/s390/lithium-codegen-s390.h", - "src/crankshaft/s390/lithium-gap-resolver-s390.cc", - "src/crankshaft/s390/lithium-gap-resolver-s390.h", - "src/crankshaft/s390/lithium-s390.cc", - "src/crankshaft/s390/lithium-s390.h", "src/debug/s390/debug-s390.cc", "src/full-codegen/s390/full-codegen-s390.cc", "src/ic/s390/access-compiler-s390.cc", @@ -2405,12 +2297,6 @@ v8_source_set("v8_base") { "src/compiler/x87/instruction-codes-x87.h", "src/compiler/x87/instruction-scheduler-x87.cc", "src/compiler/x87/instruction-selector-x87.cc", - "src/crankshaft/x87/lithium-codegen-x87.cc", - "src/crankshaft/x87/lithium-codegen-x87.h", - "src/crankshaft/x87/lithium-gap-resolver-x87.cc", - "src/crankshaft/x87/lithium-gap-resolver-x87.h", - "src/crankshaft/x87/lithium-x87.cc", - "src/crankshaft/x87/lithium-x87.h", "src/debug/x87/debug-x87.cc", "src/full-codegen/x87/full-codegen-x87.cc", "src/ic/x87/access-compiler-x87.cc", diff --git a/src/compiler.cc b/src/compiler.cc index 684c54b97c..b2cc7170e4 100644 --- a/src/compiler.cc +++ b/src/compiler.cc @@ -16,10 +16,10 @@ #include "src/bootstrapper.h" #include "src/codegen.h" #include "src/compilation-cache.h" +#include "src/compilation-info.h" #include "src/compiler-dispatcher/compiler-dispatcher.h" #include "src/compiler-dispatcher/optimizing-compile-dispatcher.h" #include "src/compiler/pipeline.h" -#include "src/crankshaft/hydrogen.h" #include "src/debug/debug.h" #include "src/debug/liveedit.h" #include "src/frames-inl.h" @@ -31,6 +31,7 @@ #include "src/log-inl.h" #include "src/messages.h" #include "src/objects/map.h" +#include "src/parsing/parse-info.h" #include "src/parsing/parsing.h" #include "src/parsing/rewriter.h" #include "src/parsing/scanner-character-streams.h" @@ -203,11 +204,6 @@ void CompilationJob::RecordOptimizedCompilationStats() const { PrintF("Compiled: %d functions with %d byte source size in %fms.\n", compiled_functions, code_size, compilation_time); } - if (FLAG_hydrogen_stats) { - isolate()->GetHStatistics()->IncrementSubtotals(time_taken_to_prepare_, - time_taken_to_execute_, - time_taken_to_finalize_); - } } Isolate* CompilationJob::isolate() const { return info()->isolate(); } diff --git a/src/crankshaft/OWNERS b/src/crankshaft/OWNERS deleted file mode 100644 index 9f73266685..0000000000 --- a/src/crankshaft/OWNERS +++ /dev/null @@ -1,9 +0,0 @@ -set noparent - -bmeurer@chromium.org -danno@chromium.org -jarin@chromium.org -jkummerow@chromium.org -verwaest@chromium.org - -# COMPONENT: Blink>JavaScript>Compiler diff --git a/src/crankshaft/arm/OWNERS b/src/crankshaft/arm/OWNERS deleted file mode 100644 index 906a5ce641..0000000000 --- a/src/crankshaft/arm/OWNERS +++ /dev/null @@ -1 +0,0 @@ -rmcilroy@chromium.org diff --git a/src/crankshaft/arm/lithium-arm.cc b/src/crankshaft/arm/lithium-arm.cc deleted file mode 100644 index 1cc4283b90..0000000000 --- a/src/crankshaft/arm/lithium-arm.cc +++ /dev/null @@ -1,2381 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
-
-#include "src/crankshaft/arm/lithium-arm.h"
-
-#include <sstream>
-
-#include "src/crankshaft/arm/lithium-codegen-arm.h"
-#include "src/crankshaft/lithium-inl.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define DEFINE_COMPILE(type)                           \
-  void L##type::CompileToNative(LCodeGen* generator) { \
-    generator->Do##type(this);                         \
-  }
-LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
-#undef DEFINE_COMPILE
-
-#ifdef DEBUG
-void LInstruction::VerifyCall() {
-  // Call instructions can use only fixed registers as temporaries and
-  // outputs because all registers are blocked by the calling convention.
-  // Inputs operands must use a fixed register or use-at-start policy or
-  // a non-register policy.
-  DCHECK(Output() == NULL ||
-         LUnallocated::cast(Output())->HasFixedPolicy() ||
-         !LUnallocated::cast(Output())->HasRegisterPolicy());
-  for (UseIterator it(this); !it.Done(); it.Advance()) {
-    LUnallocated* operand = LUnallocated::cast(it.Current());
-    DCHECK(operand->HasFixedPolicy() ||
-           operand->IsUsedAtStart());
-  }
-  for (TempIterator it(this); !it.Done(); it.Advance()) {
-    LUnallocated* operand = LUnallocated::cast(it.Current());
-    DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
-  }
-}
-#endif
-
-
-void LInstruction::PrintTo(StringStream* stream) {
-  stream->Add("%s ", this->Mnemonic());
-
-  PrintOutputOperandTo(stream);
-
-  PrintDataTo(stream);
-
-  if (HasEnvironment()) {
-    stream->Add(" ");
-    environment()->PrintTo(stream);
-  }
-
-  if (HasPointerMap()) {
-    stream->Add(" ");
-    pointer_map()->PrintTo(stream);
-  }
-}
-
-
-void LInstruction::PrintDataTo(StringStream* stream) {
-  stream->Add("= ");
-  for (int i = 0; i < InputCount(); i++) {
-    if (i > 0) stream->Add(" ");
-    if (InputAt(i) == NULL) {
-      stream->Add("NULL");
-    } else {
-      InputAt(i)->PrintTo(stream);
-    }
-  }
-}
-
-
-void LInstruction::PrintOutputOperandTo(StringStream* stream) {
-  if (HasResult()) result()->PrintTo(stream);
-}
-
-
-void LLabel::PrintDataTo(StringStream* stream) {
-  LGap::PrintDataTo(stream);
-  LLabel* rep = replacement();
-  if (rep != NULL) {
-    stream->Add(" Dead block replaced with B%d", rep->block_id());
-  }
-}
-
-
-bool LGap::IsRedundant() const {
-  for (int i = 0; i < 4; i++) {
-    if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
-      return false;
-    }
-  }
-
-  return true;
-}
-
-
-void LGap::PrintDataTo(StringStream* stream) {
-  for (int i = 0; i < 4; i++) {
-    stream->Add("(");
-    if (parallel_moves_[i] != NULL) {
-      parallel_moves_[i]->PrintDataTo(stream);
-    }
-    stream->Add(") ");
-  }
-}
-
-
-const char* LArithmeticD::Mnemonic() const {
-  switch (op()) {
-    case Token::ADD: return "add-d";
-    case Token::SUB: return "sub-d";
-    case Token::MUL: return "mul-d";
-    case Token::DIV: return "div-d";
-    case Token::MOD: return "mod-d";
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-const char* LArithmeticT::Mnemonic() const {
-  switch (op()) {
-    case Token::ADD: return "add-t";
-    case Token::SUB: return "sub-t";
-    case Token::MUL: return "mul-t";
-    case Token::MOD: return "mod-t";
-    case Token::DIV: return "div-t";
-    case Token::BIT_AND: return "bit-and-t";
-    case Token::BIT_OR: return "bit-or-t";
-    case Token::BIT_XOR: return "bit-xor-t";
-    case Token::ROR: return "ror-t";
-    case Token::SHL: return "shl-t";
-    case Token::SAR: return "sar-t";
-    case Token::SHR: return "shr-t";
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-bool LGoto::HasInterestingComment(LCodeGen* gen) const {
-  return !gen->IsNextEmittedBlock(block_id());
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) {
stream->Add("B%d", block_id()); -} - - -void LBranch::PrintDataTo(StringStream* stream) { - stream->Add("B%d | B%d on ", true_block_id(), false_block_id()); - value()->PrintTo(stream); -} - - -void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if "); - left()->PrintTo(stream); - stream->Add(" %s ", Token::String(op())); - right()->PrintTo(stream); - stream->Add(" then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LIsStringAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_string("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LIsSmiAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_smi("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_undetectable("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LStringCompareAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if string_compare("); - left()->PrintTo(stream); - right()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if has_instance_type("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - -void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if class_of_test("); - value()->PrintTo(stream); - stream->Add(", \"%o\") then B%d else B%d", *hydrogen()->class_name(), - true_block_id(), false_block_id()); -} - -void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if typeof "); - value()->PrintTo(stream); - stream->Add(" == \"%s\" then B%d else B%d", - hydrogen()->type_literal()->ToCString().get(), - true_block_id(), false_block_id()); -} - - -void LStoreCodeEntry::PrintDataTo(StringStream* stream) { - stream->Add(" = "); - function()->PrintTo(stream); - stream->Add(".code_entry = "); - code_object()->PrintTo(stream); -} - - -void LInnerAllocatedObject::PrintDataTo(StringStream* stream) { - stream->Add(" = "); - base_object()->PrintTo(stream); - stream->Add(" + "); - offset()->PrintTo(stream); -} - - -void LCallWithDescriptor::PrintDataTo(StringStream* stream) { - for (int i = 0; i < InputCount(); i++) { - InputAt(i)->PrintTo(stream); - stream->Add(" "); - } - stream->Add("#%d / ", arity()); -} - - -void LLoadContextSlot::PrintDataTo(StringStream* stream) { - context()->PrintTo(stream); - stream->Add("[%d]", slot_index()); -} - - -void LStoreContextSlot::PrintDataTo(StringStream* stream) { - context()->PrintTo(stream); - stream->Add("[%d] <- ", slot_index()); - value()->PrintTo(stream); -} - - -void LInvokeFunction::PrintDataTo(StringStream* stream) { - stream->Add("= "); - function()->PrintTo(stream); - stream->Add(" #%d / ", arity()); -} - - -void LCallNewArray::PrintDataTo(StringStream* stream) { - stream->Add("= "); - constructor()->PrintTo(stream); - stream->Add(" #%d / ", arity()); - ElementsKind kind = hydrogen()->elements_kind(); - stream->Add(" (%s) ", ElementsKindToString(kind)); -} - - -void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { - arguments()->PrintTo(stream); - stream->Add(" length "); - length()->PrintTo(stream); - stream->Add(" index "); - index()->PrintTo(stream); -} - - -void 
LStoreNamedField::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  std::ostringstream os;
-  os << hydrogen()->access() << " <- ";
-  stream->Add(os.str().c_str());
-  value()->PrintTo(stream);
-}
-
-
-void LLoadKeyed::PrintDataTo(StringStream* stream) {
-  elements()->PrintTo(stream);
-  stream->Add("[");
-  key()->PrintTo(stream);
-  if (hydrogen()->IsDehoisted()) {
-    stream->Add(" + %d]", base_offset());
-  } else {
-    stream->Add("]");
-  }
-}
-
-
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
-  elements()->PrintTo(stream);
-  stream->Add("[");
-  key()->PrintTo(stream);
-  if (hydrogen()->IsDehoisted()) {
-    stream->Add(" + %d] <-", base_offset());
-  } else {
-    stream->Add("] <- ");
-  }
-
-  if (value() == NULL) {
-    DCHECK(hydrogen()->IsConstantHoleStore() &&
-           hydrogen()->value()->representation().IsDouble());
-    stream->Add("<the hole>");
-  } else {
-    value()->PrintTo(stream);
-  }
-}
-
-
-void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
-}
-
-
-int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
-  // Skip a slot if for a double-width slot.
-  if (kind == DOUBLE_REGISTERS) current_frame_slots_++;
-  return current_frame_slots_++;
-}
-
-
-LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
-  int index = GetNextSpillIndex(kind);
-  if (kind == DOUBLE_REGISTERS) {
-    return LDoubleStackSlot::Create(index, zone());
-  } else {
-    DCHECK(kind == GENERAL_REGISTERS);
-    return LStackSlot::Create(index, zone());
-  }
-}
-
-
-LPlatformChunk* LChunkBuilder::Build() {
-  DCHECK(is_unused());
-  chunk_ = new(zone()) LPlatformChunk(info(), graph());
-  LPhase phase("L_Building chunk", chunk_);
-  status_ = BUILDING;
-
-  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
-  for (int i = 0; i < blocks->length(); i++) {
-    HBasicBlock* next = NULL;
-    if (i < blocks->length() - 1) next = blocks->at(i + 1);
-    DoBasicBlock(blocks->at(i), next);
-    if (is_aborted()) return NULL;
-  }
-  status_ = DONE;
-  return chunk_;
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
-  return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
-  return new (zone())
-      LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
-}
-
-
-LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
-  return Use(value, ToUnallocated(fixed_register));
-}
-
-
-LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
-  return Use(value, ToUnallocated(reg));
-}
-
-
-LOperand* LChunkBuilder::UseRegister(HValue* value) {
-  return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
-  return Use(value,
-             new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
-                                      LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
-  return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value) {
-  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
-}
-
-
-LOperand* LChunkBuilder::UseAtStart(HValue* value) {
-  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
-                                             LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
-  return value->IsConstant()
-      ?
chunk_->DefineConstantOperand(HConstant::cast(value)) - : Use(value); -} - - -LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseAtStart(value); -} - - -LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseRegister(value); -} - - -LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseRegisterAtStart(value); -} - - -LOperand* LChunkBuilder::UseConstant(HValue* value) { - return chunk_->DefineConstantOperand(HConstant::cast(value)); -} - - -LOperand* LChunkBuilder::UseAny(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : Use(value, new(zone()) LUnallocated(LUnallocated::ANY)); -} - - -LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) { - if (value->EmitAtUses()) { - HInstruction* instr = HInstruction::cast(value); - VisitInstruction(instr); - } - operand->set_virtual_register(value->id()); - return operand; -} - - -LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr, - LUnallocated* result) { - result->set_virtual_register(current_instruction_->id()); - instr->set_result(result); - return instr; -} - - -LInstruction* LChunkBuilder::DefineAsRegister( - LTemplateResultInstruction<1>* instr) { - return Define(instr, - new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER)); -} - - -LInstruction* LChunkBuilder::DefineAsSpilled( - LTemplateResultInstruction<1>* instr, int index) { - return Define(instr, - new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index)); -} - - -LInstruction* LChunkBuilder::DefineSameAsFirst( - LTemplateResultInstruction<1>* instr) { - return Define(instr, - new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT)); -} - - -LInstruction* LChunkBuilder::DefineFixed( - LTemplateResultInstruction<1>* instr, Register reg) { - return Define(instr, ToUnallocated(reg)); -} - - -LInstruction* LChunkBuilder::DefineFixedDouble( - LTemplateResultInstruction<1>* instr, DoubleRegister reg) { - return Define(instr, ToUnallocated(reg)); -} - - -LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { - HEnvironment* hydrogen_env = current_block_->last_environment(); - return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env); -} - - -LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, - HInstruction* hinstr, - CanDeoptimize can_deoptimize) { - info()->MarkAsNonDeferredCalling(); -#ifdef DEBUG - instr->VerifyCall(); -#endif - instr->MarkAsCall(); - instr = AssignPointerMap(instr); - - // If instruction does not have side-effects lazy deoptimization - // after the call will try to deoptimize to the point before the call. - // Thus we still need to attach environment to this call even if - // call sequence can not deoptimize eagerly. - bool needs_environment = - (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || - !hinstr->HasObservableSideEffects(); - if (needs_environment && !instr->HasEnvironment()) { - instr = AssignEnvironment(instr); - // We can't really figure out if the environment is needed or not. 
- instr->environment()->set_has_been_used(); - } - - return instr; -} - - -LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { - DCHECK(!instr->HasPointerMap()); - instr->set_pointer_map(new(zone()) LPointerMap(zone())); - return instr; -} - - -LUnallocated* LChunkBuilder::TempRegister() { - LUnallocated* operand = - new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER); - int vreg = allocator_->GetVirtualRegister(); - if (!allocator_->AllocationOk()) { - Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister); - vreg = 0; - } - operand->set_virtual_register(vreg); - return operand; -} - - -LUnallocated* LChunkBuilder::TempDoubleRegister() { - LUnallocated* operand = - new(zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER); - int vreg = allocator_->GetVirtualRegister(); - if (!allocator_->AllocationOk()) { - Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister); - vreg = 0; - } - operand->set_virtual_register(vreg); - return operand; -} - - -LOperand* LChunkBuilder::FixedTemp(Register reg) { - LUnallocated* operand = ToUnallocated(reg); - DCHECK(operand->HasFixedPolicy()); - return operand; -} - - -LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) { - LUnallocated* operand = ToUnallocated(reg); - DCHECK(operand->HasFixedPolicy()); - return operand; -} - - -LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) { - return new(zone()) LLabel(instr->block()); -} - - -LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) { - return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value()))); -} - - -LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) { - UNREACHABLE(); -} - - -LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) { - return AssignEnvironment(new(zone()) LDeoptimize); -} - - -LInstruction* LChunkBuilder::DoShift(Token::Value op, - HBitwiseBinaryOperation* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* left = UseRegisterAtStart(instr->left()); - - HValue* right_value = instr->right(); - LOperand* right = NULL; - int constant_value = 0; - bool does_deopt = false; - if (right_value->IsConstant()) { - HConstant* constant = HConstant::cast(right_value); - right = chunk_->DefineConstantOperand(constant); - constant_value = constant->Integer32Value() & 0x1f; - // Left shifts can deoptimize if we shift by > 0 and the result cannot be - // truncated to smi. - if (instr->representation().IsSmi() && constant_value > 0) { - does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi); - } - } else { - right = UseRegisterAtStart(right_value); - } - - // Shift operations can only deoptimize if we do a logical shift - // by 0 and the result cannot be truncated to int32. - if (op == Token::SHR && constant_value == 0) { - does_deopt = !instr->CheckFlag(HInstruction::kUint32); - } - - LInstruction* result = - DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt)); - return does_deopt ? 
AssignEnvironment(result) : result; - } else { - return DoArithmeticT(op, instr); - } -} - - -LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op, - HArithmeticBinaryOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - DCHECK(instr->right()->representation().IsDouble()); - if (op == Token::MOD) { - LOperand* left = UseFixedDouble(instr->left(), d0); - LOperand* right = UseFixedDouble(instr->right(), d1); - LArithmeticD* result = new(zone()) LArithmeticD(op, left, right); - return MarkAsCall(DefineFixedDouble(result, d0), instr); - } else { - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterAtStart(instr->right()); - LArithmeticD* result = new(zone()) LArithmeticD(op, left, right); - return DefineAsRegister(result); - } -} - - -LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op, - HBinaryOperation* instr) { - HValue* left = instr->left(); - HValue* right = instr->right(); - DCHECK(left->representation().IsTagged()); - DCHECK(right->representation().IsTagged()); - LOperand* context = UseFixed(instr->context(), cp); - LOperand* left_operand = UseFixed(left, r1); - LOperand* right_operand = UseFixed(right, r0); - LArithmeticT* result = - new(zone()) LArithmeticT(op, context, left_operand, right_operand); - return MarkAsCall(DefineFixed(result, r0), instr); -} - - -void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { - DCHECK(is_building()); - current_block_ = block; - next_block_ = next_block; - if (block->IsStartBlock()) { - block->UpdateEnvironment(graph_->start_environment()); - argument_count_ = 0; - } else if (block->predecessors()->length() == 1) { - // We have a single predecessor => copy environment and outgoing - // argument count from the predecessor. - DCHECK(block->phis()->length() == 0); - HBasicBlock* pred = block->predecessors()->at(0); - HEnvironment* last_environment = pred->last_environment(); - DCHECK(last_environment != NULL); - // Only copy the environment, if it is later used again. - if (pred->end()->SecondSuccessor() == NULL) { - DCHECK(pred->end()->FirstSuccessor() == block); - } else { - if (pred->end()->FirstSuccessor()->block_id() > block->block_id() || - pred->end()->SecondSuccessor()->block_id() > block->block_id()) { - last_environment = last_environment->Copy(); - } - } - block->UpdateEnvironment(last_environment); - DCHECK(pred->argument_count() >= 0); - argument_count_ = pred->argument_count(); - } else { - // We are at a state join => process phis. - HBasicBlock* pred = block->predecessors()->at(0); - // No need to copy the environment, it cannot be used later. - HEnvironment* last_environment = pred->last_environment(); - for (int i = 0; i < block->phis()->length(); ++i) { - HPhi* phi = block->phis()->at(i); - if (phi->HasMergedIndex()) { - last_environment->SetValueAt(phi->merged_index(), phi); - } - } - for (int i = 0; i < block->deleted_phis()->length(); ++i) { - if (block->deleted_phis()->at(i) < last_environment->length()) { - last_environment->SetValueAt(block->deleted_phis()->at(i), - graph_->GetConstantUndefined()); - } - } - block->UpdateEnvironment(last_environment); - // Pick up the outgoing argument count of one of the predecessors. - argument_count_ = pred->argument_count(); - } - HInstruction* current = block->first(); - int start = chunk_->instructions()->length(); - while (current != NULL && !is_aborted()) { - // Code for constants in registers is generated lazily. 
- if (!current->EmitAtUses()) { - VisitInstruction(current); - } - current = current->next(); - } - int end = chunk_->instructions()->length() - 1; - if (end >= start) { - block->set_first_instruction_index(start); - block->set_last_instruction_index(end); - } - block->set_argument_count(argument_count_); - next_block_ = NULL; - current_block_ = NULL; -} - - -void LChunkBuilder::VisitInstruction(HInstruction* current) { - HInstruction* old_current = current_instruction_; - current_instruction_ = current; - - LInstruction* instr = NULL; - if (current->CanReplaceWithDummyUses()) { - if (current->OperandCount() == 0) { - instr = DefineAsRegister(new(zone()) LDummy()); - } else { - DCHECK(!current->OperandAt(0)->IsControlInstruction()); - instr = DefineAsRegister(new(zone()) - LDummyUse(UseAny(current->OperandAt(0)))); - } - for (int i = 1; i < current->OperandCount(); ++i) { - if (current->OperandAt(i)->IsControlInstruction()) continue; - LInstruction* dummy = - new(zone()) LDummyUse(UseAny(current->OperandAt(i))); - dummy->set_hydrogen_value(current); - chunk_->AddInstruction(dummy, current_block_); - } - } else { - HBasicBlock* successor; - if (current->IsControlInstruction() && - HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) && - successor != NULL) { - instr = new(zone()) LGoto(successor); - } else { - instr = current->CompileToLithium(this); - } - } - - argument_count_ += current->argument_delta(); - DCHECK(argument_count_ >= 0); - - if (instr != NULL) { - AddInstruction(instr, current); - } - - current_instruction_ = old_current; -} - - -void LChunkBuilder::AddInstruction(LInstruction* instr, - HInstruction* hydrogen_val) { - // Associate the hydrogen instruction first, since we may need it for - // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below. - instr->set_hydrogen_value(hydrogen_val); - -#if DEBUG - // Make sure that the lithium instruction has either no fixed register - // constraints in temps or the result OR no uses that are only used at - // start. If this invariant doesn't hold, the register allocator can decide - // to insert a split of a range immediately before the instruction due to an - // already allocated register needing to be used for the instruction's fixed - // register constraint. In this case, The register allocator won't see an - // interference between the split child and the use-at-start (it would if - // the it was just a plain use), so it is free to move the split child into - // the same register that is used for the use-at-start. 
- // See https://code.google.com/p/chromium/issues/detail?id=201590 - if (!(instr->ClobbersRegisters() && - instr->ClobbersDoubleRegisters(isolate()))) { - int fixed = 0; - int used_at_start = 0; - for (UseIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->IsUsedAtStart()) ++used_at_start; - } - if (instr->Output() != NULL) { - if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed; - } - for (TempIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->HasFixedPolicy()) ++fixed; - } - DCHECK(fixed == 0 || used_at_start == 0); - } -#endif - - if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) { - instr = AssignPointerMap(instr); - } - if (FLAG_stress_environments && !instr->HasEnvironment()) { - instr = AssignEnvironment(instr); - } - chunk_->AddInstruction(instr, current_block_); - - CreateLazyBailoutForCall(current_block_, instr, hydrogen_val); -} - - -LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) { - LInstruction* result = new (zone()) LPrologue(); - if (info_->scope()->NeedsContext()) { - result = MarkAsCall(result, instr); - } - return result; -} - - -LInstruction* LChunkBuilder::DoGoto(HGoto* instr) { - return new(zone()) LGoto(instr->FirstSuccessor()); -} - - -LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { - HValue* value = instr->value(); - Representation r = value->representation(); - HType type = value->type(); - ToBooleanHints expected = instr->expected_input_types(); - if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny; - - bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() || - type.IsJSArray() || type.IsHeapNumber() || type.IsString(); - LInstruction* branch = new(zone()) LBranch(UseRegister(value)); - if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) && - (expected & ToBooleanHint::kNeedsMap)) || - expected != ToBooleanHint::kAny)) { - branch = AssignEnvironment(branch); - } - return branch; -} - - -LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) { - return new(zone()) LDebugBreak(); -} - - -LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* temp = TempRegister(); - return new(zone()) LCmpMapAndBranch(value, temp); -} - - -LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) { - info()->MarkAsRequiresFrame(); - LOperand* value = UseRegister(instr->value()); - return DefineAsRegister(new(zone()) LArgumentsLength(value)); -} - - -LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) { - info()->MarkAsRequiresFrame(); - return DefineAsRegister(new(zone()) LArgumentsElements); -} - - -LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch( - HHasInPrototypeChainAndBranch* instr) { - LOperand* object = UseRegister(instr->object()); - LOperand* prototype = UseRegister(instr->prototype()); - LHasInPrototypeChainAndBranch* result = - new (zone()) LHasInPrototypeChainAndBranch(object, prototype); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) { - LOperand* receiver = UseRegisterAtStart(instr->receiver()); - LOperand* function = UseRegisterAtStart(instr->function()); - LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function); - return AssignEnvironment(DefineAsRegister(result)); -} - - 
-LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
-  LOperand* function = UseFixed(instr->function(), r1);
-  LOperand* receiver = UseFixed(instr->receiver(), r0);
-  LOperand* length = UseFixed(instr->length(), r2);
-  LOperand* elements = UseFixed(instr->elements(), r3);
-  LApplyArguments* result = new(zone()) LApplyArguments(function,
-                                                        receiver,
-                                                        length,
-                                                        elements);
-  return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
-  int argc = instr->OperandCount();
-  for (int i = 0; i < argc; ++i) {
-    LOperand* argument = Use(instr->argument(i));
-    AddInstruction(new(zone()) LPushArgument(argument), instr);
-  }
-  return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreCodeEntry(
-    HStoreCodeEntry* store_code_entry) {
-  LOperand* function = UseRegister(store_code_entry->function());
-  LOperand* code_object = UseTempRegister(store_code_entry->code_object());
-  return new(zone()) LStoreCodeEntry(function, code_object);
-}
-
-
-LInstruction* LChunkBuilder::DoInnerAllocatedObject(
-    HInnerAllocatedObject* instr) {
-  LOperand* base_object = UseRegisterAtStart(instr->base_object());
-  LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
-  return DefineAsRegister(
-      new(zone()) LInnerAllocatedObject(base_object, offset));
-}
-
-
-LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
-  return instr->HasNoUses()
-      ? NULL
-      : DefineAsRegister(new(zone()) LThisFunction);
-}
-
-
-LInstruction* LChunkBuilder::DoContext(HContext* instr) {
-  if (instr->HasNoUses()) return NULL;
-
-  if (info()->IsStub()) {
-    return DefineFixed(new(zone()) LContext, cp);
-  }
-
-  return DefineAsRegister(new(zone()) LContext);
-}
-
-
-LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallWithDescriptor(
-    HCallWithDescriptor* instr) {
-  CallInterfaceDescriptor descriptor = instr->descriptor();
-  DCHECK_EQ(descriptor.GetParameterCount() +
-                LCallWithDescriptor::kImplicitRegisterParameterCount,
-            instr->OperandCount());
-
-  LOperand* target = UseRegisterOrConstantAtStart(instr->target());
-  ZoneList<LOperand*> ops(instr->OperandCount(), zone());
-  // Target
-  ops.Add(target, zone());
-  // Context
-  LOperand* op = UseFixed(instr->OperandAt(1), cp);
-  ops.Add(op, zone());
-  // Load register parameters.
-  int i = 0;
-  for (; i < descriptor.GetRegisterParameterCount(); i++) {
-    op = UseFixed(instr->OperandAt(
-             i + LCallWithDescriptor::kImplicitRegisterParameterCount),
-         descriptor.GetRegisterParameter(i));
-    ops.Add(op, zone());
-  }
-  // Push stack parameters.
- for (; i < descriptor.GetParameterCount(); i++) { - op = UseAny(instr->OperandAt( - i + LCallWithDescriptor::kImplicitRegisterParameterCount)); - AddInstruction(new (zone()) LPushArgument(op), instr); - } - - LCallWithDescriptor* result = new(zone()) LCallWithDescriptor( - descriptor, ops, zone()); - if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) { - result->MarkAsSyntacticTailCall(); - } - return MarkAsCall(DefineFixed(result, r0), instr); -} - - -LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) { - LOperand* context = UseFixed(instr->context(), cp); - LOperand* function = UseFixed(instr->function(), r1); - LInvokeFunction* result = new(zone()) LInvokeFunction(context, function); - if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) { - result->MarkAsSyntacticTailCall(); - } - return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { - switch (instr->op()) { - case kMathFloor: - return DoMathFloor(instr); - case kMathRound: - return DoMathRound(instr); - case kMathFround: - return DoMathFround(instr); - case kMathAbs: - return DoMathAbs(instr); - case kMathLog: - return DoMathLog(instr); - case kMathCos: - return DoMathCos(instr); - case kMathSin: - return DoMathSin(instr); - case kMathExp: - return DoMathExp(instr); - case kMathSqrt: - return DoMathSqrt(instr); - case kMathPowHalf: - return DoMathPowHalf(instr); - case kMathClz32: - return DoMathClz32(instr); - default: - UNREACHABLE(); - } -} - - -LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) { - LOperand* input = UseRegister(instr->value()); - LMathFloor* result = new(zone()) LMathFloor(input); - return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); -} - - -LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) { - LOperand* input = UseRegister(instr->value()); - LOperand* temp = TempDoubleRegister(); - LMathRound* result = new(zone()) LMathRound(input, temp); - return AssignEnvironment(DefineAsRegister(result)); -} - - -LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) { - LOperand* input = UseRegister(instr->value()); - LMathFround* result = new (zone()) LMathFround(input); - return DefineAsRegister(result); -} - - -LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) { - Representation r = instr->value()->representation(); - LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32()) - ? 
NULL - : UseFixed(instr->context(), cp); - LOperand* input = UseRegister(instr->value()); - LInstruction* result = - DefineAsRegister(new(zone()) LMathAbs(context, input)); - if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result); - if (!r.IsDouble()) result = AssignEnvironment(result); - return result; -} - - -LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseFixedDouble(instr->value(), d0); - return MarkAsCall(DefineFixedDouble(new(zone()) LMathLog(input), d0), instr); -} - - -LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) { - LOperand* input = UseRegisterAtStart(instr->value()); - LMathClz32* result = new(zone()) LMathClz32(input); - return DefineAsRegister(result); -} - -LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseFixedDouble(instr->value(), d0); - return MarkAsCall(DefineFixedDouble(new (zone()) LMathCos(input), d0), instr); -} - -LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseFixedDouble(instr->value(), d0); - return MarkAsCall(DefineFixedDouble(new (zone()) LMathSin(input), d0), instr); -} - -LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseFixedDouble(instr->value(), d0); - return MarkAsCall(DefineFixedDouble(new (zone()) LMathExp(input), d0), instr); -} - - -LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) { - LOperand* input = UseRegisterAtStart(instr->value()); - LMathSqrt* result = new(zone()) LMathSqrt(input); - return DefineAsRegister(result); -} - - -LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) { - LOperand* input = UseRegisterAtStart(instr->value()); - LMathPowHalf* result = new(zone()) LMathPowHalf(input); - return DefineAsRegister(result); -} - - -LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) { - LOperand* context = UseFixed(instr->context(), cp); - LOperand* constructor = UseFixed(instr->constructor(), r1); - LCallNewArray* result = new(zone()) LCallNewArray(context, constructor); - return MarkAsCall(DefineFixed(result, r0), instr); -} - - -LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) { - LOperand* context = UseFixed(instr->context(), cp); - return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), r0), instr); -} - - -LInstruction* LChunkBuilder::DoRor(HRor* instr) { - return DoShift(Token::ROR, instr); -} - - -LInstruction* LChunkBuilder::DoShr(HShr* instr) { - return DoShift(Token::SHR, instr); -} - - -LInstruction* LChunkBuilder::DoSar(HSar* instr) { - return DoShift(Token::SAR, instr); -} - - -LInstruction* LChunkBuilder::DoShl(HShl* instr) { - return DoShift(Token::SHL, instr); -} - - -LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32)); - - LOperand* left = 
UseRegisterAtStart(instr->BetterLeftOperand()); - LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand()); - return DefineAsRegister(new(zone()) LBitI(left, right)); - } else { - return DoArithmeticT(instr->op(), instr); - } -} - - -LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I( - dividend, divisor)); - if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || - (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) || - (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && - divisor != 1 && divisor != -1)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) { - DCHECK(instr->representation().IsInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI( - dividend, divisor)); - if (divisor == 0 || - (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || - !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoDivI(HDiv* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - LOperand* divisor = UseRegister(instr->right()); - LOperand* temp = - CpuFeatures::IsSupported(SUDIV) ? 
NULL : TempDoubleRegister(); - LInstruction* result = - DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp)); - if (instr->CheckFlag(HValue::kCanBeDivByZero) || - instr->CheckFlag(HValue::kBailoutOnMinusZero) || - (instr->CheckFlag(HValue::kCanOverflow) && - (!CpuFeatures::IsSupported(SUDIV) || - !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) || - (!instr->IsMathFloorOfDiv() && - !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoDiv(HDiv* instr) { - if (instr->representation().IsSmiOrInteger32()) { - if (instr->RightIsPowerOf2()) { - return DoDivByPowerOf2I(instr); - } else if (instr->right()->IsConstant()) { - return DoDivByConstI(instr); - } else { - return DoDivI(instr); - } - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::DIV, instr); - } else { - return DoArithmeticT(Token::DIV, instr); - } -} - - -LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) { - LOperand* dividend = UseRegisterAtStart(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I( - dividend, divisor)); - if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || - (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) { - DCHECK(instr->representation().IsInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LOperand* temp = - ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) || - (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ? - NULL : TempRegister(); - LInstruction* result = DefineAsRegister( - new(zone()) LFlooringDivByConstI(dividend, divisor, temp)); - if (divisor == 0 || - (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - LOperand* divisor = UseRegister(instr->right()); - LOperand* temp = - CpuFeatures::IsSupported(SUDIV) ? 
NULL : TempDoubleRegister(); - LInstruction* result = - DefineAsRegister(new (zone()) LFlooringDivI(dividend, divisor, temp)); - if (instr->CheckFlag(HValue::kCanBeDivByZero) || - instr->CheckFlag(HValue::kBailoutOnMinusZero) || - (instr->CheckFlag(HValue::kCanOverflow) && - (!CpuFeatures::IsSupported(SUDIV) || - !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)))) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) { - if (instr->RightIsPowerOf2()) { - return DoFlooringDivByPowerOf2I(instr); - } else if (instr->right()->IsConstant()) { - return DoFlooringDivByConstI(instr); - } else { - return DoFlooringDivI(instr); - } -} - - -LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegisterAtStart(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I( - dividend, divisor)); - if (instr->CheckFlag(HValue::kLeftCanBeNegative) && - instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = DefineAsRegister(new(zone()) LModByConstI( - dividend, divisor)); - if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoModI(HMod* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - LOperand* divisor = UseRegister(instr->right()); - LOperand* temp = - CpuFeatures::IsSupported(SUDIV) ? NULL : TempDoubleRegister(); - LOperand* temp2 = - CpuFeatures::IsSupported(SUDIV) ? 
NULL : TempDoubleRegister(); - LInstruction* result = DefineAsRegister(new(zone()) LModI( - dividend, divisor, temp, temp2)); - if (instr->CheckFlag(HValue::kCanBeDivByZero) || - instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoMod(HMod* instr) { - if (instr->representation().IsSmiOrInteger32()) { - if (instr->RightIsPowerOf2()) { - return DoModByPowerOf2I(instr); - } else if (instr->right()->IsConstant()) { - return DoModByConstI(instr); - } else { - return DoModI(instr); - } - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::MOD, instr); - } else { - return DoArithmeticT(Token::MOD, instr); - } -} - - -LInstruction* LChunkBuilder::DoMul(HMul* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - HValue* left = instr->BetterLeftOperand(); - HValue* right = instr->BetterRightOperand(); - LOperand* left_op; - LOperand* right_op; - bool can_overflow = instr->CheckFlag(HValue::kCanOverflow); - bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero); - - int32_t constant_value = 0; - if (right->IsConstant()) { - HConstant* constant = HConstant::cast(right); - constant_value = constant->Integer32Value(); - // Constants -1, 0 and 1 can be optimized if the result can overflow. - // For other constants, it can be optimized only without overflow. - if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) { - left_op = UseRegisterAtStart(left); - right_op = UseConstant(right); - } else { - if (bailout_on_minus_zero) { - left_op = UseRegister(left); - } else { - left_op = UseRegisterAtStart(left); - } - right_op = UseRegister(right); - } - } else { - if (bailout_on_minus_zero) { - left_op = UseRegister(left); - } else { - left_op = UseRegisterAtStart(left); - } - right_op = UseRegister(right); - } - LMulI* mul = new(zone()) LMulI(left_op, right_op); - if (right_op->IsConstantOperand() - ? ((can_overflow && constant_value == -1) || - (bailout_on_minus_zero && constant_value <= 0)) - : (can_overflow || bailout_on_minus_zero)) { - AssignEnvironment(mul); - } - return DefineAsRegister(mul); - - } else if (instr->representation().IsDouble()) { - if (instr->HasOneUse() && (instr->uses().value()->IsAdd() || - instr->uses().value()->IsSub())) { - HBinaryOperation* use = HBinaryOperation::cast(instr->uses().value()); - - if (use->IsAdd() && instr == use->left()) { - // This mul is the lhs of an add. The add and mul will be folded into a - // multiply-add in DoAdd. - return NULL; - } - if (instr == use->right() && use->IsAdd() && !use->left()->IsMul()) { - // This mul is the rhs of an add, where the lhs is not another mul. - // The add and mul will be folded into a multiply-add in DoAdd. - return NULL; - } - if (instr == use->right() && use->IsSub()) { - // This mul is the rhs of a sub. The sub and mul will be folded into a - // multiply-sub in DoSub. 
- return NULL; - } - } - - return DoArithmeticD(Token::MUL, instr); - } else { - return DoArithmeticT(Token::MUL, instr); - } -} - - -LInstruction* LChunkBuilder::DoSub(HSub* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - - if (instr->left()->IsConstant()) { - // If lhs is constant, do reverse subtraction instead. - return DoRSub(instr); - } - - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseOrConstantAtStart(instr->right()); - LSubI* sub = new(zone()) LSubI(left, right); - LInstruction* result = DefineAsRegister(sub); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; - } else if (instr->representation().IsDouble()) { - if (instr->right()->IsMul() && instr->right()->HasOneUse()) { - return DoMultiplySub(instr->left(), HMul::cast(instr->right())); - } - - return DoArithmeticD(Token::SUB, instr); - } else { - return DoArithmeticT(Token::SUB, instr); - } -} - - -LInstruction* LChunkBuilder::DoRSub(HSub* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - - // Note: The lhs of the subtraction becomes the rhs of the - // reverse-subtraction. - LOperand* left = UseRegisterAtStart(instr->right()); - LOperand* right = UseOrConstantAtStart(instr->left()); - LRSubI* rsb = new(zone()) LRSubI(left, right); - LInstruction* result = DefineAsRegister(rsb); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) { - LOperand* multiplier_op = UseRegisterAtStart(mul->left()); - LOperand* multiplicand_op = UseRegisterAtStart(mul->right()); - LOperand* addend_op = UseRegisterAtStart(addend); - return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op, - multiplicand_op)); -} - - -LInstruction* LChunkBuilder::DoMultiplySub(HValue* minuend, HMul* mul) { - LOperand* minuend_op = UseRegisterAtStart(minuend); - LOperand* multiplier_op = UseRegisterAtStart(mul->left()); - LOperand* multiplicand_op = UseRegisterAtStart(mul->right()); - - return DefineSameAsFirst(new(zone()) LMultiplySubD(minuend_op, - multiplier_op, - multiplicand_op)); -} - - -LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); - LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand()); - LAddI* add = new(zone()) LAddI(left, right); - LInstruction* result = DefineAsRegister(add); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; - } else if (instr->representation().IsExternal()) { - DCHECK(instr->IsConsistentExternalRepresentation()); - DCHECK(!instr->CheckFlag(HValue::kCanOverflow)); - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseOrConstantAtStart(instr->right()); - LAddI* add = new(zone()) LAddI(left, right); - LInstruction* result = DefineAsRegister(add); - return result; - } else if (instr->representation().IsDouble()) { - if 
(instr->left()->IsMul() && instr->left()->HasOneUse()) { - return DoMultiplyAdd(HMul::cast(instr->left()), instr->right()); - } - - if (instr->right()->IsMul() && instr->right()->HasOneUse()) { - DCHECK(!instr->left()->IsMul() || !instr->left()->HasOneUse()); - return DoMultiplyAdd(HMul::cast(instr->right()), instr->left()); - } - - return DoArithmeticD(Token::ADD, instr); - } else { - return DoArithmeticT(Token::ADD, instr); - } -} - - -LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) { - LOperand* left = NULL; - LOperand* right = NULL; - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - left = UseRegisterAtStart(instr->BetterLeftOperand()); - right = UseOrConstantAtStart(instr->BetterRightOperand()); - } else { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - DCHECK(instr->right()->representation().IsDouble()); - left = UseRegisterAtStart(instr->left()); - right = UseRegisterAtStart(instr->right()); - } - return DefineAsRegister(new(zone()) LMathMinMax(left, right)); -} - - -LInstruction* LChunkBuilder::DoPower(HPower* instr) { - DCHECK(instr->representation().IsDouble()); - // We call a C function for double power. It can't trigger a GC. - // We need to use fixed result register for the call. - Representation exponent_type = instr->right()->representation(); - DCHECK(instr->left()->representation().IsDouble()); - LOperand* left = UseFixedDouble(instr->left(), d0); - LOperand* right = - exponent_type.IsDouble() - ? UseFixedDouble(instr->right(), d1) - : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent()); - LPower* result = new(zone()) LPower(left, right); - return MarkAsCall(DefineFixedDouble(result, d2), - instr, - CAN_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { - DCHECK(instr->left()->representation().IsTagged()); - DCHECK(instr->right()->representation().IsTagged()); - LOperand* context = UseFixed(instr->context(), cp); - LOperand* left = UseFixed(instr->left(), r1); - LOperand* right = UseFixed(instr->right(), r0); - LCmpT* result = new(zone()) LCmpT(context, left, right); - return MarkAsCall(DefineFixed(result, r0), instr); -} - - -LInstruction* LChunkBuilder::DoCompareNumericAndBranch( - HCompareNumericAndBranch* instr) { - Representation r = instr->representation(); - if (r.IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(r)); - DCHECK(instr->right()->representation().Equals(r)); - LOperand* left = UseRegisterOrConstantAtStart(instr->left()); - LOperand* right = UseRegisterOrConstantAtStart(instr->right()); - return new(zone()) LCompareNumericAndBranch(left, right); - } else { - DCHECK(r.IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - DCHECK(instr->right()->representation().IsDouble()); - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterAtStart(instr->right()); - return new(zone()) LCompareNumericAndBranch(left, right); - } -} - - -LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch( - HCompareObjectEqAndBranch* instr) { - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterAtStart(instr->right()); - return new(zone()) LCmpObjectEqAndBranch(left, right); -} - - -LInstruction* LChunkBuilder::DoCompareHoleAndBranch( - HCompareHoleAndBranch* instr) { - LOperand* value = 
UseRegisterAtStart(instr->value()); - return new(zone()) LCmpHoleAndBranch(value); -} - - -LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* temp = TempRegister(); - return new(zone()) LIsStringAndBranch(value, temp); -} - - -LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - return new(zone()) LIsSmiAndBranch(Use(instr->value())); -} - - -LInstruction* LChunkBuilder::DoIsUndetectableAndBranch( - HIsUndetectableAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - return new(zone()) LIsUndetectableAndBranch(value, TempRegister()); -} - - -LInstruction* LChunkBuilder::DoStringCompareAndBranch( - HStringCompareAndBranch* instr) { - DCHECK(instr->left()->representation().IsTagged()); - DCHECK(instr->right()->representation().IsTagged()); - LOperand* context = UseFixed(instr->context(), cp); - LOperand* left = UseFixed(instr->left(), r1); - LOperand* right = UseFixed(instr->right(), r0); - LStringCompareAndBranch* result = - new(zone()) LStringCompareAndBranch(context, left, right); - return MarkAsCall(result, instr); -} - - -LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch( - HHasInstanceTypeAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - return new(zone()) LHasInstanceTypeAndBranch(value); -} - -LInstruction* LChunkBuilder::DoClassOfTestAndBranch( - HClassOfTestAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegister(instr->value()); - return new (zone()) LClassOfTestAndBranch(value, TempRegister()); -} - -LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) { - LOperand* string = UseRegisterAtStart(instr->string()); - LOperand* index = UseRegisterOrConstantAtStart(instr->index()); - return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index)); -} - - -LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) { - LOperand* string = UseRegisterAtStart(instr->string()); - LOperand* index = FLAG_debug_code - ? UseRegisterAtStart(instr->index()) - : UseRegisterOrConstantAtStart(instr->index()); - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL; - return new(zone()) LSeqStringSetChar(context, string, index, value); -} - - -LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { - if (!FLAG_debug_code && instr->skip_check()) return NULL; - LOperand* index = UseRegisterOrConstantAtStart(instr->index()); - LOperand* length = !index->IsConstantOperand() - ? UseRegisterOrConstantAtStart(instr->length()) - : UseRegisterAtStart(instr->length()); - LInstruction* result = new(zone()) LBoundsCheck(index, length); - if (!FLAG_debug_code || !instr->skip_check()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) { - // The control instruction marking the end of a block that completed - // abruptly (e.g., threw an exception). There is nothing specific to do. 
- return NULL; -} - - -LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) { - return NULL; -} - - -LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) { - // All HForceRepresentation instructions should be eliminated in the - // representation change phase of Hydrogen. - UNREACHABLE(); -} - - -LInstruction* LChunkBuilder::DoChange(HChange* instr) { - Representation from = instr->from(); - Representation to = instr->to(); - HValue* val = instr->value(); - if (from.IsSmi()) { - if (to.IsTagged()) { - LOperand* value = UseRegister(val); - return DefineSameAsFirst(new(zone()) LDummyUse(value)); - } - from = Representation::Tagged(); - } - if (from.IsTagged()) { - if (to.IsDouble()) { - LOperand* value = UseRegister(val); - LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value)); - if (!val->representation().IsSmi()) result = AssignEnvironment(result); - return result; - } else if (to.IsSmi()) { - LOperand* value = UseRegister(val); - if (val->type().IsSmi()) { - return DefineSameAsFirst(new(zone()) LDummyUse(value)); - } - return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value))); - } else { - DCHECK(to.IsInteger32()); - if (val->type().IsSmi() || val->representation().IsSmi()) { - LOperand* value = UseRegisterAtStart(val); - return DefineAsRegister(new(zone()) LSmiUntag(value, false)); - } else { - LOperand* value = UseRegister(val); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempDoubleRegister(); - LInstruction* result = - DefineSameAsFirst(new(zone()) LTaggedToI(value, temp1, temp2)); - if (!val->representation().IsSmi()) result = AssignEnvironment(result); - return result; - } - } - } else if (from.IsDouble()) { - if (to.IsTagged()) { - info()->MarkAsDeferredCalling(); - LOperand* value = UseRegister(val); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - LUnallocated* result_temp = TempRegister(); - LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2); - return AssignPointerMap(Define(result, result_temp)); - } else if (to.IsSmi()) { - LOperand* value = UseRegister(val); - return AssignEnvironment( - DefineAsRegister(new(zone()) LDoubleToSmi(value))); - } else { - DCHECK(to.IsInteger32()); - LOperand* value = UseRegister(val); - LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value)); - if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result); - return result; - } - } else if (from.IsInteger32()) { - info()->MarkAsDeferredCalling(); - if (to.IsTagged()) { - if (!instr->CheckFlag(HValue::kCanOverflow)) { - LOperand* value = UseRegisterAtStart(val); - return DefineAsRegister(new(zone()) LSmiTag(value)); - } else if (val->CheckFlag(HInstruction::kUint32)) { - LOperand* value = UseRegisterAtStart(val); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2); - return AssignPointerMap(DefineAsRegister(result)); - } else { - LOperand* value = UseRegisterAtStart(val); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - LNumberTagI* result = new(zone()) LNumberTagI(value, temp1, temp2); - return AssignPointerMap(DefineAsRegister(result)); - } - } else if (to.IsSmi()) { - LOperand* value = UseRegister(val); - LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value)); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; - } else { - DCHECK(to.IsDouble()); - if 
(val->CheckFlag(HInstruction::kUint32)) { - return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val))); - } else { - return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val))); - } - } - } - UNREACHABLE(); -} - - -LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - LInstruction* result = new(zone()) LCheckNonSmi(value); - if (!instr->value()->type().IsHeapObject()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - return AssignEnvironment(new(zone()) LCheckSmi(value)); -} - - -LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered( - HCheckArrayBufferNotNeutered* instr) { - LOperand* view = UseRegisterAtStart(instr->value()); - LCheckArrayBufferNotNeutered* result = - new (zone()) LCheckArrayBufferNotNeutered(view); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - LInstruction* result = new(zone()) LCheckInstanceType(value); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - return AssignEnvironment(new(zone()) LCheckValue(value)); -} - - -LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) { - if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps; - LOperand* value = UseRegisterAtStart(instr->value()); - LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value)); - if (instr->HasMigrationTarget()) { - info()->MarkAsDeferredCalling(); - result = AssignPointerMap(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) { - HValue* value = instr->value(); - Representation input_rep = value->representation(); - LOperand* reg = UseRegister(value); - if (input_rep.IsDouble()) { - return DefineAsRegister(new(zone()) LClampDToUint8(reg)); - } else if (input_rep.IsInteger32()) { - return DefineAsRegister(new(zone()) LClampIToUint8(reg)); - } else { - DCHECK(input_rep.IsSmiOrTagged()); - // Register allocator doesn't (yet) support allocation of double - // temps. Reserve d1 explicitly. - LClampTToUint8* result = - new(zone()) LClampTToUint8(reg, TempDoubleRegister()); - return AssignEnvironment(DefineAsRegister(result)); - } -} - - -LInstruction* LChunkBuilder::DoReturn(HReturn* instr) { - LOperand* context = info()->IsStub() - ? 
UseFixed(instr->context(), cp) - : NULL; - LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count()); - return new(zone()) LReturn(UseFixed(instr->value(), r0), context, - parameter_count); -} - - -LInstruction* LChunkBuilder::DoConstant(HConstant* instr) { - Representation r = instr->representation(); - if (r.IsSmi()) { - return DefineAsRegister(new(zone()) LConstantS); - } else if (r.IsInteger32()) { - return DefineAsRegister(new(zone()) LConstantI); - } else if (r.IsDouble()) { - return DefineAsRegister(new(zone()) LConstantD); - } else if (r.IsExternal()) { - return DefineAsRegister(new(zone()) LConstantE); - } else if (r.IsTagged()) { - return DefineAsRegister(new(zone()) LConstantT); - } else { - UNREACHABLE(); - } -} - - -LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) { - LOperand* context = UseRegisterAtStart(instr->value()); - LInstruction* result = - DefineAsRegister(new(zone()) LLoadContextSlot(context)); - if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) { - LOperand* context; - LOperand* value; - if (instr->NeedsWriteBarrier()) { - context = UseTempRegister(instr->context()); - value = UseTempRegister(instr->value()); - } else { - context = UseRegister(instr->context()); - value = UseRegister(instr->value()); - } - LInstruction* result = new(zone()) LStoreContextSlot(context, value); - if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) { - LOperand* obj = UseRegisterAtStart(instr->object()); - return DefineAsRegister(new(zone()) LLoadNamedField(obj)); -} - - -LInstruction* LChunkBuilder::DoLoadFunctionPrototype( - HLoadFunctionPrototype* instr) { - return AssignEnvironment(DefineAsRegister( - new(zone()) LLoadFunctionPrototype(UseRegister(instr->function())))); -} - - -LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) { - return DefineAsRegister(new(zone()) LLoadRoot); -} - - -LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { - DCHECK(instr->key()->representation().IsSmiOrInteger32()); - ElementsKind elements_kind = instr->elements_kind(); - LOperand* key = UseRegisterOrConstantAtStart(instr->key()); - LInstruction* result = NULL; - - if (!instr->is_fixed_typed_array()) { - LOperand* obj = NULL; - if (instr->representation().IsDouble()) { - obj = UseRegister(instr->elements()); - } else { - DCHECK(instr->representation().IsSmiOrTagged()); - obj = UseRegisterAtStart(instr->elements()); - } - result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr)); - } else { - DCHECK( - (instr->representation().IsInteger32() && - !IsDoubleOrFloatElementsKind(elements_kind)) || - (instr->representation().IsDouble() && - IsDoubleOrFloatElementsKind(elements_kind))); - LOperand* backing_store = UseRegister(instr->elements()); - LOperand* backing_store_owner = UseAny(instr->backing_store_owner()); - result = DefineAsRegister( - new (zone()) LLoadKeyed(backing_store, key, backing_store_owner)); - } - - bool needs_environment; - if (instr->is_fixed_typed_array()) { - // see LCodeGen::DoLoadKeyedExternalArray - needs_environment = elements_kind == UINT32_ELEMENTS && - !instr->CheckFlag(HInstruction::kUint32); - } else { - // see LCodeGen::DoLoadKeyedFixedDoubleArray and - // LCodeGen::DoLoadKeyedFixedArray - needs_environment 
= - instr->RequiresHoleCheck() || - (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && info()->IsStub()); - } - - if (needs_environment) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { - if (!instr->is_fixed_typed_array()) { - DCHECK(instr->elements()->representation().IsTagged()); - bool needs_write_barrier = instr->NeedsWriteBarrier(); - LOperand* object = NULL; - LOperand* key = NULL; - LOperand* val = NULL; - - if (instr->value()->representation().IsDouble()) { - object = UseRegisterAtStart(instr->elements()); - val = UseRegister(instr->value()); - key = UseRegisterOrConstantAtStart(instr->key()); - } else { - DCHECK(instr->value()->representation().IsSmiOrTagged()); - if (needs_write_barrier) { - object = UseTempRegister(instr->elements()); - val = UseTempRegister(instr->value()); - key = UseTempRegister(instr->key()); - } else { - object = UseRegisterAtStart(instr->elements()); - val = UseRegisterAtStart(instr->value()); - key = UseRegisterOrConstantAtStart(instr->key()); - } - } - - return new (zone()) LStoreKeyed(object, key, val, nullptr); - } - - DCHECK( - (instr->value()->representation().IsInteger32() && - !IsDoubleOrFloatElementsKind(instr->elements_kind())) || - (instr->value()->representation().IsDouble() && - IsDoubleOrFloatElementsKind(instr->elements_kind()))); - DCHECK(instr->elements()->representation().IsExternal()); - LOperand* val = UseRegister(instr->value()); - LOperand* key = UseRegisterOrConstantAtStart(instr->key()); - LOperand* backing_store = UseRegister(instr->elements()); - LOperand* backing_store_owner = UseAny(instr->backing_store_owner()); - return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner); -} - - -LInstruction* LChunkBuilder::DoTransitionElementsKind( - HTransitionElementsKind* instr) { - if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) { - LOperand* object = UseRegister(instr->object()); - LOperand* new_map_reg = TempRegister(); - LTransitionElementsKind* result = - new(zone()) LTransitionElementsKind(object, NULL, new_map_reg); - return result; - } else { - LOperand* object = UseFixed(instr->object(), r0); - LOperand* context = UseFixed(instr->context(), cp); - LTransitionElementsKind* result = - new(zone()) LTransitionElementsKind(object, context, NULL); - return MarkAsCall(result, instr); - } -} - - -LInstruction* LChunkBuilder::DoTrapAllocationMemento( - HTrapAllocationMemento* instr) { - LOperand* object = UseRegister(instr->object()); - LOperand* temp = TempRegister(); - LTrapAllocationMemento* result = - new(zone()) LTrapAllocationMemento(object, temp); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) { - info()->MarkAsDeferredCalling(); - LOperand* context = UseFixed(instr->context(), cp); - LOperand* object = Use(instr->object()); - LOperand* elements = Use(instr->elements()); - LOperand* key = UseRegisterOrConstant(instr->key()); - LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity()); - - LMaybeGrowElements* result = new (zone()) - LMaybeGrowElements(context, object, elements, key, current_capacity); - DefineFixed(result, r0); - return AssignPointerMap(AssignEnvironment(result)); -} - - -LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { - bool is_in_object = instr->access().IsInobject(); - bool needs_write_barrier = instr->NeedsWriteBarrier(); - bool needs_write_barrier_for_map = 
instr->has_transition() && - instr->NeedsWriteBarrierForMap(); - - LOperand* obj; - if (needs_write_barrier) { - obj = is_in_object - ? UseRegister(instr->object()) - : UseTempRegister(instr->object()); - } else { - obj = needs_write_barrier_for_map - ? UseRegister(instr->object()) - : UseRegisterAtStart(instr->object()); - } - - LOperand* val; - if (needs_write_barrier) { - val = UseTempRegister(instr->value()); - } else if (instr->field_representation().IsDouble()) { - val = UseRegisterAtStart(instr->value()); - } else { - val = UseRegister(instr->value()); - } - - // We need a temporary register for write barrier of the map field. - LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL; - - return new(zone()) LStoreNamedField(obj, val, temp); -} - - -LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) { - LOperand* context = UseFixed(instr->context(), cp); - LOperand* left = UseFixed(instr->left(), r1); - LOperand* right = UseFixed(instr->right(), r0); - return MarkAsCall( - DefineFixed(new(zone()) LStringAdd(context, left, right), r0), - instr); -} - - -LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) { - LOperand* string = UseTempRegister(instr->string()); - LOperand* index = UseTempRegister(instr->index()); - LOperand* context = UseAny(instr->context()); - LStringCharCodeAt* result = - new(zone()) LStringCharCodeAt(context, string, index); - return AssignPointerMap(DefineAsRegister(result)); -} - - -LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) { - LOperand* char_code = UseRegister(instr->value()); - LOperand* context = UseAny(instr->context()); - LStringCharFromCode* result = - new(zone()) LStringCharFromCode(context, char_code); - return AssignPointerMap(DefineAsRegister(result)); -} - - -LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) { - LOperand* size = UseRegisterOrConstant(instr->size()); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - if (instr->IsAllocationFolded()) { - LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2); - return DefineAsRegister(result); - } else { - info()->MarkAsDeferredCalling(); - LOperand* context = UseAny(instr->context()); - LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2); - return AssignPointerMap(DefineAsRegister(result)); - } -} - - -LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) { - DCHECK(argument_count_ == 0); - allocator_->MarkAsOsrEntry(); - current_block_->last_environment()->set_ast_id(instr->ast_id()); - return AssignEnvironment(new(zone()) LOsrEntry); -} - - -LInstruction* LChunkBuilder::DoParameter(HParameter* instr) { - LParameter* result = new(zone()) LParameter; - if (instr->kind() == HParameter::STACK_PARAMETER) { - int spill_index = chunk()->GetParameterStackSlot(instr->index()); - return DefineAsSpilled(result, spill_index); - } else { - DCHECK(info()->IsStub()); - CallInterfaceDescriptor descriptor = graph()->descriptor(); - int index = static_cast(instr->index()); - Register reg = descriptor.GetRegisterParameter(index); - return DefineFixed(result, reg); - } -} - - -LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) { - // Use an index that corresponds to the location in the unoptimized frame, - // which the optimized frame will subsume. 
- int env_index = instr->index(); - int spill_index = 0; - if (instr->environment()->is_parameter_index(env_index)) { - spill_index = chunk()->GetParameterStackSlot(env_index); - } else { - spill_index = env_index - instr->environment()->first_local_index(); - if (spill_index > LUnallocated::kMaxFixedSlotIndex) { - Retry(kTooManySpillSlotsNeededForOSR); - spill_index = 0; - } - spill_index += StandardFrameConstants::kFixedSlotCount; - } - return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index); -} - - -LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) { - // There are no real uses of the arguments object. - // arguments.length and element access are supported directly on - // stack arguments, and any real arguments object use causes a bailout. - // So this value is never used. - return NULL; -} - - -LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) { - instr->ReplayEnvironment(current_block_->last_environment()); - - // There are no real uses of a captured object. - return NULL; -} - - -LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) { - info()->MarkAsRequiresFrame(); - LOperand* args = UseRegister(instr->arguments()); - LOperand* length = UseRegisterOrConstantAtStart(instr->length()); - LOperand* index = UseRegisterOrConstantAtStart(instr->index()); - return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index)); -} - - -LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) { - LOperand* context = UseFixed(instr->context(), cp); - LOperand* value = UseFixed(instr->value(), r3); - LTypeof* result = new (zone()) LTypeof(context, value); - return MarkAsCall(DefineFixed(result, r0), instr); -} - - -LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) { - return new(zone()) LTypeofIsAndBranch(UseRegister(instr->value())); -} - - -LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) { - instr->ReplayEnvironment(current_block_->last_environment()); - return NULL; -} - - -LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) { - if (instr->is_function_entry()) { - LOperand* context = UseFixed(instr->context(), cp); - return MarkAsCall(new(zone()) LStackCheck(context), instr); - } else { - DCHECK(instr->is_backwards_branch()); - LOperand* context = UseAny(instr->context()); - return AssignEnvironment( - AssignPointerMap(new(zone()) LStackCheck(context))); - } -} - - -LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { - HEnvironment* outer = current_block_->last_environment(); - outer->set_ast_id(instr->ReturnId()); - HConstant* undefined = graph()->GetConstantUndefined(); - HEnvironment* inner = outer->CopyForInlining( - instr->closure(), instr->arguments_count(), instr->function(), undefined, - instr->inlining_kind(), instr->syntactic_tail_call_mode()); - // Only replay binding of arguments object if it wasn't removed from graph. 
- if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) { - inner->Bind(instr->arguments_var(), instr->arguments_object()); - } - inner->BindContext(instr->closure_context()); - inner->set_entry(instr); - current_block_->UpdateEnvironment(inner); - return NULL; -} - - -LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) { - LInstruction* pop = NULL; - - HEnvironment* env = current_block_->last_environment(); - - if (env->entry()->arguments_pushed()) { - int argument_count = env->arguments_environment()->parameter_count(); - pop = new(zone()) LDrop(argument_count); - DCHECK(instr->argument_delta() == -argument_count); - } - - HEnvironment* outer = current_block_->last_environment()-> - DiscardInlined(false); - current_block_->UpdateEnvironment(outer); - - return pop; -} - - -LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) { - LOperand* context = UseFixed(instr->context(), cp); - LOperand* object = UseFixed(instr->enumerable(), r0); - LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object); - return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) { - LOperand* map = UseRegister(instr->map()); - return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map))); -} - - -LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* map = UseRegisterAtStart(instr->map()); - return AssignEnvironment(new(zone()) LCheckMapValue(value, map)); -} - - -LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) { - LOperand* object = UseRegister(instr->object()); - LOperand* index = UseTempRegister(instr->index()); - LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index); - LInstruction* result = DefineSameAsFirst(load); - return AssignPointerMap(result); -} - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/arm/lithium-arm.h b/src/crankshaft/arm/lithium-arm.h deleted file mode 100644 index fede1c1bda..0000000000 --- a/src/crankshaft/arm/lithium-arm.h +++ /dev/null @@ -1,2491 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_ARM_LITHIUM_ARM_H_ -#define V8_CRANKSHAFT_ARM_LITHIUM_ARM_H_ - -#include "src/crankshaft/hydrogen.h" -#include "src/crankshaft/lithium.h" -#include "src/crankshaft/lithium-allocator.h" -#include "src/safepoint-table.h" -#include "src/utils.h" - -namespace v8 { -namespace internal { - -// Forward declarations. 
-class LCodeGen; - -#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \ - V(AccessArgumentsAt) \ - V(AddI) \ - V(Allocate) \ - V(ApplyArguments) \ - V(ArgumentsElements) \ - V(ArgumentsLength) \ - V(ArithmeticD) \ - V(ArithmeticT) \ - V(BitI) \ - V(BoundsCheck) \ - V(Branch) \ - V(CallWithDescriptor) \ - V(CallNewArray) \ - V(CallRuntime) \ - V(CheckArrayBufferNotNeutered) \ - V(CheckInstanceType) \ - V(CheckNonSmi) \ - V(CheckMaps) \ - V(CheckMapValue) \ - V(CheckSmi) \ - V(CheckValue) \ - V(ClampDToUint8) \ - V(ClampIToUint8) \ - V(ClampTToUint8) \ - V(ClassOfTestAndBranch) \ - V(CompareNumericAndBranch) \ - V(CmpObjectEqAndBranch) \ - V(CmpHoleAndBranch) \ - V(CmpMapAndBranch) \ - V(CmpT) \ - V(ConstantD) \ - V(ConstantE) \ - V(ConstantI) \ - V(ConstantS) \ - V(ConstantT) \ - V(Context) \ - V(DebugBreak) \ - V(DeclareGlobals) \ - V(Deoptimize) \ - V(DivByConstI) \ - V(DivByPowerOf2I) \ - V(DivI) \ - V(DoubleToI) \ - V(DoubleToSmi) \ - V(Drop) \ - V(Dummy) \ - V(DummyUse) \ - V(FastAllocate) \ - V(FlooringDivByConstI) \ - V(FlooringDivByPowerOf2I) \ - V(FlooringDivI) \ - V(ForInCacheArray) \ - V(ForInPrepareMap) \ - V(Goto) \ - V(HasInPrototypeChainAndBranch) \ - V(HasInstanceTypeAndBranch) \ - V(InnerAllocatedObject) \ - V(InstructionGap) \ - V(Integer32ToDouble) \ - V(InvokeFunction) \ - V(IsStringAndBranch) \ - V(IsSmiAndBranch) \ - V(IsUndetectableAndBranch) \ - V(Label) \ - V(LazyBailout) \ - V(LoadContextSlot) \ - V(LoadRoot) \ - V(LoadFieldByIndex) \ - V(LoadFunctionPrototype) \ - V(LoadKeyed) \ - V(LoadNamedField) \ - V(MathAbs) \ - V(MathClz32) \ - V(MathCos) \ - V(MathSin) \ - V(MathExp) \ - V(MathFloor) \ - V(MathFround) \ - V(MathLog) \ - V(MathMinMax) \ - V(MathPowHalf) \ - V(MathRound) \ - V(MathSqrt) \ - V(MaybeGrowElements) \ - V(ModByConstI) \ - V(ModByPowerOf2I) \ - V(ModI) \ - V(MulI) \ - V(MultiplyAddD) \ - V(MultiplySubD) \ - V(NumberTagD) \ - V(NumberTagI) \ - V(NumberTagU) \ - V(NumberUntagD) \ - V(OsrEntry) \ - V(Parameter) \ - V(Power) \ - V(Prologue) \ - V(PushArgument) \ - V(Return) \ - V(SeqStringGetChar) \ - V(SeqStringSetChar) \ - V(ShiftI) \ - V(SmiTag) \ - V(SmiUntag) \ - V(StackCheck) \ - V(StoreCodeEntry) \ - V(StoreContextSlot) \ - V(StoreKeyed) \ - V(StoreNamedField) \ - V(StringAdd) \ - V(StringCharCodeAt) \ - V(StringCharFromCode) \ - V(StringCompareAndBranch) \ - V(SubI) \ - V(RSubI) \ - V(TaggedToI) \ - V(ThisFunction) \ - V(TransitionElementsKind) \ - V(TrapAllocationMemento) \ - V(Typeof) \ - V(TypeofIsAndBranch) \ - V(Uint32ToDouble) \ - V(UnknownOSRValue) \ - V(WrapReceiver) - -#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \ - Opcode opcode() const final { return LInstruction::k##type; } \ - void CompileToNative(LCodeGen* generator) final; \ - const char* Mnemonic() const final { return mnemonic; } \ - static L##type* cast(LInstruction* instr) { \ - DCHECK(instr->Is##type()); \ - return reinterpret_cast(instr); \ - } - - -#define DECLARE_HYDROGEN_ACCESSOR(type) \ - H##type* hydrogen() const { \ - return H##type::cast(hydrogen_value()); \ - } - - -class LInstruction : public ZoneObject { - public: - LInstruction() - : environment_(NULL), - hydrogen_value_(NULL), - bit_field_(IsCallBits::encode(false)) { - } - - virtual ~LInstruction() {} - - virtual void CompileToNative(LCodeGen* generator) = 0; - virtual const char* Mnemonic() const = 0; - virtual void PrintTo(StringStream* stream); - virtual void PrintDataTo(StringStream* stream); - virtual void PrintOutputOperandTo(StringStream* stream); - - enum Opcode { - // Declare a unique enum value 
for each instruction.
-#define DECLARE_OPCODE(type) k##type,
-    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
-    kNumberOfInstructions
-#undef DECLARE_OPCODE
-  };
-
-  virtual Opcode opcode() const = 0;
-
-  // Declare non-virtual type testers for all leaf IR classes.
-#define DECLARE_PREDICATE(type) \
-  bool Is##type() const { return opcode() == k##type; }
-  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
-#undef DECLARE_PREDICATE
-
-  // Declare virtual predicates for instructions that don't have
-  // an opcode.
-  virtual bool IsGap() const { return false; }
-
-  virtual bool IsControl() const { return false; }
-
-  // Try deleting this instruction if possible.
-  virtual bool TryDelete() { return false; }
-
-  void set_environment(LEnvironment* env) { environment_ = env; }
-  LEnvironment* environment() const { return environment_; }
-  bool HasEnvironment() const { return environment_ != NULL; }
-
-  void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
-  LPointerMap* pointer_map() const { return pointer_map_.get(); }
-  bool HasPointerMap() const { return pointer_map_.is_set(); }
-
-  void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
-  HValue* hydrogen_value() const { return hydrogen_value_; }
-
-  void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
-  bool IsCall() const { return IsCallBits::decode(bit_field_); }
-
-  void MarkAsSyntacticTailCall() {
-    bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
-  }
-  bool IsSyntacticTailCall() const {
-    return IsSyntacticTailCallBits::decode(bit_field_);
-  }
-
-  // Interface to the register allocator and iterators.
-  bool ClobbersTemps() const { return IsCall(); }
-  bool ClobbersRegisters() const { return IsCall(); }
-  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
-    return IsCall();
-  }
-
-  // Interface to the register allocator and iterators.
-  bool IsMarkedAsCall() const { return IsCall(); }
-
-  virtual bool HasResult() const = 0;
-  virtual LOperand* result() const = 0;
-
-  LOperand* FirstInput() { return InputAt(0); }
-  LOperand* Output() { return HasResult() ? result() : NULL; }
-
-  virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
-
-#ifdef DEBUG
-  void VerifyCall();
-#endif
-
-  virtual int InputCount() = 0;
-  virtual LOperand* InputAt(int i) = 0;
-
- private:
-  // Iterator support.
-  friend class InputIterator;
-
-  friend class TempIterator;
-  virtual int TempCount() = 0;
-  virtual LOperand* TempAt(int i) = 0;
-
-  class IsCallBits: public BitField<bool, 0, 1> {};
-  class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
-  };
-
-  LEnvironment* environment_;
-  SetOncePointer<LPointerMap> pointer_map_;
-  HValue* hydrogen_value_;
-  int bit_field_;
-};
-
-
-// R = number of result operands (0 or 1).
-template <int R>
-class LTemplateResultInstruction : public LInstruction {
- public:
-  // Allow 0 or 1 output operands.
-  STATIC_ASSERT(R == 0 || R == 1);
-  bool HasResult() const final { return R != 0 && result() != NULL; }
-  void set_result(LOperand* operand) { results_[0] = operand; }
-  LOperand* result() const override { return results_[0]; }
-
- protected:
-  EmbeddedContainer<LOperand*, R> results_;
-};
-
-
-// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template <int R, int I, int T>
-class LTemplateInstruction : public LTemplateResultInstruction<R> {
- protected:
-  EmbeddedContainer<LOperand*, I> inputs_;
-  EmbeddedContainer<LOperand*, T> temps_;
-
- private:
-  // Iterator support.
-  int InputCount() final { return I; }
-  LOperand* InputAt(int i) final { return inputs_[i]; }
-
-  int TempCount() final { return T; }
-  LOperand* TempAt(int i) final { return temps_[i]; }
-};
-
-
-class LGap : public LTemplateInstruction<0, 0, 0> {
- public:
-  explicit LGap(HBasicBlock* block)
-      : block_(block) {
-    parallel_moves_[BEFORE] = NULL;
-    parallel_moves_[START] = NULL;
-    parallel_moves_[END] = NULL;
-    parallel_moves_[AFTER] = NULL;
-  }
-
-  // Can't use the DECLARE-macro here because of sub-classes.
-  bool IsGap() const override { return true; }
-  void PrintDataTo(StringStream* stream) override;
-  static LGap* cast(LInstruction* instr) {
-    DCHECK(instr->IsGap());
-    return reinterpret_cast<LGap*>(instr);
-  }
-
-  bool IsRedundant() const;
-
-  HBasicBlock* block() const { return block_; }
-
-  enum InnerPosition {
-    BEFORE,
-    START,
-    END,
-    AFTER,
-    FIRST_INNER_POSITION = BEFORE,
-    LAST_INNER_POSITION = AFTER
-  };
-
-  LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
-    if (parallel_moves_[pos] == NULL) {
-      parallel_moves_[pos] = new(zone) LParallelMove(zone);
-    }
-    return parallel_moves_[pos];
-  }
-
-  LParallelMove* GetParallelMove(InnerPosition pos) {
-    return parallel_moves_[pos];
-  }
-
- private:
-  LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
-  HBasicBlock* block_;
-};
-
-
-class LInstructionGap final : public LGap {
- public:
-  explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
-
-  bool HasInterestingComment(LCodeGen* gen) const override {
-    return !IsRedundant();
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
-};
-
-
-class LGoto final : public LTemplateInstruction<0, 0, 0> {
- public:
-  explicit LGoto(HBasicBlock* block) : block_(block) { }
-
-  bool HasInterestingComment(LCodeGen* gen) const override;
-  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
-  void PrintDataTo(StringStream* stream) override;
-  bool IsControl() const override { return true; }
-
-  int block_id() const { return block_->block_id(); }
-
- private:
-  HBasicBlock* block_;
-};
-
-
-class LPrologue final : public LTemplateInstruction<0, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
-};
-
-
-class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
- public:
-  LLazyBailout() : gap_instructions_size_(0) { }
-
-  DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
-
-  void set_gap_instructions_size(int gap_instructions_size) {
-    gap_instructions_size_ = gap_instructions_size;
-  }
-  int gap_instructions_size() { return gap_instructions_size_; }
-
- private:
-  int gap_instructions_size_;
-};
-
-
-class LDummy final : public LTemplateInstruction<1, 0, 0> {
- public:
-  LDummy() {}
-  DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
-};
-
-
-class LDummyUse final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LDummyUse(LOperand* value) {
-    inputs_[0] = value;
-  }
-  DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
-};
-
-
-class LDeoptimize final : public LTemplateInstruction<0, 0, 0> {
- public:
-  bool IsControl() const override { return true; }
-  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
-  DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
-};
-
-
-class LLabel final : public LGap {
- public:
-  explicit LLabel(HBasicBlock* block)
-      : LGap(block), replacement_(NULL) { }
-
-  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
-  DECLARE_CONCRETE_INSTRUCTION(Label, "label")
-
-  void PrintDataTo(StringStream* stream) override;
-
-  int block_id() const { return block()->block_id(); }
-  bool
is_loop_header() const { return block()->IsLoopHeader(); } - bool is_osr_entry() const { return block()->is_osr_entry(); } - Label* label() { return &label_; } - LLabel* replacement() const { return replacement_; } - void set_replacement(LLabel* label) { replacement_ = label; } - bool HasReplacement() const { return replacement_ != NULL; } - - private: - Label label_; - LLabel* replacement_; -}; - - -class LParameter final : public LTemplateInstruction<1, 0, 0> { - public: - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter") -}; - - -class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> { - public: - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value") -}; - - -template -class LControlInstruction : public LTemplateInstruction<0, I, T> { - public: - LControlInstruction() : false_label_(NULL), true_label_(NULL) { } - - bool IsControl() const final { return true; } - - int SuccessorCount() { return hydrogen()->SuccessorCount(); } - HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); } - - int TrueDestination(LChunk* chunk) { - return chunk->LookupDestination(true_block_id()); - } - int FalseDestination(LChunk* chunk) { - return chunk->LookupDestination(false_block_id()); - } - - Label* TrueLabel(LChunk* chunk) { - if (true_label_ == NULL) { - true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk)); - } - return true_label_; - } - Label* FalseLabel(LChunk* chunk) { - if (false_label_ == NULL) { - false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk)); - } - return false_label_; - } - - protected: - int true_block_id() { return SuccessorAt(0)->block_id(); } - int false_block_id() { return SuccessorAt(1)->block_id(); } - - private: - HControlInstruction* hydrogen() { - return HControlInstruction::cast(this->hydrogen_value()); - } - - Label* false_label_; - Label* true_label_; -}; - - -class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> { - public: - LWrapReceiver(LOperand* receiver, LOperand* function) { - inputs_[0] = receiver; - inputs_[1] = function; - } - - DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver") - DECLARE_HYDROGEN_ACCESSOR(WrapReceiver) - - LOperand* receiver() { return inputs_[0]; } - LOperand* function() { return inputs_[1]; } -}; - - -class LApplyArguments final : public LTemplateInstruction<1, 4, 0> { - public: - LApplyArguments(LOperand* function, - LOperand* receiver, - LOperand* length, - LOperand* elements) { - inputs_[0] = function; - inputs_[1] = receiver; - inputs_[2] = length; - inputs_[3] = elements; - } - - DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments") - DECLARE_HYDROGEN_ACCESSOR(ApplyArguments) - - LOperand* function() { return inputs_[0]; } - LOperand* receiver() { return inputs_[1]; } - LOperand* length() { return inputs_[2]; } - LOperand* elements() { return inputs_[3]; } -}; - - -class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> { - public: - LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) { - inputs_[0] = arguments; - inputs_[1] = length; - inputs_[2] = index; - } - - DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at") - - LOperand* arguments() { return inputs_[0]; } - LOperand* length() { return inputs_[1]; } - LOperand* index() { return inputs_[2]; } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LArgumentsLength final : 
public LTemplateInstruction<1, 1, 0> { - public: - explicit LArgumentsLength(LOperand* elements) { - inputs_[0] = elements; - } - - LOperand* elements() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length") -}; - - -class LArgumentsElements final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements") - DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements) -}; - - -class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> { - public: - LModByPowerOf2I(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) - - private: - int32_t divisor_; -}; - - -class LModByConstI final : public LTemplateInstruction<1, 1, 0> { - public: - LModByConstI(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) - - private: - int32_t divisor_; -}; - - -class LModI final : public LTemplateInstruction<1, 2, 2> { - public: - LModI(LOperand* left, LOperand* right, LOperand* temp, LOperand* temp2) { - inputs_[0] = left; - inputs_[1] = right; - temps_[0] = temp; - temps_[1] = temp2; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) -}; - - -class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> { - public: - LDivByPowerOf2I(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i") - DECLARE_HYDROGEN_ACCESSOR(Div) - - private: - int32_t divisor_; -}; - - -class LDivByConstI final : public LTemplateInstruction<1, 1, 0> { - public: - LDivByConstI(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i") - DECLARE_HYDROGEN_ACCESSOR(Div) - - private: - int32_t divisor_; -}; - - -class LDivI final : public LTemplateInstruction<1, 2, 1> { - public: - LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) { - inputs_[0] = dividend; - inputs_[1] = divisor; - temps_[0] = temp; - } - - LOperand* dividend() { return inputs_[0]; } - LOperand* divisor() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i") - DECLARE_HYDROGEN_ACCESSOR(BinaryOperation) -}; - - -class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> { - public: - LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I, - "flooring-div-by-power-of-2-i") - DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) - - private: - int32_t divisor_; -}; - 
- -class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 2> { - public: - LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) { - inputs_[0] = dividend; - divisor_ = divisor; - temps_[0] = temp; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i") - DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) - - private: - int32_t divisor_; -}; - - -class LFlooringDivI final : public LTemplateInstruction<1, 2, 1> { - public: - LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) { - inputs_[0] = dividend; - inputs_[1] = divisor; - temps_[0] = temp; - } - - LOperand* dividend() { return inputs_[0]; } - LOperand* divisor() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i") - DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) -}; - - -class LMulI final : public LTemplateInstruction<1, 2, 0> { - public: - LMulI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i") - DECLARE_HYDROGEN_ACCESSOR(Mul) -}; - - -// Instruction for computing multiplier * multiplicand + addend. -class LMultiplyAddD final : public LTemplateInstruction<1, 3, 0> { - public: - LMultiplyAddD(LOperand* addend, LOperand* multiplier, - LOperand* multiplicand) { - inputs_[0] = addend; - inputs_[1] = multiplier; - inputs_[2] = multiplicand; - } - - LOperand* addend() { return inputs_[0]; } - LOperand* multiplier() { return inputs_[1]; } - LOperand* multiplicand() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d") -}; - - -// Instruction for computing minuend - multiplier * multiplicand. 
-class LMultiplySubD final : public LTemplateInstruction<1, 3, 0> { - public: - LMultiplySubD(LOperand* minuend, LOperand* multiplier, - LOperand* multiplicand) { - inputs_[0] = minuend; - inputs_[1] = multiplier; - inputs_[2] = multiplicand; - } - - LOperand* minuend() { return inputs_[0]; } - LOperand* multiplier() { return inputs_[1]; } - LOperand* multiplicand() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(MultiplySubD, "multiply-sub-d") -}; - - -class LDebugBreak final : public LTemplateInstruction<0, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break") -}; - - -class LCompareNumericAndBranch final : public LControlInstruction<2, 0> { - public: - LCompareNumericAndBranch(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch, - "compare-numeric-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch) - - Token::Value op() const { return hydrogen()->token(); } - bool is_double() const { - return hydrogen()->representation().IsDouble(); - } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LMathFloor final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathFloor(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - - -class LMathRound final : public LTemplateInstruction<1, 1, 1> { - public: - LMathRound(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - - -class LMathFround final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathFround(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround") -}; - - -class LMathAbs final : public LTemplateInstruction<1, 2, 0> { - public: - LMathAbs(LOperand* context, LOperand* value) { - inputs_[1] = context; - inputs_[0] = value; - } - - LOperand* context() { return inputs_[1]; } - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - - -class LMathLog final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathLog(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log") -}; - - -class LMathClz32 final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathClz32(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32") -}; - -class LMathCos final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathCos(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos") -}; - -class LMathSin final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathSin(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin") -}; - -class LMathExp final : public LTemplateInstruction<1, 1, 
0> { - public: - explicit LMathExp(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp") -}; - - -class LMathSqrt final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathSqrt(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt") -}; - - -class LMathPowHalf final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathPowHalf(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half") -}; - - -class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> { - public: - LCmpObjectEqAndBranch(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch) -}; - - -class LCmpHoleAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LCmpHoleAndBranch(LOperand* object) { - inputs_[0] = object; - } - - LOperand* object() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch) -}; - - -class LIsStringAndBranch final : public LControlInstruction<1, 1> { - public: - LIsStringAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LIsSmiAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LIsSmiAndBranch(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> { - public: - explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch, - "is-undetectable-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LStringCompareAndBranch final : public LControlInstruction<3, 0> { - public: - LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch, - "string-compare-and-branch") - DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch) - - Token::Value op() const { return hydrogen()->token(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LHasInstanceTypeAndBranch(LOperand* value) { - inputs_[0] = value; - } - 
- LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch, - "has-instance-type-and-branch") - DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - -class LClassOfTestAndBranch final : public LControlInstruction<1, 1> { - public: - LClassOfTestAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, "class-of-test-and-branch") - DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - -class LCmpT final : public LTemplateInstruction<1, 3, 0> { - public: - LCmpT(LOperand* context, LOperand* left, LOperand* right) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t") - DECLARE_HYDROGEN_ACCESSOR(CompareGeneric) - - Token::Value op() const { return hydrogen()->token(); } -}; - - -class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> { - public: - LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) { - inputs_[0] = object; - inputs_[1] = prototype; - } - - LOperand* object() const { return inputs_[0]; } - LOperand* prototype() const { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch, - "has-in-prototype-chain-and-branch") - DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch) -}; - - -class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> { - public: - LBoundsCheck(LOperand* index, LOperand* length) { - inputs_[0] = index; - inputs_[1] = length; - } - - LOperand* index() { return inputs_[0]; } - LOperand* length() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check") - DECLARE_HYDROGEN_ACCESSOR(BoundsCheck) -}; - - -class LBitI final : public LTemplateInstruction<1, 2, 0> { - public: - LBitI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - Token::Value op() const { return hydrogen()->op(); } - - DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i") - DECLARE_HYDROGEN_ACCESSOR(Bitwise) -}; - - -class LShiftI final : public LTemplateInstruction<1, 2, 0> { - public: - LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt) - : op_(op), can_deopt_(can_deopt) { - inputs_[0] = left; - inputs_[1] = right; - } - - Token::Value op() const { return op_; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - bool can_deopt() const { return can_deopt_; } - - DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i") - - private: - Token::Value op_; - bool can_deopt_; -}; - - -class LSubI final : public LTemplateInstruction<1, 2, 0> { - public: - LSubI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i") - DECLARE_HYDROGEN_ACCESSOR(Sub) -}; - - -class LRSubI final : public LTemplateInstruction<1, 2, 0> { - public: - LRSubI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* 
right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(RSubI, "rsub-i") - DECLARE_HYDROGEN_ACCESSOR(Sub) -}; - - -class LConstantI final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - int32_t value() const { return hydrogen()->Integer32Value(); } -}; - - -class LConstantS final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); } -}; - - -class LConstantD final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - double value() const { return hydrogen()->DoubleValue(); } - uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); } -}; - - -class LConstantE final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - ExternalReference value() const { - return hydrogen()->ExternalReferenceValue(); - } -}; - - -class LConstantT final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - Handle value(Isolate* isolate) const { - return hydrogen()->handle(isolate); - } -}; - - -class LBranch final : public LControlInstruction<1, 0> { - public: - explicit LBranch(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(Branch, "branch") - DECLARE_HYDROGEN_ACCESSOR(Branch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LCmpMapAndBranch final : public LControlInstruction<1, 1> { - public: - LCmpMapAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareMap) - - Handle map() const { return hydrogen()->map().handle(); } -}; - - -class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> { - public: - LSeqStringGetChar(LOperand* string, LOperand* index) { - inputs_[0] = string; - inputs_[1] = index; - } - - LOperand* string() const { return inputs_[0]; } - LOperand* index() const { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char") - DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar) -}; - - -class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 0> { - public: - LSeqStringSetChar(LOperand* context, - LOperand* string, - LOperand* index, - LOperand* value) { - inputs_[0] = context; - inputs_[1] = string; - inputs_[2] = index; - inputs_[3] = value; - } - - LOperand* string() { return inputs_[1]; } - LOperand* index() { return inputs_[2]; } - LOperand* value() { return inputs_[3]; } - - DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char") - DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar) -}; - - -class LAddI final : public LTemplateInstruction<1, 2, 0> { - public: - LAddI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i") - DECLARE_HYDROGEN_ACCESSOR(Add) -}; - - -class LMathMinMax final : public 
LTemplateInstruction<1, 2, 0> { - public: - LMathMinMax(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max") - DECLARE_HYDROGEN_ACCESSOR(MathMinMax) -}; - - -class LPower final : public LTemplateInstruction<1, 2, 0> { - public: - LPower(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(Power, "power") - DECLARE_HYDROGEN_ACCESSOR(Power) -}; - - -class LArithmeticD final : public LTemplateInstruction<1, 2, 0> { - public: - LArithmeticD(Token::Value op, LOperand* left, LOperand* right) - : op_(op) { - inputs_[0] = left; - inputs_[1] = right; - } - - Token::Value op() const { return op_; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - Opcode opcode() const override { return LInstruction::kArithmeticD; } - void CompileToNative(LCodeGen* generator) override; - const char* Mnemonic() const override; - - private: - Token::Value op_; -}; - - -class LArithmeticT final : public LTemplateInstruction<1, 3, 0> { - public: - LArithmeticT(Token::Value op, - LOperand* context, - LOperand* left, - LOperand* right) - : op_(op) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - Token::Value op() const { return op_; } - - Opcode opcode() const override { return LInstruction::kArithmeticT; } - void CompileToNative(LCodeGen* generator) override; - const char* Mnemonic() const override; - - DECLARE_HYDROGEN_ACCESSOR(BinaryOperation) - - private: - Token::Value op_; -}; - - -class LReturn final : public LTemplateInstruction<0, 3, 0> { - public: - LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) { - inputs_[0] = value; - inputs_[1] = context; - inputs_[2] = parameter_count; - } - - LOperand* value() { return inputs_[0]; } - - bool has_constant_parameter_count() { - return parameter_count()->IsConstantOperand(); - } - LConstantOperand* constant_parameter_count() { - DCHECK(has_constant_parameter_count()); - return LConstantOperand::cast(parameter_count()); - } - LOperand* parameter_count() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(Return, "return") -}; - - -class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LLoadNamedField(LOperand* object) { - inputs_[0] = object; - } - - LOperand* object() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field") - DECLARE_HYDROGEN_ACCESSOR(LoadNamedField) -}; - - -class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LLoadFunctionPrototype(LOperand* function) { - inputs_[0] = function; - } - - LOperand* function() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype") - DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype) -}; - - -class LLoadRoot final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root") - DECLARE_HYDROGEN_ACCESSOR(LoadRoot) - - Heap::RootListIndex index() const { return hydrogen()->index(); } -}; - - -class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> { - public: - LLoadKeyed(LOperand* 
elements, LOperand* key, LOperand* backing_store_owner) { - inputs_[0] = elements; - inputs_[1] = key; - inputs_[2] = backing_store_owner; - } - - LOperand* elements() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - LOperand* backing_store_owner() { return inputs_[2]; } - ElementsKind elements_kind() const { - return hydrogen()->elements_kind(); - } - bool is_fixed_typed_array() const { - return hydrogen()->is_fixed_typed_array(); - } - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed") - DECLARE_HYDROGEN_ACCESSOR(LoadKeyed) - - void PrintDataTo(StringStream* stream) override; - uint32_t base_offset() const { return hydrogen()->base_offset(); } -}; - - -class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LLoadContextSlot(LOperand* context) { - inputs_[0] = context; - } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot") - DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot) - - int slot_index() { return hydrogen()->slot_index(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LStoreContextSlot final : public LTemplateInstruction<0, 2, 0> { - public: - LStoreContextSlot(LOperand* context, LOperand* value) { - inputs_[0] = context; - inputs_[1] = value; - } - - LOperand* context() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot") - DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot) - - int slot_index() { return hydrogen()->slot_index(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LPushArgument final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LPushArgument(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument") -}; - - -class LDrop final : public LTemplateInstruction<0, 0, 0> { - public: - explicit LDrop(int count) : count_(count) { } - - int count() const { return count_; } - - DECLARE_CONCRETE_INSTRUCTION(Drop, "drop") - - private: - int count_; -}; - - -class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 0> { - public: - LStoreCodeEntry(LOperand* function, LOperand* code_object) { - inputs_[0] = function; - inputs_[1] = code_object; - } - - LOperand* function() { return inputs_[0]; } - LOperand* code_object() { return inputs_[1]; } - - void PrintDataTo(StringStream* stream) override; - - DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry") - DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry) -}; - - -class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> { - public: - LInnerAllocatedObject(LOperand* base_object, LOperand* offset) { - inputs_[0] = base_object; - inputs_[1] = offset; - } - - LOperand* base_object() const { return inputs_[0]; } - LOperand* offset() const { return inputs_[1]; } - - void PrintDataTo(StringStream* stream) override; - - DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object") -}; - - -class LThisFunction final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function") - DECLARE_HYDROGEN_ACCESSOR(ThisFunction) -}; - - -class LContext final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(Context, "context") - DECLARE_HYDROGEN_ACCESSOR(Context) -}; - - -class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LDeclareGlobals(LOperand* 
context) { - inputs_[0] = context; - } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals") - DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals) -}; - - -class LCallWithDescriptor final : public LTemplateResultInstruction<1> { - public: - LCallWithDescriptor(CallInterfaceDescriptor descriptor, - const ZoneList& operands, Zone* zone) - : descriptor_(descriptor), - inputs_(descriptor.GetRegisterParameterCount() + - kImplicitRegisterParameterCount, - zone) { - DCHECK(descriptor.GetRegisterParameterCount() + - kImplicitRegisterParameterCount == - operands.length()); - inputs_.AddAll(operands, zone); - } - - LOperand* target() const { return inputs_[0]; } - - const CallInterfaceDescriptor descriptor() { return descriptor_; } - - DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor) - - // The target and context are passed as implicit parameters that are not - // explicitly listed in the descriptor. - static const int kImplicitRegisterParameterCount = 2; - - private: - DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor") - - void PrintDataTo(StringStream* stream) override; - - int arity() const { return hydrogen()->argument_count() - 1; } - - CallInterfaceDescriptor descriptor_; - ZoneList inputs_; - - // Iterator support. - int InputCount() final { return inputs_.length(); } - LOperand* InputAt(int i) final { return inputs_[i]; } - - int TempCount() final { return 0; } - LOperand* TempAt(int i) final { return NULL; } -}; - - -class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> { - public: - LInvokeFunction(LOperand* context, LOperand* function) { - inputs_[0] = context; - inputs_[1] = function; - } - - LOperand* context() { return inputs_[0]; } - LOperand* function() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function") - DECLARE_HYDROGEN_ACCESSOR(InvokeFunction) - - void PrintDataTo(StringStream* stream) override; - - int arity() const { return hydrogen()->argument_count() - 1; } -}; - - -class LCallNewArray final : public LTemplateInstruction<1, 2, 0> { - public: - LCallNewArray(LOperand* context, LOperand* constructor) { - inputs_[0] = context; - inputs_[1] = constructor; - } - - LOperand* context() { return inputs_[0]; } - LOperand* constructor() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array") - DECLARE_HYDROGEN_ACCESSOR(CallNewArray) - - void PrintDataTo(StringStream* stream) override; - - int arity() const { return hydrogen()->argument_count() - 1; } -}; - - -class LCallRuntime final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LCallRuntime(LOperand* context) { - inputs_[0] = context; - } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime") - DECLARE_HYDROGEN_ACCESSOR(CallRuntime) - - bool ClobbersDoubleRegisters(Isolate* isolate) const override { - return save_doubles() == kDontSaveFPRegs; - } - - const Runtime::Function* function() const { return hydrogen()->function(); } - int arity() const { return hydrogen()->argument_count(); } - SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); } -}; - - -class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LInteger32ToDouble(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double") -}; - - -class LUint32ToDouble final : public LTemplateInstruction<1, 1, 0> { 
- public: - explicit LUint32ToDouble(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double") -}; - - -class LNumberTagI final : public LTemplateInstruction<1, 1, 2> { - public: - LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i") -}; - - -class LNumberTagU final : public LTemplateInstruction<1, 1, 2> { - public: - LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u") -}; - - -class LNumberTagD final : public LTemplateInstruction<1, 1, 2> { - public: - LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d") - DECLARE_HYDROGEN_ACCESSOR(Change) -}; - - -class LDoubleToSmi final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LDoubleToSmi(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi") - DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) - - bool truncating() { return hydrogen()->CanTruncateToInt32(); } -}; - - -// Sometimes truncating conversion from a tagged value to an int32. -class LDoubleToI final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LDoubleToI(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i") - DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) - - bool truncating() { return hydrogen()->CanTruncateToInt32(); } -}; - - -// Truncating conversion from a tagged value to an int32. 
-class LTaggedToI final : public LTemplateInstruction<1, 1, 2> { - public: - LTaggedToI(LOperand* value, - LOperand* temp, - LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i") - DECLARE_HYDROGEN_ACCESSOR(Change) - - bool truncating() { return hydrogen()->CanTruncateToInt32(); } -}; - - -class LSmiTag final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LSmiTag(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag") - DECLARE_HYDROGEN_ACCESSOR(Change) -}; - - -class LNumberUntagD final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LNumberUntagD(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag") - DECLARE_HYDROGEN_ACCESSOR(Change) - - bool truncating() { return hydrogen()->CanTruncateToNumber(); } -}; - - -class LSmiUntag final : public LTemplateInstruction<1, 1, 0> { - public: - LSmiUntag(LOperand* value, bool needs_check) - : needs_check_(needs_check) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - bool needs_check() const { return needs_check_; } - - DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag") - - private: - bool needs_check_; -}; - - -class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> { - public: - LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) { - inputs_[0] = object; - inputs_[1] = value; - temps_[0] = temp; - } - - LOperand* object() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") - DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) - - void PrintDataTo(StringStream* stream) override; - - Representation representation() const { - return hydrogen()->field_representation(); - } -}; - - -class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> { - public: - LStoreKeyed(LOperand* object, LOperand* key, LOperand* value, - LOperand* backing_store_owner) { - inputs_[0] = object; - inputs_[1] = key; - inputs_[2] = value; - inputs_[3] = backing_store_owner; - } - - bool is_fixed_typed_array() const { - return hydrogen()->is_fixed_typed_array(); - } - LOperand* elements() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - LOperand* value() { return inputs_[2]; } - LOperand* backing_store_owner() { return inputs_[3]; } - ElementsKind elements_kind() const { - return hydrogen()->elements_kind(); - } - - DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyed) - - void PrintDataTo(StringStream* stream) override; - bool NeedsCanonicalization() { - if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() || - hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) { - return false; - } - return hydrogen()->NeedsCanonicalization(); - } - uint32_t base_offset() const { return hydrogen()->base_offset(); } -}; - - -class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> { - public: - LTransitionElementsKind(LOperand* object, - LOperand* context, - LOperand* new_map_temp) { - inputs_[0] = object; - inputs_[1] = context; - temps_[0] = new_map_temp; - } - - LOperand* context() { return 
inputs_[1]; } - LOperand* object() { return inputs_[0]; } - LOperand* new_map_temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind, - "transition-elements-kind") - DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind) - - void PrintDataTo(StringStream* stream) override; - - Handle original_map() { return hydrogen()->original_map().handle(); } - Handle transitioned_map() { - return hydrogen()->transitioned_map().handle(); - } - ElementsKind from_kind() { return hydrogen()->from_kind(); } - ElementsKind to_kind() { return hydrogen()->to_kind(); } -}; - - -class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> { - public: - LTrapAllocationMemento(LOperand* object, - LOperand* temp) { - inputs_[0] = object; - temps_[0] = temp; - } - - LOperand* object() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, - "trap-allocation-memento") -}; - - -class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> { - public: - LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements, - LOperand* key, LOperand* current_capacity) { - inputs_[0] = context; - inputs_[1] = object; - inputs_[2] = elements; - inputs_[3] = key; - inputs_[4] = current_capacity; - } - - LOperand* context() { return inputs_[0]; } - LOperand* object() { return inputs_[1]; } - LOperand* elements() { return inputs_[2]; } - LOperand* key() { return inputs_[3]; } - LOperand* current_capacity() { return inputs_[4]; } - - bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; } - - DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements) - DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements") -}; - - -class LStringAdd final : public LTemplateInstruction<1, 3, 0> { - public: - LStringAdd(LOperand* context, LOperand* left, LOperand* right) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add") - DECLARE_HYDROGEN_ACCESSOR(StringAdd) -}; - - -class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> { - public: - LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) { - inputs_[0] = context; - inputs_[1] = string; - inputs_[2] = index; - } - - LOperand* context() { return inputs_[0]; } - LOperand* string() { return inputs_[1]; } - LOperand* index() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at") - DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt) -}; - - -class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> { - public: - explicit LStringCharFromCode(LOperand* context, LOperand* char_code) { - inputs_[0] = context; - inputs_[1] = char_code; - } - - LOperand* context() { return inputs_[0]; } - LOperand* char_code() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code") - DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode) -}; - - -class LCheckValue final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckValue(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value") - DECLARE_HYDROGEN_ACCESSOR(CheckValue) -}; - - -class LCheckArrayBufferNotNeutered final - : public LTemplateInstruction<0, 1, 0> { - public: - explicit 
LCheckArrayBufferNotNeutered(LOperand* view) { inputs_[0] = view; } - - LOperand* view() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered, - "check-array-buffer-not-neutered") - DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered) -}; - - -class LCheckInstanceType final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckInstanceType(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type") - DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType) -}; - - -class LCheckMaps final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckMaps(LOperand* value = NULL) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps") - DECLARE_HYDROGEN_ACCESSOR(CheckMaps) -}; - - -class LCheckSmi final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LCheckSmi(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi") -}; - - -class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckNonSmi(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi") - DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject) -}; - - -class LClampDToUint8 final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LClampDToUint8(LOperand* unclamped) { - inputs_[0] = unclamped; - } - - LOperand* unclamped() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8") -}; - - -class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LClampIToUint8(LOperand* unclamped) { - inputs_[0] = unclamped; - } - - LOperand* unclamped() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8") -}; - - -class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> { - public: - LClampTToUint8(LOperand* unclamped, LOperand* temp) { - inputs_[0] = unclamped; - temps_[0] = temp; - } - - LOperand* unclamped() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8") -}; - - -class LAllocate final : public LTemplateInstruction<1, 2, 2> { - public: - LAllocate(LOperand* context, - LOperand* size, - LOperand* temp1, - LOperand* temp2) { - inputs_[0] = context; - inputs_[1] = size; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* context() { return inputs_[0]; } - LOperand* size() { return inputs_[1]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate") - DECLARE_HYDROGEN_ACCESSOR(Allocate) -}; - -class LFastAllocate final : public LTemplateInstruction<1, 1, 2> { - public: - LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) { - inputs_[0] = size; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* size() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate") - DECLARE_HYDROGEN_ACCESSOR(Allocate) -}; - -class LTypeof final : public LTemplateInstruction<1, 2, 0> { - public: - LTypeof(LOperand* context, LOperand* value) { - inputs_[0] = context; - inputs_[1] = value; - } - - LOperand* context() { 
return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof") -}; - - -class LTypeofIsAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LTypeofIsAndBranch(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch") - DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch) - - Handle type_literal() { return hydrogen()->type_literal(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LOsrEntry final : public LTemplateInstruction<0, 0, 0> { - public: - LOsrEntry() {} - - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry") -}; - - -class LStackCheck final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LStackCheck(LOperand* context) { - inputs_[0] = context; - } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check") - DECLARE_HYDROGEN_ACCESSOR(StackCheck) - - Label* done_label() { return &done_label_; } - - private: - Label done_label_; -}; - - -class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> { - public: - LForInPrepareMap(LOperand* context, LOperand* object) { - inputs_[0] = context; - inputs_[1] = object; - } - - LOperand* context() { return inputs_[0]; } - LOperand* object() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map") -}; - - -class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LForInCacheArray(LOperand* map) { - inputs_[0] = map; - } - - LOperand* map() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array") - - int idx() { - return HForInCacheArray::cast(this->hydrogen_value())->idx(); - } -}; - - -class LCheckMapValue final : public LTemplateInstruction<0, 2, 0> { - public: - LCheckMapValue(LOperand* value, LOperand* map) { - inputs_[0] = value; - inputs_[1] = map; - } - - LOperand* value() { return inputs_[0]; } - LOperand* map() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value") -}; - - -class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> { - public: - LLoadFieldByIndex(LOperand* object, LOperand* index) { - inputs_[0] = object; - inputs_[1] = index; - } - - LOperand* object() { return inputs_[0]; } - LOperand* index() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index") -}; - - -class LChunkBuilder; -class LPlatformChunk final : public LChunk { - public: - LPlatformChunk(CompilationInfo* info, HGraph* graph) - : LChunk(info, graph) { } - - int GetNextSpillIndex(RegisterKind kind); - LOperand* GetNextSpillSlot(RegisterKind kind); -}; - - -class LChunkBuilder final : public LChunkBuilderBase { - public: - LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator) - : LChunkBuilderBase(info, graph), - current_instruction_(NULL), - current_block_(NULL), - next_block_(NULL), - allocator_(allocator) {} - - // Build the sequence for the graph. - LPlatformChunk* Build(); - - // Declare methods that deal with the individual node types. 
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node); - HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) -#undef DECLARE_DO - - LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend); - LInstruction* DoMultiplySub(HValue* minuend, HMul* mul); - LInstruction* DoRSub(HSub* instr); - - static bool HasMagicNumberForDivisor(int32_t divisor); - - LInstruction* DoMathFloor(HUnaryMathOperation* instr); - LInstruction* DoMathRound(HUnaryMathOperation* instr); - LInstruction* DoMathFround(HUnaryMathOperation* instr); - LInstruction* DoMathAbs(HUnaryMathOperation* instr); - LInstruction* DoMathLog(HUnaryMathOperation* instr); - LInstruction* DoMathCos(HUnaryMathOperation* instr); - LInstruction* DoMathSin(HUnaryMathOperation* instr); - LInstruction* DoMathExp(HUnaryMathOperation* instr); - LInstruction* DoMathSqrt(HUnaryMathOperation* instr); - LInstruction* DoMathPowHalf(HUnaryMathOperation* instr); - LInstruction* DoMathClz32(HUnaryMathOperation* instr); - LInstruction* DoDivByPowerOf2I(HDiv* instr); - LInstruction* DoDivByConstI(HDiv* instr); - LInstruction* DoDivI(HDiv* instr); - LInstruction* DoModByPowerOf2I(HMod* instr); - LInstruction* DoModByConstI(HMod* instr); - LInstruction* DoModI(HMod* instr); - LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr); - LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr); - LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr); - - private: - // Methods for getting operands for Use / Define / Temp. - LUnallocated* ToUnallocated(Register reg); - LUnallocated* ToUnallocated(DoubleRegister reg); - - // Methods for setting up define-use relationships. - MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand); - MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register); - MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value, - DoubleRegister fixed_register); - - // A value that is guaranteed to be allocated to a register. - // Operand created by UseRegister is guaranteed to be live until the end of - // instruction. This means that register allocator will not reuse it's - // register for any other operand inside instruction. - // Operand created by UseRegisterAtStart is guaranteed to be live only at - // instruction start. Register allocator is free to assign the same register - // to some other operand used inside instruction (i.e. temporary or - // output). - MUST_USE_RESULT LOperand* UseRegister(HValue* value); - MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value); - - // An input operand in a register that may be trashed. - MUST_USE_RESULT LOperand* UseTempRegister(HValue* value); - - // An input operand in a register or stack slot. - MUST_USE_RESULT LOperand* Use(HValue* value); - MUST_USE_RESULT LOperand* UseAtStart(HValue* value); - - // An input operand in a register, stack slot or a constant operand. - MUST_USE_RESULT LOperand* UseOrConstant(HValue* value); - MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value); - - // An input operand in a register or a constant operand. - MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value); - MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value); - - // An input operand in a constant operand. - MUST_USE_RESULT LOperand* UseConstant(HValue* value); - - // An input operand in register, stack slot or a constant operand. - // Will not be moved to a register even if one is freely available. - MUST_USE_RESULT LOperand* UseAny(HValue* value) override; - - // Temporary operand that must be in a register. 
- MUST_USE_RESULT LUnallocated* TempRegister(); - MUST_USE_RESULT LUnallocated* TempDoubleRegister(); - MUST_USE_RESULT LOperand* FixedTemp(Register reg); - MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg); - - // Methods for setting up define-use relationships. - // Return the same instruction that they are passed. - LInstruction* Define(LTemplateResultInstruction<1>* instr, - LUnallocated* result); - LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr); - LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr, - int index); - LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr); - LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr, - Register reg); - LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr, - DoubleRegister reg); - LInstruction* AssignEnvironment(LInstruction* instr); - LInstruction* AssignPointerMap(LInstruction* instr); - - enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY }; - - // By default we assume that instruction sequences generated for calls - // cannot deoptimize eagerly and we do not attach environment to this - // instruction. - LInstruction* MarkAsCall( - LInstruction* instr, - HInstruction* hinstr, - CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY); - - void VisitInstruction(HInstruction* current); - void AddInstruction(LInstruction* instr, HInstruction* current); - - void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block); - LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr); - LInstruction* DoArithmeticD(Token::Value op, - HArithmeticBinaryOperation* instr); - LInstruction* DoArithmeticT(Token::Value op, - HBinaryOperation* instr); - - HInstruction* current_instruction_; - HBasicBlock* current_block_; - HBasicBlock* next_block_; - LAllocator* allocator_; - - DISALLOW_COPY_AND_ASSIGN(LChunkBuilder); -}; - -#undef DECLARE_HYDROGEN_ACCESSOR -#undef DECLARE_CONCRETE_INSTRUCTION - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_ARM_LITHIUM_ARM_H_ diff --git a/src/crankshaft/arm/lithium-codegen-arm.cc b/src/crankshaft/arm/lithium-codegen-arm.cc deleted file mode 100644 index a028f20cd5..0000000000 --- a/src/crankshaft/arm/lithium-codegen-arm.cc +++ /dev/null @@ -1,5329 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#include "src/crankshaft/arm/lithium-codegen-arm.h" - -#include "src/assembler-inl.h" -#include "src/base/bits.h" -#include "src/builtins/builtins-constructor.h" -#include "src/code-factory.h" -#include "src/code-stubs.h" -#include "src/crankshaft/arm/lithium-gap-resolver-arm.h" -#include "src/ic/ic.h" -#include "src/ic/stub-cache.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - - -class SafepointGenerator final : public CallWrapper { - public: - SafepointGenerator(LCodeGen* codegen, - LPointerMap* pointers, - Safepoint::DeoptMode mode) - : codegen_(codegen), - pointers_(pointers), - deopt_mode_(mode) { } - virtual ~SafepointGenerator() {} - - void BeforeCall(int call_size) const override {} - - void AfterCall() const override { - codegen_->RecordSafepoint(pointers_, deopt_mode_); - } - - private: - LCodeGen* codegen_; - LPointerMap* pointers_; - Safepoint::DeoptMode deopt_mode_; -}; - - -#define __ masm()-> - -bool LCodeGen::GenerateCode() { - LPhase phase("Z_Code generation", chunk()); - DCHECK(is_unused()); - status_ = GENERATING; - - // Open a frame scope to indicate that there is a frame on the stack. The - // NONE indicates that the scope shouldn't actually generate code to set up - // the frame (that is done in GeneratePrologue). - FrameScope frame_scope(masm_, StackFrame::NONE); - - return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() && - GenerateJumpTable() && GenerateSafepointTable(); -} - - -void LCodeGen::FinishCode(Handle code) { - DCHECK(is_done()); - code->set_stack_slots(GetTotalFrameSlotCount()); - code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); - PopulateDeoptimizationData(code); -} - - -void LCodeGen::SaveCallerDoubles() { - DCHECK(info()->saves_caller_doubles()); - DCHECK(NeedsEagerFrame()); - Comment(";;; Save clobbered callee double registers"); - int count = 0; - BitVector* doubles = chunk()->allocated_double_registers(); - BitVector::Iterator save_iterator(doubles); - while (!save_iterator.Done()) { - __ vstr(DoubleRegister::from_code(save_iterator.Current()), - MemOperand(sp, count * kDoubleSize)); - save_iterator.Advance(); - count++; - } -} - - -void LCodeGen::RestoreCallerDoubles() { - DCHECK(info()->saves_caller_doubles()); - DCHECK(NeedsEagerFrame()); - Comment(";;; Restore clobbered callee double registers"); - BitVector* doubles = chunk()->allocated_double_registers(); - BitVector::Iterator save_iterator(doubles); - int count = 0; - while (!save_iterator.Done()) { - __ vldr(DoubleRegister::from_code(save_iterator.Current()), - MemOperand(sp, count * kDoubleSize)); - save_iterator.Advance(); - count++; - } -} - - -bool LCodeGen::GeneratePrologue() { - DCHECK(is_generating()); - - if (info()->IsOptimizing()) { - ProfileEntryHookStub::MaybeCallEntryHook(masm_); - - // r1: Callee's JS function. - // cp: Callee's context. - // pp: Callee's constant pool pointer (if enabled) - // fp: Caller's frame pointer. - // lr: Caller's pc. - } - - info()->set_prologue_offset(masm_->pc_offset()); - if (NeedsEagerFrame()) { - if (info()->IsStub()) { - __ StubPrologue(StackFrame::STUB); - } else { - __ Prologue(info()->GeneratePreagedPrologue()); - } - frame_is_built_ = true; - } - - // Reserve space for the stack slots needed by the code. 
- int slots = GetStackSlotCount(); - if (slots > 0) { - if (FLAG_debug_code) { - __ sub(sp, sp, Operand(slots * kPointerSize)); - __ push(r0); - __ push(r1); - __ add(r0, sp, Operand(slots * kPointerSize)); - __ mov(r1, Operand(kSlotsZapValue)); - Label loop; - __ bind(&loop); - __ sub(r0, r0, Operand(kPointerSize)); - __ str(r1, MemOperand(r0, 2 * kPointerSize)); - __ cmp(r0, sp); - __ b(ne, &loop); - __ pop(r1); - __ pop(r0); - } else { - __ sub(sp, sp, Operand(slots * kPointerSize)); - } - } - - if (info()->saves_caller_doubles()) { - SaveCallerDoubles(); - } - return !is_aborted(); -} - - -void LCodeGen::DoPrologue(LPrologue* instr) { - Comment(";;; Prologue begin"); - - // Possibly allocate a local context. - if (info()->scope()->NeedsContext()) { - Comment(";;; Allocate local context"); - bool need_write_barrier = true; - // Argument to NewContext is the function, which is in r1. - int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; - Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt; - if (info()->scope()->is_script_scope()) { - __ push(r1); - __ Push(info()->scope()->scope_info()); - __ CallRuntime(Runtime::kNewScriptContext); - deopt_mode = Safepoint::kLazyDeopt; - } else { - if (slots <= ConstructorBuiltins::MaximumFunctionContextSlots()) { - Callable callable = CodeFactory::FastNewFunctionContext( - isolate(), info()->scope()->scope_type()); - __ mov(FastNewFunctionContextDescriptor::SlotsRegister(), - Operand(slots)); - __ Call(callable.code(), RelocInfo::CODE_TARGET); - // Result of the FastNewFunctionContext builtin is always in new space. - need_write_barrier = false; - } else { - __ push(r1); - __ Push(Smi::FromInt(info()->scope()->scope_type())); - __ CallRuntime(Runtime::kNewFunctionContext); - } - } - RecordSafepoint(deopt_mode); - - // Context is returned in both r0 and cp. It replaces the context - // passed to us. It's saved in the stack and kept live in cp. - __ mov(cp, r0); - __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset)); - // Copy any necessary parameters into the context. - int num_parameters = info()->scope()->num_parameters(); - int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0; - for (int i = first_parameter; i < num_parameters; i++) { - Variable* var = (i == -1) ? info()->scope()->receiver() - : info()->scope()->parameter(i); - if (var->IsContextSlot()) { - int parameter_offset = StandardFrameConstants::kCallerSPOffset + - (num_parameters - 1 - i) * kPointerSize; - // Load parameter from stack. - __ ldr(r0, MemOperand(fp, parameter_offset)); - // Store it in the context. - MemOperand target = ContextMemOperand(cp, var->index()); - __ str(r0, target); - // Update the write barrier. This clobbers r3 and r0. 
- if (need_write_barrier) { - __ RecordWriteContextSlot( - cp, - target.offset(), - r0, - r3, - GetLinkRegisterState(), - kSaveFPRegs); - } else if (FLAG_debug_code) { - Label done; - __ JumpIfInNewSpace(cp, r0, &done); - __ Abort(kExpectedNewSpaceObject); - __ bind(&done); - } - } - } - Comment(";;; End allocate local context"); - } - - Comment(";;; Prologue end"); -} - -void LCodeGen::GenerateOsrPrologue() { UNREACHABLE(); } - -void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { - if (instr->IsCall()) { - EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); - } - if (!instr->IsLazyBailout() && !instr->IsGap()) { - safepoints_.BumpLastLazySafepointIndex(); - } -} - - -bool LCodeGen::GenerateDeferredCode() { - DCHECK(is_generating()); - if (deferred_.length() > 0) { - for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { - LDeferredCode* code = deferred_[i]; - - HValue* value = - instructions_->at(code->instruction_index())->hydrogen_value(); - RecordAndWritePosition(value->position()); - - Comment(";;; <@%d,#%d> " - "-------------------- Deferred %s --------------------", - code->instruction_index(), - code->instr()->hydrogen_value()->id(), - code->instr()->Mnemonic()); - __ bind(code->entry()); - if (NeedsDeferredFrame()) { - Comment(";;; Build frame"); - DCHECK(!frame_is_built_); - DCHECK(info()->IsStub()); - frame_is_built_ = true; - __ mov(scratch0(), Operand(StackFrame::TypeToMarker(StackFrame::STUB))); - __ PushCommonFrame(scratch0()); - Comment(";;; Deferred code"); - } - code->Generate(); - if (NeedsDeferredFrame()) { - Comment(";;; Destroy frame"); - DCHECK(frame_is_built_); - __ PopCommonFrame(scratch0()); - frame_is_built_ = false; - } - __ jmp(code->exit()); - } - } - - // Force constant pool emission at the end of the deferred code to make - // sure that no constant pools are emitted after. - masm()->CheckConstPool(true, false); - - return !is_aborted(); -} - - -bool LCodeGen::GenerateJumpTable() { - // Check that the jump table is accessible from everywhere in the function - // code, i.e. that offsets to the table can be encoded in the 24bit signed - // immediate of a branch instruction. - // To simplify we consider the code size from the first instruction to the - // end of the jump table. We also don't consider the pc load delta. - // Each entry in the jump table generates one instruction and inlines one - // 32bit data after it. - if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) + - jump_table_.length() * 7)) { - Abort(kGeneratedCodeIsTooLarge); - } - - if (jump_table_.length() > 0) { - Label needs_frame, call_deopt_entry; - - Comment(";;; -------------------- Jump table --------------------"); - Address base = jump_table_[0].address; - - Register entry_offset = scratch0(); - - int length = jump_table_.length(); - for (int i = 0; i < length; i++) { - Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i]; - __ bind(&table_entry->label); - - DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type); - Address entry = table_entry->address; - DeoptComment(table_entry->deopt_info); - - // Second-level deopt table entries are contiguous and small, so instead - // of loading the full, absolute address of each one, load an immediate - // offset which will be added to the base address later. 
-      __ mov(entry_offset, Operand(entry - base));
-
-      if (table_entry->needs_frame) {
-        DCHECK(!info()->saves_caller_doubles());
-        Comment(";;; call deopt with frame");
-        __ PushCommonFrame();
-        __ bl(&needs_frame);
-      } else {
-        __ bl(&call_deopt_entry);
-      }
-      masm()->CheckConstPool(false, false);
-    }
-
-    if (needs_frame.is_linked()) {
-      __ bind(&needs_frame);
-      // This variant of deopt can only be used with stubs. Since we don't
-      // have a function pointer to install in the stack frame that we're
-      // building, install a special marker there instead.
-      __ mov(ip, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
-      __ push(ip);
-      DCHECK(info()->IsStub());
-    }
-
-    Comment(";;; call deopt");
-    __ bind(&call_deopt_entry);
-
-    if (info()->saves_caller_doubles()) {
-      DCHECK(info()->IsStub());
-      RestoreCallerDoubles();
-    }
-
-    // Add the base address to the offset previously loaded in entry_offset.
-    __ add(entry_offset, entry_offset,
-           Operand(ExternalReference::ForDeoptEntry(base)));
-    __ bx(entry_offset);
-  }
-
-  // Force constant pool emission at the end of the deopt jump table to make
-  // sure that no constant pools are emitted after.
-  masm()->CheckConstPool(true, false);
-
-  // The deoptimization jump table is the last part of the instruction
-  // sequence. Mark the generated code as done unless we bailed out.
-  if (!is_aborted()) status_ = DONE;
-  return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateSafepointTable() {
-  DCHECK(is_done());
-  safepoints_.Emit(masm(), GetTotalFrameSlotCount());
-  return !is_aborted();
-}
-
-
-Register LCodeGen::ToRegister(int code) const {
-  return Register::from_code(code);
-}
-
-
-DwVfpRegister LCodeGen::ToDoubleRegister(int code) const {
-  return DwVfpRegister::from_code(code);
-}
-
-
-Register LCodeGen::ToRegister(LOperand* op) const {
-  DCHECK(op->IsRegister());
-  return ToRegister(op->index());
-}
-
-
-Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
-  if (op->IsRegister()) {
-    return ToRegister(op->index());
-  } else if (op->IsConstantOperand()) {
-    LConstantOperand* const_op = LConstantOperand::cast(op);
-    HConstant* constant = chunk_->LookupConstant(const_op);
-    Handle<Object> literal = constant->handle(isolate());
-    Representation r = chunk_->LookupLiteralRepresentation(const_op);
-    if (r.IsInteger32()) {
-      AllowDeferredHandleDereference get_number;
-      DCHECK(literal->IsNumber());
-      __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
-    } else if (r.IsDouble()) {
-      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
-    } else {
-      DCHECK(r.IsSmiOrTagged());
-      __ Move(scratch, literal);
-    }
-    return scratch;
-  } else if (op->IsStackSlot()) {
-    __ ldr(scratch, ToMemOperand(op));
-    return scratch;
-  }
-  UNREACHABLE();
-}
-
-
-DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
-  DCHECK(op->IsDoubleRegister());
-  return ToDoubleRegister(op->index());
-}
-
-
-DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
-                                               SwVfpRegister flt_scratch,
-                                               DwVfpRegister dbl_scratch) {
-  if (op->IsDoubleRegister()) {
-    return ToDoubleRegister(op->index());
-  } else if (op->IsConstantOperand()) {
-    LConstantOperand* const_op = LConstantOperand::cast(op);
-    HConstant* constant = chunk_->LookupConstant(const_op);
-    Handle<Object> literal = constant->handle(isolate());
-    Representation r = chunk_->LookupLiteralRepresentation(const_op);
-    if (r.IsInteger32()) {
-      DCHECK(literal->IsNumber());
-      __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
-      __ vmov(flt_scratch, ip);
-      __ vcvt_f64_s32(dbl_scratch, flt_scratch);
-      return dbl_scratch;
-    } else if (r.IsDouble()) {
-      Abort(kUnsupportedDoubleImmediate);
-    } else if (r.IsTagged()) {
-      Abort(kUnsupportedTaggedImmediate);
-    }
-  } else if (op->IsStackSlot()) {
-    // TODO(regis): Why is vldr not taking a MemOperand?
-    // __ vldr(dbl_scratch, ToMemOperand(op));
-    MemOperand mem_op = ToMemOperand(op);
-    __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
-    return dbl_scratch;
-  }
-  UNREACHABLE();
-}
-
-
-Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
-  HConstant* constant = chunk_->LookupConstant(op);
-  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
-  return constant->handle(isolate());
-}
-
-
-bool LCodeGen::IsInteger32(LConstantOperand* op) const {
-  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
-}
-
-
-bool LCodeGen::IsSmi(LConstantOperand* op) const {
-  return chunk_->LookupLiteralRepresentation(op).IsSmi();
-}
-
-
-int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
-  return ToRepresentation(op, Representation::Integer32());
-}
-
-
-int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
-                                   const Representation& r) const {
-  HConstant* constant = chunk_->LookupConstant(op);
-  int32_t value = constant->Integer32Value();
-  if (r.IsInteger32()) return value;
-  DCHECK(r.IsSmiOrTagged());
-  return reinterpret_cast<int32_t>(Smi::FromInt(value));
-}
-
-
-Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
-  HConstant* constant = chunk_->LookupConstant(op);
-  return Smi::FromInt(constant->Integer32Value());
-}
-
-
-double LCodeGen::ToDouble(LConstantOperand* op) const {
-  HConstant* constant = chunk_->LookupConstant(op);
-  DCHECK(constant->HasDoubleValue());
-  return constant->DoubleValue();
-}
-
-
-Operand LCodeGen::ToOperand(LOperand* op) {
-  if (op->IsConstantOperand()) {
-    LConstantOperand* const_op = LConstantOperand::cast(op);
-    HConstant* constant = chunk()->LookupConstant(const_op);
-    Representation r = chunk_->LookupLiteralRepresentation(const_op);
-    if (r.IsSmi()) {
-      DCHECK(constant->HasSmiValue());
-      return Operand(Smi::FromInt(constant->Integer32Value()));
-    } else if (r.IsInteger32()) {
-      DCHECK(constant->HasInteger32Value());
-      return Operand(constant->Integer32Value());
-    } else if (r.IsDouble()) {
-      Abort(kToOperandUnsupportedDoubleImmediate);
-    }
-    DCHECK(r.IsTagged());
-    return Operand(constant->handle(isolate()));
-  } else if (op->IsRegister()) {
-    return Operand(ToRegister(op));
-  } else if (op->IsDoubleRegister()) {
-    Abort(kToOperandIsDoubleRegisterUnimplemented);
-    return Operand::Zero();
-  }
-  // Stack slots not implemented, use ToMemOperand instead.
-  UNREACHABLE();
-}
-
-
-static int ArgumentsOffsetWithoutFrame(int index) {
-  DCHECK(index < 0);
-  return -(index + 1) * kPointerSize;
-}
-
-
-MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
-  DCHECK(!op->IsRegister());
-  DCHECK(!op->IsDoubleRegister());
-  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-  if (NeedsEagerFrame()) {
-    return MemOperand(fp, FrameSlotToFPOffset(op->index()));
-  } else {
-    // Retrieve parameter without eager stack-frame relative to the
-    // stack-pointer.
-    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
-  }
-}
-
-
-MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
-  DCHECK(op->IsDoubleStackSlot());
-  if (NeedsEagerFrame()) {
-    return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
-  } else {
-    // Retrieve parameter without eager stack-frame relative to the
-    // stack-pointer.
- return MemOperand( - sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize); - } -} - - -void LCodeGen::WriteTranslation(LEnvironment* environment, - Translation* translation) { - if (environment == NULL) return; - - // The translation includes one command per value in the environment. - int translation_size = environment->translation_size(); - - WriteTranslation(environment->outer(), translation); - WriteTranslationFrame(environment, translation); - - int object_index = 0; - int dematerialized_index = 0; - for (int i = 0; i < translation_size; ++i) { - LOperand* value = environment->values()->at(i); - AddToTranslation( - environment, translation, value, environment->HasTaggedValueAt(i), - environment->HasUint32ValueAt(i), &object_index, &dematerialized_index); - } -} - - -void LCodeGen::AddToTranslation(LEnvironment* environment, - Translation* translation, - LOperand* op, - bool is_tagged, - bool is_uint32, - int* object_index_pointer, - int* dematerialized_index_pointer) { - if (op == LEnvironment::materialization_marker()) { - int object_index = (*object_index_pointer)++; - if (environment->ObjectIsDuplicateAt(object_index)) { - int dupe_of = environment->ObjectDuplicateOfAt(object_index); - translation->DuplicateObject(dupe_of); - return; - } - int object_length = environment->ObjectLengthAt(object_index); - if (environment->ObjectIsArgumentsAt(object_index)) { - translation->BeginArgumentsObject(object_length); - } else { - translation->BeginCapturedObject(object_length); - } - int dematerialized_index = *dematerialized_index_pointer; - int env_offset = environment->translation_size() + dematerialized_index; - *dematerialized_index_pointer += object_length; - for (int i = 0; i < object_length; ++i) { - LOperand* value = environment->values()->at(env_offset + i); - AddToTranslation(environment, - translation, - value, - environment->HasTaggedValueAt(env_offset + i), - environment->HasUint32ValueAt(env_offset + i), - object_index_pointer, - dematerialized_index_pointer); - } - return; - } - - if (op->IsStackSlot()) { - int index = op->index(); - if (is_tagged) { - translation->StoreStackSlot(index); - } else if (is_uint32) { - translation->StoreUint32StackSlot(index); - } else { - translation->StoreInt32StackSlot(index); - } - } else if (op->IsDoubleStackSlot()) { - int index = op->index(); - translation->StoreDoubleStackSlot(index); - } else if (op->IsRegister()) { - Register reg = ToRegister(op); - if (is_tagged) { - translation->StoreRegister(reg); - } else if (is_uint32) { - translation->StoreUint32Register(reg); - } else { - translation->StoreInt32Register(reg); - } - } else if (op->IsDoubleRegister()) { - DoubleRegister reg = ToDoubleRegister(op); - translation->StoreDoubleRegister(reg); - } else if (op->IsConstantOperand()) { - HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op)); - int src_index = DefineDeoptimizationLiteral(constant->handle(isolate())); - translation->StoreLiteral(src_index); - } else { - UNREACHABLE(); - } -} - - -int LCodeGen::CallCodeSize(Handle code, RelocInfo::Mode mode) { - int size = masm()->CallSize(code, mode); - if (code->kind() == Code::COMPARE_IC) { - size += Assembler::kInstrSize; // extra nop() added in CallCodeGeneric. 
- } - return size; -} - - -void LCodeGen::CallCode(Handle code, - RelocInfo::Mode mode, - LInstruction* instr, - TargetAddressStorageMode storage_mode) { - CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode); -} - - -void LCodeGen::CallCodeGeneric(Handle code, - RelocInfo::Mode mode, - LInstruction* instr, - SafepointMode safepoint_mode, - TargetAddressStorageMode storage_mode) { - DCHECK(instr != NULL); - // Block literal pool emission to ensure nop indicating no inlined smi code - // is in the correct position. - Assembler::BlockConstPoolScope block_const_pool(masm()); - __ Call(code, mode, al, storage_mode, false); - RecordSafepointWithLazyDeopt(instr, safepoint_mode); - - // Signal that we don't inline smi code before these stubs in the - // optimizing code generator. - if (code->kind() == Code::COMPARE_IC) { - __ nop(); - } -} - - -void LCodeGen::CallRuntime(const Runtime::Function* function, - int num_arguments, - LInstruction* instr, - SaveFPRegsMode save_doubles) { - DCHECK(instr != NULL); - - __ CallRuntime(function, num_arguments, save_doubles); - - RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); -} - - -void LCodeGen::LoadContextFromDeferred(LOperand* context) { - if (context->IsRegister()) { - __ Move(cp, ToRegister(context)); - } else if (context->IsStackSlot()) { - __ ldr(cp, ToMemOperand(context)); - } else if (context->IsConstantOperand()) { - HConstant* constant = - chunk_->LookupConstant(LConstantOperand::cast(context)); - __ Move(cp, Handle::cast(constant->handle(isolate()))); - } else { - UNREACHABLE(); - } -} - - -void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, - int argc, - LInstruction* instr, - LOperand* context) { - LoadContextFromDeferred(context); - __ CallRuntimeSaveDoubles(id); - RecordSafepointWithRegisters( - instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); -} - - -void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment, - Safepoint::DeoptMode mode) { - environment->set_has_been_used(); - if (!environment->HasBeenRegistered()) { - // Physical stack frame layout: - // -x ............. -4 0 ..................................... y - // [incoming arguments] [spill slots] [pushed outgoing arguments] - - // Layout of the environment: - // 0 ..................................................... size-1 - // [parameters] [locals] [expression stack including arguments] - - // Layout of the translation: - // 0 ........................................................ size - 1 + 4 - // [expression stack including arguments] [locals] [4 words] [parameters] - // |>------------ translation_size ------------<| - - int frame_count = 0; - int jsframe_count = 0; - for (LEnvironment* e = environment; e != NULL; e = e->outer()) { - ++frame_count; - if (e->frame_type() == JS_FUNCTION) { - ++jsframe_count; - } - } - Translation translation(&translations_, frame_count, jsframe_count, zone()); - WriteTranslation(environment, &translation); - int deoptimization_index = deoptimizations_.length(); - int pc_offset = masm()->pc_offset(); - environment->Register(deoptimization_index, - translation.index(), - (mode == Safepoint::kLazyDeopt) ? 
pc_offset : -1); - deoptimizations_.Add(environment, zone()); - } -} - -void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, - DeoptimizeReason deopt_reason, - Deoptimizer::BailoutType bailout_type) { - LEnvironment* environment = instr->environment(); - RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); - DCHECK(environment->HasBeenRegistered()); - int id = environment->deoptimization_index(); - Address entry = - Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); - if (entry == NULL) { - Abort(kBailoutWasNotPrepared); - return; - } - - if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) { - Register scratch = scratch0(); - ExternalReference count = ExternalReference::stress_deopt_count(isolate()); - - // Store the condition on the stack if necessary - if (condition != al) { - __ mov(scratch, Operand::Zero(), LeaveCC, NegateCondition(condition)); - __ mov(scratch, Operand(1), LeaveCC, condition); - __ push(scratch); - } - - __ push(r1); - __ mov(scratch, Operand(count)); - __ ldr(r1, MemOperand(scratch)); - __ sub(r1, r1, Operand(1), SetCC); - __ mov(r1, Operand(FLAG_deopt_every_n_times), LeaveCC, eq); - __ str(r1, MemOperand(scratch)); - __ pop(r1); - - if (condition != al) { - // Clean up the stack before the deoptimizer call - __ pop(scratch); - } - - __ Call(entry, RelocInfo::RUNTIME_ENTRY, eq); - - // 'Restore' the condition in a slightly hacky way. (It would be better - // to use 'msr' and 'mrs' instructions here, but they are not supported by - // our ARM simulator). - if (condition != al) { - condition = ne; - __ cmp(scratch, Operand::Zero()); - } - } - - if (info()->ShouldTrapOnDeopt()) { - __ stop("trap_on_deopt", condition); - } - - Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id); - - DCHECK(info()->IsStub() || frame_is_built_); - // Go through jump table if we need to handle condition, build frame, or - // restore caller doubles. - if (condition == al && frame_is_built_ && - !info()->saves_caller_doubles()) { - DeoptComment(deopt_info); - __ Call(entry, RelocInfo::RUNTIME_ENTRY); - } else { - Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type, - !frame_is_built_); - // We often have several deopts to the same entry, reuse the last - // jump entry if this is the case. - if (FLAG_trace_deopt || isolate()->is_profiling() || - jump_table_.is_empty() || - !table_entry.IsEquivalentTo(jump_table_.last())) { - jump_table_.Add(table_entry, zone()); - } - __ b(condition, &jump_table_.last().label); - } -} - -void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, - DeoptimizeReason deopt_reason) { - Deoptimizer::BailoutType bailout_type = info()->IsStub() - ? 
Deoptimizer::LAZY - : Deoptimizer::EAGER; - DeoptimizeIf(condition, instr, deopt_reason, bailout_type); -} - - -void LCodeGen::RecordSafepointWithLazyDeopt( - LInstruction* instr, SafepointMode safepoint_mode) { - if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { - RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); - } else { - DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - RecordSafepointWithRegisters( - instr->pointer_map(), 0, Safepoint::kLazyDeopt); - } -} - - -void LCodeGen::RecordSafepoint( - LPointerMap* pointers, - Safepoint::Kind kind, - int arguments, - Safepoint::DeoptMode deopt_mode) { - DCHECK(expected_safepoint_kind_ == kind); - - const ZoneList* operands = pointers->GetNormalizedOperands(); - Safepoint safepoint = safepoints_.DefineSafepoint(masm(), - kind, arguments, deopt_mode); - for (int i = 0; i < operands->length(); i++) { - LOperand* pointer = operands->at(i); - if (pointer->IsStackSlot()) { - safepoint.DefinePointerSlot(pointer->index(), zone()); - } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { - safepoint.DefinePointerRegister(ToRegister(pointer), zone()); - } - } -} - - -void LCodeGen::RecordSafepoint(LPointerMap* pointers, - Safepoint::DeoptMode deopt_mode) { - RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode); -} - - -void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) { - LPointerMap empty_pointers(zone()); - RecordSafepoint(&empty_pointers, deopt_mode); -} - - -void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, - int arguments, - Safepoint::DeoptMode deopt_mode) { - RecordSafepoint( - pointers, Safepoint::kWithRegisters, arguments, deopt_mode); -} - - -static const char* LabelType(LLabel* label) { - if (label->is_loop_header()) return " (loop header)"; - if (label->is_osr_entry()) return " (OSR entry)"; - return ""; -} - - -void LCodeGen::DoLabel(LLabel* label) { - Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------", - current_instruction_, - label->hydrogen_value()->id(), - label->block_id(), - LabelType(label)); - __ bind(label->label()); - current_block_ = label->block_id(); - DoGap(label); -} - - -void LCodeGen::DoParallelMove(LParallelMove* move) { - resolver_.Resolve(move); -} - - -void LCodeGen::DoGap(LGap* gap) { - for (int i = LGap::FIRST_INNER_POSITION; - i <= LGap::LAST_INNER_POSITION; - i++) { - LGap::InnerPosition inner_pos = static_cast(i); - LParallelMove* move = gap->GetParallelMove(inner_pos); - if (move != NULL) DoParallelMove(move); - } -} - - -void LCodeGen::DoInstructionGap(LInstructionGap* instr) { - DoGap(instr); -} - - -void LCodeGen::DoParameter(LParameter* instr) { - // Nothing to do. -} - - -void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { - GenerateOsrPrologue(); -} - - -void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - DCHECK(dividend.is(ToRegister(instr->result()))); - - // Theoretically, a variation of the branch-free code for integer division by - // a power of 2 (calculating the remainder via an additional multiplication - // (which gets simplified to an 'and') and subtraction) should be faster, and - // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to - // indicate that positive dividends are heavily favored, so the branching - // version performs better. - HMod* hmod = instr->hydrogen(); - int32_t mask = divisor < 0 ? 
-(divisor + 1) : (divisor - 1);
-  Label dividend_is_not_negative, done;
-  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
-    __ cmp(dividend, Operand::Zero());
-    __ b(pl, &dividend_is_not_negative);
-    // Note that this is correct even for kMinInt operands.
-    __ rsb(dividend, dividend, Operand::Zero());
-    __ and_(dividend, dividend, Operand(mask));
-    __ rsb(dividend, dividend, Operand::Zero(), SetCC);
-    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
-    }
-    __ b(&done);
-  }
-
-  __ bind(&dividend_is_not_negative);
-  __ and_(dividend, dividend, Operand(mask));
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoModByConstI(LModByConstI* instr) {
-  Register dividend = ToRegister(instr->dividend());
-  int32_t divisor = instr->divisor();
-  Register result = ToRegister(instr->result());
-  DCHECK(!dividend.is(result));
-
-  if (divisor == 0) {
-    DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
-    return;
-  }
-
-  __ TruncatingDiv(result, dividend, Abs(divisor));
-  __ mov(ip, Operand(Abs(divisor)));
-  __ smull(result, ip, result, ip);
-  __ sub(result, dividend, result, SetCC);
-
-  // Check for negative zero.
-  HMod* hmod = instr->hydrogen();
-  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    Label remainder_not_zero;
-    __ b(ne, &remainder_not_zero);
-    __ cmp(dividend, Operand::Zero());
-    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
-    __ bind(&remainder_not_zero);
-  }
-}
-
-
-void LCodeGen::DoModI(LModI* instr) {
-  HMod* hmod = instr->hydrogen();
-  if (CpuFeatures::IsSupported(SUDIV)) {
-    CpuFeatureScope scope(masm(), SUDIV);
-
-    Register left_reg = ToRegister(instr->left());
-    Register right_reg = ToRegister(instr->right());
-    Register result_reg = ToRegister(instr->result());
-
-    Label done;
-    // Check for x % 0, sdiv might signal an exception. We have to deopt in this
-    // case because we can't return a NaN.
-    if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
-      __ cmp(right_reg, Operand::Zero());
-      DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
-    }
-
-    // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
-    // want. We have to deopt if we care about -0, because we can't return that.
-    if (hmod->CheckFlag(HValue::kCanOverflow)) {
-      Label no_overflow_possible;
-      __ cmp(left_reg, Operand(kMinInt));
-      __ b(ne, &no_overflow_possible);
-      __ cmp(right_reg, Operand(-1));
-      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-        DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
-      } else {
-        __ b(ne, &no_overflow_possible);
-        __ mov(result_reg, Operand::Zero());
-        __ jmp(&done);
-      }
-      __ bind(&no_overflow_possible);
-    }
-
-    // For 'r3 = r1 % r2' we can have the following ARM code:
-    // sdiv r3, r1, r2
-    // mls r3, r3, r2, r1
-
-    __ sdiv(result_reg, left_reg, right_reg);
-    __ Mls(result_reg, result_reg, right_reg, left_reg);
-
-    // If we care about -0, test if the dividend is <0 and the result is 0.
-    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      __ cmp(result_reg, Operand::Zero());
-      __ b(ne, &done);
-      __ cmp(left_reg, Operand::Zero());
-      DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
-    }
-    __ bind(&done);
-
-  } else {
-    // General case, without any SDIV support.
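For orientation (illustrative only, not part of this patch): both the sdiv/mls sequence above and the VFP fallback below compute a truncated remainder, and the surrounding bailouts cover the cases an int32 register cannot represent. A minimal C++ sketch, with hypothetical names:

    #include <cstdint>
    #include <limits>

    // Truncated int32 remainder with the same edge cases the deleted
    // DoModI deoptimizes on (a sketch, not the original implementation).
    int32_t ModI(int32_t left, int32_t right, bool minus_zero_matters,
                 bool* deopt) {
      if (right == 0) { *deopt = true; return 0; }  // x % 0 is NaN in JS.
      if (left == std::numeric_limits<int32_t>::min() && right == -1) {
        if (minus_zero_matters) *deopt = true;      // result would be -0.
        return 0;                                   // avoids sdiv overflow.
      }
      int32_t q = left / right;                     // sdiv
      int32_t r = left - q * right;                 // mls
      if (r == 0 && left < 0 && minus_zero_matters) *deopt = true;  // -0.
      return r;
    }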
- Register left_reg = ToRegister(instr->left()); - Register right_reg = ToRegister(instr->right()); - Register result_reg = ToRegister(instr->result()); - Register scratch = scratch0(); - DCHECK(!scratch.is(left_reg)); - DCHECK(!scratch.is(right_reg)); - DCHECK(!scratch.is(result_reg)); - DwVfpRegister dividend = ToDoubleRegister(instr->temp()); - DwVfpRegister divisor = ToDoubleRegister(instr->temp2()); - DCHECK(!divisor.is(dividend)); - LowDwVfpRegister quotient = double_scratch0(); - DCHECK(!quotient.is(dividend)); - DCHECK(!quotient.is(divisor)); - - Label done; - // Check for x % 0, we have to deopt in this case because we can't return a - // NaN. - if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { - __ cmp(right_reg, Operand::Zero()); - DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero); - } - - __ Move(result_reg, left_reg); - // Load the arguments in VFP registers. The divisor value is preloaded - // before. Be careful that 'right_reg' is only live on entry. - // TODO(svenpanne) The last comments seems to be wrong nowadays. - __ vmov(double_scratch0().low(), left_reg); - __ vcvt_f64_s32(dividend, double_scratch0().low()); - __ vmov(double_scratch0().low(), right_reg); - __ vcvt_f64_s32(divisor, double_scratch0().low()); - - // We do not care about the sign of the divisor. Note that we still handle - // the kMinInt % -1 case correctly, though. - __ vabs(divisor, divisor); - // Compute the quotient and round it to a 32bit integer. - __ vdiv(quotient, dividend, divisor); - __ vcvt_s32_f64(quotient.low(), quotient); - __ vcvt_f64_s32(quotient, quotient.low()); - - // Compute the remainder in result. - __ vmul(double_scratch0(), divisor, quotient); - __ vcvt_s32_f64(double_scratch0().low(), double_scratch0()); - __ vmov(scratch, double_scratch0().low()); - __ sub(result_reg, left_reg, scratch, SetCC); - - // If we care about -0, test if the dividend is <0 and the result is 0. - if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { - __ b(ne, &done); - __ cmp(left_reg, Operand::Zero()); - DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero); - } - __ bind(&done); - } -} - - -void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - Register result = ToRegister(instr->result()); - DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); - DCHECK(!result.is(dividend)); - - // Check for (0 / -x) that will produce negative zero. - HDiv* hdiv = instr->hydrogen(); - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { - __ cmp(dividend, Operand::Zero()); - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); - } - // Check for (kMinInt / -1). - if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { - __ cmp(dividend, Operand(kMinInt)); - DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow); - } - // Deoptimize if remainder will not be 0. - if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && - divisor != 1 && divisor != -1) { - int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); - __ tst(dividend, Operand(mask)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision); - } - - if (divisor == -1) { // Nice shortcut, not needed for correctness. 
- __ rsb(result, dividend, Operand(0)); - return; - } - int32_t shift = WhichPowerOf2Abs(divisor); - if (shift == 0) { - __ mov(result, dividend); - } else if (shift == 1) { - __ add(result, dividend, Operand(dividend, LSR, 31)); - } else { - __ mov(result, Operand(dividend, ASR, 31)); - __ add(result, dividend, Operand(result, LSR, 32 - shift)); - } - if (shift > 0) __ mov(result, Operand(result, ASR, shift)); - if (divisor < 0) __ rsb(result, result, Operand(0)); -} - - -void LCodeGen::DoDivByConstI(LDivByConstI* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - Register result = ToRegister(instr->result()); - DCHECK(!dividend.is(result)); - - if (divisor == 0) { - DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero); - return; - } - - // Check for (0 / -x) that will produce negative zero. - HDiv* hdiv = instr->hydrogen(); - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { - __ cmp(dividend, Operand::Zero()); - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); - } - - __ TruncatingDiv(result, dividend, Abs(divisor)); - if (divisor < 0) __ rsb(result, result, Operand::Zero()); - - if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { - __ mov(ip, Operand(divisor)); - __ smull(scratch0(), ip, result, ip); - __ sub(scratch0(), scratch0(), dividend, SetCC); - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision); - } -} - - -// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. -void LCodeGen::DoDivI(LDivI* instr) { - HBinaryOperation* hdiv = instr->hydrogen(); - Register dividend = ToRegister(instr->dividend()); - Register divisor = ToRegister(instr->divisor()); - Register result = ToRegister(instr->result()); - - // Check for x / 0. - if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { - __ cmp(divisor, Operand::Zero()); - DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero); - } - - // Check for (0 / -x) that will produce negative zero. - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label positive; - if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { - // Do the test only if it hadn't be done above. - __ cmp(divisor, Operand::Zero()); - } - __ b(pl, &positive); - __ cmp(dividend, Operand::Zero()); - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); - __ bind(&positive); - } - - // Check for (kMinInt / -1). - if (hdiv->CheckFlag(HValue::kCanOverflow) && - (!CpuFeatures::IsSupported(SUDIV) || - !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { - // We don't need to check for overflow when truncating with sdiv - // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. - __ cmp(dividend, Operand(kMinInt)); - __ cmp(divisor, Operand(-1), eq); - DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow); - } - - if (CpuFeatures::IsSupported(SUDIV)) { - CpuFeatureScope scope(masm(), SUDIV); - __ sdiv(result, dividend, divisor); - } else { - DoubleRegister vleft = ToDoubleRegister(instr->temp()); - DoubleRegister vright = double_scratch0(); - __ vmov(double_scratch0().low(), dividend); - __ vcvt_f64_s32(vleft, double_scratch0().low()); - __ vmov(double_scratch0().low(), divisor); - __ vcvt_f64_s32(vright, double_scratch0().low()); - __ vdiv(vleft, vleft, vright); // vleft now contains the result. - __ vcvt_s32_f64(double_scratch0().low(), vleft); - __ vmov(result, double_scratch0().low()); - } - - if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { - // Compute remainder and deopt if it's not zero. 
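That exactness check, performed by the Mls/cmp just below, amounts to the following C++ (illustrative name, not part of the patch):

    // True when the truncated quotient reproduces the dividend exactly,
    // i.e. when no precision was lost.
    bool DivisionWasExact(int32_t dividend, int32_t divisor, int32_t quotient) {
      return dividend - quotient * divisor == 0;
    }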
- Register remainder = scratch0(); - __ Mls(remainder, result, divisor, dividend); - __ cmp(remainder, Operand::Zero()); - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision); - } -} - - -void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { - DwVfpRegister addend = ToDoubleRegister(instr->addend()); - DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier()); - DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand()); - - // This is computed in-place. - DCHECK(addend.is(ToDoubleRegister(instr->result()))); - - __ vmla(addend, multiplier, multiplicand); -} - - -void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) { - DwVfpRegister minuend = ToDoubleRegister(instr->minuend()); - DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier()); - DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand()); - - // This is computed in-place. - DCHECK(minuend.is(ToDoubleRegister(instr->result()))); - - __ vmls(minuend, multiplier, multiplicand); -} - - -void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { - Register dividend = ToRegister(instr->dividend()); - Register result = ToRegister(instr->result()); - int32_t divisor = instr->divisor(); - - // If the divisor is 1, return the dividend. - if (divisor == 1) { - __ Move(result, dividend); - return; - } - - // If the divisor is positive, things are easy: There can be no deopts and we - // can simply do an arithmetic right shift. - int32_t shift = WhichPowerOf2Abs(divisor); - if (divisor > 1) { - __ mov(result, Operand(dividend, ASR, shift)); - return; - } - - // If the divisor is negative, we have to negate and handle edge cases. - __ rsb(result, dividend, Operand::Zero(), SetCC); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); - } - - // Dividing by -1 is basically negation, unless we overflow. - if (divisor == -1) { - if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { - DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow); - } - return; - } - - // If the negation could not overflow, simply shifting is OK. - if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { - __ mov(result, Operand(result, ASR, shift)); - return; - } - - __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs); - __ mov(result, Operand(result, ASR, shift), LeaveCC, vc); -} - - -void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - Register result = ToRegister(instr->result()); - DCHECK(!dividend.is(result)); - - if (divisor == 0) { - DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero); - return; - } - - // Check for (0 / -x) that will produce negative zero. - HMathFloorOfDiv* hdiv = instr->hydrogen(); - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { - __ cmp(dividend, Operand::Zero()); - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); - } - - // Easy case: We need no dynamic check for the dividend and the flooring - // division is the same as the truncating division. - if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || - (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { - __ TruncatingDiv(result, dividend, Abs(divisor)); - if (divisor < 0) __ rsb(result, result, Operand::Zero()); - return; - } - - // In the general case we may need to adjust before and after the truncating - // division to get a flooring division. 
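The adjustment the instructions below perform can be summarized by this C++ sketch (illustrative name, assuming divisor != 0 and ignoring the deopt paths already handled above):

    // floor(dividend / divisor) built from a truncating division.
    int32_t FlooringDivByConst(int32_t dividend, int32_t divisor) {
      if (dividend == 0 || (dividend < 0) == (divisor < 0)) {
        return dividend / divisor;  // truncation already floors here.
      }
      // Operands have opposite signs: bias towards zero, divide, subtract 1.
      return (dividend + (divisor > 0 ? 1 : -1)) / divisor - 1;
    }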
- Register temp = ToRegister(instr->temp()); - DCHECK(!temp.is(dividend) && !temp.is(result)); - Label needs_adjustment, done; - __ cmp(dividend, Operand::Zero()); - __ b(divisor > 0 ? lt : gt, &needs_adjustment); - __ TruncatingDiv(result, dividend, Abs(divisor)); - if (divisor < 0) __ rsb(result, result, Operand::Zero()); - __ jmp(&done); - __ bind(&needs_adjustment); - __ add(temp, dividend, Operand(divisor > 0 ? 1 : -1)); - __ TruncatingDiv(result, temp, Abs(divisor)); - if (divisor < 0) __ rsb(result, result, Operand::Zero()); - __ sub(result, result, Operand(1)); - __ bind(&done); -} - - -// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. -void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { - HBinaryOperation* hdiv = instr->hydrogen(); - Register left = ToRegister(instr->dividend()); - Register right = ToRegister(instr->divisor()); - Register result = ToRegister(instr->result()); - - // Check for x / 0. - if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { - __ cmp(right, Operand::Zero()); - DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero); - } - - // Check for (0 / -x) that will produce negative zero. - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label positive; - if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { - // Do the test only if it hadn't be done above. - __ cmp(right, Operand::Zero()); - } - __ b(pl, &positive); - __ cmp(left, Operand::Zero()); - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); - __ bind(&positive); - } - - // Check for (kMinInt / -1). - if (hdiv->CheckFlag(HValue::kCanOverflow) && - (!CpuFeatures::IsSupported(SUDIV) || - !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { - // We don't need to check for overflow when truncating with sdiv - // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. - __ cmp(left, Operand(kMinInt)); - __ cmp(right, Operand(-1), eq); - DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow); - } - - if (CpuFeatures::IsSupported(SUDIV)) { - CpuFeatureScope scope(masm(), SUDIV); - __ sdiv(result, left, right); - } else { - DoubleRegister vleft = ToDoubleRegister(instr->temp()); - DoubleRegister vright = double_scratch0(); - __ vmov(double_scratch0().low(), left); - __ vcvt_f64_s32(vleft, double_scratch0().low()); - __ vmov(double_scratch0().low(), right); - __ vcvt_f64_s32(vright, double_scratch0().low()); - __ vdiv(vleft, vleft, vright); // vleft now contains the result. - __ vcvt_s32_f64(double_scratch0().low(), vleft); - __ vmov(result, double_scratch0().low()); - } - - Label done; - Register remainder = scratch0(); - __ Mls(remainder, result, right, left); - __ cmp(remainder, Operand::Zero()); - __ b(eq, &done); - __ eor(remainder, remainder, Operand(right)); - __ add(result, result, Operand(remainder, ASR, 31)); - __ bind(&done); -} - - -void LCodeGen::DoMulI(LMulI* instr) { - Register result = ToRegister(instr->result()); - // Note that result may alias left. - Register left = ToRegister(instr->left()); - LOperand* right_op = instr->right(); - - bool bailout_on_minus_zero = - instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); - bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); - - if (right_op->IsConstantOperand()) { - int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); - - if (bailout_on_minus_zero && (constant < 0)) { - // The case of a null constant will be handled separately. - // If constant is negative and left is null, the result should be -0. 
- __ cmp(left, Operand::Zero()); - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); - } - - switch (constant) { - case -1: - if (overflow) { - __ rsb(result, left, Operand::Zero(), SetCC); - DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow); - } else { - __ rsb(result, left, Operand::Zero()); - } - break; - case 0: - if (bailout_on_minus_zero) { - // If left is strictly negative and the constant is null, the - // result is -0. Deoptimize if required, otherwise return 0. - __ cmp(left, Operand::Zero()); - DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero); - } - __ mov(result, Operand::Zero()); - break; - case 1: - __ Move(result, left); - break; - default: - // Multiplying by powers of two and powers of two plus or minus - // one can be done faster with shifted operands. - // For other constants we emit standard code. - int32_t mask = constant >> 31; - uint32_t constant_abs = (constant + mask) ^ mask; - - if (base::bits::IsPowerOfTwo32(constant_abs)) { - int32_t shift = WhichPowerOf2(constant_abs); - __ mov(result, Operand(left, LSL, shift)); - // Correct the sign of the result is the constant is negative. - if (constant < 0) __ rsb(result, result, Operand::Zero()); - } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) { - int32_t shift = WhichPowerOf2(constant_abs - 1); - __ add(result, left, Operand(left, LSL, shift)); - // Correct the sign of the result is the constant is negative. - if (constant < 0) __ rsb(result, result, Operand::Zero()); - } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) { - int32_t shift = WhichPowerOf2(constant_abs + 1); - __ rsb(result, left, Operand(left, LSL, shift)); - // Correct the sign of the result is the constant is negative. - if (constant < 0) __ rsb(result, result, Operand::Zero()); - } else { - // Generate standard code. - __ mov(ip, Operand(constant)); - __ mul(result, left, ip); - } - } - - } else { - DCHECK(right_op->IsRegister()); - Register right = ToRegister(right_op); - - if (overflow) { - Register scratch = scratch0(); - // scratch:result = left * right. - if (instr->hydrogen()->representation().IsSmi()) { - __ SmiUntag(result, left); - __ smull(result, scratch, result, right); - } else { - __ smull(result, scratch, left, right); - } - __ cmp(scratch, Operand(result, ASR, 31)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow); - } else { - if (instr->hydrogen()->representation().IsSmi()) { - __ SmiUntag(result, left); - __ mul(result, result, right); - } else { - __ mul(result, left, right); - } - } - - if (bailout_on_minus_zero) { - Label done; - __ teq(left, Operand(right)); - __ b(pl, &done); - // Bail out if the result is minus zero. 
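The teq/b(pl) pair above is a sign test: a zero product can only stand for -0 when the operands' signs differ, i.e. when left XOR right is negative. Roughly, in C++ (illustrative, not part of the patch):

    #include <cstdint>

    // True when an int32 multiply whose truncated result is 0 represents -0.
    bool ProductIsMinusZero(int32_t left, int32_t right, int32_t result) {
      if ((left ^ right) >= 0) return false;  // same signs: product is >= 0.
      return result == 0;                     // 0 * negative or negative * 0.
    }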
- __ cmp(result, Operand::Zero()); - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); - __ bind(&done); - } - } -} - - -void LCodeGen::DoBitI(LBitI* instr) { - LOperand* left_op = instr->left(); - LOperand* right_op = instr->right(); - DCHECK(left_op->IsRegister()); - Register left = ToRegister(left_op); - Register result = ToRegister(instr->result()); - Operand right(no_reg); - - if (right_op->IsStackSlot()) { - right = Operand(EmitLoadRegister(right_op, ip)); - } else { - DCHECK(right_op->IsRegister() || right_op->IsConstantOperand()); - right = ToOperand(right_op); - } - - switch (instr->op()) { - case Token::BIT_AND: - __ and_(result, left, right); - break; - case Token::BIT_OR: - __ orr(result, left, right); - break; - case Token::BIT_XOR: - if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) { - __ mvn(result, Operand(left)); - } else { - __ eor(result, left, right); - } - break; - default: - UNREACHABLE(); - break; - } -} - - -void LCodeGen::DoShiftI(LShiftI* instr) { - // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so - // result may alias either of them. - LOperand* right_op = instr->right(); - Register left = ToRegister(instr->left()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - if (right_op->IsRegister()) { - // Mask the right_op operand. - __ and_(scratch, ToRegister(right_op), Operand(0x1F)); - switch (instr->op()) { - case Token::ROR: - __ mov(result, Operand(left, ROR, scratch)); - break; - case Token::SAR: - __ mov(result, Operand(left, ASR, scratch)); - break; - case Token::SHR: - if (instr->can_deopt()) { - __ mov(result, Operand(left, LSR, scratch), SetCC); - DeoptimizeIf(mi, instr, DeoptimizeReason::kNegativeValue); - } else { - __ mov(result, Operand(left, LSR, scratch)); - } - break; - case Token::SHL: - __ mov(result, Operand(left, LSL, scratch)); - break; - default: - UNREACHABLE(); - break; - } - } else { - // Mask the right_op operand. - int value = ToInteger32(LConstantOperand::cast(right_op)); - uint8_t shift_count = static_cast(value & 0x1F); - switch (instr->op()) { - case Token::ROR: - if (shift_count != 0) { - __ mov(result, Operand(left, ROR, shift_count)); - } else { - __ Move(result, left); - } - break; - case Token::SAR: - if (shift_count != 0) { - __ mov(result, Operand(left, ASR, shift_count)); - } else { - __ Move(result, left); - } - break; - case Token::SHR: - if (shift_count != 0) { - __ mov(result, Operand(left, LSR, shift_count)); - } else { - if (instr->can_deopt()) { - __ tst(left, Operand(0x80000000)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNegativeValue); - } - __ Move(result, left); - } - break; - case Token::SHL: - if (shift_count != 0) { - if (instr->hydrogen_value()->representation().IsSmi() && - instr->can_deopt()) { - if (shift_count != 1) { - __ mov(result, Operand(left, LSL, shift_count - 1)); - __ SmiTag(result, result, SetCC); - } else { - __ SmiTag(result, left, SetCC); - } - DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow); - } else { - __ mov(result, Operand(left, LSL, shift_count)); - } - } else { - __ Move(result, left); - } - break; - default: - UNREACHABLE(); - break; - } - } -} - - -void LCodeGen::DoSubI(LSubI* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - LOperand* result = instr->result(); - bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); - SBit set_cond = can_overflow ? 
SetCC : LeaveCC; - - if (right->IsStackSlot()) { - Register right_reg = EmitLoadRegister(right, ip); - __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); - } else { - DCHECK(right->IsRegister() || right->IsConstantOperand()); - __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); - } - - if (can_overflow) { - DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow); - } -} - - -void LCodeGen::DoRSubI(LRSubI* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - LOperand* result = instr->result(); - bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); - SBit set_cond = can_overflow ? SetCC : LeaveCC; - - if (right->IsStackSlot()) { - Register right_reg = EmitLoadRegister(right, ip); - __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); - } else { - DCHECK(right->IsRegister() || right->IsConstantOperand()); - __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); - } - - if (can_overflow) { - DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow); - } -} - - -void LCodeGen::DoConstantI(LConstantI* instr) { - __ mov(ToRegister(instr->result()), Operand(instr->value())); -} - - -void LCodeGen::DoConstantS(LConstantS* instr) { - __ mov(ToRegister(instr->result()), Operand(instr->value())); -} - - -void LCodeGen::DoConstantD(LConstantD* instr) { - DCHECK(instr->result()->IsDoubleRegister()); - DwVfpRegister result = ToDoubleRegister(instr->result()); -#if V8_HOST_ARCH_IA32 - // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator - // builds. - uint64_t bits = instr->bits(); - if ((bits & V8_UINT64_C(0x7FF8000000000000)) == - V8_UINT64_C(0x7FF0000000000000)) { - uint32_t lo = static_cast(bits); - uint32_t hi = static_cast(bits >> 32); - __ mov(ip, Operand(lo)); - __ mov(scratch0(), Operand(hi)); - __ vmov(result, ip, scratch0()); - return; - } -#endif - double v = instr->value(); - __ Vmov(result, v, scratch0()); -} - - -void LCodeGen::DoConstantE(LConstantE* instr) { - __ mov(ToRegister(instr->result()), Operand(instr->value())); -} - - -void LCodeGen::DoConstantT(LConstantT* instr) { - Handle object = instr->value(isolate()); - AllowDeferredHandleDereference smi_check; - __ Move(ToRegister(instr->result()), object); -} - - -MemOperand LCodeGen::BuildSeqStringOperand(Register string, - LOperand* index, - String::Encoding encoding) { - if (index->IsConstantOperand()) { - int offset = ToInteger32(LConstantOperand::cast(index)); - if (encoding == String::TWO_BYTE_ENCODING) { - offset *= kUC16Size; - } - STATIC_ASSERT(kCharSize == 1); - return FieldMemOperand(string, SeqString::kHeaderSize + offset); - } - Register scratch = scratch0(); - DCHECK(!scratch.is(string)); - DCHECK(!scratch.is(ToRegister(index))); - if (encoding == String::ONE_BYTE_ENCODING) { - __ add(scratch, string, Operand(ToRegister(index))); - } else { - STATIC_ASSERT(kUC16Size == 2); - __ add(scratch, string, Operand(ToRegister(index), LSL, 1)); - } - return FieldMemOperand(scratch, SeqString::kHeaderSize); -} - - -void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { - String::Encoding encoding = instr->hydrogen()->encoding(); - Register string = ToRegister(instr->string()); - Register result = ToRegister(instr->result()); - - if (FLAG_debug_code) { - Register scratch = scratch0(); - __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset)); - __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); - - __ and_(scratch, scratch, - 
Operand(kStringRepresentationMask | kStringEncodingMask)); - static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; - static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; - __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING - ? one_byte_seq_type : two_byte_seq_type)); - __ Check(eq, kUnexpectedStringType); - } - - MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); - if (encoding == String::ONE_BYTE_ENCODING) { - __ ldrb(result, operand); - } else { - __ ldrh(result, operand); - } -} - - -void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { - String::Encoding encoding = instr->hydrogen()->encoding(); - Register string = ToRegister(instr->string()); - Register value = ToRegister(instr->value()); - - if (FLAG_debug_code) { - Register index = ToRegister(instr->index()); - static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; - static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; - int encoding_mask = - instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING - ? one_byte_seq_type : two_byte_seq_type; - __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask); - } - - MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); - if (encoding == String::ONE_BYTE_ENCODING) { - __ strb(value, operand); - } else { - __ strh(value, operand); - } -} - - -void LCodeGen::DoAddI(LAddI* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - LOperand* result = instr->result(); - bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); - SBit set_cond = can_overflow ? SetCC : LeaveCC; - - if (right->IsStackSlot()) { - Register right_reg = EmitLoadRegister(right, ip); - __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); - } else { - DCHECK(right->IsRegister() || right->IsConstantOperand()); - __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); - } - - if (can_overflow) { - DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow); - } -} - - -void LCodeGen::DoMathMinMax(LMathMinMax* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - HMathMinMax::Operation operation = instr->hydrogen()->operation(); - if (instr->hydrogen()->representation().IsSmiOrInteger32()) { - Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge; - Register left_reg = ToRegister(left); - Operand right_op = (right->IsRegister() || right->IsConstantOperand()) - ? ToOperand(right) - : Operand(EmitLoadRegister(right, ip)); - Register result_reg = ToRegister(instr->result()); - __ cmp(left_reg, right_op); - __ Move(result_reg, left_reg, condition); - __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition)); - } else { - DCHECK(instr->hydrogen()->representation().IsDouble()); - DwVfpRegister left_reg = ToDoubleRegister(left); - DwVfpRegister right_reg = ToDoubleRegister(right); - DwVfpRegister result_reg = ToDoubleRegister(instr->result()); - Label result_is_nan, return_left, return_right, check_zero, done; - __ VFPCompareAndSetFlags(left_reg, right_reg); - if (operation == HMathMinMax::kMathMin) { - __ b(mi, &return_left); - __ b(gt, &return_right); - } else { - __ b(mi, &return_right); - __ b(gt, &return_left); - } - __ b(vs, &result_is_nan); - // Left equals right => check for -0. - __ VFPCompareAndSetFlags(left_reg, 0.0); - if (left_reg.is(result_reg) || right_reg.is(result_reg)) { - __ b(ne, &done); // left == right != 0. 
- } else { - __ b(ne, &return_left); // left == right != 0. - } - // At this point, both left and right are either 0 or -0. - if (operation == HMathMinMax::kMathMin) { - // We could use a single 'vorr' instruction here if we had NEON support. - // The algorithm is: -((-L) + (-R)), which in case of L and R being - // different registers is most efficiently expressed as -((-L) - R). - __ vneg(left_reg, left_reg); - if (left_reg.is(right_reg)) { - __ vadd(result_reg, left_reg, right_reg); - } else { - __ vsub(result_reg, left_reg, right_reg); - } - __ vneg(result_reg, result_reg); - } else { - // Since we operate on +0 and/or -0, vadd and vand have the same effect; - // the decision for vadd is easy because vand is a NEON instruction. - __ vadd(result_reg, left_reg, right_reg); - } - __ b(&done); - - __ bind(&result_is_nan); - __ vadd(result_reg, left_reg, right_reg); - __ b(&done); - - __ bind(&return_right); - __ Move(result_reg, right_reg); - if (!left_reg.is(result_reg)) { - __ b(&done); - } - - __ bind(&return_left); - __ Move(result_reg, left_reg); - - __ bind(&done); - } -} - - -void LCodeGen::DoArithmeticD(LArithmeticD* instr) { - DwVfpRegister left = ToDoubleRegister(instr->left()); - DwVfpRegister right = ToDoubleRegister(instr->right()); - DwVfpRegister result = ToDoubleRegister(instr->result()); - switch (instr->op()) { - case Token::ADD: - __ vadd(result, left, right); - break; - case Token::SUB: - __ vsub(result, left, right); - break; - case Token::MUL: - __ vmul(result, left, right); - break; - case Token::DIV: - __ vdiv(result, left, right); - break; - case Token::MOD: { - __ PrepareCallCFunction(0, 2, scratch0()); - __ MovToFloatParameters(left, right); - __ CallCFunction( - ExternalReference::mod_two_doubles_operation(isolate()), - 0, 2); - // Move the result in the double result register. 
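The runtime call above has the semantics of the C library fmod, which is how JS % behaves on doubles (a sketch under that assumption, not the patch's own code):

    #include <cmath>

    // Illustrative equivalent of the mod_two_doubles runtime call.
    double ModTwoDoubles(double left, double right) {
      return std::fmod(left, right);
    }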
- __ MovFromFloatResult(result); - break; - } - default: - UNREACHABLE(); - break; - } -} - - -void LCodeGen::DoArithmeticT(LArithmeticT* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - DCHECK(ToRegister(instr->left()).is(r1)); - DCHECK(ToRegister(instr->right()).is(r0)); - DCHECK(ToRegister(instr->result()).is(r0)); - - UNREACHABLE(); -} - - -template -void LCodeGen::EmitBranch(InstrType instr, Condition condition) { - int left_block = instr->TrueDestination(chunk_); - int right_block = instr->FalseDestination(chunk_); - - int next_block = GetNextEmittedBlock(); - - if (right_block == left_block || condition == al) { - EmitGoto(left_block); - } else if (left_block == next_block) { - __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block)); - } else if (right_block == next_block) { - __ b(condition, chunk_->GetAssemblyLabel(left_block)); - } else { - __ b(condition, chunk_->GetAssemblyLabel(left_block)); - __ b(chunk_->GetAssemblyLabel(right_block)); - } -} - - -template -void LCodeGen::EmitTrueBranch(InstrType instr, Condition condition) { - int true_block = instr->TrueDestination(chunk_); - __ b(condition, chunk_->GetAssemblyLabel(true_block)); -} - - -template -void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) { - int false_block = instr->FalseDestination(chunk_); - __ b(condition, chunk_->GetAssemblyLabel(false_block)); -} - - -void LCodeGen::DoDebugBreak(LDebugBreak* instr) { - __ stop("LBreak"); -} - - -void LCodeGen::DoBranch(LBranch* instr) { - Representation r = instr->hydrogen()->value()->representation(); - if (r.IsInteger32() || r.IsSmi()) { - DCHECK(!info()->IsStub()); - Register reg = ToRegister(instr->value()); - __ cmp(reg, Operand::Zero()); - EmitBranch(instr, ne); - } else if (r.IsDouble()) { - DCHECK(!info()->IsStub()); - DwVfpRegister reg = ToDoubleRegister(instr->value()); - // Test the double value. Zero and NaN are false. - __ VFPCompareAndSetFlags(reg, 0.0); - __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false) - EmitBranch(instr, ne); - } else { - DCHECK(r.IsTagged()); - Register reg = ToRegister(instr->value()); - HType type = instr->hydrogen()->value()->type(); - if (type.IsBoolean()) { - DCHECK(!info()->IsStub()); - __ CompareRoot(reg, Heap::kTrueValueRootIndex); - EmitBranch(instr, eq); - } else if (type.IsSmi()) { - DCHECK(!info()->IsStub()); - __ cmp(reg, Operand::Zero()); - EmitBranch(instr, ne); - } else if (type.IsJSArray()) { - DCHECK(!info()->IsStub()); - EmitBranch(instr, al); - } else if (type.IsHeapNumber()) { - DCHECK(!info()->IsStub()); - DwVfpRegister dbl_scratch = double_scratch0(); - __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); - // Test the double value. Zero and NaN are false. - __ VFPCompareAndSetFlags(dbl_scratch, 0.0); - __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN) - EmitBranch(instr, ne); - } else if (type.IsString()) { - DCHECK(!info()->IsStub()); - __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset)); - __ cmp(ip, Operand::Zero()); - EmitBranch(instr, ne); - } else { - ToBooleanHints expected = instr->hydrogen()->expected_input_types(); - // Avoid deopts in the case where we've never executed this path before. - if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny; - - if (expected & ToBooleanHint::kUndefined) { - // undefined -> false. - __ CompareRoot(reg, Heap::kUndefinedValueRootIndex); - __ b(eq, instr->FalseLabel(chunk_)); - } - if (expected & ToBooleanHint::kBoolean) { - // Boolean -> its value. 
-        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
-        __ b(eq, instr->TrueLabel(chunk_));
-        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
-        __ b(eq, instr->FalseLabel(chunk_));
-      }
-      if (expected & ToBooleanHint::kNull) {
-        // 'null' -> false.
-        __ CompareRoot(reg, Heap::kNullValueRootIndex);
-        __ b(eq, instr->FalseLabel(chunk_));
-      }
-
-      if (expected & ToBooleanHint::kSmallInteger) {
-        // Smis: 0 -> false, all other -> true.
-        __ cmp(reg, Operand::Zero());
-        __ b(eq, instr->FalseLabel(chunk_));
-        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
-      } else if (expected & ToBooleanHint::kNeedsMap) {
-        // If we need a map later and have a Smi -> deopt.
-        __ SmiTst(reg);
-        DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi);
-      }
-
-      const Register map = scratch0();
-      if (expected & ToBooleanHint::kNeedsMap) {
-        __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));
-
-        if (expected & ToBooleanHint::kCanBeUndetectable) {
-          // Undetectable -> false.
-          __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
-          __ tst(ip, Operand(1 << Map::kIsUndetectable));
-          __ b(ne, instr->FalseLabel(chunk_));
-        }
-      }
-
-      if (expected & ToBooleanHint::kReceiver) {
-        // spec object -> true.
-        __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
-        __ b(ge, instr->TrueLabel(chunk_));
-      }
-
-      if (expected & ToBooleanHint::kString) {
-        // String value -> false iff empty.
-        Label not_string;
-        __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
-        __ b(ge, &not_string);
-        __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
-        __ cmp(ip, Operand::Zero());
-        __ b(ne, instr->TrueLabel(chunk_));
-        __ b(instr->FalseLabel(chunk_));
-        __ bind(&not_string);
-      }
-
-      if (expected & ToBooleanHint::kSymbol) {
-        // Symbol value -> true.
-        __ CompareInstanceType(map, ip, SYMBOL_TYPE);
-        __ b(eq, instr->TrueLabel(chunk_));
-      }
-
-      if (expected & ToBooleanHint::kHeapNumber) {
-        // heap number -> false iff +0, -0, or NaN.
-        DwVfpRegister dbl_scratch = double_scratch0();
-        Label not_heap_number;
-        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
-        __ b(ne, &not_heap_number);
-        __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
-        __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
-        __ cmp(r0, r0, vs); // NaN -> false.
-        __ b(eq, instr->FalseLabel(chunk_)); // +0, -0 -> false.
-        __ b(instr->TrueLabel(chunk_));
-        __ bind(&not_heap_number);
-      }
-
-      if (expected != ToBooleanHint::kAny) {
-        // We've seen something for the first time -> deopt.
-        // This can only happen if we are not generic already.
-        DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject);
-      }
-    }
-  }
-}
-
-
-void LCodeGen::EmitGoto(int block) {
-  if (!IsNextEmittedBlock(block)) {
-    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
-  }
-}
-
-
-void LCodeGen::DoGoto(LGoto* instr) {
-  EmitGoto(instr->block_id());
-}
-
-
-Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
-  Condition cond = kNoCondition;
-  switch (op) {
-    case Token::EQ:
-    case Token::EQ_STRICT:
-      cond = eq;
-      break;
-    case Token::NE:
-    case Token::NE_STRICT:
-      cond = ne;
-      break;
-    case Token::LT:
-      cond = is_unsigned ? lo : lt;
-      break;
-    case Token::GT:
-      cond = is_unsigned ? hi : gt;
-      break;
-    case Token::LTE:
-      cond = is_unsigned ? ls : le;
-      break;
-    case Token::GTE:
-      cond = is_unsigned ?
hs : ge; - break; - case Token::IN: - case Token::INSTANCEOF: - default: - UNREACHABLE(); - } - return cond; -} - - -void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - bool is_unsigned = - instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) || - instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32); - Condition cond = TokenToCondition(instr->op(), is_unsigned); - - if (left->IsConstantOperand() && right->IsConstantOperand()) { - // We can statically evaluate the comparison. - double left_val = ToDouble(LConstantOperand::cast(left)); - double right_val = ToDouble(LConstantOperand::cast(right)); - int next_block = Token::EvalComparison(instr->op(), left_val, right_val) - ? instr->TrueDestination(chunk_) - : instr->FalseDestination(chunk_); - EmitGoto(next_block); - } else { - if (instr->is_double()) { - // Compare left and right operands as doubles and load the - // resulting flags into the normal status register. - __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right)); - // If a NaN is involved, i.e. the result is unordered (V set), - // jump to false block label. - __ b(vs, instr->FalseLabel(chunk_)); - } else { - if (right->IsConstantOperand()) { - int32_t value = ToInteger32(LConstantOperand::cast(right)); - if (instr->hydrogen_value()->representation().IsSmi()) { - __ cmp(ToRegister(left), Operand(Smi::FromInt(value))); - } else { - __ cmp(ToRegister(left), Operand(value)); - } - } else if (left->IsConstantOperand()) { - int32_t value = ToInteger32(LConstantOperand::cast(left)); - if (instr->hydrogen_value()->representation().IsSmi()) { - __ cmp(ToRegister(right), Operand(Smi::FromInt(value))); - } else { - __ cmp(ToRegister(right), Operand(value)); - } - // We commuted the operands, so commute the condition. - cond = CommuteCondition(cond); - } else { - __ cmp(ToRegister(left), ToRegister(right)); - } - } - EmitBranch(instr, cond); - } -} - - -void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { - Register left = ToRegister(instr->left()); - Register right = ToRegister(instr->right()); - - __ cmp(left, Operand(right)); - EmitBranch(instr, eq); -} - - -void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { - if (instr->hydrogen()->representation().IsTagged()) { - Register input_reg = ToRegister(instr->object()); - __ mov(ip, Operand(factory()->the_hole_value())); - __ cmp(input_reg, ip); - EmitBranch(instr, eq); - return; - } - - DwVfpRegister input_reg = ToDoubleRegister(instr->object()); - __ VFPCompareAndSetFlags(input_reg, input_reg); - EmitFalseBranch(instr, vc); - - Register scratch = scratch0(); - __ VmovHigh(scratch, input_reg); - __ cmp(scratch, Operand(kHoleNanUpper32)); - EmitBranch(instr, eq); -} - - -Condition LCodeGen::EmitIsString(Register input, - Register temp1, - Label* is_not_string, - SmiCheck check_needed = INLINE_SMI_CHECK) { - if (check_needed == INLINE_SMI_CHECK) { - __ JumpIfSmi(input, is_not_string); - } - __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE); - - return lt; -} - - -void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { - Register reg = ToRegister(instr->value()); - Register temp1 = ToRegister(instr->temp()); - - SmiCheck check_needed = - instr->hydrogen()->value()->type().IsHeapObject() - ? 
OMIT_SMI_CHECK : INLINE_SMI_CHECK; - Condition true_cond = - EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed); - - EmitBranch(instr, true_cond); -} - - -void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { - Register input_reg = EmitLoadRegister(instr->value(), ip); - __ SmiTst(input_reg); - EmitBranch(instr, eq); -} - - -void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { - Register input = ToRegister(instr->value()); - Register temp = ToRegister(instr->temp()); - - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - __ JumpIfSmi(input, instr->FalseLabel(chunk_)); - } - __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset)); - __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); - __ tst(temp, Operand(1 << Map::kIsUndetectable)); - EmitBranch(instr, ne); -} - - -static Condition ComputeCompareCondition(Token::Value op) { - switch (op) { - case Token::EQ_STRICT: - case Token::EQ: - return eq; - case Token::LT: - return lt; - case Token::GT: - return gt; - case Token::LTE: - return le; - case Token::GTE: - return ge; - default: - UNREACHABLE(); - } -} - - -void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - DCHECK(ToRegister(instr->left()).is(r1)); - DCHECK(ToRegister(instr->right()).is(r0)); - - Handle code = CodeFactory::StringCompare(isolate(), instr->op()).code(); - CallCode(code, RelocInfo::CODE_TARGET, instr); - __ CompareRoot(r0, Heap::kTrueValueRootIndex); - EmitBranch(instr, eq); -} - - -static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { - InstanceType from = instr->from(); - InstanceType to = instr->to(); - if (from == FIRST_TYPE) return to; - DCHECK(from == to || to == LAST_TYPE); - return from; -} - - -static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { - InstanceType from = instr->from(); - InstanceType to = instr->to(); - if (from == to) return eq; - if (to == LAST_TYPE) return hs; - if (from == FIRST_TYPE) return ls; - UNREACHABLE(); -} - - -void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { - Register scratch = scratch0(); - Register input = ToRegister(instr->value()); - - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - __ JumpIfSmi(input, instr->FalseLabel(chunk_)); - } - - __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen())); - EmitBranch(instr, BranchCondition(instr->hydrogen())); -} - -// Branches to a label or falls through with the answer in flags. Trashes -// the temp registers, but not the input. -void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false, - Handle class_name, Register input, - Register temp, Register temp2) { - DCHECK(!input.is(temp)); - DCHECK(!input.is(temp2)); - DCHECK(!temp.is(temp2)); - - __ JumpIfSmi(input, is_false); - - __ CompareObjectType(input, temp, temp2, FIRST_FUNCTION_TYPE); - STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE); - if (String::Equals(isolate()->factory()->Function_string(), class_name)) { - __ b(hs, is_true); - } else { - __ b(hs, is_false); - } - - // Check if the constructor in the map is a function. - Register instance_type = ip; - __ GetMapConstructor(temp, temp, temp2, instance_type); - - // Objects with a non-function constructor have class 'Object'. - __ cmp(instance_type, Operand(JS_FUNCTION_TYPE)); - if (String::Equals(isolate()->factory()->Object_string(), class_name)) { - __ b(ne, is_true); - } else { - __ b(ne, is_false); - } - - // temp now contains the constructor function. 
Grab the - // instance class name from there. - __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset)); - __ ldr(temp, - FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset)); - // The class name we are testing against is internalized since it's a literal. - // The name in the constructor is internalized because of the way the context - // is booted. This routine isn't expected to work for random API-created - // classes and it doesn't have to because you can't access it with natives - // syntax. Since both sides are internalized it is sufficient to use an - // identity comparison. - __ cmp(temp, Operand(class_name)); - // End with the answer in flags. -} - -void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { - Register input = ToRegister(instr->value()); - Register temp = scratch0(); - Register temp2 = ToRegister(instr->temp()); - Handle class_name = instr->hydrogen()->class_name(); - - EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), - class_name, input, temp, temp2); - - EmitBranch(instr, eq); -} - -void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { - Register reg = ToRegister(instr->value()); - Register temp = ToRegister(instr->temp()); - - __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset)); - __ cmp(temp, Operand(instr->map())); - EmitBranch(instr, eq); -} - - -void LCodeGen::DoHasInPrototypeChainAndBranch( - LHasInPrototypeChainAndBranch* instr) { - Register const object = ToRegister(instr->object()); - Register const object_map = scratch0(); - Register const object_instance_type = ip; - Register const object_prototype = object_map; - Register const prototype = ToRegister(instr->prototype()); - - // The {object} must be a spec object. It's sufficient to know that {object} - // is not a smi, since all other non-spec objects have {null} prototypes and - // will be ruled out below. - if (instr->hydrogen()->ObjectNeedsSmiCheck()) { - __ SmiTst(object); - EmitFalseBranch(instr, eq); - } - - // Loop through the {object}s prototype chain looking for the {prototype}. - __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); - Label loop; - __ bind(&loop); - - // Deoptimize if the object needs to be access checked. - __ ldrb(object_instance_type, - FieldMemOperand(object_map, Map::kBitFieldOffset)); - __ tst(object_instance_type, Operand(1 << Map::kIsAccessCheckNeeded)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck); - // Deoptimize for proxies. - __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE); - DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy); - - __ ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset)); - __ CompareRoot(object_prototype, Heap::kNullValueRootIndex); - EmitFalseBranch(instr, eq); - __ cmp(object_prototype, prototype); - EmitTrueBranch(instr, eq); - __ ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset)); - __ b(&loop); -} - - -void LCodeGen::DoCmpT(LCmpT* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - Token::Value op = instr->op(); - - Handle ic = CodeFactory::CompareIC(isolate(), op).code(); - CallCode(ic, RelocInfo::CODE_TARGET, instr); - // This instruction also signals no smi code inlined. 
- __ cmp(r0, Operand::Zero()); - - Condition condition = ComputeCompareCondition(op); - __ LoadRoot(ToRegister(instr->result()), - Heap::kTrueValueRootIndex, - condition); - __ LoadRoot(ToRegister(instr->result()), - Heap::kFalseValueRootIndex, - NegateCondition(condition)); -} - - -void LCodeGen::DoReturn(LReturn* instr) { - if (FLAG_trace && info()->IsOptimizing()) { - // Push the return value on the stack as the parameter. - // Runtime::TraceExit returns its parameter in r0. We're leaving the code - // managed by the register allocator and tearing down the frame, it's - // safe to write to the context register. - __ push(r0); - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - __ CallRuntime(Runtime::kTraceExit); - } - if (info()->saves_caller_doubles()) { - RestoreCallerDoubles(); - } - if (NeedsEagerFrame()) { - masm_->LeaveFrame(StackFrame::JAVA_SCRIPT); - } - { ConstantPoolUnavailableScope constant_pool_unavailable(masm()); - if (instr->has_constant_parameter_count()) { - int parameter_count = ToInteger32(instr->constant_parameter_count()); - int32_t sp_delta = (parameter_count + 1) * kPointerSize; - if (sp_delta != 0) { - __ add(sp, sp, Operand(sp_delta)); - } - } else { - DCHECK(info()->IsStub()); // Functions would need to drop one more value. - Register reg = ToRegister(instr->parameter_count()); - // The argument count parameter is a smi - __ SmiUntag(reg); - __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2)); - } - - __ Jump(lr); - } -} - - -void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { - Register context = ToRegister(instr->context()); - Register result = ToRegister(instr->result()); - __ ldr(result, ContextMemOperand(context, instr->slot_index())); - if (instr->hydrogen()->RequiresHoleCheck()) { - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(result, ip); - if (instr->hydrogen()->DeoptimizesOnHole()) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); - } else { - __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq); - } - } -} - - -void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { - Register context = ToRegister(instr->context()); - Register value = ToRegister(instr->value()); - Register scratch = scratch0(); - MemOperand target = ContextMemOperand(context, instr->slot_index()); - - Label skip_assignment; - - if (instr->hydrogen()->RequiresHoleCheck()) { - __ ldr(scratch, target); - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(scratch, ip); - if (instr->hydrogen()->DeoptimizesOnHole()) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); - } else { - __ b(ne, &skip_assignment); - } - } - - __ str(value, target); - if (instr->hydrogen()->NeedsWriteBarrier()) { - SmiCheck check_needed = - instr->hydrogen()->value()->type().IsHeapObject() - ? 
OMIT_SMI_CHECK : INLINE_SMI_CHECK; - __ RecordWriteContextSlot(context, - target.offset(), - value, - scratch, - GetLinkRegisterState(), - kSaveFPRegs, - EMIT_REMEMBERED_SET, - check_needed); - } - - __ bind(&skip_assignment); -} - - -void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { - HObjectAccess access = instr->hydrogen()->access(); - int offset = access.offset(); - Register object = ToRegister(instr->object()); - - if (access.IsExternalMemory()) { - Register result = ToRegister(instr->result()); - MemOperand operand = MemOperand(object, offset); - __ Load(result, operand, access.representation()); - return; - } - - if (instr->hydrogen()->representation().IsDouble()) { - DwVfpRegister result = ToDoubleRegister(instr->result()); - __ vldr(result, FieldMemOperand(object, offset)); - return; - } - - Register result = ToRegister(instr->result()); - if (!access.IsInobject()) { - __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); - object = result; - } - MemOperand operand = FieldMemOperand(object, offset); - __ Load(result, operand, access.representation()); -} - - -void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { - Register scratch = scratch0(); - Register function = ToRegister(instr->function()); - Register result = ToRegister(instr->result()); - - // Get the prototype or initial map from the function. - __ ldr(result, - FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); - - // Check that the function has a prototype or an initial map. - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(result, ip); - DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); - - // If the function does not have an initial map, we're done. - Label done; - __ CompareObjectType(result, scratch, scratch, MAP_TYPE); - __ b(ne, &done); - - // Get the prototype from the initial map. - __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); - - // All done. - __ bind(&done); -} - - -void LCodeGen::DoLoadRoot(LLoadRoot* instr) { - Register result = ToRegister(instr->result()); - __ LoadRoot(result, instr->index()); -} - - -void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { - Register arguments = ToRegister(instr->arguments()); - Register result = ToRegister(instr->result()); - // There are two words between the frame pointer and the last argument. - // Subtracting from length accounts for one of them add one more. 
- if (instr->length()->IsConstantOperand()) { - int const_length = ToInteger32(LConstantOperand::cast(instr->length())); - if (instr->index()->IsConstantOperand()) { - int const_index = ToInteger32(LConstantOperand::cast(instr->index())); - int index = (const_length - const_index) + 1; - __ ldr(result, MemOperand(arguments, index * kPointerSize)); - } else { - Register index = ToRegister(instr->index()); - __ rsb(result, index, Operand(const_length + 1)); - __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2)); - } - } else if (instr->index()->IsConstantOperand()) { - Register length = ToRegister(instr->length()); - int const_index = ToInteger32(LConstantOperand::cast(instr->index())); - int loc = const_index - 1; - if (loc != 0) { - __ sub(result, length, Operand(loc)); - __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2)); - } else { - __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2)); - } - } else { - Register length = ToRegister(instr->length()); - Register index = ToRegister(instr->index()); - __ sub(result, length, index); - __ add(result, result, Operand(1)); - __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2)); - } -} - - -void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { - Register external_pointer = ToRegister(instr->elements()); - Register key = no_reg; - ElementsKind elements_kind = instr->elements_kind(); - bool key_is_constant = instr->key()->IsConstantOperand(); - int constant_key = 0; - if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort(kArrayIndexConstantValueTooBig); - } - } else { - key = ToRegister(instr->key()); - } - int element_size_shift = ElementsKindToShiftSize(elements_kind); - int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) - ? (element_size_shift - kSmiTagSize) : element_size_shift; - int base_offset = instr->base_offset(); - - if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) { - DwVfpRegister result = ToDoubleRegister(instr->result()); - Operand operand = key_is_constant - ? Operand(constant_key << element_size_shift) - : Operand(key, LSL, shift_size); - __ add(scratch0(), external_pointer, operand); - if (elements_kind == FLOAT32_ELEMENTS) { - __ vldr(double_scratch0().low(), scratch0(), base_offset); - __ vcvt_f64_f32(result, double_scratch0().low()); - } else { // i.e. 
elements_kind == EXTERNAL_DOUBLE_ELEMENTS - __ vldr(result, scratch0(), base_offset); - } - } else { - Register result = ToRegister(instr->result()); - MemOperand mem_operand = PrepareKeyedOperand( - key, external_pointer, key_is_constant, constant_key, - element_size_shift, shift_size, base_offset); - switch (elements_kind) { - case INT8_ELEMENTS: - __ ldrsb(result, mem_operand); - break; - case UINT8_ELEMENTS: - case UINT8_CLAMPED_ELEMENTS: - __ ldrb(result, mem_operand); - break; - case INT16_ELEMENTS: - __ ldrsh(result, mem_operand); - break; - case UINT16_ELEMENTS: - __ ldrh(result, mem_operand); - break; - case INT32_ELEMENTS: - __ ldr(result, mem_operand); - break; - case UINT32_ELEMENTS: - __ ldr(result, mem_operand); - if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { - __ cmp(result, Operand(0x80000000)); - DeoptimizeIf(cs, instr, DeoptimizeReason::kNegativeValue); - } - break; - case FLOAT32_ELEMENTS: - case FLOAT64_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case DICTIONARY_ELEMENTS: - case FAST_SLOPPY_ARGUMENTS_ELEMENTS: - case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: - case FAST_STRING_WRAPPER_ELEMENTS: - case SLOW_STRING_WRAPPER_ELEMENTS: - case NO_ELEMENTS: - UNREACHABLE(); - break; - } - } -} - - -void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { - Register elements = ToRegister(instr->elements()); - bool key_is_constant = instr->key()->IsConstantOperand(); - Register key = no_reg; - DwVfpRegister result = ToDoubleRegister(instr->result()); - Register scratch = scratch0(); - - int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); - - int base_offset = instr->base_offset(); - if (key_is_constant) { - int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort(kArrayIndexConstantValueTooBig); - } - base_offset += constant_key * kDoubleSize; - } - __ add(scratch, elements, Operand(base_offset)); - - if (!key_is_constant) { - key = ToRegister(instr->key()); - int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) - ? (element_size_shift - kSmiTagSize) : element_size_shift; - __ add(scratch, scratch, Operand(key, LSL, shift_size)); - } - - __ vldr(result, scratch, 0); - - if (instr->hydrogen()->RequiresHoleCheck()) { - __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32))); - __ cmp(scratch, Operand(kHoleNanUpper32)); - DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); - } -} - - -void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { - Register elements = ToRegister(instr->elements()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - Register store_base = scratch; - int offset = instr->base_offset(); - - if (instr->key()->IsConstantOperand()) { - LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); - offset += ToInteger32(const_operand) * kPointerSize; - store_base = elements; - } else { - Register key = ToRegister(instr->key()); - // Even though the HLoadKeyed instruction forces the input - // representation for the key to be an integer, the input gets replaced - // during bound check elimination with the index argument to the bounds - // check, which can be tagged, so that case must be handled here, too. 
- if (instr->hydrogen()->key()->representation().IsSmi()) { - __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key)); - } else { - __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); - } - } - __ ldr(result, MemOperand(store_base, offset)); - - // Check for the hole value. - if (instr->hydrogen()->RequiresHoleCheck()) { - if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { - __ SmiTst(result); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi); - } else { - __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); - __ cmp(result, scratch); - DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); - } - } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) { - DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS); - Label done; - __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); - __ cmp(result, scratch); - __ b(ne, &done); - if (info()->IsStub()) { - // A stub can safely convert the hole to undefined only if the array - // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise - // it needs to bail out. - __ LoadRoot(result, Heap::kArrayProtectorRootIndex); - __ ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset)); - __ cmp(result, Operand(Smi::FromInt(Isolate::kProtectorValid))); - DeoptimizeIf(ne, instr, DeoptimizeReason::kHole); - } - __ LoadRoot(result, Heap::kUndefinedValueRootIndex); - __ bind(&done); - } -} - - -void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { - if (instr->is_fixed_typed_array()) { - DoLoadKeyedExternalArray(instr); - } else if (instr->hydrogen()->representation().IsDouble()) { - DoLoadKeyedFixedDoubleArray(instr); - } else { - DoLoadKeyedFixedArray(instr); - } -} - - -MemOperand LCodeGen::PrepareKeyedOperand(Register key, - Register base, - bool key_is_constant, - int constant_key, - int element_size, - int shift_size, - int base_offset) { - if (key_is_constant) { - return MemOperand(base, (constant_key << element_size) + base_offset); - } - - if (base_offset == 0) { - if (shift_size >= 0) { - return MemOperand(base, key, LSL, shift_size); - } else { - DCHECK_EQ(-1, shift_size); - return MemOperand(base, key, LSR, 1); - } - } - - if (shift_size >= 0) { - __ add(scratch0(), base, Operand(key, LSL, shift_size)); - return MemOperand(scratch0(), base_offset); - } else { - DCHECK_EQ(-1, shift_size); - __ add(scratch0(), base, Operand(key, ASR, 1)); - return MemOperand(scratch0(), base_offset); - } -} - - -void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { - Register scratch = scratch0(); - Register result = ToRegister(instr->result()); - - if (instr->hydrogen()->from_inlined()) { - __ sub(result, sp, Operand(2 * kPointerSize)); - } else if (instr->hydrogen()->arguments_adaptor()) { - // Check if the calling frame is an arguments adaptor frame. - Label done, adapted; - __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ ldr(result, MemOperand(scratch, - CommonFrameConstants::kContextOrFrameTypeOffset)); - __ cmp(result, - Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); - - // Result is the frame pointer for the frame if not adapted and for the real - // frame below the adaptor frame if adapted. 
- __ mov(result, fp, LeaveCC, ne); - __ mov(result, scratch, LeaveCC, eq); - } else { - __ mov(result, fp); - } -} - - -void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { - Register elem = ToRegister(instr->elements()); - Register result = ToRegister(instr->result()); - - Label done; - - // If no arguments adaptor frame the number of arguments is fixed. - __ cmp(fp, elem); - __ mov(result, Operand(scope()->num_parameters())); - __ b(eq, &done); - - // Arguments adaptor frame present. Get argument length from there. - __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ ldr(result, - MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ SmiUntag(result); - - // Argument length is in result register. - __ bind(&done); -} - - -void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { - Register receiver = ToRegister(instr->receiver()); - Register function = ToRegister(instr->function()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - - // If the receiver is null or undefined, we have to pass the global - // object as a receiver to normal functions. Values have to be - // passed unchanged to builtins and strict-mode functions. - Label global_object, result_in_receiver; - - if (!instr->hydrogen()->known_function()) { - // Do not transform the receiver to object for strict mode functions or - // builtins. - __ ldr(scratch, - FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); - __ ldr(scratch, - FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); - __ tst(scratch, Operand(SharedFunctionInfo::IsStrictBit::kMask | - SharedFunctionInfo::IsNativeBit::kMask)); - __ b(ne, &result_in_receiver); - } - - // Normal function. Replace undefined or null with global receiver. - __ LoadRoot(scratch, Heap::kNullValueRootIndex); - __ cmp(receiver, scratch); - __ b(eq, &global_object); - __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); - __ cmp(receiver, scratch); - __ b(eq, &global_object); - - // Deoptimize if the receiver is not a JS object. - __ SmiTst(receiver); - DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi); - __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE); - DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject); - - __ b(&result_in_receiver); - __ bind(&global_object); - __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset)); - __ ldr(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX)); - __ ldr(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX)); - - if (result.is(receiver)) { - __ bind(&result_in_receiver); - } else { - Label result_ok; - __ b(&result_ok); - __ bind(&result_in_receiver); - __ mov(result, receiver); - __ bind(&result_ok); - } -} - - -void LCodeGen::DoApplyArguments(LApplyArguments* instr) { - Register receiver = ToRegister(instr->receiver()); - Register function = ToRegister(instr->function()); - Register length = ToRegister(instr->length()); - Register elements = ToRegister(instr->elements()); - Register scratch = scratch0(); - DCHECK(receiver.is(r0)); // Used for parameter count. - DCHECK(function.is(r1)); // Required by InvokeFunction. - DCHECK(ToRegister(instr->result()).is(r0)); - - // Copy the arguments to this function possibly from the - // adaptor frame below it. 
- const uint32_t kArgumentsLimit = 1 * KB; - __ cmp(length, Operand(kArgumentsLimit)); - DeoptimizeIf(hi, instr, DeoptimizeReason::kTooManyArguments); - - // Push the receiver and use the register to keep the original - // number of arguments. - __ push(receiver); - __ mov(receiver, length); - // The arguments are at a one pointer size offset from elements. - __ add(elements, elements, Operand(1 * kPointerSize)); - - // Loop through the arguments pushing them onto the execution - // stack. - Label invoke, loop; - // length is a small non-negative integer, due to the test above. - __ cmp(length, Operand::Zero()); - __ b(eq, &invoke); - __ bind(&loop); - __ ldr(scratch, MemOperand(elements, length, LSL, 2)); - __ push(scratch); - __ sub(length, length, Operand(1), SetCC); - __ b(ne, &loop); - - __ bind(&invoke); - - InvokeFlag flag = CALL_FUNCTION; - if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) { - DCHECK(!info()->saves_caller_doubles()); - // TODO(ishell): drop current frame before pushing arguments to the stack. - flag = JUMP_FUNCTION; - ParameterCount actual(r0); - // It is safe to use r3, r4 and r5 as scratch registers here given that - // 1) we are not going to return to caller function anyway, - // 2) r3 (new.target) will be initialized below. - PrepareForTailCall(actual, r3, r4, r5); - } - - DCHECK(instr->HasPointerMap()); - LPointerMap* pointers = instr->pointer_map(); - SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt); - // The number of arguments is stored in receiver which is r0, as expected - // by InvokeFunction. - ParameterCount actual(receiver); - __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator); -} - - -void LCodeGen::DoPushArgument(LPushArgument* instr) { - LOperand* argument = instr->value(); - if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) { - Abort(kDoPushArgumentNotImplementedForDoubleType); - } else { - Register argument_reg = EmitLoadRegister(argument, ip); - __ push(argument_reg); - } -} - - -void LCodeGen::DoDrop(LDrop* instr) { - __ Drop(instr->count()); -} - - -void LCodeGen::DoThisFunction(LThisFunction* instr) { - Register result = ToRegister(instr->result()); - __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); -} - - -void LCodeGen::DoContext(LContext* instr) { - // If there is a non-return use, the context must be moved to a register. - Register result = ToRegister(instr->result()); - if (info()->IsOptimizing()) { - __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); - } else { - // If there is no frame, the context must be in cp. 
-    DCHECK(result.is(cp));
-  }
-}
-
-
-void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  __ Move(scratch0(), instr->hydrogen()->declarations());
-  __ push(scratch0());
-  __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
-  __ push(scratch0());
-  __ Move(scratch0(), instr->hydrogen()->feedback_vector());
-  __ push(scratch0());
-  CallRuntime(Runtime::kDeclareGlobals, instr);
-}
-
-void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
-                                 int formal_parameter_count, int arity,
-                                 bool is_tail_call, LInstruction* instr) {
-  bool dont_adapt_arguments =
-      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
-  bool can_invoke_directly =
-      dont_adapt_arguments || formal_parameter_count == arity;
-
-  Register function_reg = r1;
-
-  LPointerMap* pointers = instr->pointer_map();
-
-  if (can_invoke_directly) {
-    // Change context.
-    __ ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
-
-    // Always initialize new target and number of actual arguments.
-    __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
-    __ mov(r0, Operand(arity));
-
-    bool is_self_call = function.is_identical_to(info()->closure());
-
-    // Invoke function.
-    if (is_self_call) {
-      Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
-      if (is_tail_call) {
-        __ Jump(self, RelocInfo::CODE_TARGET);
-      } else {
-        __ Call(self, RelocInfo::CODE_TARGET);
-      }
-    } else {
-      __ ldr(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
-      if (is_tail_call) {
-        __ Jump(ip);
-      } else {
-        __ Call(ip);
-      }
-    }
-
-    if (!is_tail_call) {
-      // Set up deoptimization.
-      RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-    }
-  } else {
-    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-    ParameterCount actual(arity);
-    ParameterCount expected(formal_parameter_count);
-    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
-    __ InvokeFunction(function_reg, expected, actual, flag, generator);
-  }
-}
-
-
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
-  DCHECK(instr->context() != NULL);
-  DCHECK(ToRegister(instr->context()).is(cp));
-  Register input = ToRegister(instr->value());
-  Register result = ToRegister(instr->result());
-  Register scratch = scratch0();
-
-  // Deoptimize if not a heap number.
-  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
-  __ cmp(scratch, Operand(ip));
-  DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
-
-  Label done;
-  Register exponent = scratch0();
-  scratch = no_reg;
-  __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
-  // Check the sign of the argument. If the argument is positive, just
-  // return it.
-  __ tst(exponent, Operand(HeapNumber::kSignMask));
-  // Move the input to the result if necessary.
-  __ Move(result, input);
-  __ b(eq, &done);
-
-  // Input is negative. Reverse its sign.
-  // Preserve the value of all registers.
-  {
-    PushSafepointRegistersScope scope(this);
-
-    // Registers were saved at the safepoint, so we can use
-    // many scratch registers.
-    Register tmp1 = input.is(r1) ? r0 : r1;
-    Register tmp2 = input.is(r2) ? r0 : r2;
-    Register tmp3 = input.is(r3) ? r0 : r3;
-    Register tmp4 = input.is(r4) ? r0 : r4;
-
-    // exponent: floating point exponent value.
- - Label allocated, slow; - __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow); - __ b(&allocated); - - // Slow case: Call the runtime system to do the number allocation. - __ bind(&slow); - - CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr, - instr->context()); - // Set the pointer to the new heap number in tmp. - if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0)); - // Restore input_reg after call to runtime. - __ LoadFromSafepointRegisterSlot(input, input); - __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); - - __ bind(&allocated); - // exponent: floating point exponent value. - // tmp1: allocated heap number. - __ bic(exponent, exponent, Operand(HeapNumber::kSignMask)); - __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset)); - __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset)); - __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset)); - - __ StoreToSafepointRegisterSlot(tmp1, result); - } - - __ bind(&done); -} - - -void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - __ cmp(input, Operand::Zero()); - __ Move(result, input, pl); - // We can make rsb conditional because the previous cmp instruction - // will clear the V (overflow) flag and rsb won't set this flag - // if input is positive. - __ rsb(result, input, Operand::Zero(), SetCC, mi); - // Deoptimize on overflow. - DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow); -} - - -void LCodeGen::DoMathAbs(LMathAbs* instr) { - // Class for deferred case. - class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode { - public: - DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { - codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); - } - LInstruction* instr() override { return instr_; } - - private: - LMathAbs* instr_; - }; - - Representation r = instr->hydrogen()->value()->representation(); - if (r.IsDouble()) { - DwVfpRegister input = ToDoubleRegister(instr->value()); - DwVfpRegister result = ToDoubleRegister(instr->result()); - __ vabs(result, input); - } else if (r.IsSmiOrInteger32()) { - EmitIntegerMathAbs(instr); - } else { - // Representation is tagged. - DeferredMathAbsTaggedHeapNumber* deferred = - new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); - Register input = ToRegister(instr->value()); - // Smi check. - __ JumpIfNotSmi(input, deferred->entry()); - // If smi, handle it directly. - EmitIntegerMathAbs(instr); - __ bind(deferred->exit()); - } -} - - -void LCodeGen::DoMathFloor(LMathFloor* instr) { - DwVfpRegister input = ToDoubleRegister(instr->value()); - Register result = ToRegister(instr->result()); - Register input_high = scratch0(); - Label done, exact; - - __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact); - DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN); - - __ bind(&exact); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // Test for -0. 
- __ cmp(result, Operand::Zero()); - __ b(ne, &done); - __ cmp(input_high, Operand::Zero()); - DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero); - } - __ bind(&done); -} - - -void LCodeGen::DoMathRound(LMathRound* instr) { - DwVfpRegister input = ToDoubleRegister(instr->value()); - Register result = ToRegister(instr->result()); - DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); - DwVfpRegister input_plus_dot_five = double_scratch1; - Register input_high = scratch0(); - DwVfpRegister dot_five = double_scratch0(); - Label convert, done; - - __ Vmov(dot_five, 0.5, scratch0()); - __ vabs(double_scratch1, input); - __ VFPCompareAndSetFlags(double_scratch1, dot_five); - // If input is in [-0.5, -0], the result is -0. - // If input is in [+0, +0.5[, the result is +0. - // If the input is +0.5, the result is 1. - __ b(hi, &convert); // Out of [-0.5, +0.5]. - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - __ VmovHigh(input_high, input); - __ cmp(input_high, Operand::Zero()); - // [-0.5, -0]. - DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero); - } - __ VFPCompareAndSetFlags(input, dot_five); - __ mov(result, Operand(1), LeaveCC, eq); // +0.5. - // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on - // flag kBailoutOnMinusZero. - __ mov(result, Operand::Zero(), LeaveCC, ne); - __ b(&done); - - __ bind(&convert); - __ vadd(input_plus_dot_five, input, dot_five); - // Reuse dot_five (double_scratch0) as we no longer need this value. - __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(), - &done, &done); - DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN); - __ bind(&done); -} - - -void LCodeGen::DoMathFround(LMathFround* instr) { - DwVfpRegister input_reg = ToDoubleRegister(instr->value()); - DwVfpRegister output_reg = ToDoubleRegister(instr->result()); - LowDwVfpRegister scratch = double_scratch0(); - __ vcvt_f32_f64(scratch.low(), input_reg); - __ vcvt_f64_f32(output_reg, scratch.low()); -} - - -void LCodeGen::DoMathSqrt(LMathSqrt* instr) { - DwVfpRegister input = ToDoubleRegister(instr->value()); - DwVfpRegister result = ToDoubleRegister(instr->result()); - __ vsqrt(result, input); -} - - -void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { - DwVfpRegister input = ToDoubleRegister(instr->value()); - DwVfpRegister result = ToDoubleRegister(instr->result()); - DwVfpRegister temp = double_scratch0(); - - // Note that according to ECMA-262 15.8.2.13: - // Math.pow(-Infinity, 0.5) == Infinity - // Math.sqrt(-Infinity) == NaN - Label done; - __ vmov(temp, -V8_INFINITY, scratch0()); - __ VFPCompareAndSetFlags(input, temp); - __ vneg(result, temp, eq); - __ b(&done, eq); - - // Add +0 to convert -0 to +0. - __ vadd(result, input, kDoubleRegZero); - __ vsqrt(result, result); - __ bind(&done); -} - - -void LCodeGen::DoPower(LPower* instr) { - Representation exponent_type = instr->hydrogen()->right()->representation(); - // Having marked this as a call, we can use any registers. - // Just make sure that the input/output registers are the expected ones. 
- Register tagged_exponent = MathPowTaggedDescriptor::exponent(); - DCHECK(!instr->right()->IsDoubleRegister() || - ToDoubleRegister(instr->right()).is(d1)); - DCHECK(!instr->right()->IsRegister() || - ToRegister(instr->right()).is(tagged_exponent)); - DCHECK(ToDoubleRegister(instr->left()).is(d0)); - DCHECK(ToDoubleRegister(instr->result()).is(d2)); - - if (exponent_type.IsSmi()) { - MathPowStub stub(isolate(), MathPowStub::TAGGED); - __ CallStub(&stub); - } else if (exponent_type.IsTagged()) { - Label no_deopt; - __ JumpIfSmi(tagged_exponent, &no_deopt); - DCHECK(!r6.is(tagged_exponent)); - __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); - __ cmp(r6, Operand(ip)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber); - __ bind(&no_deopt); - MathPowStub stub(isolate(), MathPowStub::TAGGED); - __ CallStub(&stub); - } else if (exponent_type.IsInteger32()) { - MathPowStub stub(isolate(), MathPowStub::INTEGER); - __ CallStub(&stub); - } else { - DCHECK(exponent_type.IsDouble()); - MathPowStub stub(isolate(), MathPowStub::DOUBLE); - __ CallStub(&stub); - } -} - -void LCodeGen::DoMathCos(LMathCos* instr) { - __ PrepareCallCFunction(0, 1, scratch0()); - __ MovToFloatParameter(ToDoubleRegister(instr->value())); - __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1); - __ MovFromFloatResult(ToDoubleRegister(instr->result())); -} - -void LCodeGen::DoMathSin(LMathSin* instr) { - __ PrepareCallCFunction(0, 1, scratch0()); - __ MovToFloatParameter(ToDoubleRegister(instr->value())); - __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1); - __ MovFromFloatResult(ToDoubleRegister(instr->result())); -} - -void LCodeGen::DoMathExp(LMathExp* instr) { - __ PrepareCallCFunction(0, 1, scratch0()); - __ MovToFloatParameter(ToDoubleRegister(instr->value())); - __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1); - __ MovFromFloatResult(ToDoubleRegister(instr->result())); -} - - -void LCodeGen::DoMathLog(LMathLog* instr) { - __ PrepareCallCFunction(0, 1, scratch0()); - __ MovToFloatParameter(ToDoubleRegister(instr->value())); - __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1); - __ MovFromFloatResult(ToDoubleRegister(instr->result())); -} - - -void LCodeGen::DoMathClz32(LMathClz32* instr) { - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - __ clz(result, input); -} - -void LCodeGen::PrepareForTailCall(const ParameterCount& actual, - Register scratch1, Register scratch2, - Register scratch3) { -#if DEBUG - if (actual.is_reg()) { - DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3)); - } else { - DCHECK(!AreAliased(scratch1, scratch2, scratch3)); - } -#endif - if (FLAG_code_comments) { - if (actual.is_reg()) { - Comment(";;; PrepareForTailCall, actual: %s {", - RegisterConfiguration::Crankshaft()->GetGeneralRegisterName( - actual.reg().code())); - } else { - Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate()); - } - } - - // Check if next frame is an arguments adaptor frame. 
-  Register caller_args_count_reg = scratch1;
-  Label no_arguments_adaptor, formal_parameter_count_loaded;
-  __ ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ ldr(scratch3,
-         MemOperand(scratch2, StandardFrameConstants::kContextOffset));
-  __ cmp(scratch3,
-         Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ b(ne, &no_arguments_adaptor);
-
-  // Drop current frame and load arguments count from arguments adaptor frame.
-  __ mov(fp, scratch2);
-  __ ldr(caller_args_count_reg,
-         MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiUntag(caller_args_count_reg);
-  __ b(&formal_parameter_count_loaded);
-
-  __ bind(&no_arguments_adaptor);
-  // Load caller's formal parameter count
-  __ mov(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
-
-  __ bind(&formal_parameter_count_loaded);
-  __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
-
-  Comment(";;; }");
-}
-
-void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
-  HInvokeFunction* hinstr = instr->hydrogen();
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->function()).is(r1));
-  DCHECK(instr->HasPointerMap());
-
-  bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
-
-  if (is_tail_call) {
-    DCHECK(!info()->saves_caller_doubles());
-    ParameterCount actual(instr->arity());
-    // It is safe to use r3, r4 and r5 as scratch registers here given that
-    // 1) we are not going to return to caller function anyway,
-    // 2) r3 (new.target) will be initialized below.
-    PrepareForTailCall(actual, r3, r4, r5);
-  }
-
-  Handle<JSFunction> known_function = hinstr->known_function();
-  if (known_function.is_null()) {
-    LPointerMap* pointers = instr->pointer_map();
-    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-    ParameterCount actual(instr->arity());
-    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
-    __ InvokeFunction(r1, no_reg, actual, flag, generator);
-  } else {
-    CallKnownFunction(known_function, hinstr->formal_parameter_count(),
-                      instr->arity(), is_tail_call, instr);
-  }
-}
-
-
-void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
-  DCHECK(ToRegister(instr->result()).is(r0));
-
-  if (instr->hydrogen()->IsTailCall()) {
-    if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
-
-    if (instr->target()->IsConstantOperand()) {
-      LConstantOperand* target = LConstantOperand::cast(instr->target());
-      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
-      __ Jump(code, RelocInfo::CODE_TARGET);
-    } else {
-      DCHECK(instr->target()->IsRegister());
-      Register target = ToRegister(instr->target());
-      // Make sure we don't emit any additional entries in the constant pool
-      // before the call to ensure that the CallCodeSize() calculated the
-      // correct
-      // number of instructions for the constant pool load.
-      {
-        ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
-        __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
-      }
-      __ Jump(target);
-    }
-  } else {
-    LPointerMap* pointers = instr->pointer_map();
-    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-
-    if (instr->target()->IsConstantOperand()) {
-      LConstantOperand* target = LConstantOperand::cast(instr->target());
-      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
-      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
-      PlatformInterfaceDescriptor* call_descriptor =
-          instr->descriptor().platform_specific_descriptor();
-      if (call_descriptor != NULL) {
-        __ Call(code, RelocInfo::CODE_TARGET, al,
-                call_descriptor->storage_mode());
-      } else {
-        __ Call(code, RelocInfo::CODE_TARGET, al);
-      }
-    } else {
-      DCHECK(instr->target()->IsRegister());
-      Register target = ToRegister(instr->target());
-      generator.BeforeCall(__ CallSize(target));
-      // Make sure we don't emit any additional entries in the constant pool
-      // before the call to ensure that the CallCodeSize() calculated the
-      // correct
-      // number of instructions for the constant pool load.
-      {
-        ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
-        __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
-      }
-      __ Call(target);
-    }
-    generator.AfterCall();
-  }
-}
-
-
-void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->constructor()).is(r1));
-  DCHECK(ToRegister(instr->result()).is(r0));
-
-  __ mov(r0, Operand(instr->arity()));
-  __ Move(r2, instr->hydrogen()->site());
-
-  ElementsKind kind = instr->hydrogen()->elements_kind();
-  AllocationSiteOverrideMode override_mode = AllocationSite::ShouldTrack(kind)
-                                                 ? DISABLE_ALLOCATION_SITES
-                                                 : DONT_OVERRIDE;
-
-  if (instr->arity() == 0) {
-    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
-    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-  } else if (instr->arity() == 1) {
-    Label done;
-    if (IsFastPackedElementsKind(kind)) {
-      Label packed_case;
-      // We might need a change here
-      // look at the first argument
-      __ ldr(r5, MemOperand(sp, 0));
-      __ cmp(r5, Operand::Zero());
-      __ b(eq, &packed_case);
-
-      ElementsKind holey_kind = GetHoleyElementsKind(kind);
-      ArraySingleArgumentConstructorStub stub(isolate(),
-                                              holey_kind,
-                                              override_mode);
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      __ jmp(&done);
-      __ bind(&packed_case);
-    }
-
-    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
-    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-    __ bind(&done);
-  } else {
-    ArrayNArgumentsConstructorStub stub(isolate());
-    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-  }
-}
-
-
-void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
-  CallRuntime(instr->function(), instr->arity(), instr);
-}
-
-
-void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
-  Register function = ToRegister(instr->function());
-  Register code_object = ToRegister(instr->code_object());
-  __ add(code_object, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ str(code_object,
-         FieldMemOperand(function, JSFunction::kCodeEntryOffset));
-}
-
-
-void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
-  Register result = ToRegister(instr->result());
-  Register base = ToRegister(instr->base_object());
-  if (instr->offset()->IsConstantOperand()) {
-    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
-    __ add(result, base, Operand(ToInteger32(offset)));
-  } else {
-    Register offset = ToRegister(instr->offset());
-    __ add(result, base, offset);
-  }
-}
-
-
-void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
-  Representation representation = instr->representation();
-
-  Register object = ToRegister(instr->object());
-  Register scratch = scratch0();
-  HObjectAccess access = instr->hydrogen()->access();
-  int offset = access.offset();
-
-  if (access.IsExternalMemory()) {
-    Register value = ToRegister(instr->value());
-    MemOperand operand = MemOperand(object, offset);
-    __ Store(value, operand, representation);
-    return;
-  }
-
-  __ AssertNotSmi(object);
-
-  DCHECK(!representation.IsSmi() ||
-         !instr->value()->IsConstantOperand() ||
-         IsSmi(LConstantOperand::cast(instr->value())));
-  if (representation.IsDouble()) {
-    DCHECK(access.IsInobject());
-    DCHECK(!instr->hydrogen()->has_transition());
-    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
-    DwVfpRegister value = ToDoubleRegister(instr->value());
-    __ vstr(value, FieldMemOperand(object, offset));
-    return;
-  }
-
-  if (instr->hydrogen()->has_transition()) {
-    Handle<Map> transition = instr->hydrogen()->transition_map();
-    AddDeprecationDependency(transition);
-    __ mov(scratch, Operand(transition));
-    __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
-    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
-      Register temp = ToRegister(instr->temp());
-      // Update the write barrier for the map field.
-      __ RecordWriteForMap(object,
-                           scratch,
-                           temp,
-                           GetLinkRegisterState(),
-                           kSaveFPRegs);
-    }
-  }
-
-  // Do the store.
- Register value = ToRegister(instr->value()); - if (access.IsInobject()) { - MemOperand operand = FieldMemOperand(object, offset); - __ Store(value, operand, representation); - if (instr->hydrogen()->NeedsWriteBarrier()) { - // Update the write barrier for the object for in-object properties. - __ RecordWriteField(object, - offset, - value, - scratch, - GetLinkRegisterState(), - kSaveFPRegs, - EMIT_REMEMBERED_SET, - instr->hydrogen()->SmiCheckForWriteBarrier(), - instr->hydrogen()->PointersToHereCheckForValue()); - } - } else { - __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset)); - MemOperand operand = FieldMemOperand(scratch, offset); - __ Store(value, operand, representation); - if (instr->hydrogen()->NeedsWriteBarrier()) { - // Update the write barrier for the properties array. - // object is used as a scratch register. - __ RecordWriteField(scratch, - offset, - value, - object, - GetLinkRegisterState(), - kSaveFPRegs, - EMIT_REMEMBERED_SET, - instr->hydrogen()->SmiCheckForWriteBarrier(), - instr->hydrogen()->PointersToHereCheckForValue()); - } - } -} - - -void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { - Condition cc = instr->hydrogen()->allow_equality() ? hi : hs; - if (instr->index()->IsConstantOperand()) { - Operand index = ToOperand(instr->index()); - Register length = ToRegister(instr->length()); - __ cmp(length, index); - cc = CommuteCondition(cc); - } else { - Register index = ToRegister(instr->index()); - Operand length = ToOperand(instr->length()); - __ cmp(index, length); - } - if (FLAG_debug_code && instr->hydrogen()->skip_check()) { - Label done; - __ b(NegateCondition(cc), &done); - __ stop("eliminated bounds check failed"); - __ bind(&done); - } else { - DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds); - } -} - - -void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { - Register external_pointer = ToRegister(instr->elements()); - Register key = no_reg; - ElementsKind elements_kind = instr->elements_kind(); - bool key_is_constant = instr->key()->IsConstantOperand(); - int constant_key = 0; - if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort(kArrayIndexConstantValueTooBig); - } - } else { - key = ToRegister(instr->key()); - } - int element_size_shift = ElementsKindToShiftSize(elements_kind); - int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) - ? (element_size_shift - kSmiTagSize) : element_size_shift; - int base_offset = instr->base_offset(); - - if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) { - Register address = scratch0(); - DwVfpRegister value(ToDoubleRegister(instr->value())); - if (key_is_constant) { - if (constant_key != 0) { - __ add(address, external_pointer, - Operand(constant_key << element_size_shift)); - } else { - address = external_pointer; - } - } else { - __ add(address, external_pointer, Operand(key, LSL, shift_size)); - } - if (elements_kind == FLOAT32_ELEMENTS) { - __ vcvt_f32_f64(double_scratch0().low(), value); - __ vstr(double_scratch0().low(), address, base_offset); - } else { // Storing doubles, not floats. 
- __ vstr(value, address, base_offset); - } - } else { - Register value(ToRegister(instr->value())); - MemOperand mem_operand = PrepareKeyedOperand( - key, external_pointer, key_is_constant, constant_key, - element_size_shift, shift_size, - base_offset); - switch (elements_kind) { - case UINT8_ELEMENTS: - case UINT8_CLAMPED_ELEMENTS: - case INT8_ELEMENTS: - __ strb(value, mem_operand); - break; - case INT16_ELEMENTS: - case UINT16_ELEMENTS: - __ strh(value, mem_operand); - break; - case INT32_ELEMENTS: - case UINT32_ELEMENTS: - __ str(value, mem_operand); - break; - case FLOAT32_ELEMENTS: - case FLOAT64_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case DICTIONARY_ELEMENTS: - case FAST_SLOPPY_ARGUMENTS_ELEMENTS: - case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: - case FAST_STRING_WRAPPER_ELEMENTS: - case SLOW_STRING_WRAPPER_ELEMENTS: - case NO_ELEMENTS: - UNREACHABLE(); - break; - } - } -} - - -void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { - DwVfpRegister value = ToDoubleRegister(instr->value()); - Register elements = ToRegister(instr->elements()); - Register scratch = scratch0(); - DwVfpRegister double_scratch = double_scratch0(); - bool key_is_constant = instr->key()->IsConstantOperand(); - int base_offset = instr->base_offset(); - - // Calculate the effective address of the slot in the array to store the - // double value. - int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); - if (key_is_constant) { - int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort(kArrayIndexConstantValueTooBig); - } - __ add(scratch, elements, - Operand((constant_key << element_size_shift) + base_offset)); - } else { - int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) - ? (element_size_shift - kSmiTagSize) : element_size_shift; - __ add(scratch, elements, Operand(base_offset)); - __ add(scratch, scratch, - Operand(ToRegister(instr->key()), LSL, shift_size)); - } - - if (instr->NeedsCanonicalization()) { - // Force a canonical NaN. - __ VFPCanonicalizeNaN(double_scratch, value); - __ vstr(double_scratch, scratch, 0); - } else { - __ vstr(value, scratch, 0); - } -} - - -void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { - Register value = ToRegister(instr->value()); - Register elements = ToRegister(instr->elements()); - Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) - : no_reg; - Register scratch = scratch0(); - Register store_base = scratch; - int offset = instr->base_offset(); - - // Do the store. - if (instr->key()->IsConstantOperand()) { - DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); - LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); - offset += ToInteger32(const_operand) * kPointerSize; - store_base = elements; - } else { - // Even though the HLoadKeyed instruction forces the input - // representation for the key to be an integer, the input gets replaced - // during bound check elimination with the index argument to the bounds - // check, which can be tagged, so that case must be handled here, too. 
- if (instr->hydrogen()->key()->representation().IsSmi()) { - __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key)); - } else { - __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); - } - } - __ str(value, MemOperand(store_base, offset)); - - if (instr->hydrogen()->NeedsWriteBarrier()) { - SmiCheck check_needed = - instr->hydrogen()->value()->type().IsHeapObject() - ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; - // Compute address of modified element and store it into key register. - __ add(key, store_base, Operand(offset)); - __ RecordWrite(elements, - key, - value, - GetLinkRegisterState(), - kSaveFPRegs, - EMIT_REMEMBERED_SET, - check_needed, - instr->hydrogen()->PointersToHereCheckForValue()); - } -} - - -void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { - // By cases: external, fast double - if (instr->is_fixed_typed_array()) { - DoStoreKeyedExternalArray(instr); - } else if (instr->hydrogen()->value()->representation().IsDouble()) { - DoStoreKeyedFixedDoubleArray(instr); - } else { - DoStoreKeyedFixedArray(instr); - } -} - - -void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) { - class DeferredMaybeGrowElements final : public LDeferredCode { - public: - DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr) - : LDeferredCode(codegen), instr_(instr) {} - void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LMaybeGrowElements* instr_; - }; - - Register result = r0; - DeferredMaybeGrowElements* deferred = - new (zone()) DeferredMaybeGrowElements(this, instr); - LOperand* key = instr->key(); - LOperand* current_capacity = instr->current_capacity(); - - DCHECK(instr->hydrogen()->key()->representation().IsInteger32()); - DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32()); - DCHECK(key->IsConstantOperand() || key->IsRegister()); - DCHECK(current_capacity->IsConstantOperand() || - current_capacity->IsRegister()); - - if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) { - int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); - int32_t constant_capacity = - ToInteger32(LConstantOperand::cast(current_capacity)); - if (constant_key >= constant_capacity) { - // Deferred case. - __ jmp(deferred->entry()); - } - } else if (key->IsConstantOperand()) { - int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); - __ cmp(ToRegister(current_capacity), Operand(constant_key)); - __ b(le, deferred->entry()); - } else if (current_capacity->IsConstantOperand()) { - int32_t constant_capacity = - ToInteger32(LConstantOperand::cast(current_capacity)); - __ cmp(ToRegister(key), Operand(constant_capacity)); - __ b(ge, deferred->entry()); - } else { - __ cmp(ToRegister(key), ToRegister(current_capacity)); - __ b(ge, deferred->entry()); - } - - if (instr->elements()->IsRegister()) { - __ Move(result, ToRegister(instr->elements())); - } else { - __ ldr(result, ToMemOperand(instr->elements())); - } - - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) { - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - Register result = r0; - __ mov(result, Operand::Zero()); - - // We have to call a stub. 
- { - PushSafepointRegistersScope scope(this); - if (instr->object()->IsRegister()) { - __ Move(result, ToRegister(instr->object())); - } else { - __ ldr(result, ToMemOperand(instr->object())); - } - - LOperand* key = instr->key(); - if (key->IsConstantOperand()) { - LConstantOperand* constant_key = LConstantOperand::cast(key); - int32_t int_key = ToInteger32(constant_key); - if (Smi::IsValid(int_key)) { - __ mov(r3, Operand(Smi::FromInt(int_key))); - } else { - Abort(kArrayIndexConstantValueTooBig); - } - } else { - Label is_smi; - __ SmiTag(r3, ToRegister(key), SetCC); - // Deopt if the key is outside Smi range. The stub expects Smi and would - // bump the elements into dictionary mode (and trigger a deopt) anyways. - __ b(vc, &is_smi); - __ PopSafepointRegisters(); - DeoptimizeIf(al, instr, DeoptimizeReason::kOverflow); - __ bind(&is_smi); - } - - GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind()); - __ CallStub(&stub); - RecordSafepointWithLazyDeopt( - instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - __ StoreToSafepointRegisterSlot(result, result); - } - - // Deopt on smi, which means the elements array changed to dictionary mode. - __ SmiTst(result); - DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi); -} - - -void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { - UNREACHABLE(); -} - - -void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { - Register object = ToRegister(instr->object()); - Register temp = ToRegister(instr->temp()); - Label no_memento_found; - __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); - DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound); - __ bind(&no_memento_found); -} - - -void LCodeGen::DoStringAdd(LStringAdd* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - DCHECK(ToRegister(instr->left()).is(r1)); - DCHECK(ToRegister(instr->right()).is(r0)); - StringAddStub stub(isolate(), - instr->hydrogen()->flags(), - instr->hydrogen()->pretenure_flag()); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); -} - - -void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { - class DeferredStringCharCodeAt final : public LDeferredCode { - public: - DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LStringCharCodeAt* instr_; - }; - - DeferredStringCharCodeAt* deferred = - new(zone()) DeferredStringCharCodeAt(this, instr); - - StringCharLoadGenerator::Generate(masm(), - ToRegister(instr->string()), - ToRegister(instr->index()), - ToRegister(instr->result()), - deferred->entry()); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { - Register string = ToRegister(instr->string()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - __ mov(result, Operand::Zero()); - - PushSafepointRegistersScope scope(this); - __ push(string); - // Push the index as a smi. This is safe because of the checks in - // DoStringCharCodeAt above. 
- if (instr->index()->IsConstantOperand()) { - int const_index = ToInteger32(LConstantOperand::cast(instr->index())); - __ mov(scratch, Operand(Smi::FromInt(const_index))); - __ push(scratch); - } else { - Register index = ToRegister(instr->index()); - __ SmiTag(index); - __ push(index); - } - CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr, - instr->context()); - __ AssertSmi(r0); - __ SmiUntag(r0); - __ StoreToSafepointRegisterSlot(r0, result); -} - - -void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { - class DeferredStringCharFromCode final : public LDeferredCode { - public: - DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { - codegen()->DoDeferredStringCharFromCode(instr_); - } - LInstruction* instr() override { return instr_; } - - private: - LStringCharFromCode* instr_; - }; - - DeferredStringCharFromCode* deferred = - new(zone()) DeferredStringCharFromCode(this, instr); - - DCHECK(instr->hydrogen()->value()->representation().IsInteger32()); - Register char_code = ToRegister(instr->char_code()); - Register result = ToRegister(instr->result()); - DCHECK(!char_code.is(result)); - - __ cmp(char_code, Operand(String::kMaxOneByteCharCode)); - __ b(hi, deferred->entry()); - __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); - __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2)); - __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize)); - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(result, ip); - __ b(eq, deferred->entry()); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { - Register char_code = ToRegister(instr->char_code()); - Register result = ToRegister(instr->result()); - - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. 
- __ mov(result, Operand::Zero()); - - PushSafepointRegistersScope scope(this); - __ SmiTag(char_code); - __ push(char_code); - CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr, - instr->context()); - __ StoreToSafepointRegisterSlot(r0, result); -} - - -void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { - LOperand* input = instr->value(); - DCHECK(input->IsRegister() || input->IsStackSlot()); - LOperand* output = instr->result(); - DCHECK(output->IsDoubleRegister()); - SwVfpRegister single_scratch = double_scratch0().low(); - if (input->IsStackSlot()) { - Register scratch = scratch0(); - __ ldr(scratch, ToMemOperand(input)); - __ vmov(single_scratch, scratch); - } else { - __ vmov(single_scratch, ToRegister(input)); - } - __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch); -} - - -void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { - LOperand* input = instr->value(); - LOperand* output = instr->result(); - - SwVfpRegister flt_scratch = double_scratch0().low(); - __ vmov(flt_scratch, ToRegister(input)); - __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch); -} - - -void LCodeGen::DoNumberTagI(LNumberTagI* instr) { - class DeferredNumberTagI final : public LDeferredCode { - public: - DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { - codegen()->DoDeferredNumberTagIU(instr_, - instr_->value(), - instr_->temp1(), - instr_->temp2(), - SIGNED_INT32); - } - LInstruction* instr() override { return instr_; } - - private: - LNumberTagI* instr_; - }; - - Register src = ToRegister(instr->value()); - Register dst = ToRegister(instr->result()); - - DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr); - __ SmiTag(dst, src, SetCC); - __ b(vs, deferred->entry()); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoNumberTagU(LNumberTagU* instr) { - class DeferredNumberTagU final : public LDeferredCode { - public: - DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { - codegen()->DoDeferredNumberTagIU(instr_, - instr_->value(), - instr_->temp1(), - instr_->temp2(), - UNSIGNED_INT32); - } - LInstruction* instr() override { return instr_; } - - private: - LNumberTagU* instr_; - }; - - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - - DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr); - __ cmp(input, Operand(Smi::kMaxValue)); - __ b(hi, deferred->entry()); - __ SmiTag(result, input); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, - LOperand* value, - LOperand* temp1, - LOperand* temp2, - IntegerSignedness signedness) { - Label done, slow; - Register src = ToRegister(value); - Register dst = ToRegister(instr->result()); - Register tmp1 = scratch0(); - Register tmp2 = ToRegister(temp1); - Register tmp3 = ToRegister(temp2); - LowDwVfpRegister dbl_scratch = double_scratch0(); - - if (signedness == SIGNED_INT32) { - // There was overflow, so bits 30 and 31 of the original integer - // disagree. Try to allocate a heap number in new space and store - // the value in there. If that fails, call the runtime system. 
- if (dst.is(src)) { - __ SmiUntag(src, dst); - __ eor(src, src, Operand(0x80000000)); - } - __ vmov(dbl_scratch.low(), src); - __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low()); - } else { - __ vmov(dbl_scratch.low(), src); - __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low()); - } - - if (FLAG_inline_new) { - __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow); - __ b(&done); - } - - // Slow case: Call the runtime system to do the number allocation. - __ bind(&slow); - { - // TODO(3095996): Put a valid pointer value in the stack slot where the - // result register is stored, as this register is in the pointer map, but - // contains an integer value. - __ mov(dst, Operand::Zero()); - - // Preserve the value of all registers. - PushSafepointRegistersScope scope(this); - // Reset the context register. - if (!dst.is(cp)) { - __ mov(cp, Operand::Zero()); - } - __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); - RecordSafepointWithRegisters( - instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(r0, dst); - } - - // Done. Put the value in dbl_scratch into the value of the allocated heap - // number. - __ bind(&done); - __ vstr(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset)); -} - - -void LCodeGen::DoNumberTagD(LNumberTagD* instr) { - class DeferredNumberTagD final : public LDeferredCode { - public: - DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { codegen()->DoDeferredNumberTagD(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LNumberTagD* instr_; - }; - - DwVfpRegister input_reg = ToDoubleRegister(instr->value()); - Register scratch = scratch0(); - Register reg = ToRegister(instr->result()); - Register temp1 = ToRegister(instr->temp()); - Register temp2 = ToRegister(instr->temp2()); - - DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); - if (FLAG_inline_new) { - __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry()); - } else { - __ jmp(deferred->entry()); - } - __ bind(deferred->exit()); - __ vstr(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset)); -} - - -void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - Register reg = ToRegister(instr->result()); - __ mov(reg, Operand::Zero()); - - PushSafepointRegistersScope scope(this); - // Reset the context register. 
- if (!reg.is(cp)) { - __ mov(cp, Operand::Zero()); - } - __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); - RecordSafepointWithRegisters( - instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(r0, reg); -} - - -void LCodeGen::DoSmiTag(LSmiTag* instr) { - HChange* hchange = instr->hydrogen(); - Register input = ToRegister(instr->value()); - Register output = ToRegister(instr->result()); - if (hchange->CheckFlag(HValue::kCanOverflow) && - hchange->value()->CheckFlag(HValue::kUint32)) { - __ tst(input, Operand(0xc0000000)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow); - } - if (hchange->CheckFlag(HValue::kCanOverflow) && - !hchange->value()->CheckFlag(HValue::kUint32)) { - __ SmiTag(output, input, SetCC); - DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow); - } else { - __ SmiTag(output, input); - } -} - - -void LCodeGen::DoSmiUntag(LSmiUntag* instr) { - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - if (instr->needs_check()) { - STATIC_ASSERT(kHeapObjectTag == 1); - // If the input is a HeapObject, SmiUntag will set the carry flag. - __ SmiUntag(result, input, SetCC); - DeoptimizeIf(cs, instr, DeoptimizeReason::kNotASmi); - } else { - __ SmiUntag(result, input); - } -} - - -void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, - DwVfpRegister result_reg, - NumberUntagDMode mode) { - bool can_convert_undefined_to_nan = instr->truncating(); - bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); - - Register scratch = scratch0(); - SwVfpRegister flt_scratch = double_scratch0().low(); - DCHECK(!result_reg.is(double_scratch0())); - Label convert, load_smi, done; - if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { - // Smi check. - __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); - // Heap number map check. - __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); - __ cmp(scratch, Operand(ip)); - if (can_convert_undefined_to_nan) { - __ b(ne, &convert); - } else { - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber); - } - // load heap number - __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag); - if (deoptimize_on_minus_zero) { - __ VmovLow(scratch, result_reg); - __ cmp(scratch, Operand::Zero()); - __ b(ne, &done); - __ VmovHigh(scratch, result_reg); - __ cmp(scratch, Operand(HeapNumber::kSignMask)); - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); - } - __ jmp(&done); - if (can_convert_undefined_to_nan) { - __ bind(&convert); - // Convert undefined (and hole) to NaN. 
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(input_reg, Operand(ip)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined); - __ LoadRoot(scratch, Heap::kNanValueRootIndex); - __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag); - __ jmp(&done); - } - } else { - __ SmiUntag(scratch, input_reg); - DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); - } - // Smi to double register conversion - __ bind(&load_smi); - // scratch: untagged value of input_reg - __ vmov(flt_scratch, scratch); - __ vcvt_f64_s32(result_reg, flt_scratch); - __ bind(&done); -} - - -void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { - Register input_reg = ToRegister(instr->value()); - Register scratch1 = scratch0(); - Register scratch2 = ToRegister(instr->temp()); - LowDwVfpRegister double_scratch = double_scratch0(); - DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2()); - - DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2)); - DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1)); - - Label done; - - // The input was optimistically untagged; revert it. - // The carry flag is set when we reach this deferred code as we just executed - // SmiUntag(heap_object, SetCC) - STATIC_ASSERT(kHeapObjectTag == 1); - __ adc(scratch2, input_reg, Operand(input_reg)); - - // Heap number map check. - __ ldr(scratch1, FieldMemOperand(scratch2, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); - __ cmp(scratch1, Operand(ip)); - - if (instr->truncating()) { - Label truncate; - __ b(eq, &truncate); - __ CompareInstanceType(scratch1, scratch1, ODDBALL_TYPE); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball); - __ bind(&truncate); - __ TruncateHeapNumberToI(input_reg, scratch2); - } else { - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber); - - __ sub(ip, scratch2, Operand(kHeapObjectTag)); - __ vldr(double_scratch2, ip, HeapNumber::kValueOffset); - __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch); - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN); - - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - __ cmp(input_reg, Operand::Zero()); - __ b(ne, &done); - __ VmovHigh(scratch1, double_scratch2); - __ tst(scratch1, Operand(HeapNumber::kSignMask)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero); - } - } - __ bind(&done); -} - - -void LCodeGen::DoTaggedToI(LTaggedToI* instr) { - class DeferredTaggedToI final : public LDeferredCode { - public: - DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { codegen()->DoDeferredTaggedToI(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LTaggedToI* instr_; - }; - - LOperand* input = instr->value(); - DCHECK(input->IsRegister()); - DCHECK(input->Equals(instr->result())); - - Register input_reg = ToRegister(input); - - if (instr->hydrogen()->value()->representation().IsSmi()) { - __ SmiUntag(input_reg); - } else { - DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); - - // Optimistically untag the input. - // If the input is a HeapObject, SmiUntag will set the carry flag. - __ SmiUntag(input_reg, SetCC); - // Branch to deferred code if the input was tagged. - // The deferred code will take care of restoring the tag. 
- __ b(cs, deferred->entry()); - __ bind(deferred->exit()); - } -} - - -void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { - LOperand* input = instr->value(); - DCHECK(input->IsRegister()); - LOperand* result = instr->result(); - DCHECK(result->IsDoubleRegister()); - - Register input_reg = ToRegister(input); - DwVfpRegister result_reg = ToDoubleRegister(result); - - HValue* value = instr->hydrogen()->value(); - NumberUntagDMode mode = value->representation().IsSmi() - ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; - - EmitNumberUntagD(instr, input_reg, result_reg, mode); -} - - -void LCodeGen::DoDoubleToI(LDoubleToI* instr) { - Register result_reg = ToRegister(instr->result()); - Register scratch1 = scratch0(); - DwVfpRegister double_input = ToDoubleRegister(instr->value()); - LowDwVfpRegister double_scratch = double_scratch0(); - - if (instr->truncating()) { - __ TruncateDoubleToI(result_reg, double_input); - } else { - __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); - // Deoptimize if the input wasn't a int32 (inside a double). - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label done; - __ cmp(result_reg, Operand::Zero()); - __ b(ne, &done); - __ VmovHigh(scratch1, double_input); - __ tst(scratch1, Operand(HeapNumber::kSignMask)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero); - __ bind(&done); - } - } -} - - -void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { - Register result_reg = ToRegister(instr->result()); - Register scratch1 = scratch0(); - DwVfpRegister double_input = ToDoubleRegister(instr->value()); - LowDwVfpRegister double_scratch = double_scratch0(); - - if (instr->truncating()) { - __ TruncateDoubleToI(result_reg, double_input); - } else { - __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); - // Deoptimize if the input wasn't a int32 (inside a double). 
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label done; - __ cmp(result_reg, Operand::Zero()); - __ b(ne, &done); - __ VmovHigh(scratch1, double_input); - __ tst(scratch1, Operand(HeapNumber::kSignMask)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero); - __ bind(&done); - } - } - __ SmiTag(result_reg, SetCC); - DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow); -} - - -void LCodeGen::DoCheckSmi(LCheckSmi* instr) { - LOperand* input = instr->value(); - __ SmiTst(ToRegister(input)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi); -} - - -void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - LOperand* input = instr->value(); - __ SmiTst(ToRegister(input)); - DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi); - } -} - - -void LCodeGen::DoCheckArrayBufferNotNeutered( - LCheckArrayBufferNotNeutered* instr) { - Register view = ToRegister(instr->view()); - Register scratch = scratch0(); - - __ ldr(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset)); - __ ldr(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset)); - __ tst(scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds); -} - - -void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { - Register input = ToRegister(instr->value()); - Register scratch = scratch0(); - - __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); - __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); - - if (instr->hydrogen()->is_interval_check()) { - InstanceType first; - InstanceType last; - instr->hydrogen()->GetCheckInterval(&first, &last); - - __ cmp(scratch, Operand(first)); - - // If there is only one type in the interval check for equality. - if (first == last) { - DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType); - } else { - DeoptimizeIf(lo, instr, DeoptimizeReason::kWrongInstanceType); - // Omit check for the last type. - if (last != LAST_TYPE) { - __ cmp(scratch, Operand(last)); - DeoptimizeIf(hi, instr, DeoptimizeReason::kWrongInstanceType); - } - } - } else { - uint8_t mask; - uint8_t tag; - instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); - - if (base::bits::IsPowerOfTwo32(mask)) { - DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); - __ tst(scratch, Operand(mask)); - DeoptimizeIf(tag == 0 ? ne : eq, instr, - DeoptimizeReason::kWrongInstanceType); - } else { - __ and_(scratch, scratch, Operand(mask)); - __ cmp(scratch, Operand(tag)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType); - } - } -} - - -void LCodeGen::DoCheckValue(LCheckValue* instr) { - Register reg = ToRegister(instr->value()); - Handle object = instr->hydrogen()->object().handle(); - AllowDeferredHandleDereference smi_check; - if (isolate()->heap()->InNewSpace(*object)) { - Register reg = ToRegister(instr->value()); - Handle cell = isolate()->factory()->NewCell(object); - __ mov(ip, Operand(cell)); - __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset)); - __ cmp(reg, ip); - } else { - __ cmp(reg, Operand(object)); - } - DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch); -} - - -void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { - Label deopt, done; - // If the map is not deprecated the migration attempt does not make sense. 
- __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); - __ ldr(scratch0(), FieldMemOperand(scratch0(), Map::kBitField3Offset)); - __ tst(scratch0(), Operand(Map::Deprecated::kMask)); - __ b(eq, &deopt); - - { - PushSafepointRegistersScope scope(this); - __ push(object); - __ mov(cp, Operand::Zero()); - __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); - RecordSafepointWithRegisters( - instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(r0, scratch0()); - } - __ tst(scratch0(), Operand(kSmiTagMask)); - __ b(ne, &done); - - __ bind(&deopt); - DeoptimizeIf(al, instr, DeoptimizeReason::kInstanceMigrationFailed); - - __ bind(&done); -} - - -void LCodeGen::DoCheckMaps(LCheckMaps* instr) { - class DeferredCheckMaps final : public LDeferredCode { - public: - DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) - : LDeferredCode(codegen), instr_(instr), object_(object) { - SetExit(check_maps()); - } - void Generate() override { - codegen()->DoDeferredInstanceMigration(instr_, object_); - } - Label* check_maps() { return &check_maps_; } - LInstruction* instr() override { return instr_; } - - private: - LCheckMaps* instr_; - Label check_maps_; - Register object_; - }; - - if (instr->hydrogen()->IsStabilityCheck()) { - const UniqueSet* maps = instr->hydrogen()->maps(); - for (int i = 0; i < maps->size(); ++i) { - AddStabilityDependency(maps->at(i).handle()); - } - return; - } - - Register map_reg = scratch0(); - - LOperand* input = instr->value(); - DCHECK(input->IsRegister()); - Register reg = ToRegister(input); - - __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); - - DeferredCheckMaps* deferred = NULL; - if (instr->hydrogen()->HasMigrationTarget()) { - deferred = new(zone()) DeferredCheckMaps(this, instr, reg); - __ bind(deferred->check_maps()); - } - - const UniqueSet* maps = instr->hydrogen()->maps(); - Label success; - for (int i = 0; i < maps->size() - 1; i++) { - Handle map = maps->at(i).handle(); - __ CompareMap(map_reg, map, &success); - __ b(eq, &success); - } - - Handle map = maps->at(maps->size() - 1).handle(); - __ CompareMap(map_reg, map, &success); - if (instr->hydrogen()->HasMigrationTarget()) { - __ b(ne, deferred->entry()); - } else { - DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap); - } - - __ bind(&success); -} - - -void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { - DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped()); - Register result_reg = ToRegister(instr->result()); - __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0()); -} - - -void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { - Register unclamped_reg = ToRegister(instr->unclamped()); - Register result_reg = ToRegister(instr->result()); - __ ClampUint8(result_reg, unclamped_reg); -} - - -void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { - Register scratch = scratch0(); - Register input_reg = ToRegister(instr->unclamped()); - Register result_reg = ToRegister(instr->result()); - DwVfpRegister temp_reg = ToDoubleRegister(instr->temp()); - Label is_smi, done, heap_number; - - // Both smi and heap number cases are handled. - __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); - - // Check for heap number - __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); - __ cmp(scratch, Operand(factory()->heap_number_map())); - __ b(eq, &heap_number); - - // Check for undefined. Undefined is converted to zero for clamping - // conversions. 
- __ cmp(input_reg, Operand(factory()->undefined_value())); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined); - __ mov(result_reg, Operand::Zero()); - __ jmp(&done); - - // Heap number - __ bind(&heap_number); - __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); - __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0()); - __ jmp(&done); - - // smi - __ bind(&is_smi); - __ ClampUint8(result_reg, result_reg); - - __ bind(&done); -} - - -void LCodeGen::DoAllocate(LAllocate* instr) { - class DeferredAllocate final : public LDeferredCode { - public: - DeferredAllocate(LCodeGen* codegen, LAllocate* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { codegen()->DoDeferredAllocate(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LAllocate* instr_; - }; - - DeferredAllocate* deferred = - new(zone()) DeferredAllocate(this, instr); - - Register result = ToRegister(instr->result()); - Register scratch = ToRegister(instr->temp1()); - Register scratch2 = ToRegister(instr->temp2()); - - // Allocate memory for the object. - AllocationFlags flags = NO_ALLOCATION_FLAGS; - if (instr->hydrogen()->MustAllocateDoubleAligned()) { - flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); - } - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - flags = static_cast<AllocationFlags>(flags | PRETENURE); - } - - if (instr->hydrogen()->IsAllocationFoldingDominator()) { - flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR); - } - DCHECK(!instr->hydrogen()->IsAllocationFolded()); - - if (instr->size()->IsConstantOperand()) { - int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - CHECK(size <= kMaxRegularHeapObjectSize); - __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); - } else { - Register size = ToRegister(instr->size()); - __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); - } - - __ bind(deferred->exit()); - - if (instr->hydrogen()->MustPrefillWithFiller()) { - STATIC_ASSERT(kHeapObjectTag == 1); - if (instr->size()->IsConstantOperand()) { - int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - __ mov(scratch, Operand(size - kHeapObjectTag)); - } else { - __ sub(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag)); - } - __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); - Label loop; - __ bind(&loop); - __ sub(scratch, scratch, Operand(kPointerSize), SetCC); - __ str(scratch2, MemOperand(result, scratch)); - __ b(ge, &loop); - } -} - - -void LCodeGen::DoDeferredAllocate(LAllocate* instr) { - Register result = ToRegister(instr->result()); - - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map.
- __ mov(result, Operand(Smi::kZero)); - - PushSafepointRegistersScope scope(this); - if (instr->size()->IsRegister()) { - Register size = ToRegister(instr->size()); - DCHECK(!size.is(result)); - __ SmiTag(size); - __ push(size); - } else { - int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - if (size >= 0 && size <= Smi::kMaxValue) { - __ Push(Smi::FromInt(size)); - } else { - // We should never get here at runtime => abort - __ stop("invalid allocation size"); - return; - } - } - - int flags = AllocateDoubleAlignFlag::encode( - instr->hydrogen()->MustAllocateDoubleAligned()); - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - flags = AllocateTargetSpace::update(flags, OLD_SPACE); - } else { - flags = AllocateTargetSpace::update(flags, NEW_SPACE); - } - __ Push(Smi::FromInt(flags)); - - CallRuntimeFromDeferred( - Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); - __ StoreToSafepointRegisterSlot(r0, result); - - if (instr->hydrogen()->IsAllocationFoldingDominator()) { - AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS; - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - allocation_flags = static_cast(flags | PRETENURE); - } - // If the allocation folding dominator allocate triggered a GC, allocation - // happend in the runtime. We have to reset the top pointer to virtually - // undo the allocation. - ExternalReference allocation_top = - AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags); - Register top_address = scratch0(); - __ sub(r0, r0, Operand(kHeapObjectTag)); - __ mov(top_address, Operand(allocation_top)); - __ str(r0, MemOperand(top_address)); - __ add(r0, r0, Operand(kHeapObjectTag)); - } -} - -void LCodeGen::DoFastAllocate(LFastAllocate* instr) { - DCHECK(instr->hydrogen()->IsAllocationFolded()); - DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator()); - Register result = ToRegister(instr->result()); - Register scratch1 = ToRegister(instr->temp1()); - Register scratch2 = ToRegister(instr->temp2()); - - AllocationFlags flags = ALLOCATION_FOLDED; - if (instr->hydrogen()->MustAllocateDoubleAligned()) { - flags = static_cast(flags | DOUBLE_ALIGNMENT); - } - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - flags = static_cast(flags | PRETENURE); - } - if (instr->size()->IsConstantOperand()) { - int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - CHECK(size <= kMaxRegularHeapObjectSize); - __ FastAllocate(size, result, scratch1, scratch2, flags); - } else { - Register size = ToRegister(instr->size()); - __ FastAllocate(size, result, scratch1, scratch2, flags); - } -} - - -void LCodeGen::DoTypeof(LTypeof* instr) { - DCHECK(ToRegister(instr->value()).is(r3)); - DCHECK(ToRegister(instr->result()).is(r0)); - Label end, do_call; - Register value_register = ToRegister(instr->value()); - __ JumpIfNotSmi(value_register, &do_call); - __ mov(r0, Operand(isolate()->factory()->number_string())); - __ jmp(&end); - __ bind(&do_call); - Callable callable = Builtins::CallableFor(isolate(), Builtins::kTypeof); - CallCode(callable.code(), RelocInfo::CODE_TARGET, instr); - __ bind(&end); -} - - -void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { - Register input = ToRegister(instr->value()); - - Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_), - instr->FalseLabel(chunk_), - input, - instr->type_literal()); - if 
(final_branch_condition != kNoCondition) { - EmitBranch(instr, final_branch_condition); - } -} - - -Condition LCodeGen::EmitTypeofIs(Label* true_label, - Label* false_label, - Register input, - Handle type_name) { - Condition final_branch_condition = kNoCondition; - Register scratch = scratch0(); - Factory* factory = isolate()->factory(); - if (String::Equals(type_name, factory->number_string())) { - __ JumpIfSmi(input, true_label); - __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); - __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); - final_branch_condition = eq; - - } else if (String::Equals(type_name, factory->string_string())) { - __ JumpIfSmi(input, false_label); - __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE); - final_branch_condition = lt; - - } else if (String::Equals(type_name, factory->symbol_string())) { - __ JumpIfSmi(input, false_label); - __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE); - final_branch_condition = eq; - - } else if (String::Equals(type_name, factory->boolean_string())) { - __ CompareRoot(input, Heap::kTrueValueRootIndex); - __ b(eq, true_label); - __ CompareRoot(input, Heap::kFalseValueRootIndex); - final_branch_condition = eq; - - } else if (String::Equals(type_name, factory->undefined_string())) { - __ CompareRoot(input, Heap::kNullValueRootIndex); - __ b(eq, false_label); - __ JumpIfSmi(input, false_label); - // Check for undetectable objects => true. - __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); - __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); - __ tst(scratch, Operand(1 << Map::kIsUndetectable)); - final_branch_condition = ne; - - } else if (String::Equals(type_name, factory->function_string())) { - __ JumpIfSmi(input, false_label); - __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); - __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); - __ and_(scratch, scratch, - Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable))); - __ cmp(scratch, Operand(1 << Map::kIsCallable)); - final_branch_condition = eq; - - } else if (String::Equals(type_name, factory->object_string())) { - __ JumpIfSmi(input, false_label); - __ CompareRoot(input, Heap::kNullValueRootIndex); - __ b(eq, true_label); - STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); - __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE); - __ b(lt, false_label); - // Check for callable or undetectable objects => false. - __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); - __ tst(scratch, - Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable))); - final_branch_condition = eq; - - } else { - __ b(false_label); - } - - return final_branch_condition; -} - - -void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { - if (info()->ShouldEnsureSpaceForLazyDeopt()) { - // Ensure that we have enough space after the previous lazy-bailout - // instruction for patching the code here. - int current_pc = masm()->pc_offset(); - if (current_pc < last_lazy_deopt_pc_ + space_needed) { - // Block literal pool emission for duration of padding. 
- Assembler::BlockConstPoolScope block_const_pool(masm()); - int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; - DCHECK_EQ(0, padding_size % Assembler::kInstrSize); - while (padding_size > 0) { - __ nop(); - padding_size -= Assembler::kInstrSize; - } - } - } - last_lazy_deopt_pc_ = masm()->pc_offset(); -} - - -void LCodeGen::DoLazyBailout(LLazyBailout* instr) { - last_lazy_deopt_pc_ = masm()->pc_offset(); - DCHECK(instr->HasEnvironment()); - LEnvironment* env = instr->environment(); - RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); - safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); -} - - -void LCodeGen::DoDeoptimize(LDeoptimize* instr) { - Deoptimizer::BailoutType type = instr->hydrogen()->type(); - // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the - // needed return address), even though the implementation of LAZY and EAGER is - // now identical. When LAZY is eventually completely folded into EAGER, remove - // the special case below. - if (info()->IsStub() && type == Deoptimizer::EAGER) { - type = Deoptimizer::LAZY; - } - - DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type); -} - - -void LCodeGen::DoDummy(LDummy* instr) { - // Nothing to see here, move on! -} - - -void LCodeGen::DoDummyUse(LDummyUse* instr) { - // Nothing to see here, move on! -} - - -void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { - PushSafepointRegistersScope scope(this); - LoadContextFromDeferred(instr->context()); - __ CallRuntimeSaveDoubles(Runtime::kStackGuard); - RecordSafepointWithLazyDeopt( - instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - DCHECK(instr->HasEnvironment()); - LEnvironment* env = instr->environment(); - safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); -} - - -void LCodeGen::DoStackCheck(LStackCheck* instr) { - class DeferredStackCheck final : public LDeferredCode { - public: - DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { codegen()->DoDeferredStackCheck(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LStackCheck* instr_; - }; - - DCHECK(instr->HasEnvironment()); - LEnvironment* env = instr->environment(); - // There is no LLazyBailout instruction for stack-checks. We have to - // prepare for lazy deoptimization explicitly here. - if (instr->hydrogen()->is_function_entry()) { - // Perform stack overflow check. - Label done; - __ LoadRoot(ip, Heap::kStackLimitRootIndex); - __ cmp(sp, Operand(ip)); - __ b(hs, &done); - Handle stack_check = isolate()->builtins()->StackCheck(); - masm()->MaybeCheckConstPool(); - PredictableCodeSizeScope predictable(masm()); - predictable.ExpectSize(CallCodeSize(stack_check, RelocInfo::CODE_TARGET)); - DCHECK(instr->context()->IsRegister()); - DCHECK(ToRegister(instr->context()).is(cp)); - CallCode(stack_check, RelocInfo::CODE_TARGET, instr); - __ bind(&done); - } else { - DCHECK(instr->hydrogen()->is_backwards_branch()); - // Perform stack overflow check if this goto needs it before jumping. 
- DeferredStackCheck* deferred_stack_check = - new(zone()) DeferredStackCheck(this, instr); - __ LoadRoot(ip, Heap::kStackLimitRootIndex); - __ cmp(sp, Operand(ip)); - __ b(lo, deferred_stack_check->entry()); - EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); - __ bind(instr->done_label()); - deferred_stack_check->SetExit(instr->done_label()); - RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); - // Don't record a deoptimization index for the safepoint here. - // This will be done explicitly when emitting call and the safepoint in - // the deferred code. - } -} - - -void LCodeGen::DoOsrEntry(LOsrEntry* instr) { - // This is a pseudo-instruction that ensures that the environment here is - // properly registered for deoptimization and records the assembler's PC - // offset. - LEnvironment* environment = instr->environment(); - - // If the environment were already registered, we would have no way of - // backpatching it with the spill slot operands. - DCHECK(!environment->HasBeenRegistered()); - RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); - - GenerateOsrPrologue(); -} - - -void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { - Label use_cache, call_runtime; - __ CheckEnumCache(&call_runtime); - - __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); - __ b(&use_cache); - - // Get the set of properties to enumerate. - __ bind(&call_runtime); - __ push(r0); - CallRuntime(Runtime::kForInEnumerate, instr); - __ bind(&use_cache); -} - - -void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { - Register map = ToRegister(instr->map()); - Register result = ToRegister(instr->result()); - Label load_cache, done; - __ EnumLength(result, map); - __ cmp(result, Operand(Smi::kZero)); - __ b(ne, &load_cache); - __ mov(result, Operand(isolate()->factory()->empty_fixed_array())); - __ jmp(&done); - - __ bind(&load_cache); - __ LoadInstanceDescriptors(map, result); - __ ldr(result, - FieldMemOperand(result, DescriptorArray::kEnumCacheBridgeOffset)); - __ ldr(result, - FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); - __ cmp(result, Operand::Zero()); - DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache); - - __ bind(&done); -} - - -void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { - Register object = ToRegister(instr->value()); - Register map = ToRegister(instr->map()); - __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); - __ cmp(map, scratch0()); - DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap); -} - - -void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, - Register result, - Register object, - Register index) { - PushSafepointRegistersScope scope(this); - __ Push(object); - __ Push(index); - __ mov(cp, Operand::Zero()); - __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); - RecordSafepointWithRegisters( - instr->pointer_map(), 2, Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(r0, result); -} - - -void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { - class DeferredLoadMutableDouble final : public LDeferredCode { - public: - DeferredLoadMutableDouble(LCodeGen* codegen, - LLoadFieldByIndex* instr, - Register result, - Register object, - Register index) - : LDeferredCode(codegen), - instr_(instr), - result_(result), - object_(object), - index_(index) { - } - void Generate() override { - codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_); - } - LInstruction* instr() override { return instr_; } - - private: - LLoadFieldByIndex* instr_; - 
Register result_; - Register object_; - Register index_; - }; - - Register object = ToRegister(instr->object()); - Register index = ToRegister(instr->index()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - - DeferredLoadMutableDouble* deferred; - deferred = new(zone()) DeferredLoadMutableDouble( - this, instr, result, object, index); - - Label out_of_object, done; - - __ tst(index, Operand(Smi::FromInt(1))); - __ b(ne, deferred->entry()); - __ mov(index, Operand(index, ASR, 1)); - - __ cmp(index, Operand::Zero()); - __ b(lt, &out_of_object); - - __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index)); - __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize)); - - __ b(&done); - - __ bind(&out_of_object); - __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); - // Index is equal to negated out of object property index plus 1. - STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); - __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index)); - __ ldr(result, FieldMemOperand(scratch, - FixedArray::kHeaderSize - kPointerSize)); - __ bind(deferred->exit()); - __ bind(&done); -} - -#undef __ - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/arm/lithium-codegen-arm.h b/src/crankshaft/arm/lithium-codegen-arm.h deleted file mode 100644 index 77094e55af..0000000000 --- a/src/crankshaft/arm/lithium-codegen-arm.h +++ /dev/null @@ -1,386 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_ARM_LITHIUM_CODEGEN_ARM_H_ -#define V8_CRANKSHAFT_ARM_LITHIUM_CODEGEN_ARM_H_ - -#include "src/ast/scopes.h" -#include "src/crankshaft/arm/lithium-arm.h" -#include "src/crankshaft/arm/lithium-gap-resolver-arm.h" -#include "src/crankshaft/lithium-codegen.h" -#include "src/deoptimizer.h" -#include "src/safepoint-table.h" -#include "src/utils.h" - -namespace v8 { -namespace internal { - -// Forward declarations. -class LDeferredCode; -class SafepointGenerator; - -class LCodeGen: public LCodeGenBase { - public: - LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) - : LCodeGenBase(chunk, assembler, info), - jump_table_(4, info->zone()), - scope_(info->scope()), - deferred_(8, info->zone()), - frame_is_built_(false), - safepoints_(info->zone()), - resolver_(this), - expected_safepoint_kind_(Safepoint::kSimple) { - PopulateDeoptimizationLiteralsWithInlinedFunctions(); - } - - - int LookupDestination(int block_id) const { - return chunk()->LookupDestination(block_id); - } - - bool IsNextEmittedBlock(int block_id) const { - return LookupDestination(block_id) == GetNextEmittedBlock(); - } - - bool NeedsEagerFrame() const { - return HasAllocatedStackSlots() || info()->is_non_deferred_calling() || - !info()->IsStub() || info()->requires_frame(); - } - bool NeedsDeferredFrame() const { - return !NeedsEagerFrame() && info()->is_deferred_calling(); - } - - LinkRegisterStatus GetLinkRegisterState() const { - return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved; - } - - // Support for converting LOperands to assembler types. - // LOperand must be a register. - Register ToRegister(LOperand* op) const; - - // LOperand is loaded into scratch, unless already a register. - Register EmitLoadRegister(LOperand* op, Register scratch); - - // LOperand must be a double register. 
- DwVfpRegister ToDoubleRegister(LOperand* op) const; - - // LOperand is loaded into dbl_scratch, unless already a double register. - DwVfpRegister EmitLoadDoubleRegister(LOperand* op, - SwVfpRegister flt_scratch, - DwVfpRegister dbl_scratch); - int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const; - int32_t ToInteger32(LConstantOperand* op) const; - Smi* ToSmi(LConstantOperand* op) const; - double ToDouble(LConstantOperand* op) const; - Operand ToOperand(LOperand* op); - MemOperand ToMemOperand(LOperand* op) const; - // Returns a MemOperand pointing to the high word of a DoubleStackSlot. - MemOperand ToHighMemOperand(LOperand* op) const; - - bool IsInteger32(LConstantOperand* op) const; - bool IsSmi(LConstantOperand* op) const; - Handle ToHandle(LConstantOperand* op) const; - - // Try to generate code for the entire chunk, but it may fail if the - // chunk contains constructs we cannot handle. Returns true if the - // code generation attempt succeeded. - bool GenerateCode(); - - // Finish the code by setting stack height, safepoint, and bailout - // information on it. - void FinishCode(Handle code); - - // Deferred code support. - void DoDeferredNumberTagD(LNumberTagD* instr); - - enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 }; - void DoDeferredNumberTagIU(LInstruction* instr, - LOperand* value, - LOperand* temp1, - LOperand* temp2, - IntegerSignedness signedness); - - void DoDeferredTaggedToI(LTaggedToI* instr); - void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr); - void DoDeferredStackCheck(LStackCheck* instr); - void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr); - void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr); - void DoDeferredStringCharFromCode(LStringCharFromCode* instr); - void DoDeferredAllocate(LAllocate* instr); - void DoDeferredInstanceMigration(LCheckMaps* instr, Register object); - void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, - Register result, - Register object, - Register index); - - // Parallel move support. - void DoParallelMove(LParallelMove* move); - void DoGap(LGap* instr); - - MemOperand PrepareKeyedOperand(Register key, - Register base, - bool key_is_constant, - int constant_key, - int element_size, - int shift_size, - int base_offset); - - // Emit frame translation commands for an environment. - void WriteTranslation(LEnvironment* environment, Translation* translation); - - // Declare methods that deal with the individual node types. -#define DECLARE_DO(type) void Do##type(L##type* node); - LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) -#undef DECLARE_DO - - private: - Scope* scope() const { return scope_; } - - Register scratch0() { return r9; } - LowDwVfpRegister double_scratch0() { return kScratchDoubleReg; } - - LInstruction* GetNextInstruction(); - - void EmitClassOfTest(Label* if_true, Label* if_false, - Handle class_name, Register input, - Register temporary, Register temporary2); - - bool HasAllocatedStackSlots() const { - return chunk()->HasAllocatedStackSlots(); - } - int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); } - int GetTotalFrameSlotCount() const { - return chunk()->GetTotalFrameSlotCount(); - } - - void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); } - - void SaveCallerDoubles(); - void RestoreCallerDoubles(); - - // Code generation passes. Returns true if code generation should - // continue. 
- void GenerateBodyInstructionPre(LInstruction* instr) override; - bool GeneratePrologue(); - bool GenerateDeferredCode(); - bool GenerateJumpTable(); - bool GenerateSafepointTable(); - - // Generates the custom OSR entrypoint and sets the osr_pc_offset. - void GenerateOsrPrologue(); - - enum SafepointMode { - RECORD_SIMPLE_SAFEPOINT, - RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS - }; - - int CallCodeSize(Handle code, RelocInfo::Mode mode); - - void CallCode( - Handle code, - RelocInfo::Mode mode, - LInstruction* instr, - TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS); - - void CallCodeGeneric( - Handle code, - RelocInfo::Mode mode, - LInstruction* instr, - SafepointMode safepoint_mode, - TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS); - - void CallRuntime(const Runtime::Function* function, - int num_arguments, - LInstruction* instr, - SaveFPRegsMode save_doubles = kDontSaveFPRegs); - - void CallRuntime(Runtime::FunctionId id, - int num_arguments, - LInstruction* instr) { - const Runtime::Function* function = Runtime::FunctionForId(id); - CallRuntime(function, num_arguments, instr); - } - - void CallRuntime(Runtime::FunctionId id, LInstruction* instr) { - const Runtime::Function* function = Runtime::FunctionForId(id); - CallRuntime(function, function->nargs, instr); - } - - void LoadContextFromDeferred(LOperand* context); - void CallRuntimeFromDeferred(Runtime::FunctionId id, - int argc, - LInstruction* instr, - LOperand* context); - - void PrepareForTailCall(const ParameterCount& actual, Register scratch1, - Register scratch2, Register scratch3); - - // Generate a direct call to a known function. Expects the function - // to be in r1. - void CallKnownFunction(Handle function, - int formal_parameter_count, int arity, - bool is_tail_call, LInstruction* instr); - - void RecordSafepointWithLazyDeopt(LInstruction* instr, - SafepointMode safepoint_mode); - - void RegisterEnvironmentForDeoptimization(LEnvironment* environment, - Safepoint::DeoptMode mode); - void DeoptimizeIf(Condition condition, LInstruction* instr, - DeoptimizeReason deopt_reason, - Deoptimizer::BailoutType bailout_type); - void DeoptimizeIf(Condition condition, LInstruction* instr, - DeoptimizeReason deopt_reason); - - void AddToTranslation(LEnvironment* environment, - Translation* translation, - LOperand* op, - bool is_tagged, - bool is_uint32, - int* object_index_pointer, - int* dematerialized_index_pointer); - - Register ToRegister(int index) const; - DwVfpRegister ToDoubleRegister(int index) const; - - MemOperand BuildSeqStringOperand(Register string, - LOperand* index, - String::Encoding encoding); - - void EmitIntegerMathAbs(LMathAbs* instr); - - // Support for recording safepoint information. - void RecordSafepoint(LPointerMap* pointers, - Safepoint::Kind kind, - int arguments, - Safepoint::DeoptMode mode); - void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode); - void RecordSafepoint(Safepoint::DeoptMode mode); - void RecordSafepointWithRegisters(LPointerMap* pointers, - int arguments, - Safepoint::DeoptMode mode); - - static Condition TokenToCondition(Token::Value op, bool is_unsigned); - void EmitGoto(int block); - - // EmitBranch expects to be the last instruction of a block. 
- template <class InstrType> - void EmitBranch(InstrType instr, Condition condition); - template <class InstrType> - void EmitTrueBranch(InstrType instr, Condition condition); - template <class InstrType> - void EmitFalseBranch(InstrType instr, Condition condition); - void EmitNumberUntagD(LNumberUntagD* instr, Register input, - DwVfpRegister result, NumberUntagDMode mode); - - // Emits optimized code for typeof x == "y". Modifies input register. - // Returns the condition on which a final split to - // true and false label should be made, to optimize fallthrough. - Condition EmitTypeofIs(Label* true_label, - Label* false_label, - Register input, - Handle<String> type_name); - - // Emits optimized code for %_IsString(x). Preserves input register. - // Returns the condition on which a final split to - // true and false label should be made, to optimize fallthrough. - Condition EmitIsString(Register input, - Register temp1, - Label* is_not_string, - SmiCheck check_needed); - - // Emits optimized code to deep-copy the contents of statically known - // object graphs (e.g. object literal boilerplate). - void EmitDeepCopy(Handle<JSObject> object, - Register result, - Register source, - int* offset, - AllocationSiteMode mode); - - void EnsureSpaceForLazyDeopt(int space_needed) override; - void DoLoadKeyedExternalArray(LLoadKeyed* instr); - void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr); - void DoLoadKeyedFixedArray(LLoadKeyed* instr); - void DoStoreKeyedExternalArray(LStoreKeyed* instr); - void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr); - void DoStoreKeyedFixedArray(LStoreKeyed* instr); - - template <class T> - void EmitVectorLoadICRegisters(T* instr); - - ZoneList<Deoptimizer::JumpTableEntry> jump_table_; - Scope* const scope_; - ZoneList<LDeferredCode*> deferred_; - bool frame_is_built_; - - // Builder that keeps track of safepoints in the code. The table - // itself is emitted at the end of the generated code. - SafepointTableBuilder safepoints_; - - // Compiler from a set of parallel moves to a sequential list of moves. - LGapResolver resolver_; - - Safepoint::Kind expected_safepoint_kind_; - - class PushSafepointRegistersScope final BASE_EMBEDDED { - public: - explicit PushSafepointRegistersScope(LCodeGen* codegen) - : codegen_(codegen) { - DCHECK(codegen_->info()->is_calling()); - DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); - codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters; - codegen_->masm_->PushSafepointRegisters(); - } - - ~PushSafepointRegistersScope() { - DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters); - codegen_->masm_->PopSafepointRegisters(); - codegen_->expected_safepoint_kind_ = Safepoint::kSimple; - } - - private: - LCodeGen* codegen_; - }; - - friend class LDeferredCode; - friend class LEnvironment; - friend class SafepointGenerator; - DISALLOW_COPY_AND_ASSIGN(LCodeGen); -}; - - -class LDeferredCode : public ZoneObject { - public: - explicit LDeferredCode(LCodeGen* codegen) - : codegen_(codegen), - external_exit_(NULL), - instruction_index_(codegen->current_instruction_) { - codegen->AddDeferredCode(this); - } - - virtual ~LDeferredCode() {} - virtual void Generate() = 0; - virtual LInstruction* instr() = 0; - - void SetExit(Label* exit) { external_exit_ = exit; } - Label* entry() { return &entry_; } - Label* exit() { return external_exit_ != NULL ?
external_exit_ : &exit_; } - int instruction_index() const { return instruction_index_; } - - protected: - LCodeGen* codegen() const { return codegen_; } - MacroAssembler* masm() const { return codegen_->masm(); } - - private: - LCodeGen* codegen_; - Label entry_; - Label exit_; - Label* external_exit_; - int instruction_index_; -}; - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_ARM_LITHIUM_CODEGEN_ARM_H_ diff --git a/src/crankshaft/arm/lithium-gap-resolver-arm.cc b/src/crankshaft/arm/lithium-gap-resolver-arm.cc deleted file mode 100644 index daf439f53c..0000000000 --- a/src/crankshaft/arm/lithium-gap-resolver-arm.cc +++ /dev/null @@ -1,303 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/arm/lithium-gap-resolver-arm.h" -#include "src/assembler-inl.h" -#include "src/crankshaft/arm/lithium-codegen-arm.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - -// We use the root register to spill a value while breaking a cycle in parallel -// moves. We don't need access to roots while resolving the move list and using -// the root register has two advantages: -// - It is not in crankshaft allocatable registers list, so it can't interfere -// with any of the moves we are resolving. -// - We don't need to push it on the stack, as we can reload it with its value -// once we have resolved a cycle. -#define kSavedValueRegister kRootRegister - - -LGapResolver::LGapResolver(LCodeGen* owner) - : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false), - saved_destination_(NULL), need_to_restore_root_(false) { } - - -#define __ ACCESS_MASM(cgen_->masm()) - - -void LGapResolver::Resolve(LParallelMove* parallel_move) { - DCHECK(moves_.is_empty()); - // Build up a worklist of moves. - BuildInitialMoveList(parallel_move); - - for (int i = 0; i < moves_.length(); ++i) { - LMoveOperands move = moves_[i]; - // Skip constants to perform them last. They don't block other moves - // and skipping such moves with register destinations keeps those - // registers free for the whole algorithm. - if (!move.IsEliminated() && !move.source()->IsConstantOperand()) { - root_index_ = i; // Any cycle is found when by reaching this move again. - PerformMove(i); - if (in_cycle_) { - RestoreValue(); - } - } - } - - // Perform the moves with constant sources. - for (int i = 0; i < moves_.length(); ++i) { - if (!moves_[i].IsEliminated()) { - DCHECK(moves_[i].source()->IsConstantOperand()); - EmitMove(i); - } - } - - if (need_to_restore_root_) { - DCHECK(kSavedValueRegister.is(kRootRegister)); - __ InitializeRootRegister(); - need_to_restore_root_ = false; - } - - moves_.Rewind(0); -} - - -void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) { - // Perform a linear sweep of the moves to add them to the initial list of - // moves to perform, ignoring any move that is redundant (the source is - // the same as the destination, the destination is ignored and - // unallocated, or the move was already eliminated). - const ZoneList* moves = parallel_move->move_operands(); - for (int i = 0; i < moves->length(); ++i) { - LMoveOperands move = moves->at(i); - if (!move.IsRedundant()) moves_.Add(move, cgen_->zone()); - } - Verify(); -} - - -void LGapResolver::PerformMove(int index) { - // Each call to this function performs a move and deletes it from the move - // graph. 
We first recursively perform any move blocking this one. We - // mark a move as "pending" on entry to PerformMove in order to detect - // cycles in the move graph. - - // We can only find a cycle, when doing a depth-first traversal of moves, - // be encountering the starting move again. So by spilling the source of - // the starting move, we break the cycle. All moves are then unblocked, - // and the starting move is completed by writing the spilled value to - // its destination. All other moves from the spilled source have been - // completed prior to breaking the cycle. - // An additional complication is that moves to MemOperands with large - // offsets (more than 1K or 4K) require us to spill this spilled value to - // the stack, to free up the register. - DCHECK(!moves_[index].IsPending()); - DCHECK(!moves_[index].IsRedundant()); - - // Clear this move's destination to indicate a pending move. The actual - // destination is saved in a stack allocated local. Multiple moves can - // be pending because this function is recursive. - DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated. - LOperand* destination = moves_[index].destination(); - moves_[index].set_destination(NULL); - - // Perform a depth-first traversal of the move graph to resolve - // dependencies. Any unperformed, unpending move with a source the same - // as this one's destination blocks this one so recursively perform all - // such moves. - for (int i = 0; i < moves_.length(); ++i) { - LMoveOperands other_move = moves_[i]; - if (other_move.Blocks(destination) && !other_move.IsPending()) { - PerformMove(i); - // If there is a blocking, pending move it must be moves_[root_index_] - // and all other moves with the same source as moves_[root_index_] are - // sucessfully executed (because they are cycle-free) by this loop. - } - } - - // We are about to resolve this move and don't need it marked as - // pending, so restore its destination. - moves_[index].set_destination(destination); - - // The move may be blocked on a pending move, which must be the starting move. - // In this case, we have a cycle, and we save the source of this move to - // a scratch register to break it. - LMoveOperands other_move = moves_[root_index_]; - if (other_move.Blocks(destination)) { - DCHECK(other_move.IsPending()); - BreakCycle(index); - return; - } - - // This move is no longer blocked. - EmitMove(index); -} - - -void LGapResolver::Verify() { -#ifdef ENABLE_SLOW_DCHECKS - // No operand should be the destination for more than one move. - for (int i = 0; i < moves_.length(); ++i) { - LOperand* destination = moves_[i].destination(); - for (int j = i + 1; j < moves_.length(); ++j) { - SLOW_DCHECK(!destination->Equals(moves_[j].destination())); - } - } -#endif -} - - -void LGapResolver::BreakCycle(int index) { - // We save in a register the source of that move and we remember its - // destination. Then we mark this move as resolved so the cycle is - // broken and we can perform the other moves. 
- DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source())); - DCHECK(!in_cycle_); - in_cycle_ = true; - LOperand* source = moves_[index].source(); - saved_destination_ = moves_[index].destination(); - if (source->IsRegister()) { - need_to_restore_root_ = true; - __ mov(kSavedValueRegister, cgen_->ToRegister(source)); - } else if (source->IsStackSlot()) { - need_to_restore_root_ = true; - __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source)); - } else if (source->IsDoubleRegister()) { - __ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source)); - } else if (source->IsDoubleStackSlot()) { - __ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source)); - } else { - UNREACHABLE(); - } - // This move will be done by restoring the saved value to the destination. - moves_[index].Eliminate(); -} - - -void LGapResolver::RestoreValue() { - DCHECK(in_cycle_); - DCHECK(saved_destination_ != NULL); - - if (saved_destination_->IsRegister()) { - __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister); - } else if (saved_destination_->IsStackSlot()) { - __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_)); - } else if (saved_destination_->IsDoubleRegister()) { - __ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg); - } else if (saved_destination_->IsDoubleStackSlot()) { - __ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_)); - } else { - UNREACHABLE(); - } - - in_cycle_ = false; - saved_destination_ = NULL; -} - - -void LGapResolver::EmitMove(int index) { - LOperand* source = moves_[index].source(); - LOperand* destination = moves_[index].destination(); - - // Dispatch on the source and destination operand kinds. Not all - // combinations are possible. - - if (source->IsRegister()) { - Register source_register = cgen_->ToRegister(source); - if (destination->IsRegister()) { - __ mov(cgen_->ToRegister(destination), source_register); - } else { - DCHECK(destination->IsStackSlot()); - __ str(source_register, cgen_->ToMemOperand(destination)); - } - } else if (source->IsStackSlot()) { - MemOperand source_operand = cgen_->ToMemOperand(source); - if (destination->IsRegister()) { - __ ldr(cgen_->ToRegister(destination), source_operand); - } else { - DCHECK(destination->IsStackSlot()); - MemOperand destination_operand = cgen_->ToMemOperand(destination); - if (!destination_operand.OffsetIsUint12Encodable()) { - // ip is overwritten while saving the value to the destination. - // Therefore we can't use ip. It is OK if the read from the source - // destroys ip, since that happens before the value is read. - __ vldr(kScratchDoubleReg.low(), source_operand); - __ vstr(kScratchDoubleReg.low(), destination_operand); - } else { - __ ldr(ip, source_operand); - __ str(ip, destination_operand); - } - } - - } else if (source->IsConstantOperand()) { - LConstantOperand* constant_source = LConstantOperand::cast(source); - if (destination->IsRegister()) { - Register dst = cgen_->ToRegister(destination); - Representation r = cgen_->IsSmi(constant_source) - ? 
Representation::Smi() : Representation::Integer32(); - if (cgen_->IsInteger32(constant_source)) { - __ mov(dst, Operand(cgen_->ToRepresentation(constant_source, r))); - } else { - __ Move(dst, cgen_->ToHandle(constant_source)); - } - } else if (destination->IsDoubleRegister()) { - DwVfpRegister result = cgen_->ToDoubleRegister(destination); - double v = cgen_->ToDouble(constant_source); - __ Vmov(result, v, ip); - } else { - DCHECK(destination->IsStackSlot()); - DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone. - need_to_restore_root_ = true; - Representation r = cgen_->IsSmi(constant_source) - ? Representation::Smi() : Representation::Integer32(); - if (cgen_->IsInteger32(constant_source)) { - __ mov(kSavedValueRegister, - Operand(cgen_->ToRepresentation(constant_source, r))); - } else { - __ Move(kSavedValueRegister, cgen_->ToHandle(constant_source)); - } - __ str(kSavedValueRegister, cgen_->ToMemOperand(destination)); - } - - } else if (source->IsDoubleRegister()) { - DwVfpRegister source_register = cgen_->ToDoubleRegister(source); - if (destination->IsDoubleRegister()) { - __ vmov(cgen_->ToDoubleRegister(destination), source_register); - } else { - DCHECK(destination->IsDoubleStackSlot()); - __ vstr(source_register, cgen_->ToMemOperand(destination)); - } - - } else if (source->IsDoubleStackSlot()) { - MemOperand source_operand = cgen_->ToMemOperand(source); - if (destination->IsDoubleRegister()) { - __ vldr(cgen_->ToDoubleRegister(destination), source_operand); - } else { - DCHECK(destination->IsDoubleStackSlot()); - MemOperand destination_operand = cgen_->ToMemOperand(destination); - if (in_cycle_) { - // kScratchDoubleReg was used to break the cycle. - __ vpush(kScratchDoubleReg); - __ vldr(kScratchDoubleReg, source_operand); - __ vstr(kScratchDoubleReg, destination_operand); - __ vpop(kScratchDoubleReg); - } else { - __ vldr(kScratchDoubleReg, source_operand); - __ vstr(kScratchDoubleReg, destination_operand); - } - } - } else { - UNREACHABLE(); - } - - moves_[index].Eliminate(); -} - - -#undef __ - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/arm/lithium-gap-resolver-arm.h b/src/crankshaft/arm/lithium-gap-resolver-arm.h deleted file mode 100644 index 59413c5772..0000000000 --- a/src/crankshaft/arm/lithium-gap-resolver-arm.h +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_ARM_LITHIUM_GAP_RESOLVER_ARM_H_ -#define V8_CRANKSHAFT_ARM_LITHIUM_GAP_RESOLVER_ARM_H_ - -#include "src/crankshaft/lithium.h" - -namespace v8 { -namespace internal { - -class LCodeGen; -class LGapResolver; - -class LGapResolver final BASE_EMBEDDED { - public: - explicit LGapResolver(LCodeGen* owner); - - // Resolve a set of parallel moves, emitting assembler instructions. - void Resolve(LParallelMove* parallel_move); - - private: - // Build the initial list of moves. - void BuildInitialMoveList(LParallelMove* parallel_move); - - // Perform the move at the moves_ index in question (possibly requiring - // other moves to satisfy dependencies). - void PerformMove(int index); - - // If a cycle is found in the series of moves, save the blocking value to - // a scratch register. The cycle must be found by hitting the root of the - // depth-first search. - void BreakCycle(int index); - - // After a cycle has been resolved, restore the value from the scratch - // register to its proper destination. 
- void RestoreValue(); - - // Emit a move and remove it from the move graph. - void EmitMove(int index); - - // Verify the move list before performing moves. - void Verify(); - - LCodeGen* cgen_; - - // List of moves not yet resolved. - ZoneList moves_; - - int root_index_; - bool in_cycle_; - LOperand* saved_destination_; - - // We use the root register as a scratch in a few places. When that happens, - // this flag is set to indicate that it needs to be restored. - bool need_to_restore_root_; -}; - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_ARM_LITHIUM_GAP_RESOLVER_ARM_H_ diff --git a/src/crankshaft/arm64/OWNERS b/src/crankshaft/arm64/OWNERS deleted file mode 100644 index 906a5ce641..0000000000 --- a/src/crankshaft/arm64/OWNERS +++ /dev/null @@ -1 +0,0 @@ -rmcilroy@chromium.org diff --git a/src/crankshaft/arm64/delayed-masm-arm64-inl.h b/src/crankshaft/arm64/delayed-masm-arm64-inl.h deleted file mode 100644 index 8932c61a9b..0000000000 --- a/src/crankshaft/arm64/delayed-masm-arm64-inl.h +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_INL_H_ -#define V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_INL_H_ - -#include "src/arm64/macro-assembler-arm64-inl.h" -#include "src/crankshaft/arm64/delayed-masm-arm64.h" - -namespace v8 { -namespace internal { - -#define __ ACCESS_MASM(masm_) - -DelayedMasm::DelayedMasm(LCodeGen* owner, MacroAssembler* masm, - const Register& scratch_register) - : cgen_(owner), - masm_(masm), - scratch_register_(scratch_register), - scratch_register_used_(false), - pending_(kNone), - saved_value_(0) { -#ifdef DEBUG - pending_register_ = no_reg; - pending_value_ = 0; - pending_pc_ = 0; - scratch_register_acquired_ = false; -#endif -} - -void DelayedMasm::EndDelayedUse() { - EmitPending(); - DCHECK(!scratch_register_acquired_); - ResetSavedValue(); -} - - -void DelayedMasm::Mov(const Register& rd, - const Operand& operand, - DiscardMoveMode discard_mode) { - EmitPending(); - DCHECK(!IsScratchRegister(rd) || scratch_register_acquired_); - __ Mov(rd, operand, discard_mode); -} - -void DelayedMasm::Fmov(VRegister fd, VRegister fn) { - EmitPending(); - __ Fmov(fd, fn); -} - -void DelayedMasm::Fmov(VRegister fd, double imm) { - EmitPending(); - __ Fmov(fd, imm); -} - - -void DelayedMasm::LoadObject(Register result, Handle object) { - EmitPending(); - DCHECK(!IsScratchRegister(result) || scratch_register_acquired_); - __ LoadObject(result, object); -} - -void DelayedMasm::InitializeRootRegister() { masm_->InitializeRootRegister(); } - -#undef __ - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_INL_H_ diff --git a/src/crankshaft/arm64/delayed-masm-arm64.cc b/src/crankshaft/arm64/delayed-masm-arm64.cc deleted file mode 100644 index c6a03939b9..0000000000 --- a/src/crankshaft/arm64/delayed-masm-arm64.cc +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#if V8_TARGET_ARCH_ARM64 - -#include "src/crankshaft/arm64/delayed-masm-arm64.h" -#include "src/arm64/macro-assembler-arm64-inl.h" -#include "src/crankshaft/arm64/lithium-codegen-arm64.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - -#define __ ACCESS_MASM(masm_) - - -void DelayedMasm::StackSlotMove(LOperand* src, LOperand* dst) { - DCHECK((src->IsStackSlot() && dst->IsStackSlot()) || - (src->IsDoubleStackSlot() && dst->IsDoubleStackSlot())); - MemOperand src_operand = cgen_->ToMemOperand(src); - MemOperand dst_operand = cgen_->ToMemOperand(dst); - if (pending_ == kStackSlotMove) { - DCHECK(pending_pc_ == masm_->pc_offset()); - UseScratchRegisterScope scope(masm_); - DoubleRegister temp1 = scope.AcquireD(); - DoubleRegister temp2 = scope.AcquireD(); - switch (MemOperand::AreConsistentForPair(pending_address_src_, - src_operand)) { - case MemOperand::kNotPair: - __ Ldr(temp1, pending_address_src_); - __ Ldr(temp2, src_operand); - break; - case MemOperand::kPairAB: - __ Ldp(temp1, temp2, pending_address_src_); - break; - case MemOperand::kPairBA: - __ Ldp(temp2, temp1, src_operand); - break; - } - switch (MemOperand::AreConsistentForPair(pending_address_dst_, - dst_operand)) { - case MemOperand::kNotPair: - __ Str(temp1, pending_address_dst_); - __ Str(temp2, dst_operand); - break; - case MemOperand::kPairAB: - __ Stp(temp1, temp2, pending_address_dst_); - break; - case MemOperand::kPairBA: - __ Stp(temp2, temp1, dst_operand); - break; - } - ResetPending(); - return; - } - - EmitPending(); - pending_ = kStackSlotMove; - pending_address_src_ = src_operand; - pending_address_dst_ = dst_operand; -#ifdef DEBUG - pending_pc_ = masm_->pc_offset(); -#endif -} - - -void DelayedMasm::StoreConstant(uint64_t value, const MemOperand& operand) { - DCHECK(!scratch_register_acquired_); - if ((pending_ == kStoreConstant) && (value == pending_value_)) { - MemOperand::PairResult result = - MemOperand::AreConsistentForPair(pending_address_dst_, operand); - if (result != MemOperand::kNotPair) { - const MemOperand& dst = - (result == MemOperand::kPairAB) ? 
- pending_address_dst_ : - operand; - DCHECK(pending_pc_ == masm_->pc_offset()); - if (pending_value_ == 0) { - __ Stp(xzr, xzr, dst); - } else { - SetSavedValue(pending_value_); - __ Stp(ScratchRegister(), ScratchRegister(), dst); - } - ResetPending(); - return; - } - } - - EmitPending(); - pending_ = kStoreConstant; - pending_address_dst_ = operand; - pending_value_ = value; -#ifdef DEBUG - pending_pc_ = masm_->pc_offset(); -#endif -} - - -void DelayedMasm::Load(const CPURegister& rd, const MemOperand& operand) { - if ((pending_ == kLoad) && - pending_register_.IsSameSizeAndType(rd)) { - switch (MemOperand::AreConsistentForPair(pending_address_src_, operand)) { - case MemOperand::kNotPair: - break; - case MemOperand::kPairAB: - DCHECK(pending_pc_ == masm_->pc_offset()); - DCHECK(!IsScratchRegister(pending_register_) || - scratch_register_acquired_); - DCHECK(!IsScratchRegister(rd) || scratch_register_acquired_); - __ Ldp(pending_register_, rd, pending_address_src_); - ResetPending(); - return; - case MemOperand::kPairBA: - DCHECK(pending_pc_ == masm_->pc_offset()); - DCHECK(!IsScratchRegister(pending_register_) || - scratch_register_acquired_); - DCHECK(!IsScratchRegister(rd) || scratch_register_acquired_); - __ Ldp(rd, pending_register_, operand); - ResetPending(); - return; - } - } - - EmitPending(); - pending_ = kLoad; - pending_register_ = rd; - pending_address_src_ = operand; -#ifdef DEBUG - pending_pc_ = masm_->pc_offset(); -#endif -} - - -void DelayedMasm::Store(const CPURegister& rd, const MemOperand& operand) { - if ((pending_ == kStore) && - pending_register_.IsSameSizeAndType(rd)) { - switch (MemOperand::AreConsistentForPair(pending_address_dst_, operand)) { - case MemOperand::kNotPair: - break; - case MemOperand::kPairAB: - DCHECK(pending_pc_ == masm_->pc_offset()); - __ Stp(pending_register_, rd, pending_address_dst_); - ResetPending(); - return; - case MemOperand::kPairBA: - DCHECK(pending_pc_ == masm_->pc_offset()); - __ Stp(rd, pending_register_, operand); - ResetPending(); - return; - } - } - - EmitPending(); - pending_ = kStore; - pending_register_ = rd; - pending_address_dst_ = operand; -#ifdef DEBUG - pending_pc_ = masm_->pc_offset(); -#endif -} - - -void DelayedMasm::EmitPending() { - DCHECK((pending_ == kNone) || (pending_pc_ == masm_->pc_offset())); - switch (pending_) { - case kNone: - return; - case kStoreConstant: - if (pending_value_ == 0) { - __ Str(xzr, pending_address_dst_); - } else { - SetSavedValue(pending_value_); - __ Str(ScratchRegister(), pending_address_dst_); - } - break; - case kLoad: - DCHECK(!IsScratchRegister(pending_register_) || - scratch_register_acquired_); - __ Ldr(pending_register_, pending_address_src_); - break; - case kStore: - __ Str(pending_register_, pending_address_dst_); - break; - case kStackSlotMove: { - UseScratchRegisterScope scope(masm_); - DoubleRegister temp = scope.AcquireD(); - __ Ldr(temp, pending_address_src_); - __ Str(temp, pending_address_dst_); - break; - } - } - ResetPending(); -} - -} // namespace internal -} // namespace v8 - -#endif // V8_TARGET_ARCH_ARM64 diff --git a/src/crankshaft/arm64/delayed-masm-arm64.h b/src/crankshaft/arm64/delayed-masm-arm64.h deleted file mode 100644 index 1cf416021e..0000000000 --- a/src/crankshaft/arm64/delayed-masm-arm64.h +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#ifndef V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_H_ -#define V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_H_ - -#include "src/crankshaft/lithium.h" - -namespace v8 { -namespace internal { - -class LCodeGen; - -// This class delays the generation of some instructions. This way, we have a -// chance to merge two instructions in one (with load/store pair). -// Each instruction must either: -// - merge with the pending instruction and generate just one instruction. -// - emit the pending instruction and then generate the instruction (or set the -// pending instruction). -class DelayedMasm BASE_EMBEDDED { - public: - inline DelayedMasm(LCodeGen* owner, MacroAssembler* masm, - const Register& scratch_register); - - ~DelayedMasm() { - DCHECK(!scratch_register_acquired_); - DCHECK(!scratch_register_used_); - DCHECK(!pending()); - } - inline void EndDelayedUse(); - - const Register& ScratchRegister() { - scratch_register_used_ = true; - return scratch_register_; - } - bool IsScratchRegister(const CPURegister& reg) { - return reg.Is(scratch_register_); - } - bool scratch_register_used() const { return scratch_register_used_; } - void reset_scratch_register_used() { scratch_register_used_ = false; } - // Acquire/Release scratch register for use outside this class. - void AcquireScratchRegister() { - EmitPending(); - ResetSavedValue(); -#ifdef DEBUG - DCHECK(!scratch_register_acquired_); - scratch_register_acquired_ = true; -#endif - } - void ReleaseScratchRegister() { -#ifdef DEBUG - DCHECK(scratch_register_acquired_); - scratch_register_acquired_ = false; -#endif - } - bool pending() { return pending_ != kNone; } - - // Extra layer over the macro-assembler instructions (which emits the - // potential pending instruction). - inline void Mov(const Register& rd, - const Operand& operand, - DiscardMoveMode discard_mode = kDontDiscardForSameWReg); - inline void Fmov(VRegister fd, VRegister fn); - inline void Fmov(VRegister fd, double imm); - inline void LoadObject(Register result, Handle object); - // Instructions which try to merge which the pending instructions. - void StackSlotMove(LOperand* src, LOperand* dst); - // StoreConstant can only be used if the scratch register is not acquired. - void StoreConstant(uint64_t value, const MemOperand& operand); - void Load(const CPURegister& rd, const MemOperand& operand); - void Store(const CPURegister& rd, const MemOperand& operand); - // Emit the potential pending instruction. - void EmitPending(); - // Reset the pending state. - void ResetPending() { - pending_ = kNone; -#ifdef DEBUG - pending_register_ = no_reg; - MemOperand tmp; - pending_address_src_ = tmp; - pending_address_dst_ = tmp; - pending_value_ = 0; - pending_pc_ = 0; -#endif - } - inline void InitializeRootRegister(); - - private: - // Set the saved value and load the ScratchRegister with it. - void SetSavedValue(uint64_t saved_value) { - DCHECK(saved_value != 0); - if (saved_value_ != saved_value) { - masm_->Mov(ScratchRegister(), saved_value); - saved_value_ = saved_value; - } - } - // Reset the saved value (i.e. the value of ScratchRegister is no longer - // known). - void ResetSavedValue() { - saved_value_ = 0; - } - - LCodeGen* cgen_; - MacroAssembler* masm_; - - // Register used to store a constant. - Register scratch_register_; - bool scratch_register_used_; - - // Sometimes we store or load two values in two contiguous stack slots. - // In this case, we try to use the ldp/stp instructions to reduce code size. 
- // To be able to do that, instead of generating directly the instructions, - // we register with the following fields that an instruction needs to be - // generated. Then with the next instruction, if the instruction is - // consistent with the pending one for stp/ldp we generate ldp/stp. Else, - // if they are not consistent, we generate the pending instruction and we - // register the new instruction (which becomes pending). - - // Enumeration of instructions which can be pending. - enum Pending { - kNone, - kStoreConstant, - kLoad, kStore, - kStackSlotMove - }; - // The pending instruction. - Pending pending_; - // For kLoad, kStore: register which must be loaded/stored. - CPURegister pending_register_; - // For kLoad, kStackSlotMove: address of the load. - MemOperand pending_address_src_; - // For kStoreConstant, kStore, kStackSlotMove: address of the store. - MemOperand pending_address_dst_; - // For kStoreConstant: value to be stored. - uint64_t pending_value_; - // Value held into the ScratchRegister if the saved_value_ is not 0. - // For 0, we use xzr. - uint64_t saved_value_; -#ifdef DEBUG - // Address where the pending instruction must be generated. It's only used to - // check that nothing else has been generated since we set the pending - // instruction. - int pending_pc_; - // If true, the scratch register has been acquired outside this class. The - // scratch register can no longer be used for constants. - bool scratch_register_acquired_; -#endif -}; - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_H_ diff --git a/src/crankshaft/arm64/lithium-arm64.cc b/src/crankshaft/arm64/lithium-arm64.cc deleted file mode 100644 index 9ad2a62638..0000000000 --- a/src/crankshaft/arm64/lithium-arm64.cc +++ /dev/null @@ -1,2475 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/arm64/lithium-arm64.h" - -#include - -#include "src/arm64/assembler-arm64-inl.h" -#include "src/crankshaft/arm64/lithium-codegen-arm64.h" -#include "src/crankshaft/lithium-inl.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - -#define DEFINE_COMPILE(type) \ - void L##type::CompileToNative(LCodeGen* generator) { \ - generator->Do##type(this); \ - } -LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE) -#undef DEFINE_COMPILE - -#ifdef DEBUG -void LInstruction::VerifyCall() { - // Call instructions can use only fixed registers as temporaries and - // outputs because all registers are blocked by the calling convention. - // Inputs operands must use a fixed register or use-at-start policy or - // a non-register policy. 
- DCHECK(Output() == NULL || - LUnallocated::cast(Output())->HasFixedPolicy() || - !LUnallocated::cast(Output())->HasRegisterPolicy()); - for (UseIterator it(this); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - DCHECK(operand->HasFixedPolicy() || - operand->IsUsedAtStart()); - } - for (TempIterator it(this); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy()); - } -} -#endif - - -void LLabel::PrintDataTo(StringStream* stream) { - LGap::PrintDataTo(stream); - LLabel* rep = replacement(); - if (rep != NULL) { - stream->Add(" Dead block replaced with B%d", rep->block_id()); - } -} - - -void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { - arguments()->PrintTo(stream); - stream->Add(" length "); - length()->PrintTo(stream); - stream->Add(" index "); - index()->PrintTo(stream); -} - - -void LBranch::PrintDataTo(StringStream* stream) { - stream->Add("B%d | B%d on ", true_block_id(), false_block_id()); - value()->PrintTo(stream); -} - - -void LCallWithDescriptor::PrintDataTo(StringStream* stream) { - for (int i = 0; i < InputCount(); i++) { - InputAt(i)->PrintTo(stream); - stream->Add(" "); - } - stream->Add("#%d / ", arity()); -} - - -void LCallNewArray::PrintDataTo(StringStream* stream) { - stream->Add("= "); - constructor()->PrintTo(stream); - stream->Add(" #%d / ", arity()); - ElementsKind kind = hydrogen()->elements_kind(); - stream->Add(" (%s) ", ElementsKindToString(kind)); -} - -void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if class_of_test("); - value()->PrintTo(stream); - stream->Add(", \"%o\") then B%d else B%d", *hydrogen()->class_name(), - true_block_id(), false_block_id()); -} - -void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if "); - left()->PrintTo(stream); - stream->Add(" %s ", Token::String(op())); - right()->PrintTo(stream); - stream->Add(" then B%d else B%d", true_block_id(), false_block_id()); -} - -bool LGoto::HasInterestingComment(LCodeGen* gen) const { - return !gen->IsNextEmittedBlock(block_id()); -} - - -void LGoto::PrintDataTo(StringStream* stream) { - stream->Add("B%d", block_id()); -} - - -void LInnerAllocatedObject::PrintDataTo(StringStream* stream) { - stream->Add(" = "); - base_object()->PrintTo(stream); - stream->Add(" + "); - offset()->PrintTo(stream); -} - - -void LInvokeFunction::PrintDataTo(StringStream* stream) { - stream->Add("= "); - function()->PrintTo(stream); - stream->Add(" #%d / ", arity()); -} - - -void LInstruction::PrintTo(StringStream* stream) { - stream->Add("%s ", this->Mnemonic()); - - PrintOutputOperandTo(stream); - - PrintDataTo(stream); - - if (HasEnvironment()) { - stream->Add(" "); - environment()->PrintTo(stream); - } - - if (HasPointerMap()) { - stream->Add(" "); - pointer_map()->PrintTo(stream); - } -} - - -void LInstruction::PrintDataTo(StringStream* stream) { - stream->Add("= "); - for (int i = 0; i < InputCount(); i++) { - if (i > 0) stream->Add(" "); - if (InputAt(i) == NULL) { - stream->Add("NULL"); - } else { - InputAt(i)->PrintTo(stream); - } - } -} - - -void LInstruction::PrintOutputOperandTo(StringStream* stream) { - if (HasResult()) result()->PrintTo(stream); -} - - -void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if has_instance_type("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void 
LIsStringAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_string("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LIsSmiAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_smi("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if typeof "); - value()->PrintTo(stream); - stream->Add(" == \"%s\" then B%d else B%d", - hydrogen()->type_literal()->ToCString().get(), - true_block_id(), false_block_id()); -} - - -void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_undetectable("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -bool LGap::IsRedundant() const { - for (int i = 0; i < 4; i++) { - if ((parallel_moves_[i] != NULL) && !parallel_moves_[i]->IsRedundant()) { - return false; - } - } - - return true; -} - - -void LGap::PrintDataTo(StringStream* stream) { - for (int i = 0; i < 4; i++) { - stream->Add("("); - if (parallel_moves_[i] != NULL) { - parallel_moves_[i]->PrintDataTo(stream); - } - stream->Add(") "); - } -} - - -void LLoadContextSlot::PrintDataTo(StringStream* stream) { - context()->PrintTo(stream); - stream->Add("[%d]", slot_index()); -} - - -void LStoreCodeEntry::PrintDataTo(StringStream* stream) { - stream->Add(" = "); - function()->PrintTo(stream); - stream->Add(".code_entry = "); - code_object()->PrintTo(stream); -} - - -void LStoreContextSlot::PrintDataTo(StringStream* stream) { - context()->PrintTo(stream); - stream->Add("[%d] <- ", slot_index()); - value()->PrintTo(stream); -} - - -void LStoreNamedField::PrintDataTo(StringStream* stream) { - object()->PrintTo(stream); - std::ostringstream os; - os << hydrogen()->access(); - stream->Add(os.str().c_str()); - stream->Add(" <- "); - value()->PrintTo(stream); -} - - -void LStringCompareAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if string_compare("); - left()->PrintTo(stream); - right()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LTransitionElementsKind::PrintDataTo(StringStream* stream) { - object()->PrintTo(stream); - stream->Add("%p -> %p", *original_map(), *transitioned_map()); -} - - -template -void LUnaryMathOperation::PrintDataTo(StringStream* stream) { - value()->PrintTo(stream); -} - - -const char* LArithmeticD::Mnemonic() const { - switch (op()) { - case Token::ADD: return "add-d"; - case Token::SUB: return "sub-d"; - case Token::MUL: return "mul-d"; - case Token::DIV: return "div-d"; - case Token::MOD: return "mod-d"; - default: - UNREACHABLE(); - } -} - - -const char* LArithmeticT::Mnemonic() const { - switch (op()) { - case Token::ADD: return "add-t"; - case Token::SUB: return "sub-t"; - case Token::MUL: return "mul-t"; - case Token::MOD: return "mod-t"; - case Token::DIV: return "div-t"; - case Token::BIT_AND: return "bit-and-t"; - case Token::BIT_OR: return "bit-or-t"; - case Token::BIT_XOR: return "bit-xor-t"; - case Token::ROR: return "ror-t"; - case Token::SHL: return "shl-t"; - case Token::SAR: return "sar-t"; - case Token::SHR: return "shr-t"; - default: - UNREACHABLE(); - } -} - - -LUnallocated* LChunkBuilder::ToUnallocated(Register reg) { - return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code()); -} - - -LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) { - return new 
(zone()) - LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code()); -} - - -LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) { - if (value->EmitAtUses()) { - HInstruction* instr = HInstruction::cast(value); - VisitInstruction(instr); - } - operand->set_virtual_register(value->id()); - return operand; -} - - -LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) { - return Use(value, ToUnallocated(fixed_register)); -} - - -LOperand* LChunkBuilder::UseFixedDouble(HValue* value, - DoubleRegister fixed_register) { - return Use(value, ToUnallocated(fixed_register)); -} - - -LOperand* LChunkBuilder::UseRegister(HValue* value) { - return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER)); -} - - -LOperand* LChunkBuilder::UseRegisterAndClobber(HValue* value) { - return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER)); -} - - -LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) { - return Use(value, - new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER, - LUnallocated::USED_AT_START)); -} - - -LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) { - return value->IsConstant() ? UseConstant(value) : UseRegister(value); -} - - -LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) { - return value->IsConstant() ? UseConstant(value) : UseRegisterAtStart(value); -} - - -LConstantOperand* LChunkBuilder::UseConstant(HValue* value) { - return chunk_->DefineConstantOperand(HConstant::cast(value)); -} - - -LOperand* LChunkBuilder::UseAny(HValue* value) { - return value->IsConstant() - ? UseConstant(value) - : Use(value, new(zone()) LUnallocated(LUnallocated::ANY)); -} - - -LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr, - LUnallocated* result) { - result->set_virtual_register(current_instruction_->id()); - instr->set_result(result); - return instr; -} - - -LInstruction* LChunkBuilder::DefineAsRegister( - LTemplateResultInstruction<1>* instr) { - return Define(instr, - new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER)); -} - - -LInstruction* LChunkBuilder::DefineAsSpilled( - LTemplateResultInstruction<1>* instr, int index) { - return Define(instr, - new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index)); -} - - -LInstruction* LChunkBuilder::DefineSameAsFirst( - LTemplateResultInstruction<1>* instr) { - return Define(instr, - new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT)); -} - - -LInstruction* LChunkBuilder::DefineFixed( - LTemplateResultInstruction<1>* instr, Register reg) { - return Define(instr, ToUnallocated(reg)); -} - - -LInstruction* LChunkBuilder::DefineFixedDouble( - LTemplateResultInstruction<1>* instr, DoubleRegister reg) { - return Define(instr, ToUnallocated(reg)); -} - - -LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, - HInstruction* hinstr, - CanDeoptimize can_deoptimize) { - info()->MarkAsNonDeferredCalling(); -#ifdef DEBUG - instr->VerifyCall(); -#endif - instr->MarkAsCall(); - instr = AssignPointerMap(instr); - - // If instruction does not have side-effects lazy deoptimization - // after the call will try to deoptimize to the point before the call. - // Thus we still need to attach environment to this call even if - // call sequence can not deoptimize eagerly. 
- bool needs_environment = - (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || - !hinstr->HasObservableSideEffects(); - if (needs_environment && !instr->HasEnvironment()) { - instr = AssignEnvironment(instr); - // We can't really figure out if the environment is needed or not. - instr->environment()->set_has_been_used(); - } - - return instr; -} - - -LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { - DCHECK(!instr->HasPointerMap()); - instr->set_pointer_map(new(zone()) LPointerMap(zone())); - return instr; -} - - -LUnallocated* LChunkBuilder::TempRegister() { - LUnallocated* operand = - new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER); - int vreg = allocator_->GetVirtualRegister(); - if (!allocator_->AllocationOk()) { - Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister); - vreg = 0; - } - operand->set_virtual_register(vreg); - return operand; -} - - -LUnallocated* LChunkBuilder::TempDoubleRegister() { - LUnallocated* operand = - new(zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER); - int vreg = allocator_->GetVirtualRegister(); - if (!allocator_->AllocationOk()) { - Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister); - vreg = 0; - } - operand->set_virtual_register(vreg); - return operand; -} - -int LPlatformChunk::GetNextSpillIndex() { return current_frame_slots_++; } - -LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) { - int index = GetNextSpillIndex(); - if (kind == DOUBLE_REGISTERS) { - return LDoubleStackSlot::Create(index, zone()); - } else { - DCHECK(kind == GENERAL_REGISTERS); - return LStackSlot::Create(index, zone()); - } -} - - -LOperand* LChunkBuilder::FixedTemp(Register reg) { - LUnallocated* operand = ToUnallocated(reg); - DCHECK(operand->HasFixedPolicy()); - return operand; -} - - -LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) { - LUnallocated* operand = ToUnallocated(reg); - DCHECK(operand->HasFixedPolicy()); - return operand; -} - - -LPlatformChunk* LChunkBuilder::Build() { - DCHECK(is_unused()); - chunk_ = new(zone()) LPlatformChunk(info_, graph_); - LPhase phase("L_Building chunk", chunk_); - status_ = BUILDING; - - const ZoneList* blocks = graph_->blocks(); - for (int i = 0; i < blocks->length(); i++) { - DoBasicBlock(blocks->at(i)); - if (is_aborted()) return NULL; - } - status_ = DONE; - return chunk_; -} - - -void LChunkBuilder::DoBasicBlock(HBasicBlock* block) { - DCHECK(is_building()); - current_block_ = block; - - if (block->IsStartBlock()) { - block->UpdateEnvironment(graph_->start_environment()); - argument_count_ = 0; - } else if (block->predecessors()->length() == 1) { - // We have a single predecessor => copy environment and outgoing - // argument count from the predecessor. - DCHECK(block->phis()->length() == 0); - HBasicBlock* pred = block->predecessors()->at(0); - HEnvironment* last_environment = pred->last_environment(); - DCHECK(last_environment != NULL); - - // Only copy the environment, if it is later used again. - if (pred->end()->SecondSuccessor() == NULL) { - DCHECK(pred->end()->FirstSuccessor() == block); - } else { - if ((pred->end()->FirstSuccessor()->block_id() > block->block_id()) || - (pred->end()->SecondSuccessor()->block_id() > block->block_id())) { - last_environment = last_environment->Copy(); - } - } - block->UpdateEnvironment(last_environment); - DCHECK(pred->argument_count() >= 0); - argument_count_ = pred->argument_count(); - } else { - // We are at a state join => process phis. 
- HBasicBlock* pred = block->predecessors()->at(0); - // No need to copy the environment, it cannot be used later. - HEnvironment* last_environment = pred->last_environment(); - for (int i = 0; i < block->phis()->length(); ++i) { - HPhi* phi = block->phis()->at(i); - if (phi->HasMergedIndex()) { - last_environment->SetValueAt(phi->merged_index(), phi); - } - } - for (int i = 0; i < block->deleted_phis()->length(); ++i) { - if (block->deleted_phis()->at(i) < last_environment->length()) { - last_environment->SetValueAt(block->deleted_phis()->at(i), - graph_->GetConstantUndefined()); - } - } - block->UpdateEnvironment(last_environment); - // Pick up the outgoing argument count of one of the predecessors. - argument_count_ = pred->argument_count(); - } - - // Translate hydrogen instructions to lithium ones for the current block. - HInstruction* current = block->first(); - int start = chunk_->instructions()->length(); - while ((current != NULL) && !is_aborted()) { - // Code for constants in registers is generated lazily. - if (!current->EmitAtUses()) { - VisitInstruction(current); - } - current = current->next(); - } - int end = chunk_->instructions()->length() - 1; - if (end >= start) { - block->set_first_instruction_index(start); - block->set_last_instruction_index(end); - } - block->set_argument_count(argument_count_); - current_block_ = NULL; -} - - -void LChunkBuilder::VisitInstruction(HInstruction* current) { - HInstruction* old_current = current_instruction_; - current_instruction_ = current; - - LInstruction* instr = NULL; - if (current->CanReplaceWithDummyUses()) { - if (current->OperandCount() == 0) { - instr = DefineAsRegister(new(zone()) LDummy()); - } else { - DCHECK(!current->OperandAt(0)->IsControlInstruction()); - instr = DefineAsRegister(new(zone()) - LDummyUse(UseAny(current->OperandAt(0)))); - } - for (int i = 1; i < current->OperandCount(); ++i) { - if (current->OperandAt(i)->IsControlInstruction()) continue; - LInstruction* dummy = - new(zone()) LDummyUse(UseAny(current->OperandAt(i))); - dummy->set_hydrogen_value(current); - chunk_->AddInstruction(dummy, current_block_); - } - } else { - HBasicBlock* successor; - if (current->IsControlInstruction() && - HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) && - successor != NULL) { - instr = new(zone()) LGoto(successor); - } else { - instr = current->CompileToLithium(this); - } - } - - argument_count_ += current->argument_delta(); - DCHECK(argument_count_ >= 0); - - if (instr != NULL) { - AddInstruction(instr, current); - } - - current_instruction_ = old_current; -} - - -void LChunkBuilder::AddInstruction(LInstruction* instr, - HInstruction* hydrogen_val) { - // Associate the hydrogen instruction first, since we may need it for - // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below. - instr->set_hydrogen_value(hydrogen_val); - -#if DEBUG - // Make sure that the lithium instruction has either no fixed register - // constraints in temps or the result OR no uses that are only used at - // start. If this invariant doesn't hold, the register allocator can decide - // to insert a split of a range immediately before the instruction due to an - // already allocated register needing to be used for the instruction's fixed - // register constraint. In this case, the register allocator won't see an - // interference between the split child and the use-at-start (it would if - // the it was just a plain use), so it is free to move the split child into - // the same register that is used for the use-at-start. 
- // See https://code.google.com/p/chromium/issues/detail?id=201590 - if (!(instr->ClobbersRegisters() && - instr->ClobbersDoubleRegisters(isolate()))) { - int fixed = 0; - int used_at_start = 0; - for (UseIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->IsUsedAtStart()) ++used_at_start; - } - if (instr->Output() != NULL) { - if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed; - } - for (TempIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->HasFixedPolicy()) ++fixed; - } - DCHECK(fixed == 0 || used_at_start == 0); - } -#endif - - if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) { - instr = AssignPointerMap(instr); - } - if (FLAG_stress_environments && !instr->HasEnvironment()) { - instr = AssignEnvironment(instr); - } - chunk_->AddInstruction(instr, current_block_); - - CreateLazyBailoutForCall(current_block_, instr, hydrogen_val); -} - - -LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { - HEnvironment* hydrogen_env = current_block_->last_environment(); - return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env); -} - - -LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) { - LInstruction* result = new (zone()) LPrologue(); - if (info_->scope()->NeedsContext()) { - result = MarkAsCall(result, instr); - } - return result; -} - - -LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) { - // The control instruction marking the end of a block that completed - // abruptly (e.g., threw an exception). There is nothing specific to do. - return NULL; -} - - -LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op, - HArithmeticBinaryOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - DCHECK(instr->right()->representation().IsDouble()); - - if (op == Token::MOD) { - LOperand* left = UseFixedDouble(instr->left(), d0); - LOperand* right = UseFixedDouble(instr->right(), d1); - LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right); - return MarkAsCall(DefineFixedDouble(result, d0), instr); - } else { - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterAtStart(instr->right()); - LArithmeticD* result = new(zone()) LArithmeticD(op, left, right); - return DefineAsRegister(result); - } -} - - -LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op, - HBinaryOperation* instr) { - DCHECK((op == Token::ADD) || (op == Token::SUB) || (op == Token::MUL) || - (op == Token::DIV) || (op == Token::MOD) || (op == Token::SHR) || - (op == Token::SHL) || (op == Token::SAR) || (op == Token::ROR) || - (op == Token::BIT_OR) || (op == Token::BIT_AND) || - (op == Token::BIT_XOR)); - HValue* left = instr->left(); - HValue* right = instr->right(); - - // TODO(jbramley): Once we've implemented smi support for all arithmetic - // operations, these assertions should check IsTagged(). 
- DCHECK(instr->representation().IsSmiOrTagged()); - DCHECK(left->representation().IsSmiOrTagged()); - DCHECK(right->representation().IsSmiOrTagged()); - - LOperand* context = UseFixed(instr->context(), cp); - LOperand* left_operand = UseFixed(left, x1); - LOperand* right_operand = UseFixed(right, x0); - LArithmeticT* result = - new(zone()) LArithmeticT(op, context, left_operand, right_operand); - return MarkAsCall(DefineFixed(result, x0), instr); -} - - -LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) { - info()->MarkAsRequiresFrame(); - LOperand* args = NULL; - LOperand* length = NULL; - LOperand* index = NULL; - - if (instr->length()->IsConstant() && instr->index()->IsConstant()) { - args = UseRegisterAtStart(instr->arguments()); - length = UseConstant(instr->length()); - index = UseConstant(instr->index()); - } else { - args = UseRegister(instr->arguments()); - length = UseRegisterAtStart(instr->length()); - index = UseRegisterOrConstantAtStart(instr->index()); - } - - return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index)); -} - - -LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - - LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr); - if (shifted_operation != NULL) { - return shifted_operation; - } - - LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); - LOperand* right = - UseRegisterOrConstantAtStart(instr->BetterRightOperand()); - LInstruction* result = instr->representation().IsSmi() ? - DefineAsRegister(new(zone()) LAddS(left, right)) : - DefineAsRegister(new(zone()) LAddI(left, right)); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; - } else if (instr->representation().IsExternal()) { - DCHECK(instr->IsConsistentExternalRepresentation()); - DCHECK(!instr->CheckFlag(HValue::kCanOverflow)); - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterOrConstantAtStart(instr->right()); - return DefineAsRegister(new(zone()) LAddE(left, right)); - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::ADD, instr); - } else { - DCHECK(instr->representation().IsTagged()); - return DoArithmeticT(Token::ADD, instr); - } -} - - -LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) { - LOperand* size = UseRegisterOrConstant(instr->size()); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - if (instr->IsAllocationFolded()) { - LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2); - return DefineAsRegister(result); - } else { - info()->MarkAsDeferredCalling(); - LOperand* context = UseAny(instr->context()); - LOperand* temp3 = instr->MustPrefillWithFiller() ? 
TempRegister() : NULL; - LAllocate* result = - new (zone()) LAllocate(context, size, temp1, temp2, temp3); - return AssignPointerMap(DefineAsRegister(result)); - } -} - - -LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) { - LOperand* function = UseFixed(instr->function(), x1); - LOperand* receiver = UseFixed(instr->receiver(), x0); - LOperand* length = UseFixed(instr->length(), x2); - LOperand* elements = UseFixed(instr->elements(), x3); - LApplyArguments* result = new(zone()) LApplyArguments(function, - receiver, - length, - elements); - return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* instr) { - info()->MarkAsRequiresFrame(); - LOperand* temp = instr->from_inlined() ? NULL : TempRegister(); - return DefineAsRegister(new(zone()) LArgumentsElements(temp)); -} - - -LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) { - info()->MarkAsRequiresFrame(); - LOperand* value = UseRegisterAtStart(instr->value()); - return DefineAsRegister(new(zone()) LArgumentsLength(value)); -} - - -LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) { - // There are no real uses of the arguments object. - // arguments.length and element access are supported directly on - // stack arguments, and any real arguments object use causes a bailout. - // So this value is never used. - return NULL; -} - - -LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32)); - - LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr); - if (shifted_operation != NULL) { - return shifted_operation; - } - - LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); - LOperand* right = - UseRegisterOrConstantAtStart(instr->BetterRightOperand()); - return instr->representation().IsSmi() ? - DefineAsRegister(new(zone()) LBitS(left, right)) : - DefineAsRegister(new(zone()) LBitI(left, right)); - } else { - return DoArithmeticT(instr->op(), instr); - } -} - - -LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) { - // V8 expects a label to be generated for each basic block. - // This is used in some places like LAllocator::IsBlockBoundary - // in lithium-allocator.cc - return new(zone()) LLabel(instr->block()); -} - - -LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { - if (!FLAG_debug_code && instr->skip_check()) return NULL; - LOperand* index = UseRegisterOrConstantAtStart(instr->index()); - LOperand* length = !index->IsConstantOperand() - ? UseRegisterOrConstantAtStart(instr->length()) - : UseRegisterAtStart(instr->length()); - LInstruction* result = new(zone()) LBoundsCheck(index, length); - if (!FLAG_debug_code || !instr->skip_check()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { - HValue* value = instr->value(); - Representation r = value->representation(); - HType type = value->type(); - - if (r.IsInteger32() || r.IsSmi() || r.IsDouble()) { - // These representations have simple checks that cannot deoptimize. 
- return new(zone()) LBranch(UseRegister(value), NULL, NULL); - } else { - DCHECK(r.IsTagged()); - if (type.IsBoolean() || type.IsSmi() || type.IsJSArray() || - type.IsHeapNumber()) { - // These types have simple checks that cannot deoptimize. - return new(zone()) LBranch(UseRegister(value), NULL, NULL); - } - - if (type.IsString()) { - // This type cannot deoptimize, but needs a scratch register. - return new(zone()) LBranch(UseRegister(value), TempRegister(), NULL); - } - - ToBooleanHints expected = instr->expected_input_types(); - bool needs_temps = (expected & ToBooleanHint::kNeedsMap) || - expected == ToBooleanHint::kNone; - LOperand* temp1 = needs_temps ? TempRegister() : NULL; - LOperand* temp2 = needs_temps ? TempRegister() : NULL; - - if (expected == ToBooleanHint::kAny || expected == ToBooleanHint::kNone) { - // The generic case cannot deoptimize because it already supports every - // possible input type. - DCHECK(needs_temps); - return new(zone()) LBranch(UseRegister(value), temp1, temp2); - } else { - return AssignEnvironment( - new(zone()) LBranch(UseRegister(value), temp1, temp2)); - } - } -} - - -LInstruction* LChunkBuilder::DoCallWithDescriptor( - HCallWithDescriptor* instr) { - CallInterfaceDescriptor descriptor = instr->descriptor(); - DCHECK_EQ(descriptor.GetParameterCount() + - LCallWithDescriptor::kImplicitRegisterParameterCount, - instr->OperandCount()); - - LOperand* target = UseRegisterOrConstantAtStart(instr->target()); - ZoneList ops(instr->OperandCount(), zone()); - // Target - ops.Add(target, zone()); - // Context - LOperand* op = UseFixed(instr->OperandAt(1), cp); - ops.Add(op, zone()); - // Load register parameters. - int i = 0; - for (; i < descriptor.GetRegisterParameterCount(); i++) { - op = UseFixed(instr->OperandAt( - i + LCallWithDescriptor::kImplicitRegisterParameterCount), - descriptor.GetRegisterParameter(i)); - ops.Add(op, zone()); - } - // Push stack parameters. - if (i < descriptor.GetParameterCount()) { - int argc = descriptor.GetParameterCount() - i; - AddInstruction(new (zone()) LPreparePushArguments(argc), instr); - LPushArguments* push_args = new (zone()) LPushArguments(zone()); - for (; i < descriptor.GetParameterCount(); i++) { - if (push_args->ShouldSplitPush()) { - AddInstruction(push_args, instr); - push_args = new (zone()) LPushArguments(zone()); - } - op = UseRegisterAtStart(instr->OperandAt( - i + LCallWithDescriptor::kImplicitRegisterParameterCount)); - push_args->AddArgument(op); - } - AddInstruction(push_args, instr); - } - - LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(descriptor, - ops, - zone()); - if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) { - result->MarkAsSyntacticTailCall(); - } - return MarkAsCall(DefineFixed(result, x0), instr); -} - - -LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) { - LOperand* context = UseFixed(instr->context(), cp); - // The call to ArrayConstructCode will expect the constructor to be in x1. 
- LOperand* constructor = UseFixed(instr->constructor(), x1); - LCallNewArray* result = new(zone()) LCallNewArray(context, constructor); - return MarkAsCall(DefineFixed(result, x0), instr); -} - - -LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) { - LOperand* context = UseFixed(instr->context(), cp); - return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), x0), instr); -} - - -LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) { - instr->ReplayEnvironment(current_block_->last_environment()); - - // There are no real uses of a captured object. - return NULL; -} - - -LInstruction* LChunkBuilder::DoChange(HChange* instr) { - Representation from = instr->from(); - Representation to = instr->to(); - HValue* val = instr->value(); - if (from.IsSmi()) { - if (to.IsTagged()) { - LOperand* value = UseRegister(val); - return DefineSameAsFirst(new(zone()) LDummyUse(value)); - } - from = Representation::Tagged(); - } - if (from.IsTagged()) { - if (to.IsDouble()) { - LOperand* value = UseRegister(val); - LOperand* temp = TempRegister(); - LInstruction* result = - DefineAsRegister(new(zone()) LNumberUntagD(value, temp)); - if (!val->representation().IsSmi()) result = AssignEnvironment(result); - return result; - } else if (to.IsSmi()) { - LOperand* value = UseRegister(val); - if (val->type().IsSmi()) { - return DefineSameAsFirst(new(zone()) LDummyUse(value)); - } - return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value))); - } else { - DCHECK(to.IsInteger32()); - if (val->type().IsSmi() || val->representation().IsSmi()) { - LOperand* value = UseRegisterAtStart(val); - return DefineAsRegister(new(zone()) LSmiUntag(value, false)); - } else { - LOperand* value = UseRegister(val); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = instr->CanTruncateToInt32() - ? 
NULL : TempDoubleRegister(); - LInstruction* result = - DefineAsRegister(new(zone()) LTaggedToI(value, temp1, temp2)); - if (!val->representation().IsSmi()) result = AssignEnvironment(result); - return result; - } - } - } else if (from.IsDouble()) { - if (to.IsTagged()) { - info()->MarkAsDeferredCalling(); - LOperand* value = UseRegister(val); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2); - return AssignPointerMap(DefineAsRegister(result)); - } else { - DCHECK(to.IsSmi() || to.IsInteger32()); - if (instr->CanTruncateToInt32()) { - LOperand* value = UseRegister(val); - return DefineAsRegister(new(zone()) LTruncateDoubleToIntOrSmi(value)); - } else { - LOperand* value = UseRegister(val); - LDoubleToIntOrSmi* result = new(zone()) LDoubleToIntOrSmi(value); - return AssignEnvironment(DefineAsRegister(result)); - } - } - } else if (from.IsInteger32()) { - info()->MarkAsDeferredCalling(); - if (to.IsTagged()) { - if (val->CheckFlag(HInstruction::kUint32)) { - LOperand* value = UseRegister(val); - LNumberTagU* result = - new(zone()) LNumberTagU(value, TempRegister(), TempRegister()); - return AssignPointerMap(DefineAsRegister(result)); - } else { - STATIC_ASSERT((kMinInt == Smi::kMinValue) && - (kMaxInt == Smi::kMaxValue)); - LOperand* value = UseRegisterAtStart(val); - return DefineAsRegister(new(zone()) LSmiTag(value)); - } - } else if (to.IsSmi()) { - LOperand* value = UseRegisterAtStart(val); - LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value)); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; - } else { - DCHECK(to.IsDouble()); - if (val->CheckFlag(HInstruction::kUint32)) { - return DefineAsRegister( - new(zone()) LUint32ToDouble(UseRegisterAtStart(val))); - } else { - return DefineAsRegister( - new(zone()) LInteger32ToDouble(UseRegisterAtStart(val))); - } - } - } - UNREACHABLE(); -} - - -LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - return AssignEnvironment(new(zone()) LCheckValue(value)); -} - - -LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered( - HCheckArrayBufferNotNeutered* instr) { - LOperand* view = UseRegisterAtStart(instr->value()); - LCheckArrayBufferNotNeutered* result = - new (zone()) LCheckArrayBufferNotNeutered(view); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* temp = TempRegister(); - LInstruction* result = new(zone()) LCheckInstanceType(value, temp); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) { - if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps; - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* temp = TempRegister(); - LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value, temp)); - if (instr->HasMigrationTarget()) { - info()->MarkAsDeferredCalling(); - result = AssignPointerMap(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - LInstruction* result = new(zone()) LCheckNonSmi(value); - if (!instr->value()->type().IsHeapObject()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) { - 
LOperand* value = UseRegisterAtStart(instr->value()); - return AssignEnvironment(new(zone()) LCheckSmi(value)); -} - - -LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) { - HValue* value = instr->value(); - Representation input_rep = value->representation(); - LOperand* reg = UseRegister(value); - if (input_rep.IsDouble()) { - return DefineAsRegister(new(zone()) LClampDToUint8(reg)); - } else if (input_rep.IsInteger32()) { - return DefineAsRegister(new(zone()) LClampIToUint8(reg)); - } else { - DCHECK(input_rep.IsSmiOrTagged()); - return AssignEnvironment( - DefineAsRegister(new(zone()) LClampTToUint8(reg, - TempDoubleRegister()))); - } -} - -LInstruction* LChunkBuilder::DoClassOfTestAndBranch( - HClassOfTestAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - return new (zone()) - LClassOfTestAndBranch(value, TempRegister(), TempRegister()); -} - -LInstruction* LChunkBuilder::DoCompareNumericAndBranch( - HCompareNumericAndBranch* instr) { - Representation r = instr->representation(); - if (r.IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(r)); - DCHECK(instr->right()->representation().Equals(r)); - LOperand* left = UseRegisterOrConstantAtStart(instr->left()); - LOperand* right = UseRegisterOrConstantAtStart(instr->right()); - return new(zone()) LCompareNumericAndBranch(left, right); - } else { - DCHECK(r.IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - DCHECK(instr->right()->representation().IsDouble()); - if (instr->left()->IsConstant() && instr->right()->IsConstant()) { - LOperand* left = UseConstant(instr->left()); - LOperand* right = UseConstant(instr->right()); - return new(zone()) LCompareNumericAndBranch(left, right); - } - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterAtStart(instr->right()); - return new(zone()) LCompareNumericAndBranch(left, right); - } -} - - -LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { - DCHECK(instr->left()->representation().IsTagged()); - DCHECK(instr->right()->representation().IsTagged()); - LOperand* context = UseFixed(instr->context(), cp); - LOperand* left = UseFixed(instr->left(), x1); - LOperand* right = UseFixed(instr->right(), x0); - LCmpT* result = new(zone()) LCmpT(context, left, right); - return MarkAsCall(DefineFixed(result, x0), instr); -} - - -LInstruction* LChunkBuilder::DoCompareHoleAndBranch( - HCompareHoleAndBranch* instr) { - LOperand* value = UseRegister(instr->value()); - if (instr->representation().IsTagged()) { - return new(zone()) LCmpHoleAndBranchT(value); - } else { - LOperand* temp = TempRegister(); - return new(zone()) LCmpHoleAndBranchD(value, temp); - } -} - - -LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch( - HCompareObjectEqAndBranch* instr) { - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterAtStart(instr->right()); - return new(zone()) LCmpObjectEqAndBranch(left, right); -} - - -LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* temp = TempRegister(); - return new(zone()) LCmpMapAndBranch(value, temp); -} - - -LInstruction* LChunkBuilder::DoConstant(HConstant* instr) { - Representation r = instr->representation(); - if (r.IsSmi()) { - return DefineAsRegister(new(zone()) LConstantS); - } else if (r.IsInteger32()) { - return 
DefineAsRegister(new(zone()) LConstantI); - } else if (r.IsDouble()) { - return DefineAsRegister(new(zone()) LConstantD); - } else if (r.IsExternal()) { - return DefineAsRegister(new(zone()) LConstantE); - } else if (r.IsTagged()) { - return DefineAsRegister(new(zone()) LConstantT); - } else { - UNREACHABLE(); - } -} - - -LInstruction* LChunkBuilder::DoContext(HContext* instr) { - if (instr->HasNoUses()) return NULL; - - if (info()->IsStub()) { - return DefineFixed(new(zone()) LContext, cp); - } - - return DefineAsRegister(new(zone()) LContext); -} - - -LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) { - return new(zone()) LDebugBreak(); -} - - -LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) { - LOperand* context = UseFixed(instr->context(), cp); - return MarkAsCall(new(zone()) LDeclareGlobals(context), instr); -} - - -LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) { - return AssignEnvironment(new(zone()) LDeoptimize); -} - - -LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) { - DCHECK(instr->representation().IsInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I( - dividend, divisor)); - if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || - (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) || - (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && - divisor != 1 && divisor != -1)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) { - DCHECK(instr->representation().IsInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) - ? NULL : TempRegister(); - LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI( - dividend, divisor, temp)); - if (divisor == 0 || - (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || - !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - LOperand* divisor = UseRegister(instr->right()); - LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) - ? 
NULL : TempRegister(); - LInstruction* result = - DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp)); - if (!instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoDiv(HDiv* instr) { - if (instr->representation().IsSmiOrInteger32()) { - if (instr->RightIsPowerOf2()) { - return DoDivByPowerOf2I(instr); - } else if (instr->right()->IsConstant()) { - return DoDivByConstI(instr); - } else { - return DoDivI(instr); - } - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::DIV, instr); - } else { - return DoArithmeticT(Token::DIV, instr); - } -} - - -LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) { - return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value()))); -} - - -LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { - HEnvironment* outer = current_block_->last_environment(); - outer->set_ast_id(instr->ReturnId()); - HConstant* undefined = graph()->GetConstantUndefined(); - HEnvironment* inner = outer->CopyForInlining( - instr->closure(), instr->arguments_count(), instr->function(), undefined, - instr->inlining_kind(), instr->syntactic_tail_call_mode()); - // Only replay binding of arguments object if it wasn't removed from graph. - if ((instr->arguments_var() != NULL) && - instr->arguments_object()->IsLinked()) { - inner->Bind(instr->arguments_var(), instr->arguments_object()); - } - inner->BindContext(instr->closure_context()); - inner->set_entry(instr); - current_block_->UpdateEnvironment(inner); - return NULL; -} - - -LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) { - UNREACHABLE(); -} - - -LInstruction* LChunkBuilder::DoForceRepresentation( - HForceRepresentation* instr) { - // All HForceRepresentation instructions should be eliminated in the - // representation change phase of Hydrogen. - UNREACHABLE(); -} - -LInstruction* LChunkBuilder::DoGoto(HGoto* instr) { - return new(zone()) LGoto(instr->FirstSuccessor()); -} - -LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch( - HHasInstanceTypeAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - return new(zone()) LHasInstanceTypeAndBranch(value, TempRegister()); -} - - -LInstruction* LChunkBuilder::DoInnerAllocatedObject( - HInnerAllocatedObject* instr) { - LOperand* base_object = UseRegisterAtStart(instr->base_object()); - LOperand* offset = UseRegisterOrConstantAtStart(instr->offset()); - return DefineAsRegister( - new(zone()) LInnerAllocatedObject(base_object, offset)); -} - - -LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch( - HHasInPrototypeChainAndBranch* instr) { - LOperand* object = UseRegister(instr->object()); - LOperand* prototype = UseRegister(instr->prototype()); - LOperand* scratch1 = TempRegister(); - LOperand* scratch2 = TempRegister(); - LHasInPrototypeChainAndBranch* result = new (zone()) - LHasInPrototypeChainAndBranch(object, prototype, scratch1, scratch2); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) { - LOperand* context = UseFixed(instr->context(), cp); - // The function is required (by MacroAssembler::InvokeFunction) to be in x1. 
- LOperand* function = UseFixed(instr->function(), x1); - LInvokeFunction* result = new(zone()) LInvokeFunction(context, function); - if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) { - result->MarkAsSyntacticTailCall(); - } - return MarkAsCall(DefineFixed(result, x0), instr, CANNOT_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* temp = TempRegister(); - return new(zone()) LIsStringAndBranch(value, temp); -} - - -LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - return new(zone()) LIsSmiAndBranch(UseRegisterAtStart(instr->value())); -} - - -LInstruction* LChunkBuilder::DoIsUndetectableAndBranch( - HIsUndetectableAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - return new(zone()) LIsUndetectableAndBranch(value, TempRegister()); -} - - -LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) { - LInstruction* pop = NULL; - HEnvironment* env = current_block_->last_environment(); - - if (env->entry()->arguments_pushed()) { - int argument_count = env->arguments_environment()->parameter_count(); - pop = new(zone()) LDrop(argument_count); - DCHECK(instr->argument_delta() == -argument_count); - } - - HEnvironment* outer = - current_block_->last_environment()->DiscardInlined(false); - current_block_->UpdateEnvironment(outer); - - return pop; -} - - -LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) { - LOperand* context = UseRegisterAtStart(instr->value()); - LInstruction* result = - DefineAsRegister(new(zone()) LLoadContextSlot(context)); - if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoLoadFunctionPrototype( - HLoadFunctionPrototype* instr) { - LOperand* function = UseRegister(instr->function()); - LOperand* temp = TempRegister(); - return AssignEnvironment(DefineAsRegister( - new(zone()) LLoadFunctionPrototype(function, temp))); -} - - -LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { - DCHECK(instr->key()->representation().IsSmiOrInteger32()); - ElementsKind elements_kind = instr->elements_kind(); - LOperand* elements = UseRegister(instr->elements()); - LOperand* key = UseRegisterOrConstant(instr->key()); - - if (!instr->is_fixed_typed_array()) { - if (instr->representation().IsDouble()) { - LOperand* temp = (!instr->key()->IsConstant() || - instr->RequiresHoleCheck()) - ? TempRegister() - : NULL; - LInstruction* result = DefineAsRegister( - new (zone()) LLoadKeyedFixedDouble(elements, key, temp)); - if (instr->RequiresHoleCheck()) { - result = AssignEnvironment(result); - } - return result; - } else { - DCHECK(instr->representation().IsSmiOrTagged() || - instr->representation().IsInteger32()); - LOperand* temp = instr->key()->IsConstant() ? 
NULL : TempRegister(); - LInstruction* result = - DefineAsRegister(new (zone()) LLoadKeyedFixed(elements, key, temp)); - if (instr->RequiresHoleCheck() || - (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && - info()->IsStub())) { - result = AssignEnvironment(result); - } - return result; - } - } else { - DCHECK((instr->representation().IsInteger32() && - !IsDoubleOrFloatElementsKind(instr->elements_kind())) || - (instr->representation().IsDouble() && - IsDoubleOrFloatElementsKind(instr->elements_kind()))); - - LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister(); - LOperand* backing_store_owner = UseAny(instr->backing_store_owner()); - LInstruction* result = DefineAsRegister(new (zone()) LLoadKeyedExternal( - elements, key, backing_store_owner, temp)); - if (elements_kind == UINT32_ELEMENTS && - !instr->CheckFlag(HInstruction::kUint32)) { - result = AssignEnvironment(result); - } - return result; - } -} - - -LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) { - LOperand* object = UseRegisterAtStart(instr->object()); - return DefineAsRegister(new(zone()) LLoadNamedField(object)); -} - - -LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) { - return DefineAsRegister(new(zone()) LLoadRoot); -} - - -LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) { - DCHECK(instr->representation().IsInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegisterAtStart(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I( - dividend, divisor)); - if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || - (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) { - DCHECK(instr->representation().IsInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LOperand* temp = - ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) || - (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ? 
- NULL : TempRegister(); - LInstruction* result = DefineAsRegister( - new(zone()) LFlooringDivByConstI(dividend, divisor, temp)); - if (divisor == 0 || - (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) { - LOperand* dividend = UseRegister(instr->left()); - LOperand* divisor = UseRegister(instr->right()); - LOperand* remainder = TempRegister(); - LInstruction* result = - DefineAsRegister(new(zone()) LFlooringDivI(dividend, divisor, remainder)); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) { - if (instr->RightIsPowerOf2()) { - return DoFlooringDivByPowerOf2I(instr); - } else if (instr->right()->IsConstant()) { - return DoFlooringDivByConstI(instr); - } else { - return DoFlooringDivI(instr); - } -} - - -LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) { - LOperand* left = NULL; - LOperand* right = NULL; - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - left = UseRegisterAtStart(instr->BetterLeftOperand()); - right = UseRegisterOrConstantAtStart(instr->BetterRightOperand()); - } else { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - DCHECK(instr->right()->representation().IsDouble()); - left = UseRegisterAtStart(instr->left()); - right = UseRegisterAtStart(instr->right()); - } - return DefineAsRegister(new(zone()) LMathMinMax(left, right)); -} - - -LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) { - DCHECK(instr->representation().IsInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegisterAtStart(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I( - dividend, divisor)); - if (instr->CheckFlag(HValue::kLeftCanBeNegative) && - instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) { - DCHECK(instr->representation().IsInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LOperand* temp = TempRegister(); - LInstruction* result = DefineAsRegister(new(zone()) LModByConstI( - dividend, divisor, temp)); - if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoModI(HMod* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - LOperand* divisor = UseRegister(instr->right()); - LInstruction* result = DefineAsRegister(new(zone()) LModI(dividend, divisor)); - if (instr->CheckFlag(HValue::kCanBeDivByZero) || - instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - result = 
AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoMod(HMod* instr) { - if (instr->representation().IsSmiOrInteger32()) { - if (instr->RightIsPowerOf2()) { - return DoModByPowerOf2I(instr); - } else if (instr->right()->IsConstant()) { - return DoModByConstI(instr); - } else { - return DoModI(instr); - } - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::MOD, instr); - } else { - return DoArithmeticT(Token::MOD, instr); - } -} - - -LInstruction* LChunkBuilder::DoMul(HMul* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - - bool can_overflow = instr->CheckFlag(HValue::kCanOverflow); - bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero); - - HValue* least_const = instr->BetterLeftOperand(); - HValue* most_const = instr->BetterRightOperand(); - - // LMulConstI can handle a subset of constants: - // With support for overflow detection: - // -1, 0, 1, 2 - // 2^n, -(2^n) - // Without support for overflow detection: - // 2^n + 1, -(2^n - 1) - if (most_const->IsConstant()) { - int32_t constant = HConstant::cast(most_const)->Integer32Value(); - bool small_constant = (constant >= -1) && (constant <= 2); - bool end_range_constant = (constant <= -kMaxInt) || (constant == kMaxInt); - int32_t constant_abs = Abs(constant); - - if (!end_range_constant && - (small_constant || (base::bits::IsPowerOfTwo32(constant_abs)) || - (!can_overflow && (base::bits::IsPowerOfTwo32(constant_abs + 1) || - base::bits::IsPowerOfTwo32(constant_abs - 1))))) { - LConstantOperand* right = UseConstant(most_const); - bool need_register = - base::bits::IsPowerOfTwo32(constant_abs) && !small_constant; - LOperand* left = need_register ? UseRegister(least_const) - : UseRegisterAtStart(least_const); - LInstruction* result = - DefineAsRegister(new(zone()) LMulConstIS(left, right)); - if ((bailout_on_minus_zero && constant <= 0) || - (can_overflow && constant != 1 && - base::bits::IsPowerOfTwo32(constant_abs))) { - result = AssignEnvironment(result); - } - return result; - } - } - - // LMulI/S can handle all cases, but it requires that a register is - // allocated for the second operand. - LOperand* left = UseRegisterAtStart(least_const); - LOperand* right = UseRegisterAtStart(most_const); - LInstruction* result = instr->representation().IsSmi() - ? 
DefineAsRegister(new(zone()) LMulS(left, right)) - : DefineAsRegister(new(zone()) LMulI(left, right)); - if ((bailout_on_minus_zero && least_const != most_const) || can_overflow) { - result = AssignEnvironment(result); - } - return result; - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::MUL, instr); - } else { - return DoArithmeticT(Token::MUL, instr); - } -} - - -LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) { - DCHECK(argument_count_ == 0); - allocator_->MarkAsOsrEntry(); - current_block_->last_environment()->set_ast_id(instr->ast_id()); - return AssignEnvironment(new(zone()) LOsrEntry); -} - - -LInstruction* LChunkBuilder::DoParameter(HParameter* instr) { - LParameter* result = new(zone()) LParameter; - if (instr->kind() == HParameter::STACK_PARAMETER) { - int spill_index = chunk_->GetParameterStackSlot(instr->index()); - return DefineAsSpilled(result, spill_index); - } else { - DCHECK(info()->IsStub()); - CallInterfaceDescriptor descriptor = graph()->descriptor(); - int index = static_cast(instr->index()); - Register reg = descriptor.GetRegisterParameter(index); - return DefineFixed(result, reg); - } -} - - -LInstruction* LChunkBuilder::DoPower(HPower* instr) { - DCHECK(instr->representation().IsDouble()); - // We call a C function for double power. It can't trigger a GC. - // We need to use fixed result register for the call. - Representation exponent_type = instr->right()->representation(); - DCHECK(instr->left()->representation().IsDouble()); - LOperand* left = UseFixedDouble(instr->left(), d0); - LOperand* right; - if (exponent_type.IsInteger32()) { - right = UseFixed(instr->right(), MathPowIntegerDescriptor::exponent()); - } else if (exponent_type.IsDouble()) { - right = UseFixedDouble(instr->right(), d1); - } else { - right = UseFixed(instr->right(), MathPowTaggedDescriptor::exponent()); - } - LPower* result = new(zone()) LPower(left, right); - return MarkAsCall(DefineFixedDouble(result, d0), - instr, - CAN_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) { - int argc = instr->OperandCount(); - AddInstruction(new(zone()) LPreparePushArguments(argc), instr); - - LPushArguments* push_args = new(zone()) LPushArguments(zone()); - - for (int i = 0; i < argc; ++i) { - if (push_args->ShouldSplitPush()) { - AddInstruction(push_args, instr); - push_args = new(zone()) LPushArguments(zone()); - } - push_args->AddArgument(UseRegister(instr->argument(i))); - } - - return push_args; -} - - -LInstruction* LChunkBuilder::DoReturn(HReturn* instr) { - LOperand* context = info()->IsStub() - ? UseFixed(instr->context(), cp) - : NULL; - LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count()); - return new(zone()) LReturn(UseFixed(instr->value(), x0), context, - parameter_count); -} - - -LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) { - LOperand* string = UseRegisterAtStart(instr->string()); - LOperand* index = UseRegisterOrConstantAtStart(instr->index()); - LOperand* temp = TempRegister(); - LSeqStringGetChar* result = - new(zone()) LSeqStringGetChar(string, index, temp); - return DefineAsRegister(result); -} - - -LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) { - LOperand* string = UseRegister(instr->string()); - LOperand* index = FLAG_debug_code - ? UseRegister(instr->index()) - : UseRegisterOrConstant(instr->index()); - LOperand* value = UseRegister(instr->value()); - LOperand* context = FLAG_debug_code ? 
UseFixed(instr->context(), cp) : NULL; - LOperand* temp = TempRegister(); - LSeqStringSetChar* result = - new(zone()) LSeqStringSetChar(context, string, index, value, temp); - return DefineAsRegister(result); -} - - -HBitwiseBinaryOperation* LChunkBuilder::CanTransformToShiftedOp(HValue* val, - HValue** left) { - if (!val->representation().IsInteger32()) return NULL; - if (!(val->IsBitwise() || val->IsAdd() || val->IsSub())) return NULL; - - HBinaryOperation* hinstr = HBinaryOperation::cast(val); - HValue* hleft = hinstr->left(); - HValue* hright = hinstr->right(); - DCHECK(hleft->representation().Equals(hinstr->representation())); - DCHECK(hright->representation().Equals(hinstr->representation())); - - if (hleft == hright) return NULL; - - if ((hright->IsConstant() && - LikelyFitsImmField(hinstr, HConstant::cast(hright)->Integer32Value())) || - (hinstr->IsCommutative() && hleft->IsConstant() && - LikelyFitsImmField(hinstr, HConstant::cast(hleft)->Integer32Value()))) { - // The constant operand will likely fit in the immediate field. We are - // better off with - // lsl x8, x9, #imm - // add x0, x8, #imm2 - // than with - // mov x16, #imm2 - // add x0, x16, x9 LSL #imm - return NULL; - } - - HBitwiseBinaryOperation* shift = NULL; - // TODO(aleram): We will miss situations where a shift operation is used by - // different instructions both as a left and right operands. - if (hright->IsBitwiseBinaryShift() && - HBitwiseBinaryOperation::cast(hright)->right()->IsConstant()) { - shift = HBitwiseBinaryOperation::cast(hright); - if (left != NULL) { - *left = hleft; - } - } else if (hinstr->IsCommutative() && - hleft->IsBitwiseBinaryShift() && - HBitwiseBinaryOperation::cast(hleft)->right()->IsConstant()) { - shift = HBitwiseBinaryOperation::cast(hleft); - if (left != NULL) { - *left = hright; - } - } else { - return NULL; - } - - if ((JSShiftAmountFromHConstant(shift->right()) == 0) && shift->IsShr()) { - // Shifts right by zero can deoptimize. 
- return NULL; - } - - return shift; -} - - -bool LChunkBuilder::ShiftCanBeOptimizedAway(HBitwiseBinaryOperation* shift) { - if (!shift->representation().IsInteger32()) { - return false; - } - for (HUseIterator it(shift->uses()); !it.Done(); it.Advance()) { - if (shift != CanTransformToShiftedOp(it.value())) { - return false; - } - } - return true; -} - - -LInstruction* LChunkBuilder::TryDoOpWithShiftedRightOperand( - HBinaryOperation* instr) { - HValue* left; - HBitwiseBinaryOperation* shift = CanTransformToShiftedOp(instr, &left); - - if ((shift != NULL) && ShiftCanBeOptimizedAway(shift)) { - return DoShiftedBinaryOp(instr, left, shift); - } - return NULL; -} - - -LInstruction* LChunkBuilder::DoShiftedBinaryOp( - HBinaryOperation* hinstr, HValue* hleft, HBitwiseBinaryOperation* hshift) { - DCHECK(hshift->IsBitwiseBinaryShift()); - DCHECK(!hshift->IsShr() || (JSShiftAmountFromHConstant(hshift->right()) > 0)); - - LTemplateResultInstruction<1>* res; - LOperand* left = UseRegisterAtStart(hleft); - LOperand* right = UseRegisterAtStart(hshift->left()); - LOperand* shift_amount = UseConstant(hshift->right()); - Shift shift_op; - switch (hshift->opcode()) { - case HValue::kShl: shift_op = LSL; break; - case HValue::kShr: shift_op = LSR; break; - case HValue::kSar: shift_op = ASR; break; - default: UNREACHABLE(); shift_op = NO_SHIFT; - } - - if (hinstr->IsBitwise()) { - res = new(zone()) LBitI(left, right, shift_op, shift_amount); - } else if (hinstr->IsAdd()) { - res = new(zone()) LAddI(left, right, shift_op, shift_amount); - } else { - DCHECK(hinstr->IsSub()); - res = new(zone()) LSubI(left, right, shift_op, shift_amount); - } - if (hinstr->CheckFlag(HValue::kCanOverflow)) { - AssignEnvironment(res); - } - return DefineAsRegister(res); -} - - -LInstruction* LChunkBuilder::DoShift(Token::Value op, - HBitwiseBinaryOperation* instr) { - if (instr->representation().IsTagged()) { - return DoArithmeticT(op, instr); - } - - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - - if (ShiftCanBeOptimizedAway(instr)) { - return NULL; - } - - LOperand* left = instr->representation().IsSmi() - ? UseRegister(instr->left()) - : UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterOrConstantAtStart(instr->right()); - - // The only shift that can deoptimize is `left >>> 0`, where left is negative. - // In these cases, the result is a uint32 that is too large for an int32. - bool right_can_be_zero = !instr->right()->IsConstant() || - (JSShiftAmountFromHConstant(instr->right()) == 0); - bool can_deopt = false; - if ((op == Token::SHR) && right_can_be_zero) { - can_deopt = !instr->CheckFlag(HInstruction::kUint32); - } - - LInstruction* result; - if (instr->representation().IsInteger32()) { - result = DefineAsRegister(new (zone()) LShiftI(op, left, right, can_deopt)); - } else { - DCHECK(instr->representation().IsSmi()); - result = DefineAsRegister(new (zone()) LShiftS(op, left, right, can_deopt)); - } - - return can_deopt ? 
AssignEnvironment(result) : result; -} - - -LInstruction* LChunkBuilder::DoRor(HRor* instr) { - return DoShift(Token::ROR, instr); -} - - -LInstruction* LChunkBuilder::DoSar(HSar* instr) { - return DoShift(Token::SAR, instr); -} - - -LInstruction* LChunkBuilder::DoShl(HShl* instr) { - return DoShift(Token::SHL, instr); -} - - -LInstruction* LChunkBuilder::DoShr(HShr* instr) { - return DoShift(Token::SHR, instr); -} - - -LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) { - instr->ReplayEnvironment(current_block_->last_environment()); - return NULL; -} - - -LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) { - if (instr->is_function_entry()) { - LOperand* context = UseFixed(instr->context(), cp); - return MarkAsCall(new(zone()) LStackCheck(context), instr); - } else { - DCHECK(instr->is_backwards_branch()); - LOperand* context = UseAny(instr->context()); - return AssignEnvironment( - AssignPointerMap(new(zone()) LStackCheck(context))); - } -} - - -LInstruction* LChunkBuilder::DoStoreCodeEntry(HStoreCodeEntry* instr) { - LOperand* function = UseRegister(instr->function()); - LOperand* code_object = UseRegisterAtStart(instr->code_object()); - LOperand* temp = TempRegister(); - return new(zone()) LStoreCodeEntry(function, code_object, temp); -} - - -LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) { - LOperand* temp = TempRegister(); - LOperand* context; - LOperand* value; - if (instr->NeedsWriteBarrier()) { - // TODO(all): Replace these constraints when RecordWriteStub has been - // rewritten. - context = UseRegisterAndClobber(instr->context()); - value = UseRegisterAndClobber(instr->value()); - } else { - context = UseRegister(instr->context()); - value = UseRegister(instr->value()); - } - LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp); - if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { - LOperand* key = UseRegisterOrConstant(instr->key()); - LOperand* temp = NULL; - LOperand* elements = NULL; - LOperand* val = NULL; - - if (!instr->is_fixed_typed_array() && - instr->value()->representation().IsTagged() && - instr->NeedsWriteBarrier()) { - // RecordWrite() will clobber all registers. - elements = UseRegisterAndClobber(instr->elements()); - val = UseRegisterAndClobber(instr->value()); - temp = TempRegister(); - } else { - elements = UseRegister(instr->elements()); - val = UseRegister(instr->value()); - temp = instr->key()->IsConstant() ? 
NULL : TempRegister(); - } - - if (instr->is_fixed_typed_array()) { - DCHECK((instr->value()->representation().IsInteger32() && - !IsDoubleOrFloatElementsKind(instr->elements_kind())) || - (instr->value()->representation().IsDouble() && - IsDoubleOrFloatElementsKind(instr->elements_kind()))); - DCHECK(instr->elements()->representation().IsExternal()); - LOperand* backing_store_owner = UseAny(instr->backing_store_owner()); - return new (zone()) - LStoreKeyedExternal(elements, key, val, backing_store_owner, temp); - - } else if (instr->value()->representation().IsDouble()) { - DCHECK(instr->elements()->representation().IsTagged()); - return new(zone()) LStoreKeyedFixedDouble(elements, key, val, temp); - - } else { - DCHECK(instr->elements()->representation().IsTagged()); - DCHECK(instr->value()->representation().IsSmiOrTagged() || - instr->value()->representation().IsInteger32()); - return new(zone()) LStoreKeyedFixed(elements, key, val, temp); - } -} - - -LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { - // TODO(jbramley): It might be beneficial to allow value to be a constant in - // some cases. x64 makes use of this with FLAG_track_fields, for example. - - LOperand* object = UseRegister(instr->object()); - LOperand* value; - LOperand* temp0 = NULL; - LOperand* temp1 = NULL; - - if (instr->access().IsExternalMemory() || - (!FLAG_unbox_double_fields && instr->field_representation().IsDouble())) { - value = UseRegister(instr->value()); - } else if (instr->NeedsWriteBarrier()) { - value = UseRegisterAndClobber(instr->value()); - temp0 = TempRegister(); - temp1 = TempRegister(); - } else if (instr->NeedsWriteBarrierForMap()) { - value = UseRegister(instr->value()); - temp0 = TempRegister(); - temp1 = TempRegister(); - } else { - value = UseRegister(instr->value()); - temp0 = TempRegister(); - } - - return new(zone()) LStoreNamedField(object, value, temp0, temp1); -} - - -LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) { - LOperand* context = UseFixed(instr->context(), cp); - LOperand* left = UseFixed(instr->left(), x1); - LOperand* right = UseFixed(instr->right(), x0); - - LStringAdd* result = new(zone()) LStringAdd(context, left, right); - return MarkAsCall(DefineFixed(result, x0), instr); -} - - -LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) { - LOperand* string = UseRegisterAndClobber(instr->string()); - LOperand* index = UseRegisterAndClobber(instr->index()); - LOperand* context = UseAny(instr->context()); - LStringCharCodeAt* result = - new(zone()) LStringCharCodeAt(context, string, index); - return AssignPointerMap(DefineAsRegister(result)); -} - - -LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) { - LOperand* char_code = UseRegister(instr->value()); - LOperand* context = UseAny(instr->context()); - LStringCharFromCode* result = - new(zone()) LStringCharFromCode(context, char_code); - return AssignPointerMap(DefineAsRegister(result)); -} - - -LInstruction* LChunkBuilder::DoStringCompareAndBranch( - HStringCompareAndBranch* instr) { - DCHECK(instr->left()->representation().IsTagged()); - DCHECK(instr->right()->representation().IsTagged()); - LOperand* context = UseFixed(instr->context(), cp); - LOperand* left = UseFixed(instr->left(), x1); - LOperand* right = UseFixed(instr->right(), x0); - LStringCompareAndBranch* result = - new(zone()) LStringCompareAndBranch(context, left, right); - return MarkAsCall(result, instr); -} - - -LInstruction* LChunkBuilder::DoSub(HSub* instr) { - if 
(instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - - LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr); - if (shifted_operation != NULL) { - return shifted_operation; - } - - LOperand *left; - if (instr->left()->IsConstant() && - (HConstant::cast(instr->left())->Integer32Value() == 0)) { - left = UseConstant(instr->left()); - } else { - left = UseRegisterAtStart(instr->left()); - } - LOperand* right = UseRegisterOrConstantAtStart(instr->right()); - LInstruction* result = instr->representation().IsSmi() ? - DefineAsRegister(new(zone()) LSubS(left, right)) : - DefineAsRegister(new(zone()) LSubI(left, right)); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::SUB, instr); - } else { - return DoArithmeticT(Token::SUB, instr); - } -} - - -LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) { - if (instr->HasNoUses()) { - return NULL; - } else { - return DefineAsRegister(new(zone()) LThisFunction); - } -} - - -LInstruction* LChunkBuilder::DoTransitionElementsKind( - HTransitionElementsKind* instr) { - if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) { - LOperand* object = UseRegister(instr->object()); - LTransitionElementsKind* result = - new(zone()) LTransitionElementsKind(object, NULL, - TempRegister(), TempRegister()); - return result; - } else { - LOperand* object = UseFixed(instr->object(), x0); - LOperand* context = UseFixed(instr->context(), cp); - LTransitionElementsKind* result = - new(zone()) LTransitionElementsKind(object, context, NULL, NULL); - return MarkAsCall(result, instr); - } -} - - -LInstruction* LChunkBuilder::DoTrapAllocationMemento( - HTrapAllocationMemento* instr) { - LOperand* object = UseRegister(instr->object()); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - LTrapAllocationMemento* result = - new(zone()) LTrapAllocationMemento(object, temp1, temp2); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) { - info()->MarkAsDeferredCalling(); - LOperand* context = UseFixed(instr->context(), cp); - LOperand* object = UseRegister(instr->object()); - LOperand* elements = UseRegister(instr->elements()); - LOperand* key = UseRegisterOrConstant(instr->key()); - LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity()); - - LMaybeGrowElements* result = new (zone()) - LMaybeGrowElements(context, object, elements, key, current_capacity); - DefineFixed(result, x0); - return AssignPointerMap(AssignEnvironment(result)); -} - - -LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) { - LOperand* context = UseFixed(instr->context(), cp); - LOperand* value = UseFixed(instr->value(), x3); - LTypeof* result = new (zone()) LTypeof(context, value); - return MarkAsCall(DefineFixed(result, x0), instr); -} - - -LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) { - // We only need temp registers in some cases, but we can't dereference the - // instr->type_literal() handle to test that here. 
- LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - - return new(zone()) LTypeofIsAndBranch( - UseRegister(instr->value()), temp1, temp2); -} - - -LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { - switch (instr->op()) { - case kMathAbs: { - Representation r = instr->representation(); - if (r.IsTagged()) { - // The tagged case might need to allocate a HeapNumber for the result, - // so it is handled by a separate LInstruction. - LOperand* context = UseFixed(instr->context(), cp); - LOperand* input = UseRegister(instr->value()); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - LOperand* temp3 = TempRegister(); - LInstruction* result = DefineAsRegister( - new(zone()) LMathAbsTagged(context, input, temp1, temp2, temp3)); - return AssignEnvironment(AssignPointerMap(result)); - } else { - LOperand* input = UseRegisterAtStart(instr->value()); - LInstruction* result = DefineAsRegister(new(zone()) LMathAbs(input)); - if (!r.IsDouble()) result = AssignEnvironment(result); - return result; - } - } - case kMathCos: { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseFixedDouble(instr->value(), d0); - LMathCos* result = new (zone()) LMathCos(input); - return MarkAsCall(DefineFixedDouble(result, d0), instr); - } - case kMathSin: { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseFixedDouble(instr->value(), d0); - LMathSin* result = new (zone()) LMathSin(input); - return MarkAsCall(DefineFixedDouble(result, d0), instr); - } - case kMathExp: { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseFixedDouble(instr->value(), d0); - LMathExp* result = new (zone()) LMathExp(input); - return MarkAsCall(DefineFixedDouble(result, d0), instr); - } - case kMathFloor: { - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseRegisterAtStart(instr->value()); - if (instr->representation().IsInteger32()) { - LMathFloorI* result = new(zone()) LMathFloorI(input); - return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); - } else { - DCHECK(instr->representation().IsDouble()); - LMathFloorD* result = new(zone()) LMathFloorD(input); - return DefineAsRegister(result); - } - } - case kMathLog: { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseFixedDouble(instr->value(), d0); - LMathLog* result = new(zone()) LMathLog(input); - return MarkAsCall(DefineFixedDouble(result, d0), instr); - } - case kMathPowHalf: { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseRegister(instr->value()); - return DefineAsRegister(new(zone()) LMathPowHalf(input)); - } - case kMathRound: { - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseRegister(instr->value()); - if (instr->representation().IsInteger32()) { - LOperand* temp = TempDoubleRegister(); - LMathRoundI* result = new(zone()) LMathRoundI(input, temp); - return AssignEnvironment(DefineAsRegister(result)); - } else { - DCHECK(instr->representation().IsDouble()); - LMathRoundD* result = new(zone()) LMathRoundD(input); - return DefineAsRegister(result); - } - } - case kMathFround: { - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseRegister(instr->value()); - 
LMathFround* result = new (zone()) LMathFround(input); - return DefineAsRegister(result); - } - case kMathSqrt: { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseRegisterAtStart(instr->value()); - return DefineAsRegister(new(zone()) LMathSqrt(input)); - } - case kMathClz32: { - DCHECK(instr->representation().IsInteger32()); - DCHECK(instr->value()->representation().IsInteger32()); - LOperand* input = UseRegisterAtStart(instr->value()); - return DefineAsRegister(new(zone()) LMathClz32(input)); - } - default: - UNREACHABLE(); - } -} - - -LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) { - // Use an index that corresponds to the location in the unoptimized frame, - // which the optimized frame will subsume. - int env_index = instr->index(); - int spill_index = 0; - if (instr->environment()->is_parameter_index(env_index)) { - spill_index = chunk_->GetParameterStackSlot(env_index); - } else { - spill_index = env_index - instr->environment()->first_local_index(); - if (spill_index > LUnallocated::kMaxFixedSlotIndex) { - Retry(kTooManySpillSlotsNeededForOSR); - spill_index = 0; - } - spill_index += StandardFrameConstants::kFixedSlotCount; - } - return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index); -} - - -LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) { - return NULL; -} - - -LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) { - LOperand* context = UseFixed(instr->context(), cp); - // Assign object to a fixed register different from those already used in - // LForInPrepareMap. - LOperand* object = UseFixed(instr->enumerable(), x0); - LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object); - return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) { - LOperand* map = UseRegister(instr->map()); - return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map))); -} - - -LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* map = UseRegister(instr->map()); - LOperand* temp = TempRegister(); - return AssignEnvironment(new(zone()) LCheckMapValue(value, map, temp)); -} - - -LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) { - LOperand* object = UseRegisterAtStart(instr->object()); - LOperand* index = UseRegisterAndClobber(instr->index()); - LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index); - LInstruction* result = DefineSameAsFirst(load); - return AssignPointerMap(result); -} - - -LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) { - LOperand* receiver = UseRegister(instr->receiver()); - LOperand* function = UseRegister(instr->function()); - LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function); - return AssignEnvironment(DefineAsRegister(result)); -} - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/arm64/lithium-arm64.h b/src/crankshaft/arm64/lithium-arm64.h deleted file mode 100644 index 026f65cb97..0000000000 --- a/src/crankshaft/arm64/lithium-arm64.h +++ /dev/null @@ -1,2849 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#ifndef V8_CRANKSHAFT_ARM64_LITHIUM_ARM64_H_ -#define V8_CRANKSHAFT_ARM64_LITHIUM_ARM64_H_ - -#include "src/crankshaft/hydrogen.h" -#include "src/crankshaft/lithium.h" -#include "src/crankshaft/lithium-allocator.h" -#include "src/safepoint-table.h" -#include "src/utils.h" - -namespace v8 { -namespace internal { - -// Forward declarations. -class LCodeGen; - -#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \ - V(AccessArgumentsAt) \ - V(AddE) \ - V(AddI) \ - V(AddS) \ - V(Allocate) \ - V(ApplyArguments) \ - V(ArgumentsElements) \ - V(ArgumentsLength) \ - V(ArithmeticD) \ - V(ArithmeticT) \ - V(BitI) \ - V(BitS) \ - V(BoundsCheck) \ - V(Branch) \ - V(CallNewArray) \ - V(CallRuntime) \ - V(CallWithDescriptor) \ - V(CheckArrayBufferNotNeutered) \ - V(CheckInstanceType) \ - V(CheckMapValue) \ - V(CheckMaps) \ - V(CheckNonSmi) \ - V(CheckSmi) \ - V(CheckValue) \ - V(ClampDToUint8) \ - V(ClampIToUint8) \ - V(ClampTToUint8) \ - V(ClassOfTestAndBranch) \ - V(CmpHoleAndBranchD) \ - V(CmpHoleAndBranchT) \ - V(CmpMapAndBranch) \ - V(CmpObjectEqAndBranch) \ - V(CmpT) \ - V(CompareNumericAndBranch) \ - V(ConstantD) \ - V(ConstantE) \ - V(ConstantI) \ - V(ConstantS) \ - V(ConstantT) \ - V(Context) \ - V(DebugBreak) \ - V(DeclareGlobals) \ - V(Deoptimize) \ - V(DivByConstI) \ - V(DivByPowerOf2I) \ - V(DivI) \ - V(DoubleToIntOrSmi) \ - V(Drop) \ - V(Dummy) \ - V(DummyUse) \ - V(FastAllocate) \ - V(FlooringDivByConstI) \ - V(FlooringDivByPowerOf2I) \ - V(FlooringDivI) \ - V(ForInCacheArray) \ - V(ForInPrepareMap) \ - V(Goto) \ - V(HasInPrototypeChainAndBranch) \ - V(HasInstanceTypeAndBranch) \ - V(InnerAllocatedObject) \ - V(InstructionGap) \ - V(Integer32ToDouble) \ - V(InvokeFunction) \ - V(IsSmiAndBranch) \ - V(IsStringAndBranch) \ - V(IsUndetectableAndBranch) \ - V(Label) \ - V(LazyBailout) \ - V(LoadContextSlot) \ - V(LoadFieldByIndex) \ - V(LoadFunctionPrototype) \ - V(LoadKeyedExternal) \ - V(LoadKeyedFixed) \ - V(LoadKeyedFixedDouble) \ - V(LoadNamedField) \ - V(LoadRoot) \ - V(MathAbs) \ - V(MathAbsTagged) \ - V(MathClz32) \ - V(MathCos) \ - V(MathSin) \ - V(MathExp) \ - V(MathFloorD) \ - V(MathFloorI) \ - V(MathFround) \ - V(MathLog) \ - V(MathMinMax) \ - V(MathPowHalf) \ - V(MathRoundD) \ - V(MathRoundI) \ - V(MathSqrt) \ - V(MaybeGrowElements) \ - V(ModByConstI) \ - V(ModByPowerOf2I) \ - V(ModI) \ - V(MulConstIS) \ - V(MulI) \ - V(MulS) \ - V(NumberTagD) \ - V(NumberTagU) \ - V(NumberUntagD) \ - V(OsrEntry) \ - V(Parameter) \ - V(Power) \ - V(Prologue) \ - V(PreparePushArguments) \ - V(PushArguments) \ - V(Return) \ - V(SeqStringGetChar) \ - V(SeqStringSetChar) \ - V(ShiftI) \ - V(ShiftS) \ - V(SmiTag) \ - V(SmiUntag) \ - V(StackCheck) \ - V(StoreCodeEntry) \ - V(StoreContextSlot) \ - V(StoreKeyedExternal) \ - V(StoreKeyedFixed) \ - V(StoreKeyedFixedDouble) \ - V(StoreNamedField) \ - V(StringAdd) \ - V(StringCharCodeAt) \ - V(StringCharFromCode) \ - V(StringCompareAndBranch) \ - V(SubI) \ - V(SubS) \ - V(TaggedToI) \ - V(ThisFunction) \ - V(TransitionElementsKind) \ - V(TrapAllocationMemento) \ - V(TruncateDoubleToIntOrSmi) \ - V(Typeof) \ - V(TypeofIsAndBranch) \ - V(Uint32ToDouble) \ - V(UnknownOSRValue) \ - V(WrapReceiver) - -#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \ - Opcode opcode() const final { return LInstruction::k##type; } \ - void CompileToNative(LCodeGen* generator) final; \ - const char* Mnemonic() const final { return mnemonic; } \ - static L##type* cast(LInstruction* instr) { \ - DCHECK(instr->Is##type()); \ - return reinterpret_cast(instr); \ - } - - -#define 
DECLARE_HYDROGEN_ACCESSOR(type) \ - H##type* hydrogen() const { \ - return H##type::cast(this->hydrogen_value()); \ - } - - -class LInstruction : public ZoneObject { - public: - LInstruction() - : environment_(NULL), - hydrogen_value_(NULL), - bit_field_(IsCallBits::encode(false)) { } - - virtual ~LInstruction() { } - - virtual void CompileToNative(LCodeGen* generator) = 0; - virtual const char* Mnemonic() const = 0; - virtual void PrintTo(StringStream* stream); - virtual void PrintDataTo(StringStream* stream); - virtual void PrintOutputOperandTo(StringStream* stream); - - enum Opcode { - // Declare a unique enum value for each instruction. -#define DECLARE_OPCODE(type) k##type, - LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) - kNumberOfInstructions -#undef DECLARE_OPCODE - }; - - virtual Opcode opcode() const = 0; - - // Declare non-virtual type testers for all leaf IR classes. -#define DECLARE_PREDICATE(type) \ - bool Is##type() const { return opcode() == k##type; } - LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE) -#undef DECLARE_PREDICATE - - // Declare virtual predicates for instructions that don't have - // an opcode. - virtual bool IsGap() const { return false; } - - virtual bool IsControl() const { return false; } - - // Try deleting this instruction if possible. - virtual bool TryDelete() { return false; } - - void set_environment(LEnvironment* env) { environment_ = env; } - LEnvironment* environment() const { return environment_; } - bool HasEnvironment() const { return environment_ != NULL; } - - void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); } - LPointerMap* pointer_map() const { return pointer_map_.get(); } - bool HasPointerMap() const { return pointer_map_.is_set(); } - - void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; } - HValue* hydrogen_value() const { return hydrogen_value_; } - - void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); } - bool IsCall() const { return IsCallBits::decode(bit_field_); } - - void MarkAsSyntacticTailCall() { - bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true); - } - bool IsSyntacticTailCall() const { - return IsSyntacticTailCallBits::decode(bit_field_); - } - - // Interface to the register allocator and iterators. - bool ClobbersTemps() const { return IsCall(); } - bool ClobbersRegisters() const { return IsCall(); } - virtual bool ClobbersDoubleRegisters(Isolate* isolate) const { - return IsCall(); - } - bool IsMarkedAsCall() const { return IsCall(); } - - virtual bool HasResult() const = 0; - virtual LOperand* result() const = 0; - - virtual int InputCount() = 0; - virtual LOperand* InputAt(int i) = 0; - virtual int TempCount() = 0; - virtual LOperand* TempAt(int i) = 0; - - LOperand* FirstInput() { return InputAt(0); } - LOperand* Output() { return HasResult() ? result() : NULL; } - - virtual bool HasInterestingComment(LCodeGen* gen) const { return true; } - -#ifdef DEBUG - void VerifyCall(); -#endif - - private: - class IsCallBits: public BitField<bool, 0, 1> {}; - class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> { - }; - - LEnvironment* environment_; - SetOncePointer<LPointerMap> pointer_map_; - HValue* hydrogen_value_; - int32_t bit_field_; -}; - - -// R = number of result operands (0 or 1). -template <int R> -class LTemplateResultInstruction : public LInstruction { - public: - // Allow 0 or 1 output operands.
- STATIC_ASSERT(R == 0 || R == 1); - bool HasResult() const final { return (R != 0) && (result() != NULL); } - void set_result(LOperand* operand) { results_[0] = operand; } - LOperand* result() const override { return results_[0]; } - - protected: - EmbeddedContainer<LOperand*, R> results_; -}; - - -// R = number of result operands (0 or 1). -// I = number of input operands. -// T = number of temporary operands. -template <int R, int I, int T> -class LTemplateInstruction : public LTemplateResultInstruction<R> { - protected: - EmbeddedContainer<LOperand*, I> inputs_; - EmbeddedContainer<LOperand*, T> temps_; - - private: - // Iterator support. - int InputCount() final { return I; } - LOperand* InputAt(int i) final { return inputs_[i]; } - - int TempCount() final { return T; } - LOperand* TempAt(int i) final { return temps_[i]; } -}; - - -class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> { - public: - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value") -}; - - -template <int I, int T> -class LControlInstruction : public LTemplateInstruction<0, I, T> { - public: - LControlInstruction() : false_label_(NULL), true_label_(NULL) { } - - bool IsControl() const final { return true; } - - int SuccessorCount() { return hydrogen()->SuccessorCount(); } - HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); } - - int TrueDestination(LChunk* chunk) { - return chunk->LookupDestination(true_block_id()); - } - - int FalseDestination(LChunk* chunk) { - return chunk->LookupDestination(false_block_id()); - } - - Label* TrueLabel(LChunk* chunk) { - if (true_label_ == NULL) { - true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk)); - } - return true_label_; - } - - Label* FalseLabel(LChunk* chunk) { - if (false_label_ == NULL) { - false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk)); - } - return false_label_; - } - - protected: - int true_block_id() { return SuccessorAt(0)->block_id(); } - int false_block_id() { return SuccessorAt(1)->block_id(); } - - private: - DECLARE_HYDROGEN_ACCESSOR(ControlInstruction); - - Label* false_label_; - Label* true_label_; -}; - - -class LGap : public LTemplateInstruction<0, 0, 0> { - public: - explicit LGap(HBasicBlock* block) - : block_(block) { - parallel_moves_[BEFORE] = NULL; - parallel_moves_[START] = NULL; - parallel_moves_[END] = NULL; - parallel_moves_[AFTER] = NULL; - } - - // Can't use the DECLARE-macro here because of sub-classes.
- bool IsGap() const override { return true; } - void PrintDataTo(StringStream* stream) override; - static LGap* cast(LInstruction* instr) { - DCHECK(instr->IsGap()); - return reinterpret_cast<LGap*>(instr); - } - - bool IsRedundant() const; - - HBasicBlock* block() const { return block_; } - - enum InnerPosition { - BEFORE, - START, - END, - AFTER, - FIRST_INNER_POSITION = BEFORE, - LAST_INNER_POSITION = AFTER - }; - - LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) { - if (parallel_moves_[pos] == NULL) { - parallel_moves_[pos] = new(zone) LParallelMove(zone); - } - return parallel_moves_[pos]; - } - - LParallelMove* GetParallelMove(InnerPosition pos) { - return parallel_moves_[pos]; - } - - private: - LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1]; - HBasicBlock* block_; -}; - - -class LInstructionGap final : public LGap { - public: - explicit LInstructionGap(HBasicBlock* block) : LGap(block) { } - - bool HasInterestingComment(LCodeGen* gen) const override { - return !IsRedundant(); - } - - DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap") -}; - - -class LDrop final : public LTemplateInstruction<0, 0, 0> { - public: - explicit LDrop(int count) : count_(count) { } - - int count() const { return count_; } - - DECLARE_CONCRETE_INSTRUCTION(Drop, "drop") - - private: - int count_; -}; - - -class LDummy final : public LTemplateInstruction<1, 0, 0> { - public: - LDummy() {} - DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy") -}; - - -class LDummyUse final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LDummyUse(LOperand* value) { - inputs_[0] = value; - } - DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use") -}; - - -class LGoto final : public LTemplateInstruction<0, 0, 0> { - public: - explicit LGoto(HBasicBlock* block) : block_(block) { } - - bool HasInterestingComment(LCodeGen* gen) const override; - DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") - void PrintDataTo(StringStream* stream) override; - bool IsControl() const override { return true; } - - int block_id() const { return block_->block_id(); } - - private: - HBasicBlock* block_; -}; - - -class LPrologue final : public LTemplateInstruction<0, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue") -}; - - -class LLazyBailout final : public LTemplateInstruction<0, 0, 0> { - public: - LLazyBailout() : gap_instructions_size_(0) { } - - DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout") - - void set_gap_instructions_size(int gap_instructions_size) { - gap_instructions_size_ = gap_instructions_size; - } - int gap_instructions_size() { return gap_instructions_size_; } - - private: - int gap_instructions_size_; -}; - - -class LLabel final : public LGap { - public: - explicit LLabel(HBasicBlock* block) - : LGap(block), replacement_(NULL) { } - - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(Label, "label") - - void PrintDataTo(StringStream* stream) override; - - int block_id() const { return block()->block_id(); } - bool is_loop_header() const { return block()->IsLoopHeader(); } - bool is_osr_entry() const { return block()->is_osr_entry(); } - Label* label() { return &label_; } - LLabel* replacement() const { return replacement_; } - void set_replacement(LLabel* label) { replacement_ = label; } - bool HasReplacement() const { return replacement_ != NULL; } - - private: - Label label_; - LLabel* replacement_; -}; - - -class LOsrEntry final : public LTemplateInstruction<0, 0, 0> { - public: - LOsrEntry() {} - - bool
HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry") -}; - - -class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> { - public: - LAccessArgumentsAt(LOperand* arguments, - LOperand* length, - LOperand* index) { - inputs_[0] = arguments; - inputs_[1] = length; - inputs_[2] = index; - } - - DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at") - - LOperand* arguments() { return inputs_[0]; } - LOperand* length() { return inputs_[1]; } - LOperand* index() { return inputs_[2]; } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LAddE final : public LTemplateInstruction<1, 2, 0> { - public: - LAddE(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(AddE, "add-e") - DECLARE_HYDROGEN_ACCESSOR(Add) -}; - - -class LAddI final : public LTemplateInstruction<1, 2, 0> { - public: - LAddI(LOperand* left, LOperand* right) - : shift_(NO_SHIFT), shift_amount_(0) { - inputs_[0] = left; - inputs_[1] = right; - } - - LAddI(LOperand* left, LOperand* right, Shift shift, LOperand* shift_amount) - : shift_(shift), shift_amount_(shift_amount) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - Shift shift() const { return shift_; } - LOperand* shift_amount() const { return shift_amount_; } - - DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i") - DECLARE_HYDROGEN_ACCESSOR(Add) - - protected: - Shift shift_; - LOperand* shift_amount_; -}; - - -class LAddS final : public LTemplateInstruction<1, 2, 0> { - public: - LAddS(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(AddS, "add-s") - DECLARE_HYDROGEN_ACCESSOR(Add) -}; - - -class LAllocate final : public LTemplateInstruction<1, 2, 3> { - public: - LAllocate(LOperand* context, - LOperand* size, - LOperand* temp1, - LOperand* temp2, - LOperand* temp3) { - inputs_[0] = context; - inputs_[1] = size; - temps_[0] = temp1; - temps_[1] = temp2; - temps_[2] = temp3; - } - - LOperand* context() { return inputs_[0]; } - LOperand* size() { return inputs_[1]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - LOperand* temp3() { return temps_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate") - DECLARE_HYDROGEN_ACCESSOR(Allocate) -}; - -class LFastAllocate final : public LTemplateInstruction<1, 1, 2> { - public: - LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) { - inputs_[0] = size; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* size() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate") - DECLARE_HYDROGEN_ACCESSOR(Allocate) -}; - -class LApplyArguments final : public LTemplateInstruction<1, 4, 0> { - public: - LApplyArguments(LOperand* function, - LOperand* receiver, - LOperand* length, - LOperand* elements) { - inputs_[0] = function; - inputs_[1] = receiver; - inputs_[2] = length; - inputs_[3] = elements; - } - - DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments") - DECLARE_HYDROGEN_ACCESSOR(ApplyArguments) - - LOperand* function() { return inputs_[0]; } - LOperand* receiver() 
{ return inputs_[1]; } - LOperand* length() { return inputs_[2]; } - LOperand* elements() { return inputs_[3]; } -}; - - -class LArgumentsElements final : public LTemplateInstruction<1, 0, 1> { - public: - explicit LArgumentsElements(LOperand* temp) { - temps_[0] = temp; - } - - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements") - DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements) -}; - - -class LArgumentsLength final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LArgumentsLength(LOperand* elements) { - inputs_[0] = elements; - } - - LOperand* elements() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length") -}; - - -class LArithmeticD final : public LTemplateInstruction<1, 2, 0> { - public: - LArithmeticD(Token::Value op, - LOperand* left, - LOperand* right) - : op_(op) { - inputs_[0] = left; - inputs_[1] = right; - } - - Token::Value op() const { return op_; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - Opcode opcode() const override { return LInstruction::kArithmeticD; } - void CompileToNative(LCodeGen* generator) override; - const char* Mnemonic() const override; - - private: - Token::Value op_; -}; - - -class LArithmeticT final : public LTemplateInstruction<1, 3, 0> { - public: - LArithmeticT(Token::Value op, - LOperand* context, - LOperand* left, - LOperand* right) - : op_(op) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - Token::Value op() const { return op_; } - - Opcode opcode() const override { return LInstruction::kArithmeticT; } - void CompileToNative(LCodeGen* generator) override; - const char* Mnemonic() const override; - - DECLARE_HYDROGEN_ACCESSOR(BinaryOperation) - - private: - Token::Value op_; -}; - - -class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> { - public: - explicit LBoundsCheck(LOperand* index, LOperand* length) { - inputs_[0] = index; - inputs_[1] = length; - } - - LOperand* index() { return inputs_[0]; } - LOperand* length() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check") - DECLARE_HYDROGEN_ACCESSOR(BoundsCheck) -}; - - -class LBitI final : public LTemplateInstruction<1, 2, 0> { - public: - LBitI(LOperand* left, LOperand* right) - : shift_(NO_SHIFT), shift_amount_(0) { - inputs_[0] = left; - inputs_[1] = right; - } - - LBitI(LOperand* left, LOperand* right, Shift shift, LOperand* shift_amount) - : shift_(shift), shift_amount_(shift_amount) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - Shift shift() const { return shift_; } - LOperand* shift_amount() const { return shift_amount_; } - - Token::Value op() const { return hydrogen()->op(); } - - DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i") - DECLARE_HYDROGEN_ACCESSOR(Bitwise) - - protected: - Shift shift_; - LOperand* shift_amount_; -}; - - -class LBitS final : public LTemplateInstruction<1, 2, 0> { - public: - LBitS(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - Token::Value op() const { return hydrogen()->op(); } - - DECLARE_CONCRETE_INSTRUCTION(BitS, "bit-s") - DECLARE_HYDROGEN_ACCESSOR(Bitwise) -}; - - -class LBranch final : public 
LControlInstruction<1, 2> { - public: - explicit LBranch(LOperand* value, LOperand *temp1, LOperand *temp2) { - inputs_[0] = value; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(Branch, "branch") - DECLARE_HYDROGEN_ACCESSOR(Branch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LCallNewArray final : public LTemplateInstruction<1, 2, 0> { - public: - LCallNewArray(LOperand* context, LOperand* constructor) { - inputs_[0] = context; - inputs_[1] = constructor; - } - - LOperand* context() { return inputs_[0]; } - LOperand* constructor() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array") - DECLARE_HYDROGEN_ACCESSOR(CallNewArray) - - void PrintDataTo(StringStream* stream) override; - - int arity() const { return hydrogen()->argument_count() - 1; } -}; - - -class LCallRuntime final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LCallRuntime(LOperand* context) { - inputs_[0] = context; - } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime") - DECLARE_HYDROGEN_ACCESSOR(CallRuntime) - - bool ClobbersDoubleRegisters(Isolate* isolate) const override { - return save_doubles() == kDontSaveFPRegs; - } - - const Runtime::Function* function() const { return hydrogen()->function(); } - int arity() const { return hydrogen()->argument_count(); } - SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); } -}; - - -class LCheckArrayBufferNotNeutered final - : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckArrayBufferNotNeutered(LOperand* view) { inputs_[0] = view; } - - LOperand* view() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered, - "check-array-buffer-not-neutered") - DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered) -}; - - -class LCheckInstanceType final : public LTemplateInstruction<0, 1, 1> { - public: - explicit LCheckInstanceType(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type") - DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType) -}; - - -class LCheckMaps final : public LTemplateInstruction<0, 1, 1> { - public: - explicit LCheckMaps(LOperand* value = NULL, LOperand* temp = NULL) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps") - DECLARE_HYDROGEN_ACCESSOR(CheckMaps) -}; - - -class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckNonSmi(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi") - DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject) -}; - - -class LCheckSmi final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LCheckSmi(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi") -}; - - -class LCheckValue final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckValue(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - 
DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value") - DECLARE_HYDROGEN_ACCESSOR(CheckValue) -}; - - -class LClampDToUint8 final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LClampDToUint8(LOperand* unclamped) { - inputs_[0] = unclamped; - } - - LOperand* unclamped() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8") -}; - - -class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LClampIToUint8(LOperand* unclamped) { - inputs_[0] = unclamped; - } - - LOperand* unclamped() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8") -}; - - -class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> { - public: - LClampTToUint8(LOperand* unclamped, LOperand* temp1) { - inputs_[0] = unclamped; - temps_[0] = temp1; - } - - LOperand* unclamped() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8") -}; - -class LClassOfTestAndBranch final : public LControlInstruction<1, 2> { - public: - LClassOfTestAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, "class-of-test-and-branch") - DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - -class LCmpHoleAndBranchD final : public LControlInstruction<1, 1> { - public: - explicit LCmpHoleAndBranchD(LOperand* object, LOperand* temp) { - inputs_[0] = object; - temps_[0] = temp; - } - - LOperand* object() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranchD, "cmp-hole-and-branch-d") - DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch) -}; - - -class LCmpHoleAndBranchT final : public LControlInstruction<1, 0> { - public: - explicit LCmpHoleAndBranchT(LOperand* object) { - inputs_[0] = object; - } - - LOperand* object() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranchT, "cmp-hole-and-branch-t") - DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch) -}; - - -class LCmpMapAndBranch final : public LControlInstruction<1, 1> { - public: - LCmpMapAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareMap) - - Handle map() const { return hydrogen()->map().handle(); } -}; - - -class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> { - public: - LCmpObjectEqAndBranch(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch) -}; - - -class LCmpT final : public LTemplateInstruction<1, 3, 0> { - public: - LCmpT(LOperand* context, LOperand* left, LOperand* right) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - - 
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t") - DECLARE_HYDROGEN_ACCESSOR(CompareGeneric) - - Token::Value op() const { return hydrogen()->token(); } -}; - - -class LCompareNumericAndBranch final : public LControlInstruction<2, 0> { - public: - LCompareNumericAndBranch(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch, - "compare-numeric-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch) - - Token::Value op() const { return hydrogen()->token(); } - bool is_double() const { - return hydrogen()->representation().IsDouble(); - } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LConstantD final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - double value() const { return hydrogen()->DoubleValue(); } -}; - - -class LConstantE final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - ExternalReference value() const { - return hydrogen()->ExternalReferenceValue(); - } -}; - - -class LConstantI final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - int32_t value() const { return hydrogen()->Integer32Value(); } -}; - - -class LConstantS final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); } -}; - - -class LConstantT final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - Handle value(Isolate* isolate) const { - return hydrogen()->handle(isolate); - } -}; - - -class LContext final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(Context, "context") - DECLARE_HYDROGEN_ACCESSOR(Context) -}; - - -class LDebugBreak final : public LTemplateInstruction<0, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break") -}; - - -class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LDeclareGlobals(LOperand* context) { - inputs_[0] = context; - } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals") - DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals) -}; - - -class LDeoptimize final : public LTemplateInstruction<0, 0, 0> { - public: - bool IsControl() const override { return true; } - DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize") - DECLARE_HYDROGEN_ACCESSOR(Deoptimize) -}; - - -class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> { - public: - LDivByPowerOf2I(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i") - DECLARE_HYDROGEN_ACCESSOR(Div) - - private: - int32_t divisor_; -}; - - -class LDivByConstI final : public LTemplateInstruction<1, 1, 1> { - public: - LDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) { - inputs_[0] = dividend; - divisor_ = divisor; - temps_[0] = temp; - 
} - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i") - DECLARE_HYDROGEN_ACCESSOR(Div) - - private: - int32_t divisor_; -}; - - -class LDivI final : public LTemplateInstruction<1, 2, 1> { - public: - LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) { - inputs_[0] = dividend; - inputs_[1] = divisor; - temps_[0] = temp; - } - - LOperand* dividend() { return inputs_[0]; } - LOperand* divisor() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i") - DECLARE_HYDROGEN_ACCESSOR(BinaryOperation) -}; - - -class LDoubleToIntOrSmi final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LDoubleToIntOrSmi(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DoubleToIntOrSmi, "double-to-int-or-smi") - DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) - - bool tag_result() { return hydrogen()->representation().IsSmi(); } -}; - - -class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LForInCacheArray(LOperand* map) { - inputs_[0] = map; - } - - LOperand* map() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array") - - int idx() { - return HForInCacheArray::cast(this->hydrogen_value())->idx(); - } -}; - - -class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> { - public: - LForInPrepareMap(LOperand* context, LOperand* object) { - inputs_[0] = context; - inputs_[1] = object; - } - - LOperand* context() { return inputs_[0]; } - LOperand* object() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map") -}; - -class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 1> { - public: - LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch, - "has-instance-type-and-branch") - DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> { - public: - LInnerAllocatedObject(LOperand* base_object, LOperand* offset) { - inputs_[0] = base_object; - inputs_[1] = offset; - } - - LOperand* base_object() const { return inputs_[0]; } - LOperand* offset() const { return inputs_[1]; } - - void PrintDataTo(StringStream* stream) override; - - DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object") -}; - - -class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 2> { - public: - LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype, - LOperand* scratch1, LOperand* scratch2) { - inputs_[0] = object; - inputs_[1] = prototype; - temps_[0] = scratch1; - temps_[1] = scratch2; - } - - LOperand* object() const { return inputs_[0]; } - LOperand* prototype() const { return inputs_[1]; } - LOperand* scratch1() const { return temps_[0]; } - LOperand* scratch2() const { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch, - "has-in-prototype-chain-and-branch") - DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch) -}; - - -class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> { - public: - explicit 
LInteger32ToDouble(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double") -}; - - -class LCallWithDescriptor final : public LTemplateResultInstruction<1> { - public: - LCallWithDescriptor(CallInterfaceDescriptor descriptor, - const ZoneList<LOperand*>& operands, Zone* zone) - : descriptor_(descriptor), - inputs_(descriptor.GetRegisterParameterCount() + - kImplicitRegisterParameterCount, - zone) { - DCHECK(descriptor.GetRegisterParameterCount() + - kImplicitRegisterParameterCount == - operands.length()); - inputs_.AddAll(operands, zone); - } - - LOperand* target() const { return inputs_[0]; } - - CallInterfaceDescriptor descriptor() { return descriptor_; } - - DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor) - - // The target and context are passed as implicit parameters that are not - // explicitly listed in the descriptor. - static const int kImplicitRegisterParameterCount = 2; - - private: - DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor") - - void PrintDataTo(StringStream* stream) override; - - int arity() const { return hydrogen()->argument_count() - 1; } - - CallInterfaceDescriptor descriptor_; - ZoneList<LOperand*> inputs_; - - // Iterator support. - int InputCount() final { return inputs_.length(); } - LOperand* InputAt(int i) final { return inputs_[i]; } - - int TempCount() final { return 0; } - LOperand* TempAt(int i) final { return NULL; } -}; - - -class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> { - public: - LInvokeFunction(LOperand* context, LOperand* function) { - inputs_[0] = context; - inputs_[1] = function; - } - - LOperand* context() { return inputs_[0]; } - LOperand* function() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function") - DECLARE_HYDROGEN_ACCESSOR(InvokeFunction) - - void PrintDataTo(StringStream* stream) override; - - int arity() const { return hydrogen()->argument_count() - 1; } -}; - - -class LIsStringAndBranch final : public LControlInstruction<1, 1> { - public: - LIsStringAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LIsSmiAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LIsSmiAndBranch(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> { - public: - explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch, - "is-undetectable-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LLoadContextSlot(LOperand* context) { - inputs_[0] = context; - } - - LOperand* context() { return inputs_[0]; } - -
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot") - DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot) - - int slot_index() const { return hydrogen()->slot_index(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LLoadNamedField(LOperand* object) { - inputs_[0] = object; - } - - LOperand* object() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field") - DECLARE_HYDROGEN_ACCESSOR(LoadNamedField) -}; - - -class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 1> { - public: - LLoadFunctionPrototype(LOperand* function, LOperand* temp) { - inputs_[0] = function; - temps_[0] = temp; - } - - LOperand* function() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype") - DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype) -}; - -template <int T> -class LLoadKeyed : public LTemplateInstruction<1, 3, T> { - public: - LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) { - this->inputs_[0] = elements; - this->inputs_[1] = key; - this->inputs_[2] = backing_store_owner; - } - - LOperand* elements() { return this->inputs_[0]; } - LOperand* key() { return this->inputs_[1]; } - LOperand* backing_store_owner() { return this->inputs_[2]; } - ElementsKind elements_kind() const { - return this->hydrogen()->elements_kind(); - } - bool is_external() const { - return this->hydrogen()->is_external(); - } - bool is_fixed_typed_array() const { - return hydrogen()->is_fixed_typed_array(); - } - bool is_typed_elements() const { - return is_external() || is_fixed_typed_array(); - } - uint32_t base_offset() const { - return this->hydrogen()->base_offset(); - } - void PrintDataTo(StringStream* stream) override { - this->elements()->PrintTo(stream); - stream->Add("["); - this->key()->PrintTo(stream); - if (this->base_offset() != 0) { - stream->Add(" + %d]", this->base_offset()); - } else { - stream->Add("]"); - } - } - - DECLARE_HYDROGEN_ACCESSOR(LoadKeyed) -}; - - -class LLoadKeyedExternal: public LLoadKeyed<1> { - public: - LLoadKeyedExternal(LOperand* elements, LOperand* key, - LOperand* backing_store_owner, LOperand* temp) - : LLoadKeyed<1>(elements, key, backing_store_owner) { - temps_[0] = temp; - } - - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedExternal, "load-keyed-external"); -}; - - -class LLoadKeyedFixed: public LLoadKeyed<1> { - public: - LLoadKeyedFixed(LOperand* elements, LOperand* key, LOperand* temp) - : LLoadKeyed<1>(elements, key, nullptr) { - temps_[0] = temp; - } - - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFixed, "load-keyed-fixed"); -}; - - -class LLoadKeyedFixedDouble: public LLoadKeyed<1> { - public: - LLoadKeyedFixedDouble(LOperand* elements, LOperand* key, LOperand* temp) - : LLoadKeyed<1>(elements, key, nullptr) { - temps_[0] = temp; - } - - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFixedDouble, "load-keyed-fixed-double"); -}; - - -class LLoadRoot final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root") - DECLARE_HYDROGEN_ACCESSOR(LoadRoot) - - Heap::RootListIndex index() const { return hydrogen()->index(); } -}; - - -template <int T> -class LUnaryMathOperation : public LTemplateInstruction<1, 1, T> { - public: - explicit LUnaryMathOperation(LOperand* value) { -
this->inputs_[0] = value; - } - - LOperand* value() { return this->inputs_[0]; } - BuiltinFunctionId op() const { return this->hydrogen()->op(); } - - void PrintDataTo(StringStream* stream) override; - - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - - -class LMathAbs final : public LUnaryMathOperation<0> { - public: - explicit LMathAbs(LOperand* value) : LUnaryMathOperation<0>(value) {} - - DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs") -}; - - -class LMathAbsTagged: public LTemplateInstruction<1, 2, 3> { - public: - LMathAbsTagged(LOperand* context, LOperand* value, - LOperand* temp1, LOperand* temp2, LOperand* temp3) { - inputs_[0] = context; - inputs_[1] = value; - temps_[0] = temp1; - temps_[1] = temp2; - temps_[2] = temp3; - } - - LOperand* context() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - LOperand* temp3() { return temps_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(MathAbsTagged, "math-abs-tagged") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - -class LMathCos final : public LUnaryMathOperation<0> { - public: - explicit LMathCos(LOperand* value) : LUnaryMathOperation<0>(value) {} - - DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos") -}; - -class LMathSin final : public LUnaryMathOperation<0> { - public: - explicit LMathSin(LOperand* value) : LUnaryMathOperation<0>(value) {} - - DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin") -}; - -class LMathExp final : public LUnaryMathOperation<0> { - public: - explicit LMathExp(LOperand* value) : LUnaryMathOperation<0>(value) {} - - DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp") -}; - - -// Math.floor with a double result. -class LMathFloorD final : public LUnaryMathOperation<0> { - public: - explicit LMathFloorD(LOperand* value) : LUnaryMathOperation<0>(value) { } - DECLARE_CONCRETE_INSTRUCTION(MathFloorD, "math-floor-d") -}; - - -// Math.floor with an integer result. 
-class LMathFloorI final : public LUnaryMathOperation<0> { - public: - explicit LMathFloorI(LOperand* value) : LUnaryMathOperation<0>(value) { } - DECLARE_CONCRETE_INSTRUCTION(MathFloorI, "math-floor-i") -}; - - -class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> { - public: - LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I, - "flooring-div-by-power-of-2-i") - DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) - - private: - int32_t divisor_; -}; - - -class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 2> { - public: - LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) { - inputs_[0] = dividend; - divisor_ = divisor; - temps_[0] = temp; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i") - DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) - - private: - int32_t divisor_; -}; - - -class LFlooringDivI final : public LTemplateInstruction<1, 2, 1> { - public: - LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) { - inputs_[0] = dividend; - inputs_[1] = divisor; - temps_[0] = temp; - } - - LOperand* dividend() { return inputs_[0]; } - LOperand* divisor() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i") - DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) -}; - - -class LMathLog final : public LUnaryMathOperation<0> { - public: - explicit LMathLog(LOperand* value) : LUnaryMathOperation<0>(value) { } - DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log") -}; - - -class LMathClz32 final : public LUnaryMathOperation<0> { - public: - explicit LMathClz32(LOperand* value) : LUnaryMathOperation<0>(value) { } - DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32") -}; - - -class LMathMinMax final : public LTemplateInstruction<1, 2, 0> { - public: - LMathMinMax(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max") - DECLARE_HYDROGEN_ACCESSOR(MathMinMax) -}; - - -class LMathPowHalf final : public LUnaryMathOperation<0> { - public: - explicit LMathPowHalf(LOperand* value) : LUnaryMathOperation<0>(value) { } - DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half") -}; - - -// Math.round with an integer result. -class LMathRoundD final : public LUnaryMathOperation<0> { - public: - explicit LMathRoundD(LOperand* value) - : LUnaryMathOperation<0>(value) { - } - - DECLARE_CONCRETE_INSTRUCTION(MathRoundD, "math-round-d") -}; - - -// Math.round with an integer result. 
-class LMathRoundI final : public LUnaryMathOperation<1> { - public: - LMathRoundI(LOperand* value, LOperand* temp1) - : LUnaryMathOperation<1>(value) { - temps_[0] = temp1; - } - - LOperand* temp1() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathRoundI, "math-round-i") -}; - - -class LMathFround final : public LUnaryMathOperation<0> { - public: - explicit LMathFround(LOperand* value) : LUnaryMathOperation<0>(value) {} - - DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround") -}; - - -class LMathSqrt final : public LUnaryMathOperation<0> { - public: - explicit LMathSqrt(LOperand* value) : LUnaryMathOperation<0>(value) { } - DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt") -}; - - -class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> { - public: - LModByPowerOf2I(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) - - private: - int32_t divisor_; -}; - - -class LModByConstI final : public LTemplateInstruction<1, 1, 1> { - public: - LModByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) { - inputs_[0] = dividend; - divisor_ = divisor; - temps_[0] = temp; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) - - private: - int32_t divisor_; -}; - - -class LModI final : public LTemplateInstruction<1, 2, 0> { - public: - LModI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) -}; - - -class LMulConstIS final : public LTemplateInstruction<1, 2, 0> { - public: - LMulConstIS(LOperand* left, LConstantOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LConstantOperand* right() { return LConstantOperand::cast(inputs_[1]); } - - DECLARE_CONCRETE_INSTRUCTION(MulConstIS, "mul-const-i-s") - DECLARE_HYDROGEN_ACCESSOR(Mul) -}; - - -class LMulI final : public LTemplateInstruction<1, 2, 0> { - public: - LMulI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i") - DECLARE_HYDROGEN_ACCESSOR(Mul) -}; - - -class LMulS final : public LTemplateInstruction<1, 2, 0> { - public: - LMulS(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-s") - DECLARE_HYDROGEN_ACCESSOR(Mul) -}; - - -class LNumberTagD final : public LTemplateInstruction<1, 1, 2> { - public: - LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d") - DECLARE_HYDROGEN_ACCESSOR(Change) -}; - - -class LNumberTagU final : public LTemplateInstruction<1, 1, 2> { - 
public: - explicit LNumberTagU(LOperand* value, - LOperand* temp1, - LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u") -}; - - -class LNumberUntagD final : public LTemplateInstruction<1, 1, 1> { - public: - LNumberUntagD(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag") - DECLARE_HYDROGEN_ACCESSOR(Change) - - bool truncating() { return hydrogen()->CanTruncateToNumber(); } -}; - - -class LParameter final : public LTemplateInstruction<1, 0, 0> { - public: - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter") -}; - - -class LPower final : public LTemplateInstruction<1, 2, 0> { - public: - LPower(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(Power, "power") - DECLARE_HYDROGEN_ACCESSOR(Power) -}; - - -class LPreparePushArguments final : public LTemplateInstruction<0, 0, 0> { - public: - explicit LPreparePushArguments(int argc) : argc_(argc) {} - - inline int argc() const { return argc_; } - - DECLARE_CONCRETE_INSTRUCTION(PreparePushArguments, "prepare-push-arguments") - - protected: - int argc_; -}; - - -class LPushArguments final : public LTemplateResultInstruction<0> { - public: - explicit LPushArguments(Zone* zone, - int capacity = kRecommendedMaxPushedArgs) - : zone_(zone), inputs_(capacity, zone) {} - - LOperand* argument(int i) { return inputs_[i]; } - int ArgumentCount() const { return inputs_.length(); } - - void AddArgument(LOperand* arg) { inputs_.Add(arg, zone_); } - - DECLARE_CONCRETE_INSTRUCTION(PushArguments, "push-arguments") - - // It is better to limit the number of arguments pushed simultaneously to - // avoid pressure on the register allocator. - static const int kRecommendedMaxPushedArgs = 4; - bool ShouldSplitPush() const { - return inputs_.length() >= kRecommendedMaxPushedArgs; - } - - protected: - Zone* zone_; - ZoneList inputs_; - - private: - // Iterator support. 
- int InputCount() final { return inputs_.length(); } - LOperand* InputAt(int i) final { return inputs_[i]; } - - int TempCount() final { return 0; } - LOperand* TempAt(int i) final { return NULL; } -}; - - -class LReturn final : public LTemplateInstruction<0, 3, 0> { - public: - LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) { - inputs_[0] = value; - inputs_[1] = context; - inputs_[2] = parameter_count; - } - - LOperand* value() { return inputs_[0]; } - LOperand* parameter_count() { return inputs_[2]; } - - bool has_constant_parameter_count() { - return parameter_count()->IsConstantOperand(); - } - LConstantOperand* constant_parameter_count() { - DCHECK(has_constant_parameter_count()); - return LConstantOperand::cast(parameter_count()); - } - - DECLARE_CONCRETE_INSTRUCTION(Return, "return") -}; - - -class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 1> { - public: - LSeqStringGetChar(LOperand* string, - LOperand* index, - LOperand* temp) { - inputs_[0] = string; - inputs_[1] = index; - temps_[0] = temp; - } - - LOperand* string() { return inputs_[0]; } - LOperand* index() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char") - DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar) -}; - - -class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 1> { - public: - LSeqStringSetChar(LOperand* context, - LOperand* string, - LOperand* index, - LOperand* value, - LOperand* temp) { - inputs_[0] = context; - inputs_[1] = string; - inputs_[2] = index; - inputs_[3] = value; - temps_[0] = temp; - } - - LOperand* context() { return inputs_[0]; } - LOperand* string() { return inputs_[1]; } - LOperand* index() { return inputs_[2]; } - LOperand* value() { return inputs_[3]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char") - DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar) -}; - - -class LSmiTag final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LSmiTag(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag") - DECLARE_HYDROGEN_ACCESSOR(Change) -}; - - -class LSmiUntag final : public LTemplateInstruction<1, 1, 0> { - public: - LSmiUntag(LOperand* value, bool needs_check) - : needs_check_(needs_check) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - bool needs_check() const { return needs_check_; } - - DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag") - - private: - bool needs_check_; -}; - - -class LStackCheck final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LStackCheck(LOperand* context) { - inputs_[0] = context; - } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check") - DECLARE_HYDROGEN_ACCESSOR(StackCheck) - - Label* done_label() { return &done_label_; } - - private: - Label done_label_; -}; - - -template -class LStoreKeyed : public LTemplateInstruction<0, 4, T> { - public: - LStoreKeyed(LOperand* elements, LOperand* key, LOperand* value, - LOperand* backing_store_owner) { - this->inputs_[0] = elements; - this->inputs_[1] = key; - this->inputs_[2] = value; - this->inputs_[3] = backing_store_owner; - } - - bool is_external() const { return this->hydrogen()->is_external(); } - bool is_fixed_typed_array() const { - return hydrogen()->is_fixed_typed_array(); - } - bool is_typed_elements() const { - return is_external() || 
is_fixed_typed_array(); - } - LOperand* elements() { return this->inputs_[0]; } - LOperand* key() { return this->inputs_[1]; } - LOperand* value() { return this->inputs_[2]; } - LOperand* backing_store_owner() { return this->inputs_[3]; } - ElementsKind elements_kind() const { - return this->hydrogen()->elements_kind(); - } - - bool NeedsCanonicalization() { - if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() || - hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) { - return false; - } - return this->hydrogen()->NeedsCanonicalization(); - } - uint32_t base_offset() const { return this->hydrogen()->base_offset(); } - - void PrintDataTo(StringStream* stream) override { - this->elements()->PrintTo(stream); - stream->Add("["); - this->key()->PrintTo(stream); - if (this->base_offset() != 0) { - stream->Add(" + %d] <-", this->base_offset()); - } else { - stream->Add("] <- "); - } - - if (this->value() == NULL) { - DCHECK(hydrogen()->IsConstantHoleStore() && - hydrogen()->value()->representation().IsDouble()); - stream->Add("<the hole>"); - } else { - this->value()->PrintTo(stream); - } - } - - DECLARE_HYDROGEN_ACCESSOR(StoreKeyed) -}; - - -class LStoreKeyedExternal final : public LStoreKeyed<1> { - public: - LStoreKeyedExternal(LOperand* elements, LOperand* key, LOperand* value, - LOperand* backing_store_owner, LOperand* temp) - : LStoreKeyed<1>(elements, key, value, backing_store_owner) { - temps_[0] = temp; - } - - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedExternal, "store-keyed-external") -}; - - -class LStoreKeyedFixed final : public LStoreKeyed<1> { - public: - LStoreKeyedFixed(LOperand* elements, LOperand* key, LOperand* value, - LOperand* temp) - : LStoreKeyed<1>(elements, key, value, nullptr) { - temps_[0] = temp; - } - - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFixed, "store-keyed-fixed") -}; - - -class LStoreKeyedFixedDouble final : public LStoreKeyed<1> { - public: - LStoreKeyedFixedDouble(LOperand* elements, LOperand* key, LOperand* value, - LOperand* temp) - : LStoreKeyed<1>(elements, key, value, nullptr) { - temps_[0] = temp; - } - - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFixedDouble, - "store-keyed-fixed-double") -}; - - -class LStoreNamedField final : public LTemplateInstruction<0, 2, 2> { - public: - LStoreNamedField(LOperand* object, LOperand* value, - LOperand* temp0, LOperand* temp1) { - inputs_[0] = object; - inputs_[1] = value; - temps_[0] = temp0; - temps_[1] = temp1; - } - - LOperand* object() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - LOperand* temp0() { return temps_[0]; } - LOperand* temp1() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") - DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) - - void PrintDataTo(StringStream* stream) override; - - Representation representation() const { - return hydrogen()->field_representation(); - } -}; - - -class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> { - public: - LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements, - LOperand* key, LOperand* current_capacity) { - inputs_[0] = context; - inputs_[1] = object; - inputs_[2] = elements; - inputs_[3] = key; - inputs_[4] = current_capacity; - } - - LOperand* context() { return inputs_[0]; } - LOperand* object() { return inputs_[1]; } - LOperand* elements() { return inputs_[2]; } - LOperand* key() { return inputs_[3]; } - LOperand*
current_capacity() { return inputs_[4]; } - - bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; } - - DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements) - DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements") -}; - - -class LStringAdd final : public LTemplateInstruction<1, 3, 0> { - public: - LStringAdd(LOperand* context, LOperand* left, LOperand* right) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add") - DECLARE_HYDROGEN_ACCESSOR(StringAdd) -}; - - -class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> { - public: - LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) { - inputs_[0] = context; - inputs_[1] = string; - inputs_[2] = index; - } - - LOperand* context() { return inputs_[0]; } - LOperand* string() { return inputs_[1]; } - LOperand* index() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at") - DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt) -}; - - -class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> { - public: - LStringCharFromCode(LOperand* context, LOperand* char_code) { - inputs_[0] = context; - inputs_[1] = char_code; - } - - LOperand* context() { return inputs_[0]; } - LOperand* char_code() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code") - DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode) -}; - - -class LStringCompareAndBranch final : public LControlInstruction<3, 0> { - public: - LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch, - "string-compare-and-branch") - DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch) - - Token::Value op() const { return hydrogen()->token(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -// Truncating conversion from a tagged value to an int32. 
-class LTaggedToI final : public LTemplateInstruction<1, 1, 2> { - public: - explicit LTaggedToI(LOperand* value, LOperand* temp1, LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i") - DECLARE_HYDROGEN_ACCESSOR(Change) - - bool truncating() { return hydrogen()->CanTruncateToInt32(); } -}; - - -class LShiftI final : public LTemplateInstruction<1, 2, 0> { - public: - LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt) - : op_(op), can_deopt_(can_deopt) { - inputs_[0] = left; - inputs_[1] = right; - } - - Token::Value op() const { return op_; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - bool can_deopt() const { return can_deopt_; } - - DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i") - - private: - Token::Value op_; - bool can_deopt_; -}; - - -class LShiftS final : public LTemplateInstruction<1, 2, 0> { - public: - LShiftS(Token::Value op, LOperand* left, LOperand* right, bool can_deopt) - : op_(op), can_deopt_(can_deopt) { - inputs_[0] = left; - inputs_[1] = right; - } - - Token::Value op() const { return op_; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - bool can_deopt() const { return can_deopt_; } - - DECLARE_CONCRETE_INSTRUCTION(ShiftS, "shift-s") - - private: - Token::Value op_; - bool can_deopt_; -}; - - -class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 1> { - public: - LStoreCodeEntry(LOperand* function, LOperand* code_object, - LOperand* temp) { - inputs_[0] = function; - inputs_[1] = code_object; - temps_[0] = temp; - } - - LOperand* function() { return inputs_[0]; } - LOperand* code_object() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - void PrintDataTo(StringStream* stream) override; - - DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry") - DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry) -}; - - -class LStoreContextSlot final : public LTemplateInstruction<0, 2, 1> { - public: - LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) { - inputs_[0] = context; - inputs_[1] = value; - temps_[0] = temp; - } - - LOperand* context() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot") - DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot) - - int slot_index() { return hydrogen()->slot_index(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LSubI final : public LTemplateInstruction<1, 2, 0> { - public: - LSubI(LOperand* left, LOperand* right) - : shift_(NO_SHIFT), shift_amount_(0) { - inputs_[0] = left; - inputs_[1] = right; - } - - LSubI(LOperand* left, LOperand* right, Shift shift, LOperand* shift_amount) - : shift_(shift), shift_amount_(shift_amount) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - Shift shift() const { return shift_; } - LOperand* shift_amount() const { return shift_amount_; } - - DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i") - DECLARE_HYDROGEN_ACCESSOR(Sub) - - protected: - Shift shift_; - LOperand* shift_amount_; -}; - - -class LSubS: public LTemplateInstruction<1, 2, 0> { - public: - LSubS(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] 
= right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(SubS, "sub-s") - DECLARE_HYDROGEN_ACCESSOR(Sub) -}; - - -class LThisFunction final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function") - DECLARE_HYDROGEN_ACCESSOR(ThisFunction) -}; - - -class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 2> { - public: - LTransitionElementsKind(LOperand* object, - LOperand* context, - LOperand* temp1, - LOperand* temp2) { - inputs_[0] = object; - inputs_[1] = context; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* object() { return inputs_[0]; } - LOperand* context() { return inputs_[1]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind, - "transition-elements-kind") - DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind) - - void PrintDataTo(StringStream* stream) override; - - Handle<Map> original_map() { return hydrogen()->original_map().handle(); } - Handle<Map> transitioned_map() { - return hydrogen()->transitioned_map().handle(); - } - ElementsKind from_kind() const { return hydrogen()->from_kind(); } - ElementsKind to_kind() const { return hydrogen()->to_kind(); } -}; - - -class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 2> { - public: - LTrapAllocationMemento(LOperand* object, LOperand* temp1, LOperand* temp2) { - inputs_[0] = object; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* object() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, "trap-allocation-memento") -}; - - -class LTruncateDoubleToIntOrSmi final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LTruncateDoubleToIntOrSmi(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(TruncateDoubleToIntOrSmi, - "truncate-double-to-int-or-smi") - DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) - - bool tag_result() { return hydrogen()->representation().IsSmi(); } -}; - - -class LTypeof final : public LTemplateInstruction<1, 2, 0> { - public: - LTypeof(LOperand* context, LOperand* value) { - inputs_[0] = context; - inputs_[1] = value; - } - - LOperand* context() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof") -}; - - -class LTypeofIsAndBranch final : public LControlInstruction<1, 2> { - public: - LTypeofIsAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch") - DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch) - - Handle<String> type_literal() const { return hydrogen()->type_literal(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LUint32ToDouble final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LUint32ToDouble(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double") -}; - - -class LCheckMapValue final : public LTemplateInstruction<0, 2, 1> { - public: - LCheckMapValue(LOperand* value, LOperand* map,
LOperand* temp) { - inputs_[0] = value; - inputs_[1] = map; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* map() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value") -}; - - -class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> { - public: - LLoadFieldByIndex(LOperand* object, LOperand* index) { - inputs_[0] = object; - inputs_[1] = index; - } - - LOperand* object() { return inputs_[0]; } - LOperand* index() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index") -}; - - -class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> { - public: - LWrapReceiver(LOperand* receiver, LOperand* function) { - inputs_[0] = receiver; - inputs_[1] = function; - } - - DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver") - DECLARE_HYDROGEN_ACCESSOR(WrapReceiver) - - LOperand* receiver() { return inputs_[0]; } - LOperand* function() { return inputs_[1]; } -}; - - -class LChunkBuilder; -class LPlatformChunk final : public LChunk { - public: - LPlatformChunk(CompilationInfo* info, HGraph* graph) - : LChunk(info, graph) { } - - int GetNextSpillIndex(); - LOperand* GetNextSpillSlot(RegisterKind kind); -}; - - -class LChunkBuilder final : public LChunkBuilderBase { - public: - LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator) - : LChunkBuilderBase(info, graph), - current_instruction_(NULL), - current_block_(NULL), - allocator_(allocator) {} - - // Build the sequence for the graph. - LPlatformChunk* Build(); - - // Declare methods that deal with the individual node types. -#define DECLARE_DO(type) LInstruction* Do##type(H##type* node); - HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) -#undef DECLARE_DO - - LInstruction* DoDivByPowerOf2I(HDiv* instr); - LInstruction* DoDivByConstI(HDiv* instr); - LInstruction* DoDivI(HBinaryOperation* instr); - LInstruction* DoModByPowerOf2I(HMod* instr); - LInstruction* DoModByConstI(HMod* instr); - LInstruction* DoModI(HMod* instr); - LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr); - LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr); - LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr); - - static bool HasMagicNumberForDivision(int32_t divisor); - - private: - // Methods for getting operands for Use / Define / Temp. - LUnallocated* ToUnallocated(Register reg); - LUnallocated* ToUnallocated(DoubleRegister reg); - - // Methods for setting up define-use relationships. - MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand); - MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register); - MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value, - DoubleRegister fixed_register); - - // A value that is guaranteed to be allocated to a register. - // The operand created by UseRegister is guaranteed to be live until the end - // of the instruction. This means that register allocator will not reuse its - // register for any other operand inside instruction. - MUST_USE_RESULT LOperand* UseRegister(HValue* value); - - // The operand created by UseRegisterAndClobber is guaranteed to be live until - // the end of the end of the instruction, and it may also be used as a scratch - // register by the instruction implementation. - // - // This behaves identically to ARM's UseTempRegister. 
However, it is renamed - // to discourage its use in ARM64, since in most cases it is better to - // allocate a temporary register for the Lithium instruction. - MUST_USE_RESULT LOperand* UseRegisterAndClobber(HValue* value); - - // The operand created by UseRegisterAtStart is guaranteed to be live only at - // instruction start. The register allocator is free to assign the same - // register to some other operand used inside instruction (i.e. temporary or - // output). - MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value); - - // An input operand in a register or a constant operand. - MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value); - MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value); - - // A constant operand. - MUST_USE_RESULT LConstantOperand* UseConstant(HValue* value); - - // An input operand in register, stack slot or a constant operand. - // Will not be moved to a register even if one is freely available. - virtual MUST_USE_RESULT LOperand* UseAny(HValue* value); - - // Temporary operand that must be in a register. - MUST_USE_RESULT LUnallocated* TempRegister(); - - // Temporary operand that must be in a double register. - MUST_USE_RESULT LUnallocated* TempDoubleRegister(); - - MUST_USE_RESULT LOperand* FixedTemp(Register reg); - - // Temporary operand that must be in a fixed double register. - MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg); - - // Methods for setting up define-use relationships. - // Return the same instruction that they are passed. - LInstruction* Define(LTemplateResultInstruction<1>* instr, - LUnallocated* result); - LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr); - LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr, - int index); - - LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr); - LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr, - Register reg); - LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr, - DoubleRegister reg); - - enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY }; - - // By default we assume that instruction sequences generated for calls - // cannot deoptimize eagerly and we do not attach environment to this - // instruction. - LInstruction* MarkAsCall( - LInstruction* instr, - HInstruction* hinstr, - CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY); - - LInstruction* AssignPointerMap(LInstruction* instr); - LInstruction* AssignEnvironment(LInstruction* instr); - - void VisitInstruction(HInstruction* current); - void AddInstruction(LInstruction* instr, HInstruction* current); - void DoBasicBlock(HBasicBlock* block); - - int JSShiftAmountFromHConstant(HValue* constant) { - return HConstant::cast(constant)->Integer32Value() & 0x1f; - } - bool LikelyFitsImmField(HInstruction* instr, int imm) { - if (instr->IsAdd() || instr->IsSub()) { - return Assembler::IsImmAddSub(imm) || Assembler::IsImmAddSub(-imm); - } else { - DCHECK(instr->IsBitwise()); - unsigned unused_n, unused_imm_s, unused_imm_r; - return Assembler::IsImmLogical(imm, kWRegSizeInBits, - &unused_n, &unused_imm_s, &unused_imm_r); - } - } - - // Indicates if a sequence of the form - // lsl x8, x9, #imm - // add x0, x1, x8 - // can be replaced with: - // add x0, x1, x9 LSL #imm - // If this is not possible, the function returns NULL. Otherwise it returns a - // pointer to the shift instruction that would be optimized away. 
- HBitwiseBinaryOperation* CanTransformToShiftedOp(HValue* val, - HValue** left = NULL); - // Checks if all uses of the shift operation can optimize it away. - bool ShiftCanBeOptimizedAway(HBitwiseBinaryOperation* shift); - // Attempts to merge the binary operation and an eventual previous shift - // operation into a single operation. Returns the merged instruction on - // success, and NULL otherwise. - LInstruction* TryDoOpWithShiftedRightOperand(HBinaryOperation* op); - LInstruction* DoShiftedBinaryOp(HBinaryOperation* instr, - HValue* left, - HBitwiseBinaryOperation* shift); - - LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr); - LInstruction* DoArithmeticD(Token::Value op, - HArithmeticBinaryOperation* instr); - LInstruction* DoArithmeticT(Token::Value op, - HBinaryOperation* instr); - - HInstruction* current_instruction_; - HBasicBlock* current_block_; - LAllocator* allocator_; - - DISALLOW_COPY_AND_ASSIGN(LChunkBuilder); -}; - -#undef DECLARE_HYDROGEN_ACCESSOR -#undef DECLARE_CONCRETE_INSTRUCTION - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_ARM64_LITHIUM_ARM64_H_ diff --git a/src/crankshaft/arm64/lithium-codegen-arm64.cc b/src/crankshaft/arm64/lithium-codegen-arm64.cc deleted file mode 100644 index 0dc22f430e..0000000000 --- a/src/crankshaft/arm64/lithium-codegen-arm64.cc +++ /dev/null @@ -1,5536 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/arm64/lithium-codegen-arm64.h" - -#include "src/arm64/frames-arm64.h" -#include "src/arm64/macro-assembler-arm64-inl.h" -#include "src/base/bits.h" -#include "src/builtins/builtins-constructor.h" -#include "src/code-factory.h" -#include "src/code-stubs.h" -#include "src/crankshaft/arm64/lithium-gap-resolver-arm64.h" -#include "src/ic/ic.h" -#include "src/ic/stub-cache.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - - -class SafepointGenerator final : public CallWrapper { - public: - SafepointGenerator(LCodeGen* codegen, - LPointerMap* pointers, - Safepoint::DeoptMode mode) - : codegen_(codegen), - pointers_(pointers), - deopt_mode_(mode) { } - virtual ~SafepointGenerator() { } - - virtual void BeforeCall(int call_size) const { } - - virtual void AfterCall() const { - codegen_->RecordSafepoint(pointers_, deopt_mode_); - } - - private: - LCodeGen* codegen_; - LPointerMap* pointers_; - Safepoint::DeoptMode deopt_mode_; -}; - -LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope( - LCodeGen* codegen) - : codegen_(codegen) { - DCHECK(codegen_->info()->is_calling()); - DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); - codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters; - - UseScratchRegisterScope temps(codegen_->masm_); - // Preserve the value of lr which must be saved on the stack (the call to - // the stub will clobber it). 
- Register to_be_pushed_lr = - temps.UnsafeAcquire(StoreRegistersStateStub::to_be_pushed_lr()); - codegen_->masm_->Mov(to_be_pushed_lr, lr); - StoreRegistersStateStub stub(codegen_->isolate()); - codegen_->masm_->CallStub(&stub); -} - -LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() { - DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters); - RestoreRegistersStateStub stub(codegen_->isolate()); - codegen_->masm_->CallStub(&stub); - codegen_->expected_safepoint_kind_ = Safepoint::kSimple; -} - -#define __ masm()-> - -// Emit code to branch if the given condition holds. -// The code generated here doesn't modify the flags and they must have -// been set by some prior instructions. -// -// The EmitInverted function simply inverts the condition. -class BranchOnCondition : public BranchGenerator { - public: - BranchOnCondition(LCodeGen* codegen, Condition cond) - : BranchGenerator(codegen), - cond_(cond) { } - - virtual void Emit(Label* label) const { - __ B(cond_, label); - } - - virtual void EmitInverted(Label* label) const { - if (cond_ != al) { - __ B(NegateCondition(cond_), label); - } - } - - private: - Condition cond_; -}; - - -// Emit code to compare lhs and rhs and branch if the condition holds. -// This uses MacroAssembler's CompareAndBranch function so it will handle -// converting the comparison to Cbz/Cbnz if the right-hand side is 0. -// -// EmitInverted still compares the two operands but inverts the condition. -class CompareAndBranch : public BranchGenerator { - public: - CompareAndBranch(LCodeGen* codegen, - Condition cond, - const Register& lhs, - const Operand& rhs) - : BranchGenerator(codegen), - cond_(cond), - lhs_(lhs), - rhs_(rhs) { } - - virtual void Emit(Label* label) const { - __ CompareAndBranch(lhs_, rhs_, cond_, label); - } - - virtual void EmitInverted(Label* label) const { - __ CompareAndBranch(lhs_, rhs_, NegateCondition(cond_), label); - } - - private: - Condition cond_; - const Register& lhs_; - const Operand& rhs_; -}; - - -// Test the input with the given mask and branch if the condition holds. -// If the condition is 'eq' or 'ne' this will use MacroAssembler's -// TestAndBranchIfAllClear and TestAndBranchIfAnySet so it will handle the -// conversion to Tbz/Tbnz when possible. -class TestAndBranch : public BranchGenerator { - public: - TestAndBranch(LCodeGen* codegen, - Condition cond, - const Register& value, - uint64_t mask) - : BranchGenerator(codegen), - cond_(cond), - value_(value), - mask_(mask) { } - - virtual void Emit(Label* label) const { - switch (cond_) { - case eq: - __ TestAndBranchIfAllClear(value_, mask_, label); - break; - case ne: - __ TestAndBranchIfAnySet(value_, mask_, label); - break; - default: - __ Tst(value_, mask_); - __ B(cond_, label); - } - } - - virtual void EmitInverted(Label* label) const { - // The inverse of "all clear" is "any set" and vice versa. - switch (cond_) { - case eq: - __ TestAndBranchIfAnySet(value_, mask_, label); - break; - case ne: - __ TestAndBranchIfAllClear(value_, mask_, label); - break; - default: - __ Tst(value_, mask_); - __ B(NegateCondition(cond_), label); - } - } - - private: - Condition cond_; - const Register& value_; - uint64_t mask_; -}; - - -// Test the input and branch if it is non-zero and not a NaN. 
-class BranchIfNonZeroNumber : public BranchGenerator { - public: - BranchIfNonZeroNumber(LCodeGen* codegen, const VRegister& value, - const VRegister& scratch) - : BranchGenerator(codegen), value_(value), scratch_(scratch) {} - - virtual void Emit(Label* label) const { - __ Fabs(scratch_, value_); - // Compare with 0.0. Because scratch_ is positive, the result can be one of - // nZCv (equal), nzCv (greater) or nzCV (unordered). - __ Fcmp(scratch_, 0.0); - __ B(gt, label); - } - - virtual void EmitInverted(Label* label) const { - __ Fabs(scratch_, value_); - __ Fcmp(scratch_, 0.0); - __ B(le, label); - } - - private: - const VRegister& value_; - const VRegister& scratch_; -}; - - -// Test the input and branch if it is a heap number. -class BranchIfHeapNumber : public BranchGenerator { - public: - BranchIfHeapNumber(LCodeGen* codegen, const Register& value) - : BranchGenerator(codegen), value_(value) { } - - virtual void Emit(Label* label) const { - __ JumpIfHeapNumber(value_, label); - } - - virtual void EmitInverted(Label* label) const { - __ JumpIfNotHeapNumber(value_, label); - } - - private: - const Register& value_; -}; - - -// Test the input and branch if it is the specified root value. -class BranchIfRoot : public BranchGenerator { - public: - BranchIfRoot(LCodeGen* codegen, const Register& value, - Heap::RootListIndex index) - : BranchGenerator(codegen), value_(value), index_(index) { } - - virtual void Emit(Label* label) const { - __ JumpIfRoot(value_, index_, label); - } - - virtual void EmitInverted(Label* label) const { - __ JumpIfNotRoot(value_, index_, label); - } - - private: - const Register& value_; - const Heap::RootListIndex index_; -}; - - -void LCodeGen::WriteTranslation(LEnvironment* environment, - Translation* translation) { - if (environment == NULL) return; - - // The translation includes one command per value in the environment. 
- int translation_size = environment->translation_size(); - - WriteTranslation(environment->outer(), translation); - WriteTranslationFrame(environment, translation); - - int object_index = 0; - int dematerialized_index = 0; - for (int i = 0; i < translation_size; ++i) { - LOperand* value = environment->values()->at(i); - AddToTranslation( - environment, translation, value, environment->HasTaggedValueAt(i), - environment->HasUint32ValueAt(i), &object_index, &dematerialized_index); - } -} - - -void LCodeGen::AddToTranslation(LEnvironment* environment, - Translation* translation, - LOperand* op, - bool is_tagged, - bool is_uint32, - int* object_index_pointer, - int* dematerialized_index_pointer) { - if (op == LEnvironment::materialization_marker()) { - int object_index = (*object_index_pointer)++; - if (environment->ObjectIsDuplicateAt(object_index)) { - int dupe_of = environment->ObjectDuplicateOfAt(object_index); - translation->DuplicateObject(dupe_of); - return; - } - int object_length = environment->ObjectLengthAt(object_index); - if (environment->ObjectIsArgumentsAt(object_index)) { - translation->BeginArgumentsObject(object_length); - } else { - translation->BeginCapturedObject(object_length); - } - int dematerialized_index = *dematerialized_index_pointer; - int env_offset = environment->translation_size() + dematerialized_index; - *dematerialized_index_pointer += object_length; - for (int i = 0; i < object_length; ++i) { - LOperand* value = environment->values()->at(env_offset + i); - AddToTranslation(environment, - translation, - value, - environment->HasTaggedValueAt(env_offset + i), - environment->HasUint32ValueAt(env_offset + i), - object_index_pointer, - dematerialized_index_pointer); - } - return; - } - - if (op->IsStackSlot()) { - int index = op->index(); - if (is_tagged) { - translation->StoreStackSlot(index); - } else if (is_uint32) { - translation->StoreUint32StackSlot(index); - } else { - translation->StoreInt32StackSlot(index); - } - } else if (op->IsDoubleStackSlot()) { - int index = op->index(); - translation->StoreDoubleStackSlot(index); - } else if (op->IsRegister()) { - Register reg = ToRegister(op); - if (is_tagged) { - translation->StoreRegister(reg); - } else if (is_uint32) { - translation->StoreUint32Register(reg); - } else { - translation->StoreInt32Register(reg); - } - } else if (op->IsDoubleRegister()) { - DoubleRegister reg = ToDoubleRegister(op); - translation->StoreDoubleRegister(reg); - } else if (op->IsConstantOperand()) { - HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op)); - int src_index = DefineDeoptimizationLiteral(constant->handle(isolate())); - translation->StoreLiteral(src_index); - } else { - UNREACHABLE(); - } -} - - -void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment, - Safepoint::DeoptMode mode) { - environment->set_has_been_used(); - if (!environment->HasBeenRegistered()) { - int frame_count = 0; - int jsframe_count = 0; - for (LEnvironment* e = environment; e != NULL; e = e->outer()) { - ++frame_count; - if (e->frame_type() == JS_FUNCTION) { - ++jsframe_count; - } - } - Translation translation(&translations_, frame_count, jsframe_count, zone()); - WriteTranslation(environment, &translation); - int deoptimization_index = deoptimizations_.length(); - int pc_offset = masm()->pc_offset(); - environment->Register(deoptimization_index, - translation.index(), - (mode == Safepoint::kLazyDeopt) ? 
pc_offset : -1); - deoptimizations_.Add(environment, zone()); - } -} - - -void LCodeGen::CallCode(Handle code, - RelocInfo::Mode mode, - LInstruction* instr) { - CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT); -} - - -void LCodeGen::CallCodeGeneric(Handle code, - RelocInfo::Mode mode, - LInstruction* instr, - SafepointMode safepoint_mode) { - DCHECK(instr != NULL); - - Assembler::BlockPoolsScope scope(masm_); - __ Call(code, mode); - RecordSafepointWithLazyDeopt(instr, safepoint_mode); - - if ((code->kind() == Code::COMPARE_IC)) { - // Signal that we don't inline smi code before these stubs in the - // optimizing code generator. - InlineSmiCheckInfo::EmitNotInlined(masm()); - } -} - - -void LCodeGen::DoCallNewArray(LCallNewArray* instr) { - DCHECK(instr->IsMarkedAsCall()); - DCHECK(ToRegister(instr->context()).is(cp)); - DCHECK(ToRegister(instr->constructor()).is(x1)); - - __ Mov(x0, Operand(instr->arity())); - __ Mov(x2, instr->hydrogen()->site()); - - ElementsKind kind = instr->hydrogen()->elements_kind(); - AllocationSiteOverrideMode override_mode = AllocationSite::ShouldTrack(kind) - ? DISABLE_ALLOCATION_SITES - : DONT_OVERRIDE; - - if (instr->arity() == 0) { - ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - } else if (instr->arity() == 1) { - Label done; - if (IsFastPackedElementsKind(kind)) { - Label packed_case; - - // We might need to create a holey array; look at the first argument. - __ Peek(x10, 0); - __ Cbz(x10, &packed_case); - - ElementsKind holey_kind = GetHoleyElementsKind(kind); - ArraySingleArgumentConstructorStub stub(isolate(), - holey_kind, - override_mode); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - __ B(&done); - __ Bind(&packed_case); - } - - ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - __ Bind(&done); - } else { - ArrayNArgumentsConstructorStub stub(isolate()); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - } - RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta()); - - DCHECK(ToRegister(instr->result()).is(x0)); -} - - -void LCodeGen::CallRuntime(const Runtime::Function* function, - int num_arguments, - LInstruction* instr, - SaveFPRegsMode save_doubles) { - DCHECK(instr != NULL); - - __ CallRuntime(function, num_arguments, save_doubles); - - RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); -} - - -void LCodeGen::LoadContextFromDeferred(LOperand* context) { - if (context->IsRegister()) { - __ Mov(cp, ToRegister(context)); - } else if (context->IsStackSlot()) { - __ Ldr(cp, ToMemOperand(context, kMustUseFramePointer)); - } else if (context->IsConstantOperand()) { - HConstant* constant = - chunk_->LookupConstant(LConstantOperand::cast(context)); - __ LoadHeapObject(cp, - Handle::cast(constant->handle(isolate()))); - } else { - UNREACHABLE(); - } -} - - -void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, - int argc, - LInstruction* instr, - LOperand* context) { - if (context != nullptr) LoadContextFromDeferred(context); - __ CallRuntimeSaveDoubles(id); - RecordSafepointWithRegisters( - instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); -} - - -void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr, - SafepointMode safepoint_mode) { - if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { - RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); - } else { - DCHECK(safepoint_mode == 
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - RecordSafepointWithRegisters( - instr->pointer_map(), 0, Safepoint::kLazyDeopt); - } -} - - -void LCodeGen::RecordSafepoint(LPointerMap* pointers, - Safepoint::Kind kind, - int arguments, - Safepoint::DeoptMode deopt_mode) { - DCHECK(expected_safepoint_kind_ == kind); - - const ZoneList* operands = pointers->GetNormalizedOperands(); - Safepoint safepoint = safepoints_.DefineSafepoint( - masm(), kind, arguments, deopt_mode); - - for (int i = 0; i < operands->length(); i++) { - LOperand* pointer = operands->at(i); - if (pointer->IsStackSlot()) { - safepoint.DefinePointerSlot(pointer->index(), zone()); - } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { - safepoint.DefinePointerRegister(ToRegister(pointer), zone()); - } - } -} - -void LCodeGen::RecordSafepoint(LPointerMap* pointers, - Safepoint::DeoptMode deopt_mode) { - RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode); -} - - -void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) { - LPointerMap empty_pointers(zone()); - RecordSafepoint(&empty_pointers, deopt_mode); -} - - -void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, - int arguments, - Safepoint::DeoptMode deopt_mode) { - RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode); -} - - -bool LCodeGen::GenerateCode() { - LPhase phase("Z_Code generation", chunk()); - DCHECK(is_unused()); - status_ = GENERATING; - - // Open a frame scope to indicate that there is a frame on the stack. The - // NONE indicates that the scope shouldn't actually generate code to set up - // the frame (that is done in GeneratePrologue). - FrameScope frame_scope(masm_, StackFrame::NONE); - - return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() && - GenerateJumpTable() && GenerateSafepointTable(); -} - - -void LCodeGen::SaveCallerDoubles() { - DCHECK(info()->saves_caller_doubles()); - DCHECK(NeedsEagerFrame()); - Comment(";;; Save clobbered callee double registers"); - BitVector* doubles = chunk()->allocated_double_registers(); - BitVector::Iterator iterator(doubles); - int count = 0; - while (!iterator.Done()) { - // TODO(all): Is this supposed to save just the callee-saved doubles? It - // looks like it's saving all of them. - VRegister value = VRegister::from_code(iterator.Current()); - __ Poke(value, count * kDoubleSize); - iterator.Advance(); - count++; - } -} - - -void LCodeGen::RestoreCallerDoubles() { - DCHECK(info()->saves_caller_doubles()); - DCHECK(NeedsEagerFrame()); - Comment(";;; Restore clobbered callee double registers"); - BitVector* doubles = chunk()->allocated_double_registers(); - BitVector::Iterator iterator(doubles); - int count = 0; - while (!iterator.Done()) { - // TODO(all): Is this supposed to restore just the callee-saved doubles? It - // looks like it's restoring all of them. - VRegister value = VRegister::from_code(iterator.Current()); - __ Peek(value, count * kDoubleSize); - iterator.Advance(); - count++; - } -} - - -bool LCodeGen::GeneratePrologue() { - DCHECK(is_generating()); - - if (info()->IsOptimizing()) { - ProfileEntryHookStub::MaybeCallEntryHook(masm_); - } - - DCHECK(__ StackPointer().Is(jssp)); - info()->set_prologue_offset(masm_->pc_offset()); - if (NeedsEagerFrame()) { - if (info()->IsStub()) { - __ StubPrologue( - StackFrame::STUB, - GetStackSlotCount() + TypedFrameConstants::kFixedSlotCount); - } else { - __ Prologue(info()->GeneratePreagedPrologue()); - // Reserve space for the stack slots needed by the code. 
- int slots = GetStackSlotCount(); - if (slots > 0) { - __ Claim(slots, kPointerSize); - } - } - frame_is_built_ = true; - } - - if (info()->saves_caller_doubles()) { - SaveCallerDoubles(); - } - return !is_aborted(); -} - - -void LCodeGen::DoPrologue(LPrologue* instr) { - Comment(";;; Prologue begin"); - - // Allocate a local context if needed. - if (info()->scope()->NeedsContext()) { - Comment(";;; Allocate local context"); - bool need_write_barrier = true; - // Argument to NewContext is the function, which is in x1. - int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; - Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt; - if (info()->scope()->is_script_scope()) { - __ Mov(x10, Operand(info()->scope()->scope_info())); - __ Push(x1, x10); - __ CallRuntime(Runtime::kNewScriptContext); - deopt_mode = Safepoint::kLazyDeopt; - } else { - if (slots <= ConstructorBuiltins::MaximumFunctionContextSlots()) { - Callable callable = CodeFactory::FastNewFunctionContext( - isolate(), info()->scope()->scope_type()); - __ Mov(FastNewFunctionContextDescriptor::SlotsRegister(), slots); - __ Call(callable.code(), RelocInfo::CODE_TARGET); - // Result of the FastNewFunctionContext builtin is always in new space. - need_write_barrier = false; - } else { - __ Push(x1); - __ Push(Smi::FromInt(info()->scope()->scope_type())); - __ CallRuntime(Runtime::kNewFunctionContext); - } - } - RecordSafepoint(deopt_mode); - // Context is returned in x0. It replaces the context passed to us. It's - // saved in the stack and kept live in cp. - __ Mov(cp, x0); - __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset)); - // Copy any necessary parameters into the context. - int num_parameters = info()->scope()->num_parameters(); - int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0; - for (int i = first_parameter; i < num_parameters; i++) { - Variable* var = (i == -1) ? info()->scope()->receiver() - : info()->scope()->parameter(i); - if (var->IsContextSlot()) { - Register value = x0; - Register scratch = x3; - - int parameter_offset = StandardFrameConstants::kCallerSPOffset + - (num_parameters - 1 - i) * kPointerSize; - // Load parameter from stack. - __ Ldr(value, MemOperand(fp, parameter_offset)); - // Store it in the context. - MemOperand target = ContextMemOperand(cp, var->index()); - __ Str(value, target); - // Update the write barrier. This clobbers value and scratch. 
- if (need_write_barrier) { - __ RecordWriteContextSlot(cp, static_cast(target.offset()), - value, scratch, GetLinkRegisterState(), - kSaveFPRegs); - } else if (FLAG_debug_code) { - Label done; - __ JumpIfInNewSpace(cp, &done); - __ Abort(kExpectedNewSpaceObject); - __ bind(&done); - } - } - } - Comment(";;; End allocate local context"); - } - - Comment(";;; Prologue end"); -} - -void LCodeGen::GenerateOsrPrologue() { UNREACHABLE(); } - -void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { - if (instr->IsCall()) { - EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); - } - if (!instr->IsLazyBailout() && !instr->IsGap()) { - safepoints_.BumpLastLazySafepointIndex(); - } -} - - -bool LCodeGen::GenerateDeferredCode() { - DCHECK(is_generating()); - if (deferred_.length() > 0) { - for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) { - LDeferredCode* code = deferred_[i]; - - HValue* value = - instructions_->at(code->instruction_index())->hydrogen_value(); - RecordAndWritePosition(value->position()); - - Comment(";;; <@%d,#%d> " - "-------------------- Deferred %s --------------------", - code->instruction_index(), - code->instr()->hydrogen_value()->id(), - code->instr()->Mnemonic()); - - __ Bind(code->entry()); - - if (NeedsDeferredFrame()) { - Comment(";;; Build frame"); - DCHECK(!frame_is_built_); - DCHECK(info()->IsStub()); - frame_is_built_ = true; - __ Push(lr, fp); - __ Mov(fp, StackFrame::TypeToMarker(StackFrame::STUB)); - __ Push(fp); - __ Add(fp, __ StackPointer(), - TypedFrameConstants::kFixedFrameSizeFromFp); - Comment(";;; Deferred code"); - } - - code->Generate(); - - if (NeedsDeferredFrame()) { - Comment(";;; Destroy frame"); - DCHECK(frame_is_built_); - __ Pop(xzr, fp, lr); - frame_is_built_ = false; - } - - __ B(code->exit()); - } - } - - // Force constant pool emission at the end of the deferred code to make - // sure that no constant pools are emitted after deferred code because - // deferred code generation is the last step which generates code. The two - // following steps will only output data used by crakshaft. - masm()->CheckConstPool(true, false); - - return !is_aborted(); -} - - -bool LCodeGen::GenerateJumpTable() { - Label needs_frame, call_deopt_entry; - - if (jump_table_.length() > 0) { - Comment(";;; -------------------- Jump table --------------------"); - Address base = jump_table_[0]->address; - - UseScratchRegisterScope temps(masm()); - Register entry_offset = temps.AcquireX(); - - int length = jump_table_.length(); - for (int i = 0; i < length; i++) { - Deoptimizer::JumpTableEntry* table_entry = jump_table_[i]; - __ Bind(&table_entry->label); - - Address entry = table_entry->address; - DeoptComment(table_entry->deopt_info); - - // Second-level deopt table entries are contiguous and small, so instead - // of loading the full, absolute address of each one, load the base - // address and add an immediate offset. - __ Mov(entry_offset, entry - base); - - if (table_entry->needs_frame) { - DCHECK(!info()->saves_caller_doubles()); - Comment(";;; call deopt with frame"); - // Save lr before Bl, fp will be adjusted in the needs_frame code. - __ Push(lr, fp); - // Reuse the existing needs_frame code. - __ Bl(&needs_frame); - } else { - // There is nothing special to do, so just continue to the second-level - // table. - __ Bl(&call_deopt_entry); - } - - masm()->CheckConstPool(false, false); - } - - if (needs_frame.is_linked()) { - // This variant of deopt can only be used with stubs. 
Since we don't - // have a function pointer to install in the stack frame that we're - // building, install a special marker there instead. - DCHECK(info()->IsStub()); - - Comment(";;; needs_frame common code"); - UseScratchRegisterScope temps(masm()); - Register stub_marker = temps.AcquireX(); - __ Bind(&needs_frame); - __ Mov(stub_marker, StackFrame::TypeToMarker(StackFrame::STUB)); - __ Push(cp, stub_marker); - __ Add(fp, __ StackPointer(), 2 * kPointerSize); - } - - // Generate common code for calling the second-level deopt table. - __ Bind(&call_deopt_entry); - - if (info()->saves_caller_doubles()) { - DCHECK(info()->IsStub()); - RestoreCallerDoubles(); - } - - Register deopt_entry = temps.AcquireX(); - __ Mov(deopt_entry, Operand(reinterpret_cast(base), - RelocInfo::RUNTIME_ENTRY)); - __ Add(deopt_entry, deopt_entry, entry_offset); - __ Br(deopt_entry); - } - - // Force constant pool emission at the end of the deopt jump table to make - // sure that no constant pools are emitted after. - masm()->CheckConstPool(true, false); - - // The deoptimization jump table is the last part of the instruction - // sequence. Mark the generated code as done unless we bailed out. - if (!is_aborted()) status_ = DONE; - return !is_aborted(); -} - - -bool LCodeGen::GenerateSafepointTable() { - DCHECK(is_done()); - // We do not know how much data will be emitted for the safepoint table, so - // force emission of the veneer pool. - masm()->CheckVeneerPool(true, true); - safepoints_.Emit(masm(), GetTotalFrameSlotCount()); - return !is_aborted(); -} - - -void LCodeGen::FinishCode(Handle code) { - DCHECK(is_done()); - code->set_stack_slots(GetTotalFrameSlotCount()); - code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); - PopulateDeoptimizationData(code); -} - -void LCodeGen::DeoptimizeBranch( - LInstruction* instr, DeoptimizeReason deopt_reason, BranchType branch_type, - Register reg, int bit, Deoptimizer::BailoutType* override_bailout_type) { - LEnvironment* environment = instr->environment(); - RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); - Deoptimizer::BailoutType bailout_type = - info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER; - - if (override_bailout_type != NULL) { - bailout_type = *override_bailout_type; - } - - DCHECK(environment->HasBeenRegistered()); - int id = environment->deoptimization_index(); - Address entry = - Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); - - if (entry == NULL) { - Abort(kBailoutWasNotPrepared); - } - - if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) { - Label not_zero; - ExternalReference count = ExternalReference::stress_deopt_count(isolate()); - - __ Push(x0, x1, x2); - __ Mrs(x2, NZCV); - __ Mov(x0, count); - __ Ldr(w1, MemOperand(x0)); - __ Subs(x1, x1, 1); - __ B(gt, &not_zero); - __ Mov(w1, FLAG_deopt_every_n_times); - __ Str(w1, MemOperand(x0)); - __ Pop(x2, x1, x0); - DCHECK(frame_is_built_); - __ Call(entry, RelocInfo::RUNTIME_ENTRY); - __ Unreachable(); - - __ Bind(&not_zero); - __ Str(w1, MemOperand(x0)); - __ Msr(NZCV, x2); - __ Pop(x2, x1, x0); - } - - if (info()->ShouldTrapOnDeopt()) { - Label dont_trap; - __ B(&dont_trap, InvertBranchType(branch_type), reg, bit); - __ Debug("trap_on_deopt", __LINE__, BREAK); - __ Bind(&dont_trap); - } - - Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id); - - DCHECK(info()->IsStub() || frame_is_built_); - // Go through jump table if we need to build frame, or restore caller doubles.
- if (branch_type == always && - frame_is_built_ && !info()->saves_caller_doubles()) { - DeoptComment(deopt_info); - __ Call(entry, RelocInfo::RUNTIME_ENTRY); - } else { - Deoptimizer::JumpTableEntry* table_entry = - new (zone()) Deoptimizer::JumpTableEntry( - entry, deopt_info, bailout_type, !frame_is_built_); - // We often have several deopts to the same entry, reuse the last - // jump entry if this is the case. - if (FLAG_trace_deopt || isolate()->is_profiling() || - jump_table_.is_empty() || - !table_entry->IsEquivalentTo(*jump_table_.last())) { - jump_table_.Add(table_entry, zone()); - } - __ B(&jump_table_.last()->label, branch_type, reg, bit); - } -} - -void LCodeGen::Deoptimize(LInstruction* instr, DeoptimizeReason deopt_reason, - Deoptimizer::BailoutType* override_bailout_type) { - DeoptimizeBranch(instr, deopt_reason, always, NoReg, -1, - override_bailout_type); -} - -void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr, - DeoptimizeReason deopt_reason) { - DeoptimizeBranch(instr, deopt_reason, static_cast(cond)); -} - -void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr, - DeoptimizeReason deopt_reason) { - DeoptimizeBranch(instr, deopt_reason, reg_zero, rt); -} - -void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr, - DeoptimizeReason deopt_reason) { - DeoptimizeBranch(instr, deopt_reason, reg_not_zero, rt); -} - -void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr, - DeoptimizeReason deopt_reason) { - int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit; - DeoptimizeIfBitSet(rt, sign_bit, instr, deopt_reason); -} - -void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr, - DeoptimizeReason deopt_reason) { - DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, deopt_reason); -} - -void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr, - DeoptimizeReason deopt_reason) { - DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, deopt_reason); -} - -void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index, - LInstruction* instr, - DeoptimizeReason deopt_reason) { - __ CompareRoot(rt, index); - DeoptimizeIf(eq, instr, deopt_reason); -} - -void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index, - LInstruction* instr, - DeoptimizeReason deopt_reason) { - __ CompareRoot(rt, index); - DeoptimizeIf(ne, instr, deopt_reason); -} - -void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr, - DeoptimizeReason deopt_reason) { - __ TestForMinusZero(input); - DeoptimizeIf(vs, instr, deopt_reason); -} - - -void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) { - __ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber); -} - -void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr, - DeoptimizeReason deopt_reason) { - DeoptimizeBranch(instr, deopt_reason, reg_bit_set, rt, bit); -} - -void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr, - DeoptimizeReason deopt_reason) { - DeoptimizeBranch(instr, deopt_reason, reg_bit_clear, rt, bit); -} - - -void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { - if (info()->ShouldEnsureSpaceForLazyDeopt()) { - // Ensure that we have enough space after the previous lazy-bailout - // instruction for patching the code here. 
- intptr_t current_pc = masm()->pc_offset(); - - if (current_pc < (last_lazy_deopt_pc_ + space_needed)) { - ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; - DCHECK((padding_size % kInstructionSize) == 0); - InstructionAccurateScope instruction_accurate( - masm(), padding_size / kInstructionSize); - - while (padding_size > 0) { - __ nop(); - padding_size -= kInstructionSize; - } - } - } - last_lazy_deopt_pc_ = masm()->pc_offset(); -} - - -Register LCodeGen::ToRegister(LOperand* op) const { - // TODO(all): support zero register results, as ToRegister32. - DCHECK((op != NULL) && op->IsRegister()); - return Register::from_code(op->index()); -} - - -Register LCodeGen::ToRegister32(LOperand* op) const { - DCHECK(op != NULL); - if (op->IsConstantOperand()) { - // If this is a constant operand, the result must be the zero register. - DCHECK(ToInteger32(LConstantOperand::cast(op)) == 0); - return wzr; - } else { - return ToRegister(op).W(); - } -} - - -Smi* LCodeGen::ToSmi(LConstantOperand* op) const { - HConstant* constant = chunk_->LookupConstant(op); - return Smi::FromInt(constant->Integer32Value()); -} - - -DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const { - DCHECK((op != NULL) && op->IsDoubleRegister()); - return DoubleRegister::from_code(op->index()); -} - - -Operand LCodeGen::ToOperand(LOperand* op) { - DCHECK(op != NULL); - if (op->IsConstantOperand()) { - LConstantOperand* const_op = LConstantOperand::cast(op); - HConstant* constant = chunk()->LookupConstant(const_op); - Representation r = chunk_->LookupLiteralRepresentation(const_op); - if (r.IsSmi()) { - DCHECK(constant->HasSmiValue()); - return Operand(Smi::FromInt(constant->Integer32Value())); - } else if (r.IsInteger32()) { - DCHECK(constant->HasInteger32Value()); - return Operand(constant->Integer32Value()); - } else if (r.IsDouble()) { - Abort(kToOperandUnsupportedDoubleImmediate); - } - DCHECK(r.IsTagged()); - return Operand(constant->handle(isolate())); - } else if (op->IsRegister()) { - return Operand(ToRegister(op)); - } else if (op->IsDoubleRegister()) { - Abort(kToOperandIsDoubleRegisterUnimplemented); - return Operand(0); - } - // Stack slots not implemented, use ToMemOperand instead. - UNREACHABLE(); -} - - -Operand LCodeGen::ToOperand32(LOperand* op) { - DCHECK(op != NULL); - if (op->IsRegister()) { - return Operand(ToRegister32(op)); - } else if (op->IsConstantOperand()) { - LConstantOperand* const_op = LConstantOperand::cast(op); - HConstant* constant = chunk()->LookupConstant(const_op); - Representation r = chunk_->LookupLiteralRepresentation(const_op); - if (r.IsInteger32()) { - return Operand(constant->Integer32Value()); - } else { - // Other constants not implemented. - Abort(kToOperand32UnsupportedImmediate); - } - } - // Other cases are not implemented. - UNREACHABLE(); -} - - -static int64_t ArgumentsOffsetWithoutFrame(int index) { - DCHECK(index < 0); - return -(index + 1) * kPointerSize; -} - - -MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const { - DCHECK(op != NULL); - DCHECK(!op->IsRegister()); - DCHECK(!op->IsDoubleRegister()); - DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); - if (NeedsEagerFrame()) { - int fp_offset = FrameSlotToFPOffset(op->index()); - // Loads and stores have a bigger reach in positive offset than negative. - // We try to access using jssp (positive offset) first, then fall back to - // fp (negative offset) if that fails. 
- // - // We can reference a stack slot from jssp only if we know how much we've - // put on the stack. We don't know this in the following cases: - // - stack_mode != kCanUseStackPointer: this is the case when deferred - // code has saved the registers. - // - saves_caller_doubles(): some double registers have been pushed, jssp - // references the end of the double registers and not the end of the stack - // slots. - // In both of the cases above, we _could_ add the tracking information - // required so that we can use jssp here, but in practice it isn't worth it. - if ((stack_mode == kCanUseStackPointer) && - !info()->saves_caller_doubles()) { - int jssp_offset_to_fp = - (pushed_arguments_ + GetTotalFrameSlotCount()) * kPointerSize - - StandardFrameConstants::kFixedFrameSizeAboveFp; - int jssp_offset = fp_offset + jssp_offset_to_fp; - if (masm()->IsImmLSScaled(jssp_offset, kPointerSizeLog2)) { - return MemOperand(masm()->StackPointer(), jssp_offset); - } - } - return MemOperand(fp, fp_offset); - } else { - // Retrieve parameter without eager stack-frame relative to the - // stack-pointer. - return MemOperand(masm()->StackPointer(), - ArgumentsOffsetWithoutFrame(op->index())); - } -} - - -Handle LCodeGen::ToHandle(LConstantOperand* op) const { - HConstant* constant = chunk_->LookupConstant(op); - DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); - return constant->handle(isolate()); -} - - -template -Operand LCodeGen::ToShiftedRightOperand32(LOperand* right, LI* shift_info) { - if (shift_info->shift() == NO_SHIFT) { - return ToOperand32(right); - } else { - return Operand( - ToRegister32(right), - shift_info->shift(), - JSShiftAmountFromLConstant(shift_info->shift_amount())); - } -} - - -bool LCodeGen::IsSmi(LConstantOperand* op) const { - return chunk_->LookupLiteralRepresentation(op).IsSmi(); -} - - -bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const { - return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32(); -} - - -int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { - HConstant* constant = chunk_->LookupConstant(op); - return constant->Integer32Value(); -} - - -double LCodeGen::ToDouble(LConstantOperand* op) const { - HConstant* constant = chunk_->LookupConstant(op); - DCHECK(constant->HasDoubleValue()); - return constant->DoubleValue(); -} - - -Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { - Condition cond = nv; - switch (op) { - case Token::EQ: - case Token::EQ_STRICT: - cond = eq; - break; - case Token::NE: - case Token::NE_STRICT: - cond = ne; - break; - case Token::LT: - cond = is_unsigned ? lo : lt; - break; - case Token::GT: - cond = is_unsigned ? hi : gt; - break; - case Token::LTE: - cond = is_unsigned ? ls : le; - break; - case Token::GTE: - cond = is_unsigned ? 
hs : ge; - break; - case Token::IN: - case Token::INSTANCEOF: - default: - UNREACHABLE(); - } - return cond; -} - - -template -void LCodeGen::EmitBranchGeneric(InstrType instr, - const BranchGenerator& branch) { - int left_block = instr->TrueDestination(chunk_); - int right_block = instr->FalseDestination(chunk_); - - int next_block = GetNextEmittedBlock(); - - if (right_block == left_block) { - EmitGoto(left_block); - } else if (left_block == next_block) { - branch.EmitInverted(chunk_->GetAssemblyLabel(right_block)); - } else { - branch.Emit(chunk_->GetAssemblyLabel(left_block)); - if (right_block != next_block) { - __ B(chunk_->GetAssemblyLabel(right_block)); - } - } -} - - -template -void LCodeGen::EmitBranch(InstrType instr, Condition condition) { - DCHECK((condition != al) && (condition != nv)); - BranchOnCondition branch(this, condition); - EmitBranchGeneric(instr, branch); -} - - -template -void LCodeGen::EmitCompareAndBranch(InstrType instr, - Condition condition, - const Register& lhs, - const Operand& rhs) { - DCHECK((condition != al) && (condition != nv)); - CompareAndBranch branch(this, condition, lhs, rhs); - EmitBranchGeneric(instr, branch); -} - - -template -void LCodeGen::EmitTestAndBranch(InstrType instr, - Condition condition, - const Register& value, - uint64_t mask) { - DCHECK((condition != al) && (condition != nv)); - TestAndBranch branch(this, condition, value, mask); - EmitBranchGeneric(instr, branch); -} - -template -void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr, - const VRegister& value, - const VRegister& scratch) { - BranchIfNonZeroNumber branch(this, value, scratch); - EmitBranchGeneric(instr, branch); -} - - -template -void LCodeGen::EmitBranchIfHeapNumber(InstrType instr, - const Register& value) { - BranchIfHeapNumber branch(this, value); - EmitBranchGeneric(instr, branch); -} - - -template -void LCodeGen::EmitBranchIfRoot(InstrType instr, - const Register& value, - Heap::RootListIndex index) { - BranchIfRoot branch(this, value, index); - EmitBranchGeneric(instr, branch); -} - - -void LCodeGen::DoGap(LGap* gap) { - for (int i = LGap::FIRST_INNER_POSITION; - i <= LGap::LAST_INNER_POSITION; - i++) { - LGap::InnerPosition inner_pos = static_cast(i); - LParallelMove* move = gap->GetParallelMove(inner_pos); - if (move != NULL) { - resolver_.Resolve(move); - } - } -} - - -void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { - Register arguments = ToRegister(instr->arguments()); - Register result = ToRegister(instr->result()); - - // The pointer to the arguments array come from DoArgumentsElements. - // It does not point directly to the arguments and there is an offest of - // two words that we must take into account when accessing an argument. - // Subtracting the index from length accounts for one, so we add one more. 
- - if (instr->length()->IsConstantOperand() && - instr->index()->IsConstantOperand()) { - int index = ToInteger32(LConstantOperand::cast(instr->index())); - int length = ToInteger32(LConstantOperand::cast(instr->length())); - int offset = ((length - index) + 1) * kPointerSize; - __ Ldr(result, MemOperand(arguments, offset)); - } else if (instr->index()->IsConstantOperand()) { - Register length = ToRegister32(instr->length()); - int index = ToInteger32(LConstantOperand::cast(instr->index())); - int loc = index - 1; - if (loc != 0) { - __ Sub(result.W(), length, loc); - __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2)); - } else { - __ Ldr(result, MemOperand(arguments, length, UXTW, kPointerSizeLog2)); - } - } else { - Register length = ToRegister32(instr->length()); - Operand index = ToOperand32(instr->index()); - __ Sub(result.W(), length, index); - __ Add(result.W(), result.W(), 1); - __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2)); - } -} - - -void LCodeGen::DoAddE(LAddE* instr) { - Register result = ToRegister(instr->result()); - Register left = ToRegister(instr->left()); - Operand right = Operand(x0); // Dummy initialization. - if (instr->hydrogen()->external_add_type() == AddOfExternalAndTagged) { - right = Operand(ToRegister(instr->right())); - } else if (instr->right()->IsConstantOperand()) { - right = ToInteger32(LConstantOperand::cast(instr->right())); - } else { - right = Operand(ToRegister32(instr->right()), SXTW); - } - - DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)); - __ Add(result, left, right); -} - - -void LCodeGen::DoAddI(LAddI* instr) { - bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); - Register result = ToRegister32(instr->result()); - Register left = ToRegister32(instr->left()); - Operand right = ToShiftedRightOperand32(instr->right(), instr); - - if (can_overflow) { - __ Adds(result, left, right); - DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow); - } else { - __ Add(result, left, right); - } -} - - -void LCodeGen::DoAddS(LAddS* instr) { - bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); - Register result = ToRegister(instr->result()); - Register left = ToRegister(instr->left()); - Operand right = ToOperand(instr->right()); - if (can_overflow) { - __ Adds(result, left, right); - DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow); - } else { - __ Add(result, left, right); - } -} - - -void LCodeGen::DoAllocate(LAllocate* instr) { - class DeferredAllocate: public LDeferredCode { - public: - DeferredAllocate(LCodeGen* codegen, LAllocate* instr) - : LDeferredCode(codegen), instr_(instr) { } - virtual void Generate() { codegen()->DoDeferredAllocate(instr_); } - virtual LInstruction* instr() { return instr_; } - private: - LAllocate* instr_; - }; - - DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr); - - Register result = ToRegister(instr->result()); - Register temp1 = ToRegister(instr->temp1()); - Register temp2 = ToRegister(instr->temp2()); - - // Allocate memory for the object. 
- AllocationFlags flags = NO_ALLOCATION_FLAGS; - if (instr->hydrogen()->MustAllocateDoubleAligned()) { - flags = static_cast(flags | DOUBLE_ALIGNMENT); - } - - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - flags = static_cast(flags | PRETENURE); - } - - if (instr->hydrogen()->IsAllocationFoldingDominator()) { - flags = static_cast(flags | ALLOCATION_FOLDING_DOMINATOR); - } - DCHECK(!instr->hydrogen()->IsAllocationFolded()); - - if (instr->size()->IsConstantOperand()) { - int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - CHECK(size <= kMaxRegularHeapObjectSize); - __ Allocate(size, result, temp1, temp2, deferred->entry(), flags); - } else { - Register size = ToRegister32(instr->size()); - __ Sxtw(size.X(), size); - __ Allocate(size.X(), result, temp1, temp2, deferred->entry(), flags); - } - - __ Bind(deferred->exit()); - - if (instr->hydrogen()->MustPrefillWithFiller()) { - Register start = temp1; - Register end = temp2; - Register filler = ToRegister(instr->temp3()); - - __ Sub(start, result, kHeapObjectTag); - - if (instr->size()->IsConstantOperand()) { - int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - __ Add(end, start, size); - } else { - __ Add(end, start, ToRegister(instr->size())); - } - __ LoadRoot(filler, Heap::kOnePointerFillerMapRootIndex); - __ InitializeFieldsWithFiller(start, end, filler); - } else { - DCHECK(instr->temp3() == NULL); - } -} - - -void LCodeGen::DoDeferredAllocate(LAllocate* instr) { - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - __ Mov(ToRegister(instr->result()), Smi::kZero); - - PushSafepointRegistersScope scope(this); - LoadContextFromDeferred(instr->context()); - // We're in a SafepointRegistersScope so we can use any scratch registers. - Register size = x0; - if (instr->size()->IsConstantOperand()) { - __ Mov(size, ToSmi(LConstantOperand::cast(instr->size()))); - } else { - __ SmiTag(size, ToRegister32(instr->size()).X()); - } - int flags = AllocateDoubleAlignFlag::encode( - instr->hydrogen()->MustAllocateDoubleAligned()); - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - flags = AllocateTargetSpace::update(flags, OLD_SPACE); - } else { - flags = AllocateTargetSpace::update(flags, NEW_SPACE); - } - __ Mov(x10, Smi::FromInt(flags)); - __ Push(size, x10); - - CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr, nullptr); - __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result())); - - if (instr->hydrogen()->IsAllocationFoldingDominator()) { - AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS; - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - allocation_flags = static_cast(flags | PRETENURE); - } - // If the allocation folding dominator allocate triggered a GC, allocation - // happend in the runtime. We have to reset the top pointer to virtually - // undo the allocation. 
- ExternalReference allocation_top = - AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags); - Register top_address = x10; - __ Sub(x0, x0, Operand(kHeapObjectTag)); - __ Mov(top_address, Operand(allocation_top)); - __ Str(x0, MemOperand(top_address)); - __ Add(x0, x0, Operand(kHeapObjectTag)); - } -} - -void LCodeGen::DoFastAllocate(LFastAllocate* instr) { - DCHECK(instr->hydrogen()->IsAllocationFolded()); - DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator()); - Register result = ToRegister(instr->result()); - Register scratch1 = ToRegister(instr->temp1()); - Register scratch2 = ToRegister(instr->temp2()); - - AllocationFlags flags = ALLOCATION_FOLDED; - if (instr->hydrogen()->MustAllocateDoubleAligned()) { - flags = static_cast(flags | DOUBLE_ALIGNMENT); - } - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - flags = static_cast(flags | PRETENURE); - } - if (instr->size()->IsConstantOperand()) { - int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - CHECK(size <= kMaxRegularHeapObjectSize); - __ FastAllocate(size, result, scratch1, scratch2, flags); - } else { - Register size = ToRegister(instr->size()); - __ FastAllocate(size, result, scratch1, scratch2, flags); - } -} - - -void LCodeGen::DoApplyArguments(LApplyArguments* instr) { - Register receiver = ToRegister(instr->receiver()); - Register function = ToRegister(instr->function()); - Register length = ToRegister32(instr->length()); - - Register elements = ToRegister(instr->elements()); - Register scratch = x5; - DCHECK(receiver.Is(x0)); // Used for parameter count. - DCHECK(function.Is(x1)); // Required by InvokeFunction. - DCHECK(ToRegister(instr->result()).Is(x0)); - DCHECK(instr->IsMarkedAsCall()); - - // Copy the arguments to this function possibly from the - // adaptor frame below it. - const uint32_t kArgumentsLimit = 1 * KB; - __ Cmp(length, kArgumentsLimit); - DeoptimizeIf(hi, instr, DeoptimizeReason::kTooManyArguments); - - // Push the receiver and use the register to keep the original - // number of arguments. - __ Push(receiver); - Register argc = receiver; - receiver = NoReg; - __ Sxtw(argc, length); - // The arguments are at a one pointer size offset from elements. - __ Add(elements, elements, 1 * kPointerSize); - - // Loop through the arguments pushing them onto the execution - // stack. - Label invoke, loop; - // length is a small non-negative integer, due to the test above. - __ Cbz(length, &invoke); - __ Bind(&loop); - __ Ldr(scratch, MemOperand(elements, length, SXTW, kPointerSizeLog2)); - __ Push(scratch); - __ Subs(length, length, 1); - __ B(ne, &loop); - - __ Bind(&invoke); - - InvokeFlag flag = CALL_FUNCTION; - if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) { - DCHECK(!info()->saves_caller_doubles()); - // TODO(ishell): drop current frame before pushing arguments to the stack. - flag = JUMP_FUNCTION; - ParameterCount actual(x0); - // It is safe to use x3, x4 and x5 as scratch registers here given that - // 1) we are not going to return to caller function anyway, - // 2) x3 (new.target) will be initialized below. - PrepareForTailCall(actual, x3, x4, x5); - } - - DCHECK(instr->HasPointerMap()); - LPointerMap* pointers = instr->pointer_map(); - SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt); - // The number of arguments is stored in argc (receiver) which is x0, as - // expected by InvokeFunction. 
- ParameterCount actual(argc); - __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator); -} - - -void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { - Register result = ToRegister(instr->result()); - - if (instr->hydrogen()->from_inlined()) { - // When we are inside an inlined function, the arguments are the last things - // that have been pushed on the stack. Therefore the arguments array can be - // accessed directly from jssp. - // However in the normal case, it is accessed via fp but there are two words - // on the stack between fp and the arguments (the saved lr and fp) and the - // LAccessArgumentsAt implementation take that into account. - // In the inlined case we need to subtract the size of 2 words to jssp to - // get a pointer which will work well with LAccessArgumentsAt. - DCHECK(masm()->StackPointer().Is(jssp)); - __ Sub(result, jssp, 2 * kPointerSize); - } else if (instr->hydrogen()->arguments_adaptor()) { - DCHECK(instr->temp() != NULL); - Register previous_fp = ToRegister(instr->temp()); - - __ Ldr(previous_fp, - MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ Ldr(result, MemOperand(previous_fp, - CommonFrameConstants::kContextOrFrameTypeOffset)); - __ Cmp(result, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)); - __ Csel(result, fp, previous_fp, ne); - } else { - __ Mov(result, fp); - } -} - - -void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { - Register elements = ToRegister(instr->elements()); - Register result = ToRegister32(instr->result()); - Label done; - - // If no arguments adaptor frame the number of arguments is fixed. - __ Cmp(fp, elements); - __ Mov(result, scope()->num_parameters()); - __ B(eq, &done); - - // Arguments adaptor frame present. Get argument length from there. - __ Ldr(result.X(), MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ Ldr(result, - UntagSmiMemOperand(result.X(), - ArgumentsAdaptorFrameConstants::kLengthOffset)); - - // Argument length is in result register. - __ Bind(&done); -} - - -void LCodeGen::DoArithmeticD(LArithmeticD* instr) { - DoubleRegister left = ToDoubleRegister(instr->left()); - DoubleRegister right = ToDoubleRegister(instr->right()); - DoubleRegister result = ToDoubleRegister(instr->result()); - - switch (instr->op()) { - case Token::ADD: __ Fadd(result, left, right); break; - case Token::SUB: __ Fsub(result, left, right); break; - case Token::MUL: __ Fmul(result, left, right); break; - case Token::DIV: __ Fdiv(result, left, right); break; - case Token::MOD: { - // The ECMA-262 remainder operator is the remainder from a truncating - // (round-towards-zero) division. Note that this differs from IEEE-754. - // - // TODO(jbramley): See if it's possible to do this inline, rather than by - // calling a helper function. With frintz (to produce the intermediate - // quotient) and fmsub (to calculate the remainder without loss of - // precision), it should be possible. However, we would need support for - // fdiv in round-towards-zero mode, and the ARM64 simulator doesn't - // support that yet. 
- DCHECK(left.Is(d0)); - DCHECK(right.Is(d1)); - __ CallCFunction( - ExternalReference::mod_two_doubles_operation(isolate()), - 0, 2); - DCHECK(result.Is(d0)); - break; - } - default: - UNREACHABLE(); - break; - } -} - - -void LCodeGen::DoArithmeticT(LArithmeticT* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - DCHECK(ToRegister(instr->left()).is(x1)); - DCHECK(ToRegister(instr->right()).is(x0)); - DCHECK(ToRegister(instr->result()).is(x0)); - - UNREACHABLE(); -} - - -void LCodeGen::DoBitI(LBitI* instr) { - Register result = ToRegister32(instr->result()); - Register left = ToRegister32(instr->left()); - Operand right = ToShiftedRightOperand32(instr->right(), instr); - - switch (instr->op()) { - case Token::BIT_AND: __ And(result, left, right); break; - case Token::BIT_OR: __ Orr(result, left, right); break; - case Token::BIT_XOR: __ Eor(result, left, right); break; - default: - UNREACHABLE(); - break; - } -} - - -void LCodeGen::DoBitS(LBitS* instr) { - Register result = ToRegister(instr->result()); - Register left = ToRegister(instr->left()); - Operand right = ToOperand(instr->right()); - - switch (instr->op()) { - case Token::BIT_AND: __ And(result, left, right); break; - case Token::BIT_OR: __ Orr(result, left, right); break; - case Token::BIT_XOR: __ Eor(result, left, right); break; - default: - UNREACHABLE(); - break; - } -} - - -void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) { - Condition cond = instr->hydrogen()->allow_equality() ? hi : hs; - DCHECK(instr->hydrogen()->index()->representation().IsInteger32()); - DCHECK(instr->hydrogen()->length()->representation().IsInteger32()); - if (instr->index()->IsConstantOperand()) { - Operand index = ToOperand32(instr->index()); - Register length = ToRegister32(instr->length()); - __ Cmp(length, index); - cond = CommuteCondition(cond); - } else { - Register index = ToRegister32(instr->index()); - Operand length = ToOperand32(instr->length()); - __ Cmp(index, length); - } - if (FLAG_debug_code && instr->hydrogen()->skip_check()) { - __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed); - } else { - DeoptimizeIf(cond, instr, DeoptimizeReason::kOutOfBounds); - } -} - - -void LCodeGen::DoBranch(LBranch* instr) { - Representation r = instr->hydrogen()->value()->representation(); - Label* true_label = instr->TrueLabel(chunk_); - Label* false_label = instr->FalseLabel(chunk_); - - if (r.IsInteger32()) { - DCHECK(!info()->IsStub()); - EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0); - } else if (r.IsSmi()) { - DCHECK(!info()->IsStub()); - STATIC_ASSERT(kSmiTag == 0); - EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0); - } else if (r.IsDouble()) { - DoubleRegister value = ToDoubleRegister(instr->value()); - // Test the double value. Zero and NaN are false. - EmitBranchIfNonZeroNumber(instr, value, double_scratch()); - } else { - DCHECK(r.IsTagged()); - Register value = ToRegister(instr->value()); - HType type = instr->hydrogen()->value()->type(); - - if (type.IsBoolean()) { - DCHECK(!info()->IsStub()); - __ CompareRoot(value, Heap::kTrueValueRootIndex); - EmitBranch(instr, eq); - } else if (type.IsSmi()) { - DCHECK(!info()->IsStub()); - EmitCompareAndBranch(instr, ne, value, Smi::kZero); - } else if (type.IsJSArray()) { - DCHECK(!info()->IsStub()); - EmitGoto(instr->TrueDestination(chunk())); - } else if (type.IsHeapNumber()) { - DCHECK(!info()->IsStub()); - __ Ldr(double_scratch(), FieldMemOperand(value, - HeapNumber::kValueOffset)); - // Test the double value. Zero and NaN are false. 
- EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch()); - } else if (type.IsString()) { - DCHECK(!info()->IsStub()); - Register temp = ToRegister(instr->temp1()); - __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset)); - EmitCompareAndBranch(instr, ne, temp, 0); - } else { - ToBooleanHints expected = instr->hydrogen()->expected_input_types(); - // Avoid deopts in the case where we've never executed this path before. - if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny; - - if (expected & ToBooleanHint::kUndefined) { - // undefined -> false. - __ JumpIfRoot( - value, Heap::kUndefinedValueRootIndex, false_label); - } - - if (expected & ToBooleanHint::kBoolean) { - // Boolean -> its value. - __ JumpIfRoot( - value, Heap::kTrueValueRootIndex, true_label); - __ JumpIfRoot( - value, Heap::kFalseValueRootIndex, false_label); - } - - if (expected & ToBooleanHint::kNull) { - // 'null' -> false. - __ JumpIfRoot( - value, Heap::kNullValueRootIndex, false_label); - } - - if (expected & ToBooleanHint::kSmallInteger) { - // Smis: 0 -> false, all other -> true. - DCHECK(Smi::kZero == 0); - __ Cbz(value, false_label); - __ JumpIfSmi(value, true_label); - } else if (expected & ToBooleanHint::kNeedsMap) { - // If we need a map later and have a smi, deopt. - DeoptimizeIfSmi(value, instr, DeoptimizeReason::kSmi); - } - - Register map = NoReg; - Register scratch = NoReg; - - if (expected & ToBooleanHint::kNeedsMap) { - DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL)); - map = ToRegister(instr->temp1()); - scratch = ToRegister(instr->temp2()); - - __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); - - if (expected & ToBooleanHint::kCanBeUndetectable) { - // Undetectable -> false. - __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); - __ TestAndBranchIfAnySet( - scratch, 1 << Map::kIsUndetectable, false_label); - } - } - - if (expected & ToBooleanHint::kReceiver) { - // spec object -> true. - __ CompareInstanceType(map, scratch, FIRST_JS_RECEIVER_TYPE); - __ B(ge, true_label); - } - - if (expected & ToBooleanHint::kString) { - // String value -> false iff empty. - Label not_string; - __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE); - __ B(ge, &not_string); - __ Ldr(scratch, FieldMemOperand(value, String::kLengthOffset)); - __ Cbz(scratch, false_label); - __ B(true_label); - __ Bind(&not_string); - } - - if (expected & ToBooleanHint::kSymbol) { - // Symbol value -> true. - __ CompareInstanceType(map, scratch, SYMBOL_TYPE); - __ B(eq, true_label); - } - - if (expected & ToBooleanHint::kHeapNumber) { - Label not_heap_number; - __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number); - - __ Ldr(double_scratch(), - FieldMemOperand(value, HeapNumber::kValueOffset)); - __ Fcmp(double_scratch(), 0.0); - // If we got a NaN (overflow bit is set), jump to the false branch. - __ B(vs, false_label); - __ B(eq, false_label); - __ B(true_label); - __ Bind(&not_heap_number); - } - - if (expected != ToBooleanHint::kAny) { - // We've seen something for the first time -> deopt. - // This can only happen if we are not generic already.
- Deoptimize(instr, DeoptimizeReason::kUnexpectedObject); - } - } - } -} - -void LCodeGen::CallKnownFunction(Handle function, - int formal_parameter_count, int arity, - bool is_tail_call, LInstruction* instr) { - bool dont_adapt_arguments = - formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; - bool can_invoke_directly = - dont_adapt_arguments || formal_parameter_count == arity; - - // The function interface relies on the following register assignments. - Register function_reg = x1; - Register arity_reg = x0; - - LPointerMap* pointers = instr->pointer_map(); - - if (FLAG_debug_code) { - Label is_not_smi; - // Try to confirm that function_reg (x1) is a tagged pointer. - __ JumpIfNotSmi(function_reg, &is_not_smi); - __ Abort(kExpectedFunctionObject); - __ Bind(&is_not_smi); - } - - if (can_invoke_directly) { - // Change context. - __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset)); - - // Always initialize new target and number of actual arguments. - __ LoadRoot(x3, Heap::kUndefinedValueRootIndex); - __ Mov(arity_reg, arity); - - bool is_self_call = function.is_identical_to(info()->closure()); - - // Invoke function. - if (is_self_call) { - Handle self(reinterpret_cast(__ CodeObject().location())); - if (is_tail_call) { - __ Jump(self, RelocInfo::CODE_TARGET); - } else { - __ Call(self, RelocInfo::CODE_TARGET); - } - } else { - __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset)); - if (is_tail_call) { - __ Jump(x10); - } else { - __ Call(x10); - } - } - - if (!is_tail_call) { - // Set up deoptimization. - RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); - } - } else { - SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); - ParameterCount actual(arity); - ParameterCount expected(formal_parameter_count); - InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION; - __ InvokeFunction(function_reg, expected, actual, flag, generator); - } -} - -void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { - DCHECK(instr->IsMarkedAsCall()); - DCHECK(ToRegister(instr->result()).Is(x0)); - - if (instr->hydrogen()->IsTailCall()) { - if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL); - - if (instr->target()->IsConstantOperand()) { - LConstantOperand* target = LConstantOperand::cast(instr->target()); - Handle code = Handle::cast(ToHandle(target)); - // TODO(all): on ARM we use a call descriptor to specify a storage mode - // but on ARM64 we only have one storage mode so it isn't necessary. Check - // this understanding is correct. - __ Jump(code, RelocInfo::CODE_TARGET); - } else { - DCHECK(instr->target()->IsRegister()); - Register target = ToRegister(instr->target()); - __ Add(target, target, Code::kHeaderSize - kHeapObjectTag); - __ Br(target); - } - } else { - LPointerMap* pointers = instr->pointer_map(); - SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); - - if (instr->target()->IsConstantOperand()) { - LConstantOperand* target = LConstantOperand::cast(instr->target()); - Handle code = Handle::cast(ToHandle(target)); - generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET)); - // TODO(all): on ARM we use a call descriptor to specify a storage mode - // but on ARM64 we only have one storage mode so it isn't necessary. Check - // this understanding is correct. 
- __ Call(code, RelocInfo::CODE_TARGET); - } else { - DCHECK(instr->target()->IsRegister()); - Register target = ToRegister(instr->target()); - generator.BeforeCall(__ CallSize(target)); - __ Add(target, target, Code::kHeaderSize - kHeapObjectTag); - __ Call(target); - } - generator.AfterCall(); - } - - HCallWithDescriptor* hinstr = instr->hydrogen(); - RecordPushedArgumentsDelta(hinstr->argument_delta()); - - // HCallWithDescriptor instruction is translated to zero or more - // LPushArguments (they handle parameters passed on the stack) followed by - // a LCallWithDescriptor. Each LPushArguments instruction generated records - // the number of arguments pushed thus we need to offset them here. - // The |argument_delta()| used above "knows" only about JS parameters while - // we are dealing here with particular calling convention details. - RecordPushedArgumentsDelta(-hinstr->descriptor().GetStackParameterCount()); -} - - -void LCodeGen::DoCallRuntime(LCallRuntime* instr) { - CallRuntime(instr->function(), instr->arity(), instr); - RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta()); -} - - -void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { - GenerateOsrPrologue(); -} - - -void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { - Register temp = ToRegister(instr->temp()); - Label deopt, done; - // If the map is not deprecated the migration attempt does not make sense. - __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); - __ Ldr(temp, FieldMemOperand(temp, Map::kBitField3Offset)); - __ Tst(temp, Operand(Map::Deprecated::kMask)); - __ B(eq, &deopt); - - { - PushSafepointRegistersScope scope(this); - __ Push(object); - __ Mov(cp, 0); - __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); - RecordSafepointWithRegisters( - instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(x0, temp); - } - __ Tst(temp, Operand(kSmiTagMask)); - __ B(ne, &done); - - __ bind(&deopt); - Deoptimize(instr, DeoptimizeReason::kInstanceMigrationFailed); - - __ bind(&done); -} - - -void LCodeGen::DoCheckMaps(LCheckMaps* instr) { - class DeferredCheckMaps: public LDeferredCode { - public: - DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) - : LDeferredCode(codegen), instr_(instr), object_(object) { - SetExit(check_maps()); - } - virtual void Generate() { - codegen()->DoDeferredInstanceMigration(instr_, object_); - } - Label* check_maps() { return &check_maps_; } - virtual LInstruction* instr() { return instr_; } - private: - LCheckMaps* instr_; - Label check_maps_; - Register object_; - }; - - if (instr->hydrogen()->IsStabilityCheck()) { - const UniqueSet* maps = instr->hydrogen()->maps(); - for (int i = 0; i < maps->size(); ++i) { - AddStabilityDependency(maps->at(i).handle()); - } - return; - } - - Register object = ToRegister(instr->value()); - Register map_reg = ToRegister(instr->temp()); - - __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset)); - - DeferredCheckMaps* deferred = NULL; - if (instr->hydrogen()->HasMigrationTarget()) { - deferred = new(zone()) DeferredCheckMaps(this, instr, object); - __ Bind(deferred->check_maps()); - } - - const UniqueSet* maps = instr->hydrogen()->maps(); - Label success; - for (int i = 0; i < maps->size() - 1; i++) { - Handle map = maps->at(i).handle(); - __ CompareMap(map_reg, map); - __ B(eq, &success); - } - Handle map = maps->at(maps->size() - 1).handle(); - __ CompareMap(map_reg, map); - - // We didn't match a map. 
- if (instr->hydrogen()->HasMigrationTarget()) { - __ B(ne, deferred->entry()); - } else { - DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap); - } - - __ Bind(&success); -} - - -void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - DeoptimizeIfSmi(ToRegister(instr->value()), instr, DeoptimizeReason::kSmi); - } -} - - -void LCodeGen::DoCheckSmi(LCheckSmi* instr) { - Register value = ToRegister(instr->value()); - DCHECK(!instr->result() || ToRegister(instr->result()).Is(value)); - DeoptimizeIfNotSmi(value, instr, DeoptimizeReason::kNotASmi); -} - - -void LCodeGen::DoCheckArrayBufferNotNeutered( - LCheckArrayBufferNotNeutered* instr) { - UseScratchRegisterScope temps(masm()); - Register view = ToRegister(instr->view()); - Register scratch = temps.AcquireX(); - - __ Ldr(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset)); - __ Ldr(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset)); - __ Tst(scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds); -} - - -void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { - Register input = ToRegister(instr->value()); - Register scratch = ToRegister(instr->temp()); - - __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); - __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); - - if (instr->hydrogen()->is_interval_check()) { - InstanceType first, last; - instr->hydrogen()->GetCheckInterval(&first, &last); - - __ Cmp(scratch, first); - if (first == last) { - // If there is only one type in the interval check for equality. - DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType); - } else if (last == LAST_TYPE) { - // We don't need to compare with the higher bound of the interval. - DeoptimizeIf(lo, instr, DeoptimizeReason::kWrongInstanceType); - } else { - // If we are below the lower bound, set the C flag and clear the Z flag - // to force a deopt. - __ Ccmp(scratch, last, CFlag, hs); - DeoptimizeIf(hi, instr, DeoptimizeReason::kWrongInstanceType); - } - } else { - uint8_t mask; - uint8_t tag; - instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); - - if (base::bits::IsPowerOfTwo32(mask)) { - DCHECK((tag == 0) || (tag == mask)); - if (tag == 0) { - DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr, - DeoptimizeReason::kWrongInstanceType); - } else { - DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr, - DeoptimizeReason::kWrongInstanceType); - } - } else { - if (tag == 0) { - __ Tst(scratch, mask); - } else { - __ And(scratch, scratch, mask); - __ Cmp(scratch, tag); - } - DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType); - } - } -} - - -void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { - DoubleRegister input = ToDoubleRegister(instr->unclamped()); - Register result = ToRegister32(instr->result()); - __ ClampDoubleToUint8(result, input, double_scratch()); -} - - -void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { - Register input = ToRegister32(instr->unclamped()); - Register result = ToRegister32(instr->result()); - __ ClampInt32ToUint8(result, input); -} - - -void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { - Register input = ToRegister(instr->unclamped()); - Register result = ToRegister32(instr->result()); - Label done; - - // Both smi and heap number cases are handled. 
- Label is_not_smi; - __ JumpIfNotSmi(input, &is_not_smi); - __ SmiUntag(result.X(), input); - __ ClampInt32ToUint8(result); - __ B(&done); - - __ Bind(&is_not_smi); - - // Check for heap number. - Label is_heap_number; - __ JumpIfHeapNumber(input, &is_heap_number); - - // Check for undefined. Undefined is converted to zero for clamping conversion. - DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr, - DeoptimizeReason::kNotAHeapNumberUndefined); - __ Mov(result, 0); - __ B(&done); - - // Heap number case. - __ Bind(&is_heap_number); - DoubleRegister dbl_scratch = double_scratch(); - DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp1()); - __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset)); - __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2); - - __ Bind(&done); -} - -void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { - Handle<String> class_name = instr->hydrogen()->class_name(); - Label* true_label = instr->TrueLabel(chunk_); - Label* false_label = instr->FalseLabel(chunk_); - Register input = ToRegister(instr->value()); - Register scratch1 = ToRegister(instr->temp1()); - Register scratch2 = ToRegister(instr->temp2()); - - __ JumpIfSmi(input, false_label); - - Register map = scratch2; - __ CompareObjectType(input, map, scratch1, FIRST_FUNCTION_TYPE); - STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE); - if (String::Equals(isolate()->factory()->Function_string(), class_name)) { - __ B(hs, true_label); - } else { - __ B(hs, false_label); - } - - // Check if the constructor in the map is a function. - { - UseScratchRegisterScope temps(masm()); - Register instance_type = temps.AcquireX(); - __ GetMapConstructor(scratch1, map, scratch2, instance_type); - __ Cmp(instance_type, JS_FUNCTION_TYPE); - } - // Objects with a non-function constructor have class 'Object'. - if (String::Equals(class_name, isolate()->factory()->Object_string())) { - __ B(ne, true_label); - } else { - __ B(ne, false_label); - } - - // The constructor function is in scratch1. Get its instance class name. - __ Ldr(scratch1, - FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset)); - __ Ldr(scratch1, FieldMemOperand( - scratch1, SharedFunctionInfo::kInstanceClassNameOffset)); - - // The class name we are testing against is internalized since it's a literal. - // The name in the constructor is internalized because of the way the context - // is booted. This routine isn't expected to work for random API-created - // classes and it doesn't have to because you can't access it with natives - // syntax. Since both sides are internalized it is sufficient to use an - // identity comparison. - EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name)); -} - -void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) { - DCHECK(instr->hydrogen()->representation().IsDouble()); - VRegister object = ToDoubleRegister(instr->object()); - Register temp = ToRegister(instr->temp()); - - // If we don't have a NaN, we don't have the hole, so branch now to avoid the - // (relatively expensive) hole-NaN check. - __ Fcmp(object, object); - __ B(vc, instr->FalseLabel(chunk_)); - - // We have a NaN, but is it the hole?
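The hole check that follows relies on "the hole" being one specific NaN bit pattern: the cheap Fcmp rules out ordered values first, and only then are the raw bits compared. A minimal standalone sketch of the same test, with a caller-supplied hole_nan_bits constant (illustrative name, not the V8 constant):

#include <cstdint>
#include <cstring>

// True only for the single NaN bit pattern that marks holes.
bool IsHoleNaN(double value, uint64_t hole_nan_bits) {
  if (value == value) return false;          // ordered, so not a NaN
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));  // the Fmov into a core register
  return bits == hole_nan_bits;              // the exact-bits comparison
}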
- __ Fmov(temp, object); - EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64); -} - - -void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) { - DCHECK(instr->hydrogen()->representation().IsTagged()); - Register object = ToRegister(instr->object()); - - EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex); -} - - -void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { - Register value = ToRegister(instr->value()); - Register map = ToRegister(instr->temp()); - - __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); - EmitCompareAndBranch(instr, eq, map, Operand(instr->map())); -} - - -void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - bool is_unsigned = - instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) || - instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32); - Condition cond = TokenToCondition(instr->op(), is_unsigned); - - if (left->IsConstantOperand() && right->IsConstantOperand()) { - // We can statically evaluate the comparison. - double left_val = ToDouble(LConstantOperand::cast(left)); - double right_val = ToDouble(LConstantOperand::cast(right)); - int next_block = Token::EvalComparison(instr->op(), left_val, right_val) - ? instr->TrueDestination(chunk_) - : instr->FalseDestination(chunk_); - EmitGoto(next_block); - } else { - if (instr->is_double()) { - __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right)); - - // If a NaN is involved, i.e. the result is unordered (V set), - // jump to false block label. - __ B(vs, instr->FalseLabel(chunk_)); - EmitBranch(instr, cond); - } else { - if (instr->hydrogen_value()->representation().IsInteger32()) { - if (right->IsConstantOperand()) { - EmitCompareAndBranch(instr, cond, ToRegister32(left), - ToOperand32(right)); - } else { - // Commute the operands and the condition. - EmitCompareAndBranch(instr, CommuteCondition(cond), - ToRegister32(right), ToOperand32(left)); - } - } else { - DCHECK(instr->hydrogen_value()->representation().IsSmi()); - if (right->IsConstantOperand()) { - int32_t value = ToInteger32(LConstantOperand::cast(right)); - EmitCompareAndBranch(instr, - cond, - ToRegister(left), - Operand(Smi::FromInt(value))); - } else if (left->IsConstantOperand()) { - // Commute the operands and the condition. - int32_t value = ToInteger32(LConstantOperand::cast(left)); - EmitCompareAndBranch(instr, - CommuteCondition(cond), - ToRegister(right), - Operand(Smi::FromInt(value))); - } else { - EmitCompareAndBranch(instr, - cond, - ToRegister(left), - ToRegister(right)); - } - } - } - } -} - - -void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { - Register left = ToRegister(instr->left()); - Register right = ToRegister(instr->right()); - EmitCompareAndBranch(instr, eq, left, right); -} - - -void LCodeGen::DoCmpT(LCmpT* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - Token::Value op = instr->op(); - Condition cond = TokenToCondition(op, false); - - DCHECK(ToRegister(instr->left()).Is(x1)); - DCHECK(ToRegister(instr->right()).Is(x0)); - Handle ic = CodeFactory::CompareIC(isolate(), op).code(); - CallCode(ic, RelocInfo::CODE_TARGET, instr); - // Signal that we don't inline smi code before this stub. - InlineSmiCheckInfo::EmitNotInlined(masm()); - - // Return true or false depending on CompareIC result. - // This instruction is marked as call. We can clobber any register. 
- DCHECK(instr->IsMarkedAsCall()); - __ LoadTrueFalseRoots(x1, x2); - __ Cmp(x0, 0); - __ Csel(ToRegister(instr->result()), x1, x2, cond); -} - - -void LCodeGen::DoConstantD(LConstantD* instr) { - DCHECK(instr->result()->IsDoubleRegister()); - DoubleRegister result = ToDoubleRegister(instr->result()); - if (instr->value() == 0) { - if (copysign(1.0, instr->value()) == 1.0) { - __ Fmov(result, fp_zero); - } else { - __ Fneg(result, fp_zero); - } - } else { - __ Fmov(result, instr->value()); - } -} - - -void LCodeGen::DoConstantE(LConstantE* instr) { - __ Mov(ToRegister(instr->result()), Operand(instr->value())); -} - - -void LCodeGen::DoConstantI(LConstantI* instr) { - DCHECK(is_int32(instr->value())); - // Cast the value here to ensure that the value isn't sign extended by the - // implicit Operand constructor. - __ Mov(ToRegister32(instr->result()), static_cast<int32_t>(instr->value())); -} - - -void LCodeGen::DoConstantS(LConstantS* instr) { - __ Mov(ToRegister(instr->result()), Operand(instr->value())); -} - - -void LCodeGen::DoConstantT(LConstantT* instr) { - Handle<Object> object = instr->value(isolate()); - AllowDeferredHandleDereference smi_check; - __ LoadObject(ToRegister(instr->result()), object); -} - - -void LCodeGen::DoContext(LContext* instr) { - // If there is a non-return use, the context must be moved to a register. - Register result = ToRegister(instr->result()); - if (info()->IsOptimizing()) { - __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); - } else { - // If there is no frame, the context must be in cp. - DCHECK(result.is(cp)); - } -} - - -void LCodeGen::DoCheckValue(LCheckValue* instr) { - Register reg = ToRegister(instr->value()); - Handle<HeapObject> object = instr->hydrogen()->object().handle(); - AllowDeferredHandleDereference smi_check; - if (isolate()->heap()->InNewSpace(*object)) { - UseScratchRegisterScope temps(masm()); - Register temp = temps.AcquireX(); - Handle<Cell> cell = isolate()->factory()->NewCell(object); - __ Mov(temp, Operand(cell)); - __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset)); - __ Cmp(reg, temp); - } else { - __ Cmp(reg, Operand(object)); - } - DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch); -} - - -void LCodeGen::DoLazyBailout(LLazyBailout* instr) { - last_lazy_deopt_pc_ = masm()->pc_offset(); - DCHECK(instr->HasEnvironment()); - LEnvironment* env = instr->environment(); - RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); - safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); -} - - -void LCodeGen::DoDeoptimize(LDeoptimize* instr) { - Deoptimizer::BailoutType type = instr->hydrogen()->type(); - // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the - // needed return address), even though the implementation of LAZY and EAGER is - // now identical. When LAZY is eventually completely folded into EAGER, remove - // the special case below. - if (info()->IsStub() && (type == Deoptimizer::EAGER)) { - type = Deoptimizer::LAZY; - } - - Deoptimize(instr, instr->hydrogen()->reason(), &type); -} - - -void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { - Register dividend = ToRegister32(instr->dividend()); - int32_t divisor = instr->divisor(); - Register result = ToRegister32(instr->result()); - DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); - DCHECK(!result.is(dividend)); - - // Check for (0 / -x) that will produce negative zero.
- HDiv* hdiv = instr->hydrogen(); - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { - DeoptimizeIfZero(dividend, instr, DeoptimizeReason::kDivisionByZero); - } - // Check for (kMinInt / -1). - if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { - // Test dividend for kMinInt by subtracting one (cmp) and checking for - // overflow. - __ Cmp(dividend, 1); - DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow); - } - // Deoptimize if remainder will not be 0. - if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && - divisor != 1 && divisor != -1) { - int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); - __ Tst(dividend, mask); - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision); - } - - if (divisor == -1) { // Nice shortcut, not needed for correctness. - __ Neg(result, dividend); - return; - } - int32_t shift = WhichPowerOf2Abs(divisor); - if (shift == 0) { - __ Mov(result, dividend); - } else if (shift == 1) { - __ Add(result, dividend, Operand(dividend, LSR, 31)); - } else { - __ Mov(result, Operand(dividend, ASR, 31)); - __ Add(result, dividend, Operand(result, LSR, 32 - shift)); - } - if (shift > 0) __ Mov(result, Operand(result, ASR, shift)); - if (divisor < 0) __ Neg(result, result); -} - - -void LCodeGen::DoDivByConstI(LDivByConstI* instr) { - Register dividend = ToRegister32(instr->dividend()); - int32_t divisor = instr->divisor(); - Register result = ToRegister32(instr->result()); - DCHECK(!AreAliased(dividend, result)); - - if (divisor == 0) { - Deoptimize(instr, DeoptimizeReason::kDivisionByZero); - return; - } - - // Check for (0 / -x) that will produce negative zero. - HDiv* hdiv = instr->hydrogen(); - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { - DeoptimizeIfZero(dividend, instr, DeoptimizeReason::kMinusZero); - } - - __ TruncatingDiv(result, dividend, Abs(divisor)); - if (divisor < 0) __ Neg(result, result); - - if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { - Register temp = ToRegister32(instr->temp()); - DCHECK(!AreAliased(dividend, result, temp)); - __ Sxtw(dividend.X(), dividend); - __ Mov(temp, divisor); - __ Smsubl(temp.X(), result, temp, dividend.X()); - DeoptimizeIfNotZero(temp, instr, DeoptimizeReason::kLostPrecision); - } -} - - -// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. -void LCodeGen::DoDivI(LDivI* instr) { - HBinaryOperation* hdiv = instr->hydrogen(); - Register dividend = ToRegister32(instr->dividend()); - Register divisor = ToRegister32(instr->divisor()); - Register result = ToRegister32(instr->result()); - - // Issue the division first, and then check for any deopt cases whilst the - // result is computed. - __ Sdiv(result, dividend, divisor); - - if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { - DCHECK(!instr->temp()); - return; - } - - // Check for x / 0. - if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { - DeoptimizeIfZero(divisor, instr, DeoptimizeReason::kDivisionByZero); - } - - // Check for (0 / -x) as that will produce negative zero. - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { - __ Cmp(divisor, 0); - - // If the divisor < 0 (mi), compare the dividend, and deopt if it is - // zero, ie. zero dividend with negative divisor deopts. - // If the divisor >= 0 (pl, the opposite of mi) set the flags to - // condition ne, so we don't deopt, ie. positive divisor doesn't deopt. - __ Ccmp(dividend, 0, NoFlag, mi); - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); - } - - // Check for (kMinInt / -1). 
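For reference, DoDivByPowerOf2I above turns division by ±2^shift into a bias-and-shift sequence: negative dividends get (2^shift - 1) added so the arithmetic right shift truncates toward zero instead of flooring. A minimal sketch of the same idea (illustrative helper, not V8 code; requires 1 <= shift <= 31, with the kMinInt / -1 case assumed to be rejected separately, as the deopt above does):

#include <cassert>
#include <cstdint>

int32_t DivByPowerOf2(int32_t dividend, int shift, bool divisor_is_negative) {
  assert(shift >= 1 && shift <= 31);
  // 0 for non-negative dividends, all ones for negative ones.
  uint32_t sign_mask = dividend < 0 ? 0xFFFFFFFFu : 0u;
  // Add (2^shift - 1) to negative dividends so the shift rounds toward zero.
  int32_t biased = dividend + static_cast<int32_t>(sign_mask >> (32 - shift));
  int32_t quotient = biased >> shift;  // arithmetic shift on the usual ABIs
  return divisor_is_negative ? -quotient : quotient;
}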
- if (hdiv->CheckFlag(HValue::kCanOverflow)) { - // Test dividend for kMinInt by subtracting one (cmp) and checking for - // overflow. - __ Cmp(dividend, 1); - // If overflow is set, ie. dividend = kMinInt, compare the divisor with - // -1. If overflow is clear, set the flags for condition ne, as the - // dividend isn't -1, and thus we shouldn't deopt. - __ Ccmp(divisor, -1, NoFlag, vs); - DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow); - } - - // Compute remainder and deopt if it's not zero. - Register remainder = ToRegister32(instr->temp()); - __ Msub(remainder, result, divisor, dividend); - DeoptimizeIfNotZero(remainder, instr, DeoptimizeReason::kLostPrecision); -} - - -void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - Register result = ToRegister32(instr->result()); - - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - DeoptimizeIfMinusZero(input, instr, DeoptimizeReason::kMinusZero); - } - - __ TryRepresentDoubleAsInt32(result, input, double_scratch()); - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN); - - if (instr->tag_result()) { - __ SmiTag(result.X()); - } -} - - -void LCodeGen::DoDrop(LDrop* instr) { - __ Drop(instr->count()); - - RecordPushedArgumentsDelta(instr->hydrogen_value()->argument_delta()); -} - - -void LCodeGen::DoDummy(LDummy* instr) { - // Nothing to see here, move on! -} - - -void LCodeGen::DoDummyUse(LDummyUse* instr) { - // Nothing to see here, move on! -} - - -void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { - Register map = ToRegister(instr->map()); - Register result = ToRegister(instr->result()); - Label load_cache, done; - - __ EnumLengthUntagged(result, map); - __ Cbnz(result, &load_cache); - - __ Mov(result, Operand(isolate()->factory()->empty_fixed_array())); - __ B(&done); - - __ Bind(&load_cache); - __ LoadInstanceDescriptors(map, result); - __ Ldr(result, - FieldMemOperand(result, DescriptorArray::kEnumCacheBridgeOffset)); - __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); - DeoptimizeIfZero(result, instr, DeoptimizeReason::kNoCache); - - __ Bind(&done); -} - - -void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { - Register object = ToRegister(instr->object()); - - DCHECK(instr->IsMarkedAsCall()); - DCHECK(object.Is(x0)); - - Label use_cache, call_runtime; - __ CheckEnumCache(object, x5, x1, x2, x3, x4, &call_runtime); - - __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset)); - __ B(&use_cache); - - // Get the set of properties to enumerate. - __ Bind(&call_runtime); - __ Push(object); - CallRuntime(Runtime::kForInEnumerate, instr); - __ Bind(&use_cache); -} - -void LCodeGen::EmitGoto(int block) { - // Do not emit jump if we are emitting a goto to the next block. - if (!IsNextEmittedBlock(block)) { - __ B(chunk_->GetAssemblyLabel(LookupDestination(block))); - } -} - -void LCodeGen::DoGoto(LGoto* instr) { - EmitGoto(instr->block_id()); -} - -// HHasInstanceTypeAndBranch instruction is built with an interval of type -// to test but is only used in very restricted ways. The only possible kinds -// of intervals are: -// - [ FIRST_TYPE, instr->to() ] -// - [ instr->form(), LAST_TYPE ] -// - instr->from() == instr->to() -// -// These kinds of intervals can be check with only one compare instruction -// providing the correct value and test condition are used. 
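A compact way to see the single-compare trick the comment above describes, with the three interval shapes spelled out (illustrative types and names, not V8's InstanceType machinery):

#include <cassert>
#include <cstdint>

enum Cond { kEq, kUnsignedLowerOrSame, kUnsignedHigherOrSame };  // eq / ls / hs

struct IntervalCheck {
  uint16_t compare_against;
  Cond condition;
};

IntervalCheck MakeCheck(uint16_t from, uint16_t to,
                        uint16_t first_type, uint16_t last_type) {
  if (from == to) return {from, kEq};                          // single type
  if (to == last_type) return {from, kUnsignedHigherOrSame};   // [from, LAST]
  assert(from == first_type);                                  // [FIRST, to]
  return {to, kUnsignedLowerOrSame};
}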
-// -// TestType() will return the value to use in the compare instruction and -// BranchCondition() will return the condition to use depending on the kind -// of interval actually specified in the instruction. -static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { - InstanceType from = instr->from(); - InstanceType to = instr->to(); - if (from == FIRST_TYPE) return to; - DCHECK((from == to) || (to == LAST_TYPE)); - return from; -} - - -// See comment above TestType function for what this function does. -static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { - InstanceType from = instr->from(); - InstanceType to = instr->to(); - if (from == to) return eq; - if (to == LAST_TYPE) return hs; - if (from == FIRST_TYPE) return ls; - UNREACHABLE(); -} - - -void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { - Register input = ToRegister(instr->value()); - Register scratch = ToRegister(instr->temp()); - - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - __ JumpIfSmi(input, instr->FalseLabel(chunk_)); - } - __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen())); - EmitBranch(instr, BranchCondition(instr->hydrogen())); -} - - -void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { - Register result = ToRegister(instr->result()); - Register base = ToRegister(instr->base_object()); - if (instr->offset()->IsConstantOperand()) { - __ Add(result, base, ToOperand32(instr->offset())); - } else { - __ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW)); - } -} - - -void LCodeGen::DoHasInPrototypeChainAndBranch( - LHasInPrototypeChainAndBranch* instr) { - Register const object = ToRegister(instr->object()); - Register const object_map = ToRegister(instr->scratch1()); - Register const object_instance_type = ToRegister(instr->scratch2()); - Register const object_prototype = object_map; - Register const prototype = ToRegister(instr->prototype()); - - // The {object} must be a spec object. It's sufficient to know that {object} - // is not a smi, since all other non-spec objects have {null} prototypes and - // will be ruled out below. - if (instr->hydrogen()->ObjectNeedsSmiCheck()) { - __ JumpIfSmi(object, instr->FalseLabel(chunk_)); - } - - // Loop through the {object}s prototype chain looking for the {prototype}. - __ Ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); - Label loop; - __ Bind(&loop); - - // Deoptimize if the object needs to be access checked. - __ Ldrb(object_instance_type, - FieldMemOperand(object_map, Map::kBitFieldOffset)); - __ Tst(object_instance_type, Operand(1 << Map::kIsAccessCheckNeeded)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck); - // Deoptimize for proxies. 
- __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE); - DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy); - - __ Ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset)); - __ CompareRoot(object_prototype, Heap::kNullValueRootIndex); - __ B(eq, instr->FalseLabel(chunk_)); - __ Cmp(object_prototype, prototype); - __ B(eq, instr->TrueLabel(chunk_)); - __ Ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset)); - __ B(&loop); -} - - -void LCodeGen::DoInstructionGap(LInstructionGap* instr) { - DoGap(instr); -} - - -void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { - Register value = ToRegister32(instr->value()); - DoubleRegister result = ToDoubleRegister(instr->result()); - __ Scvtf(result, value); -} - -void LCodeGen::PrepareForTailCall(const ParameterCount& actual, - Register scratch1, Register scratch2, - Register scratch3) { -#if DEBUG - if (actual.is_reg()) { - DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3)); - } else { - DCHECK(!AreAliased(scratch1, scratch2, scratch3)); - } -#endif - if (FLAG_code_comments) { - if (actual.is_reg()) { - Comment(";;; PrepareForTailCall, actual: %s {", - RegisterConfiguration::Crankshaft()->GetGeneralRegisterName( - actual.reg().code())); - } else { - Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate()); - } - } - - // Check if next frame is an arguments adaptor frame. - Register caller_args_count_reg = scratch1; - Label no_arguments_adaptor, formal_parameter_count_loaded; - __ Ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ Ldr(scratch3, - MemOperand(scratch2, StandardFrameConstants::kContextOffset)); - __ Cmp(scratch3, - Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); - __ B(ne, &no_arguments_adaptor); - - // Drop current frame and load arguments count from arguments adaptor frame. - __ mov(fp, scratch2); - __ Ldr(caller_args_count_reg, - MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ SmiUntag(caller_args_count_reg); - __ B(&formal_parameter_count_loaded); - - __ bind(&no_arguments_adaptor); - // Load caller's formal parameter count - __ Mov(caller_args_count_reg, - Immediate(info()->literal()->parameter_count())); - - __ bind(&formal_parameter_count_loaded); - __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3); - - Comment(";;; }"); -} - -void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { - HInvokeFunction* hinstr = instr->hydrogen(); - DCHECK(ToRegister(instr->context()).is(cp)); - // The function is required to be in x1. - DCHECK(ToRegister(instr->function()).is(x1)); - DCHECK(instr->HasPointerMap()); - - bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow; - - if (is_tail_call) { - DCHECK(!info()->saves_caller_doubles()); - ParameterCount actual(instr->arity()); - // It is safe to use x3, x4 and x5 as scratch registers here given that - // 1) we are not going to return to caller function anyway, - // 2) x3 (new.target) will be initialized below. - PrepareForTailCall(actual, x3, x4, x5); - } - - Handle known_function = hinstr->known_function(); - if (known_function.is_null()) { - LPointerMap* pointers = instr->pointer_map(); - SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); - ParameterCount actual(instr->arity()); - InvokeFlag flag = is_tail_call ? 
JUMP_FUNCTION : CALL_FUNCTION; - __ InvokeFunction(x1, no_reg, actual, flag, generator); - } else { - CallKnownFunction(known_function, hinstr->formal_parameter_count(), - instr->arity(), is_tail_call, instr); - } - RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta()); -} - - -Condition LCodeGen::EmitIsString(Register input, - Register temp1, - Label* is_not_string, - SmiCheck check_needed = INLINE_SMI_CHECK) { - if (check_needed == INLINE_SMI_CHECK) { - __ JumpIfSmi(input, is_not_string); - } - __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE); - - return lt; -} - - -void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { - Register val = ToRegister(instr->value()); - Register scratch = ToRegister(instr->temp()); - - SmiCheck check_needed = - instr->hydrogen()->value()->type().IsHeapObject() - ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; - Condition true_cond = - EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed); - - EmitBranch(instr, true_cond); -} - - -void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { - Register value = ToRegister(instr->value()); - STATIC_ASSERT(kSmiTag == 0); - EmitTestAndBranch(instr, eq, value, kSmiTagMask); -} - - -void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { - Register input = ToRegister(instr->value()); - Register temp = ToRegister(instr->temp()); - - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - __ JumpIfSmi(input, instr->FalseLabel(chunk_)); - } - __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset)); - __ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); - - EmitTestAndBranch(instr, ne, temp, 1 << Map::kIsUndetectable); -} - - -static const char* LabelType(LLabel* label) { - if (label->is_loop_header()) return " (loop header)"; - if (label->is_osr_entry()) return " (OSR entry)"; - return ""; -} - - -void LCodeGen::DoLabel(LLabel* label) { - Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------", - current_instruction_, - label->hydrogen_value()->id(), - label->block_id(), - LabelType(label)); - - // Inherit pushed_arguments_ from the predecessor's argument count. - if (label->block()->HasPredecessor()) { - pushed_arguments_ = label->block()->predecessors()->at(0)->argument_count(); -#ifdef DEBUG - for (auto p : *label->block()->predecessors()) { - DCHECK_EQ(p->argument_count(), pushed_arguments_); - } -#endif - } - - __ Bind(label->label()); - current_block_ = label->block_id(); - DoGap(label); -} - - -void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { - Register context = ToRegister(instr->context()); - Register result = ToRegister(instr->result()); - __ Ldr(result, ContextMemOperand(context, instr->slot_index())); - if (instr->hydrogen()->RequiresHoleCheck()) { - if (instr->hydrogen()->DeoptimizesOnHole()) { - DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, - DeoptimizeReason::kHole); - } else { - Label not_the_hole; - __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole); - __ LoadRoot(result, Heap::kUndefinedValueRootIndex); - __ Bind(&not_the_hole); - } - } -} - - -void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { - Register function = ToRegister(instr->function()); - Register result = ToRegister(instr->result()); - Register temp = ToRegister(instr->temp()); - - // Get the prototype or initial map from the function. - __ Ldr(result, FieldMemOperand(function, - JSFunction::kPrototypeOrInitialMapOffset)); - - // Check that the function has a prototype or an initial map.
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, - DeoptimizeReason::kHole); - - // If the function does not have an initial map, we're done. - Label done; - __ CompareObjectType(result, temp, temp, MAP_TYPE); - __ B(ne, &done); - - // Get the prototype from the initial map. - __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); - - // All done. - __ Bind(&done); -} - - -MemOperand LCodeGen::PrepareKeyedExternalArrayOperand( - Register key, - Register base, - Register scratch, - bool key_is_smi, - bool key_is_constant, - int constant_key, - ElementsKind elements_kind, - int base_offset) { - int element_size_shift = ElementsKindToShiftSize(elements_kind); - - if (key_is_constant) { - int key_offset = constant_key << element_size_shift; - return MemOperand(base, key_offset + base_offset); - } - - if (key_is_smi) { - __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift)); - return MemOperand(scratch, base_offset); - } - - if (base_offset == 0) { - return MemOperand(base, key, SXTW, element_size_shift); - } - - DCHECK(!AreAliased(scratch, key)); - __ Add(scratch, base, base_offset); - return MemOperand(scratch, key, SXTW, element_size_shift); -} - - -void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) { - Register ext_ptr = ToRegister(instr->elements()); - Register scratch; - ElementsKind elements_kind = instr->elements_kind(); - - bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); - bool key_is_constant = instr->key()->IsConstantOperand(); - Register key = no_reg; - int constant_key = 0; - if (key_is_constant) { - DCHECK(instr->temp() == NULL); - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xf0000000) { - Abort(kArrayIndexConstantValueTooBig); - } - } else { - scratch = ToRegister(instr->temp()); - key = ToRegister(instr->key()); - } - - MemOperand mem_op = - PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi, - key_is_constant, constant_key, - elements_kind, - instr->base_offset()); - - if (elements_kind == FLOAT32_ELEMENTS) { - DoubleRegister result = ToDoubleRegister(instr->result()); - __ Ldr(result.S(), mem_op); - __ Fcvt(result, result.S()); - } else if (elements_kind == FLOAT64_ELEMENTS) { - DoubleRegister result = ToDoubleRegister(instr->result()); - __ Ldr(result, mem_op); - } else { - Register result = ToRegister(instr->result()); - - switch (elements_kind) { - case INT8_ELEMENTS: - __ Ldrsb(result, mem_op); - break; - case UINT8_ELEMENTS: - case UINT8_CLAMPED_ELEMENTS: - __ Ldrb(result, mem_op); - break; - case INT16_ELEMENTS: - __ Ldrsh(result, mem_op); - break; - case UINT16_ELEMENTS: - __ Ldrh(result, mem_op); - break; - case INT32_ELEMENTS: - __ Ldrsw(result, mem_op); - break; - case UINT32_ELEMENTS: - __ Ldr(result.W(), mem_op); - if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { - // Deopt if value > 0x80000000. 
- __ Tst(result, 0xFFFFFFFF80000000); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNegativeValue); - } - break; - case FLOAT32_ELEMENTS: - case FLOAT64_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case DICTIONARY_ELEMENTS: - case FAST_SLOPPY_ARGUMENTS_ELEMENTS: - case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: - case FAST_STRING_WRAPPER_ELEMENTS: - case SLOW_STRING_WRAPPER_ELEMENTS: - case NO_ELEMENTS: - UNREACHABLE(); - break; - } - } -} - - -MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base, - Register elements, - Register key, - bool key_is_tagged, - ElementsKind elements_kind, - Representation representation, - int base_offset) { - STATIC_ASSERT(static_cast(kSmiValueSize) == kWRegSizeInBits); - STATIC_ASSERT(kSmiTag == 0); - int element_size_shift = ElementsKindToShiftSize(elements_kind); - - // Even though the HLoad/StoreKeyed instructions force the input - // representation for the key to be an integer, the input gets replaced during - // bounds check elimination with the index argument to the bounds check, which - // can be tagged, so that case must be handled here, too. - if (key_is_tagged) { - __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift)); - if (representation.IsInteger32()) { - DCHECK(elements_kind == FAST_SMI_ELEMENTS); - // Read or write only the smi payload in the case of fast smi arrays. - return UntagSmiMemOperand(base, base_offset); - } else { - return MemOperand(base, base_offset); - } - } else { - // Sign extend key because it could be a 32-bit negative value or contain - // garbage in the top 32-bits. The address computation happens in 64-bit. - DCHECK((element_size_shift >= 0) && (element_size_shift <= 4)); - if (representation.IsInteger32()) { - DCHECK(elements_kind == FAST_SMI_ELEMENTS); - // Read or write only the smi payload in the case of fast smi arrays. 
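The UntagSmi* operands used here lean on the 64-bit smi layout that the STATIC_ASSERTs check: a 32-bit payload shifted into the upper word with a zero tag. A minimal sketch of that encoding (assuming kSmiShift == 32, as asserted above):

#include <cstdint>

// Tagging shifts the payload into the upper word; untagging shifts it back.
// Because the payload occupies the upper 32 bits, a plain 32-bit load of the
// upper half of a slot reads the untagged integer directly.
int64_t SmiTag(int32_t value) { return static_cast<int64_t>(value) << 32; }
int32_t SmiUntag(int64_t smi) { return static_cast<int32_t>(smi >> 32); }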
- __ Add(base, elements, Operand(key, SXTW, element_size_shift)); - return UntagSmiMemOperand(base, base_offset); - } else { - __ Add(base, elements, base_offset); - return MemOperand(base, key, SXTW, element_size_shift); - } - } -} - - -void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) { - Register elements = ToRegister(instr->elements()); - DoubleRegister result = ToDoubleRegister(instr->result()); - MemOperand mem_op; - - if (instr->key()->IsConstantOperand()) { - DCHECK(instr->hydrogen()->RequiresHoleCheck() || - (instr->temp() == NULL)); - - int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xf0000000) { - Abort(kArrayIndexConstantValueTooBig); - } - int offset = instr->base_offset() + constant_key * kDoubleSize; - mem_op = MemOperand(elements, offset); - } else { - Register load_base = ToRegister(instr->temp()); - Register key = ToRegister(instr->key()); - bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi(); - mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged, - instr->hydrogen()->elements_kind(), - instr->hydrogen()->representation(), - instr->base_offset()); - } - - __ Ldr(result, mem_op); - - if (instr->hydrogen()->RequiresHoleCheck()) { - Register scratch = ToRegister(instr->temp()); - __ Fmov(scratch, result); - __ Eor(scratch, scratch, kHoleNanInt64); - DeoptimizeIfZero(scratch, instr, DeoptimizeReason::kHole); - } -} - - -void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) { - Register elements = ToRegister(instr->elements()); - Register result = ToRegister(instr->result()); - MemOperand mem_op; - - Representation representation = instr->hydrogen()->representation(); - if (instr->key()->IsConstantOperand()) { - DCHECK(instr->temp() == NULL); - LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); - int offset = instr->base_offset() + - ToInteger32(const_operand) * kPointerSize; - if (representation.IsInteger32()) { - DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS); - STATIC_ASSERT(static_cast(kSmiValueSize) == kWRegSizeInBits); - STATIC_ASSERT(kSmiTag == 0); - mem_op = UntagSmiMemOperand(elements, offset); - } else { - mem_op = MemOperand(elements, offset); - } - } else { - Register load_base = ToRegister(instr->temp()); - Register key = ToRegister(instr->key()); - bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi(); - - mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged, - instr->hydrogen()->elements_kind(), - representation, instr->base_offset()); - } - - __ Load(result, mem_op, representation); - - if (instr->hydrogen()->RequiresHoleCheck()) { - if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { - DeoptimizeIfNotSmi(result, instr, DeoptimizeReason::kNotASmi); - } else { - DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, - DeoptimizeReason::kHole); - } - } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) { - DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS); - Label done; - __ CompareRoot(result, Heap::kTheHoleValueRootIndex); - __ B(ne, &done); - if (info()->IsStub()) { - // A stub can safely convert the hole to undefined only if the array - // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise - // it needs to bail out. 
- __ LoadRoot(result, Heap::kArrayProtectorRootIndex); - __ Ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset)); - __ Cmp(result, Operand(Smi::FromInt(Isolate::kProtectorValid))); - DeoptimizeIf(ne, instr, DeoptimizeReason::kHole); - } - __ LoadRoot(result, Heap::kUndefinedValueRootIndex); - __ Bind(&done); - } -} - - -void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { - HObjectAccess access = instr->hydrogen()->access(); - int offset = access.offset(); - Register object = ToRegister(instr->object()); - - if (access.IsExternalMemory()) { - Register result = ToRegister(instr->result()); - __ Load(result, MemOperand(object, offset), access.representation()); - return; - } - - if (instr->hydrogen()->representation().IsDouble()) { - DCHECK(access.IsInobject()); - VRegister result = ToDoubleRegister(instr->result()); - __ Ldr(result, FieldMemOperand(object, offset)); - return; - } - - Register result = ToRegister(instr->result()); - Register source; - if (access.IsInobject()) { - source = object; - } else { - // Load the properties array, using result as a scratch register. - __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); - source = result; - } - - if (access.representation().IsSmi() && - instr->hydrogen()->representation().IsInteger32()) { - // Read int value directly from upper half of the smi. - STATIC_ASSERT(static_cast(kSmiValueSize) == kWRegSizeInBits); - STATIC_ASSERT(kSmiTag == 0); - __ Load(result, UntagSmiFieldMemOperand(source, offset), - Representation::Integer32()); - } else { - __ Load(result, FieldMemOperand(source, offset), access.representation()); - } -} - - -void LCodeGen::DoLoadRoot(LLoadRoot* instr) { - Register result = ToRegister(instr->result()); - __ LoadRoot(result, instr->index()); -} - - -void LCodeGen::DoMathAbs(LMathAbs* instr) { - Representation r = instr->hydrogen()->value()->representation(); - if (r.IsDouble()) { - DoubleRegister input = ToDoubleRegister(instr->value()); - DoubleRegister result = ToDoubleRegister(instr->result()); - __ Fabs(result, input); - } else if (r.IsSmi() || r.IsInteger32()) { - Register input = r.IsSmi() ? ToRegister(instr->value()) - : ToRegister32(instr->value()); - Register result = r.IsSmi() ? ToRegister(instr->result()) - : ToRegister32(instr->result()); - __ Abs(result, input); - DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow); - } -} - - -void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr, - Label* exit, - Label* allocation_entry) { - // Handle the tricky cases of MathAbsTagged: - // - HeapNumber inputs. - // - Negative inputs produce a positive result, so a new HeapNumber is - // allocated to hold it. - // - Positive inputs are returned as-is, since there is no need to allocate - // a new HeapNumber for the result. - // - The (smi) input -0x80000000, produces +0x80000000, which does not fit - // a smi. In this case, the inline code sets the result and jumps directly - // to the allocation_entry label. - DCHECK(instr->context() != NULL); - DCHECK(ToRegister(instr->context()).is(cp)); - Register input = ToRegister(instr->value()); - Register temp1 = ToRegister(instr->temp1()); - Register temp2 = ToRegister(instr->temp2()); - Register result_bits = ToRegister(instr->temp3()); - Register result = ToRegister(instr->result()); - - Label runtime_allocation; - - // Deoptimize if the input is not a HeapNumber. - DeoptimizeIfNotHeapNumber(input, instr); - - // If the argument is positive, we can return it as-is, without any need to - // allocate a new HeapNumber for the result. 
We have to do this in integer - // registers (rather than with fabs) because we need to be able to distinguish - // the two zeroes. - __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset)); - __ Mov(result, input); - __ Tbz(result_bits, kXSignBit, exit); - - // Calculate abs(input) by clearing the sign bit. - __ Bic(result_bits, result_bits, kXSignMask); - - // Allocate a new HeapNumber to hold the result. - // result_bits The bit representation of the (double) result. - __ Bind(allocation_entry); - __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2); - // The inline (non-deferred) code will store result_bits into result. - __ B(exit); - - __ Bind(&runtime_allocation); - if (FLAG_debug_code) { - // Because result is in the pointer map, we need to make sure it has a valid - // tagged value before we call the runtime. We speculatively set it to the - // input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it should already - // be valid. - Label result_ok; - Register input = ToRegister(instr->value()); - __ JumpIfSmi(result, &result_ok); - __ Cmp(input, result); - __ Assert(eq, kUnexpectedValue); - __ Bind(&result_ok); - } - - { PushSafepointRegistersScope scope(this); - CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr, - instr->context()); - __ StoreToSafepointRegisterSlot(x0, result); - } - // The inline (non-deferred) code will store result_bits into result. -} - - -void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) { - // Class for deferred case. - class DeferredMathAbsTagged: public LDeferredCode { - public: - DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr) - : LDeferredCode(codegen), instr_(instr) { } - virtual void Generate() { - codegen()->DoDeferredMathAbsTagged(instr_, exit(), - allocation_entry()); - } - virtual LInstruction* instr() { return instr_; } - Label* allocation_entry() { return &allocation; } - private: - LMathAbsTagged* instr_; - Label allocation; - }; - - // TODO(jbramley): The early-exit mechanism would skip the new frame handling - // in GenerateDeferredCode. Tidy this up. - DCHECK(!NeedsDeferredFrame()); - - DeferredMathAbsTagged* deferred = - new(zone()) DeferredMathAbsTagged(this, instr); - - DCHECK(instr->hydrogen()->value()->representation().IsTagged() || - instr->hydrogen()->value()->representation().IsSmi()); - Register input = ToRegister(instr->value()); - Register result_bits = ToRegister(instr->temp3()); - Register result = ToRegister(instr->result()); - Label done; - - // Handle smis inline. - // We can treat smis as 64-bit integers, since the (low-order) tag bits will - // never get set by the negation. This is therefore the same as the Integer32 - // case in DoMathAbs, except that it operates on 64-bit values. - STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0)); - - __ JumpIfNotSmi(input, deferred->entry()); - - __ Abs(result, input, NULL, &done); - - // The result is the magnitude (abs) of the smallest value a smi can - // represent, encoded as a double. 
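The deferred path above computes the absolute value on the raw IEEE-754 bits (the Bic clearing kXSignMask), which is what lets it treat -0.0 and -SMI_MIN uniformly. An equivalent standalone sketch, assuming only 64-bit IEEE-754 doubles:

#include <cstdint>
#include <cstring>

// Clear bit 63 of the double's representation: an integer-register fabs
// that also turns -0.0 into +0.0.
double AbsViaSignBit(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  bits &= ~(uint64_t{1} << 63);
  std::memcpy(&x, &bits, sizeof(x));
  return x;
}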
- __ Mov(result_bits, bit_cast(static_cast(0x80000000))); - __ B(deferred->allocation_entry()); - - __ Bind(deferred->exit()); - __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset)); - - __ Bind(&done); -} - -void LCodeGen::DoMathCos(LMathCos* instr) { - DCHECK(instr->IsMarkedAsCall()); - DCHECK(ToDoubleRegister(instr->value()).is(d0)); - __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1); - DCHECK(ToDoubleRegister(instr->result()).Is(d0)); -} - -void LCodeGen::DoMathSin(LMathSin* instr) { - DCHECK(instr->IsMarkedAsCall()); - DCHECK(ToDoubleRegister(instr->value()).is(d0)); - __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1); - DCHECK(ToDoubleRegister(instr->result()).Is(d0)); -} - -void LCodeGen::DoMathExp(LMathExp* instr) { - DCHECK(instr->IsMarkedAsCall()); - DCHECK(ToDoubleRegister(instr->value()).is(d0)); - __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1); - DCHECK(ToDoubleRegister(instr->result()).Is(d0)); -} - - -void LCodeGen::DoMathFloorD(LMathFloorD* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - DoubleRegister result = ToDoubleRegister(instr->result()); - - __ Frintm(result, input); -} - - -void LCodeGen::DoMathFloorI(LMathFloorI* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - Register result = ToRegister(instr->result()); - - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - DeoptimizeIfMinusZero(input, instr, DeoptimizeReason::kMinusZero); - } - - __ Fcvtms(result, input); - - // Check that the result fits into a 32-bit integer. - // - The result did not overflow. - __ Cmp(result, Operand(result, SXTW)); - // - The input was not NaN. - __ Fccmp(input, input, NoFlag, eq); - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN); -} - - -void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { - Register dividend = ToRegister32(instr->dividend()); - Register result = ToRegister32(instr->result()); - int32_t divisor = instr->divisor(); - - // If the divisor is 1, return the dividend. - if (divisor == 1) { - __ Mov(result, dividend, kDiscardForSameWReg); - return; - } - - // If the divisor is positive, things are easy: There can be no deopts and we - // can simply do an arithmetic right shift. - int32_t shift = WhichPowerOf2Abs(divisor); - if (divisor > 1) { - __ Mov(result, Operand(dividend, ASR, shift)); - return; - } - - // If the divisor is negative, we have to negate and handle edge cases. - __ Negs(result, dividend); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); - } - - // Dividing by -1 is basically negation, unless we overflow. - if (divisor == -1) { - if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { - DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow); - } - return; - } - - // If the negation could not overflow, simply shifting is OK. 
- if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { - __ Mov(result, Operand(dividend, ASR, shift)); - return; - } - - __ Asr(result, result, shift); - __ Csel(result, result, kMinInt / divisor, vc); -} - - -void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { - Register dividend = ToRegister32(instr->dividend()); - int32_t divisor = instr->divisor(); - Register result = ToRegister32(instr->result()); - DCHECK(!AreAliased(dividend, result)); - - if (divisor == 0) { - Deoptimize(instr, DeoptimizeReason::kDivisionByZero); - return; - } - - // Check for (0 / -x) that will produce negative zero. - HMathFloorOfDiv* hdiv = instr->hydrogen(); - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { - DeoptimizeIfZero(dividend, instr, DeoptimizeReason::kMinusZero); - } - - // Easy case: We need no dynamic check for the dividend and the flooring - // division is the same as the truncating division. - if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || - (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { - __ TruncatingDiv(result, dividend, Abs(divisor)); - if (divisor < 0) __ Neg(result, result); - return; - } - - // In the general case we may need to adjust before and after the truncating - // division to get a flooring division. - Register temp = ToRegister32(instr->temp()); - DCHECK(!AreAliased(temp, dividend, result)); - Label needs_adjustment, done; - __ Cmp(dividend, 0); - __ B(divisor > 0 ? lt : gt, &needs_adjustment); - __ TruncatingDiv(result, dividend, Abs(divisor)); - if (divisor < 0) __ Neg(result, result); - __ B(&done); - __ Bind(&needs_adjustment); - __ Add(temp, dividend, Operand(divisor > 0 ? 1 : -1)); - __ TruncatingDiv(result, temp, Abs(divisor)); - if (divisor < 0) __ Neg(result, result); - __ Sub(result, result, Operand(1)); - __ Bind(&done); -} - - -// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. -void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { - Register dividend = ToRegister32(instr->dividend()); - Register divisor = ToRegister32(instr->divisor()); - Register remainder = ToRegister32(instr->temp()); - Register result = ToRegister32(instr->result()); - - // This can't cause an exception on ARM, so we can speculatively - // execute it already now. - __ Sdiv(result, dividend, divisor); - - // Check for x / 0. - DeoptimizeIfZero(divisor, instr, DeoptimizeReason::kDivisionByZero); - - // Check for (kMinInt / -1). - if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { - // The V flag will be set iff dividend == kMinInt. - __ Cmp(dividend, 1); - __ Ccmp(divisor, -1, NoFlag, vs); - DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow); - } - - // Check for (0 / -x) that will produce negative zero. - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - __ Cmp(divisor, 0); - __ Ccmp(dividend, 0, ZFlag, mi); - // "divisor" can't be null because the code would have already been - // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0). - // In this case we need to deoptimize to produce a -0. - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); - } - - Label done; - // If both operands have the same sign then we are done. - __ Eor(remainder, dividend, divisor); - __ Tbz(remainder, kWSignBit, &done); - - // Check if the result needs to be corrected. 
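The correction that follows turns the truncating Sdiv result into a flooring quotient. In scalar form (a sketch, assuming divisor != 0 and no kMinInt / -1 overflow, both of which the deopts above already exclude):

#include <cstdint>

int32_t FlooringDiv(int32_t dividend, int32_t divisor) {
  int32_t quotient = dividend / divisor;               // truncates toward zero
  int32_t remainder = dividend - quotient * divisor;   // the Msub below
  // If the signs differ and the division was inexact, truncation rounded the
  // quotient up relative to the floor; step it down by one.
  if (((dividend ^ divisor) < 0) && remainder != 0) quotient -= 1;
  return quotient;
}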
- __ Msub(remainder, result, divisor, dividend); - __ Cbz(remainder, &done); - __ Sub(result, result, 1); - - __ Bind(&done); -} - - -void LCodeGen::DoMathLog(LMathLog* instr) { - DCHECK(instr->IsMarkedAsCall()); - DCHECK(ToDoubleRegister(instr->value()).is(d0)); - __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1); - DCHECK(ToDoubleRegister(instr->result()).Is(d0)); -} - - -void LCodeGen::DoMathClz32(LMathClz32* instr) { - Register input = ToRegister32(instr->value()); - Register result = ToRegister32(instr->result()); - __ Clz(result, input); -} - - -void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - DoubleRegister result = ToDoubleRegister(instr->result()); - Label done; - - // Math.pow(x, 0.5) differs from fsqrt(x) in the following cases: - // Math.pow(-Infinity, 0.5) == +Infinity - // Math.pow(-0.0, 0.5) == +0.0 - - // Catch -infinity inputs first. - // TODO(jbramley): A constant infinity register would be helpful here. - __ Fmov(double_scratch(), kFP64NegativeInfinity); - __ Fcmp(double_scratch(), input); - __ Fabs(result, input); - __ B(&done, eq); - - // Add +0.0 to convert -0.0 to +0.0. - __ Fadd(double_scratch(), input, fp_zero); - __ Fsqrt(result, double_scratch()); - - __ Bind(&done); -} - - -void LCodeGen::DoPower(LPower* instr) { - Representation exponent_type = instr->hydrogen()->right()->representation(); - // Having marked this as a call, we can use any registers. - // Just make sure that the input/output registers are the expected ones. - Register tagged_exponent = MathPowTaggedDescriptor::exponent(); - Register integer_exponent = MathPowIntegerDescriptor::exponent(); - DCHECK(!instr->right()->IsDoubleRegister() || - ToDoubleRegister(instr->right()).is(d1)); - DCHECK(exponent_type.IsInteger32() || !instr->right()->IsRegister() || - ToRegister(instr->right()).is(tagged_exponent)); - DCHECK(!exponent_type.IsInteger32() || - ToRegister(instr->right()).is(integer_exponent)); - DCHECK(ToDoubleRegister(instr->left()).is(d0)); - DCHECK(ToDoubleRegister(instr->result()).is(d0)); - - if (exponent_type.IsSmi()) { - MathPowStub stub(isolate(), MathPowStub::TAGGED); - __ CallStub(&stub); - } else if (exponent_type.IsTagged()) { - Label no_deopt; - __ JumpIfSmi(tagged_exponent, &no_deopt); - DeoptimizeIfNotHeapNumber(tagged_exponent, instr); - __ Bind(&no_deopt); - MathPowStub stub(isolate(), MathPowStub::TAGGED); - __ CallStub(&stub); - } else if (exponent_type.IsInteger32()) { - // Ensure integer exponent has no garbage in top 32-bits, as MathPowStub - // supports large integer exponents. - __ Sxtw(integer_exponent, integer_exponent); - MathPowStub stub(isolate(), MathPowStub::INTEGER); - __ CallStub(&stub); - } else { - DCHECK(exponent_type.IsDouble()); - MathPowStub stub(isolate(), MathPowStub::DOUBLE); - __ CallStub(&stub); - } -} - - -void LCodeGen::DoMathRoundD(LMathRoundD* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - DoubleRegister result = ToDoubleRegister(instr->result()); - DoubleRegister scratch_d = double_scratch(); - - DCHECK(!AreAliased(input, result, scratch_d)); - - Label done; - - __ Frinta(result, input); - __ Fcmp(input, 0.0); - __ Fccmp(result, input, ZFlag, lt); - // The result is correct if the input was in [-0, +infinity], or was a - // negative integral value. - __ B(eq, &done); - - // Here the input is negative, non integral, with an exponent lower than 52. - // We do not have to worry about the 0.49999999999999994 (0x3fdfffffffffffff) - // case. 
So we can safely add 0.5. - __ Fmov(scratch_d, 0.5); - __ Fadd(result, input, scratch_d); - __ Frintm(result, result); - // The range [-0.5, -0.0[ yielded +0.0. Force the sign to negative. - __ Fabs(result, result); - __ Fneg(result, result); - - __ Bind(&done); -} - - -void LCodeGen::DoMathRoundI(LMathRoundI* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - DoubleRegister temp = ToDoubleRegister(instr->temp1()); - DoubleRegister dot_five = double_scratch(); - Register result = ToRegister(instr->result()); - Label done; - - // Math.round() rounds to the nearest integer, with ties going towards - // +infinity. This does not match any IEEE-754 rounding mode. - // - Infinities and NaNs are propagated unchanged, but cause deopts because - // they can't be represented as integers. - // - The sign of the result is the same as the sign of the input. This means - // that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a - // result of -0.0. - - // Add 0.5 and round towards -infinity. - __ Fmov(dot_five, 0.5); - __ Fadd(temp, input, dot_five); - __ Fcvtms(result, temp); - - // The result is correct if: - // result is not 0, as the input could be NaN or [-0.5, -0.0]. - // result is not 1, as 0.499...94 will wrongly map to 1. - // result fits in 32 bits. - __ Cmp(result, Operand(result.W(), SXTW)); - __ Ccmp(result, 1, ZFlag, eq); - __ B(hi, &done); - - // At this point, we have to handle possible inputs of NaN or numbers in the - // range [-0.5, 1.5[, or numbers larger than 32 bits. - - // Deoptimize if the result > 1, as it must be larger than 32 bits. - __ Cmp(result, 1); - DeoptimizeIf(hi, instr, DeoptimizeReason::kOverflow); - - // Deoptimize for negative inputs, which at this point are only numbers in - // the range [-0.5, -0.0] - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - __ Fmov(result, input); - DeoptimizeIfNegative(result, instr, DeoptimizeReason::kMinusZero); - } - - // Deoptimize if the input was NaN. - __ Fcmp(input, dot_five); - DeoptimizeIf(vs, instr, DeoptimizeReason::kNaN); - - // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[ - // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1, - // else 0; we avoid dealing with 0.499...94 directly. - __ Cset(result, ge); - __ Bind(&done); -} - - -void LCodeGen::DoMathFround(LMathFround* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - DoubleRegister result = ToDoubleRegister(instr->result()); - __ Fcvt(result.S(), input); - __ Fcvt(result, result.S()); -} - - -void LCodeGen::DoMathSqrt(LMathSqrt* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - DoubleRegister result = ToDoubleRegister(instr->result()); - __ Fsqrt(result, input); -} - - -void LCodeGen::DoMathMinMax(LMathMinMax* instr) { - HMathMinMax::Operation op = instr->hydrogen()->operation(); - if (instr->hydrogen()->representation().IsInteger32()) { - Register result = ToRegister32(instr->result()); - Register left = ToRegister32(instr->left()); - Operand right = ToOperand32(instr->right()); - - __ Cmp(left, right); - __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le); - } else if (instr->hydrogen()->representation().IsSmi()) { - Register result = ToRegister(instr->result()); - Register left = ToRegister(instr->left()); - Operand right = ToOperand(instr->right()); - - __ Cmp(left, right); - __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? 
ge : le);
-  } else {
-    DCHECK(instr->hydrogen()->representation().IsDouble());
-    DoubleRegister result = ToDoubleRegister(instr->result());
-    DoubleRegister left = ToDoubleRegister(instr->left());
-    DoubleRegister right = ToDoubleRegister(instr->right());
-
-    if (op == HMathMinMax::kMathMax) {
-      __ Fmax(result, left, right);
-    } else {
-      DCHECK(op == HMathMinMax::kMathMin);
-      __ Fmin(result, left, right);
-    }
-  }
-}
-
-
-void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
-  Register dividend = ToRegister32(instr->dividend());
-  int32_t divisor = instr->divisor();
-  DCHECK(dividend.is(ToRegister32(instr->result())));
-
-  // Theoretically, a variation of the branch-free code for integer division by
-  // a power of 2 (calculating the remainder via an additional multiplication
-  // (which gets simplified to an 'and') and subtraction) should be faster, and
-  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
-  // indicate that positive dividends are heavily favored, so the branching
-  // version performs better.
-  HMod* hmod = instr->hydrogen();
-  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
-  Label dividend_is_not_negative, done;
-  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
-    __ Tbz(dividend, kWSignBit, &dividend_is_not_negative);
-    // Note that this is correct even for kMinInt operands.
-    __ Neg(dividend, dividend);
-    __ And(dividend, dividend, mask);
-    __ Negs(dividend, dividend);
-    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
-    }
-    __ B(&done);
-  }
-
-  __ bind(&dividend_is_not_negative);
-  __ And(dividend, dividend, mask);
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoModByConstI(LModByConstI* instr) {
-  Register dividend = ToRegister32(instr->dividend());
-  int32_t divisor = instr->divisor();
-  Register result = ToRegister32(instr->result());
-  Register temp = ToRegister32(instr->temp());
-  DCHECK(!AreAliased(dividend, result, temp));
-
-  if (divisor == 0) {
-    Deoptimize(instr, DeoptimizeReason::kDivisionByZero);
-    return;
-  }
-
-  __ TruncatingDiv(result, dividend, Abs(divisor));
-  __ Sxtw(dividend.X(), dividend);
-  __ Mov(temp, Abs(divisor));
-  __ Smsubl(result.X(), result, temp, dividend.X());
-
-  // Check for negative zero.
-  HMod* hmod = instr->hydrogen();
-  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    Label remainder_not_zero;
-    __ Cbnz(result, &remainder_not_zero);
-    DeoptimizeIfNegative(dividend, instr, DeoptimizeReason::kMinusZero);
-    __ bind(&remainder_not_zero);
-  }
-}
-
-
-void LCodeGen::DoModI(LModI* instr) {
-  Register dividend = ToRegister32(instr->left());
-  Register divisor = ToRegister32(instr->right());
-  Register result = ToRegister32(instr->result());
-
-  Label done;
-  // modulo = dividend - quotient * divisor
-  __ Sdiv(result, dividend, divisor);
-  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
-    DeoptimizeIfZero(divisor, instr, DeoptimizeReason::kDivisionByZero);
-  }
-  __ Msub(result, result, divisor, dividend);
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    __ Cbnz(result, &done);
-    DeoptimizeIfNegative(dividend, instr, DeoptimizeReason::kMinusZero);
-  }
-  __ Bind(&done);
-}
-
-
-void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
-  DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32());
-  bool is_smi = instr->hydrogen()->representation().IsSmi();
-  Register result =
-      is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
-  Register left =
-      is_smi ?
ToRegister(instr->left()) : ToRegister32(instr->left()); - int32_t right = ToInteger32(instr->right()); - DCHECK((right > -kMaxInt) && (right < kMaxInt)); - - bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); - bool bailout_on_minus_zero = - instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); - - if (bailout_on_minus_zero) { - if (right < 0) { - // The result is -0 if right is negative and left is zero. - DeoptimizeIfZero(left, instr, DeoptimizeReason::kMinusZero); - } else if (right == 0) { - // The result is -0 if the right is zero and the left is negative. - DeoptimizeIfNegative(left, instr, DeoptimizeReason::kMinusZero); - } - } - - switch (right) { - // Cases which can detect overflow. - case -1: - if (can_overflow) { - // Only 0x80000000 can overflow here. - __ Negs(result, left); - DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow); - } else { - __ Neg(result, left); - } - break; - case 0: - // This case can never overflow. - __ Mov(result, 0); - break; - case 1: - // This case can never overflow. - __ Mov(result, left, kDiscardForSameWReg); - break; - case 2: - if (can_overflow) { - __ Adds(result, left, left); - DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow); - } else { - __ Add(result, left, left); - } - break; - - default: - // Multiplication by constant powers of two (and some related values) - // can be done efficiently with shifted operands. - int32_t right_abs = Abs(right); - - if (base::bits::IsPowerOfTwo32(right_abs)) { - int right_log2 = WhichPowerOf2(right_abs); - - if (can_overflow) { - Register scratch = result; - DCHECK(!AreAliased(scratch, left)); - __ Cls(scratch, left); - __ Cmp(scratch, right_log2); - DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow); - } - - if (right >= 0) { - // result = left << log2(right) - __ Lsl(result, left, right_log2); - } else { - // result = -left << log2(-right) - if (can_overflow) { - __ Negs(result, Operand(left, LSL, right_log2)); - DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow); - } else { - __ Neg(result, Operand(left, LSL, right_log2)); - } - } - return; - } - - - // For the following cases, we could perform a conservative overflow check - // with CLS as above. However the few cycles saved are likely not worth - // the risk of deoptimizing more often than required. 
- DCHECK(!can_overflow); - - if (right >= 0) { - if (base::bits::IsPowerOfTwo32(right - 1)) { - // result = left + left << log2(right - 1) - __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1))); - } else if (base::bits::IsPowerOfTwo32(right + 1)) { - // result = -left + left << log2(right + 1) - __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1))); - __ Neg(result, result); - } else { - UNREACHABLE(); - } - } else { - if (base::bits::IsPowerOfTwo32(-right + 1)) { - // result = left - left << log2(-right + 1) - __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1))); - } else if (base::bits::IsPowerOfTwo32(-right - 1)) { - // result = -left - left << log2(-right - 1) - __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1))); - __ Neg(result, result); - } else { - UNREACHABLE(); - } - } - } -} - - -void LCodeGen::DoMulI(LMulI* instr) { - Register result = ToRegister32(instr->result()); - Register left = ToRegister32(instr->left()); - Register right = ToRegister32(instr->right()); - - bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); - bool bailout_on_minus_zero = - instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); - - if (bailout_on_minus_zero && !left.Is(right)) { - // If one operand is zero and the other is negative, the result is -0. - // - Set Z (eq) if either left or right, or both, are 0. - __ Cmp(left, 0); - __ Ccmp(right, 0, ZFlag, ne); - // - If so (eq), set N (mi) if left + right is negative. - // - Otherwise, clear N. - __ Ccmn(left, right, NoFlag, eq); - DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero); - } - - if (can_overflow) { - __ Smull(result.X(), left, right); - __ Cmp(result.X(), Operand(result, SXTW)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow); - } else { - __ Mul(result, left, right); - } -} - - -void LCodeGen::DoMulS(LMulS* instr) { - Register result = ToRegister(instr->result()); - Register left = ToRegister(instr->left()); - Register right = ToRegister(instr->right()); - - bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); - bool bailout_on_minus_zero = - instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); - - if (bailout_on_minus_zero && !left.Is(right)) { - // If one operand is zero and the other is negative, the result is -0. - // - Set Z (eq) if either left or right, or both, are 0. - __ Cmp(left, 0); - __ Ccmp(right, 0, ZFlag, ne); - // - If so (eq), set N (mi) if left + right is negative. - // - Otherwise, clear N. - __ Ccmn(left, right, NoFlag, eq); - DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero); - } - - STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0)); - if (can_overflow) { - __ Smulh(result, left, right); - __ Cmp(result, Operand(result.W(), SXTW)); - __ SmiTag(result); - DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow); - } else { - if (AreAliased(result, left, right)) { - // All three registers are the same: half untag the input and then - // multiply, giving a tagged result. - STATIC_ASSERT((kSmiShift % 2) == 0); - __ Asr(result, left, kSmiShift / 2); - __ Mul(result, result, result); - } else if (result.Is(left) && !left.Is(right)) { - // Registers result and left alias, right is distinct: untag left into - // result, and then multiply by right, giving a tagged result. 
- __ SmiUntag(result, left); - __ Mul(result, result, right); - } else { - DCHECK(!left.Is(result)); - // Registers result and right alias, left is distinct, or all registers - // are distinct: untag right into result, and then multiply by left, - // giving a tagged result. - __ SmiUntag(result, right); - __ Mul(result, left, result); - } - } -} - - -void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - Register result = ToRegister(instr->result()); - __ Mov(result, 0); - - PushSafepointRegistersScope scope(this); - // Reset the context register. - if (!result.is(cp)) { - __ Mov(cp, 0); - } - __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); - RecordSafepointWithRegisters( - instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(x0, result); -} - - -void LCodeGen::DoNumberTagD(LNumberTagD* instr) { - class DeferredNumberTagD: public LDeferredCode { - public: - DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) - : LDeferredCode(codegen), instr_(instr) { } - virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } - virtual LInstruction* instr() { return instr_; } - private: - LNumberTagD* instr_; - }; - - DoubleRegister input = ToDoubleRegister(instr->value()); - Register result = ToRegister(instr->result()); - Register temp1 = ToRegister(instr->temp1()); - Register temp2 = ToRegister(instr->temp2()); - - DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); - if (FLAG_inline_new) { - __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2); - } else { - __ B(deferred->entry()); - } - - __ Bind(deferred->exit()); - __ Str(input, FieldMemOperand(result, HeapNumber::kValueOffset)); -} - - -void LCodeGen::DoDeferredNumberTagU(LInstruction* instr, - LOperand* value, - LOperand* temp1, - LOperand* temp2) { - Label slow, convert_and_store; - Register src = ToRegister32(value); - Register dst = ToRegister(instr->result()); - Register scratch1 = ToRegister(temp1); - - if (FLAG_inline_new) { - Register scratch2 = ToRegister(temp2); - __ AllocateHeapNumber(dst, &slow, scratch1, scratch2); - __ B(&convert_and_store); - } - - // Slow case: call the runtime system to do the number allocation. - __ Bind(&slow); - // TODO(3095996): Put a valid pointer value in the stack slot where the result - // register is stored, as this register is in the pointer map, but contains an - // integer value. - __ Mov(dst, 0); - { - // Preserve the value of all registers. - PushSafepointRegistersScope scope(this); - // Reset the context register. - if (!dst.is(cp)) { - __ Mov(cp, 0); - } - __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); - RecordSafepointWithRegisters( - instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(x0, dst); - } - - // Convert number to floating point and store in the newly allocated heap - // number. 
- __ Bind(&convert_and_store); - DoubleRegister dbl_scratch = double_scratch(); - __ Ucvtf(dbl_scratch, src); - __ Str(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset)); -} - - -void LCodeGen::DoNumberTagU(LNumberTagU* instr) { - class DeferredNumberTagU: public LDeferredCode { - public: - DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) - : LDeferredCode(codegen), instr_(instr) { } - virtual void Generate() { - codegen()->DoDeferredNumberTagU(instr_, - instr_->value(), - instr_->temp1(), - instr_->temp2()); - } - virtual LInstruction* instr() { return instr_; } - private: - LNumberTagU* instr_; - }; - - Register value = ToRegister32(instr->value()); - Register result = ToRegister(instr->result()); - - DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr); - __ Cmp(value, Smi::kMaxValue); - __ B(hi, deferred->entry()); - __ SmiTag(result, value.X()); - __ Bind(deferred->exit()); -} - - -void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { - Register input = ToRegister(instr->value()); - Register scratch = ToRegister(instr->temp()); - DoubleRegister result = ToDoubleRegister(instr->result()); - bool can_convert_undefined_to_nan = instr->truncating(); - - Label done, load_smi; - - // Work out what untag mode we're working with. - HValue* value = instr->hydrogen()->value(); - NumberUntagDMode mode = value->representation().IsSmi() - ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; - - if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { - __ JumpIfSmi(input, &load_smi); - - Label convert_undefined; - - // Heap number map check. - if (can_convert_undefined_to_nan) { - __ JumpIfNotHeapNumber(input, &convert_undefined); - } else { - DeoptimizeIfNotHeapNumber(input, instr); - } - - // Load heap number. - __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset)); - if (instr->hydrogen()->deoptimize_on_minus_zero()) { - DeoptimizeIfMinusZero(result, instr, DeoptimizeReason::kMinusZero); - } - __ B(&done); - - if (can_convert_undefined_to_nan) { - __ Bind(&convert_undefined); - DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr, - DeoptimizeReason::kNotAHeapNumberUndefined); - - __ LoadRoot(scratch, Heap::kNanValueRootIndex); - __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset)); - __ B(&done); - } - - } else { - DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); - // Fall through to load_smi. - } - - // Smi to double register conversion. - __ Bind(&load_smi); - __ SmiUntagToDouble(result, input); - - __ Bind(&done); -} - - -void LCodeGen::DoOsrEntry(LOsrEntry* instr) { - // This is a pseudo-instruction that ensures that the environment here is - // properly registered for deoptimization and records the assembler's PC - // offset. - LEnvironment* environment = instr->environment(); - - // If the environment were already registered, we would have no way of - // backpatching it with the spill slot operands. - DCHECK(!environment->HasBeenRegistered()); - RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); - - GenerateOsrPrologue(); -} - - -void LCodeGen::DoParameter(LParameter* instr) { - // Nothing to do. 
-} - - -void LCodeGen::DoPreparePushArguments(LPreparePushArguments* instr) { - __ PushPreamble(instr->argc(), kPointerSize); -} - - -void LCodeGen::DoPushArguments(LPushArguments* instr) { - MacroAssembler::PushPopQueue args(masm()); - - for (int i = 0; i < instr->ArgumentCount(); ++i) { - LOperand* arg = instr->argument(i); - if (arg->IsDoubleRegister() || arg->IsDoubleStackSlot()) { - Abort(kDoPushArgumentNotImplementedForDoubleType); - return; - } - args.Queue(ToRegister(arg)); - } - - // The preamble was done by LPreparePushArguments. - args.PushQueued(MacroAssembler::PushPopQueue::SKIP_PREAMBLE); - - RecordPushedArgumentsDelta(instr->ArgumentCount()); -} - - -void LCodeGen::DoReturn(LReturn* instr) { - if (FLAG_trace && info()->IsOptimizing()) { - // Push the return value on the stack as the parameter. - // Runtime::TraceExit returns its parameter in x0. We're leaving the code - // managed by the register allocator and tearing down the frame, it's - // safe to write to the context register. - __ Push(x0); - __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - __ CallRuntime(Runtime::kTraceExit); - } - - if (info()->saves_caller_doubles()) { - RestoreCallerDoubles(); - } - - if (NeedsEagerFrame()) { - Register stack_pointer = masm()->StackPointer(); - __ Mov(stack_pointer, fp); - __ Pop(fp, lr); - } - - if (instr->has_constant_parameter_count()) { - int parameter_count = ToInteger32(instr->constant_parameter_count()); - __ Drop(parameter_count + 1); - } else { - DCHECK(info()->IsStub()); // Functions would need to drop one more value. - Register parameter_count = ToRegister(instr->parameter_count()); - __ DropBySMI(parameter_count); - } - __ Ret(); -} - - -MemOperand LCodeGen::BuildSeqStringOperand(Register string, - Register temp, - LOperand* index, - String::Encoding encoding) { - if (index->IsConstantOperand()) { - int offset = ToInteger32(LConstantOperand::cast(index)); - if (encoding == String::TWO_BYTE_ENCODING) { - offset *= kUC16Size; - } - STATIC_ASSERT(kCharSize == 1); - return FieldMemOperand(string, SeqString::kHeaderSize + offset); - } - - __ Add(temp, string, SeqString::kHeaderSize - kHeapObjectTag); - if (encoding == String::ONE_BYTE_ENCODING) { - return MemOperand(temp, ToRegister32(index), SXTW); - } else { - STATIC_ASSERT(kUC16Size == 2); - return MemOperand(temp, ToRegister32(index), SXTW, 1); - } -} - - -void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { - String::Encoding encoding = instr->hydrogen()->encoding(); - Register string = ToRegister(instr->string()); - Register result = ToRegister(instr->result()); - Register temp = ToRegister(instr->temp()); - - if (FLAG_debug_code) { - // Even though this lithium instruction comes with a temp register, we - // can't use it here because we want to use "AtStart" constraints on the - // inputs and the debug code here needs a scratch register. - UseScratchRegisterScope temps(masm()); - Register dbg_temp = temps.AcquireX(); - - __ Ldr(dbg_temp, FieldMemOperand(string, HeapObject::kMapOffset)); - __ Ldrb(dbg_temp, FieldMemOperand(dbg_temp, Map::kInstanceTypeOffset)); - - __ And(dbg_temp, dbg_temp, - Operand(kStringRepresentationMask | kStringEncodingMask)); - static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; - static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; - __ Cmp(dbg_temp, Operand(encoding == String::ONE_BYTE_ENCODING - ? 
one_byte_seq_type : two_byte_seq_type)); - __ Check(eq, kUnexpectedStringType); - } - - MemOperand operand = - BuildSeqStringOperand(string, temp, instr->index(), encoding); - if (encoding == String::ONE_BYTE_ENCODING) { - __ Ldrb(result, operand); - } else { - __ Ldrh(result, operand); - } -} - - -void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { - String::Encoding encoding = instr->hydrogen()->encoding(); - Register string = ToRegister(instr->string()); - Register value = ToRegister(instr->value()); - Register temp = ToRegister(instr->temp()); - - if (FLAG_debug_code) { - DCHECK(ToRegister(instr->context()).is(cp)); - Register index = ToRegister(instr->index()); - static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; - static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; - int encoding_mask = - instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING - ? one_byte_seq_type : two_byte_seq_type; - __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp, - encoding_mask); - } - MemOperand operand = - BuildSeqStringOperand(string, temp, instr->index(), encoding); - if (encoding == String::ONE_BYTE_ENCODING) { - __ Strb(value, operand); - } else { - __ Strh(value, operand); - } -} - - -void LCodeGen::DoSmiTag(LSmiTag* instr) { - HChange* hchange = instr->hydrogen(); - Register input = ToRegister(instr->value()); - Register output = ToRegister(instr->result()); - if (hchange->CheckFlag(HValue::kCanOverflow) && - hchange->value()->CheckFlag(HValue::kUint32)) { - DeoptimizeIfNegative(input.W(), instr, DeoptimizeReason::kOverflow); - } - __ SmiTag(output, input); -} - - -void LCodeGen::DoSmiUntag(LSmiUntag* instr) { - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - Label done, untag; - - if (instr->needs_check()) { - DeoptimizeIfNotSmi(input, instr, DeoptimizeReason::kNotASmi); - } - - __ Bind(&untag); - __ SmiUntag(result, input); - __ Bind(&done); -} - - -void LCodeGen::DoShiftI(LShiftI* instr) { - LOperand* right_op = instr->right(); - Register left = ToRegister32(instr->left()); - Register result = ToRegister32(instr->result()); - - if (right_op->IsRegister()) { - Register right = ToRegister32(instr->right()); - switch (instr->op()) { - case Token::ROR: __ Ror(result, left, right); break; - case Token::SAR: __ Asr(result, left, right); break; - case Token::SHL: __ Lsl(result, left, right); break; - case Token::SHR: - __ Lsr(result, left, right); - if (instr->can_deopt()) { - // If `left >>> right` >= 0x80000000, the result is not representable - // in a signed 32-bit smi. 
- DeoptimizeIfNegative(result, instr, DeoptimizeReason::kNegativeValue); - } - break; - default: UNREACHABLE(); - } - } else { - DCHECK(right_op->IsConstantOperand()); - int shift_count = JSShiftAmountFromLConstant(right_op); - if (shift_count == 0) { - if ((instr->op() == Token::SHR) && instr->can_deopt()) { - DeoptimizeIfNegative(left, instr, DeoptimizeReason::kNegativeValue); - } - __ Mov(result, left, kDiscardForSameWReg); - } else { - switch (instr->op()) { - case Token::ROR: __ Ror(result, left, shift_count); break; - case Token::SAR: __ Asr(result, left, shift_count); break; - case Token::SHL: __ Lsl(result, left, shift_count); break; - case Token::SHR: __ Lsr(result, left, shift_count); break; - default: UNREACHABLE(); - } - } - } -} - - -void LCodeGen::DoShiftS(LShiftS* instr) { - LOperand* right_op = instr->right(); - Register left = ToRegister(instr->left()); - Register result = ToRegister(instr->result()); - - if (right_op->IsRegister()) { - Register right = ToRegister(instr->right()); - - // JavaScript shifts only look at the bottom 5 bits of the 'right' operand. - // Since we're handling smis in X registers, we have to extract these bits - // explicitly. - __ Ubfx(result, right, kSmiShift, 5); - - switch (instr->op()) { - case Token::ROR: { - // This is the only case that needs a scratch register. To keep things - // simple for the other cases, borrow a MacroAssembler scratch register. - UseScratchRegisterScope temps(masm()); - Register temp = temps.AcquireW(); - __ SmiUntag(temp, left); - __ Ror(result.W(), temp.W(), result.W()); - __ SmiTag(result); - break; - } - case Token::SAR: - __ Asr(result, left, result); - __ Bic(result, result, kSmiShiftMask); - break; - case Token::SHL: - __ Lsl(result, left, result); - break; - case Token::SHR: - __ Lsr(result, left, result); - __ Bic(result, result, kSmiShiftMask); - if (instr->can_deopt()) { - // If `left >>> right` >= 0x80000000, the result is not representable - // in a signed 32-bit smi. - DeoptimizeIfNegative(result, instr, DeoptimizeReason::kNegativeValue); - } - break; - default: UNREACHABLE(); - } - } else { - DCHECK(right_op->IsConstantOperand()); - int shift_count = JSShiftAmountFromLConstant(right_op); - if (shift_count == 0) { - if ((instr->op() == Token::SHR) && instr->can_deopt()) { - DeoptimizeIfNegative(left, instr, DeoptimizeReason::kNegativeValue); - } - __ Mov(result, left); - } else { - switch (instr->op()) { - case Token::ROR: - __ SmiUntag(result, left); - __ Ror(result.W(), result.W(), shift_count); - __ SmiTag(result); - break; - case Token::SAR: - __ Asr(result, left, shift_count); - __ Bic(result, result, kSmiShiftMask); - break; - case Token::SHL: - __ Lsl(result, left, shift_count); - break; - case Token::SHR: - __ Lsr(result, left, shift_count); - __ Bic(result, result, kSmiShiftMask); - break; - default: UNREACHABLE(); - } - } - } -} - - -void LCodeGen::DoDebugBreak(LDebugBreak* instr) { - __ Debug("LDebugBreak", 0, BREAK); -} - - -void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - Register scratch1 = x5; - Register scratch2 = x6; - DCHECK(instr->IsMarkedAsCall()); - - // TODO(all): if Mov could handle object in new space then it could be used - // here. 
- __ LoadHeapObject(scratch1, instr->hydrogen()->declarations()); - __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags())); - __ Push(scratch1, scratch2); - __ LoadHeapObject(scratch1, instr->hydrogen()->feedback_vector()); - __ Push(scratch1); - CallRuntime(Runtime::kDeclareGlobals, instr); -} - - -void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { - PushSafepointRegistersScope scope(this); - LoadContextFromDeferred(instr->context()); - __ CallRuntimeSaveDoubles(Runtime::kStackGuard); - RecordSafepointWithLazyDeopt( - instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - DCHECK(instr->HasEnvironment()); - LEnvironment* env = instr->environment(); - safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); -} - - -void LCodeGen::DoStackCheck(LStackCheck* instr) { - class DeferredStackCheck: public LDeferredCode { - public: - DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) - : LDeferredCode(codegen), instr_(instr) { } - virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); } - virtual LInstruction* instr() { return instr_; } - private: - LStackCheck* instr_; - }; - - DCHECK(instr->HasEnvironment()); - LEnvironment* env = instr->environment(); - // There is no LLazyBailout instruction for stack-checks. We have to - // prepare for lazy deoptimization explicitly here. - if (instr->hydrogen()->is_function_entry()) { - // Perform stack overflow check. - Label done; - __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex); - __ B(hs, &done); - - PredictableCodeSizeScope predictable(masm_, - Assembler::kCallSizeWithRelocation); - DCHECK(instr->context()->IsRegister()); - DCHECK(ToRegister(instr->context()).is(cp)); - CallCode(isolate()->builtins()->StackCheck(), - RelocInfo::CODE_TARGET, - instr); - __ Bind(&done); - } else { - DCHECK(instr->hydrogen()->is_backwards_branch()); - // Perform stack overflow check if this goto needs it before jumping. - DeferredStackCheck* deferred_stack_check = - new(zone()) DeferredStackCheck(this, instr); - __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex); - __ B(lo, deferred_stack_check->entry()); - - EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); - __ Bind(instr->done_label()); - deferred_stack_check->SetExit(instr->done_label()); - RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); - // Don't record a deoptimization index for the safepoint here. - // This will be done explicitly when emitting call and the safepoint in - // the deferred code. 
- } -} - - -void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { - Register function = ToRegister(instr->function()); - Register code_object = ToRegister(instr->code_object()); - Register temp = ToRegister(instr->temp()); - __ Add(temp, code_object, Code::kHeaderSize - kHeapObjectTag); - __ Str(temp, FieldMemOperand(function, JSFunction::kCodeEntryOffset)); -} - - -void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { - Register context = ToRegister(instr->context()); - Register value = ToRegister(instr->value()); - Register scratch = ToRegister(instr->temp()); - MemOperand target = ContextMemOperand(context, instr->slot_index()); - - Label skip_assignment; - - if (instr->hydrogen()->RequiresHoleCheck()) { - __ Ldr(scratch, target); - if (instr->hydrogen()->DeoptimizesOnHole()) { - DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr, - DeoptimizeReason::kHole); - } else { - __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment); - } - } - - __ Str(value, target); - if (instr->hydrogen()->NeedsWriteBarrier()) { - SmiCheck check_needed = - instr->hydrogen()->value()->type().IsHeapObject() - ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; - __ RecordWriteContextSlot(context, static_cast(target.offset()), value, - scratch, GetLinkRegisterState(), kSaveFPRegs, - EMIT_REMEMBERED_SET, check_needed); - } - __ Bind(&skip_assignment); -} - - -void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) { - Register ext_ptr = ToRegister(instr->elements()); - Register key = no_reg; - Register scratch; - ElementsKind elements_kind = instr->elements_kind(); - - bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); - bool key_is_constant = instr->key()->IsConstantOperand(); - int constant_key = 0; - if (key_is_constant) { - DCHECK(instr->temp() == NULL); - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xf0000000) { - Abort(kArrayIndexConstantValueTooBig); - } - } else { - key = ToRegister(instr->key()); - scratch = ToRegister(instr->temp()); - } - - MemOperand dst = - PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi, - key_is_constant, constant_key, - elements_kind, - instr->base_offset()); - - if (elements_kind == FLOAT32_ELEMENTS) { - DoubleRegister value = ToDoubleRegister(instr->value()); - DoubleRegister dbl_scratch = double_scratch(); - __ Fcvt(dbl_scratch.S(), value); - __ Str(dbl_scratch.S(), dst); - } else if (elements_kind == FLOAT64_ELEMENTS) { - DoubleRegister value = ToDoubleRegister(instr->value()); - __ Str(value, dst); - } else { - Register value = ToRegister(instr->value()); - - switch (elements_kind) { - case UINT8_ELEMENTS: - case UINT8_CLAMPED_ELEMENTS: - case INT8_ELEMENTS: - __ Strb(value, dst); - break; - case INT16_ELEMENTS: - case UINT16_ELEMENTS: - __ Strh(value, dst); - break; - case INT32_ELEMENTS: - case UINT32_ELEMENTS: - __ Str(value.W(), dst); - break; - case FLOAT32_ELEMENTS: - case FLOAT64_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case DICTIONARY_ELEMENTS: - case FAST_SLOPPY_ARGUMENTS_ELEMENTS: - case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: - case FAST_STRING_WRAPPER_ELEMENTS: - case SLOW_STRING_WRAPPER_ELEMENTS: - case NO_ELEMENTS: - UNREACHABLE(); - break; - } - } -} - - -void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) { - Register elements = ToRegister(instr->elements()); - DoubleRegister value = 
ToDoubleRegister(instr->value()); - MemOperand mem_op; - - if (instr->key()->IsConstantOperand()) { - int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xf0000000) { - Abort(kArrayIndexConstantValueTooBig); - } - int offset = instr->base_offset() + constant_key * kDoubleSize; - mem_op = MemOperand(elements, offset); - } else { - Register store_base = ToRegister(instr->temp()); - Register key = ToRegister(instr->key()); - bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi(); - mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged, - instr->hydrogen()->elements_kind(), - instr->hydrogen()->representation(), - instr->base_offset()); - } - - if (instr->NeedsCanonicalization()) { - __ CanonicalizeNaN(double_scratch(), value); - __ Str(double_scratch(), mem_op); - } else { - __ Str(value, mem_op); - } -} - - -void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) { - Register value = ToRegister(instr->value()); - Register elements = ToRegister(instr->elements()); - Register scratch = no_reg; - Register store_base = no_reg; - Register key = no_reg; - MemOperand mem_op; - - if (!instr->key()->IsConstantOperand() || - instr->hydrogen()->NeedsWriteBarrier()) { - scratch = ToRegister(instr->temp()); - } - - Representation representation = instr->hydrogen()->value()->representation(); - if (instr->key()->IsConstantOperand()) { - LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); - int offset = instr->base_offset() + - ToInteger32(const_operand) * kPointerSize; - store_base = elements; - if (representation.IsInteger32()) { - DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY); - DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS); - STATIC_ASSERT(static_cast(kSmiValueSize) == kWRegSizeInBits); - STATIC_ASSERT(kSmiTag == 0); - mem_op = UntagSmiMemOperand(store_base, offset); - } else { - mem_op = MemOperand(store_base, offset); - } - } else { - store_base = scratch; - key = ToRegister(instr->key()); - bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi(); - - mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged, - instr->hydrogen()->elements_kind(), - representation, instr->base_offset()); - } - - __ Store(value, mem_op, representation); - - if (instr->hydrogen()->NeedsWriteBarrier()) { - DCHECK(representation.IsTagged()); - // This assignment may cause element_addr to alias store_base. - Register element_addr = scratch; - SmiCheck check_needed = - instr->hydrogen()->value()->type().IsHeapObject() - ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; - // Compute address of modified element and store it into key register. 
- __ Add(element_addr, mem_op.base(), mem_op.OffsetAsOperand()); - __ RecordWrite(elements, element_addr, value, GetLinkRegisterState(), - kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed, - instr->hydrogen()->PointersToHereCheckForValue()); - } -} - - -void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) { - class DeferredMaybeGrowElements final : public LDeferredCode { - public: - DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr) - : LDeferredCode(codegen), instr_(instr) {} - void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LMaybeGrowElements* instr_; - }; - - Register result = x0; - DeferredMaybeGrowElements* deferred = - new (zone()) DeferredMaybeGrowElements(this, instr); - LOperand* key = instr->key(); - LOperand* current_capacity = instr->current_capacity(); - - DCHECK(instr->hydrogen()->key()->representation().IsInteger32()); - DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32()); - DCHECK(key->IsConstantOperand() || key->IsRegister()); - DCHECK(current_capacity->IsConstantOperand() || - current_capacity->IsRegister()); - - if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) { - int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); - int32_t constant_capacity = - ToInteger32(LConstantOperand::cast(current_capacity)); - if (constant_key >= constant_capacity) { - // Deferred case. - __ B(deferred->entry()); - } - } else if (key->IsConstantOperand()) { - int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); - __ Cmp(ToRegister(current_capacity), Operand(constant_key)); - __ B(le, deferred->entry()); - } else if (current_capacity->IsConstantOperand()) { - int32_t constant_capacity = - ToInteger32(LConstantOperand::cast(current_capacity)); - __ Cmp(ToRegister(key), Operand(constant_capacity)); - __ B(ge, deferred->entry()); - } else { - __ Cmp(ToRegister(key), ToRegister(current_capacity)); - __ B(ge, deferred->entry()); - } - - __ Mov(result, ToRegister(instr->elements())); - - __ Bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) { - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - Register result = x0; - __ Mov(result, 0); - - // We have to call a stub. - { - PushSafepointRegistersScope scope(this); - __ Move(result, ToRegister(instr->object())); - - LOperand* key = instr->key(); - if (key->IsConstantOperand()) { - __ Mov(x3, Operand(ToSmi(LConstantOperand::cast(key)))); - } else { - __ Mov(x3, ToRegister(key)); - __ SmiTag(x3); - } - - GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind()); - __ CallStub(&stub); - RecordSafepointWithLazyDeopt( - instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - __ StoreToSafepointRegisterSlot(result, result); - } - - // Deopt on smi, which means the elements array changed to dictionary mode. 
-  DeoptimizeIfSmi(result, instr, DeoptimizeReason::kSmi);
-}
-
-
-void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
-  Representation representation = instr->representation();
-
-  Register object = ToRegister(instr->object());
-  HObjectAccess access = instr->hydrogen()->access();
-  int offset = access.offset();
-
-  if (access.IsExternalMemory()) {
-    DCHECK(!instr->hydrogen()->has_transition());
-    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
-    Register value = ToRegister(instr->value());
-    __ Store(value, MemOperand(object, offset), representation);
-    return;
-  }
-
-  __ AssertNotSmi(object);
-
-  if (!FLAG_unbox_double_fields && representation.IsDouble()) {
-    DCHECK(access.IsInobject());
-    DCHECK(!instr->hydrogen()->has_transition());
-    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
-    VRegister value = ToDoubleRegister(instr->value());
-    __ Str(value, FieldMemOperand(object, offset));
-    return;
-  }
-
-  DCHECK(!representation.IsSmi() ||
-         !instr->value()->IsConstantOperand() ||
-         IsInteger32Constant(LConstantOperand::cast(instr->value())));
-
-  if (instr->hydrogen()->has_transition()) {
-    Handle<Map> transition = instr->hydrogen()->transition_map();
-    AddDeprecationDependency(transition);
-    // Store the new map value.
-    Register new_map_value = ToRegister(instr->temp0());
-    __ Mov(new_map_value, Operand(transition));
-    __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
-    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
-      // Update the write barrier for the map field.
-      __ RecordWriteForMap(object,
-                           new_map_value,
-                           ToRegister(instr->temp1()),
-                           GetLinkRegisterState(),
-                           kSaveFPRegs);
-    }
-  }
-
-  // Do the store.
-  Register destination;
-  if (access.IsInobject()) {
-    destination = object;
-  } else {
-    Register temp0 = ToRegister(instr->temp0());
-    __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset));
-    destination = temp0;
-  }
-
-  if (FLAG_unbox_double_fields && representation.IsDouble()) {
-    DCHECK(access.IsInobject());
-    VRegister value = ToDoubleRegister(instr->value());
-    __ Str(value, FieldMemOperand(object, offset));
-  } else if (representation.IsSmi() &&
-             instr->hydrogen()->value()->representation().IsInteger32()) {
-    DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
-#ifdef DEBUG
-    Register temp0 = ToRegister(instr->temp0());
-    __ Ldr(temp0, FieldMemOperand(destination, offset));
-    __ AssertSmi(temp0);
-    // If destination aliased temp0, restore it to the address calculated
-    // earlier.
-    if (destination.Is(temp0)) {
-      DCHECK(!access.IsInobject());
-      __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
-    }
-#endif
-    STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
-    STATIC_ASSERT(kSmiTag == 0);
-    Register value = ToRegister(instr->value());
-    __ Store(value, UntagSmiFieldMemOperand(destination, offset),
-             Representation::Integer32());
-  } else {
-    Register value = ToRegister(instr->value());
-    __ Store(value, FieldMemOperand(destination, offset), representation);
-  }
-  if (instr->hydrogen()->NeedsWriteBarrier()) {
-    Register value = ToRegister(instr->value());
-    __ RecordWriteField(destination,
-                        offset,
-                        value,            // Clobbered.
-                        ToRegister(instr->temp1()),  // Clobbered.
- GetLinkRegisterState(), - kSaveFPRegs, - EMIT_REMEMBERED_SET, - instr->hydrogen()->SmiCheckForWriteBarrier(), - instr->hydrogen()->PointersToHereCheckForValue()); - } -} - - -void LCodeGen::DoStringAdd(LStringAdd* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - DCHECK(ToRegister(instr->left()).Is(x1)); - DCHECK(ToRegister(instr->right()).Is(x0)); - StringAddStub stub(isolate(), - instr->hydrogen()->flags(), - instr->hydrogen()->pretenure_flag()); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); -} - - -void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { - class DeferredStringCharCodeAt: public LDeferredCode { - public: - DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) - : LDeferredCode(codegen), instr_(instr) { } - virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); } - virtual LInstruction* instr() { return instr_; } - private: - LStringCharCodeAt* instr_; - }; - - DeferredStringCharCodeAt* deferred = - new(zone()) DeferredStringCharCodeAt(this, instr); - - StringCharLoadGenerator::Generate(masm(), - ToRegister(instr->string()), - ToRegister32(instr->index()), - ToRegister(instr->result()), - deferred->entry()); - __ Bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { - Register string = ToRegister(instr->string()); - Register result = ToRegister(instr->result()); - - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - __ Mov(result, 0); - - PushSafepointRegistersScope scope(this); - __ Push(string); - // Push the index as a smi. This is safe because of the checks in - // DoStringCharCodeAt above. - Register index = ToRegister(instr->index()); - __ SmiTagAndPush(index); - - CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr, - instr->context()); - __ AssertSmi(x0); - __ SmiUntag(x0); - __ StoreToSafepointRegisterSlot(x0, result); -} - - -void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { - class DeferredStringCharFromCode: public LDeferredCode { - public: - DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) - : LDeferredCode(codegen), instr_(instr) { } - virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); } - virtual LInstruction* instr() { return instr_; } - private: - LStringCharFromCode* instr_; - }; - - DeferredStringCharFromCode* deferred = - new(zone()) DeferredStringCharFromCode(this, instr); - - DCHECK(instr->hydrogen()->value()->representation().IsInteger32()); - Register char_code = ToRegister32(instr->char_code()); - Register result = ToRegister(instr->result()); - - __ Cmp(char_code, String::kMaxOneByteCharCode); - __ B(hi, deferred->entry()); - __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); - __ Add(result, result, FixedArray::kHeaderSize - kHeapObjectTag); - __ Ldr(result, MemOperand(result, char_code, SXTW, kPointerSizeLog2)); - __ CompareRoot(result, Heap::kUndefinedValueRootIndex); - __ B(eq, deferred->entry()); - __ Bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { - Register char_code = ToRegister(instr->char_code()); - Register result = ToRegister(instr->result()); - - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. 
- __ Mov(result, 0); - - PushSafepointRegistersScope scope(this); - __ SmiTagAndPush(char_code); - CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr, - instr->context()); - __ StoreToSafepointRegisterSlot(x0, result); -} - - -void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - DCHECK(ToRegister(instr->left()).is(x1)); - DCHECK(ToRegister(instr->right()).is(x0)); - - Handle code = CodeFactory::StringCompare(isolate(), instr->op()).code(); - CallCode(code, RelocInfo::CODE_TARGET, instr); - __ CompareRoot(x0, Heap::kTrueValueRootIndex); - EmitBranch(instr, eq); -} - - -void LCodeGen::DoSubI(LSubI* instr) { - bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); - Register result = ToRegister32(instr->result()); - Register left = ToRegister32(instr->left()); - Operand right = ToShiftedRightOperand32(instr->right(), instr); - - if (can_overflow) { - __ Subs(result, left, right); - DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow); - } else { - __ Sub(result, left, right); - } -} - - -void LCodeGen::DoSubS(LSubS* instr) { - bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); - Register result = ToRegister(instr->result()); - Register left = ToRegister(instr->left()); - Operand right = ToOperand(instr->right()); - if (can_overflow) { - __ Subs(result, left, right); - DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow); - } else { - __ Sub(result, left, right); - } -} - - -void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, - LOperand* value, - LOperand* temp1, - LOperand* temp2) { - Register input = ToRegister(value); - Register scratch1 = ToRegister(temp1); - DoubleRegister dbl_scratch1 = double_scratch(); - - Label done; - - if (instr->truncating()) { - UseScratchRegisterScope temps(masm()); - Register output = ToRegister(instr->result()); - Register input_map = temps.AcquireX(); - Register input_instance_type = input_map; - Label truncate; - __ CompareObjectType(input, input_map, input_instance_type, - HEAP_NUMBER_TYPE); - __ B(eq, &truncate); - __ Cmp(input_instance_type, ODDBALL_TYPE); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball); - __ Bind(&truncate); - __ TruncateHeapNumberToI(output, input); - } else { - Register output = ToRegister32(instr->result()); - DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2); - - DeoptimizeIfNotHeapNumber(input, instr); - - // A heap number: load value and convert to int32 using non-truncating - // function. If the result is out of range, branch to deoptimize. 
- __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset)); - __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2); - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN); - - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - __ Cmp(output, 0); - __ B(ne, &done); - __ Fmov(scratch1, dbl_scratch1); - DeoptimizeIfNegative(scratch1, instr, DeoptimizeReason::kMinusZero); - } - } - __ Bind(&done); -} - - -void LCodeGen::DoTaggedToI(LTaggedToI* instr) { - class DeferredTaggedToI: public LDeferredCode { - public: - DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) - : LDeferredCode(codegen), instr_(instr) { } - virtual void Generate() { - codegen()->DoDeferredTaggedToI(instr_, instr_->value(), instr_->temp1(), - instr_->temp2()); - } - - virtual LInstruction* instr() { return instr_; } - private: - LTaggedToI* instr_; - }; - - Register input = ToRegister(instr->value()); - Register output = ToRegister(instr->result()); - - if (instr->hydrogen()->value()->representation().IsSmi()) { - __ SmiUntag(output, input); - } else { - DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); - - __ JumpIfNotSmi(input, deferred->entry()); - __ SmiUntag(output, input); - __ Bind(deferred->exit()); - } -} - - -void LCodeGen::DoThisFunction(LThisFunction* instr) { - Register result = ToRegister(instr->result()); - __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); -} - - -void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { - UNREACHABLE(); -} - - -void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { - Register object = ToRegister(instr->object()); - Register temp1 = ToRegister(instr->temp1()); - Register temp2 = ToRegister(instr->temp2()); - - Label no_memento_found; - __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found); - DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound); - __ Bind(&no_memento_found); -} - - -void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - Register result = ToRegister(instr->result()); - __ TruncateDoubleToI(result, input); - if (instr->tag_result()) { - __ SmiTag(result, result); - } -} - - -void LCodeGen::DoTypeof(LTypeof* instr) { - DCHECK(ToRegister(instr->value()).is(x3)); - DCHECK(ToRegister(instr->result()).is(x0)); - Label end, do_call; - Register value_register = ToRegister(instr->value()); - __ JumpIfNotSmi(value_register, &do_call); - __ Mov(x0, Immediate(isolate()->factory()->number_string())); - __ B(&end); - __ Bind(&do_call); - Callable callable = Builtins::CallableFor(isolate(), Builtins::kTypeof); - CallCode(callable.code(), RelocInfo::CODE_TARGET, instr); - __ Bind(&end); -} - - -void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { - Handle type_name = instr->type_literal(); - Label* true_label = instr->TrueLabel(chunk_); - Label* false_label = instr->FalseLabel(chunk_); - Register value = ToRegister(instr->value()); - - Factory* factory = isolate()->factory(); - if (String::Equals(type_name, factory->number_string())) { - __ JumpIfSmi(value, true_label); - - int true_block = instr->TrueDestination(chunk_); - int false_block = instr->FalseDestination(chunk_); - int next_block = GetNextEmittedBlock(); - - if (true_block == false_block) { - EmitGoto(true_block); - } else if (true_block == next_block) { - __ JumpIfNotHeapNumber(value, chunk_->GetAssemblyLabel(false_block)); - } else { - __ 
JumpIfHeapNumber(value, chunk_->GetAssemblyLabel(true_block)); - if (false_block != next_block) { - __ B(chunk_->GetAssemblyLabel(false_block)); - } - } - - } else if (String::Equals(type_name, factory->string_string())) { - DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL)); - Register map = ToRegister(instr->temp1()); - Register scratch = ToRegister(instr->temp2()); - - __ JumpIfSmi(value, false_label); - __ CompareObjectType(value, map, scratch, FIRST_NONSTRING_TYPE); - EmitBranch(instr, lt); - - } else if (String::Equals(type_name, factory->symbol_string())) { - DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL)); - Register map = ToRegister(instr->temp1()); - Register scratch = ToRegister(instr->temp2()); - - __ JumpIfSmi(value, false_label); - __ CompareObjectType(value, map, scratch, SYMBOL_TYPE); - EmitBranch(instr, eq); - - } else if (String::Equals(type_name, factory->boolean_string())) { - __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label); - __ CompareRoot(value, Heap::kFalseValueRootIndex); - EmitBranch(instr, eq); - - } else if (String::Equals(type_name, factory->undefined_string())) { - DCHECK(instr->temp1() != NULL); - Register scratch = ToRegister(instr->temp1()); - - __ JumpIfRoot(value, Heap::kNullValueRootIndex, false_label); - __ JumpIfSmi(value, false_label); - // Check for undetectable objects and jump to the true branch in this case. - __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); - __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); - EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable); - - } else if (String::Equals(type_name, factory->function_string())) { - DCHECK(instr->temp1() != NULL); - Register scratch = ToRegister(instr->temp1()); - - __ JumpIfSmi(value, false_label); - __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); - __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); - __ And(scratch, scratch, - (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)); - EmitCompareAndBranch(instr, eq, scratch, 1 << Map::kIsCallable); - - } else if (String::Equals(type_name, factory->object_string())) { - DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL)); - Register map = ToRegister(instr->temp1()); - Register scratch = ToRegister(instr->temp2()); - - __ JumpIfSmi(value, false_label); - __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label); - STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); - __ JumpIfObjectType(value, map, scratch, FIRST_JS_RECEIVER_TYPE, - false_label, lt); - // Check for callable or undetectable objects => false. 
-    __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
-    EmitTestAndBranch(instr, eq, scratch,
-                      (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
-
-  } else {
-    __ B(false_label);
-  }
-}
-
-
-void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
-  __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
-}
-
-
-void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
-  Register object = ToRegister(instr->value());
-  Register map = ToRegister(instr->map());
-  Register temp = ToRegister(instr->temp());
-  __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
-  __ Cmp(map, temp);
-  DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
-}
-
-
-void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
-  Register receiver = ToRegister(instr->receiver());
-  Register function = ToRegister(instr->function());
-  Register result = ToRegister(instr->result());
-
-  // If the receiver is null or undefined, we have to pass the global object as
-  // a receiver to normal functions. Values have to be passed unchanged to
-  // builtins and strict-mode functions.
-  Label global_object, done, copy_receiver;
-
-  if (!instr->hydrogen()->known_function()) {
-    __ Ldr(result, FieldMemOperand(function,
-                                   JSFunction::kSharedFunctionInfoOffset));
-
-    // CompilerHints is an int32 field. See objects.h.
-    __ Ldr(result.W(),
-           FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset));
-
-    // Do not transform the receiver to object for strict mode functions.
-    __ Tbnz(result, SharedFunctionInfo::IsStrictBit::kShift, &copy_receiver);
-
-    // Do not transform the receiver to object for builtins.
-    __ Tbnz(result, SharedFunctionInfo::IsNativeBit::kShift, &copy_receiver);
-  }
-
-  // Normal function. Replace undefined or null with global receiver.
-  __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
-  __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
-
-  // Deoptimize if the receiver is not a JS object.
- DeoptimizeIfSmi(receiver, instr, DeoptimizeReason::kSmi); - __ CompareObjectType(receiver, result, result, FIRST_JS_RECEIVER_TYPE); - __ B(ge, &copy_receiver); - Deoptimize(instr, DeoptimizeReason::kNotAJavaScriptObject); - - __ Bind(&global_object); - __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset)); - __ Ldr(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX)); - __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX)); - __ B(&done); - - __ Bind(&copy_receiver); - __ Mov(result, receiver); - __ Bind(&done); -} - - -void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, - Register result, - Register object, - Register index) { - PushSafepointRegistersScope scope(this); - __ Push(object); - __ Push(index); - __ Mov(cp, 0); - __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); - RecordSafepointWithRegisters( - instr->pointer_map(), 2, Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(x0, result); -} - - -void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { - class DeferredLoadMutableDouble final : public LDeferredCode { - public: - DeferredLoadMutableDouble(LCodeGen* codegen, - LLoadFieldByIndex* instr, - Register result, - Register object, - Register index) - : LDeferredCode(codegen), - instr_(instr), - result_(result), - object_(object), - index_(index) { - } - void Generate() override { - codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_); - } - LInstruction* instr() override { return instr_; } - - private: - LLoadFieldByIndex* instr_; - Register result_; - Register object_; - Register index_; - }; - Register object = ToRegister(instr->object()); - Register index = ToRegister(instr->index()); - Register result = ToRegister(instr->result()); - - __ AssertSmi(index); - - DeferredLoadMutableDouble* deferred; - deferred = new(zone()) DeferredLoadMutableDouble( - this, instr, result, object, index); - - Label out_of_object, done; - - __ TestAndBranchIfAnySet( - index, reinterpret_cast<uint64_t>(Smi::FromInt(1)), deferred->entry()); - __ Mov(index, Operand(index, ASR, 1)); - - __ Cmp(index, Smi::kZero); - __ B(lt, &out_of_object); - - STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize); - __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2)); - __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize)); - - __ B(&done); - - __ Bind(&out_of_object); - __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); - // Index is equal to negated out of object property index plus 1. - __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2)); - __ Ldr(result, FieldMemOperand(result, - FixedArray::kHeaderSize - kPointerSize)); - __ Bind(deferred->exit()); - __ Bind(&done); -} - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/arm64/lithium-codegen-arm64.h b/src/crankshaft/arm64/lithium-codegen-arm64.h deleted file mode 100644 index 9da7744d9c..0000000000 --- a/src/crankshaft/arm64/lithium-codegen-arm64.h +++ /dev/null @@ -1,441 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file.
- -#ifndef V8_CRANKSHAFT_ARM64_LITHIUM_CODEGEN_ARM64_H_ -#define V8_CRANKSHAFT_ARM64_LITHIUM_CODEGEN_ARM64_H_ - -#include "src/crankshaft/arm64/lithium-arm64.h" - -#include "src/ast/scopes.h" -#include "src/crankshaft/arm64/lithium-gap-resolver-arm64.h" -#include "src/crankshaft/lithium-codegen.h" -#include "src/deoptimizer.h" -#include "src/safepoint-table.h" -#include "src/utils.h" - -namespace v8 { -namespace internal { - -// Forward declarations. -class LDeferredCode; -class SafepointGenerator; -class BranchGenerator; - -class LCodeGen: public LCodeGenBase { - public: - LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) - : LCodeGenBase(chunk, assembler, info), - jump_table_(4, info->zone()), - scope_(info->scope()), - deferred_(8, info->zone()), - frame_is_built_(false), - safepoints_(info->zone()), - resolver_(this), - expected_safepoint_kind_(Safepoint::kSimple), - pushed_arguments_(0) { - PopulateDeoptimizationLiteralsWithInlinedFunctions(); - } - - // Simple accessors. - Scope* scope() const { return scope_; } - - int LookupDestination(int block_id) const { - return chunk()->LookupDestination(block_id); - } - - bool IsNextEmittedBlock(int block_id) const { - return LookupDestination(block_id) == GetNextEmittedBlock(); - } - - bool NeedsEagerFrame() const { - return HasAllocatedStackSlots() || info()->is_non_deferred_calling() || - !info()->IsStub() || info()->requires_frame(); - } - bool NeedsDeferredFrame() const { - return !NeedsEagerFrame() && info()->is_deferred_calling(); - } - - LinkRegisterStatus GetLinkRegisterState() const { - return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved; - } - - // Try to generate code for the entire chunk, but it may fail if the - // chunk contains constructs we cannot handle. Returns true if the - // code generation attempt succeeded. - bool GenerateCode(); - - // Finish the code by setting stack height, safepoint, and bailout - // information on it. - void FinishCode(Handle<Code> code); - - enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 }; - // Support for converting LOperands to assembler types. - Register ToRegister(LOperand* op) const; - Register ToRegister32(LOperand* op) const; - Operand ToOperand(LOperand* op); - Operand ToOperand32(LOperand* op); - enum StackMode { kMustUseFramePointer, kCanUseStackPointer }; - MemOperand ToMemOperand(LOperand* op, - StackMode stack_mode = kCanUseStackPointer) const; - Handle<Object> ToHandle(LConstantOperand* op) const; - - template <class LI> - Operand ToShiftedRightOperand32(LOperand* right, LI* shift_info); - - int JSShiftAmountFromLConstant(LOperand* constant) { - return ToInteger32(LConstantOperand::cast(constant)) & 0x1f; - } - - // TODO(jbramley): Examine these helpers and check that they make sense. - // IsInteger32Constant returns true for smi constants, for example. - bool IsInteger32Constant(LConstantOperand* op) const; - bool IsSmi(LConstantOperand* op) const; - - int32_t ToInteger32(LConstantOperand* op) const; - Smi* ToSmi(LConstantOperand* op) const; - double ToDouble(LConstantOperand* op) const; - DoubleRegister ToDoubleRegister(LOperand* op) const; - - // Declare methods that deal with the individual node types. -#define DECLARE_DO(type) void Do##type(L##type* node); - LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) -#undef DECLARE_DO - - private: - // Return a double scratch register which can be used locally - // when generating code for a lithium instruction. - DoubleRegister double_scratch() { return crankshaft_fp_scratch; } - - // Deferred code support.
- void DoDeferredNumberTagD(LNumberTagD* instr); - void DoDeferredStackCheck(LStackCheck* instr); - void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr); - void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr); - void DoDeferredStringCharFromCode(LStringCharFromCode* instr); - void DoDeferredMathAbsTagged(LMathAbsTagged* instr, - Label* exit, - Label* allocation_entry); - - void DoDeferredNumberTagU(LInstruction* instr, - LOperand* value, - LOperand* temp1, - LOperand* temp2); - void DoDeferredTaggedToI(LTaggedToI* instr, - LOperand* value, - LOperand* temp1, - LOperand* temp2); - void DoDeferredAllocate(LAllocate* instr); - void DoDeferredInstanceMigration(LCheckMaps* instr, Register object); - void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, - Register result, - Register object, - Register index); - - static Condition TokenToCondition(Token::Value op, bool is_unsigned); - void EmitGoto(int block); - void DoGap(LGap* instr); - - // Generic version of EmitBranch. It contains some code to avoid emitting a - // branch on the next emitted basic block where we could just fall-through. - // You shouldn't use that directly but rather consider one of the helper like - // LCodeGen::EmitBranch, LCodeGen::EmitCompareAndBranch... - template - void EmitBranchGeneric(InstrType instr, - const BranchGenerator& branch); - - template - void EmitBranch(InstrType instr, Condition condition); - - template - void EmitCompareAndBranch(InstrType instr, - Condition condition, - const Register& lhs, - const Operand& rhs); - - template - void EmitTestAndBranch(InstrType instr, - Condition condition, - const Register& value, - uint64_t mask); - - template - void EmitBranchIfNonZeroNumber(InstrType instr, const VRegister& value, - const VRegister& scratch); - - template - void EmitBranchIfHeapNumber(InstrType instr, - const Register& value); - - template - void EmitBranchIfRoot(InstrType instr, - const Register& value, - Heap::RootListIndex index); - - // Emits optimized code to deep-copy the contents of statically known object - // graphs (e.g. object literal boilerplate). Expects a pointer to the - // allocated destination object in the result register, and a pointer to the - // source object in the source register. - void EmitDeepCopy(Handle object, - Register result, - Register source, - Register scratch, - int* offset, - AllocationSiteMode mode); - - template - void EmitVectorLoadICRegisters(T* instr); - - // Emits optimized code for %_IsString(x). Preserves input register. - // Returns the condition on which a final split to - // true and false label should be made, to optimize fallthrough. 
- Condition EmitIsString(Register input, Register temp1, Label* is_not_string, - SmiCheck check_needed); - - MemOperand BuildSeqStringOperand(Register string, - Register temp, - LOperand* index, - String::Encoding encoding); - void DeoptimizeBranch(LInstruction* instr, DeoptimizeReason deopt_reason, - BranchType branch_type, Register reg = NoReg, - int bit = -1, - Deoptimizer::BailoutType* override_bailout_type = NULL); - void Deoptimize(LInstruction* instr, DeoptimizeReason deopt_reason, - Deoptimizer::BailoutType* override_bailout_type = NULL); - void DeoptimizeIf(Condition cond, LInstruction* instr, - DeoptimizeReason deopt_reason); - void DeoptimizeIfZero(Register rt, LInstruction* instr, - DeoptimizeReason deopt_reason); - void DeoptimizeIfNotZero(Register rt, LInstruction* instr, - DeoptimizeReason deopt_reason); - void DeoptimizeIfNegative(Register rt, LInstruction* instr, - DeoptimizeReason deopt_reason); - void DeoptimizeIfSmi(Register rt, LInstruction* instr, - DeoptimizeReason deopt_reason); - void DeoptimizeIfNotSmi(Register rt, LInstruction* instr, - DeoptimizeReason deopt_reason); - void DeoptimizeIfRoot(Register rt, Heap::RootListIndex index, - LInstruction* instr, DeoptimizeReason deopt_reason); - void DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index, - LInstruction* instr, DeoptimizeReason deopt_reason); - void DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr); - void DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr, - DeoptimizeReason deopt_reason); - void DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr, - DeoptimizeReason deopt_reason); - void DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr, - DeoptimizeReason deopt_reason); - - MemOperand PrepareKeyedExternalArrayOperand(Register key, - Register base, - Register scratch, - bool key_is_smi, - bool key_is_constant, - int constant_key, - ElementsKind elements_kind, - int base_offset); - MemOperand PrepareKeyedArrayOperand(Register base, - Register elements, - Register key, - bool key_is_tagged, - ElementsKind elements_kind, - Representation representation, - int base_offset); - - void RegisterEnvironmentForDeoptimization(LEnvironment* environment, - Safepoint::DeoptMode mode); - - bool HasAllocatedStackSlots() const { - return chunk()->HasAllocatedStackSlots(); - } - int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); } - int GetTotalFrameSlotCount() const { - return chunk()->GetTotalFrameSlotCount(); - } - - void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); } - - // Emit frame translation commands for an environment. - void WriteTranslation(LEnvironment* environment, Translation* translation); - - void AddToTranslation(LEnvironment* environment, - Translation* translation, - LOperand* op, - bool is_tagged, - bool is_uint32, - int* object_index_pointer, - int* dematerialized_index_pointer); - - void SaveCallerDoubles(); - void RestoreCallerDoubles(); - - // Code generation steps. Returns true if code generation should continue. - void GenerateBodyInstructionPre(LInstruction* instr) override; - bool GeneratePrologue(); - bool GenerateDeferredCode(); - bool GenerateJumpTable(); - bool GenerateSafepointTable(); - - // Generates the custom OSR entrypoint and sets the osr_pc_offset. 
- void GenerateOsrPrologue(); - - enum SafepointMode { - RECORD_SIMPLE_SAFEPOINT, - RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS - }; - - void CallCode(Handle code, - RelocInfo::Mode mode, - LInstruction* instr); - - void CallCodeGeneric(Handle code, - RelocInfo::Mode mode, - LInstruction* instr, - SafepointMode safepoint_mode); - - void CallRuntime(const Runtime::Function* function, - int num_arguments, - LInstruction* instr, - SaveFPRegsMode save_doubles = kDontSaveFPRegs); - - void CallRuntime(Runtime::FunctionId id, - int num_arguments, - LInstruction* instr) { - const Runtime::Function* function = Runtime::FunctionForId(id); - CallRuntime(function, num_arguments, instr); - } - - void CallRuntime(Runtime::FunctionId id, LInstruction* instr) { - const Runtime::Function* function = Runtime::FunctionForId(id); - CallRuntime(function, function->nargs, instr); - } - - void LoadContextFromDeferred(LOperand* context); - void CallRuntimeFromDeferred(Runtime::FunctionId id, - int argc, - LInstruction* instr, - LOperand* context); - - void PrepareForTailCall(const ParameterCount& actual, Register scratch1, - Register scratch2, Register scratch3); - - // Generate a direct call to a known function. Expects the function - // to be in x1. - void CallKnownFunction(Handle function, - int formal_parameter_count, int arity, - bool is_tail_call, LInstruction* instr); - - // Support for recording safepoint information. - void RecordSafepoint(LPointerMap* pointers, - Safepoint::Kind kind, - int arguments, - Safepoint::DeoptMode mode); - void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode); - void RecordSafepoint(Safepoint::DeoptMode mode); - void RecordSafepointWithRegisters(LPointerMap* pointers, - int arguments, - Safepoint::DeoptMode mode); - void RecordSafepointWithLazyDeopt(LInstruction* instr, - SafepointMode safepoint_mode); - - void EnsureSpaceForLazyDeopt(int space_needed) override; - - ZoneList jump_table_; - Scope* const scope_; - ZoneList deferred_; - bool frame_is_built_; - - // Builder that keeps track of safepoints in the code. The table itself is - // emitted at the end of the generated code. - SafepointTableBuilder safepoints_; - - // Compiler from a set of parallel moves to a sequential list of moves. - LGapResolver resolver_; - - Safepoint::Kind expected_safepoint_kind_; - - // The number of arguments pushed onto the stack, either by this block or by a - // predecessor. - int pushed_arguments_; - - void RecordPushedArgumentsDelta(int delta) { - pushed_arguments_ += delta; - DCHECK(pushed_arguments_ >= 0); - } - - int old_position_; - - class PushSafepointRegistersScope BASE_EMBEDDED { - public: - explicit PushSafepointRegistersScope(LCodeGen* codegen); - - ~PushSafepointRegistersScope(); - - private: - LCodeGen* codegen_; - }; - - friend class LDeferredCode; - friend class SafepointGenerator; - DISALLOW_COPY_AND_ASSIGN(LCodeGen); -}; - - -class LDeferredCode: public ZoneObject { - public: - explicit LDeferredCode(LCodeGen* codegen) - : codegen_(codegen), - external_exit_(NULL), - instruction_index_(codegen->current_instruction_) { - codegen->AddDeferredCode(this); - } - - virtual ~LDeferredCode() { } - virtual void Generate() = 0; - virtual LInstruction* instr() = 0; - - void SetExit(Label* exit) { external_exit_ = exit; } - Label* entry() { return &entry_; } - Label* exit() { return (external_exit_ != NULL) ? 
external_exit_ : &exit_; } - int instruction_index() const { return instruction_index_; } - - protected: - LCodeGen* codegen() const { return codegen_; } - MacroAssembler* masm() const { return codegen_->masm(); } - - private: - LCodeGen* codegen_; - Label entry_; - Label exit_; - Label* external_exit_; - int instruction_index_; -}; - - -// This is the abstract class used by EmitBranchGeneric. -// It is used to emit code for conditional branching. The Emit() function -// emits code to branch when the condition holds and EmitInverted() emits -// the branch when the inverted condition is verified. -// -// For actual examples of condition see the concrete implementation in -// lithium-codegen-arm64.cc (e.g. BranchOnCondition, CompareAndBranch). -class BranchGenerator BASE_EMBEDDED { - public: - explicit BranchGenerator(LCodeGen* codegen) - : codegen_(codegen) { } - - virtual ~BranchGenerator() { } - - virtual void Emit(Label* label) const = 0; - virtual void EmitInverted(Label* label) const = 0; - - protected: - MacroAssembler* masm() const { return codegen_->masm(); } - - LCodeGen* codegen_; -}; - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_ARM64_LITHIUM_CODEGEN_ARM64_H_ diff --git a/src/crankshaft/arm64/lithium-gap-resolver-arm64.cc b/src/crankshaft/arm64/lithium-gap-resolver-arm64.cc deleted file mode 100644 index 37db921b62..0000000000 --- a/src/crankshaft/arm64/lithium-gap-resolver-arm64.cc +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/arm64/lithium-gap-resolver-arm64.h" -#include "src/crankshaft/arm64/delayed-masm-arm64-inl.h" -#include "src/crankshaft/arm64/lithium-codegen-arm64.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - -#define __ ACCESS_MASM((&masm_)) - -DelayedGapMasm::DelayedGapMasm(LCodeGen* owner, MacroAssembler* masm) - : DelayedMasm(owner, masm, root) { - // We use the root register as an extra scratch register. - // The root register has two advantages: - // - It is not in crankshaft allocatable registers list, so it can't - // interfere with the allocatable registers. - // - We don't need to push it on the stack, as we can reload it with its - // value once we have finish. -} - -DelayedGapMasm::~DelayedGapMasm() {} - -void DelayedGapMasm::EndDelayedUse() { - DelayedMasm::EndDelayedUse(); - if (scratch_register_used()) { - DCHECK(ScratchRegister().Is(root)); - DCHECK(!pending()); - InitializeRootRegister(); - reset_scratch_register_used(); - } -} - - -LGapResolver::LGapResolver(LCodeGen* owner) - : cgen_(owner), masm_(owner, owner->masm()), moves_(32, owner->zone()), - root_index_(0), in_cycle_(false), saved_destination_(NULL) { -} - - -void LGapResolver::Resolve(LParallelMove* parallel_move) { - DCHECK(moves_.is_empty()); - DCHECK(!masm_.pending()); - - // Build up a worklist of moves. - BuildInitialMoveList(parallel_move); - - for (int i = 0; i < moves_.length(); ++i) { - LMoveOperands move = moves_[i]; - - // Skip constants to perform them last. They don't block other moves - // and skipping such moves with register destinations keeps those - // registers free for the whole algorithm. - if (!move.IsEliminated() && !move.source()->IsConstantOperand()) { - root_index_ = i; // Any cycle is found when we reach this move again. - PerformMove(i); - if (in_cycle_) RestoreValue(); - } - } - - // Perform the moves with constant sources. 
- for (int i = 0; i < moves_.length(); ++i) { - LMoveOperands move = moves_[i]; - - if (!move.IsEliminated()) { - DCHECK(move.source()->IsConstantOperand()); - EmitMove(i); - } - } - - __ EndDelayedUse(); - - moves_.Rewind(0); -} - - -void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) { - // Perform a linear sweep of the moves to add them to the initial list of - // moves to perform, ignoring any move that is redundant (the source is - // the same as the destination, the destination is ignored and - // unallocated, or the move was already eliminated). - const ZoneList* moves = parallel_move->move_operands(); - for (int i = 0; i < moves->length(); ++i) { - LMoveOperands move = moves->at(i); - if (!move.IsRedundant()) moves_.Add(move, cgen_->zone()); - } - Verify(); -} - - -void LGapResolver::PerformMove(int index) { - // Each call to this function performs a move and deletes it from the move - // graph. We first recursively perform any move blocking this one. We - // mark a move as "pending" on entry to PerformMove in order to detect - // cycles in the move graph. - LMoveOperands& current_move = moves_[index]; - - DCHECK(!current_move.IsPending()); - DCHECK(!current_move.IsRedundant()); - - // Clear this move's destination to indicate a pending move. The actual - // destination is saved in a stack allocated local. Multiple moves can - // be pending because this function is recursive. - DCHECK(current_move.source() != NULL); // Otherwise it will look eliminated. - LOperand* destination = current_move.destination(); - current_move.set_destination(NULL); - - // Perform a depth-first traversal of the move graph to resolve - // dependencies. Any unperformed, unpending move with a source the same - // as this one's destination blocks this one so recursively perform all - // such moves. - for (int i = 0; i < moves_.length(); ++i) { - LMoveOperands other_move = moves_[i]; - if (other_move.Blocks(destination) && !other_move.IsPending()) { - PerformMove(i); - // If there is a blocking, pending move it must be moves_[root_index_] - // and all other moves with the same source as moves_[root_index_] are - // sucessfully executed (because they are cycle-free) by this loop. - } - } - - // We are about to resolve this move and don't need it marked as - // pending, so restore its destination. - current_move.set_destination(destination); - - // The move may be blocked on a pending move, which must be the starting move. - // In this case, we have a cycle, and we save the source of this move to - // a scratch register to break it. - LMoveOperands other_move = moves_[root_index_]; - if (other_move.Blocks(destination)) { - DCHECK(other_move.IsPending()); - BreakCycle(index); - return; - } - - // This move is no longer blocked. - EmitMove(index); -} - - -void LGapResolver::Verify() { -#ifdef ENABLE_SLOW_DCHECKS - // No operand should be the destination for more than one move. - for (int i = 0; i < moves_.length(); ++i) { - LOperand* destination = moves_[i].destination(); - for (int j = i + 1; j < moves_.length(); ++j) { - SLOW_DCHECK(!destination->Equals(moves_[j].destination())); - } - } -#endif -} - - -void LGapResolver::BreakCycle(int index) { - DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source())); - DCHECK(!in_cycle_); - - // We save in a register the source of that move and we remember its - // destination. Then we mark this move as resolved so the cycle is - // broken and we can perform the other moves. 
- in_cycle_ = true; - LOperand* source = moves_[index].source(); - saved_destination_ = moves_[index].destination(); - - if (source->IsRegister()) { - AcquireSavedValueRegister(); - __ Mov(SavedValueRegister(), cgen_->ToRegister(source)); - } else if (source->IsStackSlot()) { - AcquireSavedValueRegister(); - __ Load(SavedValueRegister(), cgen_->ToMemOperand(source)); - } else if (source->IsDoubleRegister()) { - __ Fmov(SavedFPValueRegister(), cgen_->ToDoubleRegister(source)); - } else if (source->IsDoubleStackSlot()) { - __ Load(SavedFPValueRegister(), cgen_->ToMemOperand(source)); - } else { - UNREACHABLE(); - } - - // Mark this move as resolved. - // This move will be actually performed by moving the saved value to this - // move's destination in LGapResolver::RestoreValue(). - moves_[index].Eliminate(); -} - - -void LGapResolver::RestoreValue() { - DCHECK(in_cycle_); - DCHECK(saved_destination_ != NULL); - - if (saved_destination_->IsRegister()) { - __ Mov(cgen_->ToRegister(saved_destination_), SavedValueRegister()); - ReleaseSavedValueRegister(); - } else if (saved_destination_->IsStackSlot()) { - __ Store(SavedValueRegister(), cgen_->ToMemOperand(saved_destination_)); - ReleaseSavedValueRegister(); - } else if (saved_destination_->IsDoubleRegister()) { - __ Fmov(cgen_->ToDoubleRegister(saved_destination_), - SavedFPValueRegister()); - } else if (saved_destination_->IsDoubleStackSlot()) { - __ Store(SavedFPValueRegister(), cgen_->ToMemOperand(saved_destination_)); - } else { - UNREACHABLE(); - } - - in_cycle_ = false; - saved_destination_ = NULL; -} - - -void LGapResolver::EmitMove(int index) { - LOperand* source = moves_[index].source(); - LOperand* destination = moves_[index].destination(); - - // Dispatch on the source and destination operand kinds. Not all - // combinations are possible. - - if (source->IsRegister()) { - Register source_register = cgen_->ToRegister(source); - if (destination->IsRegister()) { - __ Mov(cgen_->ToRegister(destination), source_register); - } else { - DCHECK(destination->IsStackSlot()); - __ Store(source_register, cgen_->ToMemOperand(destination)); - } - - } else if (source->IsStackSlot()) { - MemOperand source_operand = cgen_->ToMemOperand(source); - if (destination->IsRegister()) { - __ Load(cgen_->ToRegister(destination), source_operand); - } else { - DCHECK(destination->IsStackSlot()); - EmitStackSlotMove(index); - } - - } else if (source->IsConstantOperand()) { - LConstantOperand* constant_source = LConstantOperand::cast(source); - if (destination->IsRegister()) { - Register dst = cgen_->ToRegister(destination); - if (cgen_->IsSmi(constant_source)) { - __ Mov(dst, cgen_->ToSmi(constant_source)); - } else if (cgen_->IsInteger32Constant(constant_source)) { - __ Mov(dst, cgen_->ToInteger32(constant_source)); - } else { - __ LoadObject(dst, cgen_->ToHandle(constant_source)); - } - } else if (destination->IsDoubleRegister()) { - DoubleRegister result = cgen_->ToDoubleRegister(destination); - __ Fmov(result, cgen_->ToDouble(constant_source)); - } else { - DCHECK(destination->IsStackSlot()); - DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone. 
- if (cgen_->IsSmi(constant_source)) { - Smi* smi = cgen_->ToSmi(constant_source); - __ StoreConstant(reinterpret_cast(smi), - cgen_->ToMemOperand(destination)); - } else if (cgen_->IsInteger32Constant(constant_source)) { - __ StoreConstant(cgen_->ToInteger32(constant_source), - cgen_->ToMemOperand(destination)); - } else { - Handle handle = cgen_->ToHandle(constant_source); - AllowDeferredHandleDereference smi_object_check; - if (handle->IsSmi()) { - Object* obj = *handle; - DCHECK(!obj->IsHeapObject()); - __ StoreConstant(reinterpret_cast(obj), - cgen_->ToMemOperand(destination)); - } else { - AcquireSavedValueRegister(); - __ LoadObject(SavedValueRegister(), handle); - __ Store(SavedValueRegister(), cgen_->ToMemOperand(destination)); - ReleaseSavedValueRegister(); - } - } - } - - } else if (source->IsDoubleRegister()) { - DoubleRegister src = cgen_->ToDoubleRegister(source); - if (destination->IsDoubleRegister()) { - __ Fmov(cgen_->ToDoubleRegister(destination), src); - } else { - DCHECK(destination->IsDoubleStackSlot()); - __ Store(src, cgen_->ToMemOperand(destination)); - } - - } else if (source->IsDoubleStackSlot()) { - MemOperand src = cgen_->ToMemOperand(source); - if (destination->IsDoubleRegister()) { - __ Load(cgen_->ToDoubleRegister(destination), src); - } else { - DCHECK(destination->IsDoubleStackSlot()); - EmitStackSlotMove(index); - } - - } else { - UNREACHABLE(); - } - - // The move has been emitted, we can eliminate it. - moves_[index].Eliminate(); -} - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/arm64/lithium-gap-resolver-arm64.h b/src/crankshaft/arm64/lithium-gap-resolver-arm64.h deleted file mode 100644 index 28f0990fe2..0000000000 --- a/src/crankshaft/arm64/lithium-gap-resolver-arm64.h +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_ -#define V8_CRANKSHAFT_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_ - -#include "src/crankshaft/arm64/delayed-masm-arm64.h" -#include "src/crankshaft/lithium.h" - -namespace v8 { -namespace internal { - -class LCodeGen; -class LGapResolver; - -class DelayedGapMasm : public DelayedMasm { - public: - DelayedGapMasm(LCodeGen* owner, MacroAssembler* masm); - ~DelayedGapMasm(); - - void EndDelayedUse(); -}; - - -class LGapResolver BASE_EMBEDDED { - public: - explicit LGapResolver(LCodeGen* owner); - - // Resolve a set of parallel moves, emitting assembler instructions. - void Resolve(LParallelMove* parallel_move); - - private: - // Build the initial list of moves. - void BuildInitialMoveList(LParallelMove* parallel_move); - - // Perform the move at the moves_ index in question (possibly requiring - // other moves to satisfy dependencies). - void PerformMove(int index); - - // If a cycle is found in the series of moves, save the blocking value to - // a scratch register. The cycle must be found by hitting the root of the - // depth-first search. - void BreakCycle(int index); - - // After a cycle has been resolved, restore the value from the scratch - // register to its proper destination. - void RestoreValue(); - - // Emit a move and remove it from the move graph. - void EmitMove(int index); - - // Emit a move from one stack slot to another. - void EmitStackSlotMove(int index) { - masm_.StackSlotMove(moves_[index].source(), moves_[index].destination()); - } - - // Verify the move list before performing moves. 
- void Verify(); - - // Registers used to solve cycles. - const Register& SavedValueRegister() { - DCHECK(!RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode( - masm_.ScratchRegister().code())); - return masm_.ScratchRegister(); - } - // The scratch register is used to break cycles and to store constant. - // These two methods switch from one mode to the other. - void AcquireSavedValueRegister() { masm_.AcquireScratchRegister(); } - void ReleaseSavedValueRegister() { masm_.ReleaseScratchRegister(); } - const VRegister& SavedFPValueRegister() { - // We use the Crankshaft floating-point scratch register to break a cycle - // involving double values as the MacroAssembler will not need it for the - // operations performed by the gap resolver. - DCHECK(!RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode( - crankshaft_fp_scratch.code())); - return crankshaft_fp_scratch; - } - - LCodeGen* cgen_; - DelayedGapMasm masm_; - - // List of moves not yet resolved. - ZoneList moves_; - - int root_index_; - bool in_cycle_; - LOperand* saved_destination_; -}; - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_ diff --git a/src/crankshaft/compilation-phase.cc b/src/crankshaft/compilation-phase.cc deleted file mode 100644 index 11300701b0..0000000000 --- a/src/crankshaft/compilation-phase.cc +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2016 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/compilation-phase.h" - -#include "src/crankshaft/hydrogen.h" -#include "src/isolate.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - -CompilationPhase::CompilationPhase(const char* name, CompilationInfo* info) - : name_(name), info_(info), zone_(info->isolate()->allocator(), ZONE_NAME) { - if (FLAG_hydrogen_stats) { - info_zone_start_allocation_size_ = info->zone()->allocation_size(); - timer_.Start(); - } -} - -CompilationPhase::~CompilationPhase() { - if (FLAG_hydrogen_stats) { - size_t size = zone()->allocation_size(); - size += info_->zone()->allocation_size() - info_zone_start_allocation_size_; - isolate()->GetHStatistics()->SaveTiming(name_, timer_.Elapsed(), size); - } -} - -bool CompilationPhase::ShouldProduceTraceOutput() const { - // Trace if the appropriate trace flag is set and the phase name's first - // character is in the FLAG_trace_phase command line parameter. - AllowHandleDereference allow_deref; - bool tracing_on = - info()->IsStub() - ? FLAG_trace_hydrogen_stubs - : (FLAG_trace_hydrogen && - info()->shared_info()->PassesFilter(FLAG_trace_hydrogen_filter)); - return (tracing_on && - base::OS::StrChr(const_cast(FLAG_trace_phase), name_[0]) != - NULL); -} - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/compilation-phase.h b/src/crankshaft/compilation-phase.h deleted file mode 100644 index 8d6468d4dc..0000000000 --- a/src/crankshaft/compilation-phase.h +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2016 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#ifndef V8_CRANKSHAFT_COMPILATION_PHASE_H_ -#define V8_CRANKSHAFT_COMPILATION_PHASE_H_ - -#include "src/allocation.h" -#include "src/base/platform/elapsed-timer.h" -#include "src/compilation-info.h" -#include "src/zone/zone.h" - -namespace v8 { -namespace internal { - -class CompilationPhase BASE_EMBEDDED { - public: - CompilationPhase(const char* name, CompilationInfo* info); - ~CompilationPhase(); - - protected: - bool ShouldProduceTraceOutput() const; - - const char* name() const { return name_; } - CompilationInfo* info() const { return info_; } - Isolate* isolate() const { return info()->isolate(); } - Zone* zone() { return &zone_; } - - private: - const char* name_; - CompilationInfo* info_; - Zone zone_; - size_t info_zone_start_allocation_size_; - base::ElapsedTimer timer_; - - DISALLOW_COPY_AND_ASSIGN(CompilationPhase); -}; - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_COMPILATION_PHASE_H_ diff --git a/src/crankshaft/hydrogen-alias-analysis.h b/src/crankshaft/hydrogen-alias-analysis.h deleted file mode 100644 index d06aabc76e..0000000000 --- a/src/crankshaft/hydrogen-alias-analysis.h +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_HYDROGEN_ALIAS_ANALYSIS_H_ -#define V8_CRANKSHAFT_HYDROGEN_ALIAS_ANALYSIS_H_ - -#include "src/crankshaft/hydrogen.h" - -namespace v8 { -namespace internal { - -enum HAliasing { - kMustAlias, - kMayAlias, - kNoAlias -}; - - -// Defines the interface to alias analysis for the rest of the compiler. -// A simple implementation can use only local reasoning, but a more powerful -// analysis might employ points-to analysis. -class HAliasAnalyzer : public ZoneObject { - public: - // Simple alias analysis distinguishes allocations, parameters, - // and constants using only local reasoning. - HAliasing Query(HValue* a, HValue* b) { - // The same SSA value always references the same object. - if (a == b) return kMustAlias; - - if (a->IsAllocate() || a->IsInnerAllocatedObject()) { - // Two non-identical allocations can never be aliases. - if (b->IsAllocate()) return kNoAlias; - if (b->IsInnerAllocatedObject()) return kNoAlias; - // An allocation can never alias a parameter or a constant. - if (b->IsParameter()) return kNoAlias; - if (b->IsConstant()) return kNoAlias; - } - if (b->IsAllocate() || b->IsInnerAllocatedObject()) { - // An allocation can never alias a parameter or a constant. - if (a->IsParameter()) return kNoAlias; - if (a->IsConstant()) return kNoAlias; - } - - // Constant objects can be distinguished statically. - if (a->IsConstant() && b->IsConstant()) { - return a->Equals(b) ? kMustAlias : kNoAlias; - } - return kMayAlias; - } - - // Checks whether the objects referred to by the given instructions may - // ever be aliases. Note that this is more conservative than checking - // {Query(a, b) == kMayAlias}, since this method considers kMustAlias - // objects to also be may-aliasing. 
- inline bool MayAlias(HValue* a, HValue* b) { - return Query(a, b) != kNoAlias; - } - - inline bool MustAlias(HValue* a, HValue* b) { - return Query(a, b) == kMustAlias; - } - - inline bool NoAlias(HValue* a, HValue* b) { - return Query(a, b) == kNoAlias; - } -}; - - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_HYDROGEN_ALIAS_ANALYSIS_H_ diff --git a/src/crankshaft/hydrogen-bce.cc b/src/crankshaft/hydrogen-bce.cc deleted file mode 100644 index 333fafbf13..0000000000 --- a/src/crankshaft/hydrogen-bce.cc +++ /dev/null @@ -1,479 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/hydrogen-bce.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - - -// We try to "factor up" HBoundsCheck instructions towards the root of the -// dominator tree. -// For now we handle checks where the index is like "exp + int32value". -// If in the dominator tree we check "exp + v1" and later (dominated) -// "exp + v2", if v2 <= v1 we can safely remove the second check, and if -// v2 > v1 we can use v2 in the 1st check and again remove the second. -// To do so we keep a dictionary of all checks where the key if the pair -// "exp, length". -// The class BoundsCheckKey represents this key. -class BoundsCheckKey : public ZoneObject { - public: - HValue* IndexBase() const { return index_base_; } - HValue* Length() const { return length_; } - - uint32_t Hash() { - return static_cast(index_base_->Hashcode() ^ length_->Hashcode()); - } - - static BoundsCheckKey* Create(Zone* zone, - HBoundsCheck* check, - int32_t* offset) { - if (!check->index()->representation().IsSmiOrInteger32()) return NULL; - - HValue* index_base = NULL; - HConstant* constant = NULL; - bool is_sub = false; - - if (check->index()->IsAdd()) { - HAdd* index = HAdd::cast(check->index()); - if (index->left()->IsConstant()) { - constant = HConstant::cast(index->left()); - index_base = index->right(); - } else if (index->right()->IsConstant()) { - constant = HConstant::cast(index->right()); - index_base = index->left(); - } - } else if (check->index()->IsSub()) { - HSub* index = HSub::cast(check->index()); - is_sub = true; - if (index->right()->IsConstant()) { - constant = HConstant::cast(index->right()); - index_base = index->left(); - } - } else if (check->index()->IsConstant()) { - index_base = check->block()->graph()->GetConstant0(); - constant = HConstant::cast(check->index()); - } - - if (constant != NULL && constant->HasInteger32Value() && - constant->Integer32Value() != kMinInt) { - *offset = is_sub ? - constant->Integer32Value() - : constant->Integer32Value(); - } else { - *offset = 0; - index_base = check->index(); - } - - return new(zone) BoundsCheckKey(index_base, check->length()); - } - - private: - BoundsCheckKey(HValue* index_base, HValue* length) - : index_base_(index_base), - length_(length) { } - - HValue* index_base_; - HValue* length_; - - DISALLOW_COPY_AND_ASSIGN(BoundsCheckKey); -}; - - -// Data about each HBoundsCheck that can be eliminated or moved. -// It is the "value" in the dictionary indexed by "base-index, length" -// (the key is BoundsCheckKey). -// We scan the code with a dominator tree traversal. -// Traversing the dominator tree we keep a stack (implemented as a singly -// linked list) of "data" for each basic block that contains a relevant check -// with the same key (the dictionary holds the head of the list). 
-// We also keep all the "data" created for a given basic block in a list, and -// use it to "clean up" the dictionary when backtracking in the dominator tree -// traversal. -// Doing this each dictionary entry always directly points to the check that -// is dominating the code being examined now. -// We also track the current "offset" of the index expression and use it to -// decide if any check is already "covered" (so it can be removed) or not. -class BoundsCheckBbData: public ZoneObject { - public: - BoundsCheckKey* Key() const { return key_; } - int32_t LowerOffset() const { return lower_offset_; } - int32_t UpperOffset() const { return upper_offset_; } - HBasicBlock* BasicBlock() const { return basic_block_; } - HBoundsCheck* LowerCheck() const { return lower_check_; } - HBoundsCheck* UpperCheck() const { return upper_check_; } - BoundsCheckBbData* NextInBasicBlock() const { return next_in_bb_; } - BoundsCheckBbData* FatherInDominatorTree() const { return father_in_dt_; } - - bool OffsetIsCovered(int32_t offset) const { - return offset >= LowerOffset() && offset <= UpperOffset(); - } - - bool HasSingleCheck() { return lower_check_ == upper_check_; } - - void UpdateUpperOffsets(HBoundsCheck* check, int32_t offset) { - BoundsCheckBbData* data = FatherInDominatorTree(); - while (data != NULL && data->UpperCheck() == check) { - DCHECK(data->upper_offset_ < offset); - data->upper_offset_ = offset; - data = data->FatherInDominatorTree(); - } - } - - void UpdateLowerOffsets(HBoundsCheck* check, int32_t offset) { - BoundsCheckBbData* data = FatherInDominatorTree(); - while (data != NULL && data->LowerCheck() == check) { - DCHECK(data->lower_offset_ > offset); - data->lower_offset_ = offset; - data = data->FatherInDominatorTree(); - } - } - - // The goal of this method is to modify either upper_offset_ or - // lower_offset_ so that also new_offset is covered (the covered - // range grows). - // - // The precondition is that new_check follows UpperCheck() and - // LowerCheck() in the same basic block, and that new_offset is not - // covered (otherwise we could simply remove new_check). - // - // If HasSingleCheck() is true then new_check is added as "second check" - // (either upper or lower; note that HasSingleCheck() becomes false). - // Otherwise one of the current checks is modified so that it also covers - // new_offset, and new_check is removed. - void CoverCheck(HBoundsCheck* new_check, - int32_t new_offset) { - DCHECK(new_check->index()->representation().IsSmiOrInteger32()); - bool keep_new_check = false; - - if (new_offset > upper_offset_) { - upper_offset_ = new_offset; - if (HasSingleCheck()) { - keep_new_check = true; - upper_check_ = new_check; - } else { - TightenCheck(upper_check_, new_check, new_offset); - UpdateUpperOffsets(upper_check_, upper_offset_); - } - } else if (new_offset < lower_offset_) { - lower_offset_ = new_offset; - if (HasSingleCheck()) { - keep_new_check = true; - lower_check_ = new_check; - } else { - TightenCheck(lower_check_, new_check, new_offset); - UpdateLowerOffsets(lower_check_, lower_offset_); - } - } else { - // Should never have called CoverCheck() in this case. - UNREACHABLE(); - } - - if (!keep_new_check) { - if (FLAG_trace_bce) { - base::OS::Print("Eliminating check #%d after tightening\n", - new_check->id()); - } - new_check->block()->graph()->isolate()->counters()-> - bounds_checks_eliminated()->Increment(); - new_check->DeleteAndReplaceWith(new_check->ActualValue()); - } else { - HBoundsCheck* first_check = new_check == lower_check_ ? 
upper_check_ - : lower_check_; - if (FLAG_trace_bce) { - base::OS::Print("Moving second check #%d after first check #%d\n", - new_check->id(), first_check->id()); - } - // The length is guaranteed to be live at first_check. - DCHECK(new_check->length() == first_check->length()); - HInstruction* old_position = new_check->next(); - new_check->Unlink(); - new_check->InsertAfter(first_check); - MoveIndexIfNecessary(new_check->index(), new_check, old_position); - } - } - - BoundsCheckBbData(BoundsCheckKey* key, - int32_t lower_offset, - int32_t upper_offset, - HBasicBlock* bb, - HBoundsCheck* lower_check, - HBoundsCheck* upper_check, - BoundsCheckBbData* next_in_bb, - BoundsCheckBbData* father_in_dt) - : key_(key), - lower_offset_(lower_offset), - upper_offset_(upper_offset), - basic_block_(bb), - lower_check_(lower_check), - upper_check_(upper_check), - next_in_bb_(next_in_bb), - father_in_dt_(father_in_dt) { } - - private: - BoundsCheckKey* key_; - int32_t lower_offset_; - int32_t upper_offset_; - HBasicBlock* basic_block_; - HBoundsCheck* lower_check_; - HBoundsCheck* upper_check_; - BoundsCheckBbData* next_in_bb_; - BoundsCheckBbData* father_in_dt_; - - void MoveIndexIfNecessary(HValue* index_raw, - HBoundsCheck* insert_before, - HInstruction* end_of_scan_range) { - // index_raw can be HAdd(index_base, offset), HSub(index_base, offset), - // HConstant(offset) or index_base directly. - // In the latter case, no need to move anything. - if (index_raw->IsAdd() || index_raw->IsSub()) { - HArithmeticBinaryOperation* index = - HArithmeticBinaryOperation::cast(index_raw); - HValue* left_input = index->left(); - HValue* right_input = index->right(); - HValue* context = index->context(); - bool must_move_index = false; - bool must_move_left_input = false; - bool must_move_right_input = false; - bool must_move_context = false; - for (HInstruction* cursor = end_of_scan_range; cursor != insert_before;) { - if (cursor == left_input) must_move_left_input = true; - if (cursor == right_input) must_move_right_input = true; - if (cursor == context) must_move_context = true; - if (cursor == index) must_move_index = true; - if (cursor->previous() == NULL) { - cursor = cursor->block()->dominator()->end(); - } else { - cursor = cursor->previous(); - } - } - if (must_move_index) { - index->Unlink(); - index->InsertBefore(insert_before); - } - // The BCE algorithm only selects mergeable bounds checks that share - // the same "index_base", so we'll only ever have to move constants. - if (must_move_left_input) { - HConstant::cast(left_input)->Unlink(); - HConstant::cast(left_input)->InsertBefore(index); - } - if (must_move_right_input) { - HConstant::cast(right_input)->Unlink(); - HConstant::cast(right_input)->InsertBefore(index); - } - if (must_move_context) { - // Contexts are always constants. 
- HConstant::cast(context)->Unlink(); - HConstant::cast(context)->InsertBefore(index); - } - } else if (index_raw->IsConstant()) { - HConstant* index = HConstant::cast(index_raw); - bool must_move = false; - for (HInstruction* cursor = end_of_scan_range; cursor != insert_before;) { - if (cursor == index) must_move = true; - if (cursor->previous() == NULL) { - cursor = cursor->block()->dominator()->end(); - } else { - cursor = cursor->previous(); - } - } - if (must_move) { - index->Unlink(); - index->InsertBefore(insert_before); - } - } - } - - void TightenCheck(HBoundsCheck* original_check, - HBoundsCheck* tighter_check, - int32_t new_offset) { - DCHECK(original_check->length() == tighter_check->length()); - MoveIndexIfNecessary(tighter_check->index(), original_check, tighter_check); - original_check->ReplaceAllUsesWith(original_check->index()); - original_check->SetOperandAt(0, tighter_check->index()); - if (FLAG_trace_bce) { - base::OS::Print("Tightened check #%d with offset %d from #%d\n", - original_check->id(), new_offset, tighter_check->id()); - } - } - - DISALLOW_COPY_AND_ASSIGN(BoundsCheckBbData); -}; - - -static bool BoundsCheckKeyMatch(void* key1, void* key2) { - BoundsCheckKey* k1 = static_cast(key1); - BoundsCheckKey* k2 = static_cast(key2); - return k1->IndexBase() == k2->IndexBase() && k1->Length() == k2->Length(); -} - -BoundsCheckTable::BoundsCheckTable(Zone* zone) - : CustomMatcherZoneHashMap(BoundsCheckKeyMatch, - ZoneHashMap::kDefaultHashMapCapacity, - ZoneAllocationPolicy(zone)) {} - -BoundsCheckBbData** BoundsCheckTable::LookupOrInsert(BoundsCheckKey* key, - Zone* zone) { - return reinterpret_cast( - &(CustomMatcherZoneHashMap::LookupOrInsert(key, key->Hash(), - ZoneAllocationPolicy(zone)) - ->value)); -} - - -void BoundsCheckTable::Insert(BoundsCheckKey* key, - BoundsCheckBbData* data, - Zone* zone) { - CustomMatcherZoneHashMap::LookupOrInsert(key, key->Hash(), - ZoneAllocationPolicy(zone)) - ->value = data; -} - - -void BoundsCheckTable::Delete(BoundsCheckKey* key) { - Remove(key, key->Hash()); -} - - -class HBoundsCheckEliminationState { - public: - HBasicBlock* block_; - BoundsCheckBbData* bb_data_list_; - int index_; -}; - - -// Eliminates checks in bb and recursively in the dominated blocks. -// Also replace the results of check instructions with the original value, if -// the result is used. This is safe now, since we don't do code motion after -// this point. It enables better register allocation since the value produced -// by check instructions is really a copy of the original value. -void HBoundsCheckEliminationPhase::EliminateRedundantBoundsChecks( - HBasicBlock* entry) { - // Allocate the stack. - HBoundsCheckEliminationState* stack = - zone()->NewArray(graph()->blocks()->length()); - - // Explicitly push the entry block. - stack[0].block_ = entry; - stack[0].bb_data_list_ = PreProcessBlock(entry); - stack[0].index_ = 0; - int stack_depth = 1; - - // Implement depth-first traversal with a stack. - while (stack_depth > 0) { - int current = stack_depth - 1; - HBoundsCheckEliminationState* state = &stack[current]; - const ZoneList* children = state->block_->dominated_blocks(); - - if (state->index_ < children->length()) { - // Recursively visit children blocks. - HBasicBlock* child = children->at(state->index_++); - int next = stack_depth++; - stack[next].block_ = child; - stack[next].bb_data_list_ = PreProcessBlock(child); - stack[next].index_ = 0; - } else { - // Finished with all children; post process the block. 
- PostProcessBlock(state->block_, state->bb_data_list_); - stack_depth--; - } - } -} - - -BoundsCheckBbData* HBoundsCheckEliminationPhase::PreProcessBlock( - HBasicBlock* bb) { - BoundsCheckBbData* bb_data_list = NULL; - - for (HInstructionIterator it(bb); !it.Done(); it.Advance()) { - HInstruction* i = it.Current(); - if (!i->IsBoundsCheck()) continue; - - HBoundsCheck* check = HBoundsCheck::cast(i); - int32_t offset = 0; - BoundsCheckKey* key = - BoundsCheckKey::Create(zone(), check, &offset); - if (key == NULL) continue; - BoundsCheckBbData** data_p = table_.LookupOrInsert(key, zone()); - BoundsCheckBbData* data = *data_p; - if (data == NULL) { - bb_data_list = new(zone()) BoundsCheckBbData(key, - offset, - offset, - bb, - check, - check, - bb_data_list, - NULL); - *data_p = bb_data_list; - if (FLAG_trace_bce) { - base::OS::Print("Fresh bounds check data for block #%d: [%d]\n", - bb->block_id(), offset); - } - } else if (data->OffsetIsCovered(offset)) { - bb->graph()->isolate()->counters()-> - bounds_checks_eliminated()->Increment(); - if (FLAG_trace_bce) { - base::OS::Print("Eliminating bounds check #%d, offset %d is covered\n", - check->id(), offset); - } - check->DeleteAndReplaceWith(check->ActualValue()); - } else if (data->BasicBlock() == bb) { - // TODO(jkummerow): I think the following logic would be preferable: - // if (data->Basicblock() == bb || - // graph()->use_optimistic_licm() || - // bb->IsLoopSuccessorDominator()) { - // data->CoverCheck(check, offset) - // } else { - // /* add pristine BCBbData like in (data == NULL) case above */ - // } - // Even better would be: distinguish between read-only dominator-imposed - // knowledge and modifiable upper/lower checks. - // What happens currently is that the first bounds check in a dominated - // block will stay around while any further checks are hoisted out, - // which doesn't make sense. Investigate/fix this in a future CL. - data->CoverCheck(check, offset); - } else if (graph()->use_optimistic_licm() || - bb->IsLoopSuccessorDominator()) { - int32_t new_lower_offset = offset < data->LowerOffset() - ? offset - : data->LowerOffset(); - int32_t new_upper_offset = offset > data->UpperOffset() - ? offset - : data->UpperOffset(); - bb_data_list = new(zone()) BoundsCheckBbData(key, - new_lower_offset, - new_upper_offset, - bb, - data->LowerCheck(), - data->UpperCheck(), - bb_data_list, - data); - if (FLAG_trace_bce) { - base::OS::Print("Updated bounds check data for block #%d: [%d - %d]\n", - bb->block_id(), new_lower_offset, new_upper_offset); - } - table_.Insert(key, bb_data_list, zone()); - } - } - - return bb_data_list; -} - - -void HBoundsCheckEliminationPhase::PostProcessBlock( - HBasicBlock* block, BoundsCheckBbData* data) { - while (data != NULL) { - if (data->FatherInDominatorTree()) { - table_.Insert(data->Key(), data->FatherInDominatorTree(), zone()); - } else { - table_.Delete(data->Key()); - } - data = data->NextInBasicBlock(); - } -} - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/hydrogen-bce.h b/src/crankshaft/hydrogen-bce.h deleted file mode 100644 index 237fb953f2..0000000000 --- a/src/crankshaft/hydrogen-bce.h +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#ifndef V8_CRANKSHAFT_HYDROGEN_BCE_H_ -#define V8_CRANKSHAFT_HYDROGEN_BCE_H_ - -#include "src/crankshaft/hydrogen.h" - -namespace v8 { -namespace internal { - - -class BoundsCheckBbData; -class BoundsCheckKey; -class BoundsCheckTable : private CustomMatcherZoneHashMap { - public: - explicit BoundsCheckTable(Zone* zone); - - INLINE(BoundsCheckBbData** LookupOrInsert(BoundsCheckKey* key, Zone* zone)); - INLINE(void Insert(BoundsCheckKey* key, BoundsCheckBbData* data, Zone* zone)); - INLINE(void Delete(BoundsCheckKey* key)); - - private: - DISALLOW_COPY_AND_ASSIGN(BoundsCheckTable); -}; - - -class HBoundsCheckEliminationPhase : public HPhase { - public: - explicit HBoundsCheckEliminationPhase(HGraph* graph) - : HPhase("H_Bounds checks elimination", graph), table_(zone()) { } - - void Run() { - EliminateRedundantBoundsChecks(graph()->entry_block()); - } - - private: - void EliminateRedundantBoundsChecks(HBasicBlock* bb); - BoundsCheckBbData* PreProcessBlock(HBasicBlock* bb); - void PostProcessBlock(HBasicBlock* bb, BoundsCheckBbData* data); - - BoundsCheckTable table_; - - DISALLOW_COPY_AND_ASSIGN(HBoundsCheckEliminationPhase); -}; - - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_HYDROGEN_BCE_H_ diff --git a/src/crankshaft/hydrogen-canonicalize.cc b/src/crankshaft/hydrogen-canonicalize.cc deleted file mode 100644 index 20e771763f..0000000000 --- a/src/crankshaft/hydrogen-canonicalize.cc +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/hydrogen-canonicalize.h" - -#include "src/counters.h" -#include "src/crankshaft/hydrogen-redundant-phi.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - -void HCanonicalizePhase::Run() { - const ZoneList* blocks(graph()->blocks()); - // Before removing no-op instructions, save their semantic value. - // We must be careful not to set the flag unnecessarily, because GVN - // cannot identify two instructions when their flag value differs. - for (int i = 0; i < blocks->length(); ++i) { - for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) { - HInstruction* instr = it.Current(); - if (instr->IsArithmeticBinaryOperation()) { - if (instr->representation().IsInteger32()) { - if (instr->HasAtLeastOneUseWithFlagAndNoneWithout( - HInstruction::kTruncatingToInt32)) { - instr->SetFlag(HInstruction::kAllUsesTruncatingToInt32); - } - } else if (instr->representation().IsSmi()) { - if (instr->HasAtLeastOneUseWithFlagAndNoneWithout( - HInstruction::kTruncatingToSmi)) { - instr->SetFlag(HInstruction::kAllUsesTruncatingToSmi); - } else if (instr->HasAtLeastOneUseWithFlagAndNoneWithout( - HInstruction::kTruncatingToInt32)) { - // Avoid redundant minus zero check - instr->SetFlag(HInstruction::kAllUsesTruncatingToInt32); - } - } - } - } - } - - // Perform actual Canonicalization pass. - HRedundantPhiEliminationPhase redundant_phi_eliminator(graph()); - for (int i = 0; i < blocks->length(); ++i) { - // Eliminate redundant phis in the block first; changes to their inputs - // might have made them redundant, and eliminating them creates more - // opportunities for constant folding and strength reduction. - redundant_phi_eliminator.ProcessBlock(blocks->at(i)); - // Now canonicalize each instruction. 
- for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) { - HInstruction* instr = it.Current(); - HValue* value = instr->Canonicalize(); - if (value != instr) instr->DeleteAndReplaceWith(value); - } - } -} - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/hydrogen-canonicalize.h b/src/crankshaft/hydrogen-canonicalize.h deleted file mode 100644 index a17557ac8b..0000000000 --- a/src/crankshaft/hydrogen-canonicalize.h +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_HYDROGEN_CANONICALIZE_H_ -#define V8_CRANKSHAFT_HYDROGEN_CANONICALIZE_H_ - -#include "src/crankshaft/hydrogen.h" - -namespace v8 { -namespace internal { - - -class HCanonicalizePhase : public HPhase { - public: - explicit HCanonicalizePhase(HGraph* graph) - : HPhase("H_Canonicalize", graph) { } - - void Run(); - - private: - DISALLOW_COPY_AND_ASSIGN(HCanonicalizePhase); -}; - - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_HYDROGEN_CANONICALIZE_H_ diff --git a/src/crankshaft/hydrogen-check-elimination.cc b/src/crankshaft/hydrogen-check-elimination.cc deleted file mode 100644 index aecd607ed4..0000000000 --- a/src/crankshaft/hydrogen-check-elimination.cc +++ /dev/null @@ -1,913 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/hydrogen-check-elimination.h" - -#include "src/crankshaft/hydrogen-alias-analysis.h" -#include "src/crankshaft/hydrogen-flow-engine.h" -#include "src/objects-inl.h" - -#define GLOBAL 1 - -// Only collect stats in debug mode. -#if DEBUG -#define INC_STAT(x) phase_->x++ -#else -#define INC_STAT(x) -#endif - -// For code de-uglification. -#define TRACE(x) if (FLAG_trace_check_elimination) PrintF x - -namespace v8 { -namespace internal { - -typedef const UniqueSet* MapSet; - -struct HCheckTableEntry { - enum State { - // We have seen a map check (i.e. an HCheckMaps) for these maps, so we can - // use this information to eliminate further map checks, elements kind - // transitions, etc. - CHECKED, - // Same as CHECKED, but we also know that these maps are stable. - CHECKED_STABLE, - // These maps are stable, but not checked (i.e. we learned this via field - // type tracking or from a constant, or they were initially CHECKED_STABLE, - // but became UNCHECKED_STABLE because of an instruction that changes maps - // or elements kind), and we need a stability check for them in order to use - // this information for check elimination (which turns them back to - // CHECKED_STABLE). - UNCHECKED_STABLE - }; - - static const char* State2String(State state) { - switch (state) { - case CHECKED: return "checked"; - case CHECKED_STABLE: return "checked stable"; - case UNCHECKED_STABLE: return "unchecked stable"; - } - UNREACHABLE(); - } - - static State StateMerge(State state1, State state2) { - if (state1 == state2) return state1; - if ((state1 == CHECKED && state2 == CHECKED_STABLE) || - (state2 == CHECKED && state1 == CHECKED_STABLE)) { - return CHECKED; - } - DCHECK((state1 == CHECKED_STABLE && state2 == UNCHECKED_STABLE) || - (state2 == CHECKED_STABLE && state1 == UNCHECKED_STABLE)); - return UNCHECKED_STABLE; - } - - HValue* object_; // The object being approximated. NULL => invalid entry. 
- HInstruction* check_; // The last check instruction. - MapSet maps_; // The set of known maps for the object. - State state_; // The state of this entry. -}; - - -// The main data structure used during check elimination, which stores a -// set of known maps for each object. -class HCheckTable : public ZoneObject { - public: - static const int kMaxTrackedObjects = 16; - - explicit HCheckTable(HCheckEliminationPhase* phase) - : phase_(phase), - cursor_(0), - size_(0) { - } - - // The main processing of instructions. - HCheckTable* Process(HInstruction* instr, Zone* zone) { - switch (instr->opcode()) { - case HValue::kCheckMaps: { - ReduceCheckMaps(HCheckMaps::cast(instr)); - break; - } - case HValue::kLoadNamedField: { - ReduceLoadNamedField(HLoadNamedField::cast(instr)); - break; - } - case HValue::kStoreNamedField: { - ReduceStoreNamedField(HStoreNamedField::cast(instr)); - break; - } - case HValue::kCompareMap: { - ReduceCompareMap(HCompareMap::cast(instr)); - break; - } - case HValue::kCompareObjectEqAndBranch: { - ReduceCompareObjectEqAndBranch(HCompareObjectEqAndBranch::cast(instr)); - break; - } - case HValue::kIsStringAndBranch: { - ReduceIsStringAndBranch(HIsStringAndBranch::cast(instr)); - break; - } - case HValue::kTransitionElementsKind: { - ReduceTransitionElementsKind( - HTransitionElementsKind::cast(instr)); - break; - } - case HValue::kCheckHeapObject: { - ReduceCheckHeapObject(HCheckHeapObject::cast(instr)); - break; - } - case HValue::kCheckInstanceType: { - ReduceCheckInstanceType(HCheckInstanceType::cast(instr)); - break; - } - default: { - // If the instruction changes maps uncontrollably, drop everything. - if (instr->CheckChangesFlag(kOsrEntries)) { - Kill(); - break; - } - if (instr->CheckChangesFlag(kElementsKind) || - instr->CheckChangesFlag(kMaps)) { - KillUnstableEntries(); - } - } - // Improvements possible: - // - eliminate redundant HCheckSmi instructions - // - track which values have been HCheckHeapObject'd - } - - return this; - } - - // Support for global analysis with HFlowEngine: Merge given state with - // the other incoming state. - static HCheckTable* Merge(HCheckTable* succ_state, HBasicBlock* succ_block, - HCheckTable* pred_state, HBasicBlock* pred_block, - Zone* zone) { - if (pred_state == NULL || pred_block->IsUnreachable()) { - return succ_state; - } - if (succ_state == NULL) { - return pred_state->Copy(succ_block, pred_block, zone); - } else { - return succ_state->Merge(succ_block, pred_state, pred_block, zone); - } - } - - // Support for global analysis with HFlowEngine: Given state merged with all - // the other incoming states, prepare it for use. - static HCheckTable* Finish(HCheckTable* state, HBasicBlock* block, - Zone* zone) { - if (state == NULL) { - block->MarkUnreachable(); - } else if (block->IsUnreachable()) { - state = NULL; - } - if (FLAG_trace_check_elimination) { - PrintF("Processing B%d, checkmaps-table:\n", block->block_id()); - Print(state); - } - return state; - } - - private: - // Copy state to successor block. - HCheckTable* Copy(HBasicBlock* succ, HBasicBlock* from_block, Zone* zone) { - HCheckTable* copy = new(zone) HCheckTable(phase_); - for (int i = 0; i < size_; i++) { - HCheckTableEntry* old_entry = &entries_[i]; - DCHECK(old_entry->maps_->size() > 0); - HCheckTableEntry* new_entry = ©->entries_[i]; - new_entry->object_ = old_entry->object_; - new_entry->maps_ = old_entry->maps_; - new_entry->state_ = old_entry->state_; - // Keep the check if the existing check's block dominates the successor. 
- if (old_entry->check_ != NULL && - old_entry->check_->block()->Dominates(succ)) { - new_entry->check_ = old_entry->check_; - } else { - // Leave it NULL till we meet a new check instruction for this object - // in the control flow. - new_entry->check_ = NULL; - } - } - copy->cursor_ = cursor_; - copy->size_ = size_; - - // Create entries for succ block's phis. - if (!succ->IsLoopHeader() && succ->phis()->length() > 0) { - int pred_index = succ->PredecessorIndexOf(from_block); - for (int phi_index = 0; - phi_index < succ->phis()->length(); - ++phi_index) { - HPhi* phi = succ->phis()->at(phi_index); - HValue* phi_operand = phi->OperandAt(pred_index); - - HCheckTableEntry* pred_entry = copy->Find(phi_operand); - if (pred_entry != NULL) { - // Create an entry for a phi in the table. - copy->Insert(phi, NULL, pred_entry->maps_, pred_entry->state_); - } - } - } - - // Branch-sensitive analysis for certain comparisons may add more facts - // to the state for the successor on the true branch. - bool learned = false; - if (succ->predecessors()->length() == 1) { - HControlInstruction* end = succ->predecessors()->at(0)->end(); - bool is_true_branch = end->SuccessorAt(0) == succ; - if (end->IsCompareMap()) { - HCompareMap* cmp = HCompareMap::cast(end); - HValue* object = cmp->value()->ActualValue(); - HCheckTableEntry* entry = copy->Find(object); - if (is_true_branch) { - HCheckTableEntry::State state = cmp->map_is_stable() - ? HCheckTableEntry::CHECKED_STABLE - : HCheckTableEntry::CHECKED; - // Learn on the true branch of if(CompareMap(x)). - if (entry == NULL) { - copy->Insert(object, cmp, cmp->map(), state); - } else { - entry->maps_ = new(zone) UniqueSet(cmp->map(), zone); - entry->check_ = cmp; - entry->state_ = state; - } - } else { - // Learn on the false branch of if(CompareMap(x)). - if (entry != NULL) { - EnsureChecked(entry, object, cmp); - UniqueSet* maps = entry->maps_->Copy(zone); - maps->Remove(cmp->map()); - entry->maps_ = maps; - DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_); - } - } - learned = true; - } else if (is_true_branch && end->IsCompareObjectEqAndBranch()) { - // Learn on the true branch of if(CmpObjectEq(x, y)). - HCompareObjectEqAndBranch* cmp = - HCompareObjectEqAndBranch::cast(end); - HValue* left = cmp->left()->ActualValue(); - HValue* right = cmp->right()->ActualValue(); - HCheckTableEntry* le = copy->Find(left); - HCheckTableEntry* re = copy->Find(right); - if (le == NULL) { - if (re != NULL) { - copy->Insert(left, NULL, re->maps_, re->state_); - } - } else if (re == NULL) { - copy->Insert(right, NULL, le->maps_, le->state_); - } else { - EnsureChecked(le, cmp->left(), cmp); - EnsureChecked(re, cmp->right(), cmp); - le->maps_ = re->maps_ = le->maps_->Intersect(re->maps_, zone); - le->state_ = re->state_ = HCheckTableEntry::StateMerge( - le->state_, re->state_); - DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, le->state_); - DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, re->state_); - } - learned = true; - } else if (end->IsIsStringAndBranch()) { - HIsStringAndBranch* cmp = HIsStringAndBranch::cast(end); - HValue* object = cmp->value()->ActualValue(); - HCheckTableEntry* entry = copy->Find(object); - if (is_true_branch) { - // Learn on the true branch of if(IsString(x)). 
- if (entry == NULL) { - copy->Insert(object, NULL, string_maps(), - HCheckTableEntry::CHECKED); - } else { - EnsureChecked(entry, object, cmp); - entry->maps_ = entry->maps_->Intersect(string_maps(), zone); - DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_); - } - } else { - // Learn on the false branch of if(IsString(x)). - if (entry != NULL) { - EnsureChecked(entry, object, cmp); - entry->maps_ = entry->maps_->Subtract(string_maps(), zone); - DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_); - } - } - } - // Learning on false branches requires storing negative facts. - } - - if (FLAG_trace_check_elimination) { - PrintF("B%d checkmaps-table %s from B%d:\n", - succ->block_id(), - learned ? "learned" : "copied", - from_block->block_id()); - Print(copy); - } - - return copy; - } - - // Merge this state with the other incoming state. - HCheckTable* Merge(HBasicBlock* succ, HCheckTable* that, - HBasicBlock* pred_block, Zone* zone) { - if (that->size_ == 0) { - // If the other state is empty, simply reset. - size_ = 0; - cursor_ = 0; - } else { - int pred_index = succ->PredecessorIndexOf(pred_block); - bool compact = false; - for (int i = 0; i < size_; i++) { - HCheckTableEntry* this_entry = &entries_[i]; - HCheckTableEntry* that_entry; - if (this_entry->object_->IsPhi() && - this_entry->object_->block() == succ) { - HPhi* phi = HPhi::cast(this_entry->object_); - HValue* phi_operand = phi->OperandAt(pred_index); - that_entry = that->Find(phi_operand); - - } else { - that_entry = that->Find(this_entry->object_); - } - - if (that_entry == NULL || - (that_entry->state_ == HCheckTableEntry::CHECKED && - this_entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) || - (this_entry->state_ == HCheckTableEntry::CHECKED && - that_entry->state_ == HCheckTableEntry::UNCHECKED_STABLE)) { - this_entry->object_ = NULL; - compact = true; - } else { - this_entry->maps_ = - this_entry->maps_->Union(that_entry->maps_, zone); - this_entry->state_ = HCheckTableEntry::StateMerge( - this_entry->state_, that_entry->state_); - if (this_entry->check_ != that_entry->check_) { - this_entry->check_ = NULL; - } - DCHECK(this_entry->maps_->size() > 0); - } - } - if (compact) Compact(); - } - - if (FLAG_trace_check_elimination) { - PrintF("B%d checkmaps-table merged with B%d table:\n", - succ->block_id(), pred_block->block_id()); - Print(this); - } - return this; - } - - void ReduceCheckMaps(HCheckMaps* instr) { - HValue* object = instr->value()->ActualValue(); - HCheckTableEntry* entry = Find(object); - if (entry != NULL) { - // entry found; - HGraph* graph = instr->block()->graph(); - if (entry->maps_->IsSubset(instr->maps())) { - // The first check is more strict; the second is redundant. 
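// Illustrative sketch, not part of the V8 sources being removed: the
// redundancy test above is a subset check. If the maps already proven for an
// object are contained in the maps a later CheckMaps would accept, the later
// check can never fail. std::set and the names below are assumptions made
// only for this sketch.
#include <algorithm>
#include <set>

using MapId = int;

bool LaterCheckIsRedundant(const std::set<MapId>& known_maps,
                           const std::set<MapId>& checked_maps) {
  // True when known_maps is a subset of checked_maps.
  return std::includes(checked_maps.begin(), checked_maps.end(),
                       known_maps.begin(), known_maps.end());
}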
- if (entry->check_ != NULL) { - DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_); - TRACE(("Replacing redundant CheckMaps #%d at B%d with #%d\n", - instr->id(), instr->block()->block_id(), entry->check_->id())); - instr->DeleteAndReplaceWith(entry->check_); - INC_STAT(redundant_); - } else if (entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) { - DCHECK_NULL(entry->check_); - TRACE(("Marking redundant CheckMaps #%d at B%d as stability check\n", - instr->id(), instr->block()->block_id())); - instr->set_maps(entry->maps_->Copy(graph->zone())); - instr->MarkAsStabilityCheck(); - entry->state_ = HCheckTableEntry::CHECKED_STABLE; - } else if (!instr->IsStabilityCheck()) { - TRACE(("Marking redundant CheckMaps #%d at B%d as dead\n", - instr->id(), instr->block()->block_id())); - // Mark check as dead but leave it in the graph as a checkpoint for - // subsequent checks. - instr->SetFlag(HValue::kIsDead); - entry->check_ = instr; - INC_STAT(removed_); - } - return; - } - MapSet intersection = instr->maps()->Intersect( - entry->maps_, graph->zone()); - if (intersection->size() == 0) { - // Intersection is empty; probably megamorphic. - INC_STAT(empty_); - entry->object_ = NULL; - Compact(); - } else { - // Update set of maps in the entry. - entry->maps_ = intersection; - // Update state of the entry. - if (instr->maps_are_stable() || - entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) { - entry->state_ = HCheckTableEntry::CHECKED_STABLE; - } - if (intersection->size() != instr->maps()->size()) { - // Narrow set of maps in the second check maps instruction. - if (entry->check_ != NULL && - entry->check_->block() == instr->block() && - entry->check_->IsCheckMaps()) { - // There is a check in the same block so replace it with a more - // strict check and eliminate the second check entirely. - HCheckMaps* check = HCheckMaps::cast(entry->check_); - DCHECK(!check->IsStabilityCheck()); - TRACE(("CheckMaps #%d at B%d narrowed\n", check->id(), - check->block()->block_id())); - // Update map set and ensure that the check is alive. - check->set_maps(intersection); - check->ClearFlag(HValue::kIsDead); - TRACE(("Replacing redundant CheckMaps #%d at B%d with #%d\n", - instr->id(), instr->block()->block_id(), entry->check_->id())); - instr->DeleteAndReplaceWith(entry->check_); - } else { - TRACE(("CheckMaps #%d at B%d narrowed\n", instr->id(), - instr->block()->block_id())); - instr->set_maps(intersection); - entry->check_ = instr->IsStabilityCheck() ? NULL : instr; - } - - if (FLAG_trace_check_elimination) { - Print(this); - } - INC_STAT(narrowed_); - } - } - } else { - // No entry; insert a new one. - HCheckTableEntry::State state = instr->maps_are_stable() - ? HCheckTableEntry::CHECKED_STABLE - : HCheckTableEntry::CHECKED; - HCheckMaps* check = instr->IsStabilityCheck() ? NULL : instr; - Insert(object, check, instr->maps(), state); - } - } - - void ReduceCheckInstanceType(HCheckInstanceType* instr) { - HValue* value = instr->value()->ActualValue(); - HCheckTableEntry* entry = Find(value); - if (entry == NULL) { - if (instr->check() == HCheckInstanceType::IS_STRING) { - Insert(value, NULL, string_maps(), HCheckTableEntry::CHECKED); - } - return; - } - UniqueSet* maps = new(zone()) UniqueSet( - entry->maps_->size(), zone()); - for (int i = 0; i < entry->maps_->size(); ++i) { - InstanceType type; - Unique map = entry->maps_->at(i); - { - // This is safe, because maps don't move and their instance type does - // not change. 
- AllowHandleDereference allow_deref; - type = map.handle()->instance_type(); - } - if (instr->is_interval_check()) { - InstanceType first_type, last_type; - instr->GetCheckInterval(&first_type, &last_type); - if (first_type <= type && type <= last_type) maps->Add(map, zone()); - } else { - uint8_t mask, tag; - instr->GetCheckMaskAndTag(&mask, &tag); - if ((type & mask) == tag) maps->Add(map, zone()); - } - } - if (maps->size() == entry->maps_->size()) { - TRACE(("Removing redundant CheckInstanceType #%d at B%d\n", - instr->id(), instr->block()->block_id())); - EnsureChecked(entry, value, instr); - instr->DeleteAndReplaceWith(value); - INC_STAT(removed_cit_); - } else if (maps->size() != 0) { - entry->maps_ = maps; - if (entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) { - entry->state_ = HCheckTableEntry::CHECKED_STABLE; - } - } - } - - void ReduceLoadNamedField(HLoadNamedField* instr) { - // Reduce a load of the map field when it is known to be a constant. - if (!instr->access().IsMap()) { - // Check if we introduce field maps here. - MapSet maps = instr->maps(); - if (maps != NULL) { - DCHECK_NE(0, maps->size()); - Insert(instr, NULL, maps, HCheckTableEntry::UNCHECKED_STABLE); - } - return; - } - - HValue* object = instr->object()->ActualValue(); - HCheckTableEntry* entry = Find(object); - if (entry == NULL || entry->maps_->size() != 1) return; // Not a constant. - - EnsureChecked(entry, object, instr); - Unique map = entry->maps_->at(0); - bool map_is_stable = (entry->state_ != HCheckTableEntry::CHECKED); - HConstant* constant = HConstant::CreateAndInsertBefore( - instr->block()->graph()->zone(), map, map_is_stable, instr); - instr->DeleteAndReplaceWith(constant); - INC_STAT(loads_); - } - - void ReduceCheckHeapObject(HCheckHeapObject* instr) { - HValue* value = instr->value()->ActualValue(); - if (Find(value) != NULL) { - // If the object has known maps, it's definitely a heap object. - instr->DeleteAndReplaceWith(value); - INC_STAT(removed_cho_); - } - } - - void ReduceStoreNamedField(HStoreNamedField* instr) { - HValue* object = instr->object()->ActualValue(); - if (instr->has_transition()) { - // This store transitions the object to a new map. - Kill(object); - HConstant* c_transition = HConstant::cast(instr->transition()); - HCheckTableEntry::State state = c_transition->HasStableMapValue() - ? HCheckTableEntry::CHECKED_STABLE - : HCheckTableEntry::CHECKED; - Insert(object, NULL, c_transition->MapValue(), state); - } else if (instr->access().IsMap()) { - // This is a store directly to the map field of the object. - Kill(object); - if (!instr->value()->IsConstant()) return; - HConstant* c_value = HConstant::cast(instr->value()); - HCheckTableEntry::State state = c_value->HasStableMapValue() - ? HCheckTableEntry::CHECKED_STABLE - : HCheckTableEntry::CHECKED; - Insert(object, NULL, c_value->MapValue(), state); - } else { - // If the instruction changes maps, it should be handled above. 
- CHECK(!instr->CheckChangesFlag(kMaps)); - } - } - - void ReduceCompareMap(HCompareMap* instr) { - HCheckTableEntry* entry = Find(instr->value()->ActualValue()); - if (entry == NULL) return; - - EnsureChecked(entry, instr->value(), instr); - - int succ; - if (entry->maps_->Contains(instr->map())) { - if (entry->maps_->size() != 1) { - TRACE(("CompareMap #%d for #%d at B%d can't be eliminated: " - "ambiguous set of maps\n", instr->id(), instr->value()->id(), - instr->block()->block_id())); - return; - } - succ = 0; - INC_STAT(compares_true_); - } else { - succ = 1; - INC_STAT(compares_false_); - } - - TRACE(("Marking redundant CompareMap #%d for #%d at B%d as %s\n", - instr->id(), instr->value()->id(), instr->block()->block_id(), - succ == 0 ? "true" : "false")); - instr->set_known_successor_index(succ); - - int unreachable_succ = 1 - succ; - instr->block()->MarkSuccEdgeUnreachable(unreachable_succ); - } - - void ReduceCompareObjectEqAndBranch(HCompareObjectEqAndBranch* instr) { - HValue* left = instr->left()->ActualValue(); - HCheckTableEntry* le = Find(left); - if (le == NULL) return; - HValue* right = instr->right()->ActualValue(); - HCheckTableEntry* re = Find(right); - if (re == NULL) return; - - EnsureChecked(le, left, instr); - EnsureChecked(re, right, instr); - - // TODO(bmeurer): Add a predicate here instead of computing the intersection - MapSet intersection = le->maps_->Intersect(re->maps_, zone()); - if (intersection->size() > 0) return; - - TRACE(("Marking redundant CompareObjectEqAndBranch #%d at B%d as false\n", - instr->id(), instr->block()->block_id())); - int succ = 1; - instr->set_known_successor_index(succ); - - int unreachable_succ = 1 - succ; - instr->block()->MarkSuccEdgeUnreachable(unreachable_succ); - } - - void ReduceIsStringAndBranch(HIsStringAndBranch* instr) { - HValue* value = instr->value()->ActualValue(); - HCheckTableEntry* entry = Find(value); - if (entry == NULL) return; - EnsureChecked(entry, value, instr); - int succ; - if (entry->maps_->IsSubset(string_maps())) { - TRACE(("Marking redundant IsStringAndBranch #%d at B%d as true\n", - instr->id(), instr->block()->block_id())); - succ = 0; - } else { - MapSet intersection = entry->maps_->Intersect(string_maps(), zone()); - if (intersection->size() > 0) return; - TRACE(("Marking redundant IsStringAndBranch #%d at B%d as false\n", - instr->id(), instr->block()->block_id())); - succ = 1; - } - instr->set_known_successor_index(succ); - int unreachable_succ = 1 - succ; - instr->block()->MarkSuccEdgeUnreachable(unreachable_succ); - } - - void ReduceTransitionElementsKind(HTransitionElementsKind* instr) { - HValue* object = instr->object()->ActualValue(); - HCheckTableEntry* entry = Find(object); - // Can only learn more about an object that already has a known set of maps. - if (entry == NULL) { - Kill(object); - return; - } - EnsureChecked(entry, object, instr); - if (entry->maps_->Contains(instr->original_map())) { - // If the object has the original map, it will be transitioned. - UniqueSet* maps = entry->maps_->Copy(zone()); - maps->Remove(instr->original_map()); - maps->Add(instr->transitioned_map(), zone()); - HCheckTableEntry::State state = - (entry->state_ == HCheckTableEntry::CHECKED_STABLE && - instr->map_is_stable()) - ? HCheckTableEntry::CHECKED_STABLE - : HCheckTableEntry::CHECKED; - Kill(object); - Insert(object, NULL, maps, state); - } else { - // Object does not have the given map, thus the transition is redundant. 
- instr->DeleteAndReplaceWith(object); - INC_STAT(transitions_); - } - } - - void EnsureChecked(HCheckTableEntry* entry, - HValue* value, - HInstruction* instr) { - if (entry->state_ != HCheckTableEntry::UNCHECKED_STABLE) return; - HGraph* graph = instr->block()->graph(); - HCheckMaps* check = HCheckMaps::CreateAndInsertBefore( - graph->zone(), value, entry->maps_->Copy(graph->zone()), true, instr); - check->MarkAsStabilityCheck(); - entry->state_ = HCheckTableEntry::CHECKED_STABLE; - entry->check_ = NULL; - } - - // Kill everything in the table. - void Kill() { - size_ = 0; - cursor_ = 0; - } - - // Kill all unstable entries in the table. - void KillUnstableEntries() { - bool compact = false; - for (int i = 0; i < size_; ++i) { - HCheckTableEntry* entry = &entries_[i]; - DCHECK_NOT_NULL(entry->object_); - if (entry->state_ == HCheckTableEntry::CHECKED) { - entry->object_ = NULL; - compact = true; - } else { - // All checked stable entries become unchecked stable. - entry->state_ = HCheckTableEntry::UNCHECKED_STABLE; - entry->check_ = NULL; - } - } - if (compact) Compact(); - } - - // Kill everything in the table that may alias {object}. - void Kill(HValue* object) { - bool compact = false; - for (int i = 0; i < size_; i++) { - HCheckTableEntry* entry = &entries_[i]; - DCHECK_NOT_NULL(entry->object_); - if (phase_->aliasing_->MayAlias(entry->object_, object)) { - entry->object_ = NULL; - compact = true; - } - } - if (compact) Compact(); - DCHECK_NULL(Find(object)); - } - - void Compact() { - // First, compact the array in place. - int max = size_, dest = 0, old_cursor = cursor_; - for (int i = 0; i < max; i++) { - if (entries_[i].object_ != NULL) { - if (dest != i) entries_[dest] = entries_[i]; - dest++; - } else { - if (i < old_cursor) cursor_--; - size_--; - } - } - DCHECK(size_ == dest); - DCHECK(cursor_ <= size_); - - // Preserve the age of the entries by moving the older entries to the end. - if (cursor_ == size_) return; // Cursor already points at end. - if (cursor_ != 0) { - // | L = oldest | R = newest | | - // ^ cursor ^ size ^ MAX - HCheckTableEntry tmp_entries[kMaxTrackedObjects]; - int L = cursor_; - int R = size_ - cursor_; - - MemMove(&tmp_entries[0], &entries_[0], L * sizeof(HCheckTableEntry)); - MemMove(&entries_[0], &entries_[L], R * sizeof(HCheckTableEntry)); - MemMove(&entries_[R], &tmp_entries[0], L * sizeof(HCheckTableEntry)); - } - - cursor_ = size_; // Move cursor to end. - } - - static void Print(HCheckTable* table) { - if (table == NULL) { - PrintF(" unreachable\n"); - return; - } - - for (int i = 0; i < table->size_; i++) { - HCheckTableEntry* entry = &table->entries_[i]; - DCHECK(entry->object_ != NULL); - PrintF(" checkmaps-table @%d: %s #%d ", i, - entry->object_->IsPhi() ? "phi" : "object", entry->object_->id()); - if (entry->check_ != NULL) { - PrintF("check #%d ", entry->check_->id()); - } - MapSet list = entry->maps_; - PrintF("%d %s maps { ", list->size(), - HCheckTableEntry::State2String(entry->state_)); - for (int j = 0; j < list->size(); j++) { - if (j > 0) PrintF(", "); - PrintF("%" V8PRIxPTR, list->at(j).Hashcode()); - } - PrintF(" }\n"); - } - } - - HCheckTableEntry* Find(HValue* object) { - for (int i = size_ - 1; i >= 0; i--) { - // Search from most-recently-inserted to least-recently-inserted. 
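// Illustrative sketch, not part of the V8 sources being removed: the check
// table holds at most kMaxTrackedObjects facts; insertion wraps a cursor so
// the newest fact overwrites the oldest once the table is full, and lookup
// scans from newest to oldest. The container below is an invented stand-in
// for that behaviour.
#include <array>
#include <cstddef>

template <typename Entry, std::size_t kCapacity>
class BoundedFactTable {
 public:
  void Insert(const Entry& entry) {
    entries_[cursor_] = entry;
    cursor_ = (cursor_ + 1) % kCapacity;  // Wrap and overwrite older entries.
    if (size_ < kCapacity) ++size_;
  }

  template <typename Predicate>
  Entry* Find(Predicate matches) {
    for (std::size_t age = 1; age <= size_; ++age) {
      // age == 1 is the most recently inserted entry.
      Entry* entry = &entries_[(cursor_ + kCapacity - age) % kCapacity];
      if (matches(*entry)) return entry;
    }
    return nullptr;
  }

 private:
  std::array<Entry, kCapacity> entries_{};  // Entry must be default-constructible.
  std::size_t cursor_ = 0;                  // Next slot to (over)write.
  std::size_t size_ = 0;                    // Valid entries, <= kCapacity.
};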
- HCheckTableEntry* entry = &entries_[i]; - DCHECK(entry->object_ != NULL); - if (phase_->aliasing_->MustAlias(entry->object_, object)) return entry; - } - return NULL; - } - - void Insert(HValue* object, - HInstruction* check, - Unique map, - HCheckTableEntry::State state) { - Insert(object, check, new(zone()) UniqueSet(map, zone()), state); - } - - void Insert(HValue* object, - HInstruction* check, - MapSet maps, - HCheckTableEntry::State state) { - DCHECK(state != HCheckTableEntry::UNCHECKED_STABLE || check == NULL); - HCheckTableEntry* entry = &entries_[cursor_++]; - entry->object_ = object; - entry->check_ = check; - entry->maps_ = maps; - entry->state_ = state; - // If the table becomes full, wrap around and overwrite older entries. - if (cursor_ == kMaxTrackedObjects) cursor_ = 0; - if (size_ < kMaxTrackedObjects) size_++; - } - - Zone* zone() const { return phase_->zone(); } - MapSet string_maps() const { return phase_->string_maps(); } - - friend class HCheckMapsEffects; - friend class HCheckEliminationPhase; - - HCheckEliminationPhase* phase_; - HCheckTableEntry entries_[kMaxTrackedObjects]; - int16_t cursor_; // Must be <= kMaxTrackedObjects - int16_t size_; // Must be <= kMaxTrackedObjects - STATIC_ASSERT(kMaxTrackedObjects < (1 << 15)); -}; - - -// Collects instructions that can cause effects that invalidate information -// needed for check elimination. -class HCheckMapsEffects : public ZoneObject { - public: - explicit HCheckMapsEffects(Zone* zone) : objects_(0, zone) { } - - // Effects are _not_ disabled. - inline bool Disabled() const { return false; } - - // Process a possibly side-effecting instruction. - void Process(HInstruction* instr, Zone* zone) { - switch (instr->opcode()) { - case HValue::kStoreNamedField: { - HStoreNamedField* store = HStoreNamedField::cast(instr); - if (store->access().IsMap() || store->has_transition()) { - objects_.Add(store->object(), zone); - } - break; - } - case HValue::kTransitionElementsKind: { - objects_.Add(HTransitionElementsKind::cast(instr)->object(), zone); - break; - } - default: { - flags_.Add(instr->ChangesFlags()); - break; - } - } - } - - // Apply these effects to the given check elimination table. - void Apply(HCheckTable* table) { - if (flags_.Contains(kOsrEntries)) { - // Uncontrollable map modifications; kill everything. - table->Kill(); - return; - } - - // Kill all unstable entries. - if (flags_.Contains(kElementsKind) || flags_.Contains(kMaps)) { - table->KillUnstableEntries(); - } - - // Kill maps for each object contained in these effects. - for (int i = 0; i < objects_.length(); ++i) { - table->Kill(objects_[i]->ActualValue()); - } - } - - // Union these effects with the other effects. - void Union(HCheckMapsEffects* that, Zone* zone) { - flags_.Add(that->flags_); - for (int i = 0; i < that->objects_.length(); ++i) { - objects_.Add(that->objects_[i], zone); - } - } - - private: - ZoneList objects_; - GVNFlagSet flags_; -}; - - -// The main routine of the analysis phase. Use the HFlowEngine for either a -// local or a global analysis. -void HCheckEliminationPhase::Run() { - HFlowEngine engine(graph(), zone()); - HCheckTable* table = new(zone()) HCheckTable(this); - - if (GLOBAL) { - // Perform a global analysis. - engine.AnalyzeDominatedBlocks(graph()->blocks()->at(0), table); - } else { - // Perform only local analysis. 
- for (int i = 0; i < graph()->blocks()->length(); i++) { - table->Kill(); - engine.AnalyzeOneBlock(graph()->blocks()->at(i), table); - } - } - - if (FLAG_trace_check_elimination) PrintStats(); -} - - -// Are we eliminated yet? -void HCheckEliminationPhase::PrintStats() { -#if DEBUG - #define PRINT_STAT(x) if (x##_ > 0) PrintF(" %-16s = %2d\n", #x, x##_) -#else - #define PRINT_STAT(x) -#endif - PRINT_STAT(redundant); - PRINT_STAT(removed); - PRINT_STAT(removed_cho); - PRINT_STAT(removed_cit); - PRINT_STAT(narrowed); - PRINT_STAT(loads); - PRINT_STAT(empty); - PRINT_STAT(compares_true); - PRINT_STAT(compares_false); - PRINT_STAT(transitions); -} - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/hydrogen-check-elimination.h b/src/crankshaft/hydrogen-check-elimination.h deleted file mode 100644 index d6339df34c..0000000000 --- a/src/crankshaft/hydrogen-check-elimination.h +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_HYDROGEN_CHECK_ELIMINATION_H_ -#define V8_CRANKSHAFT_HYDROGEN_CHECK_ELIMINATION_H_ - -#include "src/crankshaft/hydrogen.h" -#include "src/crankshaft/hydrogen-alias-analysis.h" - -namespace v8 { -namespace internal { - - -// Remove CheckMaps instructions through flow- and branch-sensitive analysis. -class HCheckEliminationPhase : public HPhase { - public: - explicit HCheckEliminationPhase(HGraph* graph) - : HPhase("H_Check Elimination", graph), aliasing_(), - string_maps_(kStringMapsSize, zone()) { - // Compute the set of string maps. - #define ADD_STRING_MAP(type, size, name, Name) \ - string_maps_.Add(Unique::CreateImmovable( \ - graph->isolate()->factory()->name##_map()), zone()); - STRING_TYPE_LIST(ADD_STRING_MAP) - #undef ADD_STRING_MAP - DCHECK_EQ(kStringMapsSize, string_maps_.size()); -#ifdef DEBUG - redundant_ = 0; - removed_ = 0; - removed_cho_ = 0; - removed_cit_ = 0; - narrowed_ = 0; - loads_ = 0; - empty_ = 0; - compares_true_ = 0; - compares_false_ = 0; - transitions_ = 0; -#endif - } - - void Run(); - - friend class HCheckTable; - - private: - const UniqueSet* string_maps() const { return &string_maps_; } - - void PrintStats(); - - HAliasAnalyzer* aliasing_; - #define COUNT(type, size, name, Name) + 1 - static const int kStringMapsSize = 0 STRING_TYPE_LIST(COUNT); - #undef COUNT - UniqueSet string_maps_; -#ifdef DEBUG - int redundant_; - int removed_; - int removed_cho_; - int removed_cit_; - int narrowed_; - int loads_; - int empty_; - int compares_true_; - int compares_false_; - int transitions_; -#endif -}; - - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_HYDROGEN_CHECK_ELIMINATION_H_ diff --git a/src/crankshaft/hydrogen-dce.cc b/src/crankshaft/hydrogen-dce.cc deleted file mode 100644 index 60b41cda76..0000000000 --- a/src/crankshaft/hydrogen-dce.cc +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/hydrogen-dce.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - -void HDeadCodeEliminationPhase::MarkLive( - HValue* instr, ZoneList* worklist) { - if (instr->CheckFlag(HValue::kIsLive)) return; // Already live. - - if (FLAG_trace_dead_code_elimination) PrintLive(NULL, instr); - - // Transitively mark all inputs of live instructions live. 
- worklist->Add(instr, zone()); - while (!worklist->is_empty()) { - HValue* instr = worklist->RemoveLast(); - instr->SetFlag(HValue::kIsLive); - for (int i = 0; i < instr->OperandCount(); ++i) { - HValue* input = instr->OperandAt(i); - if (!input->CheckFlag(HValue::kIsLive)) { - input->SetFlag(HValue::kIsLive); - worklist->Add(input, zone()); - if (FLAG_trace_dead_code_elimination) PrintLive(instr, input); - } - } - } -} - - -void HDeadCodeEliminationPhase::PrintLive(HValue* ref, HValue* instr) { - AllowHandleDereference allow_deref; - OFStream os(stdout); - os << "[MarkLive "; - if (ref != NULL) { - os << *ref; - } else { - os << "root"; - } - os << " -> " << *instr << "]" << std::endl; -} - - -void HDeadCodeEliminationPhase::MarkLiveInstructions() { - ZoneList worklist(10, zone()); - - // Transitively mark all live instructions, starting from roots. - for (int i = 0; i < graph()->blocks()->length(); ++i) { - HBasicBlock* block = graph()->blocks()->at(i); - for (HInstructionIterator it(block); !it.Done(); it.Advance()) { - HInstruction* instr = it.Current(); - if (instr->CannotBeEliminated()) MarkLive(instr, &worklist); - } - for (int j = 0; j < block->phis()->length(); j++) { - HPhi* phi = block->phis()->at(j); - if (phi->CannotBeEliminated()) MarkLive(phi, &worklist); - } - } - - DCHECK(worklist.is_empty()); // Should have processed everything. -} - - -void HDeadCodeEliminationPhase::RemoveDeadInstructions() { - ZoneList worklist(graph()->blocks()->length(), zone()); - - // Remove any instruction not marked kIsLive. - for (int i = 0; i < graph()->blocks()->length(); ++i) { - HBasicBlock* block = graph()->blocks()->at(i); - for (HInstructionIterator it(block); !it.Done(); it.Advance()) { - HInstruction* instr = it.Current(); - if (!instr->CheckFlag(HValue::kIsLive)) { - // Instruction has not been marked live, so remove it. - instr->DeleteAndReplaceWith(NULL); - } else { - // Clear the liveness flag to leave the graph clean for the next DCE. - instr->ClearFlag(HValue::kIsLive); - } - } - // Collect phis that are dead and remove them in the next pass. - for (int j = 0; j < block->phis()->length(); j++) { - HPhi* phi = block->phis()->at(j); - if (!phi->CheckFlag(HValue::kIsLive)) { - worklist.Add(phi, zone()); - } else { - phi->ClearFlag(HValue::kIsLive); - } - } - } - - // Process phis separately to avoid simultaneously mutating the phi list. - while (!worklist.is_empty()) { - HPhi* phi = worklist.RemoveLast(); - HBasicBlock* block = phi->block(); - phi->DeleteAndReplaceWith(NULL); - if (phi->HasMergedIndex()) { - block->RecordDeletedPhi(phi->merged_index()); - } - } -} - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/hydrogen-dce.h b/src/crankshaft/hydrogen-dce.h deleted file mode 100644 index f620a3cfa8..0000000000 --- a/src/crankshaft/hydrogen-dce.h +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
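// Illustrative sketch, not part of the V8 sources being removed: the
// dead-code elimination pass above is a mark phase over a worklist. Roots are
// instructions that cannot be eliminated (side effects, control flow), their
// operands are transitively marked live, and anything left unmarked is
// deleted afterwards. The types below are invented for the sketch.
#include <vector>

struct Node {
  std::vector<Node*> operands;
  bool is_root = false;  // Plays the role of CannotBeEliminated().
  bool live = false;     // Plays the role of the kIsLive flag.
};

void MarkLiveNodes(const std::vector<Node*>& all_nodes) {
  std::vector<Node*> worklist;
  for (Node* node : all_nodes) {
    if (node->is_root && !node->live) {
      node->live = true;
      worklist.push_back(node);
    }
  }
  while (!worklist.empty()) {
    Node* node = worklist.back();
    worklist.pop_back();
    for (Node* input : node->operands) {
      if (!input->live) {
        input->live = true;  // Inputs of live instructions are live.
        worklist.push_back(input);
      }
    }
  }
}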
- -#ifndef V8_CRANKSHAFT_HYDROGEN_DCE_H_ -#define V8_CRANKSHAFT_HYDROGEN_DCE_H_ - -#include "src/crankshaft/hydrogen.h" - -namespace v8 { -namespace internal { - - -class HDeadCodeEliminationPhase : public HPhase { - public: - explicit HDeadCodeEliminationPhase(HGraph* graph) - : HPhase("H_Dead code elimination", graph) { } - - void Run() { - MarkLiveInstructions(); - RemoveDeadInstructions(); - } - - private: - void MarkLive(HValue* instr, ZoneList* worklist); - void PrintLive(HValue* ref, HValue* instr); - void MarkLiveInstructions(); - void RemoveDeadInstructions(); -}; - - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_HYDROGEN_DCE_H_ diff --git a/src/crankshaft/hydrogen-dehoist.cc b/src/crankshaft/hydrogen-dehoist.cc deleted file mode 100644 index 0fccecc4d3..0000000000 --- a/src/crankshaft/hydrogen-dehoist.cc +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/hydrogen-dehoist.h" - -#include "src/base/safe_math.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - -static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) { - HValue* index = array_operation->GetKey()->ActualValue(); - if (!index->representation().IsSmiOrInteger32()) return; - if (!index->IsAdd() && !index->IsSub()) return; - - HConstant* constant; - HValue* subexpression; - HBinaryOperation* binary_operation = HBinaryOperation::cast(index); - if (binary_operation->left()->IsConstant() && index->IsAdd()) { - subexpression = binary_operation->right(); - constant = HConstant::cast(binary_operation->left()); - } else if (binary_operation->right()->IsConstant()) { - subexpression = binary_operation->left(); - constant = HConstant::cast(binary_operation->right()); - } else { - return; - } - - if (!constant->HasInteger32Value()) return; - v8::base::internal::CheckedNumeric checked_value = - constant->Integer32Value(); - int32_t sign = binary_operation->IsSub() ? -1 : 1; - checked_value = checked_value * sign; - - // Multiply value by elements size, bailing out on overflow. - int32_t elements_kind_size = - 1 << ElementsKindToShiftSize(array_operation->elements_kind()); - checked_value = checked_value * elements_kind_size; - if (!checked_value.IsValid()) return; - int32_t value = checked_value.ValueOrDie(); - if (value < 0) return; - - // Ensure that the array operation can add value to existing base offset - // without overflowing. - if (!array_operation->TryIncreaseBaseOffset(value)) return; - - array_operation->SetKey(subexpression); - if (binary_operation->HasNoUses()) { - binary_operation->DeleteAndReplaceWith(NULL); - } - - array_operation->SetDehoisted(true); -} - - -void HDehoistIndexComputationsPhase::Run() { - const ZoneList* blocks(graph()->blocks()); - for (int i = 0; i < blocks->length(); ++i) { - for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) { - HInstruction* instr = it.Current(); - if (instr->IsLoadKeyed()) { - DehoistArrayIndex(HLoadKeyed::cast(instr)); - } else if (instr->IsStoreKeyed()) { - DehoistArrayIndex(HStoreKeyed::cast(instr)); - } - } - } -} - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/hydrogen-dehoist.h b/src/crankshaft/hydrogen-dehoist.h deleted file mode 100644 index d68f62cf7b..0000000000 --- a/src/crankshaft/hydrogen-dehoist.h +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2013 the V8 project authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_HYDROGEN_DEHOIST_H_ -#define V8_CRANKSHAFT_HYDROGEN_DEHOIST_H_ - -#include "src/crankshaft/hydrogen.h" - -namespace v8 { -namespace internal { - - -class HDehoistIndexComputationsPhase : public HPhase { - public: - explicit HDehoistIndexComputationsPhase(HGraph* graph) - : HPhase("H_Dehoist index computations", graph) { } - - void Run(); - - private: - DISALLOW_COPY_AND_ASSIGN(HDehoistIndexComputationsPhase); -}; - - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_HYDROGEN_DEHOIST_H_ diff --git a/src/crankshaft/hydrogen-environment-liveness.cc b/src/crankshaft/hydrogen-environment-liveness.cc deleted file mode 100644 index 215c3125cb..0000000000 --- a/src/crankshaft/hydrogen-environment-liveness.cc +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - - -#include "src/crankshaft/hydrogen-environment-liveness.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - - -HEnvironmentLivenessAnalysisPhase::HEnvironmentLivenessAnalysisPhase( - HGraph* graph) - : HPhase("H_Environment liveness analysis", graph), - block_count_(graph->blocks()->length()), - maximum_environment_size_(graph->maximum_environment_size()), - live_at_block_start_(block_count_, zone()), - first_simulate_(block_count_, zone()), - first_simulate_invalid_for_index_(block_count_, zone()), - markers_(maximum_environment_size_, zone()), - collect_markers_(true), - last_simulate_(NULL), - went_live_since_last_simulate_(maximum_environment_size_, zone()) { - DCHECK(maximum_environment_size_ > 0); - for (int i = 0; i < block_count_; ++i) { - live_at_block_start_.Add( - new(zone()) BitVector(maximum_environment_size_, zone()), zone()); - first_simulate_.Add(NULL, zone()); - first_simulate_invalid_for_index_.Add( - new(zone()) BitVector(maximum_environment_size_, zone()), zone()); - } -} - - -void HEnvironmentLivenessAnalysisPhase::ZapEnvironmentSlot( - int index, HSimulate* simulate) { - int operand_index = simulate->ToOperandIndex(index); - if (operand_index == -1) { - simulate->AddAssignedValue(index, graph()->GetConstantOptimizedOut()); - } else { - simulate->SetOperandAt(operand_index, graph()->GetConstantOptimizedOut()); - } -} - - -void HEnvironmentLivenessAnalysisPhase::ZapEnvironmentSlotsInSuccessors( - HBasicBlock* block, BitVector* live) { - // When a value is live in successor A but dead in B, we must - // explicitly zap it in B. 
- for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) { - HBasicBlock* successor = it.Current(); - int successor_id = successor->block_id(); - BitVector* live_in_successor = live_at_block_start_[successor_id]; - if (live_in_successor->Equals(*live)) continue; - for (int i = 0; i < live->length(); ++i) { - if (!live->Contains(i)) continue; - if (live_in_successor->Contains(i)) continue; - if (first_simulate_invalid_for_index_.at(successor_id)->Contains(i)) { - continue; - } - HSimulate* simulate = first_simulate_.at(successor_id); - if (simulate == NULL) continue; - DCHECK(VerifyClosures(simulate->closure(), - block->last_environment()->closure())); - ZapEnvironmentSlot(i, simulate); - } - } -} - - -void HEnvironmentLivenessAnalysisPhase::ZapEnvironmentSlotsForInstruction( - HEnvironmentMarker* marker) { - if (!marker->CheckFlag(HValue::kEndsLiveRange)) return; - HSimulate* simulate = marker->next_simulate(); - if (simulate != NULL) { - DCHECK(VerifyClosures(simulate->closure(), marker->closure())); - ZapEnvironmentSlot(marker->index(), simulate); - } -} - - -void HEnvironmentLivenessAnalysisPhase::UpdateLivenessAtBlockEnd( - HBasicBlock* block, - BitVector* live) { - // Liveness at the end of each block: union of liveness in successors. - live->Clear(); - for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) { - live->Union(*live_at_block_start_[it.Current()->block_id()]); - } -} - - -void HEnvironmentLivenessAnalysisPhase::UpdateLivenessAtInstruction( - HInstruction* instr, - BitVector* live) { - switch (instr->opcode()) { - case HValue::kEnvironmentMarker: { - HEnvironmentMarker* marker = HEnvironmentMarker::cast(instr); - int index = marker->index(); - if (!live->Contains(index)) { - marker->SetFlag(HValue::kEndsLiveRange); - } else { - marker->ClearFlag(HValue::kEndsLiveRange); - } - if (!went_live_since_last_simulate_.Contains(index)) { - marker->set_next_simulate(last_simulate_); - } - if (marker->kind() == HEnvironmentMarker::LOOKUP) { - live->Add(index); - } else { - DCHECK(marker->kind() == HEnvironmentMarker::BIND); - live->Remove(index); - went_live_since_last_simulate_.Add(index); - } - if (collect_markers_) { - // Populate |markers_| list during the first pass. - markers_.Add(marker, zone()); - } - break; - } - case HValue::kLeaveInlined: - // No environment values are live at the end of an inlined section. - live->Clear(); - last_simulate_ = NULL; - - // The following DCHECKs guard the assumption used in case - // kEnterInlined below: - DCHECK(instr->next()->IsSimulate()); - DCHECK(instr->next()->next()->IsGoto()); - - break; - case HValue::kEnterInlined: { - // Those environment values are live that are live at any return - // target block. Here we make use of the fact that the end of an - // inline sequence always looks like this: HLeaveInlined, HSimulate, - // HGoto (to return_target block), with no environment lookups in - // between (see DCHECKs above). - HEnterInlined* enter = HEnterInlined::cast(instr); - live->Clear(); - for (int i = 0; i < enter->return_targets()->length(); ++i) { - int return_id = enter->return_targets()->at(i)->block_id(); - live->Union(*live_at_block_start_[return_id]); - } - last_simulate_ = NULL; - break; - } - case HValue::kSimulate: - last_simulate_ = HSimulate::cast(instr); - went_live_since_last_simulate_.Clear(); - break; - default: - break; - } -} - - -void HEnvironmentLivenessAnalysisPhase::Run() { - DCHECK(maximum_environment_size_ > 0); - - // Main iteration. 
Compute liveness of environment slots, and store it - // for each block until it doesn't change any more. For efficiency, visit - // blocks in reverse order and walk backwards through each block. We - // need several iterations to propagate liveness through nested loops. - BitVector live(maximum_environment_size_, zone()); - BitVector worklist(block_count_, zone()); - for (int i = 0; i < block_count_; ++i) { - worklist.Add(i); - } - while (!worklist.IsEmpty()) { - for (int block_id = block_count_ - 1; block_id >= 0; --block_id) { - if (!worklist.Contains(block_id)) { - continue; - } - worklist.Remove(block_id); - last_simulate_ = NULL; - - HBasicBlock* block = graph()->blocks()->at(block_id); - UpdateLivenessAtBlockEnd(block, &live); - - for (HInstruction* instr = block->end(); instr != NULL; - instr = instr->previous()) { - UpdateLivenessAtInstruction(instr, &live); - } - - // Reached the start of the block, do necessary bookkeeping: - // store computed information for this block and add predecessors - // to the work list as necessary. - first_simulate_.Set(block_id, last_simulate_); - first_simulate_invalid_for_index_[block_id]->CopyFrom( - went_live_since_last_simulate_); - if (live_at_block_start_[block_id]->UnionIsChanged(live)) { - for (int i = 0; i < block->predecessors()->length(); ++i) { - worklist.Add(block->predecessors()->at(i)->block_id()); - } - } - } - // Only collect bind/lookup instructions during the first pass. - collect_markers_ = false; - } - - // Analysis finished. Zap dead environment slots. - for (int i = 0; i < markers_.length(); ++i) { - ZapEnvironmentSlotsForInstruction(markers_[i]); - } - for (int block_id = block_count_ - 1; block_id >= 0; --block_id) { - HBasicBlock* block = graph()->blocks()->at(block_id); - UpdateLivenessAtBlockEnd(block, &live); - ZapEnvironmentSlotsInSuccessors(block, &live); - } - - // Finally, remove the HEnvironment{Bind,Lookup} markers. - for (int i = 0; i < markers_.length(); ++i) { - markers_[i]->DeleteAndReplaceWith(NULL); - } -} - - -#ifdef DEBUG -bool HEnvironmentLivenessAnalysisPhase::VerifyClosures( - Handle a, Handle b) { - base::LockGuard guard(isolate()->heap()->relocation_mutex()); - AllowHandleDereference for_verification; - return a.is_identical_to(b); -} -#endif - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/hydrogen-environment-liveness.h b/src/crankshaft/hydrogen-environment-liveness.h deleted file mode 100644 index d9e156b7e9..0000000000 --- a/src/crankshaft/hydrogen-environment-liveness.h +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_HYDROGEN_ENVIRONMENT_LIVENESS_H_ -#define V8_CRANKSHAFT_HYDROGEN_ENVIRONMENT_LIVENESS_H_ - -#include "src/crankshaft/hydrogen.h" - -namespace v8 { -namespace internal { - - -// Trims live ranges of environment slots by doing explicit liveness analysis. -// Values in the environment are kept alive by every subsequent LInstruction -// that is assigned an LEnvironment, which creates register pressure and -// unnecessary spill slot moves. Therefore it is beneficial to trim the -// live ranges of environment slots by zapping them with a constant after -// the last lookup that refers to them. -// Slots are identified by their index and only affected if whitelisted in -// HOptimizedGraphBuilder::IsEligibleForEnvironmentLivenessAnalysis(). 
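// Illustrative sketch, not part of the V8 sources being removed: the analysis
// described above is a backward dataflow problem. A slot is live at block
// entry if a later lookup reads it before a bind overwrites it, and
// block-entry sets are recomputed until they stop changing. The bound
// kMaxSlots and the types below are assumptions made for the sketch.
#include <bitset>
#include <cstddef>
#include <vector>

constexpr std::size_t kMaxSlots = 64;
using SlotSet = std::bitset<kMaxSlots>;

struct SlotAccess {
  bool is_bind;      // A bind (definition) kills the slot, a lookup gens it.
  std::size_t slot;
};

struct SimpleBlock {
  std::vector<SlotAccess> accesses;  // In forward instruction order.
  std::vector<int> successor_ids;
};

SlotSet LiveAtBlockStart(const SimpleBlock& block,
                         const std::vector<SlotSet>& live_at_start) {
  SlotSet live;
  for (int succ : block.successor_ids) live |= live_at_start[succ];
  for (auto it = block.accesses.rbegin(); it != block.accesses.rend(); ++it) {
    if (it->is_bind) {
      live.reset(it->slot);  // The definition ends the live range.
    } else {
      live.set(it->slot);    // A lookup requires the slot to stay alive.
    }
  }
  return live;
}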
-class HEnvironmentLivenessAnalysisPhase : public HPhase { - public: - explicit HEnvironmentLivenessAnalysisPhase(HGraph* graph); - - void Run(); - - private: - void ZapEnvironmentSlot(int index, HSimulate* simulate); - void ZapEnvironmentSlotsInSuccessors(HBasicBlock* block, BitVector* live); - void ZapEnvironmentSlotsForInstruction(HEnvironmentMarker* marker); - void UpdateLivenessAtBlockEnd(HBasicBlock* block, BitVector* live); - void UpdateLivenessAtInstruction(HInstruction* instr, BitVector* live); -#ifdef DEBUG - bool VerifyClosures(Handle a, Handle b); -#endif - - int block_count_; - - // Largest number of local variables in any environment in the graph - // (including inlined environments). - int maximum_environment_size_; - - // Per-block data. All these lists are indexed by block_id. - ZoneList live_at_block_start_; - ZoneList first_simulate_; - ZoneList first_simulate_invalid_for_index_; - - // List of all HEnvironmentMarker instructions for quick iteration/deletion. - // It is populated during the first pass over the graph, controlled by - // |collect_markers_|. - ZoneList markers_; - bool collect_markers_; - - // Keeps track of the last simulate seen, as well as the environment slots - // for which a new live range has started since (so they must not be zapped - // in that simulate when the end of another live range of theirs is found). - HSimulate* last_simulate_; - BitVector went_live_since_last_simulate_; - - DISALLOW_COPY_AND_ASSIGN(HEnvironmentLivenessAnalysisPhase); -}; - - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_HYDROGEN_ENVIRONMENT_LIVENESS_H_ diff --git a/src/crankshaft/hydrogen-escape-analysis.cc b/src/crankshaft/hydrogen-escape-analysis.cc deleted file mode 100644 index baba926338..0000000000 --- a/src/crankshaft/hydrogen-escape-analysis.cc +++ /dev/null @@ -1,327 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
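// Illustrative sketch, not part of the V8 sources being removed: the escape
// analysis in the file below scalar-replaces an allocation only when no use
// lets the object escape (for example by storing it into the heap), no use
// reads past the allocated size, and every redefinition of the value passes
// the same test recursively. All types below are invented for the sketch.
#include <vector>

struct Value;

struct UseSite {
  Value* user = nullptr;
  bool escapes = false;        // Plays the role of HasEscapingOperandAt().
  bool out_of_bounds = false;  // Plays the role of HasOutOfBoundsAccess().
  bool redefines = false;      // The use redefines the same object.
};

struct Value {
  std::vector<UseSite> uses;
};

bool HasNoEscapingUses(const Value* value) {
  for (const UseSite& use : value->uses) {
    if (use.escapes || use.out_of_bounds) return false;
    if (use.redefines && !HasNoEscapingUses(use.user)) return false;
  }
  return true;
}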
- -#include "src/crankshaft/hydrogen-escape-analysis.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - - -bool HEscapeAnalysisPhase::HasNoEscapingUses(HValue* value, int size) { - for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) { - HValue* use = it.value(); - if (use->HasEscapingOperandAt(it.index())) { - if (FLAG_trace_escape_analysis) { - PrintF("#%d (%s) escapes through #%d (%s) @%d\n", value->id(), - value->Mnemonic(), use->id(), use->Mnemonic(), it.index()); - } - return false; - } - if (use->HasOutOfBoundsAccess(size)) { - if (FLAG_trace_escape_analysis) { - PrintF("#%d (%s) out of bounds at #%d (%s) @%d\n", value->id(), - value->Mnemonic(), use->id(), use->Mnemonic(), it.index()); - } - return false; - } - int redefined_index = use->RedefinedOperandIndex(); - if (redefined_index == it.index() && !HasNoEscapingUses(use, size)) { - if (FLAG_trace_escape_analysis) { - PrintF("#%d (%s) escapes redefinition #%d (%s) @%d\n", value->id(), - value->Mnemonic(), use->id(), use->Mnemonic(), it.index()); - } - return false; - } - } - return true; -} - - -void HEscapeAnalysisPhase::CollectCapturedValues() { - int block_count = graph()->blocks()->length(); - for (int i = 0; i < block_count; ++i) { - HBasicBlock* block = graph()->blocks()->at(i); - for (HInstructionIterator it(block); !it.Done(); it.Advance()) { - HInstruction* instr = it.Current(); - if (!instr->IsAllocate()) continue; - HAllocate* allocate = HAllocate::cast(instr); - if (!allocate->size()->IsInteger32Constant()) continue; - int size_in_bytes = allocate->size()->GetInteger32Constant(); - if (HasNoEscapingUses(instr, size_in_bytes)) { - if (FLAG_trace_escape_analysis) { - PrintF("#%d (%s) is being captured\n", instr->id(), - instr->Mnemonic()); - } - captured_.Add(instr, zone()); - } - } - } -} - - -HCapturedObject* HEscapeAnalysisPhase::NewState(HInstruction* previous) { - Zone* zone = graph()->zone(); - HCapturedObject* state = - new(zone) HCapturedObject(number_of_values_, number_of_objects_, zone); - state->InsertAfter(previous); - return state; -} - - -// Create a new state for replacing HAllocate instructions. -HCapturedObject* HEscapeAnalysisPhase::NewStateForAllocation( - HInstruction* previous) { - HConstant* undefined = graph()->GetConstantUndefined(); - HCapturedObject* state = NewState(previous); - for (int index = 0; index < number_of_values_; index++) { - state->SetOperandAt(index, undefined); - } - return state; -} - - -// Create a new state full of phis for loop header entries. -HCapturedObject* HEscapeAnalysisPhase::NewStateForLoopHeader( - HInstruction* previous, - HCapturedObject* old_state) { - HBasicBlock* block = previous->block(); - HCapturedObject* state = NewState(previous); - for (int index = 0; index < number_of_values_; index++) { - HValue* operand = old_state->OperandAt(index); - HPhi* phi = NewPhiAndInsert(block, operand, index); - state->SetOperandAt(index, phi); - } - return state; -} - - -// Create a new state by copying an existing one. -HCapturedObject* HEscapeAnalysisPhase::NewStateCopy( - HInstruction* previous, - HCapturedObject* old_state) { - HCapturedObject* state = NewState(previous); - for (int index = 0; index < number_of_values_; index++) { - HValue* operand = old_state->OperandAt(index); - state->SetOperandAt(index, operand); - } - return state; -} - - -// Insert a newly created phi into the given block and fill all incoming -// edges with the given value. 
-HPhi* HEscapeAnalysisPhase::NewPhiAndInsert(HBasicBlock* block, - HValue* incoming_value, - int index) { - Zone* zone = graph()->zone(); - HPhi* phi = new(zone) HPhi(HPhi::kInvalidMergedIndex, zone); - for (int i = 0; i < block->predecessors()->length(); i++) { - phi->AddInput(incoming_value); - } - block->AddPhi(phi); - return phi; -} - - -// Insert a newly created value check as a replacement for map checks. -HValue* HEscapeAnalysisPhase::NewMapCheckAndInsert(HCapturedObject* state, - HCheckMaps* mapcheck) { - Zone* zone = graph()->zone(); - HValue* value = state->map_value(); - // TODO(mstarzinger): This will narrow a map check against a set of maps - // down to the first element in the set. Revisit and fix this. - HCheckValue* check = HCheckValue::New(graph()->isolate(), zone, NULL, value, - mapcheck->maps()->at(0), false); - check->InsertBefore(mapcheck); - return check; -} - - -// Replace a field load with a given value, forcing Smi representation if -// necessary. -HValue* HEscapeAnalysisPhase::NewLoadReplacement( - HLoadNamedField* load, HValue* load_value) { - HValue* replacement = load_value; - Representation representation = load->representation(); - if (representation.IsSmiOrInteger32() || representation.IsDouble()) { - Zone* zone = graph()->zone(); - HInstruction* new_instr = HForceRepresentation::New( - graph()->isolate(), zone, NULL, load_value, representation); - new_instr->InsertAfter(load); - replacement = new_instr; - } - return replacement; -} - - -// Performs a forward data-flow analysis of all loads and stores on the -// given captured allocation. This uses a reverse post-order iteration -// over affected basic blocks. All non-escaping instructions are handled -// and replaced during the analysis. -void HEscapeAnalysisPhase::AnalyzeDataFlow(HInstruction* allocate) { - HBasicBlock* allocate_block = allocate->block(); - block_states_.AddBlock(NULL, graph()->blocks()->length(), zone()); - - // Iterate all blocks starting with the allocation block, since the - // allocation cannot dominate blocks that come before. - int start = allocate_block->block_id(); - for (int i = start; i < graph()->blocks()->length(); i++) { - HBasicBlock* block = graph()->blocks()->at(i); - HCapturedObject* state = StateAt(block); - - // Skip blocks that are not dominated by the captured allocation. - if (!allocate_block->Dominates(block) && allocate_block != block) continue; - if (FLAG_trace_escape_analysis) { - PrintF("Analyzing data-flow in B%d\n", block->block_id()); - } - - // Go through all instructions of the current block. 
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) { - HInstruction* instr = it.Current(); - switch (instr->opcode()) { - case HValue::kAllocate: { - if (instr != allocate) continue; - state = NewStateForAllocation(allocate); - break; - } - case HValue::kLoadNamedField: { - HLoadNamedField* load = HLoadNamedField::cast(instr); - int index = load->access().offset() / kPointerSize; - if (load->object() != allocate) continue; - DCHECK(load->access().IsInobject()); - HValue* replacement = - NewLoadReplacement(load, state->OperandAt(index)); - load->DeleteAndReplaceWith(replacement); - if (FLAG_trace_escape_analysis) { - PrintF("Replacing load #%d with #%d (%s)\n", load->id(), - replacement->id(), replacement->Mnemonic()); - } - break; - } - case HValue::kStoreNamedField: { - HStoreNamedField* store = HStoreNamedField::cast(instr); - int index = store->access().offset() / kPointerSize; - if (store->object() != allocate) continue; - DCHECK(store->access().IsInobject()); - state = NewStateCopy(store->previous(), state); - state->SetOperandAt(index, store->value()); - if (store->has_transition()) { - state->SetOperandAt(0, store->transition()); - } - if (store->HasObservableSideEffects()) { - state->ReuseSideEffectsFromStore(store); - } - store->DeleteAndReplaceWith(store->ActualValue()); - if (FLAG_trace_escape_analysis) { - PrintF("Replacing store #%d%s\n", instr->id(), - store->has_transition() ? " (with transition)" : ""); - } - break; - } - case HValue::kArgumentsObject: - case HValue::kCapturedObject: - case HValue::kSimulate: { - for (int i = 0; i < instr->OperandCount(); i++) { - if (instr->OperandAt(i) != allocate) continue; - instr->SetOperandAt(i, state); - } - break; - } - case HValue::kCheckHeapObject: { - HCheckHeapObject* check = HCheckHeapObject::cast(instr); - if (check->value() != allocate) continue; - check->DeleteAndReplaceWith(check->ActualValue()); - break; - } - case HValue::kCheckMaps: { - HCheckMaps* mapcheck = HCheckMaps::cast(instr); - if (mapcheck->value() != allocate) continue; - NewMapCheckAndInsert(state, mapcheck); - mapcheck->DeleteAndReplaceWith(mapcheck->ActualValue()); - break; - } - default: - // Nothing to see here, move along ... - break; - } - } - - // Propagate the block state forward to all successor blocks. - for (int i = 0; i < block->end()->SuccessorCount(); i++) { - HBasicBlock* succ = block->end()->SuccessorAt(i); - if (!allocate_block->Dominates(succ)) continue; - if (succ->predecessors()->length() == 1) { - // Case 1: This is the only predecessor, just reuse state. - SetStateAt(succ, state); - } else if (StateAt(succ) == NULL && succ->IsLoopHeader()) { - // Case 2: This is a state that enters a loop header, be - // pessimistic about loop headers, add phis for all values. - SetStateAt(succ, NewStateForLoopHeader(succ->first(), state)); - } else if (StateAt(succ) == NULL) { - // Case 3: This is the first state propagated forward to the - // successor, leave a copy of the current state. - SetStateAt(succ, NewStateCopy(succ->first(), state)); - } else { - // Case 4: This is a state that needs merging with previously - // propagated states, potentially introducing new phis lazily or - // adding values to existing phis. - HCapturedObject* succ_state = StateAt(succ); - for (int index = 0; index < number_of_values_; index++) { - HValue* operand = state->OperandAt(index); - HValue* succ_operand = succ_state->OperandAt(index); - if (succ_operand->IsPhi() && succ_operand->block() == succ) { - // Phi already exists, add operand. 
- HPhi* phi = HPhi::cast(succ_operand); - phi->SetOperandAt(succ->PredecessorIndexOf(block), operand); - } else if (succ_operand != operand) { - // Phi does not exist, introduce one. - HPhi* phi = NewPhiAndInsert(succ, succ_operand, index); - phi->SetOperandAt(succ->PredecessorIndexOf(block), operand); - succ_state->SetOperandAt(index, phi); - } - } - } - } - } - - // All uses have been handled. - DCHECK(allocate->HasNoUses()); - allocate->DeleteAndReplaceWith(NULL); -} - - -void HEscapeAnalysisPhase::PerformScalarReplacement() { - for (int i = 0; i < captured_.length(); i++) { - HAllocate* allocate = HAllocate::cast(captured_.at(i)); - - // Compute number of scalar values and start with clean slate. - int size_in_bytes = allocate->size()->GetInteger32Constant(); - number_of_values_ = size_in_bytes / kPointerSize; - number_of_objects_++; - block_states_.Rewind(0); - - // Perform actual analysis step. - AnalyzeDataFlow(allocate); - - cumulative_values_ += number_of_values_; - DCHECK(allocate->HasNoUses()); - DCHECK(!allocate->IsLinked()); - } -} - - -void HEscapeAnalysisPhase::Run() { - int max_fixpoint_iteration_count = FLAG_escape_analysis_iterations; - for (int i = 0; i < max_fixpoint_iteration_count; i++) { - CollectCapturedValues(); - if (captured_.is_empty()) break; - PerformScalarReplacement(); - captured_.Rewind(0); - } -} - - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/hydrogen-escape-analysis.h b/src/crankshaft/hydrogen-escape-analysis.h deleted file mode 100644 index 7dac6debe0..0000000000 --- a/src/crankshaft/hydrogen-escape-analysis.h +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_HYDROGEN_ESCAPE_ANALYSIS_H_ -#define V8_CRANKSHAFT_HYDROGEN_ESCAPE_ANALYSIS_H_ - -#include "src/allocation.h" -#include "src/crankshaft/hydrogen.h" - -namespace v8 { -namespace internal { - - -class HEscapeAnalysisPhase : public HPhase { - public: - explicit HEscapeAnalysisPhase(HGraph* graph) - : HPhase("H_Escape analysis", graph), - captured_(0, zone()), - number_of_objects_(0), - number_of_values_(0), - cumulative_values_(0), - block_states_(graph->blocks()->length(), zone()) { } - - void Run(); - - private: - void CollectCapturedValues(); - bool HasNoEscapingUses(HValue* value, int size); - void PerformScalarReplacement(); - void AnalyzeDataFlow(HInstruction* instr); - - HCapturedObject* NewState(HInstruction* prev); - HCapturedObject* NewStateForAllocation(HInstruction* prev); - HCapturedObject* NewStateForLoopHeader(HInstruction* prev, HCapturedObject*); - HCapturedObject* NewStateCopy(HInstruction* prev, HCapturedObject* state); - - HPhi* NewPhiAndInsert(HBasicBlock* block, HValue* incoming_value, int index); - - HValue* NewMapCheckAndInsert(HCapturedObject* state, HCheckMaps* mapcheck); - - HValue* NewLoadReplacement(HLoadNamedField* load, HValue* load_value); - - HCapturedObject* StateAt(HBasicBlock* block) { - return block_states_.at(block->block_id()); - } - - void SetStateAt(HBasicBlock* block, HCapturedObject* state) { - block_states_.Set(block->block_id(), state); - } - - // List of allocations captured during collection phase. - ZoneList captured_; - - // Number of captured objects on which scalar replacement was done. - int number_of_objects_; - - // Number of scalar values tracked during scalar replacement phase. 
- int number_of_values_; - int cumulative_values_; - - // Map of block IDs to the data-flow state at block entry during the - // scalar replacement phase. - ZoneList block_states_; -}; - - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_HYDROGEN_ESCAPE_ANALYSIS_H_ diff --git a/src/crankshaft/hydrogen-flow-engine.h b/src/crankshaft/hydrogen-flow-engine.h deleted file mode 100644 index 149c99bec5..0000000000 --- a/src/crankshaft/hydrogen-flow-engine.h +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_HYDROGEN_FLOW_ENGINE_H_ -#define V8_CRANKSHAFT_HYDROGEN_FLOW_ENGINE_H_ - -#include "src/crankshaft/hydrogen-instructions.h" -#include "src/crankshaft/hydrogen.h" -#include "src/zone/zone.h" - -namespace v8 { -namespace internal { - -// An example implementation of effects that doesn't collect anything. -class NoEffects : public ZoneObject { - public: - explicit NoEffects(Zone* zone) { } - - inline bool Disabled() { - return true; // Nothing to do. - } - template - inline void Apply(State* state) { - // do nothing. - } - inline void Process(HInstruction* value, Zone* zone) { - // do nothing. - } - inline void Union(NoEffects* other, Zone* zone) { - // do nothing. - } -}; - - -// An example implementation of state that doesn't track anything. -class NoState { - public: - inline NoState* Copy(HBasicBlock* succ, Zone* zone) { - return this; - } - inline NoState* Process(HInstruction* value, Zone* zone) { - return this; - } - inline NoState* Merge(HBasicBlock* succ, NoState* other, Zone* zone) { - return this; - } -}; - - -// This class implements an engine that can drive flow-sensitive analyses -// over a graph of basic blocks, either one block at a time (local analysis) -// or over the entire graph (global analysis). The flow engine is parameterized -// by the type of the state and the effects collected while walking over the -// graph. -// -// The "State" collects which facts are known while passing over instructions -// in control flow order, and the "Effects" collect summary information about -// which facts could be invalidated on other control flow paths. The effects -// are necessary to correctly handle loops in the control flow graph without -// doing a fixed-point iteration. Thus the flow engine is guaranteed to visit -// each block at most twice; once for state, and optionally once for effects. -// -// The flow engine requires the State and Effects classes to implement methods -// like the example NoState and NoEffects above. It's not necessary to provide -// an effects implementation for local analysis. -template -class HFlowEngine { - public: - HFlowEngine(HGraph* graph, Zone* zone) - : graph_(graph), - zone_(zone), -#if DEBUG - pred_counts_(graph->blocks()->length(), zone), -#endif - block_states_(graph->blocks()->length(), zone), - loop_effects_(graph->blocks()->length(), zone) { - loop_effects_.AddBlock(NULL, graph_->blocks()->length(), zone); - } - - // Local analysis. Iterates over the instructions in the given block. - State* AnalyzeOneBlock(HBasicBlock* block, State* state) { - // Go through all instructions of the current block, updating the state. - for (HInstructionIterator it(block); !it.Done(); it.Advance()) { - state = state->Process(it.Current(), zone_); - } - return state; - } - - // Global analysis. 
Iterates over all blocks that are dominated by the given - // block, starting with the initial state. Computes effects for nested loops. - void AnalyzeDominatedBlocks(HBasicBlock* root, State* initial) { - InitializeStates(); - SetStateAt(root, initial); - - // Iterate all dominated blocks starting from the given start block. - for (int i = root->block_id(); i < graph_->blocks()->length(); i++) { - HBasicBlock* block = graph_->blocks()->at(i); - - // Skip blocks not dominated by the root node. - if (SkipNonDominatedBlock(root, block)) continue; - State* state = State::Finish(StateAt(block), block, zone_); - - if (block->IsReachable()) { - DCHECK(state != NULL); - if (block->IsLoopHeader()) { - // Apply loop effects before analyzing loop body. - ComputeLoopEffects(block)->Apply(state); - } else { - // Must have visited all predecessors before this block. - CheckPredecessorCount(block); - } - - // Go through all instructions of the current block, updating the state. - for (HInstructionIterator it(block); !it.Done(); it.Advance()) { - state = state->Process(it.Current(), zone_); - } - } - - // Propagate the block state forward to all successor blocks. - int max = block->end()->SuccessorCount(); - for (int i = 0; i < max; i++) { - HBasicBlock* succ = block->end()->SuccessorAt(i); - IncrementPredecessorCount(succ); - - if (max == 1 && succ->predecessors()->length() == 1) { - // Optimization: successor can inherit this state. - SetStateAt(succ, state); - } else { - // Merge the current state with the state already at the successor. - SetStateAt(succ, - State::Merge(StateAt(succ), succ, state, block, zone_)); - } - } - } - } - - private: - // Computes and caches the loop effects for the loop which has the given - // block as its loop header. - Effects* ComputeLoopEffects(HBasicBlock* block) { - DCHECK(block->IsLoopHeader()); - Effects* effects = loop_effects_[block->block_id()]; - if (effects != NULL) return effects; // Already analyzed this loop. - - effects = new(zone_) Effects(zone_); - loop_effects_[block->block_id()] = effects; - if (effects->Disabled()) return effects; // No effects for this analysis. - - HLoopInformation* loop = block->loop_information(); - int end = loop->GetLastBackEdge()->block_id(); - // Process the blocks between the header and the end. - for (int i = block->block_id(); i <= end; i++) { - HBasicBlock* member = graph_->blocks()->at(i); - if (i != block->block_id() && member->IsLoopHeader()) { - // Recursively compute and cache the effects of the nested loop. - DCHECK(member->loop_information()->parent_loop() == loop); - Effects* nested = ComputeLoopEffects(member); - effects->Union(nested, zone_); - // Skip the nested loop's blocks. - i = member->loop_information()->GetLastBackEdge()->block_id(); - } else { - // Process all the effects of the block. - if (member->IsUnreachable()) continue; - DCHECK(member->current_loop() == loop); - for (HInstructionIterator it(member); !it.Done(); it.Advance()) { - effects->Process(it.Current(), zone_); - } - } - } - return effects; - } - - inline bool SkipNonDominatedBlock(HBasicBlock* root, HBasicBlock* other) { - if (root->block_id() == 0) return false; // Visit the whole graph. - if (root == other) return false; // Always visit the root. - return !root->Dominates(other); // Only visit dominated blocks. 
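HFlowEngine threads a user-supplied State through each block's instructions in control-flow order; the Effects summary exists only so loops can be handled without a fixed-point iteration, which is why each block is visited at most twice. A compact sketch of the local-analysis part, assuming blocks already come in reverse post-order and using toy Block and CountState types in place of HBasicBlock and a real analysis state:

#include <cstdio>
#include <vector>

// Hypothetical stand-ins; the real engine walks HBasicBlock/HInstruction.
struct Block {
  int id;
  std::vector<int> instructions;  // opaque "instructions"
  std::vector<int> successors;
};

// Minimal State in the spirit of NoState: Process() folds one instruction.
struct CountState {
  int seen;
  CountState Process(int /*instr*/) const {
    CountState next;
    next.seen = seen + 1;
    return next;
  }
};

template <class State>
State AnalyzeOneBlock(const Block& block, State state) {
  // Walk the block's instructions in order, threading the state through.
  for (int instr : block.instructions) state = state.Process(instr);
  return state;
}

int main() {
  std::vector<Block> rpo = {{0, {1, 2}, {1}}, {1, {3}, {}}};
  CountState state{0};
  for (const Block& b : rpo) {
    state = AnalyzeOneBlock(b, state);
    std::printf("after B%d: %d instructions seen\n", b.id, state.seen);
  }
  return 0;
}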
- } - - inline State* StateAt(HBasicBlock* block) { - return block_states_.at(block->block_id()); - } - - inline void SetStateAt(HBasicBlock* block, State* state) { - block_states_.Set(block->block_id(), state); - } - - inline void InitializeStates() { -#if DEBUG - pred_counts_.Rewind(0); - pred_counts_.AddBlock(0, graph_->blocks()->length(), zone_); -#endif - block_states_.Rewind(0); - block_states_.AddBlock(NULL, graph_->blocks()->length(), zone_); - } - - inline void CheckPredecessorCount(HBasicBlock* block) { - DCHECK(block->predecessors()->length() == pred_counts_[block->block_id()]); - } - - inline void IncrementPredecessorCount(HBasicBlock* block) { -#if DEBUG - pred_counts_[block->block_id()]++; -#endif - } - - HGraph* graph_; // The hydrogen graph. - Zone* zone_; // Temporary zone. -#if DEBUG - ZoneList pred_counts_; // Finished predecessors (by block id). -#endif - ZoneList block_states_; // Block states (by block id). - ZoneList loop_effects_; // Loop effects (by block id). -}; - - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_HYDROGEN_FLOW_ENGINE_H_ diff --git a/src/crankshaft/hydrogen-gvn.cc b/src/crankshaft/hydrogen-gvn.cc deleted file mode 100644 index 70320052b0..0000000000 --- a/src/crankshaft/hydrogen-gvn.cc +++ /dev/null @@ -1,895 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/hydrogen-gvn.h" - -#include "src/crankshaft/hydrogen.h" -#include "src/objects-inl.h" -#include "src/v8.h" - -namespace v8 { -namespace internal { - -class HInstructionMap final : public ZoneObject { - public: - HInstructionMap(Zone* zone, SideEffectsTracker* side_effects_tracker) - : array_size_(0), - lists_size_(0), - count_(0), - array_(NULL), - lists_(NULL), - free_list_head_(kNil), - side_effects_tracker_(side_effects_tracker) { - ResizeLists(kInitialSize, zone); - Resize(kInitialSize, zone); - } - - void Kill(SideEffects side_effects); - - void Add(HInstruction* instr, Zone* zone) { - present_depends_on_.Add(side_effects_tracker_->ComputeDependsOn(instr)); - Insert(instr, zone); - } - - HInstruction* Lookup(HInstruction* instr) const; - - HInstructionMap* Copy(Zone* zone) const { - return new(zone) HInstructionMap(zone, this); - } - - bool IsEmpty() const { return count_ == 0; } - - private: - // A linked list of HInstruction* values. Stored in arrays. - struct HInstructionMapListElement { - HInstruction* instr; - int next; // Index in the array of the next list element. - }; - static const int kNil = -1; // The end of a linked list - - // Must be a power of 2. - static const int kInitialSize = 16; - - HInstructionMap(Zone* zone, const HInstructionMap* other); - - void Resize(int new_size, Zone* zone); - void ResizeLists(int new_size, Zone* zone); - void Insert(HInstruction* instr, Zone* zone); - uint32_t Bound(uint32_t value) const { return value & (array_size_ - 1); } - - int array_size_; - int lists_size_; - int count_; // The number of values stored in the HInstructionMap. - SideEffects present_depends_on_; - HInstructionMapListElement* array_; - // Primary store - contains the first value - // with a given hash. Colliding elements are stored in linked lists. - HInstructionMapListElement* lists_; - // The linked lists containing hash collisions. - int free_list_head_; // Unused elements in lists_ are on the free list. 
- SideEffectsTracker* side_effects_tracker_; -}; - - -class HSideEffectMap final BASE_EMBEDDED { - public: - HSideEffectMap(); - explicit HSideEffectMap(HSideEffectMap* other); - HSideEffectMap& operator= (const HSideEffectMap& other); - - void Kill(SideEffects side_effects); - - void Store(SideEffects side_effects, HInstruction* instr); - - bool IsEmpty() const { return count_ == 0; } - - inline HInstruction* operator[](int i) const { - DCHECK(0 <= i); - DCHECK(i < kNumberOfTrackedSideEffects); - return data_[i]; - } - inline HInstruction* at(int i) const { return operator[](i); } - - private: - int count_; - HInstruction* data_[kNumberOfTrackedSideEffects]; -}; - - -void TraceGVN(const char* msg, ...) { - va_list arguments; - va_start(arguments, msg); - base::OS::VPrint(msg, arguments); - va_end(arguments); -} - - -// Wrap TraceGVN in macros to avoid the expense of evaluating its arguments when -// --trace-gvn is off. -#define TRACE_GVN_1(msg, a1) \ - if (FLAG_trace_gvn) { \ - TraceGVN(msg, a1); \ - } - -#define TRACE_GVN_2(msg, a1, a2) \ - if (FLAG_trace_gvn) { \ - TraceGVN(msg, a1, a2); \ - } - -#define TRACE_GVN_3(msg, a1, a2, a3) \ - if (FLAG_trace_gvn) { \ - TraceGVN(msg, a1, a2, a3); \ - } - -#define TRACE_GVN_4(msg, a1, a2, a3, a4) \ - if (FLAG_trace_gvn) { \ - TraceGVN(msg, a1, a2, a3, a4); \ - } - -#define TRACE_GVN_5(msg, a1, a2, a3, a4, a5) \ - if (FLAG_trace_gvn) { \ - TraceGVN(msg, a1, a2, a3, a4, a5); \ - } - - -HInstructionMap::HInstructionMap(Zone* zone, const HInstructionMap* other) - : array_size_(other->array_size_), - lists_size_(other->lists_size_), - count_(other->count_), - present_depends_on_(other->present_depends_on_), - array_(zone->NewArray(other->array_size_)), - lists_(zone->NewArray(other->lists_size_)), - free_list_head_(other->free_list_head_), - side_effects_tracker_(other->side_effects_tracker_) { - MemCopy(array_, other->array_, - array_size_ * sizeof(HInstructionMapListElement)); - MemCopy(lists_, other->lists_, - lists_size_ * sizeof(HInstructionMapListElement)); -} - - -void HInstructionMap::Kill(SideEffects changes) { - if (!present_depends_on_.ContainsAnyOf(changes)) return; - present_depends_on_.RemoveAll(); - for (int i = 0; i < array_size_; ++i) { - HInstruction* instr = array_[i].instr; - if (instr != NULL) { - // Clear list of collisions first, so we know if it becomes empty. - int kept = kNil; // List of kept elements. - int next; - for (int current = array_[i].next; current != kNil; current = next) { - next = lists_[current].next; - HInstruction* instr = lists_[current].instr; - SideEffects depends_on = side_effects_tracker_->ComputeDependsOn(instr); - if (depends_on.ContainsAnyOf(changes)) { - // Drop it. - count_--; - lists_[current].next = free_list_head_; - free_list_head_ = current; - } else { - // Keep it. - lists_[current].next = kept; - kept = current; - present_depends_on_.Add(depends_on); - } - } - array_[i].next = kept; - - // Now possibly drop directly indexed element. - instr = array_[i].instr; - SideEffects depends_on = side_effects_tracker_->ComputeDependsOn(instr); - if (depends_on.ContainsAnyOf(changes)) { // Drop it. - count_--; - int head = array_[i].next; - if (head == kNil) { - array_[i].instr = NULL; - } else { - array_[i].instr = lists_[head].instr; - array_[i].next = lists_[head].next; - lists_[head].next = free_list_head_; - free_list_head_ = head; - } - } else { - present_depends_on_.Add(depends_on); // Keep it. 
- } - } - } -} - - -HInstruction* HInstructionMap::Lookup(HInstruction* instr) const { - uint32_t hash = static_cast(instr->Hashcode()); - uint32_t pos = Bound(hash); - if (array_[pos].instr != NULL) { - if (array_[pos].instr->Equals(instr)) return array_[pos].instr; - int next = array_[pos].next; - while (next != kNil) { - if (lists_[next].instr->Equals(instr)) return lists_[next].instr; - next = lists_[next].next; - } - } - return NULL; -} - - -void HInstructionMap::Resize(int new_size, Zone* zone) { - DCHECK(new_size > count_); - // Hashing the values into the new array has no more collisions than in the - // old hash map, so we can use the existing lists_ array, if we are careful. - - // Make sure we have at least one free element. - if (free_list_head_ == kNil) { - ResizeLists(lists_size_ << 1, zone); - } - - HInstructionMapListElement* new_array = - zone->NewArray(new_size); - memset(new_array, 0, sizeof(HInstructionMapListElement) * new_size); - - HInstructionMapListElement* old_array = array_; - int old_size = array_size_; - - int old_count = count_; - count_ = 0; - // Do not modify present_depends_on_. It is currently correct. - array_size_ = new_size; - array_ = new_array; - - if (old_array != NULL) { - // Iterate over all the elements in lists, rehashing them. - for (int i = 0; i < old_size; ++i) { - if (old_array[i].instr != NULL) { - int current = old_array[i].next; - while (current != kNil) { - Insert(lists_[current].instr, zone); - int next = lists_[current].next; - lists_[current].next = free_list_head_; - free_list_head_ = current; - current = next; - } - // Rehash the directly stored instruction. - Insert(old_array[i].instr, zone); - } - } - } - USE(old_count); - DCHECK(count_ == old_count); -} - - -void HInstructionMap::ResizeLists(int new_size, Zone* zone) { - DCHECK(new_size > lists_size_); - - HInstructionMapListElement* new_lists = - zone->NewArray(new_size); - memset(new_lists, 0, sizeof(HInstructionMapListElement) * new_size); - - HInstructionMapListElement* old_lists = lists_; - int old_size = lists_size_; - - lists_size_ = new_size; - lists_ = new_lists; - - if (old_lists != NULL) { - MemCopy(lists_, old_lists, old_size * sizeof(HInstructionMapListElement)); - } - for (int i = old_size; i < lists_size_; ++i) { - lists_[i].next = free_list_head_; - free_list_head_ = i; - } -} - - -void HInstructionMap::Insert(HInstruction* instr, Zone* zone) { - DCHECK(instr != NULL); - // Resizing when half of the hashtable is filled up. - if (count_ >= array_size_ >> 1) Resize(array_size_ << 1, zone); - DCHECK(count_ < array_size_); - count_++; - uint32_t pos = Bound(static_cast(instr->Hashcode())); - if (array_[pos].instr == NULL) { - array_[pos].instr = instr; - array_[pos].next = kNil; - } else { - if (free_list_head_ == kNil) { - ResizeLists(lists_size_ << 1, zone); - } - int new_element_pos = free_list_head_; - DCHECK(new_element_pos != kNil); - free_list_head_ = lists_[free_list_head_].next; - lists_[new_element_pos].instr = instr; - lists_[new_element_pos].next = array_[pos].next; - DCHECK(array_[pos].next == kNil || lists_[array_[pos].next].instr != NULL); - array_[pos].next = new_element_pos; - } -} - - -HSideEffectMap::HSideEffectMap() : count_(0) { - memset(data_, 0, kNumberOfTrackedSideEffects * kPointerSize); -} - - -HSideEffectMap::HSideEffectMap(HSideEffectMap* other) : count_(other->count_) { - *this = *other; // Calls operator=. 
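HInstructionMap above is the value-numbering table: instructions are stored under a hash of their operation, Lookup() finds an equivalent earlier instruction, and Kill() flushes every entry whose dependencies overlap the side effects just produced. A rough standalone equivalent using std::unordered_map and a plain 64-bit mask; the key strings, bit positions and instruction ids are invented for illustration.

#include <cstdint>
#include <cstdio>
#include <iterator>
#include <string>
#include <unordered_map>

// Key: a canonical description of (opcode, operands). Value: the id of the
// instruction that first computed it.
using ValueTable = std::unordered_map<std::string, int>;

// Drop entries whose "depends on" bits intersect the changed bits, in the
// spirit of HInstructionMap::Kill(SideEffects changes).
void Kill(ValueTable& table,
          const std::unordered_map<std::string, uint64_t>& depends_on,
          uint64_t changes) {
  for (auto it = table.begin(); it != table.end();) {
    auto dep = depends_on.find(it->first);
    bool affected = dep != depends_on.end() && (dep->second & changes) != 0;
    it = affected ? table.erase(it) : std::next(it);
  }
}

int main() {
  ValueTable table;
  std::unordered_map<std::string, uint64_t> depends_on;

  // First occurrence of "load field #1 of object o": remember it.
  table["LoadField o.1"] = 10;
  depends_on["LoadField o.1"] = uint64_t{1} << 3;  // depends on inobject fields

  // A later identical load can simply reuse instruction 10.
  std::printf("redundant load replaced by i%d\n", table.at("LoadField o.1"));

  // A store that changes inobject fields invalidates the cached load.
  Kill(table, depends_on, uint64_t{1} << 3);
  std::printf("entries left after kill: %zu\n", table.size());
  return 0;
}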
-} - - -HSideEffectMap& HSideEffectMap::operator=(const HSideEffectMap& other) { - if (this != &other) { - MemCopy(data_, other.data_, kNumberOfTrackedSideEffects * kPointerSize); - } - return *this; -} - - -void HSideEffectMap::Kill(SideEffects side_effects) { - for (int i = 0; i < kNumberOfTrackedSideEffects; i++) { - if (side_effects.ContainsFlag(GVNFlagFromInt(i))) { - if (data_[i] != NULL) count_--; - data_[i] = NULL; - } - } -} - - -void HSideEffectMap::Store(SideEffects side_effects, HInstruction* instr) { - for (int i = 0; i < kNumberOfTrackedSideEffects; i++) { - if (side_effects.ContainsFlag(GVNFlagFromInt(i))) { - if (data_[i] == NULL) count_++; - data_[i] = instr; - } - } -} - - -SideEffects SideEffectsTracker::ComputeChanges(HInstruction* instr) { - int index; - SideEffects result(instr->ChangesFlags()); - if (result.ContainsFlag(kGlobalVars)) { - if (instr->IsStoreNamedField()) { - HStoreNamedField* store = HStoreNamedField::cast(instr); - HConstant* target = HConstant::cast(store->object()); - if (ComputeGlobalVar(Unique::cast(target->GetUnique()), - &index)) { - result.RemoveFlag(kGlobalVars); - result.AddSpecial(GlobalVar(index)); - return result; - } - } - for (index = 0; index < kNumberOfGlobalVars; ++index) { - result.AddSpecial(GlobalVar(index)); - } - } else if (result.ContainsFlag(kInobjectFields)) { - if (instr->IsStoreNamedField() && - ComputeInobjectField(HStoreNamedField::cast(instr)->access(), &index)) { - result.RemoveFlag(kInobjectFields); - result.AddSpecial(InobjectField(index)); - } else { - for (index = 0; index < kNumberOfInobjectFields; ++index) { - result.AddSpecial(InobjectField(index)); - } - } - } - return result; -} - - -SideEffects SideEffectsTracker::ComputeDependsOn(HInstruction* instr) { - int index; - SideEffects result(instr->DependsOnFlags()); - if (result.ContainsFlag(kGlobalVars)) { - if (instr->IsLoadNamedField()) { - HLoadNamedField* load = HLoadNamedField::cast(instr); - HConstant* target = HConstant::cast(load->object()); - if (ComputeGlobalVar(Unique::cast(target->GetUnique()), - &index)) { - result.RemoveFlag(kGlobalVars); - result.AddSpecial(GlobalVar(index)); - return result; - } - } - for (index = 0; index < kNumberOfGlobalVars; ++index) { - result.AddSpecial(GlobalVar(index)); - } - } else if (result.ContainsFlag(kInobjectFields)) { - if (instr->IsLoadNamedField() && - ComputeInobjectField(HLoadNamedField::cast(instr)->access(), &index)) { - result.RemoveFlag(kInobjectFields); - result.AddSpecial(InobjectField(index)); - } else { - for (index = 0; index < kNumberOfInobjectFields; ++index) { - result.AddSpecial(InobjectField(index)); - } - } - } - return result; -} - - -std::ostream& operator<<(std::ostream& os, const TrackedEffects& te) { - SideEffectsTracker* t = te.tracker; - const char* separator = ""; - os << "["; - for (int bit = 0; bit < kNumberOfFlags; ++bit) { - GVNFlag flag = GVNFlagFromInt(bit); - if (te.effects.ContainsFlag(flag)) { - os << separator; - separator = ", "; - switch (flag) { -#define DECLARE_FLAG(Type) \ - case k##Type: \ - os << #Type; \ - break; -GVN_TRACKED_FLAG_LIST(DECLARE_FLAG) -GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG) -#undef DECLARE_FLAG - default: - break; - } - } - } - for (int index = 0; index < t->num_global_vars_; ++index) { - if (te.effects.ContainsSpecial(t->GlobalVar(index))) { - os << separator << "[" << *t->global_vars_[index].handle() << "]"; - separator = ", "; - } - } - for (int index = 0; index < t->num_inobject_fields_; ++index) { - if 
(te.effects.ContainsSpecial(t->InobjectField(index))) { - os << separator << t->inobject_fields_[index]; - separator = ", "; - } - } - os << "]"; - return os; -} - - -bool SideEffectsTracker::ComputeGlobalVar(Unique cell, - int* index) { - for (int i = 0; i < num_global_vars_; ++i) { - if (cell == global_vars_[i]) { - *index = i; - return true; - } - } - if (num_global_vars_ < kNumberOfGlobalVars) { - if (FLAG_trace_gvn) { - OFStream os(stdout); - os << "Tracking global var [" << *cell.handle() << "] " - << "(mapped to index " << num_global_vars_ << ")" << std::endl; - } - *index = num_global_vars_; - global_vars_[num_global_vars_++] = cell; - return true; - } - return false; -} - - -bool SideEffectsTracker::ComputeInobjectField(HObjectAccess access, - int* index) { - for (int i = 0; i < num_inobject_fields_; ++i) { - if (access.Equals(inobject_fields_[i])) { - *index = i; - return true; - } - } - if (num_inobject_fields_ < kNumberOfInobjectFields) { - if (FLAG_trace_gvn) { - OFStream os(stdout); - os << "Tracking inobject field access " << access << " (mapped to index " - << num_inobject_fields_ << ")" << std::endl; - } - *index = num_inobject_fields_; - inobject_fields_[num_inobject_fields_++] = access; - return true; - } - return false; -} - - -HGlobalValueNumberingPhase::HGlobalValueNumberingPhase(HGraph* graph) - : HPhase("H_Global value numbering", graph), - removed_side_effects_(false), - block_side_effects_(graph->blocks()->length(), zone()), - loop_side_effects_(graph->blocks()->length(), zone()), - visited_on_paths_(graph->blocks()->length(), zone()) { - DCHECK(!AllowHandleAllocation::IsAllowed()); - block_side_effects_.AddBlock( - SideEffects(), graph->blocks()->length(), zone()); - loop_side_effects_.AddBlock( - SideEffects(), graph->blocks()->length(), zone()); -} - - -void HGlobalValueNumberingPhase::Run() { - DCHECK(!removed_side_effects_); - for (int i = FLAG_gvn_iterations; i > 0; --i) { - // Compute the side effects. - ComputeBlockSideEffects(); - - // Perform loop invariant code motion if requested. - if (FLAG_loop_invariant_code_motion) LoopInvariantCodeMotion(); - - // Perform the actual value numbering. - AnalyzeGraph(); - - // Continue GVN if we removed any side effects. - if (!removed_side_effects_) break; - removed_side_effects_ = false; - - // Clear all side effects. - DCHECK_EQ(block_side_effects_.length(), graph()->blocks()->length()); - DCHECK_EQ(loop_side_effects_.length(), graph()->blocks()->length()); - for (int i = 0; i < graph()->blocks()->length(); ++i) { - block_side_effects_[i].RemoveAll(); - loop_side_effects_[i].RemoveAll(); - } - visited_on_paths_.Clear(); - } -} - - -void HGlobalValueNumberingPhase::ComputeBlockSideEffects() { - for (int i = graph()->blocks()->length() - 1; i >= 0; --i) { - // Compute side effects for the block. - HBasicBlock* block = graph()->blocks()->at(i); - SideEffects side_effects; - if (block->IsReachable() && !block->IsDeoptimizing()) { - int id = block->block_id(); - for (HInstructionIterator it(block); !it.Done(); it.Advance()) { - HInstruction* instr = it.Current(); - side_effects.Add(side_effects_tracker_.ComputeChanges(instr)); - } - block_side_effects_[id].Add(side_effects); - - // Loop headers are part of their loop. - if (block->IsLoopHeader()) { - loop_side_effects_[id].Add(side_effects); - } - - // Propagate loop side effects upwards. 
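ComputeBlockSideEffects folds every block's effects into the summaries of all enclosing loop headers, so a header's summary covers its entire, possibly nested, body. A small sketch of that accumulation with plain bitmasks and a parent-header index; the block ids and bit assignments are illustrative only.

#include <cstdint>
#include <cstdio>
#include <vector>

struct BlockInfo {
  uint64_t changes;  // side effects produced by the block itself
  int loop_header;   // id of the innermost enclosing loop header, -1 if none
};

int main() {
  // B2 sits in a loop headed by B1, which itself nests in a loop headed by B0.
  std::vector<BlockInfo> blocks = {{0, -1}, {1u << 1, 0}, {1u << 4, 1}};
  std::vector<uint64_t> loop_effects(blocks.size(), 0);

  // Each block's effects are added to every enclosing loop header's summary.
  for (size_t i = 0; i < blocks.size(); ++i) {
    for (int header = blocks[i].loop_header; header != -1;
         header = blocks[header].loop_header) {
      loop_effects[header] |= blocks[i].changes;
    }
  }

  for (size_t i = 0; i < blocks.size(); ++i) {
    std::printf("loop headed by B%zu kills bits 0x%llx\n", i,
                static_cast<unsigned long long>(loop_effects[i]));
  }
  return 0;
}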
- if (block->HasParentLoopHeader()) { - HBasicBlock* with_parent = block; - if (block->IsLoopHeader()) side_effects = loop_side_effects_[id]; - do { - HBasicBlock* parent_block = with_parent->parent_loop_header(); - loop_side_effects_[parent_block->block_id()].Add(side_effects); - with_parent = parent_block; - } while (with_parent->HasParentLoopHeader()); - } - } - } -} - - -void HGlobalValueNumberingPhase::LoopInvariantCodeMotion() { - TRACE_GVN_1("Using optimistic loop invariant code motion: %s\n", - graph()->use_optimistic_licm() ? "yes" : "no"); - for (int i = graph()->blocks()->length() - 1; i >= 0; --i) { - HBasicBlock* block = graph()->blocks()->at(i); - if (block->IsLoopHeader()) { - SideEffects side_effects = loop_side_effects_[block->block_id()]; - if (FLAG_trace_gvn) { - OFStream os(stdout); - os << "Try loop invariant motion for " << *block << " changes " - << Print(side_effects) << std::endl; - } - HBasicBlock* last = block->loop_information()->GetLastBackEdge(); - for (int j = block->block_id(); j <= last->block_id(); ++j) { - ProcessLoopBlock(graph()->blocks()->at(j), block, side_effects); - } - } - } -} - - -void HGlobalValueNumberingPhase::ProcessLoopBlock( - HBasicBlock* block, - HBasicBlock* loop_header, - SideEffects loop_kills) { - HBasicBlock* pre_header = loop_header->predecessors()->at(0); - if (FLAG_trace_gvn) { - OFStream os(stdout); - os << "Loop invariant code motion for " << *block << " depends on " - << Print(loop_kills) << std::endl; - } - HInstruction* instr = block->first(); - while (instr != NULL) { - HInstruction* next = instr->next(); - if (instr->CheckFlag(HValue::kUseGVN)) { - SideEffects changes = side_effects_tracker_.ComputeChanges(instr); - SideEffects depends_on = side_effects_tracker_.ComputeDependsOn(instr); - if (FLAG_trace_gvn) { - OFStream os(stdout); - os << "Checking instruction i" << instr->id() << " (" - << instr->Mnemonic() << ") changes " << Print(changes) - << ", depends on " << Print(depends_on) << ". Loop changes " - << Print(loop_kills) << std::endl; - } - bool can_hoist = !depends_on.ContainsAnyOf(loop_kills); - if (can_hoist && !graph()->use_optimistic_licm()) { - can_hoist = block->IsLoopSuccessorDominator(); - } - - if (can_hoist) { - bool inputs_loop_invariant = true; - for (int i = 0; i < instr->OperandCount(); ++i) { - if (instr->OperandAt(i)->IsDefinedAfter(pre_header)) { - inputs_loop_invariant = false; - } - } - - if (inputs_loop_invariant && ShouldMove(instr, loop_header)) { - TRACE_GVN_2("Hoisting loop invariant instruction i%d to block B%d\n", - instr->id(), pre_header->block_id()); - // Move the instruction out of the loop. - instr->Unlink(); - instr->InsertBefore(pre_header->end()); - if (instr->HasSideEffects()) removed_side_effects_ = true; - } - } - } - instr = next; - } -} - - -bool HGlobalValueNumberingPhase::ShouldMove(HInstruction* instr, - HBasicBlock* loop_header) { - // If we've disabled code motion or we're in a block that unconditionally - // deoptimizes, don't move any instructions. 
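ProcessLoopBlock above only hoists an instruction whose dependencies are disjoint from everything the loop kills and whose operands are all defined before the loop. A minimal sketch of those two checks, assuming instruction ids increase in schedule order; Instr and the bit layout are stand-ins, not V8 types.

#include <cstdint>
#include <cstdio>
#include <vector>

struct Instr {
  int id;
  uint64_t depends_on;        // side-effect bits this instruction reads
  std::vector<int> operands;  // ids of its input instructions
};

// Hoistable to the preheader iff nothing it depends on is changed inside
// the loop and every operand is defined before the loop begins.
bool CanHoist(const Instr& instr, uint64_t loop_kills, int first_loop_id) {
  if ((instr.depends_on & loop_kills) != 0) return false;
  for (int op : instr.operands) {
    if (op >= first_loop_id) return false;  // operand defined inside the loop
  }
  return true;
}

int main() {
  const uint64_t loop_kills = uint64_t{1} << 2;  // the loop writes "elements"
  const int first_loop_id = 100;

  Instr invariant{101, uint64_t{1} << 5, {7, 9}};  // reads untouched state
  Instr variant{102, uint64_t{1} << 2, {7}};       // reads what the loop writes

  std::printf("i%d hoistable: %d\n", invariant.id,
              CanHoist(invariant, loop_kills, first_loop_id));
  std::printf("i%d hoistable: %d\n", variant.id,
              CanHoist(variant, loop_kills, first_loop_id));
  return 0;
}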
- return graph()->allow_code_motion() && !instr->block()->IsDeoptimizing() && - instr->block()->IsReachable(); -} - - -SideEffects -HGlobalValueNumberingPhase::CollectSideEffectsOnPathsToDominatedBlock( - HBasicBlock* dominator, HBasicBlock* dominated) { - SideEffects side_effects; - for (int i = 0; i < dominated->predecessors()->length(); ++i) { - HBasicBlock* block = dominated->predecessors()->at(i); - if (dominator->block_id() < block->block_id() && - block->block_id() < dominated->block_id() && - !visited_on_paths_.Contains(block->block_id())) { - visited_on_paths_.Add(block->block_id()); - side_effects.Add(block_side_effects_[block->block_id()]); - if (block->IsLoopHeader()) { - side_effects.Add(loop_side_effects_[block->block_id()]); - } - side_effects.Add(CollectSideEffectsOnPathsToDominatedBlock( - dominator, block)); - } - } - return side_effects; -} - - -// Each instance of this class is like a "stack frame" for the recursive -// traversal of the dominator tree done during GVN (the stack is handled -// as a double linked list). -// We reuse frames when possible so the list length is limited by the depth -// of the dominator tree but this forces us to initialize each frame calling -// an explicit "Initialize" method instead of a using constructor. -class GvnBasicBlockState: public ZoneObject { - public: - static GvnBasicBlockState* CreateEntry(Zone* zone, - HBasicBlock* entry_block, - HInstructionMap* entry_map) { - return new(zone) - GvnBasicBlockState(NULL, entry_block, entry_map, NULL, zone); - } - - HBasicBlock* block() { return block_; } - HInstructionMap* map() { return map_; } - HSideEffectMap* dominators() { return &dominators_; } - - GvnBasicBlockState* next_in_dominator_tree_traversal( - Zone* zone, - HBasicBlock** dominator) { - // This assignment needs to happen before calling next_dominated() because - // that call can reuse "this" if we are at the last dominated block. - *dominator = block(); - GvnBasicBlockState* result = next_dominated(zone); - if (result == NULL) { - GvnBasicBlockState* dominator_state = pop(); - if (dominator_state != NULL) { - // This branch is guaranteed not to return NULL because pop() never - // returns a state where "is_done() == true". - *dominator = dominator_state->block(); - result = dominator_state->next_dominated(zone); - } else { - // Unnecessary (we are returning NULL) but done for cleanness. - *dominator = NULL; - } - } - return result; - } - - private: - void Initialize(HBasicBlock* block, - HInstructionMap* map, - HSideEffectMap* dominators, - bool copy_map, - Zone* zone) { - block_ = block; - map_ = copy_map ? map->Copy(zone) : map; - dominated_index_ = -1; - length_ = block->dominated_blocks()->length(); - if (dominators != NULL) { - dominators_ = *dominators; - } - } - bool is_done() { return dominated_index_ >= length_; } - - GvnBasicBlockState(GvnBasicBlockState* previous, - HBasicBlock* block, - HInstructionMap* map, - HSideEffectMap* dominators, - Zone* zone) - : previous_(previous), next_(NULL) { - Initialize(block, map, dominators, true, zone); - } - - GvnBasicBlockState* next_dominated(Zone* zone) { - dominated_index_++; - if (dominated_index_ == length_ - 1) { - // No need to copy the map for the last child in the dominator tree. 
- Initialize(block_->dominated_blocks()->at(dominated_index_), - map(), - dominators(), - false, - zone); - return this; - } else if (dominated_index_ < length_) { - return push(zone, block_->dominated_blocks()->at(dominated_index_)); - } else { - return NULL; - } - } - - GvnBasicBlockState* push(Zone* zone, HBasicBlock* block) { - if (next_ == NULL) { - next_ = - new(zone) GvnBasicBlockState(this, block, map(), dominators(), zone); - } else { - next_->Initialize(block, map(), dominators(), true, zone); - } - return next_; - } - GvnBasicBlockState* pop() { - GvnBasicBlockState* result = previous_; - while (result != NULL && result->is_done()) { - TRACE_GVN_2("Backtracking from block B%d to block b%d\n", - block()->block_id(), - previous_->block()->block_id()) - result = result->previous_; - } - return result; - } - - GvnBasicBlockState* previous_; - GvnBasicBlockState* next_; - HBasicBlock* block_; - HInstructionMap* map_; - HSideEffectMap dominators_; - int dominated_index_; - int length_; -}; - - -// This is a recursive traversal of the dominator tree but it has been turned -// into a loop to avoid stack overflows. -// The logical "stack frames" of the recursion are kept in a list of -// GvnBasicBlockState instances. -void HGlobalValueNumberingPhase::AnalyzeGraph() { - HBasicBlock* entry_block = graph()->entry_block(); - HInstructionMap* entry_map = - new(zone()) HInstructionMap(zone(), &side_effects_tracker_); - GvnBasicBlockState* current = - GvnBasicBlockState::CreateEntry(zone(), entry_block, entry_map); - - while (current != NULL) { - HBasicBlock* block = current->block(); - HInstructionMap* map = current->map(); - HSideEffectMap* dominators = current->dominators(); - - TRACE_GVN_2("Analyzing block B%d%s\n", - block->block_id(), - block->IsLoopHeader() ? " (loop header)" : ""); - - // If this is a loop header kill everything killed by the loop. - if (block->IsLoopHeader()) { - map->Kill(loop_side_effects_[block->block_id()]); - dominators->Kill(loop_side_effects_[block->block_id()]); - } - - // Go through all instructions of the current block. - for (HInstructionIterator it(block); !it.Done(); it.Advance()) { - HInstruction* instr = it.Current(); - if (instr->CheckFlag(HValue::kTrackSideEffectDominators)) { - for (int i = 0; i < kNumberOfTrackedSideEffects; i++) { - HValue* other = dominators->at(i); - GVNFlag flag = GVNFlagFromInt(i); - if (instr->DependsOnFlags().Contains(flag) && other != NULL) { - TRACE_GVN_5("Side-effect #%d in %d (%s) is dominated by %d (%s)\n", - i, - instr->id(), - instr->Mnemonic(), - other->id(), - other->Mnemonic()); - if (instr->HandleSideEffectDominator(flag, other)) { - removed_side_effects_ = true; - } - } - } - } - // Instruction was unlinked during graph traversal. - if (!instr->IsLinked()) continue; - - SideEffects changes = side_effects_tracker_.ComputeChanges(instr); - if (!changes.IsEmpty()) { - // Clear all instructions in the map that are affected by side effects. - // Store instruction as the dominating one for tracked side effects. 
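AnalyzeGraph replaces the natural recursion over the dominator tree with an explicit list of GvnBasicBlockState frames so deep trees cannot overflow the native stack. The same idea in miniature, using an explicit stack of (block, next child) pairs over a toy dominator tree; the block layout is invented for illustration.

#include <cstdio>
#include <utility>
#include <vector>

struct Block {
  int id;
  std::vector<int> dominated;  // blocks immediately dominated by this one
};

int main() {
  // B0 dominates B1 and B3; B1 dominates B2.
  std::vector<Block> blocks = {{0, {1, 3}}, {1, {2}}, {2, {}}, {3, {}}};

  // Each stack entry remembers which dominated child to visit next, which is
  // essentially what GvnBasicBlockState::next_dominated() tracks.
  std::vector<std::pair<int, size_t>> stack = {{0, 0}};
  while (!stack.empty()) {
    auto& [id, next_child] = stack.back();
    if (next_child == 0) std::printf("visit B%d\n", id);
    if (next_child < blocks[id].dominated.size()) {
      int child = blocks[id].dominated[next_child++];
      stack.push_back({child, 0});
    } else {
      stack.pop_back();
    }
  }
  return 0;
}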
- map->Kill(changes); - dominators->Store(changes, instr); - if (FLAG_trace_gvn) { - OFStream os(stdout); - os << "Instruction i" << instr->id() << " changes " << Print(changes) - << std::endl; - } - } - if (instr->CheckFlag(HValue::kUseGVN) && - !instr->CheckFlag(HValue::kCantBeReplaced)) { - DCHECK(!instr->HasObservableSideEffects()); - HInstruction* other = map->Lookup(instr); - if (other != NULL) { - DCHECK(instr->Equals(other) && other->Equals(instr)); - TRACE_GVN_4("Replacing instruction i%d (%s) with i%d (%s)\n", - instr->id(), - instr->Mnemonic(), - other->id(), - other->Mnemonic()); - if (instr->HasSideEffects()) removed_side_effects_ = true; - instr->DeleteAndReplaceWith(other); - } else { - map->Add(instr, zone()); - } - } - } - - HBasicBlock* dominator_block; - GvnBasicBlockState* next = - current->next_in_dominator_tree_traversal(zone(), - &dominator_block); - - if (next != NULL) { - HBasicBlock* dominated = next->block(); - HInstructionMap* successor_map = next->map(); - HSideEffectMap* successor_dominators = next->dominators(); - - // Kill everything killed on any path between this block and the - // dominated block. We don't have to traverse these paths if the - // value map and the dominators list is already empty. If the range - // of block ids (block_id, dominated_id) is empty there are no such - // paths. - if ((!successor_map->IsEmpty() || !successor_dominators->IsEmpty()) && - dominator_block->block_id() + 1 < dominated->block_id()) { - visited_on_paths_.Clear(); - SideEffects side_effects_on_all_paths = - CollectSideEffectsOnPathsToDominatedBlock(dominator_block, - dominated); - successor_map->Kill(side_effects_on_all_paths); - successor_dominators->Kill(side_effects_on_all_paths); - } - } - current = next; - } -} - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/hydrogen-gvn.h b/src/crankshaft/hydrogen-gvn.h deleted file mode 100644 index 5f11737dbc..0000000000 --- a/src/crankshaft/hydrogen-gvn.h +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_HYDROGEN_GVN_H_ -#define V8_CRANKSHAFT_HYDROGEN_GVN_H_ - -#include - -#include "src/crankshaft/hydrogen-instructions.h" -#include "src/crankshaft/hydrogen.h" -#include "src/zone/zone.h" - -namespace v8 { -namespace internal { - -// This class extends GVNFlagSet with additional "special" dynamic side effects, -// which can be used to represent side effects that cannot be expressed using -// the GVNFlags of an HInstruction. These special side effects are tracked by a -// SideEffectsTracker (see below). 
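The SideEffects class declared next packs the fixed GVN flags and a set of dynamically assigned "special" effects into one 64-bit word, so unrelated global-variable or in-object field accesses do not conflict with each other. A rough standalone equivalent; the flag count and bit layout here are invented for illustration.

#include <cstdint>
#include <cstdio>

// Pretend the first kNumberOfFlags bits are fixed GVN flags and the remaining
// bits are handed out dynamically to individual global variables or fields.
constexpr int kNumberOfFlags = 24;
constexpr int kNumberOfSpecials = 64 - kNumberOfFlags;

class SideEffectsSketch {
 public:
  void AddFlag(int flag) { bits_ |= uint64_t{1} << flag; }
  void AddSpecial(int special) {
    bits_ |= uint64_t{1} << (kNumberOfFlags + special);
  }
  bool ContainsAnyOf(SideEffectsSketch other) const {
    return (bits_ & other.bits_) != 0;
  }

 private:
  uint64_t bits_ = 0;
};

int main() {
  SideEffectsSketch store_changes, load_depends_on;
  store_changes.AddSpecial(0);    // store to the field mapped to special #0
  load_depends_on.AddSpecial(1);  // load of a different field (special #1)

  // Unrelated field accesses don't overlap, so the load can still be
  // value-numbered or hoisted across the store.
  std::printf("conflict: %d\n", load_depends_on.ContainsAnyOf(store_changes));
  return 0;
}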
-class SideEffects final { - public: - static const int kNumberOfSpecials = 64 - kNumberOfFlags; - - SideEffects() : bits_(0) { - DCHECK(kNumberOfFlags + kNumberOfSpecials == sizeof(bits_) * CHAR_BIT); - } - explicit SideEffects(GVNFlagSet flags) : bits_(flags.ToIntegral()) {} - bool IsEmpty() const { return bits_ == 0; } - bool ContainsFlag(GVNFlag flag) const { - return (bits_ & MaskFlag(flag)) != 0; - } - bool ContainsSpecial(int special) const { - return (bits_ & MaskSpecial(special)) != 0; - } - bool ContainsAnyOf(SideEffects set) const { return (bits_ & set.bits_) != 0; } - void Add(SideEffects set) { bits_ |= set.bits_; } - void AddSpecial(int special) { bits_ |= MaskSpecial(special); } - void RemoveFlag(GVNFlag flag) { bits_ &= ~MaskFlag(flag); } - void RemoveAll() { bits_ = 0; } - uint64_t ToIntegral() const { return bits_; } - - private: - uint64_t MaskFlag(GVNFlag flag) const { - return static_cast(1) << static_cast(flag); - } - uint64_t MaskSpecial(int special) const { - DCHECK(special >= 0); - DCHECK(special < kNumberOfSpecials); - return static_cast(1) << static_cast( - special + kNumberOfFlags); - } - - uint64_t bits_; -}; - - -struct TrackedEffects; - -// Tracks global variable and inobject field loads/stores in a fine grained -// fashion, and represents them using the "special" dynamic side effects of the -// SideEffects class (see above). This way unrelated global variable/inobject -// field stores don't prevent hoisting and merging of global variable/inobject -// field loads. -class SideEffectsTracker final BASE_EMBEDDED { - public: - SideEffectsTracker() : num_global_vars_(0), num_inobject_fields_(0) {} - SideEffects ComputeChanges(HInstruction* instr); - SideEffects ComputeDependsOn(HInstruction* instr); - - private: - friend std::ostream& operator<<(std::ostream& os, const TrackedEffects& f); - bool ComputeGlobalVar(Unique cell, int* index); - bool ComputeInobjectField(HObjectAccess access, int* index); - - static int GlobalVar(int index) { - DCHECK(index >= 0); - DCHECK(index < kNumberOfGlobalVars); - return index; - } - static int InobjectField(int index) { - DCHECK(index >= 0); - DCHECK(index < kNumberOfInobjectFields); - return index + kNumberOfGlobalVars; - } - - // Track up to four global vars. - static const int kNumberOfGlobalVars = 4; - Unique global_vars_[kNumberOfGlobalVars]; - int num_global_vars_; - - // Track up to n inobject fields. - static const int kNumberOfInobjectFields = - SideEffects::kNumberOfSpecials - kNumberOfGlobalVars; - HObjectAccess inobject_fields_[kNumberOfInobjectFields]; - int num_inobject_fields_; -}; - - -// Helper class for printing, because the effects don't know their tracker. -struct TrackedEffects { - TrackedEffects(SideEffectsTracker* t, SideEffects e) - : tracker(t), effects(e) {} - SideEffectsTracker* tracker; - SideEffects effects; -}; - - -std::ostream& operator<<(std::ostream& os, const TrackedEffects& f); - - -// Perform common subexpression elimination and loop-invariant code motion. 
-class HGlobalValueNumberingPhase final : public HPhase { - public: - explicit HGlobalValueNumberingPhase(HGraph* graph); - - void Run(); - - private: - SideEffects CollectSideEffectsOnPathsToDominatedBlock( - HBasicBlock* dominator, - HBasicBlock* dominated); - void AnalyzeGraph(); - void ComputeBlockSideEffects(); - void LoopInvariantCodeMotion(); - void ProcessLoopBlock(HBasicBlock* block, - HBasicBlock* before_loop, - SideEffects loop_kills); - bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header); - TrackedEffects Print(SideEffects side_effects) { - return TrackedEffects(&side_effects_tracker_, side_effects); - } - - SideEffectsTracker side_effects_tracker_; - bool removed_side_effects_; - - // A map of block IDs to their side effects. - ZoneList block_side_effects_; - - // A map of loop header block IDs to their loop's side effects. - ZoneList loop_side_effects_; - - // Used when collecting side effects on paths from dominator to - // dominated. - BitVector visited_on_paths_; - - DISALLOW_COPY_AND_ASSIGN(HGlobalValueNumberingPhase); -}; - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_HYDROGEN_GVN_H_ diff --git a/src/crankshaft/hydrogen-infer-representation.cc b/src/crankshaft/hydrogen-infer-representation.cc deleted file mode 100644 index bbff24e5d1..0000000000 --- a/src/crankshaft/hydrogen-infer-representation.cc +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/hydrogen-infer-representation.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - -void HInferRepresentationPhase::AddToWorklist(HValue* current) { - if (current->representation().IsTagged()) return; - if (!current->CheckFlag(HValue::kFlexibleRepresentation)) return; - if (in_worklist_.Contains(current->id())) return; - worklist_.Add(current, zone()); - in_worklist_.Add(current->id()); -} - - -void HInferRepresentationPhase::Run() { - // (1) Initialize bit vectors and count real uses. Each phi gets a - // bit-vector of length . - const ZoneList* phi_list = graph()->phi_list(); - int phi_count = phi_list->length(); - ZoneList connected_phis(phi_count, zone()); - for (int i = 0; i < phi_count; ++i) { - phi_list->at(i)->InitRealUses(i); - BitVector* connected_set = new(zone()) BitVector(phi_count, zone()); - connected_set->Add(i); - connected_phis.Add(connected_set, zone()); - } - - // (2) Do a fixed point iteration to find the set of connected phis. A - // phi is connected to another phi if its value is used either directly or - // indirectly through a transitive closure of the def-use relation. - bool change = true; - while (change) { - change = false; - // We normally have far more "forward edges" than "backward edges", - // so we terminate faster when we walk backwards. - for (int i = phi_count - 1; i >= 0; --i) { - HPhi* phi = phi_list->at(i); - for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) { - HValue* use = it.value(); - if (use->IsPhi()) { - int id = HPhi::cast(use)->phi_id(); - if (connected_phis[i]->UnionIsChanged(*connected_phis[id])) - change = true; - } - } - } - } - - // Set truncation flags for groups of connected phis. This is a conservative - // approximation; the flag will be properly re-computed after representations - // have been determined. 
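Step (2) of HInferRepresentationPhase::Run finds groups of connected phis by repeatedly unioning per-phi bit vectors until nothing changes. A small sketch of that fixed point using std::bitset; the use edges are made up, and 64 is an arbitrary cap on the number of phis.

#include <bitset>
#include <cstdio>
#include <vector>

int main() {
  // phi_uses[i] lists the phis that directly use phi i.
  std::vector<std::vector<int>> phi_uses = {{1}, {2}, {}, {2}};
  const int phi_count = static_cast<int>(phi_uses.size());

  // connected[i] starts as {i} and grows to the set of phis reachable
  // from phi i through phi-to-phi uses.
  std::vector<std::bitset<64>> connected(phi_count);
  for (int i = 0; i < phi_count; ++i) connected[i].set(i);

  // Fixed point: if phi `use` consumes phi i, phi i inherits use's set.
  bool change = true;
  while (change) {
    change = false;
    for (int i = phi_count - 1; i >= 0; --i) {
      for (int use : phi_uses[i]) {
        std::bitset<64> merged = connected[i] | connected[use];
        if (merged != connected[i]) {
          connected[i] = merged;
          change = true;
        }
      }
    }
  }

  for (int i = 0; i < phi_count; ++i) {
    std::printf("phi %d: %s\n", i,
                connected[i].to_string().substr(60).c_str());
  }
  return 0;
}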
- if (phi_count > 0) { - BitVector done(phi_count, zone()); - for (int i = 0; i < phi_count; ++i) { - if (done.Contains(i)) continue; - - // Check if all uses of all connected phis in this group are truncating. - bool all_uses_everywhere_truncating_int32 = true; - bool all_uses_everywhere_truncating_smi = true; - for (BitVector::Iterator it(connected_phis[i]); - !it.Done(); - it.Advance()) { - int index = it.Current(); - all_uses_everywhere_truncating_int32 &= - phi_list->at(index)->CheckFlag(HInstruction::kTruncatingToInt32); - all_uses_everywhere_truncating_smi &= - phi_list->at(index)->CheckFlag(HInstruction::kTruncatingToSmi); - done.Add(index); - } - - if (!all_uses_everywhere_truncating_int32) { - // Clear truncation flag of this group of connected phis. - for (BitVector::Iterator it(connected_phis[i]); - !it.Done(); - it.Advance()) { - int index = it.Current(); - phi_list->at(index)->ClearFlag(HInstruction::kTruncatingToInt32); - } - } - if (!all_uses_everywhere_truncating_smi) { - // Clear truncation flag of this group of connected phis. - for (BitVector::Iterator it(connected_phis[i]); - !it.Done(); - it.Advance()) { - int index = it.Current(); - phi_list->at(index)->ClearFlag(HInstruction::kTruncatingToSmi); - } - } - } - } - - // Simplify constant phi inputs where possible. - // This step uses kTruncatingToInt32 flags of phis. - for (int i = 0; i < phi_count; ++i) { - phi_list->at(i)->SimplifyConstantInputs(); - } - - // Use the phi reachability information from step 2 to - // sum up the non-phi use counts of all connected phis. - for (int i = 0; i < phi_count; ++i) { - HPhi* phi = phi_list->at(i); - for (BitVector::Iterator it(connected_phis[i]); - !it.Done(); - it.Advance()) { - int index = it.Current(); - HPhi* it_use = phi_list->at(index); - if (index != i) phi->AddNonPhiUsesFrom(it_use); // Don't count twice. - } - } - - // Initialize work list - for (int i = 0; i < graph()->blocks()->length(); ++i) { - HBasicBlock* block = graph()->blocks()->at(i); - const ZoneList* phis = block->phis(); - for (int j = 0; j < phis->length(); ++j) { - AddToWorklist(phis->at(j)); - } - - for (HInstructionIterator it(block); !it.Done(); it.Advance()) { - HInstruction* current = it.Current(); - AddToWorklist(current); - } - } - - // Do a fixed point iteration, trying to improve representations - while (!worklist_.is_empty()) { - HValue* current = worklist_.RemoveLast(); - current->InferRepresentation(this); - in_worklist_.Remove(current->id()); - } - - // Lastly: any instruction that we don't have representation information - // for defaults to Tagged. 
- for (int i = 0; i < graph()->blocks()->length(); ++i) { - HBasicBlock* block = graph()->blocks()->at(i); - const ZoneList* phis = block->phis(); - for (int j = 0; j < phis->length(); ++j) { - HPhi* phi = phis->at(j); - if (phi->representation().IsNone()) { - phi->ChangeRepresentation(Representation::Tagged()); - } - } - for (HInstructionIterator it(block); !it.Done(); it.Advance()) { - HInstruction* current = it.Current(); - if (current->representation().IsNone() && - current->CheckFlag(HInstruction::kFlexibleRepresentation)) { - if (current->CheckFlag(HInstruction::kCannotBeTagged)) { - current->ChangeRepresentation(Representation::Double()); - } else { - current->ChangeRepresentation(Representation::Tagged()); - } - } - } - } -} - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/hydrogen-infer-representation.h b/src/crankshaft/hydrogen-infer-representation.h deleted file mode 100644 index 92f2bc8c59..0000000000 --- a/src/crankshaft/hydrogen-infer-representation.h +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_HYDROGEN_INFER_REPRESENTATION_H_ -#define V8_CRANKSHAFT_HYDROGEN_INFER_REPRESENTATION_H_ - -#include "src/crankshaft/hydrogen.h" - -namespace v8 { -namespace internal { - - -class HInferRepresentationPhase : public HPhase { - public: - explicit HInferRepresentationPhase(HGraph* graph) - : HPhase("H_Infer representations", graph), - worklist_(8, zone()), - in_worklist_(graph->GetMaximumValueID(), zone()) { } - - void Run(); - void AddToWorklist(HValue* current); - - private: - ZoneList worklist_; - BitVector in_worklist_; - - DISALLOW_COPY_AND_ASSIGN(HInferRepresentationPhase); -}; - - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_HYDROGEN_INFER_REPRESENTATION_H_ diff --git a/src/crankshaft/hydrogen-infer-types.cc b/src/crankshaft/hydrogen-infer-types.cc deleted file mode 100644 index a2fd72e443..0000000000 --- a/src/crankshaft/hydrogen-infer-types.cc +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/hydrogen-infer-types.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - -void HInferTypesPhase::InferTypes(int from_inclusive, int to_inclusive) { - for (int i = from_inclusive; i <= to_inclusive; ++i) { - HBasicBlock* block = graph()->blocks()->at(i); - - const ZoneList* phis = block->phis(); - for (int j = 0; j < phis->length(); j++) { - phis->at(j)->UpdateInferredType(); - } - - for (HInstructionIterator it(block); !it.Done(); it.Advance()) { - it.Current()->UpdateInferredType(); - } - - if (block->IsLoopHeader()) { - HBasicBlock* last_back_edge = - block->loop_information()->GetLastBackEdge(); - InferTypes(i + 1, last_back_edge->block_id()); - // Skip all blocks already processed by the recursive call. - i = last_back_edge->block_id(); - // Update phis of the loop header now after the whole loop body is - // guaranteed to be processed. 
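Once a loop body has been typed, the pass re-derives the loop-header phis with a worklist, re-queueing the users of any value whose inferred type changed. A generic worklist sketch with stand-in Node objects and an integer "type" in place of HType; the graph and ids are illustrative.

#include <cstdio>
#include <queue>
#include <unordered_set>
#include <vector>

struct Node {
  int id;
  int type;               // 0 = unknown; larger means more precise here
  std::vector<int> uses;  // ids of nodes that consume this one
};

int main() {
  std::vector<Node> nodes(3);
  nodes[0] = {0, 1, {1}};  // node 0 already has a type and feeds node 1
  nodes[1] = {1, 0, {2}};  // node 1 feeds node 2
  nodes[2] = {2, 0, {}};

  std::queue<int> worklist;
  std::unordered_set<int> in_worklist;
  worklist.push(0);
  in_worklist.insert(0);

  // Drain the worklist; when a node's type improves, re-examine its uses.
  while (!worklist.empty()) {
    int id = worklist.front();
    worklist.pop();
    in_worklist.erase(id);
    for (int use : nodes[id].uses) {
      if (nodes[use].type < nodes[id].type) {
        nodes[use].type = nodes[id].type;  // the "UpdateInferredType" step
        if (in_worklist.insert(use).second) worklist.push(use);
      }
    }
  }

  for (const Node& n : nodes) {
    std::printf("node %d has type %d\n", n.id, n.type);
  }
  return 0;
}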
- for (int j = 0; j < block->phis()->length(); ++j) { - HPhi* phi = block->phis()->at(j); - worklist_.Add(phi, zone()); - in_worklist_.Add(phi->id()); - } - while (!worklist_.is_empty()) { - HValue* current = worklist_.RemoveLast(); - in_worklist_.Remove(current->id()); - if (current->UpdateInferredType()) { - for (HUseIterator it(current->uses()); !it.Done(); it.Advance()) { - HValue* use = it.value(); - if (!in_worklist_.Contains(use->id())) { - in_worklist_.Add(use->id()); - worklist_.Add(use, zone()); - } - } - } - } - DCHECK(in_worklist_.IsEmpty()); - } - } -} - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/hydrogen-infer-types.h b/src/crankshaft/hydrogen-infer-types.h deleted file mode 100644 index 8acfeabd60..0000000000 --- a/src/crankshaft/hydrogen-infer-types.h +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_HYDROGEN_INFER_TYPES_H_ -#define V8_CRANKSHAFT_HYDROGEN_INFER_TYPES_H_ - -#include "src/crankshaft/hydrogen.h" - -namespace v8 { -namespace internal { - - -class HInferTypesPhase : public HPhase { - public: - explicit HInferTypesPhase(HGraph* graph) - : HPhase("H_Inferring types", graph), worklist_(8, zone()), - in_worklist_(graph->GetMaximumValueID(), zone()) { } - - void Run() { - InferTypes(0, graph()->blocks()->length() - 1); - } - - private: - void InferTypes(int from_inclusive, int to_inclusive); - - ZoneList worklist_; - BitVector in_worklist_; - - DISALLOW_COPY_AND_ASSIGN(HInferTypesPhase); -}; - - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_HYDROGEN_INFER_TYPES_H_ diff --git a/src/crankshaft/hydrogen-instructions.cc b/src/crankshaft/hydrogen-instructions.cc deleted file mode 100644 index 951cd9b0b8..0000000000 --- a/src/crankshaft/hydrogen-instructions.cc +++ /dev/null @@ -1,4043 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/hydrogen-instructions.h" - -#include "src/base/bits.h" -#include "src/base/ieee754.h" -#include "src/base/safe_math.h" -#include "src/codegen.h" -#include "src/crankshaft/hydrogen-infer-representation.h" -#include "src/double.h" -#include "src/elements.h" -#include "src/factory.h" -#include "src/objects-inl.h" - -#if V8_TARGET_ARCH_IA32 -#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT -#elif V8_TARGET_ARCH_X64 -#include "src/crankshaft/x64/lithium-x64.h" // NOLINT -#elif V8_TARGET_ARCH_ARM64 -#include "src/crankshaft/arm64/lithium-arm64.h" // NOLINT -#elif V8_TARGET_ARCH_ARM -#include "src/crankshaft/arm/lithium-arm.h" // NOLINT -#elif V8_TARGET_ARCH_PPC -#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT -#elif V8_TARGET_ARCH_MIPS -#include "src/crankshaft/mips/lithium-mips.h" // NOLINT -#elif V8_TARGET_ARCH_MIPS64 -#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT -#elif V8_TARGET_ARCH_S390 -#include "src/crankshaft/s390/lithium-s390.h" // NOLINT -#elif V8_TARGET_ARCH_X87 -#include "src/crankshaft/x87/lithium-x87.h" // NOLINT -#else -#error Unsupported target architecture. 
-#endif - -namespace v8 { -namespace internal { - -#define DEFINE_COMPILE(type) \ - LInstruction* H##type::CompileToLithium(LChunkBuilder* builder) { \ - return builder->Do##type(this); \ - } -HYDROGEN_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE) -#undef DEFINE_COMPILE - -Representation RepresentationFromMachineType(MachineType type) { - if (type == MachineType::Int32()) { - return Representation::Integer32(); - } - - if (type == MachineType::TaggedSigned()) { - return Representation::Smi(); - } - - if (type == MachineType::Pointer()) { - return Representation::External(); - } - - return Representation::Tagged(); -} - -Isolate* HValue::isolate() const { - DCHECK(block() != NULL); - return block()->isolate(); -} - - -void HValue::AssumeRepresentation(Representation r) { - if (CheckFlag(kFlexibleRepresentation)) { - ChangeRepresentation(r); - // The representation of the value is dictated by type feedback and - // will not be changed later. - ClearFlag(kFlexibleRepresentation); - } -} - - -void HValue::InferRepresentation(HInferRepresentationPhase* h_infer) { - DCHECK(CheckFlag(kFlexibleRepresentation)); - Representation new_rep = RepresentationFromInputs(); - UpdateRepresentation(new_rep, h_infer, "inputs"); - new_rep = RepresentationFromUses(); - UpdateRepresentation(new_rep, h_infer, "uses"); - if (representation().IsSmi() && HasNonSmiUse()) { - UpdateRepresentation( - Representation::Integer32(), h_infer, "use requirements"); - } -} - - -Representation HValue::RepresentationFromUses() { - if (HasNoUses()) return Representation::None(); - Representation result = Representation::None(); - - for (HUseIterator it(uses()); !it.Done(); it.Advance()) { - HValue* use = it.value(); - Representation rep = use->observed_input_representation(it.index()); - result = result.generalize(rep); - - if (FLAG_trace_representation) { - PrintF("#%d %s is used by #%d %s as %s%s\n", - id(), Mnemonic(), use->id(), use->Mnemonic(), rep.Mnemonic(), - (use->CheckFlag(kTruncatingToInt32) ? "-trunc" : "")); - } - } - if (IsPhi()) { - result = result.generalize( - HPhi::cast(this)->representation_from_indirect_uses()); - } - - // External representations are dealt with separately. - return result.IsExternal() ? 
Representation::None() : result; -} - - -void HValue::UpdateRepresentation(Representation new_rep, - HInferRepresentationPhase* h_infer, - const char* reason) { - Representation r = representation(); - if (new_rep.is_more_general_than(r)) { - if (CheckFlag(kCannotBeTagged) && new_rep.IsTagged()) return; - if (FLAG_trace_representation) { - PrintF("Changing #%d %s representation %s -> %s based on %s\n", - id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason); - } - ChangeRepresentation(new_rep); - AddDependantsToWorklist(h_infer); - } -} - - -void HValue::AddDependantsToWorklist(HInferRepresentationPhase* h_infer) { - for (HUseIterator it(uses()); !it.Done(); it.Advance()) { - h_infer->AddToWorklist(it.value()); - } - for (int i = 0; i < OperandCount(); ++i) { - h_infer->AddToWorklist(OperandAt(i)); - } -} - - -static int32_t ConvertAndSetOverflow(Representation r, - int64_t result, - bool* overflow) { - if (r.IsSmi()) { - if (result > Smi::kMaxValue) { - *overflow = true; - return Smi::kMaxValue; - } - if (result < Smi::kMinValue) { - *overflow = true; - return Smi::kMinValue; - } - } else { - if (result > kMaxInt) { - *overflow = true; - return kMaxInt; - } - if (result < kMinInt) { - *overflow = true; - return kMinInt; - } - } - return static_cast<int32_t>(result); -} - - -static int32_t AddWithoutOverflow(Representation r, - int32_t a, - int32_t b, - bool* overflow) { - int64_t result = static_cast<int64_t>(a) + static_cast<int64_t>(b); - return ConvertAndSetOverflow(r, result, overflow); -} - - -static int32_t SubWithoutOverflow(Representation r, - int32_t a, - int32_t b, - bool* overflow) { - int64_t result = static_cast<int64_t>(a) - static_cast<int64_t>(b); - return ConvertAndSetOverflow(r, result, overflow); -} - - -static int32_t MulWithoutOverflow(const Representation& r, - int32_t a, - int32_t b, - bool* overflow) { - int64_t result = static_cast<int64_t>(a) * static_cast<int64_t>(b); - return ConvertAndSetOverflow(r, result, overflow); -} - - -int32_t Range::Mask() const { - if (lower_ == upper_) return lower_; - if (lower_ >= 0) { - int32_t res = 1; - while (res < upper_) { - res = (res << 1) | 1; - } - return res; - } - return 0xffffffff; -} - - -void Range::AddConstant(int32_t value) { - if (value == 0) return; - bool may_overflow = false; // Overflow is ignored here.
- Representation r = Representation::Integer32(); - lower_ = AddWithoutOverflow(r, lower_, value, &may_overflow); - upper_ = AddWithoutOverflow(r, upper_, value, &may_overflow); -#ifdef DEBUG - Verify(); -#endif -} - - -void Range::Intersect(Range* other) { - upper_ = Min(upper_, other->upper_); - lower_ = Max(lower_, other->lower_); - bool b = CanBeMinusZero() && other->CanBeMinusZero(); - set_can_be_minus_zero(b); -} - - -void Range::Union(Range* other) { - upper_ = Max(upper_, other->upper_); - lower_ = Min(lower_, other->lower_); - bool b = CanBeMinusZero() || other->CanBeMinusZero(); - set_can_be_minus_zero(b); -} - - -void Range::CombinedMax(Range* other) { - upper_ = Max(upper_, other->upper_); - lower_ = Max(lower_, other->lower_); - set_can_be_minus_zero(CanBeMinusZero() || other->CanBeMinusZero()); -} - - -void Range::CombinedMin(Range* other) { - upper_ = Min(upper_, other->upper_); - lower_ = Min(lower_, other->lower_); - set_can_be_minus_zero(CanBeMinusZero() || other->CanBeMinusZero()); -} - - -void Range::Sar(int32_t value) { - int32_t bits = value & 0x1F; - lower_ = lower_ >> bits; - upper_ = upper_ >> bits; - set_can_be_minus_zero(false); -} - - -void Range::Shl(int32_t value) { - int32_t bits = value & 0x1F; - int old_lower = lower_; - int old_upper = upper_; - lower_ = lower_ << bits; - upper_ = upper_ << bits; - if (old_lower != lower_ >> bits || old_upper != upper_ >> bits) { - upper_ = kMaxInt; - lower_ = kMinInt; - } - set_can_be_minus_zero(false); -} - - -bool Range::AddAndCheckOverflow(const Representation& r, Range* other) { - bool may_overflow = false; - lower_ = AddWithoutOverflow(r, lower_, other->lower(), &may_overflow); - upper_ = AddWithoutOverflow(r, upper_, other->upper(), &may_overflow); - if (may_overflow) { - Clear(); - } else { - KeepOrder(); - } -#ifdef DEBUG - Verify(); -#endif - return may_overflow; -} - - -bool Range::SubAndCheckOverflow(const Representation& r, Range* other) { - bool may_overflow = false; - lower_ = SubWithoutOverflow(r, lower_, other->upper(), &may_overflow); - upper_ = SubWithoutOverflow(r, upper_, other->lower(), &may_overflow); - if (may_overflow) { - Clear(); - } else { - KeepOrder(); - } -#ifdef DEBUG - Verify(); -#endif - return may_overflow; -} - -void Range::Clear() { - lower_ = kMinInt; - upper_ = kMaxInt; -} - -void Range::KeepOrder() { - if (lower_ > upper_) { - int32_t tmp = lower_; - lower_ = upper_; - upper_ = tmp; - } -} - - -#ifdef DEBUG -void Range::Verify() const { - DCHECK(lower_ <= upper_); -} -#endif - - -bool Range::MulAndCheckOverflow(const Representation& r, Range* other) { - bool may_overflow = false; - int v1 = MulWithoutOverflow(r, lower_, other->lower(), &may_overflow); - int v2 = MulWithoutOverflow(r, lower_, other->upper(), &may_overflow); - int v3 = MulWithoutOverflow(r, upper_, other->lower(), &may_overflow); - int v4 = MulWithoutOverflow(r, upper_, other->upper(), &may_overflow); - if (may_overflow) { - Clear(); - } else { - lower_ = Min(Min(v1, v2), Min(v3, v4)); - upper_ = Max(Max(v1, v2), Max(v3, v4)); - } -#ifdef DEBUG - Verify(); -#endif - return may_overflow; -} - - -bool HValue::IsDefinedAfter(HBasicBlock* other) const { - return block()->block_id() > other->block_id(); -} - - -HUseListNode* HUseListNode::tail() { - // Skip and remove dead items in the use list. 
- while (tail_ != NULL && tail_->value()->CheckFlag(HValue::kIsDead)) { - tail_ = tail_->tail_; - } - return tail_; -} - - -bool HValue::CheckUsesForFlag(Flag f) const { - for (HUseIterator it(uses()); !it.Done(); it.Advance()) { - if (it.value()->IsSimulate()) continue; - if (!it.value()->CheckFlag(f)) return false; - } - return true; -} - - -bool HValue::CheckUsesForFlag(Flag f, HValue** value) const { - for (HUseIterator it(uses()); !it.Done(); it.Advance()) { - if (it.value()->IsSimulate()) continue; - if (!it.value()->CheckFlag(f)) { - *value = it.value(); - return false; - } - } - return true; -} - - -bool HValue::HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) const { - bool return_value = false; - for (HUseIterator it(uses()); !it.Done(); it.Advance()) { - if (it.value()->IsSimulate()) continue; - if (!it.value()->CheckFlag(f)) return false; - return_value = true; - } - return return_value; -} - - -HUseIterator::HUseIterator(HUseListNode* head) : next_(head) { - Advance(); -} - - -void HUseIterator::Advance() { - current_ = next_; - if (current_ != NULL) { - next_ = current_->tail(); - value_ = current_->value(); - index_ = current_->index(); - } -} - - -int HValue::UseCount() const { - int count = 0; - for (HUseIterator it(uses()); !it.Done(); it.Advance()) ++count; - return count; -} - - -HUseListNode* HValue::RemoveUse(HValue* value, int index) { - HUseListNode* previous = NULL; - HUseListNode* current = use_list_; - while (current != NULL) { - if (current->value() == value && current->index() == index) { - if (previous == NULL) { - use_list_ = current->tail(); - } else { - previous->set_tail(current->tail()); - } - break; - } - - previous = current; - current = current->tail(); - } - -#ifdef DEBUG - // Do not reuse use list nodes in debug mode, zap them. 
- if (current != NULL) { - HUseListNode* temp = - new(block()->zone()) - HUseListNode(current->value(), current->index(), NULL); - current->Zap(); - current = temp; - } -#endif - return current; -} - - -bool HValue::Equals(HValue* other) { - if (other->opcode() != opcode()) return false; - if (!other->representation().Equals(representation())) return false; - if (!other->type_.Equals(type_)) return false; - if (other->flags() != flags()) return false; - if (OperandCount() != other->OperandCount()) return false; - for (int i = 0; i < OperandCount(); ++i) { - if (OperandAt(i)->id() != other->OperandAt(i)->id()) return false; - } - bool result = DataEquals(other); - DCHECK(!result || Hashcode() == other->Hashcode()); - return result; -} - - -intptr_t HValue::Hashcode() { - intptr_t result = opcode(); - int count = OperandCount(); - for (int i = 0; i < count; ++i) { - result = result * 19 + OperandAt(i)->id() + (result >> 7); - } - return result; -} - - -const char* HValue::Mnemonic() const { - switch (opcode()) { -#define MAKE_CASE(type) case k##type: return #type; - HYDROGEN_CONCRETE_INSTRUCTION_LIST(MAKE_CASE) -#undef MAKE_CASE - case kPhi: return "Phi"; - default: return ""; - } -} - - -bool HValue::CanReplaceWithDummyUses() { - return FLAG_unreachable_code_elimination && - !(block()->IsReachable() || - IsBlockEntry() || - IsControlInstruction() || - IsArgumentsObject() || - IsCapturedObject() || - IsSimulate() || - IsEnterInlined() || - IsLeaveInlined()); -} - - -bool HValue::IsInteger32Constant() { - return IsConstant() && HConstant::cast(this)->HasInteger32Value(); -} - - -int32_t HValue::GetInteger32Constant() { - return HConstant::cast(this)->Integer32Value(); -} - - -bool HValue::EqualsInteger32Constant(int32_t value) { - return IsInteger32Constant() && GetInteger32Constant() == value; -} - - -void HValue::SetOperandAt(int index, HValue* value) { - RegisterUse(index, value); - InternalSetOperandAt(index, value); -} - - -void HValue::DeleteAndReplaceWith(HValue* other) { - // We replace all uses first, so Delete can assert that there are none. - if (other != NULL) ReplaceAllUsesWith(other); - Kill(); - DeleteFromGraph(); -} - - -void HValue::ReplaceAllUsesWith(HValue* other) { - while (use_list_ != NULL) { - HUseListNode* list_node = use_list_; - HValue* value = list_node->value(); - DCHECK(!value->block()->IsStartBlock()); - value->InternalSetOperandAt(list_node->index(), other); - use_list_ = list_node->tail(); - list_node->set_tail(other->use_list_); - other->use_list_ = list_node; - } -} - - -void HValue::Kill() { - // Instead of going through the entire use list of each operand, we only - // check the first item in each use list and rely on the tail() method to - // skip dead items, removing them lazily next time we traverse the list. 
- SetFlag(kIsDead); - for (int i = 0; i < OperandCount(); ++i) { - HValue* operand = OperandAt(i); - if (operand == NULL) continue; - HUseListNode* first = operand->use_list_; - if (first != NULL && first->value()->CheckFlag(kIsDead)) { - operand->use_list_ = first->tail(); - } - } -} - - -void HValue::SetBlock(HBasicBlock* block) { - DCHECK(block_ == NULL || block == NULL); - block_ = block; - if (id_ == kNoNumber && block != NULL) { - id_ = block->graph()->GetNextValueID(this); - } -} - - -std::ostream& operator<<(std::ostream& os, const HValue& v) { - return v.PrintTo(os); -} - - -std::ostream& operator<<(std::ostream& os, const TypeOf& t) { - if (t.value->representation().IsTagged() && - !t.value->type().Equals(HType::Tagged())) - return os; - return os << " type:" << t.value->type(); -} - - -std::ostream& operator<<(std::ostream& os, const ChangesOf& c) { - GVNFlagSet changes_flags = c.value->ChangesFlags(); - if (changes_flags.IsEmpty()) return os; - os << " changes["; - if (changes_flags == c.value->AllSideEffectsFlagSet()) { - os << "*"; - } else { - bool add_comma = false; -#define PRINT_DO(Type) \ - if (changes_flags.Contains(k##Type)) { \ - if (add_comma) os << ","; \ - add_comma = true; \ - os << #Type; \ - } - GVN_TRACKED_FLAG_LIST(PRINT_DO); - GVN_UNTRACKED_FLAG_LIST(PRINT_DO); -#undef PRINT_DO - } - return os << "]"; -} - - -bool HValue::HasMonomorphicJSObjectType() { - return !GetMonomorphicJSObjectMap().is_null(); -} - - -bool HValue::UpdateInferredType() { - HType type = CalculateInferredType(); - bool result = (!type.Equals(type_)); - type_ = type; - return result; -} - - -void HValue::RegisterUse(int index, HValue* new_value) { - HValue* old_value = OperandAt(index); - if (old_value == new_value) return; - - HUseListNode* removed = NULL; - if (old_value != NULL) { - removed = old_value->RemoveUse(this, index); - } - - if (new_value != NULL) { - if (removed == NULL) { - new_value->use_list_ = new(new_value->block()->zone()) HUseListNode( - this, index, new_value->use_list_); - } else { - removed->set_tail(new_value->use_list_); - new_value->use_list_ = removed; - } - } -} - - -void HValue::AddNewRange(Range* r, Zone* zone) { - if (!HasRange()) ComputeInitialRange(zone); - if (!HasRange()) range_ = new(zone) Range(); - DCHECK(HasRange()); - r->StackUpon(range_); - range_ = r; -} - - -void HValue::RemoveLastAddedRange() { - DCHECK(HasRange()); - DCHECK(range_->next() != NULL); - range_ = range_->next(); -} - - -void HValue::ComputeInitialRange(Zone* zone) { - DCHECK(!HasRange()); - range_ = InferRange(zone); - DCHECK(HasRange()); -} - - -std::ostream& HInstruction::PrintTo(std::ostream& os) const { // NOLINT - os << Mnemonic() << " "; - PrintDataTo(os) << ChangesOf(this) << TypeOf(this); - if (CheckFlag(HValue::kHasNoObservableSideEffects)) os << " [noOSE]"; - if (CheckFlag(HValue::kIsDead)) os << " [dead]"; - return os; -} - - -std::ostream& HInstruction::PrintDataTo(std::ostream& os) const { // NOLINT - for (int i = 0; i < OperandCount(); ++i) { - if (i > 0) os << " "; - os << NameOf(OperandAt(i)); - } - return os; -} - - -void HInstruction::Unlink() { - DCHECK(IsLinked()); - DCHECK(!IsControlInstruction()); // Must never move control instructions. - DCHECK(!IsBlockEntry()); // Doesn't make sense to delete these. 
- DCHECK(previous_ != NULL); - previous_->next_ = next_; - if (next_ == NULL) { - DCHECK(block()->last() == this); - block()->set_last(previous_); - } else { - next_->previous_ = previous_; - } - clear_block(); -} - - -void HInstruction::InsertBefore(HInstruction* next) { - DCHECK(!IsLinked()); - DCHECK(!next->IsBlockEntry()); - DCHECK(!IsControlInstruction()); - DCHECK(!next->block()->IsStartBlock()); - DCHECK(next->previous_ != NULL); - HInstruction* prev = next->previous(); - prev->next_ = this; - next->previous_ = this; - next_ = next; - previous_ = prev; - SetBlock(next->block()); - if (!has_position() && next->has_position()) { - set_position(next->position()); - } -} - - -void HInstruction::InsertAfter(HInstruction* previous) { - DCHECK(!IsLinked()); - DCHECK(!previous->IsControlInstruction()); - DCHECK(!IsControlInstruction() || previous->next_ == NULL); - HBasicBlock* block = previous->block(); - // Never insert anything except constants into the start block after finishing - // it. - if (block->IsStartBlock() && block->IsFinished() && !IsConstant()) { - DCHECK(block->end()->SecondSuccessor() == NULL); - InsertAfter(block->end()->FirstSuccessor()->first()); - return; - } - - // If we're inserting after an instruction with side-effects that is - // followed by a simulate instruction, we need to insert after the - // simulate instruction instead. - HInstruction* next = previous->next_; - if (previous->HasObservableSideEffects() && next != NULL) { - DCHECK(next->IsSimulate()); - previous = next; - next = previous->next_; - } - - previous_ = previous; - next_ = next; - SetBlock(block); - previous->next_ = this; - if (next != NULL) next->previous_ = this; - if (block->last() == previous) { - block->set_last(this); - } - if (!has_position() && previous->has_position()) { - set_position(previous->position()); - } -} - - -bool HInstruction::Dominates(HInstruction* other) { - if (block() != other->block()) { - return block()->Dominates(other->block()); - } - // Both instructions are in the same basic block. This instruction - // should precede the other one in order to dominate it. - for (HInstruction* instr = next(); instr != NULL; instr = instr->next()) { - if (instr == other) { - return true; - } - } - return false; -} - - -#ifdef DEBUG -void HInstruction::Verify() { - // Verify that input operands are defined before use. - HBasicBlock* cur_block = block(); - for (int i = 0; i < OperandCount(); ++i) { - HValue* other_operand = OperandAt(i); - if (other_operand == NULL) continue; - HBasicBlock* other_block = other_operand->block(); - if (cur_block == other_block) { - if (!other_operand->IsPhi()) { - HInstruction* cur = this->previous(); - while (cur != NULL) { - if (cur == other_operand) break; - cur = cur->previous(); - } - // Must reach other operand in the same block! - DCHECK(cur == other_operand); - } - } else { - // If the following assert fires, you may have forgotten an - // AddInstruction. - DCHECK(other_block->Dominates(cur_block)); - } - } - - // Verify that instructions that may have side-effects are followed - // by a simulate instruction. - if (HasObservableSideEffects() && !IsOsrEntry()) { - DCHECK(next()->IsSimulate()); - } - - // Verify that instructions that can be eliminated by GVN have overridden - // HValue::DataEquals. The default implementation is UNREACHABLE. We - // don't actually care whether DataEquals returns true or false here. - if (CheckFlag(kUseGVN)) DataEquals(this); - - // Verify that all uses are in the graph. 
- for (HUseIterator use = uses(); !use.Done(); use.Advance()) { - if (use.value()->IsInstruction()) { - DCHECK(HInstruction::cast(use.value())->IsLinked()); - } - } -} -#endif - - -bool HInstruction::CanDeoptimize() { - switch (opcode()) { - case HValue::kAbnormalExit: - case HValue::kAccessArgumentsAt: - case HValue::kAllocate: - case HValue::kArgumentsElements: - case HValue::kArgumentsLength: - case HValue::kArgumentsObject: - case HValue::kBlockEntry: - case HValue::kCallNewArray: - case HValue::kCapturedObject: - case HValue::kClassOfTestAndBranch: - case HValue::kCompareGeneric: - case HValue::kCompareHoleAndBranch: - case HValue::kCompareMap: - case HValue::kCompareNumericAndBranch: - case HValue::kCompareObjectEqAndBranch: - case HValue::kConstant: - case HValue::kContext: - case HValue::kDebugBreak: - case HValue::kDeclareGlobals: - case HValue::kDummyUse: - case HValue::kEnterInlined: - case HValue::kEnvironmentMarker: - case HValue::kForceRepresentation: - case HValue::kGoto: - case HValue::kHasInstanceTypeAndBranch: - case HValue::kInnerAllocatedObject: - case HValue::kIsSmiAndBranch: - case HValue::kIsStringAndBranch: - case HValue::kIsUndetectableAndBranch: - case HValue::kLeaveInlined: - case HValue::kLoadFieldByIndex: - case HValue::kLoadNamedField: - case HValue::kLoadRoot: - case HValue::kMathMinMax: - case HValue::kParameter: - case HValue::kPhi: - case HValue::kPushArguments: - case HValue::kReturn: - case HValue::kSeqStringGetChar: - case HValue::kStoreCodeEntry: - case HValue::kStoreKeyed: - case HValue::kStoreNamedField: - case HValue::kStringCharCodeAt: - case HValue::kStringCharFromCode: - case HValue::kThisFunction: - case HValue::kTypeofIsAndBranch: - case HValue::kUnknownOSRValue: - case HValue::kUseConst: - return false; - - case HValue::kAdd: - case HValue::kApplyArguments: - case HValue::kBitwise: - case HValue::kBoundsCheck: - case HValue::kBranch: - case HValue::kCallRuntime: - case HValue::kCallWithDescriptor: - case HValue::kChange: - case HValue::kCheckArrayBufferNotNeutered: - case HValue::kCheckHeapObject: - case HValue::kCheckInstanceType: - case HValue::kCheckMapValue: - case HValue::kCheckMaps: - case HValue::kCheckSmi: - case HValue::kCheckValue: - case HValue::kClampToUint8: - case HValue::kDeoptimize: - case HValue::kDiv: - case HValue::kForInCacheArray: - case HValue::kForInPrepareMap: - case HValue::kHasInPrototypeChainAndBranch: - case HValue::kInvokeFunction: - case HValue::kLoadContextSlot: - case HValue::kLoadFunctionPrototype: - case HValue::kLoadKeyed: - case HValue::kMathFloorOfDiv: - case HValue::kMaybeGrowElements: - case HValue::kMod: - case HValue::kMul: - case HValue::kOsrEntry: - case HValue::kPower: - case HValue::kPrologue: - case HValue::kRor: - case HValue::kSar: - case HValue::kSeqStringSetChar: - case HValue::kShl: - case HValue::kShr: - case HValue::kSimulate: - case HValue::kStackCheck: - case HValue::kStoreContextSlot: - case HValue::kStringAdd: - case HValue::kStringCompareAndBranch: - case HValue::kSub: - case HValue::kTransitionElementsKind: - case HValue::kTrapAllocationMemento: - case HValue::kTypeof: - case HValue::kUnaryMathOperation: - case HValue::kWrapReceiver: - return true; - } - UNREACHABLE(); -} - - -std::ostream& operator<<(std::ostream& os, const NameOf& v) { - return os << v.value->representation().Mnemonic() << v.value->id(); -} - -std::ostream& HDummyUse::PrintDataTo(std::ostream& os) const { // NOLINT - return os << NameOf(value()); -} - - -std::ostream& HEnvironmentMarker::PrintDataTo( - std::ostream& 
os) const { // NOLINT - return os << (kind() == BIND ? "bind" : "lookup") << " var[" << index() - << "]"; -} - - -std::ostream& HUnaryCall::PrintDataTo(std::ostream& os) const { // NOLINT - return os << NameOf(value()) << " #" << argument_count(); -} - - -std::ostream& HBinaryCall::PrintDataTo(std::ostream& os) const { // NOLINT - return os << NameOf(first()) << " " << NameOf(second()) << " #" - << argument_count(); -} - -std::ostream& HInvokeFunction::PrintTo(std::ostream& os) const { // NOLINT - if (tail_call_mode() == TailCallMode::kAllow) os << "Tail"; - return HBinaryCall::PrintTo(os); -} - -std::ostream& HInvokeFunction::PrintDataTo(std::ostream& os) const { // NOLINT - HBinaryCall::PrintDataTo(os); - if (syntactic_tail_call_mode() == TailCallMode::kAllow) { - os << ", JSTailCall"; - } - return os; -} - -std::ostream& HBoundsCheck::PrintDataTo(std::ostream& os) const { // NOLINT - os << NameOf(index()) << " " << NameOf(length()); - if (base() != NULL && (offset() != 0 || scale() != 0)) { - os << " base: (("; - if (base() != index()) { - os << NameOf(index()); - } else { - os << "index"; - } - os << " + " << offset() << ") >> " << scale() << ")"; - } - if (skip_check()) os << " [DISABLED]"; - return os; -} - - -void HBoundsCheck::InferRepresentation(HInferRepresentationPhase* h_infer) { - DCHECK(CheckFlag(kFlexibleRepresentation)); - HValue* actual_index = index()->ActualValue(); - HValue* actual_length = length()->ActualValue(); - Representation index_rep = actual_index->representation(); - Representation length_rep = actual_length->representation(); - if (index_rep.IsTagged() && actual_index->type().IsSmi()) { - index_rep = Representation::Smi(); - } - if (length_rep.IsTagged() && actual_length->type().IsSmi()) { - length_rep = Representation::Smi(); - } - Representation r = index_rep.generalize(length_rep); - if (r.is_more_general_than(Representation::Integer32())) { - r = Representation::Integer32(); - } - UpdateRepresentation(r, h_infer, "boundscheck"); -} - - -Range* HBoundsCheck::InferRange(Zone* zone) { - Representation r = representation(); - if (r.IsSmiOrInteger32() && length()->HasRange()) { - int upper = length()->range()->upper() - (allow_equality() ? 0 : 1); - int lower = 0; - - Range* result = new(zone) Range(lower, upper); - if (index()->HasRange()) { - result->Intersect(index()->range()); - } - - // In case of Smi representation, clamp result to Smi::kMaxValue. 
- if (r.IsSmi()) result->ClampToSmi(); - return result; - } - return HValue::InferRange(zone); -} - - -std::ostream& HCallWithDescriptor::PrintDataTo( - std::ostream& os) const { // NOLINT - for (int i = 0; i < OperandCount(); i++) { - os << NameOf(OperandAt(i)) << " "; - } - os << "#" << argument_count(); - if (syntactic_tail_call_mode() == TailCallMode::kAllow) { - os << ", JSTailCall"; - } - return os; -} - - -std::ostream& HCallNewArray::PrintDataTo(std::ostream& os) const { // NOLINT - os << ElementsKindToString(elements_kind()) << " "; - return HBinaryCall::PrintDataTo(os); -} - - -std::ostream& HCallRuntime::PrintDataTo(std::ostream& os) const { // NOLINT - os << function()->name << " "; - if (save_doubles() == kSaveFPRegs) os << "[save doubles] "; - return os << "#" << argument_count(); -} - -std::ostream& HClassOfTestAndBranch::PrintDataTo( - std::ostream& os) const { // NOLINT - return os << "class_of_test(" << NameOf(value()) << ", \"" - << class_name()->ToCString().get() << "\")"; -} - -std::ostream& HWrapReceiver::PrintDataTo(std::ostream& os) const { // NOLINT - return os << NameOf(receiver()) << " " << NameOf(function()); -} - - -std::ostream& HAccessArgumentsAt::PrintDataTo( - std::ostream& os) const { // NOLINT - return os << NameOf(arguments()) << "[" << NameOf(index()) << "], length " - << NameOf(length()); -} - - -std::ostream& HControlInstruction::PrintDataTo( - std::ostream& os) const { // NOLINT - os << " goto ("; - bool first_block = true; - for (HSuccessorIterator it(this); !it.Done(); it.Advance()) { - if (!first_block) os << ", "; - os << *it.Current(); - first_block = false; - } - return os << ")"; -} - - -std::ostream& HUnaryControlInstruction::PrintDataTo( - std::ostream& os) const { // NOLINT - os << NameOf(value()); - return HControlInstruction::PrintDataTo(os); -} - - -std::ostream& HReturn::PrintDataTo(std::ostream& os) const { // NOLINT - return os << NameOf(value()) << " (pop " << NameOf(parameter_count()) - << " values)"; -} - - -Representation HBranch::observed_input_representation(int index) { - if (expected_input_types_ & - (ToBooleanHint::kNull | ToBooleanHint::kReceiver | - ToBooleanHint::kString | ToBooleanHint::kSymbol)) { - return Representation::Tagged(); - } - if (expected_input_types_ & ToBooleanHint::kUndefined) { - if (expected_input_types_ & ToBooleanHint::kHeapNumber) { - return Representation::Double(); - } - return Representation::Tagged(); - } - if (expected_input_types_ & ToBooleanHint::kHeapNumber) { - return Representation::Double(); - } - if (expected_input_types_ & ToBooleanHint::kSmallInteger) { - return Representation::Smi(); - } - return Representation::None(); -} - - -bool HBranch::KnownSuccessorBlock(HBasicBlock** block) { - HValue* value = this->value(); - if (value->EmitAtUses()) { - DCHECK(value->IsConstant()); - DCHECK(!value->representation().IsDouble()); - *block = HConstant::cast(value)->BooleanValue() - ? 
FirstSuccessor() - : SecondSuccessor(); - return true; - } - *block = NULL; - return false; -} - - -std::ostream& HBranch::PrintDataTo(std::ostream& os) const { // NOLINT - return HUnaryControlInstruction::PrintDataTo(os) << " " - << expected_input_types(); -} - - -std::ostream& HCompareMap::PrintDataTo(std::ostream& os) const { // NOLINT - os << NameOf(value()) << " (" << *map().handle() << ")"; - HControlInstruction::PrintDataTo(os); - if (known_successor_index() == 0) { - os << " [true]"; - } else if (known_successor_index() == 1) { - os << " [false]"; - } - return os; -} - - -const char* HUnaryMathOperation::OpName() const { - switch (op()) { - case kMathFloor: - return "floor"; - case kMathFround: - return "fround"; - case kMathRound: - return "round"; - case kMathAbs: - return "abs"; - case kMathCos: - return "cos"; - case kMathLog: - return "log"; - case kMathExp: - return "exp"; - case kMathSin: - return "sin"; - case kMathSqrt: - return "sqrt"; - case kMathPowHalf: - return "pow-half"; - case kMathClz32: - return "clz32"; - default: - UNREACHABLE(); - } -} - - -Range* HUnaryMathOperation::InferRange(Zone* zone) { - Representation r = representation(); - if (op() == kMathClz32) return new(zone) Range(0, 32); - if (r.IsSmiOrInteger32() && value()->HasRange()) { - if (op() == kMathAbs) { - int upper = value()->range()->upper(); - int lower = value()->range()->lower(); - bool spans_zero = value()->range()->CanBeZero(); - // Math.abs(kMinInt) overflows its representation, on which the - // instruction deopts. Hence clamp it to kMaxInt. - int abs_upper = upper == kMinInt ? kMaxInt : abs(upper); - int abs_lower = lower == kMinInt ? kMaxInt : abs(lower); - Range* result = - new(zone) Range(spans_zero ? 0 : Min(abs_lower, abs_upper), - Max(abs_lower, abs_upper)); - // In case of Smi representation, clamp Math.abs(Smi::kMinValue) to - // Smi::kMaxValue. 
- if (r.IsSmi()) result->ClampToSmi(); - return result; - } - } - return HValue::InferRange(zone); -} - - -std::ostream& HUnaryMathOperation::PrintDataTo( - std::ostream& os) const { // NOLINT - return os << OpName() << " " << NameOf(value()); -} - - -std::ostream& HUnaryOperation::PrintDataTo(std::ostream& os) const { // NOLINT - return os << NameOf(value()); -} - - -std::ostream& HHasInstanceTypeAndBranch::PrintDataTo( - std::ostream& os) const { // NOLINT - os << NameOf(value()); - switch (from_) { - case FIRST_JS_RECEIVER_TYPE: - if (to_ == LAST_TYPE) os << " spec_object"; - break; - case JS_REGEXP_TYPE: - if (to_ == JS_REGEXP_TYPE) os << " reg_exp"; - break; - case JS_ARRAY_TYPE: - if (to_ == JS_ARRAY_TYPE) os << " array"; - break; - case JS_FUNCTION_TYPE: - if (to_ == JS_FUNCTION_TYPE) os << " function"; - break; - default: - break; - } - return os; -} - - -std::ostream& HTypeofIsAndBranch::PrintDataTo( - std::ostream& os) const { // NOLINT - os << NameOf(value()) << " == " << type_literal()->ToCString().get(); - return HControlInstruction::PrintDataTo(os); -} - - -namespace { - -String* TypeOfString(HConstant* constant, Isolate* isolate) { - Heap* heap = isolate->heap(); - if (constant->HasNumberValue()) return heap->number_string(); - if (constant->HasStringValue()) return heap->string_string(); - switch (constant->GetInstanceType()) { - case ODDBALL_TYPE: { - Unique unique = constant->GetUnique(); - if (unique.IsKnownGlobal(heap->true_value()) || - unique.IsKnownGlobal(heap->false_value())) { - return heap->boolean_string(); - } - if (unique.IsKnownGlobal(heap->null_value())) { - return heap->object_string(); - } - DCHECK(unique.IsKnownGlobal(heap->undefined_value())); - return heap->undefined_string(); - } - case SYMBOL_TYPE: - return heap->symbol_string(); - default: - if (constant->IsUndetectable()) return heap->undefined_string(); - if (constant->IsCallable()) return heap->function_string(); - return heap->object_string(); - } -} - -} // namespace - - -bool HTypeofIsAndBranch::KnownSuccessorBlock(HBasicBlock** block) { - if (FLAG_fold_constants && value()->IsConstant()) { - HConstant* constant = HConstant::cast(value()); - String* type_string = TypeOfString(constant, isolate()); - bool same_type = type_literal_.IsKnownGlobal(type_string); - *block = same_type ? FirstSuccessor() : SecondSuccessor(); - return true; - } else if (value()->representation().IsSpecialization()) { - bool number_type = - type_literal_.IsKnownGlobal(isolate()->heap()->number_string()); - *block = number_type ? 
FirstSuccessor() : SecondSuccessor(); - return true; - } - *block = NULL; - return false; -} - - -std::ostream& HCheckMapValue::PrintDataTo(std::ostream& os) const { // NOLINT - return os << NameOf(value()) << " " << NameOf(map()); -} - - -HValue* HCheckMapValue::Canonicalize() { - if (map()->IsConstant()) { - HConstant* c_map = HConstant::cast(map()); - return HCheckMaps::CreateAndInsertAfter( - block()->graph()->zone(), value(), c_map->MapValue(), - c_map->HasStableMapValue(), this); - } - return this; -} - - -std::ostream& HForInPrepareMap::PrintDataTo(std::ostream& os) const { // NOLINT - return os << NameOf(enumerable()); -} - - -std::ostream& HForInCacheArray::PrintDataTo(std::ostream& os) const { // NOLINT - return os << NameOf(enumerable()) << " " << NameOf(map()) << "[" << idx_ - << "]"; -} - - -std::ostream& HLoadFieldByIndex::PrintDataTo( - std::ostream& os) const { // NOLINT - return os << NameOf(object()) << " " << NameOf(index()); -} - - -static bool MatchLeftIsOnes(HValue* l, HValue* r, HValue** negated) { - if (!l->EqualsInteger32Constant(~0)) return false; - *negated = r; - return true; -} - - -static bool MatchNegationViaXor(HValue* instr, HValue** negated) { - if (!instr->IsBitwise()) return false; - HBitwise* b = HBitwise::cast(instr); - return (b->op() == Token::BIT_XOR) && - (MatchLeftIsOnes(b->left(), b->right(), negated) || - MatchLeftIsOnes(b->right(), b->left(), negated)); -} - - -static bool MatchDoubleNegation(HValue* instr, HValue** arg) { - HValue* negated; - return MatchNegationViaXor(instr, &negated) && - MatchNegationViaXor(negated, arg); -} - - -HValue* HBitwise::Canonicalize() { - if (!representation().IsSmiOrInteger32()) return this; - // If x is an int32, then x & -1 == x, x | 0 == x and x ^ 0 == x. - int32_t nop_constant = (op() == Token::BIT_AND) ? -1 : 0; - if (left()->EqualsInteger32Constant(nop_constant) && - !right()->CheckFlag(kUint32)) { - return right(); - } - if (right()->EqualsInteger32Constant(nop_constant) && - !left()->CheckFlag(kUint32)) { - return left(); - } - // Optimize double negation, a common pattern used for ToInt32(x). - HValue* arg; - if (MatchDoubleNegation(this, &arg) && !arg->CheckFlag(kUint32)) { - return arg; - } - return this; -} - - -// static -HInstruction* HAdd::New(Isolate* isolate, Zone* zone, HValue* context, - HValue* left, HValue* right, - ExternalAddType external_add_type) { - // For everything else, you should use the other factory method without - // ExternalAddType. 
- DCHECK_EQ(external_add_type, AddOfExternalAndTagged); - return new (zone) HAdd(context, left, right, external_add_type); -} - - -Representation HAdd::RepresentationFromInputs() { - Representation left_rep = left()->representation(); - if (left_rep.IsExternal()) { - return Representation::External(); - } - return HArithmeticBinaryOperation::RepresentationFromInputs(); -} - - -Representation HAdd::RequiredInputRepresentation(int index) { - if (index == 2) { - Representation left_rep = left()->representation(); - if (left_rep.IsExternal()) { - if (external_add_type_ == AddOfExternalAndTagged) { - return Representation::Tagged(); - } else { - return Representation::Integer32(); - } - } - } - return HArithmeticBinaryOperation::RequiredInputRepresentation(index); -} - - -static bool IsIdentityOperation(HValue* arg1, HValue* arg2, int32_t identity) { - return arg1->representation().IsSpecialization() && - arg2->EqualsInteger32Constant(identity); -} - - -HValue* HAdd::Canonicalize() { - // Adding 0 is an identity operation except in case of -0: -0 + 0 = +0 - if (IsIdentityOperation(left(), right(), 0) && - !left()->representation().IsDouble()) { // Left could be -0. - return left(); - } - if (IsIdentityOperation(right(), left(), 0) && - !left()->representation().IsDouble()) { // Right could be -0. - return right(); - } - return this; -} - - -HValue* HSub::Canonicalize() { - if (IsIdentityOperation(left(), right(), 0)) return left(); - return this; -} - - -HValue* HMul::Canonicalize() { - if (IsIdentityOperation(left(), right(), 1)) return left(); - if (IsIdentityOperation(right(), left(), 1)) return right(); - return this; -} - - -bool HMul::MulMinusOne() { - if (left()->EqualsInteger32Constant(-1) || - right()->EqualsInteger32Constant(-1)) { - return true; - } - - return false; -} - - -HValue* HMod::Canonicalize() { - return this; -} - - -HValue* HDiv::Canonicalize() { - if (IsIdentityOperation(left(), right(), 1)) return left(); - return this; -} - - -HValue* HChange::Canonicalize() { - return (from().Equals(to())) ? 
value() : this; -} - - -HValue* HWrapReceiver::Canonicalize() { - if (HasNoUses()) return NULL; - if (receiver()->type().IsJSReceiver()) { - return receiver(); - } - return this; -} - - -std::ostream& HTypeof::PrintDataTo(std::ostream& os) const { // NOLINT - return os << NameOf(value()); -} - - -HInstruction* HForceRepresentation::New(Isolate* isolate, Zone* zone, - HValue* context, HValue* value, - Representation representation) { - if (FLAG_fold_constants && value->IsConstant()) { - HConstant* c = HConstant::cast(value); - c = c->CopyToRepresentation(representation, zone); - if (c != NULL) return c; - } - return new(zone) HForceRepresentation(value, representation); -} - - -std::ostream& HForceRepresentation::PrintDataTo( - std::ostream& os) const { // NOLINT - return os << representation().Mnemonic() << " " << NameOf(value()); -} - - -std::ostream& HChange::PrintDataTo(std::ostream& os) const { // NOLINT - HUnaryOperation::PrintDataTo(os); - os << " " << from().Mnemonic() << " to " << to().Mnemonic(); - - if (CanTruncateToSmi()) os << " truncating-smi"; - if (CanTruncateToInt32()) os << " truncating-int32"; - if (CanTruncateToNumber()) os << " truncating-number"; - if (CheckFlag(kBailoutOnMinusZero)) os << " -0?"; - return os; -} - - -HValue* HUnaryMathOperation::Canonicalize() { - if (op() == kMathRound || op() == kMathFloor) { - HValue* val = value(); - if (val->IsChange()) val = HChange::cast(val)->value(); - if (val->representation().IsSmiOrInteger32()) { - if (val->representation().Equals(representation())) return val; - return Prepend(new (block()->zone()) - HChange(val, representation(), false, false, true)); - } - } - if (op() == kMathFloor && representation().IsSmiOrInteger32() && - value()->IsDiv() && value()->HasOneUse()) { - HDiv* hdiv = HDiv::cast(value()); - - HValue* left = hdiv->left(); - if (left->representation().IsInteger32() && !left->CheckFlag(kUint32)) { - // A value with an integer representation does not need to be transformed. - } else if (left->IsChange() && HChange::cast(left)->from().IsInteger32() && - !HChange::cast(left)->value()->CheckFlag(kUint32)) { - // A change from an integer32 can be replaced by the integer32 value. - left = HChange::cast(left)->value(); - } else if (hdiv->observed_input_representation(1).IsSmiOrInteger32()) { - left = Prepend(new (block()->zone()) HChange( - left, Representation::Integer32(), false, false, true)); - } else { - return this; - } - - HValue* right = hdiv->right(); - if (right->IsInteger32Constant()) { - right = Prepend(HConstant::cast(right)->CopyToRepresentation( - Representation::Integer32(), right->block()->zone())); - } else if (right->representation().IsInteger32() && - !right->CheckFlag(kUint32)) { - // A value with an integer representation does not need to be transformed. - } else if (right->IsChange() && - HChange::cast(right)->from().IsInteger32() && - !HChange::cast(right)->value()->CheckFlag(kUint32)) { - // A change from an integer32 can be replaced by the integer32 value. 
- right = HChange::cast(right)->value(); - } else if (hdiv->observed_input_representation(2).IsSmiOrInteger32()) { - right = Prepend(new (block()->zone()) HChange( - right, Representation::Integer32(), false, false, true)); - } else { - return this; - } - - return Prepend(HMathFloorOfDiv::New( - block()->graph()->isolate(), block()->zone(), context(), left, right)); - } - return this; -} - - -HValue* HCheckInstanceType::Canonicalize() { - if ((check_ == IS_JS_RECEIVER && value()->type().IsJSReceiver()) || - (check_ == IS_JS_ARRAY && value()->type().IsJSArray()) || - (check_ == IS_STRING && value()->type().IsString())) { - return value(); - } - - if (check_ == IS_INTERNALIZED_STRING && value()->IsConstant()) { - if (HConstant::cast(value())->HasInternalizedStringValue()) { - return value(); - } - } - return this; -} - - -void HCheckInstanceType::GetCheckInterval(InstanceType* first, - InstanceType* last) { - DCHECK(is_interval_check()); - switch (check_) { - case IS_JS_RECEIVER: - *first = FIRST_JS_RECEIVER_TYPE; - *last = LAST_JS_RECEIVER_TYPE; - return; - case IS_JS_ARRAY: - *first = *last = JS_ARRAY_TYPE; - return; - case IS_JS_FUNCTION: - *first = *last = JS_FUNCTION_TYPE; - return; - case IS_JS_DATE: - *first = *last = JS_DATE_TYPE; - return; - default: - UNREACHABLE(); - } -} - - -void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) { - DCHECK(!is_interval_check()); - switch (check_) { - case IS_STRING: - *mask = kIsNotStringMask; - *tag = kStringTag; - return; - case IS_INTERNALIZED_STRING: - *mask = kIsNotStringMask | kIsNotInternalizedMask; - *tag = kInternalizedTag; - return; - default: - UNREACHABLE(); - } -} - - -std::ostream& HCheckMaps::PrintDataTo(std::ostream& os) const { // NOLINT - os << NameOf(value()) << " [" << *maps()->at(0).handle(); - for (int i = 1; i < maps()->size(); ++i) { - os << "," << *maps()->at(i).handle(); - } - os << "]"; - if (IsStabilityCheck()) os << "(stability-check)"; - return os; -} - - -HValue* HCheckMaps::Canonicalize() { - if (!IsStabilityCheck() && maps_are_stable() && value()->IsConstant()) { - HConstant* c_value = HConstant::cast(value()); - if (c_value->HasObjectMap()) { - for (int i = 0; i < maps()->size(); ++i) { - if (c_value->ObjectMap() == maps()->at(i)) { - if (maps()->size() > 1) { - set_maps(new(block()->graph()->zone()) UniqueSet( - maps()->at(i), block()->graph()->zone())); - } - MarkAsStabilityCheck(); - break; - } - } - } - } - return this; -} - - -std::ostream& HCheckValue::PrintDataTo(std::ostream& os) const { // NOLINT - return os << NameOf(value()) << " " << Brief(*object().handle()); -} - - -HValue* HCheckValue::Canonicalize() { - return (value()->IsConstant() && - HConstant::cast(value())->EqualsUnique(object_)) ? 
NULL : this; -} - - -const char* HCheckInstanceType::GetCheckName() const { - switch (check_) { - case IS_JS_RECEIVER: return "object"; - case IS_JS_ARRAY: return "array"; - case IS_JS_FUNCTION: - return "function"; - case IS_JS_DATE: - return "date"; - case IS_STRING: return "string"; - case IS_INTERNALIZED_STRING: return "internalized_string"; - } - UNREACHABLE(); -} - - -std::ostream& HCheckInstanceType::PrintDataTo( - std::ostream& os) const { // NOLINT - os << GetCheckName() << " "; - return HUnaryOperation::PrintDataTo(os); -} - - -std::ostream& HUnknownOSRValue::PrintDataTo(std::ostream& os) const { // NOLINT - const char* type = "expression"; - if (environment_->is_local_index(index_)) type = "local"; - if (environment_->is_special_index(index_)) type = "special"; - if (environment_->is_parameter_index(index_)) type = "parameter"; - return os << type << " @ " << index_; -} - - -Range* HValue::InferRange(Zone* zone) { - Range* result; - if (representation().IsSmi() || type().IsSmi()) { - result = new(zone) Range(Smi::kMinValue, Smi::kMaxValue); - result->set_can_be_minus_zero(false); - } else { - result = new(zone) Range(); - result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32)); - // TODO(jkummerow): The range cannot be minus zero when the upper type - // bound is Integer32. - } - return result; -} - - -Range* HChange::InferRange(Zone* zone) { - Range* input_range = value()->range(); - if (from().IsInteger32() && !value()->CheckFlag(HInstruction::kUint32) && - (to().IsSmi() || - (to().IsTagged() && - input_range != NULL && - input_range->IsInSmiRange()))) { - set_type(HType::Smi()); - ClearChangesFlag(kNewSpacePromotion); - } - if (to().IsSmiOrTagged() && - input_range != NULL && - input_range->IsInSmiRange() && - (!SmiValuesAre32Bits() || - !value()->CheckFlag(HValue::kUint32) || - input_range->upper() != kMaxInt)) { - // The Range class can't express upper bounds in the (kMaxInt, kMaxUint32] - // interval, so we treat kMaxInt as a sentinel for this entire interval. - ClearFlag(kCanOverflow); - } - Range* result = (input_range != NULL) - ? input_range->Copy(zone) - : HValue::InferRange(zone); - result->set_can_be_minus_zero(!to().IsSmiOrInteger32() || - !(CheckFlag(kAllUsesTruncatingToInt32) || - CheckFlag(kAllUsesTruncatingToSmi))); - if (to().IsSmi()) result->ClampToSmi(); - return result; -} - - -Range* HConstant::InferRange(Zone* zone) { - if (HasInteger32Value()) { - Range* result = new(zone) Range(int32_value_, int32_value_); - result->set_can_be_minus_zero(false); - return result; - } - return HValue::InferRange(zone); -} - - -SourcePosition HPhi::position() const { return block()->first()->position(); } - - -Range* HPhi::InferRange(Zone* zone) { - Representation r = representation(); - if (r.IsSmiOrInteger32()) { - if (block()->IsLoopHeader()) { - Range* range = r.IsSmi() - ? 
new(zone) Range(Smi::kMinValue, Smi::kMaxValue) - : new(zone) Range(kMinInt, kMaxInt); - return range; - } else { - Range* range = OperandAt(0)->range()->Copy(zone); - for (int i = 1; i < OperandCount(); ++i) { - range->Union(OperandAt(i)->range()); - } - return range; - } - } else { - return HValue::InferRange(zone); - } -} - - -Range* HAdd::InferRange(Zone* zone) { - Representation r = representation(); - if (r.IsSmiOrInteger32()) { - Range* a = left()->range(); - Range* b = right()->range(); - Range* res = a->Copy(zone); - if (!res->AddAndCheckOverflow(r, b) || - (r.IsInteger32() && CheckFlag(kAllUsesTruncatingToInt32)) || - (r.IsSmi() && CheckFlag(kAllUsesTruncatingToSmi))) { - ClearFlag(kCanOverflow); - } - res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToSmi) && - !CheckFlag(kAllUsesTruncatingToInt32) && - a->CanBeMinusZero() && b->CanBeMinusZero()); - return res; - } else { - return HValue::InferRange(zone); - } -} - - -Range* HSub::InferRange(Zone* zone) { - Representation r = representation(); - if (r.IsSmiOrInteger32()) { - Range* a = left()->range(); - Range* b = right()->range(); - Range* res = a->Copy(zone); - if (!res->SubAndCheckOverflow(r, b) || - (r.IsInteger32() && CheckFlag(kAllUsesTruncatingToInt32)) || - (r.IsSmi() && CheckFlag(kAllUsesTruncatingToSmi))) { - ClearFlag(kCanOverflow); - } - res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToSmi) && - !CheckFlag(kAllUsesTruncatingToInt32) && - a->CanBeMinusZero() && b->CanBeZero()); - return res; - } else { - return HValue::InferRange(zone); - } -} - - -Range* HMul::InferRange(Zone* zone) { - Representation r = representation(); - if (r.IsSmiOrInteger32()) { - Range* a = left()->range(); - Range* b = right()->range(); - Range* res = a->Copy(zone); - if (!res->MulAndCheckOverflow(r, b) || - (((r.IsInteger32() && CheckFlag(kAllUsesTruncatingToInt32)) || - (r.IsSmi() && CheckFlag(kAllUsesTruncatingToSmi))) && - MulMinusOne())) { - // Truncated int multiplication is too precise and therefore not the - // same as converting to Double and back. - // Handle truncated integer multiplication by -1 special. 
- ClearFlag(kCanOverflow); - } - res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToSmi) && - !CheckFlag(kAllUsesTruncatingToInt32) && - ((a->CanBeZero() && b->CanBeNegative()) || - (a->CanBeNegative() && b->CanBeZero()))); - return res; - } else { - return HValue::InferRange(zone); - } -} - - -Range* HDiv::InferRange(Zone* zone) { - if (representation().IsInteger32()) { - Range* a = left()->range(); - Range* b = right()->range(); - Range* result = new(zone) Range(); - result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) && - (a->CanBeMinusZero() || - (a->CanBeZero() && b->CanBeNegative()))); - if (!a->Includes(kMinInt) || !b->Includes(-1)) { - ClearFlag(kCanOverflow); - } - - if (!b->CanBeZero()) { - ClearFlag(kCanBeDivByZero); - } - return result; - } else { - return HValue::InferRange(zone); - } -} - - -Range* HMathFloorOfDiv::InferRange(Zone* zone) { - if (representation().IsInteger32()) { - Range* a = left()->range(); - Range* b = right()->range(); - Range* result = new(zone) Range(); - result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) && - (a->CanBeMinusZero() || - (a->CanBeZero() && b->CanBeNegative()))); - if (!a->Includes(kMinInt)) { - ClearFlag(kLeftCanBeMinInt); - } - - if (!a->CanBeNegative()) { - ClearFlag(HValue::kLeftCanBeNegative); - } - - if (!a->CanBePositive()) { - ClearFlag(HValue::kLeftCanBePositive); - } - - if (!a->Includes(kMinInt) || !b->Includes(-1)) { - ClearFlag(kCanOverflow); - } - - if (!b->CanBeZero()) { - ClearFlag(kCanBeDivByZero); - } - return result; - } else { - return HValue::InferRange(zone); - } -} - - -// Returns the absolute value of its argument minus one, avoiding undefined -// behavior at kMinInt. -static int32_t AbsMinus1(int32_t a) { return a < 0 ? -(a + 1) : (a - 1); } - - -Range* HMod::InferRange(Zone* zone) { - if (representation().IsInteger32()) { - Range* a = left()->range(); - Range* b = right()->range(); - - // The magnitude of the modulus is bounded by the right operand. - int32_t positive_bound = Max(AbsMinus1(b->lower()), AbsMinus1(b->upper())); - - // The result of the modulo operation has the sign of its left operand. - bool left_can_be_negative = a->CanBeMinusZero() || a->CanBeNegative(); - Range* result = new(zone) Range(left_can_be_negative ? -positive_bound : 0, - a->CanBePositive() ? 
positive_bound : 0); - - result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) && - left_can_be_negative); - - if (!a->CanBeNegative()) { - ClearFlag(HValue::kLeftCanBeNegative); - } - - if (!a->Includes(kMinInt) || !b->Includes(-1)) { - ClearFlag(HValue::kCanOverflow); - } - - if (!b->CanBeZero()) { - ClearFlag(HValue::kCanBeDivByZero); - } - return result; - } else { - return HValue::InferRange(zone); - } -} - - -Range* HMathMinMax::InferRange(Zone* zone) { - if (representation().IsSmiOrInteger32()) { - Range* a = left()->range(); - Range* b = right()->range(); - Range* res = a->Copy(zone); - if (operation_ == kMathMax) { - res->CombinedMax(b); - } else { - DCHECK(operation_ == kMathMin); - res->CombinedMin(b); - } - return res; - } else { - return HValue::InferRange(zone); - } -} - - -void HPushArguments::AddInput(HValue* value) { - inputs_.Add(NULL, value->block()->zone()); - SetOperandAt(OperandCount() - 1, value); -} - - -std::ostream& HPhi::PrintTo(std::ostream& os) const { // NOLINT - os << "["; - for (int i = 0; i < OperandCount(); ++i) { - os << " " << NameOf(OperandAt(i)) << " "; - } - return os << " uses" << UseCount() - << representation_from_indirect_uses().Mnemonic() << " " - << TypeOf(this) << "]"; -} - - -void HPhi::AddInput(HValue* value) { - inputs_.Add(NULL, value->block()->zone()); - SetOperandAt(OperandCount() - 1, value); - // Mark phis that may have 'arguments' directly or indirectly as an operand. - if (!CheckFlag(kIsArguments) && value->CheckFlag(kIsArguments)) { - SetFlag(kIsArguments); - } -} - - -bool HPhi::HasRealUses() { - for (HUseIterator it(uses()); !it.Done(); it.Advance()) { - if (!it.value()->IsPhi()) return true; - } - return false; -} - - -HValue* HPhi::GetRedundantReplacement() { - HValue* candidate = NULL; - int count = OperandCount(); - int position = 0; - while (position < count && candidate == NULL) { - HValue* current = OperandAt(position++); - if (current != this) candidate = current; - } - while (position < count) { - HValue* current = OperandAt(position++); - if (current != this && current != candidate) return NULL; - } - DCHECK(candidate != this); - return candidate; -} - - -void HPhi::DeleteFromGraph() { - DCHECK(block() != NULL); - block()->RemovePhi(this); - DCHECK(block() == NULL); -} - - -void HPhi::InitRealUses(int phi_id) { - // Initialize real uses. - phi_id_ = phi_id; - // Compute a conservative approximation of truncating uses before inferring - // representations. The proper, exact computation will be done later, when - // inserting representation changes. 
- SetFlag(kTruncatingToSmi); - SetFlag(kTruncatingToInt32); - for (HUseIterator it(uses()); !it.Done(); it.Advance()) { - HValue* value = it.value(); - if (!value->IsPhi()) { - Representation rep = value->observed_input_representation(it.index()); - representation_from_non_phi_uses_ = - representation_from_non_phi_uses().generalize(rep); - if (rep.IsSmi() || rep.IsInteger32() || rep.IsDouble()) { - has_type_feedback_from_uses_ = true; - } - - if (FLAG_trace_representation) { - PrintF("#%d Phi is used by real #%d %s as %s\n", - id(), value->id(), value->Mnemonic(), rep.Mnemonic()); - } - if (!value->IsSimulate()) { - if (!value->CheckFlag(kTruncatingToSmi)) { - ClearFlag(kTruncatingToSmi); - } - if (!value->CheckFlag(kTruncatingToInt32)) { - ClearFlag(kTruncatingToInt32); - } - } - } - } -} - - -void HPhi::AddNonPhiUsesFrom(HPhi* other) { - if (FLAG_trace_representation) { - PrintF( - "generalizing use representation '%s' of #%d Phi " - "with uses of #%d Phi '%s'\n", - representation_from_indirect_uses().Mnemonic(), id(), other->id(), - other->representation_from_non_phi_uses().Mnemonic()); - } - - representation_from_indirect_uses_ = - representation_from_indirect_uses().generalize( - other->representation_from_non_phi_uses()); -} - - -void HSimulate::MergeWith(ZoneList<HSimulate*>* list) { - while (!list->is_empty()) { - HSimulate* from = list->RemoveLast(); - ZoneList<HValue*>* from_values = &from->values_; - for (int i = 0; i < from_values->length(); ++i) { - if (from->HasAssignedIndexAt(i)) { - int index = from->GetAssignedIndexAt(i); - if (HasValueForIndex(index)) continue; - AddAssignedValue(index, from_values->at(i)); - } else { - if (pop_count_ > 0) { - pop_count_--; - } else { - AddPushedValue(from_values->at(i)); - } - } - } - pop_count_ += from->pop_count_; - from->DeleteAndReplaceWith(NULL); - } -} - - -std::ostream& HSimulate::PrintDataTo(std::ostream& os) const { // NOLINT - os << "id=" << ast_id().ToInt(); - if (pop_count_ > 0) os << " pop " << pop_count_; - if (values_.length() > 0) { - if (pop_count_ > 0) os << " /"; - for (int i = values_.length() - 1; i >= 0; --i) { - if (HasAssignedIndexAt(i)) { - os << " var[" << GetAssignedIndexAt(i) << "] = "; - } else { - os << " push "; - } - os << NameOf(values_[i]); - if (i > 0) os << ","; - } - } - return os; -} - - -void HSimulate::ReplayEnvironment(HEnvironment* env) { - if (is_done_with_replay()) return; - DCHECK(env != NULL); - env->set_ast_id(ast_id()); - env->Drop(pop_count()); - for (int i = values()->length() - 1; i >= 0; --i) { - HValue* value = values()->at(i); - if (HasAssignedIndexAt(i)) { - env->Bind(GetAssignedIndexAt(i), value); - } else { - env->Push(value); - } - } - set_done_with_replay(); -} - - -static void ReplayEnvironmentNested(const ZoneList<HValue*>* values, - HCapturedObject* other) { - for (int i = 0; i < values->length(); ++i) { - HValue* value = values->at(i); - if (value->IsCapturedObject()) { - if (HCapturedObject::cast(value)->capture_id() == other->capture_id()) { - values->at(i) = other; - } else { - ReplayEnvironmentNested(HCapturedObject::cast(value)->values(), other); - } - } - } -} - - -// Replay captured objects by replacing all captured objects with the -// same capture id in the current and all outer environments.
-void HCapturedObject::ReplayEnvironment(HEnvironment* env) { - DCHECK(env != NULL); - while (env != NULL) { - ReplayEnvironmentNested(env->values(), this); - env = env->outer(); - } -} - - -std::ostream& HCapturedObject::PrintDataTo(std::ostream& os) const { // NOLINT - os << "#" << capture_id() << " "; - return HDematerializedObject::PrintDataTo(os); -} - - -std::ostream& HEnterInlined::PrintDataTo(std::ostream& os) const { // NOLINT - os << function()->debug_name()->ToCString().get(); - if (syntactic_tail_call_mode() == TailCallMode::kAllow) { - os << ", JSTailCall"; - } - return os; -} - - -static bool IsInteger32(double value) { - if (value >= std::numeric_limits<int32_t>::min() && - value <= std::numeric_limits<int32_t>::max()) { - double roundtrip_value = static_cast<double>(static_cast<int32_t>(value)); - return bit_cast<int64_t>(roundtrip_value) == bit_cast<int64_t>(value); - } - return false; -} - - -HConstant::HConstant(Special special) - : HTemplateInstruction<0>(HType::TaggedNumber()), - object_(Handle<Object>::null()), - object_map_(Handle<Map>::null()), - bit_field_(HasDoubleValueField::encode(true) | - InstanceTypeField::encode(kUnknownInstanceType)), - int32_value_(0) { - DCHECK_EQ(kHoleNaN, special); - // Manipulating the signaling NaN used for the hole in C++, e.g. with bit_cast - // will change its value on ia32 (the x87 stack is used to return values - // and stores to the stack silently clear the signalling bit). - // Therefore we have to use memcpy for initializing |double_value_| with - // kHoleNanInt64 here. - std::memcpy(&double_value_, &kHoleNanInt64, sizeof(double_value_)); - Initialize(Representation::Double()); -} - - -HConstant::HConstant(Handle<Object> object, Representation r) - : HTemplateInstruction<0>(HType::FromValue(object)), - object_(Unique<Object>::CreateUninitialized(object)), - object_map_(Handle<Map>::null()), - bit_field_( - HasStableMapValueField::encode(false) | - HasSmiValueField::encode(false) | HasInt32ValueField::encode(false) | - HasDoubleValueField::encode(false) | - HasExternalReferenceValueField::encode(false) | - IsNotInNewSpaceField::encode(true) | - BooleanValueField::encode(object->BooleanValue()) | - IsUndetectableField::encode(false) | IsCallableField::encode(false) | - InstanceTypeField::encode(kUnknownInstanceType)) { - if (object->IsNumber()) { - double n = object->Number(); - bool has_int32_value = IsInteger32(n); - bit_field_ = HasInt32ValueField::update(bit_field_, has_int32_value); - int32_value_ = DoubleToInt32(n); - bit_field_ = HasSmiValueField::update( - bit_field_, has_int32_value && Smi::IsValid(int32_value_)); - if (std::isnan(n)) { - double_value_ = std::numeric_limits<double>::quiet_NaN(); - // Canonicalize object with NaN value. - DCHECK(object->IsHeapObject()); // NaN can't be a Smi. - Isolate* isolate = HeapObject::cast(*object)->GetIsolate(); - object = isolate->factory()->nan_value(); - object_ = Unique<Object>::CreateUninitialized(object); - } else { - double_value_ = n; - // Canonicalize object with -0.0 value. - if (bit_cast<int64_t>(n) == bit_cast<int64_t>(-0.0)) { - DCHECK(object->IsHeapObject()); // -0.0 can't be a Smi.
- Isolate* isolate = HeapObject::cast(*object)->GetIsolate(); - object = isolate->factory()->minus_zero_value(); - object_ = Unique<Object>::CreateUninitialized(object); - } - } - bit_field_ = HasDoubleValueField::update(bit_field_, true); - } - if (object->IsHeapObject()) { - Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object); - Isolate* isolate = heap_object->GetIsolate(); - Handle<Map> map(heap_object->map(), isolate); - bit_field_ = IsNotInNewSpaceField::update( - bit_field_, !isolate->heap()->InNewSpace(*object)); - bit_field_ = InstanceTypeField::update(bit_field_, map->instance_type()); - bit_field_ = - IsUndetectableField::update(bit_field_, map->is_undetectable()); - bit_field_ = IsCallableField::update(bit_field_, map->is_callable()); - if (map->is_stable()) object_map_ = Unique<Map>::CreateImmovable(map); - bit_field_ = HasStableMapValueField::update( - bit_field_, - HasMapValue() && Handle<Map>::cast(heap_object)->is_stable()); - } - - Initialize(r); -} - - -HConstant::HConstant(Unique<Object> object, Unique<Map> object_map, - bool has_stable_map_value, Representation r, HType type, - bool is_not_in_new_space, bool boolean_value, - bool is_undetectable, InstanceType instance_type) - : HTemplateInstruction<0>(type), - object_(object), - object_map_(object_map), - bit_field_(HasStableMapValueField::encode(has_stable_map_value) | - HasSmiValueField::encode(false) | - HasInt32ValueField::encode(false) | - HasDoubleValueField::encode(false) | - HasExternalReferenceValueField::encode(false) | - IsNotInNewSpaceField::encode(is_not_in_new_space) | - BooleanValueField::encode(boolean_value) | - IsUndetectableField::encode(is_undetectable) | - InstanceTypeField::encode(instance_type)) { - DCHECK(!object.handle().is_null()); - DCHECK(!type.IsTaggedNumber() || type.IsNone()); - Initialize(r); -} - - -HConstant::HConstant(int32_t integer_value, Representation r, - bool is_not_in_new_space, Unique<Object> object) - : object_(object), - object_map_(Handle<Map>::null()), - bit_field_(HasStableMapValueField::encode(false) | - HasSmiValueField::encode(Smi::IsValid(integer_value)) | - HasInt32ValueField::encode(true) | - HasDoubleValueField::encode(true) | - HasExternalReferenceValueField::encode(false) | - IsNotInNewSpaceField::encode(is_not_in_new_space) | - BooleanValueField::encode(integer_value != 0) | - IsUndetectableField::encode(false) | - InstanceTypeField::encode(kUnknownInstanceType)), - int32_value_(integer_value), - double_value_(FastI2D(integer_value)) { - // It's possible to create a constant with a value in Smi-range but stored - // in a (pre-existing) HeapNumber. See crbug.com/349878. - bool could_be_heapobject = r.IsTagged() && !object.handle().is_null(); - bool is_smi = HasSmiValue() && !could_be_heapobject; - set_type(is_smi ?
HType::Smi() : HType::TaggedNumber()); - Initialize(r); -} - -HConstant::HConstant(double double_value, Representation r, - bool is_not_in_new_space, Unique object) - : object_(object), - object_map_(Handle::null()), - bit_field_(HasStableMapValueField::encode(false) | - HasInt32ValueField::encode(IsInteger32(double_value)) | - HasDoubleValueField::encode(true) | - HasExternalReferenceValueField::encode(false) | - IsNotInNewSpaceField::encode(is_not_in_new_space) | - BooleanValueField::encode(double_value != 0 && - !std::isnan(double_value)) | - IsUndetectableField::encode(false) | - InstanceTypeField::encode(kUnknownInstanceType)), - int32_value_(DoubleToInt32(double_value)) { - bit_field_ = HasSmiValueField::update( - bit_field_, HasInteger32Value() && Smi::IsValid(int32_value_)); - // It's possible to create a constant with a value in Smi-range but stored - // in a (pre-existing) HeapNumber. See crbug.com/349878. - bool could_be_heapobject = r.IsTagged() && !object.handle().is_null(); - bool is_smi = HasSmiValue() && !could_be_heapobject; - set_type(is_smi ? HType::Smi() : HType::TaggedNumber()); - if (std::isnan(double_value)) { - double_value_ = std::numeric_limits::quiet_NaN(); - } else { - double_value_ = double_value; - } - Initialize(r); -} - - -HConstant::HConstant(ExternalReference reference) - : HTemplateInstruction<0>(HType::Any()), - object_(Unique(Handle::null())), - object_map_(Handle::null()), - bit_field_( - HasStableMapValueField::encode(false) | - HasSmiValueField::encode(false) | HasInt32ValueField::encode(false) | - HasDoubleValueField::encode(false) | - HasExternalReferenceValueField::encode(true) | - IsNotInNewSpaceField::encode(true) | BooleanValueField::encode(true) | - IsUndetectableField::encode(false) | - InstanceTypeField::encode(kUnknownInstanceType)), - external_reference_value_(reference) { - Initialize(Representation::External()); -} - - -void HConstant::Initialize(Representation r) { - if (r.IsNone()) { - if (HasSmiValue() && SmiValuesAre31Bits()) { - r = Representation::Smi(); - } else if (HasInteger32Value()) { - r = Representation::Integer32(); - } else if (HasDoubleValue()) { - r = Representation::Double(); - } else if (HasExternalReferenceValue()) { - r = Representation::External(); - } else { - Handle object = object_.handle(); - if (object->IsJSObject()) { - // Try to eagerly migrate JSObjects that have deprecated maps. - Handle js_object = Handle::cast(object); - if (js_object->map()->is_deprecated()) { - JSObject::TryMigrateInstance(js_object); - } - } - r = Representation::Tagged(); - } - } - if (r.IsSmi()) { - // If we have an existing handle, zap it, because it might be a heap - // number which we must not re-use when copying this HConstant to - // Tagged representation later, because having Smi representation now - // could cause heap object checks not to get emitted. - object_ = Unique(Handle::null()); - } - if (r.IsSmiOrInteger32() && object_.handle().is_null()) { - // If it's not a heap object, it can't be in new space. 
- bit_field_ = IsNotInNewSpaceField::update(bit_field_, true); - } - set_representation(r); - SetFlag(kUseGVN); -} - - -bool HConstant::ImmortalImmovable() const { - if (HasInteger32Value()) { - return false; - } - if (HasDoubleValue()) { - if (IsSpecialDouble()) { - return true; - } - return false; - } - if (HasExternalReferenceValue()) { - return false; - } - - DCHECK(!object_.handle().is_null()); - Heap* heap = isolate()->heap(); - DCHECK(!object_.IsKnownGlobal(heap->minus_zero_value())); - DCHECK(!object_.IsKnownGlobal(heap->nan_value())); - return -#define IMMORTAL_IMMOVABLE_ROOT(name) \ - object_.IsKnownGlobal(heap->root(Heap::k##name##RootIndex)) || - IMMORTAL_IMMOVABLE_ROOT_LIST(IMMORTAL_IMMOVABLE_ROOT) -#undef IMMORTAL_IMMOVABLE_ROOT -#define INTERNALIZED_STRING(name, value) \ - object_.IsKnownGlobal(heap->name()) || - INTERNALIZED_STRING_LIST(INTERNALIZED_STRING) -#undef INTERNALIZED_STRING -#define STRING_TYPE(NAME, size, name, Name) \ - object_.IsKnownGlobal(heap->name##_map()) || - STRING_TYPE_LIST(STRING_TYPE) -#undef STRING_TYPE - false; -} - - -bool HConstant::EmitAtUses() { - DCHECK(IsLinked()); - if (HasNoUses()) return true; - if (IsCell()) return false; - if (representation().IsDouble()) return false; - if (representation().IsExternal()) return false; - return true; -} - - -HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const { - if (r.IsSmi() && !HasSmiValue()) return NULL; - if (r.IsInteger32() && !HasInteger32Value()) return NULL; - if (r.IsDouble() && !HasDoubleValue()) return NULL; - if (r.IsExternal() && !HasExternalReferenceValue()) return NULL; - if (HasInteger32Value()) { - return new (zone) HConstant(int32_value_, r, NotInNewSpace(), object_); - } - if (HasDoubleValue()) { - return new (zone) HConstant(double_value_, r, NotInNewSpace(), object_); - } - if (HasExternalReferenceValue()) { - return new(zone) HConstant(external_reference_value_); - } - DCHECK(!object_.handle().is_null()); - return new (zone) HConstant(object_, object_map_, HasStableMapValue(), r, - type_, NotInNewSpace(), BooleanValue(), - IsUndetectable(), GetInstanceType()); -} - - -Maybe HConstant::CopyToTruncatedInt32(Zone* zone) { - HConstant* res = NULL; - if (HasInteger32Value()) { - res = new (zone) HConstant(int32_value_, Representation::Integer32(), - NotInNewSpace(), object_); - } else if (HasDoubleValue()) { - res = new (zone) - HConstant(DoubleToInt32(double_value_), Representation::Integer32(), - NotInNewSpace(), object_); - } - return res != NULL ? Just(res) : Nothing(); -} - - -Maybe HConstant::CopyToTruncatedNumber(Isolate* isolate, - Zone* zone) { - HConstant* res = NULL; - Handle handle = this->handle(isolate); - if (handle->IsBoolean()) { - res = handle->BooleanValue() ? - new(zone) HConstant(1) : new(zone) HConstant(0); - } else if (handle->IsUndefined(isolate)) { - res = new (zone) HConstant(std::numeric_limits::quiet_NaN()); - } else if (handle->IsNull(isolate)) { - res = new(zone) HConstant(0); - } else if (handle->IsString()) { - res = new(zone) HConstant(String::ToNumber(Handle::cast(handle))); - } - return res != NULL ? Just(res) : Nothing(); -} - - -std::ostream& HConstant::PrintDataTo(std::ostream& os) const { // NOLINT - if (HasInteger32Value()) { - os << int32_value_ << " "; - } else if (HasDoubleValue()) { - os << double_value_ << " "; - } else if (HasExternalReferenceValue()) { - os << reinterpret_cast(external_reference_value_.address()) << " "; - } else { - // The handle() method is silently and lazily mutating the object. 
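// Illustrative sketch (not part of the deleted V8 sources): the ToNumber-style
// folding performed by CopyToTruncatedNumber() above. Booleans fold to 1/0,
// undefined to NaN, null to 0, and strings go through a numeric parse; other
// values produce no folded constant. JsValue and the strtod-based parse are
// simplifications of V8's handles and String::ToNumber.
#include <cstdlib>
#include <limits>
#include <optional>
#include <string>
#include <variant>

struct Undefined {};
using JsValue =
    std::variant<bool, Undefined, std::nullptr_t, std::string, double>;

static std::optional<double> TruncateToNumber(const JsValue& value) {
  if (const bool* b = std::get_if<bool>(&value)) return *b ? 1.0 : 0.0;
  if (std::holds_alternative<Undefined>(value))
    return std::numeric_limits<double>::quiet_NaN();
  if (std::holds_alternative<std::nullptr_t>(value)) return 0.0;
  if (const std::string* s = std::get_if<std::string>(&value))
    return std::strtod(s->c_str(), nullptr);  // simplified vs. String::ToNumber
  return std::nullopt;  // e.g. a plain number or object: not folded here
}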
- Handle h = const_cast(this)->handle(isolate()); - os << Brief(*h) << " "; - if (HasStableMapValue()) os << "[stable-map] "; - if (HasObjectMap()) os << "[map " << *ObjectMap().handle() << "] "; - } - if (!NotInNewSpace()) os << "[new space] "; - return os; -} - - -std::ostream& HBinaryOperation::PrintDataTo(std::ostream& os) const { // NOLINT - os << NameOf(left()) << " " << NameOf(right()); - if (CheckFlag(kCanOverflow)) os << " !"; - if (CheckFlag(kBailoutOnMinusZero)) os << " -0?"; - return os; -} - - -void HBinaryOperation::InferRepresentation(HInferRepresentationPhase* h_infer) { - DCHECK(CheckFlag(kFlexibleRepresentation)); - Representation new_rep = RepresentationFromInputs(); - UpdateRepresentation(new_rep, h_infer, "inputs"); - - if (representation().IsSmi() && HasNonSmiUse()) { - UpdateRepresentation( - Representation::Integer32(), h_infer, "use requirements"); - } - - if (observed_output_representation_.IsNone()) { - new_rep = RepresentationFromUses(); - UpdateRepresentation(new_rep, h_infer, "uses"); - } else { - new_rep = RepresentationFromOutput(); - UpdateRepresentation(new_rep, h_infer, "output"); - } -} - - -Representation HBinaryOperation::RepresentationFromInputs() { - // Determine the worst case of observed input representations and - // the currently assumed output representation. - Representation rep = representation(); - for (int i = 1; i <= 2; ++i) { - rep = rep.generalize(observed_input_representation(i)); - } - // If any of the actual input representation is more general than what we - // have so far but not Tagged, use that representation instead. - Representation left_rep = left()->representation(); - Representation right_rep = right()->representation(); - if (!left_rep.IsTagged()) rep = rep.generalize(left_rep); - if (!right_rep.IsTagged()) rep = rep.generalize(right_rep); - - return rep; -} - - -bool HBinaryOperation::IgnoreObservedOutputRepresentation( - Representation current_rep) { - return ((current_rep.IsInteger32() && CheckUsesForFlag(kTruncatingToInt32)) || - (current_rep.IsSmi() && CheckUsesForFlag(kTruncatingToSmi))) && - // Mul in Integer32 mode would be too precise. - (!this->IsMul() || HMul::cast(this)->MulMinusOne()); -} - - -Representation HBinaryOperation::RepresentationFromOutput() { - Representation rep = representation(); - // Consider observed output representation, but ignore it if it's Double, - // this instruction is not a division, and all its uses are truncating - // to Integer32. - if (observed_output_representation_.is_more_general_than(rep) && - !IgnoreObservedOutputRepresentation(rep)) { - return observed_output_representation_; - } - return Representation::None(); -} - - -void HBinaryOperation::AssumeRepresentation(Representation r) { - set_observed_input_representation(1, r); - set_observed_input_representation(2, r); - HValue::AssumeRepresentation(r); -} - - -void HMathMinMax::InferRepresentation(HInferRepresentationPhase* h_infer) { - DCHECK(CheckFlag(kFlexibleRepresentation)); - Representation new_rep = RepresentationFromInputs(); - UpdateRepresentation(new_rep, h_infer, "inputs"); - // Do not care about uses. -} - - -Range* HBitwise::InferRange(Zone* zone) { - if (op() == Token::BIT_XOR) { - if (left()->HasRange() && right()->HasRange()) { - // The maximum value has the high bit, and all bits below, set: - // (1 << high) - 1. - // If the range can be negative, the minimum int is a negative number with - // the high bit, and all bits below, unset: - // -(1 << high). 
- // If it cannot be negative, conservatively choose 0 as minimum int. - int64_t left_upper = left()->range()->upper(); - int64_t left_lower = left()->range()->lower(); - int64_t right_upper = right()->range()->upper(); - int64_t right_lower = right()->range()->lower(); - - if (left_upper < 0) left_upper = ~left_upper; - if (left_lower < 0) left_lower = ~left_lower; - if (right_upper < 0) right_upper = ~right_upper; - if (right_lower < 0) right_lower = ~right_lower; - - int high = MostSignificantBit( - static_cast( - left_upper | left_lower | right_upper | right_lower)); - - int64_t limit = 1; - limit <<= high; - int32_t min = (left()->range()->CanBeNegative() || - right()->range()->CanBeNegative()) - ? static_cast(-limit) : 0; - return new(zone) Range(min, static_cast(limit - 1)); - } - Range* result = HValue::InferRange(zone); - result->set_can_be_minus_zero(false); - return result; - } - const int32_t kDefaultMask = static_cast(0xffffffff); - int32_t left_mask = (left()->range() != NULL) - ? left()->range()->Mask() - : kDefaultMask; - int32_t right_mask = (right()->range() != NULL) - ? right()->range()->Mask() - : kDefaultMask; - int32_t result_mask = (op() == Token::BIT_AND) - ? left_mask & right_mask - : left_mask | right_mask; - if (result_mask >= 0) return new(zone) Range(0, result_mask); - - Range* result = HValue::InferRange(zone); - result->set_can_be_minus_zero(false); - return result; -} - - -Range* HSar::InferRange(Zone* zone) { - if (right()->IsConstant()) { - HConstant* c = HConstant::cast(right()); - if (c->HasInteger32Value()) { - Range* result = (left()->range() != NULL) - ? left()->range()->Copy(zone) - : new(zone) Range(); - result->Sar(c->Integer32Value()); - return result; - } - } - return HValue::InferRange(zone); -} - - -Range* HShr::InferRange(Zone* zone) { - if (right()->IsConstant()) { - HConstant* c = HConstant::cast(right()); - if (c->HasInteger32Value()) { - int shift_count = c->Integer32Value() & 0x1f; - if (left()->range()->CanBeNegative()) { - // Only compute bounds if the result always fits into an int32. - return (shift_count >= 1) - ? new(zone) Range(0, - static_cast(0xffffffff) >> shift_count) - : new(zone) Range(); - } else { - // For positive inputs we can use the >> operator. - Range* result = (left()->range() != NULL) - ? left()->range()->Copy(zone) - : new(zone) Range(); - result->Sar(c->Integer32Value()); - return result; - } - } - } - return HValue::InferRange(zone); -} - - -Range* HShl::InferRange(Zone* zone) { - if (right()->IsConstant()) { - HConstant* c = HConstant::cast(right()); - if (c->HasInteger32Value()) { - Range* result = (left()->range() != NULL) - ? 
left()->range()->Copy(zone) - : new(zone) Range(); - result->Shl(c->Integer32Value()); - return result; - } - } - return HValue::InferRange(zone); -} - - -Range* HLoadNamedField::InferRange(Zone* zone) { - if (access().representation().IsInteger8()) { - return new(zone) Range(kMinInt8, kMaxInt8); - } - if (access().representation().IsUInteger8()) { - return new(zone) Range(kMinUInt8, kMaxUInt8); - } - if (access().representation().IsInteger16()) { - return new(zone) Range(kMinInt16, kMaxInt16); - } - if (access().representation().IsUInteger16()) { - return new(zone) Range(kMinUInt16, kMaxUInt16); - } - if (access().IsStringLength()) { - return new(zone) Range(0, String::kMaxLength); - } - return HValue::InferRange(zone); -} - - -Range* HLoadKeyed::InferRange(Zone* zone) { - switch (elements_kind()) { - case INT8_ELEMENTS: - return new(zone) Range(kMinInt8, kMaxInt8); - case UINT8_ELEMENTS: - case UINT8_CLAMPED_ELEMENTS: - return new(zone) Range(kMinUInt8, kMaxUInt8); - case INT16_ELEMENTS: - return new(zone) Range(kMinInt16, kMaxInt16); - case UINT16_ELEMENTS: - return new(zone) Range(kMinUInt16, kMaxUInt16); - default: - return HValue::InferRange(zone); - } -} - - -std::ostream& HCompareGeneric::PrintDataTo(std::ostream& os) const { // NOLINT - os << Token::Name(token()) << " "; - return HBinaryOperation::PrintDataTo(os); -} - - -std::ostream& HStringCompareAndBranch::PrintDataTo( - std::ostream& os) const { // NOLINT - os << Token::Name(token()) << " "; - return HControlInstruction::PrintDataTo(os); -} - - -std::ostream& HCompareNumericAndBranch::PrintDataTo( - std::ostream& os) const { // NOLINT - os << Token::Name(token()) << " " << NameOf(left()) << " " << NameOf(right()); - return HControlInstruction::PrintDataTo(os); -} - - -std::ostream& HCompareObjectEqAndBranch::PrintDataTo( - std::ostream& os) const { // NOLINT - os << NameOf(left()) << " " << NameOf(right()); - return HControlInstruction::PrintDataTo(os); -} - - -bool HCompareObjectEqAndBranch::KnownSuccessorBlock(HBasicBlock** block) { - if (known_successor_index() != kNoKnownSuccessorIndex) { - *block = SuccessorAt(known_successor_index()); - return true; - } - if (FLAG_fold_constants && left()->IsConstant() && right()->IsConstant()) { - *block = HConstant::cast(left())->DataEquals(HConstant::cast(right())) - ? FirstSuccessor() : SecondSuccessor(); - return true; - } - *block = NULL; - return false; -} - - -bool HIsStringAndBranch::KnownSuccessorBlock(HBasicBlock** block) { - if (known_successor_index() != kNoKnownSuccessorIndex) { - *block = SuccessorAt(known_successor_index()); - return true; - } - if (FLAG_fold_constants && value()->IsConstant()) { - *block = HConstant::cast(value())->HasStringValue() - ? FirstSuccessor() : SecondSuccessor(); - return true; - } - if (value()->type().IsString()) { - *block = FirstSuccessor(); - return true; - } - if (value()->type().IsSmi() || - value()->type().IsNull() || - value()->type().IsBoolean() || - value()->type().IsUndefined() || - value()->type().IsJSReceiver()) { - *block = SecondSuccessor(); - return true; - } - *block = NULL; - return false; -} - - -bool HIsUndetectableAndBranch::KnownSuccessorBlock(HBasicBlock** block) { - if (FLAG_fold_constants && value()->IsConstant()) { - *block = HConstant::cast(value())->IsUndetectable() - ? 
FirstSuccessor() : SecondSuccessor(); - return true; - } - if (value()->type().IsNull() || value()->type().IsUndefined()) { - *block = FirstSuccessor(); - return true; - } - if (value()->type().IsBoolean() || - value()->type().IsSmi() || - value()->type().IsString() || - value()->type().IsJSReceiver()) { - *block = SecondSuccessor(); - return true; - } - *block = NULL; - return false; -} - - -bool HHasInstanceTypeAndBranch::KnownSuccessorBlock(HBasicBlock** block) { - if (FLAG_fold_constants && value()->IsConstant()) { - InstanceType type = HConstant::cast(value())->GetInstanceType(); - *block = (from_ <= type) && (type <= to_) - ? FirstSuccessor() : SecondSuccessor(); - return true; - } - *block = NULL; - return false; -} - - -void HCompareHoleAndBranch::InferRepresentation( - HInferRepresentationPhase* h_infer) { - ChangeRepresentation(value()->representation()); -} - - -bool HCompareNumericAndBranch::KnownSuccessorBlock(HBasicBlock** block) { - if (left() == right() && - left()->representation().IsSmiOrInteger32()) { - *block = (token() == Token::EQ || - token() == Token::EQ_STRICT || - token() == Token::LTE || - token() == Token::GTE) - ? FirstSuccessor() : SecondSuccessor(); - return true; - } - *block = NULL; - return false; -} - - -std::ostream& HGoto::PrintDataTo(std::ostream& os) const { // NOLINT - return os << *SuccessorAt(0); -} - - -void HCompareNumericAndBranch::InferRepresentation( - HInferRepresentationPhase* h_infer) { - Representation left_rep = left()->representation(); - Representation right_rep = right()->representation(); - Representation observed_left = observed_input_representation(0); - Representation observed_right = observed_input_representation(1); - - Representation rep = Representation::None(); - rep = rep.generalize(observed_left); - rep = rep.generalize(observed_right); - if (rep.IsNone() || rep.IsSmiOrInteger32()) { - if (!left_rep.IsTagged()) rep = rep.generalize(left_rep); - if (!right_rep.IsTagged()) rep = rep.generalize(right_rep); - } else { - rep = Representation::Double(); - } - - if (rep.IsDouble()) { - // According to the ES5 spec (11.9.3, 11.8.5), Equality comparisons (==, === - // and !=) have special handling of undefined, e.g. undefined == undefined - // is 'true'. Relational comparisons have a different semantic, first - // calling ToPrimitive() on their arguments. The standard Crankshaft - // tagged-to-double conversion to ensure the HCompareNumericAndBranch's - // inputs are doubles caused 'undefined' to be converted to NaN. That's - // compatible out-of-the box with ordered relational comparisons (<, >, <=, - // >=). However, for equality comparisons (and for 'in' and 'instanceof'), - // it is not consistent with the spec. For example, it would cause undefined - // == undefined (should be true) to be evaluated as NaN == NaN - // (false). Therefore, any comparisons other than ordered relational - // comparisons must cause a deopt when one of their arguments is undefined. 
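// Illustrative demo (not part of the deleted V8 sources) of the pitfall the
// comment above describes: representing 'undefined' as NaN is sound for
// ordered relational comparisons, where both undefined and NaN make every
// comparison false, but not for equality, where undefined == undefined is
// true in JavaScript while NaN == NaN is false. Hence only ordered relational
// compares may keep the number-truncating double path; the others must
// deoptimize when an argument is undefined.
#include <cstdio>
#include <limits>

int main() {
  double undefined_as_nan = std::numeric_limits<double>::quiet_NaN();
  std::printf("<  : %d (JS: undefined <  undefined is false)\n",
              undefined_as_nan < undefined_as_nan);
  std::printf("== : %d (JS: undefined == undefined is true!)\n",
              undefined_as_nan == undefined_as_nan);
  return 0;
}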
- // See also v8:1434 - if (Token::IsOrderedRelationalCompareOp(token_)) { - SetFlag(kTruncatingToNumber); - } - } - ChangeRepresentation(rep); -} - - -std::ostream& HParameter::PrintDataTo(std::ostream& os) const { // NOLINT - return os << index(); -} - - -std::ostream& HLoadNamedField::PrintDataTo(std::ostream& os) const { // NOLINT - os << NameOf(object()) << access_; - - if (maps() != NULL) { - os << " [" << *maps()->at(0).handle(); - for (int i = 1; i < maps()->size(); ++i) { - os << "," << *maps()->at(i).handle(); - } - os << "]"; - } - - if (HasDependency()) os << " " << NameOf(dependency()); - return os; -} - - -std::ostream& HLoadKeyed::PrintDataTo(std::ostream& os) const { // NOLINT - if (!is_fixed_typed_array()) { - os << NameOf(elements()); - } else { - DCHECK(elements_kind() >= FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND && - elements_kind() <= LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND); - os << NameOf(elements()) << "." << ElementsKindToString(elements_kind()); - } - - os << "[" << NameOf(key()); - if (IsDehoisted()) os << " + " << base_offset(); - os << "]"; - - if (HasDependency()) os << " " << NameOf(dependency()); - if (RequiresHoleCheck()) os << " check_hole"; - return os; -} - - -bool HLoadKeyed::TryIncreaseBaseOffset(uint32_t increase_by_value) { - // The base offset is usually simply the size of the array header, except - // with dehoisting adds an addition offset due to a array index key - // manipulation, in which case it becomes (array header size + - // constant-offset-from-key * kPointerSize) - uint32_t base_offset = BaseOffsetField::decode(bit_field_); - v8::base::internal::CheckedNumeric addition_result = base_offset; - addition_result += increase_by_value; - if (!addition_result.IsValid()) return false; - base_offset = addition_result.ValueOrDie(); - if (!BaseOffsetField::is_valid(base_offset)) return false; - bit_field_ = BaseOffsetField::update(bit_field_, base_offset); - return true; -} - - -bool HLoadKeyed::UsesMustHandleHole() const { - if (IsFastPackedElementsKind(elements_kind())) { - return false; - } - - if (IsFixedTypedArrayElementsKind(elements_kind())) { - return false; - } - - if (hole_mode() == ALLOW_RETURN_HOLE) { - if (IsFastDoubleElementsKind(elements_kind())) { - return AllUsesCanTreatHoleAsNaN(); - } - return true; - } - - if (IsFastDoubleElementsKind(elements_kind())) { - return false; - } - - // Holes are only returned as tagged values. - if (!representation().IsTagged()) { - return false; - } - - for (HUseIterator it(uses()); !it.Done(); it.Advance()) { - HValue* use = it.value(); - if (!use->IsChange()) return false; - } - - return true; -} - - -bool HLoadKeyed::AllUsesCanTreatHoleAsNaN() const { - return IsFastDoubleElementsKind(elements_kind()) && - CheckUsesForFlag(HValue::kTruncatingToNumber); -} - - -bool HLoadKeyed::RequiresHoleCheck() const { - if (IsFastPackedElementsKind(elements_kind())) { - return false; - } - - if (IsFixedTypedArrayElementsKind(elements_kind())) { - return false; - } - - if (hole_mode() == CONVERT_HOLE_TO_UNDEFINED) { - return false; - } - - return !UsesMustHandleHole(); -} - -HValue* HCallWithDescriptor::Canonicalize() { - if (kind() != Code::KEYED_LOAD_IC) return this; - - // Recognize generic keyed loads that use property name generated - // by for-in statement as a key and rewrite them into fast property load - // by index. 
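// Illustrative sketch (not part of the deleted V8 sources): the overflow-
// checked base-offset bump done by HLoadKeyed::TryIncreaseBaseOffset() above,
// written without v8::base::internal::CheckedNumeric. max_encodable_offset is
// a hypothetical stand-in for the limit enforced by BaseOffsetField::is_valid().
#include <cstdint>
#include <limits>

static bool TryIncreaseBaseOffset(uint32_t* base_offset, uint32_t increase,
                                  uint32_t max_encodable_offset) {
  if (increase > std::numeric_limits<uint32_t>::max() - *base_offset) {
    return false;  // the addition itself would wrap around
  }
  uint32_t new_offset = *base_offset + increase;
  if (new_offset > max_encodable_offset) {
    return false;  // result does not fit the encoding bit field
  }
  *base_offset = new_offset;
  return true;
}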
- typedef LoadWithVectorDescriptor Descriptor; - HValue* key = parameter(Descriptor::kName); - if (key->IsLoadKeyed()) { - HLoadKeyed* key_load = HLoadKeyed::cast(key); - if (key_load->elements()->IsForInCacheArray()) { - HForInCacheArray* names_cache = - HForInCacheArray::cast(key_load->elements()); - - HValue* object = parameter(Descriptor::kReceiver); - if (names_cache->enumerable() == object) { - HForInCacheArray* index_cache = - names_cache->index_cache(); - HCheckMapValue* map_check = HCheckMapValue::New( - block()->graph()->isolate(), block()->graph()->zone(), - block()->graph()->GetInvalidContext(), object, names_cache->map()); - HInstruction* index = HLoadKeyed::New( - block()->graph()->isolate(), block()->graph()->zone(), - block()->graph()->GetInvalidContext(), index_cache, key_load->key(), - key_load->key(), nullptr, key_load->elements_kind()); - map_check->InsertBefore(this); - index->InsertBefore(this); - return Prepend(new (block()->zone()) HLoadFieldByIndex(object, index)); - } - } - } - return this; -} - -std::ostream& HStoreNamedField::PrintDataTo(std::ostream& os) const { // NOLINT - os << NameOf(object()) << access_ << " = " << NameOf(value()); - if (NeedsWriteBarrier()) os << " (write-barrier)"; - if (has_transition()) os << " (transition map " << *transition_map() << ")"; - return os; -} - - -std::ostream& HStoreKeyed::PrintDataTo(std::ostream& os) const { // NOLINT - if (!is_fixed_typed_array()) { - os << NameOf(elements()); - } else { - DCHECK(elements_kind() >= FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND && - elements_kind() <= LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND); - os << NameOf(elements()) << "." << ElementsKindToString(elements_kind()); - } - - os << "[" << NameOf(key()); - if (IsDehoisted()) os << " + " << base_offset(); - return os << "] = " << NameOf(value()); -} - - -std::ostream& HTransitionElementsKind::PrintDataTo( - std::ostream& os) const { // NOLINT - os << NameOf(object()); - ElementsKind from_kind = original_map().handle()->elements_kind(); - ElementsKind to_kind = transitioned_map().handle()->elements_kind(); - os << " " << *original_map().handle() << " [" - << ElementsAccessor::ForKind(from_kind)->name() << "] -> " - << *transitioned_map().handle() << " [" - << ElementsAccessor::ForKind(to_kind)->name() << "]"; - if (IsSimpleMapChangeTransition(from_kind, to_kind)) os << " (simple)"; - return os; -} - - -std::ostream& HInnerAllocatedObject::PrintDataTo( - std::ostream& os) const { // NOLINT - os << NameOf(base_object()) << " offset "; - return offset()->PrintTo(os); -} - - -std::ostream& HLoadContextSlot::PrintDataTo(std::ostream& os) const { // NOLINT - return os << NameOf(value()) << "[" << slot_index() << "]"; -} - - -std::ostream& HStoreContextSlot::PrintDataTo( - std::ostream& os) const { // NOLINT - return os << NameOf(context()) << "[" << slot_index() - << "] = " << NameOf(value()); -} - - -// Implementation of type inference and type conversions. Calculates -// the inferred type of this instruction based on the input operands. 
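// Illustrative sketch (not part of the deleted V8 sources) of the operand fold
// used by HPhi::CalculateInferredType() just below: start from the first
// operand's type and repeatedly combine it with the rest, defaulting to Tagged
// when there are no operands. The three-value type is a toy stand-in for V8's
// HType lattice.
#include <vector>

enum class ToyType { kSmi, kHeapNumber, kTagged };

static ToyType Combine(ToyType a, ToyType b) {
  // Differing types generalize to Tagged in this toy lattice.
  return a == b ? a : ToyType::kTagged;
}

static ToyType InferPhiType(const std::vector<ToyType>& operand_types) {
  if (operand_types.empty()) return ToyType::kTagged;
  ToyType result = operand_types[0];
  for (size_t i = 1; i < operand_types.size(); ++i) {
    result = Combine(result, operand_types[i]);
  }
  return result;
}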
- -HType HValue::CalculateInferredType() { - return type_; -} - - -HType HPhi::CalculateInferredType() { - if (OperandCount() == 0) return HType::Tagged(); - HType result = OperandAt(0)->type(); - for (int i = 1; i < OperandCount(); ++i) { - HType current = OperandAt(i)->type(); - result = result.Combine(current); - } - return result; -} - - -HType HChange::CalculateInferredType() { - if (from().IsDouble() && to().IsTagged()) return HType::HeapNumber(); - return type(); -} - - -Representation HUnaryMathOperation::RepresentationFromInputs() { - if (SupportsFlexibleFloorAndRound() && - (op_ == kMathFloor || op_ == kMathRound)) { - // Floor and Round always take a double input. The integral result can be - // used as an integer or a double. Infer the representation from the uses. - return Representation::None(); - } - Representation rep = representation(); - // If any of the actual input representation is more general than what we - // have so far but not Tagged, use that representation instead. - Representation input_rep = value()->representation(); - if (!input_rep.IsTagged()) { - rep = rep.generalize(input_rep); - } - return rep; -} - - -bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect, - HValue* dominator) { - DCHECK(side_effect == kNewSpacePromotion); - DCHECK(!IsAllocationFolded()); - Zone* zone = block()->zone(); - Isolate* isolate = block()->isolate(); - if (!FLAG_use_allocation_folding) return false; - - // Try to fold allocations together with their dominating allocations. - if (!dominator->IsAllocate()) { - if (FLAG_trace_allocation_folding) { - PrintF("#%d (%s) cannot fold into #%d (%s)\n", - id(), Mnemonic(), dominator->id(), dominator->Mnemonic()); - } - return false; - } - - // Check whether we are folding within the same block for local folding. - if (FLAG_use_local_allocation_folding && dominator->block() != block()) { - if (FLAG_trace_allocation_folding) { - PrintF("#%d (%s) cannot fold into #%d (%s), crosses basic blocks\n", - id(), Mnemonic(), dominator->id(), dominator->Mnemonic()); - } - return false; - } - - HAllocate* dominator_allocate = HAllocate::cast(dominator); - HValue* dominator_size = dominator_allocate->size(); - HValue* current_size = size(); - - // TODO(hpayer): Add support for non-constant allocation in dominator. - if (!current_size->IsInteger32Constant() || - !dominator_size->IsInteger32Constant()) { - if (FLAG_trace_allocation_folding) { - PrintF("#%d (%s) cannot fold into #%d (%s), " - "dynamic allocation size in dominator\n", - id(), Mnemonic(), dominator->id(), dominator->Mnemonic()); - } - return false; - } - - if (IsAllocationFoldingDominator()) { - if (FLAG_trace_allocation_folding) { - PrintF("#%d (%s) cannot fold into #%d (%s), already dominator\n", id(), - Mnemonic(), dominator->id(), dominator->Mnemonic()); - } - return false; - } - - if (!IsFoldable(dominator_allocate)) { - if (FLAG_trace_allocation_folding) { - PrintF("#%d (%s) cannot fold into #%d (%s), different spaces\n", id(), - Mnemonic(), dominator->id(), dominator->Mnemonic()); - } - return false; - } - - DCHECK( - (IsNewSpaceAllocation() && dominator_allocate->IsNewSpaceAllocation()) || - (IsOldSpaceAllocation() && dominator_allocate->IsOldSpaceAllocation())); - - // First update the size of the dominator allocate instruction. 
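// Illustrative sketch (not part of the deleted V8 sources) of the size update
// that follows: pad the dominator's constant size to double alignment when
// required, add this allocation's constant size, and reject folding if the
// combined size exceeds the regular heap-object limit minus one word (the word
// after the folded region is cleared). The constants below are stand-ins for
// V8's kDoubleSize, kPointerSize and kMaxRegularHeapObjectSize.
#include <cstdint>

static bool ComputeFoldedAllocationSize(int32_t dominator_size,
                                        int32_t current_size,
                                        bool must_double_align,
                                        int32_t* folded_size) {
  const int32_t kDoubleSize = 8;
  const int32_t kPointerSize = 8;                       // 64-bit target assumed
  const int32_t kDoubleAlignmentMask = kDoubleSize - 1;
  const int32_t kMaxRegularHeapObjectSize = 512 * 1024;  // illustrative limit
  if (must_double_align && (dominator_size & kDoubleAlignmentMask) != 0) {
    dominator_size += kDoubleSize / 2;  // filler word to restore alignment
  }
  int32_t combined = dominator_size + current_size;
  if (combined > kMaxRegularHeapObjectSize - kPointerSize) return false;
  *folded_size = combined;
  return true;
}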
- dominator_size = dominator_allocate->size(); - int32_t original_object_size = - HConstant::cast(dominator_size)->GetInteger32Constant(); - int32_t dominator_size_constant = original_object_size; - - if (MustAllocateDoubleAligned()) { - if ((dominator_size_constant & kDoubleAlignmentMask) != 0) { - dominator_size_constant += kDoubleSize / 2; - } - } - - int32_t current_size_max_value = size()->GetInteger32Constant(); - int32_t new_dominator_size = dominator_size_constant + current_size_max_value; - - // Since we clear the first word after folded memory, we cannot use the - // whole kMaxRegularHeapObjectSize memory. - if (new_dominator_size > kMaxRegularHeapObjectSize - kPointerSize) { - if (FLAG_trace_allocation_folding) { - PrintF("#%d (%s) cannot fold into #%d (%s) due to size: %d\n", - id(), Mnemonic(), dominator_allocate->id(), - dominator_allocate->Mnemonic(), new_dominator_size); - } - return false; - } - - HInstruction* new_dominator_size_value = HConstant::CreateAndInsertBefore( - isolate, zone, context(), new_dominator_size, Representation::None(), - dominator_allocate); - - dominator_allocate->UpdateSize(new_dominator_size_value); - - if (MustAllocateDoubleAligned()) { - if (!dominator_allocate->MustAllocateDoubleAligned()) { - dominator_allocate->MakeDoubleAligned(); - } - } - - if (!dominator_allocate->IsAllocationFoldingDominator()) { - HAllocate* first_alloc = - HAllocate::New(isolate, zone, dominator_allocate->context(), - dominator_size, dominator_allocate->type(), - IsNewSpaceAllocation() ? NOT_TENURED : TENURED, - JS_OBJECT_TYPE, block()->graph()->GetConstant0()); - first_alloc->InsertAfter(dominator_allocate); - dominator_allocate->ReplaceAllUsesWith(first_alloc); - dominator_allocate->MakeAllocationFoldingDominator(); - first_alloc->MakeFoldedAllocation(dominator_allocate); - if (FLAG_trace_allocation_folding) { - PrintF("#%d (%s) inserted for dominator #%d (%s)\n", first_alloc->id(), - first_alloc->Mnemonic(), dominator_allocate->id(), - dominator_allocate->Mnemonic()); - } - } - - MakeFoldedAllocation(dominator_allocate); - - if (FLAG_trace_allocation_folding) { - PrintF("#%d (%s) folded into #%d (%s), new dominator size: %d\n", id(), - Mnemonic(), dominator_allocate->id(), dominator_allocate->Mnemonic(), - new_dominator_size); - } - return true; -} - - -std::ostream& HAllocate::PrintDataTo(std::ostream& os) const { // NOLINT - os << NameOf(size()) << " ("; - if (IsNewSpaceAllocation()) os << "N"; - if (IsOldSpaceAllocation()) os << "P"; - if (MustAllocateDoubleAligned()) os << "A"; - if (MustPrefillWithFiller()) os << "F"; - if (IsAllocationFoldingDominator()) os << "d"; - if (IsAllocationFolded()) os << "f"; - return os << ")"; -} - - -bool HStoreKeyed::TryIncreaseBaseOffset(uint32_t increase_by_value) { - // The base offset is usually simply the size of the array header, except - // with dehoisting adds an addition offset due to a array index key - // manipulation, in which case it becomes (array header size + - // constant-offset-from-key * kPointerSize) - v8::base::internal::CheckedNumeric addition_result = base_offset_; - addition_result += increase_by_value; - if (!addition_result.IsValid()) return false; - base_offset_ = addition_result.ValueOrDie(); - return true; -} - - -bool HStoreKeyed::NeedsCanonicalization() { - switch (value()->opcode()) { - case kLoadKeyed: { - ElementsKind load_kind = HLoadKeyed::cast(value())->elements_kind(); - return IsFixedFloatElementsKind(load_kind); - } - case kChange: { - Representation from = HChange::cast(value())->from(); 
- return from.IsTagged() || from.IsHeapObject(); - } - case kConstant: - // Double constants are canonicalized upon construction. - return false; - default: - return !value()->IsBinaryOperation(); - } -} - - -#define H_CONSTANT_INT(val) \ - HConstant::New(isolate, zone, context, static_cast(val)) -#define H_CONSTANT_DOUBLE(val) \ - HConstant::New(isolate, zone, context, static_cast(val)) - -#define DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HInstr, op) \ - HInstruction* HInstr::New(Isolate* isolate, Zone* zone, HValue* context, \ - HValue* left, HValue* right) { \ - if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { \ - HConstant* c_left = HConstant::cast(left); \ - HConstant* c_right = HConstant::cast(right); \ - if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \ - double double_res = c_left->DoubleValue() op c_right->DoubleValue(); \ - if (IsInt32Double(double_res)) { \ - return H_CONSTANT_INT(double_res); \ - } \ - return H_CONSTANT_DOUBLE(double_res); \ - } \ - } \ - return new (zone) HInstr(context, left, right); \ - } - -DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HAdd, +) -DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HMul, *) -DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HSub, -) - -#undef DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR - - -HInstruction* HStringAdd::New(Isolate* isolate, Zone* zone, HValue* context, - HValue* left, HValue* right, - PretenureFlag pretenure_flag, - StringAddFlags flags, - Handle allocation_site) { - if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { - HConstant* c_right = HConstant::cast(right); - HConstant* c_left = HConstant::cast(left); - if (c_left->HasStringValue() && c_right->HasStringValue()) { - Handle left_string = c_left->StringValue(); - Handle right_string = c_right->StringValue(); - // Prevent possible exception by invalid string length. 
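// Illustrative sketch (not part of the deleted V8 sources): the folding
// pattern generated by DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR earlier in this
// hunk, written out for '+'. With two known numeric operands the result is
// computed in double precision and emitted as an int32 constant when it is
// exactly an int32 (the IsInt32Double() test), otherwise as a double constant.
#include <cmath>
#include <cstdint>
#include <limits>

struct FoldedNumber {
  bool is_int32;      // true: use int_value, false: use double_value
  int32_t int_value;
  double double_value;
};

static FoldedNumber FoldAdd(double left, double right) {
  double result = left + right;
  bool is_int32 =
      result >= std::numeric_limits<int32_t>::min() &&
      result <= std::numeric_limits<int32_t>::max() &&
      static_cast<double>(static_cast<int32_t>(result)) == result &&
      !(result == 0.0 && std::signbit(result));  // -0.0 needs a double constant
  if (is_int32) return {true, static_cast<int32_t>(result), result};
  return {false, 0, result};
}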
- if (left_string->length() + right_string->length() < String::kMaxLength) { - MaybeHandle concat = isolate->factory()->NewConsString( - c_left->StringValue(), c_right->StringValue()); - return HConstant::New(isolate, zone, context, concat.ToHandleChecked()); - } - } - } - return new (zone) - HStringAdd(context, left, right, pretenure_flag, flags, allocation_site); -} - - -std::ostream& HStringAdd::PrintDataTo(std::ostream& os) const { // NOLINT - if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) { - os << "_CheckBoth"; - } else if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_LEFT) { - os << "_CheckLeft"; - } else if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_RIGHT) { - os << "_CheckRight"; - } - HBinaryOperation::PrintDataTo(os); - os << " ("; - if (pretenure_flag() == NOT_TENURED) - os << "N"; - else if (pretenure_flag() == TENURED) - os << "D"; - return os << ")"; -} - - -HInstruction* HStringCharFromCode::New(Isolate* isolate, Zone* zone, - HValue* context, HValue* char_code) { - if (FLAG_fold_constants && char_code->IsConstant()) { - HConstant* c_code = HConstant::cast(char_code); - if (c_code->HasNumberValue()) { - if (std::isfinite(c_code->DoubleValue())) { - uint32_t code = c_code->NumberValueAsInteger32() & 0xffff; - return HConstant::New( - isolate, zone, context, - isolate->factory()->LookupSingleCharacterStringFromCode(code)); - } - return HConstant::New(isolate, zone, context, - isolate->factory()->empty_string()); - } - } - return new(zone) HStringCharFromCode(context, char_code); -} - - -HInstruction* HUnaryMathOperation::New(Isolate* isolate, Zone* zone, - HValue* context, HValue* value, - BuiltinFunctionId op) { - do { - if (!FLAG_fold_constants) break; - if (!value->IsConstant()) break; - HConstant* constant = HConstant::cast(value); - if (!constant->HasNumberValue()) break; - double d = constant->DoubleValue(); - if (std::isnan(d)) { // NaN poisons everything. - return H_CONSTANT_DOUBLE(std::numeric_limits::quiet_NaN()); - } - if (std::isinf(d)) { // +Infinity and -Infinity. - switch (op) { - case kMathCos: - case kMathSin: - return H_CONSTANT_DOUBLE(std::numeric_limits::quiet_NaN()); - case kMathExp: - return H_CONSTANT_DOUBLE((d > 0.0) ? d : 0.0); - case kMathLog: - case kMathSqrt: - return H_CONSTANT_DOUBLE( - (d > 0.0) ? d : std::numeric_limits::quiet_NaN()); - case kMathPowHalf: - case kMathAbs: - return H_CONSTANT_DOUBLE((d > 0.0) ? d : -d); - case kMathRound: - case kMathFround: - case kMathFloor: - return H_CONSTANT_DOUBLE(d); - case kMathClz32: - return H_CONSTANT_INT(32); - default: - UNREACHABLE(); - break; - } - } - switch (op) { - case kMathCos: - return H_CONSTANT_DOUBLE(base::ieee754::cos(d)); - case kMathExp: - return H_CONSTANT_DOUBLE(base::ieee754::exp(d)); - case kMathLog: - return H_CONSTANT_DOUBLE(base::ieee754::log(d)); - case kMathSin: - return H_CONSTANT_DOUBLE(base::ieee754::sin(d)); - case kMathSqrt: - lazily_initialize_fast_sqrt(isolate); - return H_CONSTANT_DOUBLE(fast_sqrt(d, isolate)); - case kMathPowHalf: - return H_CONSTANT_DOUBLE(power_double_double(d, 0.5)); - case kMathAbs: - return H_CONSTANT_DOUBLE((d >= 0.0) ? d + 0.0 : -d); - case kMathRound: - // -0.5 .. -0.0 round to -0.0. - if ((d >= -0.5 && Double(d).Sign() < 0)) return H_CONSTANT_DOUBLE(-0.0); - // Doubles are represented as Significant * 2 ^ Exponent. If the - // Exponent is not negative, the double value is already an integer. 
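// Illustrative sketch (not part of the deleted V8 sources): the Math.round
// folding around this point. Values in [-0.5, -0.0] round to -0.0, values that
// are already integral are returned unchanged, and everything else rounds
// half-up via floor(d + 0.5). The original tests Double(d).Exponent() >= 0
// (|d| large enough to be integral); d == floor(d) is used here as a portable
// stand-in that produces the same folded result.
#include <cmath>

static double FoldMathRound(double d) {
  if (d >= -0.5 && std::signbit(d)) return -0.0;  // -0.5 .. -0.0 -> -0.0
  if (d == std::floor(d)) return d;               // already an integer (or +/-inf)
  return std::floor(d + 0.5);
}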
- if (Double(d).Exponent() >= 0) return H_CONSTANT_DOUBLE(d); - return H_CONSTANT_DOUBLE(Floor(d + 0.5)); - case kMathFround: - return H_CONSTANT_DOUBLE(static_cast(static_cast(d))); - case kMathFloor: - return H_CONSTANT_DOUBLE(Floor(d)); - case kMathClz32: { - uint32_t i = DoubleToUint32(d); - return H_CONSTANT_INT(base::bits::CountLeadingZeros32(i)); - } - default: - UNREACHABLE(); - break; - } - } while (false); - return new(zone) HUnaryMathOperation(context, value, op); -} - - -Representation HUnaryMathOperation::RepresentationFromUses() { - if (op_ != kMathFloor && op_ != kMathRound) { - return HValue::RepresentationFromUses(); - } - - // The instruction can have an int32 or double output. Prefer a double - // representation if there are double uses. - bool use_double = false; - - for (HUseIterator it(uses()); !it.Done(); it.Advance()) { - HValue* use = it.value(); - int use_index = it.index(); - Representation rep_observed = use->observed_input_representation(use_index); - Representation rep_required = use->RequiredInputRepresentation(use_index); - use_double |= (rep_observed.IsDouble() || rep_required.IsDouble()); - if (use_double && !FLAG_trace_representation) { - // Having seen one double is enough. - break; - } - if (FLAG_trace_representation) { - if (!rep_required.IsDouble() || rep_observed.IsDouble()) { - PrintF("#%d %s is used by #%d %s as %s%s\n", - id(), Mnemonic(), use->id(), - use->Mnemonic(), rep_observed.Mnemonic(), - (use->CheckFlag(kTruncatingToInt32) ? "-trunc" : "")); - } else { - PrintF("#%d %s is required by #%d %s as %s%s\n", - id(), Mnemonic(), use->id(), - use->Mnemonic(), rep_required.Mnemonic(), - (use->CheckFlag(kTruncatingToInt32) ? "-trunc" : "")); - } - } - } - return use_double ? Representation::Double() : Representation::Integer32(); -} - - -HInstruction* HPower::New(Isolate* isolate, Zone* zone, HValue* context, - HValue* left, HValue* right) { - if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { - HConstant* c_left = HConstant::cast(left); - HConstant* c_right = HConstant::cast(right); - if (c_left->HasNumberValue() && c_right->HasNumberValue()) { - double result = - power_helper(isolate, c_left->DoubleValue(), c_right->DoubleValue()); - return H_CONSTANT_DOUBLE(std::isnan(result) - ? std::numeric_limits::quiet_NaN() - : result); - } - } - return new(zone) HPower(left, right); -} - - -HInstruction* HMathMinMax::New(Isolate* isolate, Zone* zone, HValue* context, - HValue* left, HValue* right, Operation op) { - if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { - HConstant* c_left = HConstant::cast(left); - HConstant* c_right = HConstant::cast(right); - if (c_left->HasNumberValue() && c_right->HasNumberValue()) { - double d_left = c_left->DoubleValue(); - double d_right = c_right->DoubleValue(); - if (op == kMathMin) { - if (d_left > d_right) return H_CONSTANT_DOUBLE(d_right); - if (d_left < d_right) return H_CONSTANT_DOUBLE(d_left); - if (d_left == d_right) { - // Handle +0 and -0. - return H_CONSTANT_DOUBLE((Double(d_left).Sign() == -1) ? d_left - : d_right); - } - } else { - if (d_left < d_right) return H_CONSTANT_DOUBLE(d_right); - if (d_left > d_right) return H_CONSTANT_DOUBLE(d_left); - if (d_left == d_right) { - // Handle +0 and -0. - return H_CONSTANT_DOUBLE((Double(d_left).Sign() == -1) ? d_right - : d_left); - } - } - // All comparisons failed, must be NaN. 
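// Illustrative sketch (not part of the deleted V8 sources): the Math.min
// folding above. Ordinary comparisons decide most cases; a tie between +0 and
// -0 picks -0 (Math.max picks the other operand); and when no comparison
// holds, at least one operand is NaN, so the folded result is NaN.
#include <cmath>
#include <limits>

static double FoldMathMin(double left, double right) {
  if (left > right) return right;
  if (left < right) return left;
  if (left == right) {
    return std::signbit(left) ? left : right;  // prefer -0.0 over +0.0
  }
  return std::numeric_limits<double>::quiet_NaN();  // a NaN operand poisons it
}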
- return H_CONSTANT_DOUBLE(std::numeric_limits::quiet_NaN()); - } - } - return new(zone) HMathMinMax(context, left, right, op); -} - -HInstruction* HMod::New(Isolate* isolate, Zone* zone, HValue* context, - HValue* left, HValue* right) { - if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { - HConstant* c_left = HConstant::cast(left); - HConstant* c_right = HConstant::cast(right); - if (c_left->HasInteger32Value() && c_right->HasInteger32Value()) { - int32_t dividend = c_left->Integer32Value(); - int32_t divisor = c_right->Integer32Value(); - if (dividend == kMinInt && divisor == -1) { - return H_CONSTANT_DOUBLE(-0.0); - } - if (divisor != 0) { - int32_t res = dividend % divisor; - if ((res == 0) && (dividend < 0)) { - return H_CONSTANT_DOUBLE(-0.0); - } - return H_CONSTANT_INT(res); - } - } - } - return new (zone) HMod(context, left, right); -} - -HInstruction* HDiv::New(Isolate* isolate, Zone* zone, HValue* context, - HValue* left, HValue* right) { - // If left and right are constant values, try to return a constant value. - if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { - HConstant* c_left = HConstant::cast(left); - HConstant* c_right = HConstant::cast(right); - if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { - if (std::isnan(c_left->DoubleValue()) || - std::isnan(c_right->DoubleValue())) { - return H_CONSTANT_DOUBLE(std::numeric_limits::quiet_NaN()); - } else if (c_right->DoubleValue() != 0) { - double double_res = c_left->DoubleValue() / c_right->DoubleValue(); - if (IsInt32Double(double_res)) { - return H_CONSTANT_INT(double_res); - } - return H_CONSTANT_DOUBLE(double_res); - } else if (c_left->DoubleValue() != 0) { - int sign = Double(c_left->DoubleValue()).Sign() * - Double(c_right->DoubleValue()).Sign(); // Right could be -0. - return H_CONSTANT_DOUBLE(sign * V8_INFINITY); - } else { - return H_CONSTANT_DOUBLE(std::numeric_limits::quiet_NaN()); - } - } - } - return new (zone) HDiv(context, left, right); -} - -HInstruction* HBitwise::New(Isolate* isolate, Zone* zone, HValue* context, - Token::Value op, HValue* left, HValue* right) { - if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { - HConstant* c_left = HConstant::cast(left); - HConstant* c_right = HConstant::cast(right); - if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { - int32_t result; - int32_t v_left = c_left->NumberValueAsInteger32(); - int32_t v_right = c_right->NumberValueAsInteger32(); - switch (op) { - case Token::BIT_XOR: - result = v_left ^ v_right; - break; - case Token::BIT_AND: - result = v_left & v_right; - break; - case Token::BIT_OR: - result = v_left | v_right; - break; - default: - result = 0; // Please the compiler. 
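// Illustrative sketch (not part of the deleted V8 sources): the integer modulo
// folding in HMod::New() above. kMinInt % -1 and any zero result with a
// negative dividend fold to the double -0.0 (JavaScript keeps the dividend's
// sign), while a zero divisor is left unfolded and handled at runtime.
#include <cstdint>
#include <optional>

struct FoldedMod {
  bool is_minus_zero;  // true: the folded result is the double -0.0
  int32_t value;       // valid when is_minus_zero is false
};

static std::optional<FoldedMod> FoldMod(int32_t dividend, int32_t divisor) {
  if (dividend == INT32_MIN && divisor == -1) return FoldedMod{true, 0};
  if (divisor == 0) return std::nullopt;  // not folded
  int32_t result = dividend % divisor;
  if (result == 0 && dividend < 0) return FoldedMod{true, 0};
  return FoldedMod{false, result};
}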
- UNREACHABLE(); - } - return H_CONSTANT_INT(result); - } - } - return new (zone) HBitwise(context, op, left, right); -} - -#define DEFINE_NEW_H_BITWISE_INSTR(HInstr, result) \ - HInstruction* HInstr::New(Isolate* isolate, Zone* zone, HValue* context, \ - HValue* left, HValue* right) { \ - if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { \ - HConstant* c_left = HConstant::cast(left); \ - HConstant* c_right = HConstant::cast(right); \ - if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \ - return H_CONSTANT_INT(result); \ - } \ - } \ - return new (zone) HInstr(context, left, right); \ - } - -DEFINE_NEW_H_BITWISE_INSTR(HSar, -c_left->NumberValueAsInteger32() >> (c_right->NumberValueAsInteger32() & 0x1f)) -DEFINE_NEW_H_BITWISE_INSTR(HShl, -c_left->NumberValueAsInteger32() << (c_right->NumberValueAsInteger32() & 0x1f)) - -#undef DEFINE_NEW_H_BITWISE_INSTR - -HInstruction* HShr::New(Isolate* isolate, Zone* zone, HValue* context, - HValue* left, HValue* right) { - if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { - HConstant* c_left = HConstant::cast(left); - HConstant* c_right = HConstant::cast(right); - if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { - int32_t left_val = c_left->NumberValueAsInteger32(); - int32_t right_val = c_right->NumberValueAsInteger32() & 0x1f; - if ((right_val == 0) && (left_val < 0)) { - return H_CONSTANT_DOUBLE(static_cast(left_val)); - } - return H_CONSTANT_INT(static_cast(left_val) >> right_val); - } - } - return new (zone) HShr(context, left, right); -} - - -HInstruction* HSeqStringGetChar::New(Isolate* isolate, Zone* zone, - HValue* context, String::Encoding encoding, - HValue* string, HValue* index) { - if (FLAG_fold_constants && string->IsConstant() && index->IsConstant()) { - HConstant* c_string = HConstant::cast(string); - HConstant* c_index = HConstant::cast(index); - if (c_string->HasStringValue() && c_index->HasInteger32Value()) { - Handle s = c_string->StringValue(); - int32_t i = c_index->Integer32Value(); - DCHECK_LE(0, i); - DCHECK_LT(i, s->length()); - return H_CONSTANT_INT(s->Get(i)); - } - } - return new(zone) HSeqStringGetChar(encoding, string, index); -} - - -#undef H_CONSTANT_INT -#undef H_CONSTANT_DOUBLE - - -std::ostream& HBitwise::PrintDataTo(std::ostream& os) const { // NOLINT - os << Token::Name(op_) << " "; - return HBitwiseBinaryOperation::PrintDataTo(os); -} - - -void HPhi::SimplifyConstantInputs() { - // Convert constant inputs to integers when all uses are truncating. - // This must happen before representation inference takes place. - if (!CheckUsesForFlag(kTruncatingToInt32)) return; - for (int i = 0; i < OperandCount(); ++i) { - if (!OperandAt(i)->IsConstant()) return; - } - HGraph* graph = block()->graph(); - for (int i = 0; i < OperandCount(); ++i) { - HConstant* operand = HConstant::cast(OperandAt(i)); - if (operand->HasInteger32Value()) { - continue; - } else if (operand->HasDoubleValue()) { - HConstant* integer_input = HConstant::New( - graph->isolate(), graph->zone(), graph->GetInvalidContext(), - DoubleToInt32(operand->DoubleValue())); - integer_input->InsertAfter(operand); - SetOperandAt(i, integer_input); - } else if (operand->HasBooleanValue()) { - SetOperandAt(i, operand->BooleanValue() ? 
graph->GetConstant1() - : graph->GetConstant0()); - } else if (operand->ImmortalImmovable()) { - if (operand->HasStringValue() && - operand->EqualsUnique( - Unique(isolate()->factory()->one_string()))) { - SetOperandAt(i, graph->GetConstant1()); - } else { - SetOperandAt(i, graph->GetConstant0()); - } - } - } - // Overwrite observed input representations because they are likely Tagged. - for (HUseIterator it(uses()); !it.Done(); it.Advance()) { - HValue* use = it.value(); - if (use->IsBinaryOperation()) { - HBinaryOperation::cast(use)->set_observed_input_representation( - it.index(), Representation::Smi()); - } - } -} - - -void HPhi::InferRepresentation(HInferRepresentationPhase* h_infer) { - DCHECK(CheckFlag(kFlexibleRepresentation)); - Representation new_rep = RepresentationFromUses(); - UpdateRepresentation(new_rep, h_infer, "uses"); - new_rep = RepresentationFromInputs(); - UpdateRepresentation(new_rep, h_infer, "inputs"); - new_rep = RepresentationFromUseRequirements(); - UpdateRepresentation(new_rep, h_infer, "use requirements"); -} - - -Representation HPhi::RepresentationFromInputs() { - Representation r = representation(); - for (int i = 0; i < OperandCount(); ++i) { - // Ignore conservative Tagged assumption of parameters if we have - // reason to believe that it's too conservative. - if (has_type_feedback_from_uses() && OperandAt(i)->IsParameter()) { - continue; - } - - r = r.generalize(OperandAt(i)->KnownOptimalRepresentation()); - } - return r; -} - - -// Returns a representation if all uses agree on the same representation. -// Integer32 is also returned when some uses are Smi but others are Integer32. -Representation HValue::RepresentationFromUseRequirements() { - Representation rep = Representation::None(); - for (HUseIterator it(uses()); !it.Done(); it.Advance()) { - // Ignore the use requirement from never run code - if (it.value()->block()->IsUnreachable()) continue; - - // We check for observed_input_representation elsewhere. - Representation use_rep = - it.value()->RequiredInputRepresentation(it.index()); - if (rep.IsNone()) { - rep = use_rep; - continue; - } - if (use_rep.IsNone() || rep.Equals(use_rep)) continue; - if (rep.generalize(use_rep).IsInteger32()) { - rep = Representation::Integer32(); - continue; - } - return Representation::None(); - } - return rep; -} - - -bool HValue::HasNonSmiUse() { - for (HUseIterator it(uses()); !it.Done(); it.Advance()) { - // We check for observed_input_representation elsewhere. - Representation use_rep = - it.value()->RequiredInputRepresentation(it.index()); - if (!use_rep.IsNone() && - !use_rep.IsSmi() && - !use_rep.IsTagged()) { - return true; - } - } - return false; -} - - -// Node-specific verification code is only included in debug mode. 
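// Illustrative sketch (not part of the deleted V8 sources): the agreement rule
// implemented by HValue::RepresentationFromUseRequirements() above. A
// representation is returned only if every use asks for the same one, except
// that a mix of Smi and Integer32 requirements generalizes to Integer32; any
// other disagreement yields None.
#include <vector>

enum class UseRep { kNone, kSmi, kInteger32, kOther };

static UseRep FromUseRequirements(const std::vector<UseRep>& use_requirements) {
  UseRep rep = UseRep::kNone;
  for (UseRep use_rep : use_requirements) {
    if (rep == UseRep::kNone) { rep = use_rep; continue; }
    if (use_rep == UseRep::kNone || use_rep == rep) continue;
    bool smi_int32_mix =
        (rep == UseRep::kSmi && use_rep == UseRep::kInteger32) ||
        (rep == UseRep::kInteger32 && use_rep == UseRep::kSmi);
    if (smi_int32_mix) { rep = UseRep::kInteger32; continue; }
    return UseRep::kNone;  // uses disagree beyond the Smi/Integer32 case
  }
  return rep;
}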
-#ifdef DEBUG - -void HPhi::Verify() { - DCHECK(OperandCount() == block()->predecessors()->length()); - for (int i = 0; i < OperandCount(); ++i) { - HValue* value = OperandAt(i); - HBasicBlock* defining_block = value->block(); - HBasicBlock* predecessor_block = block()->predecessors()->at(i); - DCHECK(defining_block == predecessor_block || - defining_block->Dominates(predecessor_block)); - } -} - - -void HSimulate::Verify() { - HInstruction::Verify(); - DCHECK(HasAstId() || next()->IsEnterInlined()); -} - - -void HCheckHeapObject::Verify() { - HInstruction::Verify(); - DCHECK(HasNoUses()); -} - - -void HCheckValue::Verify() { - HInstruction::Verify(); - DCHECK(HasNoUses()); -} - -#endif - - -HObjectAccess HObjectAccess::ForFixedArrayHeader(int offset) { - DCHECK(offset >= 0); - DCHECK(offset < FixedArray::kHeaderSize); - if (offset == FixedArray::kLengthOffset) return ForFixedArrayLength(); - return HObjectAccess(kInobject, offset); -} - - -HObjectAccess HObjectAccess::ForMapAndOffset(Handle map, int offset, - Representation representation) { - DCHECK(offset >= 0); - Portion portion = kInobject; - - if (offset == JSObject::kElementsOffset) { - portion = kElementsPointer; - } else if (offset == JSObject::kMapOffset) { - portion = kMaps; - } - bool existing_inobject_property = true; - if (!map.is_null()) { - existing_inobject_property = (offset < - map->instance_size() - map->unused_property_fields() * kPointerSize); - } - return HObjectAccess(portion, offset, representation, Handle::null(), - false, existing_inobject_property); -} - - -HObjectAccess HObjectAccess::ForAllocationSiteOffset(int offset) { - switch (offset) { - case AllocationSite::kTransitionInfoOffset: - return HObjectAccess(kInobject, offset, Representation::Tagged()); - case AllocationSite::kNestedSiteOffset: - return HObjectAccess(kInobject, offset, Representation::Tagged()); - case AllocationSite::kPretenureDataOffset: - return HObjectAccess(kInobject, offset, Representation::Smi()); - case AllocationSite::kPretenureCreateCountOffset: - return HObjectAccess(kInobject, offset, Representation::Smi()); - case AllocationSite::kDependentCodeOffset: - return HObjectAccess(kInobject, offset, Representation::Tagged()); - case AllocationSite::kWeakNextOffset: - return HObjectAccess(kInobject, offset, Representation::Tagged()); - default: - UNREACHABLE(); - } - return HObjectAccess(kInobject, offset); -} - - -HObjectAccess HObjectAccess::ForContextSlot(int index) { - DCHECK(index >= 0); - Portion portion = kInobject; - int offset = Context::kHeaderSize + index * kPointerSize; - DCHECK_EQ(offset, Context::SlotOffset(index) + kHeapObjectTag); - return HObjectAccess(portion, offset, Representation::Tagged()); -} - - -HObjectAccess HObjectAccess::ForScriptContext(int index) { - DCHECK(index >= 0); - Portion portion = kInobject; - int offset = ScriptContextTable::GetContextOffset(index); - return HObjectAccess(portion, offset, Representation::Tagged()); -} - - -HObjectAccess HObjectAccess::ForJSArrayOffset(int offset) { - DCHECK(offset >= 0); - Portion portion = kInobject; - - if (offset == JSObject::kElementsOffset) { - portion = kElementsPointer; - } else if (offset == JSArray::kLengthOffset) { - portion = kArrayLengths; - } else if (offset == JSObject::kMapOffset) { - portion = kMaps; - } - return HObjectAccess(portion, offset); -} - - -HObjectAccess HObjectAccess::ForBackingStoreOffset(int offset, - Representation representation) { - DCHECK(offset >= 0); - return HObjectAccess(kBackingStore, offset, representation, - Handle::null(), 
false, false); -} - - -HObjectAccess HObjectAccess::ForField(Handle map, int index, - Representation representation, - Handle name) { - if (index < 0) { - // Negative property indices are in-object properties, indexed - // from the end of the fixed part of the object. - int offset = (index * kPointerSize) + map->instance_size(); - return HObjectAccess(kInobject, offset, representation, name, false, true); - } else { - // Non-negative property indices are in the properties array. - int offset = (index * kPointerSize) + FixedArray::kHeaderSize; - return HObjectAccess(kBackingStore, offset, representation, name, - false, false); - } -} - - -void HObjectAccess::SetGVNFlags(HValue *instr, PropertyAccessType access_type) { - // set the appropriate GVN flags for a given load or store instruction - if (access_type == STORE) { - // track dominating allocations in order to eliminate write barriers - instr->SetDependsOnFlag(::v8::internal::kNewSpacePromotion); - instr->SetFlag(HValue::kTrackSideEffectDominators); - } else { - // try to GVN loads, but don't hoist above map changes - instr->SetFlag(HValue::kUseGVN); - instr->SetDependsOnFlag(::v8::internal::kMaps); - } - - switch (portion()) { - case kArrayLengths: - if (access_type == STORE) { - instr->SetChangesFlag(::v8::internal::kArrayLengths); - } else { - instr->SetDependsOnFlag(::v8::internal::kArrayLengths); - } - break; - case kStringLengths: - if (access_type == STORE) { - instr->SetChangesFlag(::v8::internal::kStringLengths); - } else { - instr->SetDependsOnFlag(::v8::internal::kStringLengths); - } - break; - case kInobject: - if (access_type == STORE) { - instr->SetChangesFlag(::v8::internal::kInobjectFields); - } else { - instr->SetDependsOnFlag(::v8::internal::kInobjectFields); - } - break; - case kDouble: - if (access_type == STORE) { - instr->SetChangesFlag(::v8::internal::kDoubleFields); - } else { - instr->SetDependsOnFlag(::v8::internal::kDoubleFields); - } - break; - case kBackingStore: - if (access_type == STORE) { - instr->SetChangesFlag(::v8::internal::kBackingStoreFields); - } else { - instr->SetDependsOnFlag(::v8::internal::kBackingStoreFields); - } - break; - case kElementsPointer: - if (access_type == STORE) { - instr->SetChangesFlag(::v8::internal::kElementsPointer); - } else { - instr->SetDependsOnFlag(::v8::internal::kElementsPointer); - } - break; - case kMaps: - if (access_type == STORE) { - instr->SetChangesFlag(::v8::internal::kMaps); - } else { - instr->SetDependsOnFlag(::v8::internal::kMaps); - } - break; - case kExternalMemory: - if (access_type == STORE) { - instr->SetChangesFlag(::v8::internal::kExternalMemory); - } else { - instr->SetDependsOnFlag(::v8::internal::kExternalMemory); - } - break; - } -} - - -std::ostream& operator<<(std::ostream& os, const HObjectAccess& access) { - os << "."; - - switch (access.portion()) { - case HObjectAccess::kArrayLengths: - case HObjectAccess::kStringLengths: - os << "%length"; - break; - case HObjectAccess::kElementsPointer: - os << "%elements"; - break; - case HObjectAccess::kMaps: - os << "%map"; - break; - case HObjectAccess::kDouble: // fall through - case HObjectAccess::kInobject: - if (!access.name().is_null() && access.name()->IsString()) { - os << Handle::cast(access.name())->ToCString().get(); - } - os << "[in-object]"; - break; - case HObjectAccess::kBackingStore: - if (!access.name().is_null() && access.name()->IsString()) { - os << Handle::cast(access.name())->ToCString().get(); - } - os << "[backing-store]"; - break; - case HObjectAccess::kExternalMemory: - os 
<< "[external-memory]"; - break; - } - - return os << "@" << access.offset(); -} - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/hydrogen-instructions.h b/src/crankshaft/hydrogen-instructions.h deleted file mode 100644 index 9ccf4f260d..0000000000 --- a/src/crankshaft/hydrogen-instructions.h +++ /dev/null @@ -1,6746 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_HYDROGEN_INSTRUCTIONS_H_ -#define V8_CRANKSHAFT_HYDROGEN_INSTRUCTIONS_H_ - -#include -#include - -#include "src/allocation.h" -#include "src/ast/ast.h" -#include "src/base/bits.h" -#include "src/bit-vector.h" -#include "src/conversions.h" -#include "src/crankshaft/hydrogen-types.h" -#include "src/crankshaft/unique.h" -#include "src/deoptimizer.h" -#include "src/globals.h" -#include "src/interface-descriptors.h" -#include "src/small-pointer-list.h" -#include "src/utils.h" -#include "src/zone/zone.h" - -namespace v8 { -namespace internal { - -// Forward declarations. -struct ChangesOf; -class HBasicBlock; -class HDiv; -class HEnvironment; -class HInferRepresentationPhase; -class HInstruction; -class HLoopInformation; -class HStoreNamedField; -class HValue; -class LInstruction; -class LChunkBuilder; -class SmallMapList; - -#define HYDROGEN_ABSTRACT_INSTRUCTION_LIST(V) \ - V(ArithmeticBinaryOperation) \ - V(BinaryOperation) \ - V(BitwiseBinaryOperation) \ - V(ControlInstruction) \ - V(Instruction) - -#define HYDROGEN_CONCRETE_INSTRUCTION_LIST(V) \ - V(AbnormalExit) \ - V(AccessArgumentsAt) \ - V(Add) \ - V(Allocate) \ - V(ApplyArguments) \ - V(ArgumentsElements) \ - V(ArgumentsLength) \ - V(ArgumentsObject) \ - V(Bitwise) \ - V(BlockEntry) \ - V(BoundsCheck) \ - V(Branch) \ - V(CallWithDescriptor) \ - V(CallNewArray) \ - V(CallRuntime) \ - V(CapturedObject) \ - V(Change) \ - V(CheckArrayBufferNotNeutered) \ - V(CheckHeapObject) \ - V(CheckInstanceType) \ - V(CheckMaps) \ - V(CheckMapValue) \ - V(CheckSmi) \ - V(CheckValue) \ - V(ClampToUint8) \ - V(ClassOfTestAndBranch) \ - V(CompareNumericAndBranch) \ - V(CompareHoleAndBranch) \ - V(CompareGeneric) \ - V(CompareObjectEqAndBranch) \ - V(CompareMap) \ - V(Constant) \ - V(Context) \ - V(DebugBreak) \ - V(DeclareGlobals) \ - V(Deoptimize) \ - V(Div) \ - V(DummyUse) \ - V(EnterInlined) \ - V(EnvironmentMarker) \ - V(ForceRepresentation) \ - V(ForInCacheArray) \ - V(ForInPrepareMap) \ - V(Goto) \ - V(HasInstanceTypeAndBranch) \ - V(InnerAllocatedObject) \ - V(InvokeFunction) \ - V(HasInPrototypeChainAndBranch) \ - V(IsStringAndBranch) \ - V(IsSmiAndBranch) \ - V(IsUndetectableAndBranch) \ - V(LeaveInlined) \ - V(LoadContextSlot) \ - V(LoadFieldByIndex) \ - V(LoadFunctionPrototype) \ - V(LoadKeyed) \ - V(LoadNamedField) \ - V(LoadRoot) \ - V(MathFloorOfDiv) \ - V(MathMinMax) \ - V(MaybeGrowElements) \ - V(Mod) \ - V(Mul) \ - V(OsrEntry) \ - V(Parameter) \ - V(Power) \ - V(Prologue) \ - V(PushArguments) \ - V(Return) \ - V(Ror) \ - V(Sar) \ - V(SeqStringGetChar) \ - V(SeqStringSetChar) \ - V(Shl) \ - V(Shr) \ - V(Simulate) \ - V(StackCheck) \ - V(StoreCodeEntry) \ - V(StoreContextSlot) \ - V(StoreKeyed) \ - V(StoreNamedField) \ - V(StringAdd) \ - V(StringCharCodeAt) \ - V(StringCharFromCode) \ - V(StringCompareAndBranch) \ - V(Sub) \ - V(ThisFunction) \ - V(TransitionElementsKind) \ - V(TrapAllocationMemento) \ - V(Typeof) \ - V(TypeofIsAndBranch) \ - V(UnaryMathOperation) \ - V(UnknownOSRValue) \ - V(UseConst) \ 
- V(WrapReceiver) - -#define GVN_TRACKED_FLAG_LIST(V) \ - V(NewSpacePromotion) - -#define GVN_UNTRACKED_FLAG_LIST(V) \ - V(ArrayElements) \ - V(ArrayLengths) \ - V(StringLengths) \ - V(BackingStoreFields) \ - V(Calls) \ - V(ContextSlots) \ - V(DoubleArrayElements) \ - V(DoubleFields) \ - V(ElementsKind) \ - V(ElementsPointer) \ - V(GlobalVars) \ - V(InobjectFields) \ - V(Maps) \ - V(OsrEntries) \ - V(ExternalMemory) \ - V(StringChars) \ - V(TypedArrayElements) - - -#define DECLARE_ABSTRACT_INSTRUCTION(type) \ - bool Is##type() const final { return true; } \ - static H##type* cast(HValue* value) { \ - DCHECK(value->Is##type()); \ - return reinterpret_cast(value); \ - } - - -#define DECLARE_CONCRETE_INSTRUCTION(type) \ - LInstruction* CompileToLithium(LChunkBuilder* builder) final; \ - static H##type* cast(HValue* value) { \ - DCHECK(value->Is##type()); \ - return reinterpret_cast(value); \ - } \ - Opcode opcode() const final { return HValue::k##type; } - - -enum PropertyAccessType { LOAD, STORE }; - -Representation RepresentationFromMachineType(MachineType type); - -class Range final : public ZoneObject { - public: - Range() - : lower_(kMinInt), - upper_(kMaxInt), - next_(NULL), - can_be_minus_zero_(false) { } - - Range(int32_t lower, int32_t upper) - : lower_(lower), - upper_(upper), - next_(NULL), - can_be_minus_zero_(false) { } - - int32_t upper() const { return upper_; } - int32_t lower() const { return lower_; } - Range* next() const { return next_; } - Range* CopyClearLower(Zone* zone) const { - return new(zone) Range(kMinInt, upper_); - } - Range* CopyClearUpper(Zone* zone) const { - return new(zone) Range(lower_, kMaxInt); - } - Range* Copy(Zone* zone) const { - Range* result = new(zone) Range(lower_, upper_); - result->set_can_be_minus_zero(CanBeMinusZero()); - return result; - } - int32_t Mask() const; - void set_can_be_minus_zero(bool b) { can_be_minus_zero_ = b; } - bool CanBeMinusZero() const { return CanBeZero() && can_be_minus_zero_; } - bool CanBeZero() const { return upper_ >= 0 && lower_ <= 0; } - bool CanBeNegative() const { return lower_ < 0; } - bool CanBePositive() const { return upper_ > 0; } - bool Includes(int value) const { return lower_ <= value && upper_ >= value; } - bool IsMostGeneric() const { - return lower_ == kMinInt && upper_ == kMaxInt && CanBeMinusZero(); - } - bool IsInSmiRange() const { - return lower_ >= Smi::kMinValue && upper_ <= Smi::kMaxValue; - } - void ClampToSmi() { - lower_ = Max(lower_, Smi::kMinValue); - upper_ = Min(upper_, Smi::kMaxValue); - } - void Clear(); - void KeepOrder(); -#ifdef DEBUG - void Verify() const; -#endif - - void StackUpon(Range* other) { - Intersect(other); - next_ = other; - } - - void Intersect(Range* other); - void Union(Range* other); - void CombinedMax(Range* other); - void CombinedMin(Range* other); - - void AddConstant(int32_t value); - void Sar(int32_t value); - void Shl(int32_t value); - bool AddAndCheckOverflow(const Representation& r, Range* other); - bool SubAndCheckOverflow(const Representation& r, Range* other); - bool MulAndCheckOverflow(const Representation& r, Range* other); - - private: - int32_t lower_; - int32_t upper_; - Range* next_; - bool can_be_minus_zero_; -}; - - -class HUseListNode: public ZoneObject { - public: - HUseListNode(HValue* value, int index, HUseListNode* tail) - : tail_(tail), value_(value), index_(index) { - } - - HUseListNode* tail(); - HValue* value() const { return value_; } - int index() const { return index_; } - - void set_tail(HUseListNode* list) { tail_ = list; } - 
-#ifdef DEBUG - void Zap() { - tail_ = reinterpret_cast(1); - value_ = NULL; - index_ = -1; - } -#endif - - private: - HUseListNode* tail_; - HValue* value_; - int index_; -}; - - -// We reuse use list nodes behind the scenes as uses are added and deleted. -// This class is the safe way to iterate uses while deleting them. -class HUseIterator final BASE_EMBEDDED { - public: - bool Done() { return current_ == NULL; } - void Advance(); - - HValue* value() { - DCHECK(!Done()); - return value_; - } - - int index() { - DCHECK(!Done()); - return index_; - } - - private: - explicit HUseIterator(HUseListNode* head); - - HUseListNode* current_; - HUseListNode* next_; - HValue* value_; - int index_; - - friend class HValue; -}; - - -// All tracked flags should appear before untracked ones. -enum GVNFlag { - // Declare global value numbering flags. -#define DECLARE_FLAG(Type) k##Type, - GVN_TRACKED_FLAG_LIST(DECLARE_FLAG) - GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG) -#undef DECLARE_FLAG -#define COUNT_FLAG(Type) + 1 - kNumberOfTrackedSideEffects = 0 GVN_TRACKED_FLAG_LIST(COUNT_FLAG), - kNumberOfUntrackedSideEffects = 0 GVN_UNTRACKED_FLAG_LIST(COUNT_FLAG), -#undef COUNT_FLAG - kNumberOfFlags = kNumberOfTrackedSideEffects + kNumberOfUntrackedSideEffects -}; - - -static inline GVNFlag GVNFlagFromInt(int i) { - DCHECK(i >= 0); - DCHECK(i < kNumberOfFlags); - return static_cast(i); -} - - -class DecompositionResult final BASE_EMBEDDED { - public: - DecompositionResult() : base_(NULL), offset_(0), scale_(0) {} - - HValue* base() { return base_; } - int offset() { return offset_; } - int scale() { return scale_; } - - bool Apply(HValue* other_base, int other_offset, int other_scale = 0) { - if (base_ == NULL) { - base_ = other_base; - offset_ = other_offset; - scale_ = other_scale; - return true; - } else { - if (scale_ == 0) { - base_ = other_base; - offset_ += other_offset; - scale_ = other_scale; - return true; - } else { - return false; - } - } - } - - void SwapValues(HValue** other_base, int* other_offset, int* other_scale) { - swap(&base_, other_base); - swap(&offset_, other_offset); - swap(&scale_, other_scale); - } - - private: - template void swap(T* a, T* b) { - T c(*a); - *a = *b; - *b = c; - } - - HValue* base_; - int offset_; - int scale_; -}; - - -typedef EnumSet GVNFlagSet; - - -class HValue : public ZoneObject { - public: - static const int kNoNumber = -1; - - enum Flag { - kFlexibleRepresentation, - kCannotBeTagged, - // Participate in Global Value Numbering, i.e. elimination of - // unnecessary recomputations. If an instruction sets this flag, it must - // implement DataEquals(), which will be used to determine if other - // occurrences of the instruction are indeed the same. - kUseGVN, - // Track instructions that are dominating side effects. If an instruction - // sets this flag, it must implement HandleSideEffectDominator() and should - // indicate which side effects to track by setting GVN flags. - kTrackSideEffectDominators, - kCanOverflow, - kBailoutOnMinusZero, - kCanBeDivByZero, - kLeftCanBeMinInt, - kLeftCanBeNegative, - kLeftCanBePositive, - kTruncatingToNumber, - kIsArguments, - kTruncatingToInt32, - kAllUsesTruncatingToInt32, - kTruncatingToSmi, - kAllUsesTruncatingToSmi, - // Set after an instruction is killed. - kIsDead, - // Instructions that are allowed to produce full range unsigned integer - // values are marked with kUint32 flag. 
If arithmetic shift or a load from - // EXTERNAL_UINT32_ELEMENTS array is not marked with this flag - // it will deoptimize if result does not fit into signed integer range. - // HGraph::ComputeSafeUint32Operations is responsible for setting this - // flag. - kUint32, - kHasNoObservableSideEffects, - // Indicates an instruction shouldn't be replaced by optimization, this flag - // is useful to set in cases where recomputing a value is cheaper than - // extending the value's live range and spilling it. - kCantBeReplaced, - // Indicates the instruction is live during dead code elimination. - kIsLive, - - // HEnvironmentMarkers are deleted before dead code - // elimination takes place, so they can repurpose the kIsLive flag: - kEndsLiveRange = kIsLive, - - // TODO(everyone): Don't forget to update this! - kLastFlag = kIsLive - }; - - STATIC_ASSERT(kLastFlag < kBitsPerInt); - - static HValue* cast(HValue* value) { return value; } - - enum Opcode { - // Declare a unique enum value for each hydrogen instruction. - #define DECLARE_OPCODE(type) k##type, - HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) - kPhi - #undef DECLARE_OPCODE - }; - virtual Opcode opcode() const = 0; - - // Declare a non-virtual predicates for each concrete HInstruction or HValue. - #define DECLARE_PREDICATE(type) \ - bool Is##type() const { return opcode() == k##type; } - HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE) - #undef DECLARE_PREDICATE - bool IsPhi() const { return opcode() == kPhi; } - - // Declare virtual predicates for abstract HInstruction or HValue - #define DECLARE_PREDICATE(type) \ - virtual bool Is##type() const { return false; } - HYDROGEN_ABSTRACT_INSTRUCTION_LIST(DECLARE_PREDICATE) - #undef DECLARE_PREDICATE - - bool IsBitwiseBinaryShift() { - return IsShl() || IsShr() || IsSar(); - } - - explicit HValue(HType type = HType::Tagged()) - : block_(NULL), - id_(kNoNumber), - type_(type), - use_list_(NULL), - range_(NULL), -#ifdef DEBUG - range_poisoned_(false), -#endif - flags_(0) {} - virtual ~HValue() {} - - virtual SourcePosition position() const { return SourcePosition::Unknown(); } - - HBasicBlock* block() const { return block_; } - void SetBlock(HBasicBlock* block); - - // Note: Never call this method for an unlinked value. - Isolate* isolate() const; - - int id() const { return id_; } - void set_id(int id) { id_ = id; } - - HUseIterator uses() const { return HUseIterator(use_list_); } - - virtual bool EmitAtUses() { return false; } - - Representation representation() const { return representation_; } - void ChangeRepresentation(Representation r) { - DCHECK(CheckFlag(kFlexibleRepresentation)); - DCHECK(!CheckFlag(kCannotBeTagged) || !r.IsTagged()); - RepresentationChanged(r); - representation_ = r; - if (r.IsTagged()) { - // Tagged is the bottom of the lattice, don't go any further. 
- ClearFlag(kFlexibleRepresentation); - } - } - virtual void AssumeRepresentation(Representation r); - - virtual Representation KnownOptimalRepresentation() { - Representation r = representation(); - if (r.IsTagged()) { - HType t = type(); - if (t.IsSmi()) return Representation::Smi(); - if (t.IsHeapNumber()) return Representation::Double(); - if (t.IsHeapObject()) return r; - return Representation::None(); - } - return r; - } - - HType type() const { return type_; } - void set_type(HType new_type) { - DCHECK(new_type.IsSubtypeOf(type_)); - type_ = new_type; - } - - // There are HInstructions that do not really change a value, they - // only add pieces of information to it (like bounds checks, map checks, - // smi checks...). - // We call these instructions "informative definitions", or "iDef". - // One of the iDef operands is special because it is the value that is - // "transferred" to the output, we call it the "redefined operand". - // If an HValue is an iDef it must override RedefinedOperandIndex() so that - // it does not return kNoRedefinedOperand; - static const int kNoRedefinedOperand = -1; - virtual int RedefinedOperandIndex() { return kNoRedefinedOperand; } - bool IsInformativeDefinition() { - return RedefinedOperandIndex() != kNoRedefinedOperand; - } - HValue* RedefinedOperand() { - int index = RedefinedOperandIndex(); - return index == kNoRedefinedOperand ? NULL : OperandAt(index); - } - - bool CanReplaceWithDummyUses(); - - virtual int argument_delta() const { return 0; } - - // A purely informative definition is an idef that will not emit code and - // should therefore be removed from the graph in the RestoreActualValues - // phase (so that live ranges will be shorter). - virtual bool IsPurelyInformativeDefinition() { return false; } - - // This method must always return the original HValue SSA definition, - // regardless of any chain of iDefs of this value. - HValue* ActualValue() { - HValue* value = this; - int index; - while ((index = value->RedefinedOperandIndex()) != kNoRedefinedOperand) { - value = value->OperandAt(index); - } - return value; - } - - bool IsInteger32Constant(); - int32_t GetInteger32Constant(); - bool EqualsInteger32Constant(int32_t value); - - bool IsDefinedAfter(HBasicBlock* other) const; - - // Operands. - virtual int OperandCount() const = 0; - virtual HValue* OperandAt(int index) const = 0; - void SetOperandAt(int index, HValue* value); - - void DeleteAndReplaceWith(HValue* other); - void ReplaceAllUsesWith(HValue* other); - bool HasNoUses() const { return use_list_ == NULL; } - bool HasOneUse() const { - return use_list_ != NULL && use_list_->tail() == NULL; - } - bool HasMultipleUses() const { - return use_list_ != NULL && use_list_->tail() != NULL; - } - int UseCount() const; - - // Mark this HValue as dead and to be removed from other HValues' use lists. - void Kill(); - - int flags() const { return flags_; } - void SetFlag(Flag f) { flags_ |= (1 << f); } - void ClearFlag(Flag f) { flags_ &= ~(1 << f); } - bool CheckFlag(Flag f) const { return (flags_ & (1 << f)) != 0; } - void CopyFlag(Flag f, HValue* other) { - if (other->CheckFlag(f)) SetFlag(f); - } - - // Returns true if the flag specified is set for all uses, false otherwise. - bool CheckUsesForFlag(Flag f) const; - // Same as before and the first one without the flag is returned in value. - bool CheckUsesForFlag(Flag f, HValue** value) const; - // Returns true if the flag specified is set for all uses, and this set - // of uses is non-empty. 
- bool HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) const; - - GVNFlagSet ChangesFlags() const { return changes_flags_; } - GVNFlagSet DependsOnFlags() const { return depends_on_flags_; } - void SetChangesFlag(GVNFlag f) { changes_flags_.Add(f); } - void SetDependsOnFlag(GVNFlag f) { depends_on_flags_.Add(f); } - void ClearChangesFlag(GVNFlag f) { changes_flags_.Remove(f); } - void ClearDependsOnFlag(GVNFlag f) { depends_on_flags_.Remove(f); } - bool CheckChangesFlag(GVNFlag f) const { - return changes_flags_.Contains(f); - } - bool CheckDependsOnFlag(GVNFlag f) const { - return depends_on_flags_.Contains(f); - } - void SetAllSideEffects() { changes_flags_.Add(AllSideEffectsFlagSet()); } - void ClearAllSideEffects() { - changes_flags_.Remove(AllSideEffectsFlagSet()); - } - bool HasSideEffects() const { - return changes_flags_.ContainsAnyOf(AllSideEffectsFlagSet()); - } - bool HasObservableSideEffects() const { - return !CheckFlag(kHasNoObservableSideEffects) && - changes_flags_.ContainsAnyOf(AllObservableSideEffectsFlagSet()); - } - - GVNFlagSet SideEffectFlags() const { - GVNFlagSet result = ChangesFlags(); - result.Intersect(AllSideEffectsFlagSet()); - return result; - } - - GVNFlagSet ObservableChangesFlags() const { - GVNFlagSet result = ChangesFlags(); - result.Intersect(AllObservableSideEffectsFlagSet()); - return result; - } - - Range* range() const { - DCHECK(!range_poisoned_); - return range_; - } - bool HasRange() const { - DCHECK(!range_poisoned_); - return range_ != NULL; - } -#ifdef DEBUG - void PoisonRange() { range_poisoned_ = true; } -#endif - void AddNewRange(Range* r, Zone* zone); - void RemoveLastAddedRange(); - void ComputeInitialRange(Zone* zone); - - // Escape analysis helpers. - virtual bool HasEscapingOperandAt(int index) { return true; } - virtual bool HasOutOfBoundsAccess(int size) { return false; } - - // Representation helpers. - virtual Representation observed_input_representation(int index) { - return Representation::None(); - } - virtual Representation RequiredInputRepresentation(int index) = 0; - virtual void InferRepresentation(HInferRepresentationPhase* h_infer); - - // This gives the instruction an opportunity to replace itself with an - // instruction that does the same in some better way. To replace an - // instruction with a new one, first add the new instruction to the graph, - // then return it. Return NULL to have the instruction deleted. - virtual HValue* Canonicalize() { return this; } - - bool Equals(HValue* other); - virtual intptr_t Hashcode(); - - // Compute unique ids upfront that is safe wrt GC and concurrent compilation. - virtual void FinalizeUniqueness() { } - - // Printing support. - virtual std::ostream& PrintTo(std::ostream& os) const = 0; // NOLINT - - const char* Mnemonic() const; - - // Type information helpers. - bool HasMonomorphicJSObjectType(); - - // TODO(mstarzinger): For now instructions can override this function to - // specify statically known types, once HType can convey more information - // it should be based on the HType. - virtual Handle GetMonomorphicJSObjectMap() { return Handle(); } - - // Updated the inferred type of this instruction and returns true if - // it has changed. - bool UpdateInferredType(); - - virtual HType CalculateInferredType(); - - // This function must be overridden for instructions which have the - // kTrackSideEffectDominators flag set, to track instructions that are - // dominating side effects. - // It returns true if it removed an instruction which had side effects. 
- virtual bool HandleSideEffectDominator(GVNFlag side_effect, - HValue* dominator) { - UNREACHABLE(); - } - - // Check if this instruction has some reason that prevents elimination. - bool CannotBeEliminated() const { - return HasObservableSideEffects() || !IsDeletable(); - } - -#ifdef DEBUG - virtual void Verify() = 0; -#endif - - // Returns true conservatively if the program might be able to observe a - // ToString() operation on this value. - bool ToStringCanBeObserved() const { - return ToStringOrToNumberCanBeObserved(); - } - - // Returns true conservatively if the program might be able to observe a - // ToNumber() operation on this value. - bool ToNumberCanBeObserved() const { - return ToStringOrToNumberCanBeObserved(); - } - - MinusZeroMode GetMinusZeroMode() { - return CheckFlag(kBailoutOnMinusZero) - ? FAIL_ON_MINUS_ZERO : TREAT_MINUS_ZERO_AS_ZERO; - } - - protected: - // This function must be overridden for instructions with flag kUseGVN, to - // compare the non-Operand parts of the instruction. - virtual bool DataEquals(HValue* other) { - UNREACHABLE(); - } - - bool ToStringOrToNumberCanBeObserved() const { - if (type().IsTaggedPrimitive()) return false; - if (type().IsJSReceiver()) return true; - return !representation().IsSmiOrInteger32() && !representation().IsDouble(); - } - - virtual Representation RepresentationFromInputs() { - return representation(); - } - virtual Representation RepresentationFromUses(); - Representation RepresentationFromUseRequirements(); - bool HasNonSmiUse(); - virtual void UpdateRepresentation(Representation new_rep, - HInferRepresentationPhase* h_infer, - const char* reason); - void AddDependantsToWorklist(HInferRepresentationPhase* h_infer); - - virtual void RepresentationChanged(Representation to) { } - - virtual Range* InferRange(Zone* zone); - virtual void DeleteFromGraph() = 0; - virtual void InternalSetOperandAt(int index, HValue* value) = 0; - void clear_block() { - DCHECK(block_ != NULL); - block_ = NULL; - } - - void set_representation(Representation r) { - DCHECK(representation_.IsNone() && !r.IsNone()); - representation_ = r; - } - - static GVNFlagSet AllFlagSet() { - GVNFlagSet result; -#define ADD_FLAG(Type) result.Add(k##Type); - GVN_TRACKED_FLAG_LIST(ADD_FLAG) - GVN_UNTRACKED_FLAG_LIST(ADD_FLAG) -#undef ADD_FLAG - return result; - } - - // A flag mask to mark an instruction as having arbitrary side effects. - static GVNFlagSet AllSideEffectsFlagSet() { - GVNFlagSet result = AllFlagSet(); - result.Remove(kOsrEntries); - return result; - } - friend std::ostream& operator<<(std::ostream& os, const ChangesOf& v); - - // A flag mask of all side effects that can make observable changes in - // an executing program (i.e. are not safe to repeat, move or remove); - static GVNFlagSet AllObservableSideEffectsFlagSet() { - GVNFlagSet result = AllFlagSet(); - result.Remove(kNewSpacePromotion); - result.Remove(kElementsKind); - result.Remove(kElementsPointer); - result.Remove(kMaps); - return result; - } - - // Remove the matching use from the use list if present. Returns the - // removed list node or NULL. - HUseListNode* RemoveUse(HValue* value, int index); - - void RegisterUse(int index, HValue* new_value); - - HBasicBlock* block_; - - // The id of this instruction in the hydrogen graph, assigned when first - // added to the graph. Reflects creation order. 
- int id_; - - Representation representation_; - HType type_; - HUseListNode* use_list_; - Range* range_; -#ifdef DEBUG - bool range_poisoned_; -#endif - int flags_; - GVNFlagSet changes_flags_; - GVNFlagSet depends_on_flags_; - - private: - virtual bool IsDeletable() const { return false; } - - DISALLOW_COPY_AND_ASSIGN(HValue); -}; - -// Support for printing various aspects of an HValue. -struct NameOf { - explicit NameOf(const HValue* const v) : value(v) {} - const HValue* value; -}; - - -struct TypeOf { - explicit TypeOf(const HValue* const v) : value(v) {} - const HValue* value; -}; - - -struct ChangesOf { - explicit ChangesOf(const HValue* const v) : value(v) {} - const HValue* value; -}; - - -std::ostream& operator<<(std::ostream& os, const HValue& v); -std::ostream& operator<<(std::ostream& os, const NameOf& v); -std::ostream& operator<<(std::ostream& os, const TypeOf& v); -std::ostream& operator<<(std::ostream& os, const ChangesOf& v); - - -#define DECLARE_INSTRUCTION_FACTORY_P0(I) \ - static I* New(Isolate* isolate, Zone* zone, HValue* context) { \ - return new (zone) I(); \ - } - -#define DECLARE_INSTRUCTION_FACTORY_P1(I, P1) \ - static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1) { \ - return new (zone) I(p1); \ - } - -#define DECLARE_INSTRUCTION_FACTORY_P2(I, P1, P2) \ - static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2) { \ - return new (zone) I(p1, p2); \ - } - -#define DECLARE_INSTRUCTION_FACTORY_P3(I, P1, P2, P3) \ - static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \ - P3 p3) { \ - return new (zone) I(p1, p2, p3); \ - } - -#define DECLARE_INSTRUCTION_FACTORY_P4(I, P1, P2, P3, P4) \ - static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \ - P3 p3, P4 p4) { \ - return new (zone) I(p1, p2, p3, p4); \ - } - -#define DECLARE_INSTRUCTION_FACTORY_P5(I, P1, P2, P3, P4, P5) \ - static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \ - P3 p3, P4 p4, P5 p5) { \ - return new (zone) I(p1, p2, p3, p4, p5); \ - } - -#define DECLARE_INSTRUCTION_FACTORY_P6(I, P1, P2, P3, P4, P5, P6) \ - static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \ - P3 p3, P4 p4, P5 p5, P6 p6) { \ - return new (zone) I(p1, p2, p3, p4, p5, p6); \ - } - -#define DECLARE_INSTRUCTION_FACTORY_P7(I, P1, P2, P3, P4, P5, P6, P7) \ - static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \ - P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) { \ - return new (zone) I(p1, p2, p3, p4, p5, p6, p7); \ - } - -#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P0(I) \ - static I* New(Isolate* isolate, Zone* zone, HValue* context) { \ - return new (zone) I(context); \ - } - -#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(I, P1) \ - static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1) { \ - return new (zone) I(context, p1); \ - } - -#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(I, P1, P2) \ - static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2) { \ - return new (zone) I(context, p1, p2); \ - } - -#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(I, P1, P2, P3) \ - static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \ - P3 p3) { \ - return new (zone) I(context, p1, p2, p3); \ - } - -#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(I, P1, P2, P3, P4) \ - static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \ - P3 p3, P4 p4) { \ - return new (zone) I(context, p1, p2, p3, p4); \ - } - -#define 
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P5(I, P1, P2, P3, P4, P5) \ - static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \ - P3 p3, P4 p4, P5 p5) { \ - return new (zone) I(context, p1, p2, p3, p4, p5); \ - } - -#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P6(I, P1, P2, P3, P4, P5, P6) \ - static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \ - P3 p3, P4 p4, P5 p5, P6 p6) { \ - return new (zone) I(context, p1, p2, p3, p4, p5, p6); \ - } - -class HInstruction : public HValue { - public: - HInstruction* next() const { return next_; } - HInstruction* previous() const { return previous_; } - - std::ostream& PrintTo(std::ostream& os) const override; // NOLINT - virtual std::ostream& PrintDataTo(std::ostream& os) const; // NOLINT - - bool IsLinked() const { return block() != NULL; } - void Unlink(); - - void InsertBefore(HInstruction* next); - - template T* Prepend(T* instr) { - instr->InsertBefore(this); - return instr; - } - - void InsertAfter(HInstruction* previous); - - template T* Append(T* instr) { - instr->InsertAfter(this); - return instr; - } - - // The position is a write-once variable. - SourcePosition position() const override { return position_; } - bool has_position() const { return position_.IsKnown(); } - void set_position(SourcePosition position) { - DCHECK(position.IsKnown()); - position_ = position; - } - - bool Dominates(HInstruction* other); - bool CanTruncateToSmi() const { return CheckFlag(kTruncatingToSmi); } - bool CanTruncateToInt32() const { return CheckFlag(kTruncatingToInt32); } - bool CanTruncateToNumber() const { return CheckFlag(kTruncatingToNumber); } - - virtual LInstruction* CompileToLithium(LChunkBuilder* builder) = 0; - -#ifdef DEBUG - void Verify() override; -#endif - - bool CanDeoptimize(); - - virtual bool HasStackCheck() { return false; } - - DECLARE_ABSTRACT_INSTRUCTION(Instruction) - - protected: - explicit HInstruction(HType type = HType::Tagged()) - : HValue(type), - next_(NULL), - previous_(NULL), - position_(SourcePosition::Unknown()) { - SetDependsOnFlag(kOsrEntries); - } - - void DeleteFromGraph() override { Unlink(); } - - private: - void InitializeAsFirst(HBasicBlock* block) { - DCHECK(!IsLinked()); - SetBlock(block); - } - - HInstruction* next_; - HInstruction* previous_; - SourcePosition position_; - - friend class HBasicBlock; -}; - - -template -class HTemplateInstruction : public HInstruction { - public: - int OperandCount() const final { return V; } - HValue* OperandAt(int i) const final { return inputs_[i]; } - - protected: - explicit HTemplateInstruction(HType type = HType::Tagged()) - : HInstruction(type) {} - - void InternalSetOperandAt(int i, HValue* value) final { inputs_[i] = value; } - - private: - EmbeddedContainer inputs_; -}; - - -class HControlInstruction : public HInstruction { - public: - virtual HBasicBlock* SuccessorAt(int i) const = 0; - virtual int SuccessorCount() const = 0; - virtual void SetSuccessorAt(int i, HBasicBlock* block) = 0; - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - virtual bool KnownSuccessorBlock(HBasicBlock** block) { - *block = NULL; - return false; - } - - HBasicBlock* FirstSuccessor() { - return SuccessorCount() > 0 ? SuccessorAt(0) : NULL; - } - HBasicBlock* SecondSuccessor() { - return SuccessorCount() > 1 ? 
SuccessorAt(1) : NULL; - } - - void Not() { - HBasicBlock* swap = SuccessorAt(0); - SetSuccessorAt(0, SuccessorAt(1)); - SetSuccessorAt(1, swap); - } - - DECLARE_ABSTRACT_INSTRUCTION(ControlInstruction) -}; - - -class HSuccessorIterator final BASE_EMBEDDED { - public: - explicit HSuccessorIterator(const HControlInstruction* instr) - : instr_(instr), current_(0) {} - - bool Done() { return current_ >= instr_->SuccessorCount(); } - HBasicBlock* Current() { return instr_->SuccessorAt(current_); } - void Advance() { current_++; } - - private: - const HControlInstruction* instr_; - int current_; -}; - - -template -class HTemplateControlInstruction : public HControlInstruction { - public: - int SuccessorCount() const override { return S; } - HBasicBlock* SuccessorAt(int i) const override { return successors_[i]; } - void SetSuccessorAt(int i, HBasicBlock* block) override { - successors_[i] = block; - } - - int OperandCount() const override { return V; } - HValue* OperandAt(int i) const override { return inputs_[i]; } - - - protected: - void InternalSetOperandAt(int i, HValue* value) override { - inputs_[i] = value; - } - - private: - EmbeddedContainer successors_; - EmbeddedContainer inputs_; -}; - - -class HBlockEntry final : public HTemplateInstruction<0> { - public: - Representation RequiredInputRepresentation(int index) override { - return Representation::None(); - } - - DECLARE_CONCRETE_INSTRUCTION(BlockEntry) -}; - - -class HDummyUse final : public HTemplateInstruction<1> { - public: - explicit HDummyUse(HValue* value) - : HTemplateInstruction<1>(HType::Smi()) { - SetOperandAt(0, value); - // Pretend to be a Smi so that the HChange instructions inserted - // before any use generate as little code as possible. - set_representation(Representation::Tagged()); - } - - HValue* value() const { return OperandAt(0); } - - bool HasEscapingOperandAt(int index) override { return false; } - Representation RequiredInputRepresentation(int index) override { - return Representation::None(); - } - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - DECLARE_CONCRETE_INSTRUCTION(DummyUse); -}; - - -// Inserts an int3/stop break instruction for debugging purposes. 
-class HDebugBreak final : public HTemplateInstruction<0> { - public: - DECLARE_INSTRUCTION_FACTORY_P0(HDebugBreak); - - Representation RequiredInputRepresentation(int index) override { - return Representation::None(); - } - - DECLARE_CONCRETE_INSTRUCTION(DebugBreak) -}; - - -class HPrologue final : public HTemplateInstruction<0> { - public: - static HPrologue* New(Zone* zone) { return new (zone) HPrologue(); } - - Representation RequiredInputRepresentation(int index) override { - return Representation::None(); - } - - DECLARE_CONCRETE_INSTRUCTION(Prologue) -}; - - -class HGoto final : public HTemplateControlInstruction<1, 0> { - public: - explicit HGoto(HBasicBlock* target) { - SetSuccessorAt(0, target); - } - - bool KnownSuccessorBlock(HBasicBlock** block) override { - *block = FirstSuccessor(); - return true; - } - - Representation RequiredInputRepresentation(int index) override { - return Representation::None(); - } - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - DECLARE_CONCRETE_INSTRUCTION(Goto) -}; - - -class HDeoptimize final : public HTemplateControlInstruction<1, 0> { - public: - static HDeoptimize* New(Isolate* isolate, Zone* zone, HValue* context, - DeoptimizeReason reason, - Deoptimizer::BailoutType type, - HBasicBlock* unreachable_continuation) { - return new(zone) HDeoptimize(reason, type, unreachable_continuation); - } - - bool KnownSuccessorBlock(HBasicBlock** block) override { - *block = NULL; - return true; - } - - Representation RequiredInputRepresentation(int index) override { - return Representation::None(); - } - - DeoptimizeReason reason() const { return reason_; } - Deoptimizer::BailoutType type() { return type_; } - - DECLARE_CONCRETE_INSTRUCTION(Deoptimize) - - private: - explicit HDeoptimize(DeoptimizeReason reason, Deoptimizer::BailoutType type, - HBasicBlock* unreachable_continuation) - : reason_(reason), type_(type) { - SetSuccessorAt(0, unreachable_continuation); - } - - DeoptimizeReason reason_; - Deoptimizer::BailoutType type_; -}; - - -class HUnaryControlInstruction : public HTemplateControlInstruction<2, 1> { - public: - HUnaryControlInstruction(HValue* value, - HBasicBlock* true_target, - HBasicBlock* false_target) { - SetOperandAt(0, value); - SetSuccessorAt(0, true_target); - SetSuccessorAt(1, false_target); - } - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - HValue* value() const { return OperandAt(0); } -}; - - -class HBranch final : public HUnaryControlInstruction { - public: - DECLARE_INSTRUCTION_FACTORY_P1(HBranch, HValue*); - DECLARE_INSTRUCTION_FACTORY_P2(HBranch, HValue*, ToBooleanHints); - DECLARE_INSTRUCTION_FACTORY_P4(HBranch, HValue*, ToBooleanHints, HBasicBlock*, - HBasicBlock*); - - Representation RequiredInputRepresentation(int index) override { - return Representation::None(); - } - Representation observed_input_representation(int index) override; - - bool KnownSuccessorBlock(HBasicBlock** block) override; - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - ToBooleanHints expected_input_types() const { return expected_input_types_; } - - DECLARE_CONCRETE_INSTRUCTION(Branch) - - private: - HBranch(HValue* value, - ToBooleanHints expected_input_types = ToBooleanHint::kNone, - HBasicBlock* true_target = NULL, HBasicBlock* false_target = NULL) - : HUnaryControlInstruction(value, true_target, false_target), - expected_input_types_(expected_input_types) {} - - ToBooleanHints expected_input_types_; -}; - - -class HCompareMap final : public 
HUnaryControlInstruction { - public: - DECLARE_INSTRUCTION_FACTORY_P2(HCompareMap, HValue*, Handle); - DECLARE_INSTRUCTION_FACTORY_P4(HCompareMap, HValue*, Handle, - HBasicBlock*, HBasicBlock*); - - bool KnownSuccessorBlock(HBasicBlock** block) override { - if (known_successor_index() != kNoKnownSuccessorIndex) { - *block = SuccessorAt(known_successor_index()); - return true; - } - *block = NULL; - return false; - } - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - static const int kNoKnownSuccessorIndex = -1; - int known_successor_index() const { - return KnownSuccessorIndexField::decode(bit_field_) - - kInternalKnownSuccessorOffset; - } - void set_known_successor_index(int index) { - DCHECK(index >= 0 - kInternalKnownSuccessorOffset); - bit_field_ = KnownSuccessorIndexField::update( - bit_field_, index + kInternalKnownSuccessorOffset); - } - - Unique map() const { return map_; } - bool map_is_stable() const { return MapIsStableField::decode(bit_field_); } - - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - DECLARE_CONCRETE_INSTRUCTION(CompareMap) - - protected: - int RedefinedOperandIndex() override { return 0; } - - private: - HCompareMap(HValue* value, Handle map, HBasicBlock* true_target = NULL, - HBasicBlock* false_target = NULL) - : HUnaryControlInstruction(value, true_target, false_target), - bit_field_(KnownSuccessorIndexField::encode( - kNoKnownSuccessorIndex + kInternalKnownSuccessorOffset) | - MapIsStableField::encode(map->is_stable())), - map_(Unique::CreateImmovable(map)) { - set_representation(Representation::Tagged()); - } - - // BitFields can only store unsigned values, so use an offset. - // Adding kInternalKnownSuccessorOffset must yield an unsigned value. - static const int kInternalKnownSuccessorOffset = 1; - STATIC_ASSERT(kNoKnownSuccessorIndex + kInternalKnownSuccessorOffset >= 0); - - class KnownSuccessorIndexField : public BitField {}; - class MapIsStableField : public BitField {}; - - uint32_t bit_field_; - Unique map_; -}; - - -class HContext final : public HTemplateInstruction<0> { - public: - static HContext* New(Zone* zone) { - return new(zone) HContext(); - } - - Representation RequiredInputRepresentation(int index) override { - return Representation::None(); - } - - DECLARE_CONCRETE_INSTRUCTION(Context) - - protected: - bool DataEquals(HValue* other) override { return true; } - - private: - HContext() { - set_representation(Representation::Tagged()); - SetFlag(kUseGVN); - } - - bool IsDeletable() const override { return true; } -}; - - -class HReturn final : public HTemplateControlInstruction<0, 3> { - public: - DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HReturn, HValue*, HValue*); - DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HReturn, HValue*); - - Representation RequiredInputRepresentation(int index) override { - // TODO(titzer): require an Int32 input for faster returns. 
- if (index == 2) return Representation::Smi(); - return Representation::Tagged(); - } - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - HValue* value() const { return OperandAt(0); } - HValue* context() const { return OperandAt(1); } - HValue* parameter_count() const { return OperandAt(2); } - - DECLARE_CONCRETE_INSTRUCTION(Return) - - private: - HReturn(HValue* context, HValue* value, HValue* parameter_count = 0) { - SetOperandAt(0, value); - SetOperandAt(1, context); - SetOperandAt(2, parameter_count); - } -}; - - -class HAbnormalExit final : public HTemplateControlInstruction<0, 0> { - public: - DECLARE_INSTRUCTION_FACTORY_P0(HAbnormalExit); - - Representation RequiredInputRepresentation(int index) override { - return Representation::None(); - } - - DECLARE_CONCRETE_INSTRUCTION(AbnormalExit) - private: - HAbnormalExit() {} -}; - - -class HUnaryOperation : public HTemplateInstruction<1> { - public: - explicit HUnaryOperation(HValue* value, HType type = HType::Tagged()) - : HTemplateInstruction<1>(type) { - SetOperandAt(0, value); - } - - static HUnaryOperation* cast(HValue* value) { - return reinterpret_cast(value); - } - - HValue* value() const { return OperandAt(0); } - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT -}; - - -class HUseConst final : public HUnaryOperation { - public: - DECLARE_INSTRUCTION_FACTORY_P1(HUseConst, HValue*); - - Representation RequiredInputRepresentation(int index) override { - return Representation::None(); - } - - DECLARE_CONCRETE_INSTRUCTION(UseConst) - - private: - explicit HUseConst(HValue* old_value) : HUnaryOperation(old_value) { } -}; - - -class HForceRepresentation final : public HTemplateInstruction<1> { - public: - static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context, - HValue* value, - Representation required_representation); - - HValue* value() const { return OperandAt(0); } - - Representation observed_input_representation(int index) override { - // We haven't actually *observed* this, but it's closer to the truth - // than 'None'. - return representation(); // Same as the output representation. - } - Representation RequiredInputRepresentation(int index) override { - return representation(); // Same as the output representation. 
- } - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - DECLARE_CONCRETE_INSTRUCTION(ForceRepresentation) - - private: - HForceRepresentation(HValue* value, Representation required_representation) { - SetOperandAt(0, value); - set_representation(required_representation); - } -}; - -class HChange final : public HUnaryOperation { - public: - HChange(HValue* value, Representation to, bool is_truncating_to_smi, - bool is_truncating_to_int32, bool is_truncating_to_number) - : HUnaryOperation(value) { - DCHECK(!value->representation().IsNone()); - DCHECK(!to.IsNone()); - DCHECK(!value->representation().Equals(to)); - set_representation(to); - SetFlag(kUseGVN); - SetFlag(kCanOverflow); - if (is_truncating_to_smi && to.IsSmi()) { - SetFlag(kTruncatingToSmi); - SetFlag(kTruncatingToInt32); - SetFlag(kTruncatingToNumber); - } else if (is_truncating_to_int32) { - SetFlag(kTruncatingToInt32); - SetFlag(kTruncatingToNumber); - } else if (is_truncating_to_number) { - SetFlag(kTruncatingToNumber); - } - if (value->representation().IsSmi() || value->type().IsSmi()) { - set_type(HType::Smi()); - } else { - set_type(HType::TaggedNumber()); - if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion); - } - } - - HType CalculateInferredType() override; - HValue* Canonicalize() override; - - Representation from() const { return value()->representation(); } - Representation to() const { return representation(); } - bool deoptimize_on_minus_zero() const { - return CheckFlag(kBailoutOnMinusZero); - } - Representation RequiredInputRepresentation(int index) override { - return from(); - } - - Range* InferRange(Zone* zone) override; - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - DECLARE_CONCRETE_INSTRUCTION(Change) - - protected: - bool DataEquals(HValue* other) override { return true; } - - private: - bool IsDeletable() const override { - return !from().IsTagged() || value()->type().IsSmi(); - } -}; - - -class HClampToUint8 final : public HUnaryOperation { - public: - DECLARE_INSTRUCTION_FACTORY_P1(HClampToUint8, HValue*); - - Representation RequiredInputRepresentation(int index) override { - return Representation::None(); - } - - DECLARE_CONCRETE_INSTRUCTION(ClampToUint8) - - protected: - bool DataEquals(HValue* other) override { return true; } - - private: - explicit HClampToUint8(HValue* value) - : HUnaryOperation(value) { - set_representation(Representation::Integer32()); - SetFlag(kTruncatingToNumber); - SetFlag(kUseGVN); - } - - bool IsDeletable() const override { return true; } -}; - - -enum RemovableSimulate { - REMOVABLE_SIMULATE, - FIXED_SIMULATE -}; - - -class HSimulate final : public HInstruction { - public: - HSimulate(BailoutId ast_id, int pop_count, Zone* zone, - RemovableSimulate removable) - : ast_id_(ast_id), - pop_count_(pop_count), - values_(2, zone), - assigned_indexes_(2, zone), - zone_(zone), - bit_field_(RemovableField::encode(removable) | - DoneWithReplayField::encode(false)) {} - ~HSimulate() {} - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - bool HasAstId() const { return !ast_id_.IsNone(); } - BailoutId ast_id() const { return ast_id_; } - void set_ast_id(BailoutId id) { - DCHECK(!HasAstId()); - ast_id_ = id; - } - - int pop_count() const { return pop_count_; } - const ZoneList* values() const { return &values_; } - int GetAssignedIndexAt(int index) const { - DCHECK(HasAssignedIndexAt(index)); - return assigned_indexes_[index]; - } - bool HasAssignedIndexAt(int index) const { - return 
assigned_indexes_[index] != kNoIndex; - } - void AddAssignedValue(int index, HValue* value) { - AddValue(index, value); - } - void AddPushedValue(HValue* value) { - AddValue(kNoIndex, value); - } - int ToOperandIndex(int environment_index) { - for (int i = 0; i < assigned_indexes_.length(); ++i) { - if (assigned_indexes_[i] == environment_index) return i; - } - return -1; - } - int OperandCount() const override { return values_.length(); } - HValue* OperandAt(int index) const override { return values_[index]; } - - bool HasEscapingOperandAt(int index) override { return false; } - Representation RequiredInputRepresentation(int index) override { - return Representation::None(); - } - - void MergeWith(ZoneList* list); - bool is_candidate_for_removal() { - return RemovableField::decode(bit_field_) == REMOVABLE_SIMULATE; - } - - // Replay effects of this instruction on the given environment. - void ReplayEnvironment(HEnvironment* env); - - DECLARE_CONCRETE_INSTRUCTION(Simulate) - -#ifdef DEBUG - void Verify() override; - void set_closure(Handle closure) { closure_ = closure; } - Handle closure() const { return closure_; } -#endif - - protected: - void InternalSetOperandAt(int index, HValue* value) override { - values_[index] = value; - } - - private: - static const int kNoIndex = -1; - void AddValue(int index, HValue* value) { - assigned_indexes_.Add(index, zone_); - // Resize the list of pushed values. - values_.Add(NULL, zone_); - // Set the operand through the base method in HValue to make sure that the - // use lists are correctly updated. - SetOperandAt(values_.length() - 1, value); - } - bool HasValueForIndex(int index) { - for (int i = 0; i < assigned_indexes_.length(); ++i) { - if (assigned_indexes_[i] == index) return true; - } - return false; - } - bool is_done_with_replay() const { - return DoneWithReplayField::decode(bit_field_); - } - void set_done_with_replay() { - bit_field_ = DoneWithReplayField::update(bit_field_, true); - } - - class RemovableField : public BitField {}; - class DoneWithReplayField : public BitField {}; - - BailoutId ast_id_; - int pop_count_; - ZoneList values_; - ZoneList assigned_indexes_; - Zone* zone_; - uint32_t bit_field_; - -#ifdef DEBUG - Handle closure_; -#endif -}; - - -class HEnvironmentMarker final : public HTemplateInstruction<1> { - public: - enum Kind { BIND, LOOKUP }; - - DECLARE_INSTRUCTION_FACTORY_P2(HEnvironmentMarker, Kind, int); - - Kind kind() const { return kind_; } - int index() const { return index_; } - HSimulate* next_simulate() { return next_simulate_; } - void set_next_simulate(HSimulate* simulate) { - next_simulate_ = simulate; - } - - Representation RequiredInputRepresentation(int index) override { - return Representation::None(); - } - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - -#ifdef DEBUG - void set_closure(Handle closure) { - DCHECK(closure_.is_null()); - DCHECK(!closure.is_null()); - closure_ = closure; - } - Handle closure() const { return closure_; } -#endif - - DECLARE_CONCRETE_INSTRUCTION(EnvironmentMarker); - - private: - HEnvironmentMarker(Kind kind, int index) - : kind_(kind), index_(index), next_simulate_(NULL) { } - - Kind kind_; - int index_; - HSimulate* next_simulate_; - -#ifdef DEBUG - Handle closure_; -#endif -}; - - -class HStackCheck final : public HTemplateInstruction<1> { - public: - enum Type { - kFunctionEntry, - kBackwardsBranch - }; - - DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HStackCheck, Type); - - HValue* context() { return OperandAt(0); } - - Representation 
RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - void Eliminate() { - // The stack check eliminator might try to eliminate the same stack - // check instruction multiple times. - if (IsLinked()) { - DeleteAndReplaceWith(NULL); - } - } - - bool is_function_entry() { return type_ == kFunctionEntry; } - bool is_backwards_branch() { return type_ == kBackwardsBranch; } - - DECLARE_CONCRETE_INSTRUCTION(StackCheck) - - private: - HStackCheck(HValue* context, Type type) : type_(type) { - SetOperandAt(0, context); - SetChangesFlag(kNewSpacePromotion); - } - - Type type_; -}; - - -enum InliningKind { - NORMAL_RETURN, // Drop the function from the environment on return. - CONSTRUCT_CALL_RETURN, // Either use allocated receiver or return value. - GETTER_CALL_RETURN, // Returning from a getter, need to restore context. - SETTER_CALL_RETURN // Use the RHS of the assignment as the return value. -}; - - -class HArgumentsObject; -class HConstant; - - -class HEnterInlined final : public HTemplateInstruction<0> { - public: - static HEnterInlined* New(Isolate* isolate, Zone* zone, HValue* context, - BailoutId return_id, Handle closure, - HConstant* closure_context, int arguments_count, - FunctionLiteral* function, - InliningKind inlining_kind, Variable* arguments_var, - HArgumentsObject* arguments_object, - TailCallMode syntactic_tail_call_mode) { - return new (zone) - HEnterInlined(return_id, closure, closure_context, arguments_count, - function, inlining_kind, arguments_var, arguments_object, - syntactic_tail_call_mode, zone); - } - - ZoneList* return_targets() { return &return_targets_; } - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - Handle shared() const { return shared_; } - Handle closure() const { return closure_; } - HConstant* closure_context() const { return closure_context_; } - int arguments_count() const { return arguments_count_; } - bool arguments_pushed() const { return arguments_pushed_; } - void set_arguments_pushed() { arguments_pushed_ = true; } - FunctionLiteral* function() const { return function_; } - InliningKind inlining_kind() const { return inlining_kind_; } - TailCallMode syntactic_tail_call_mode() const { - return syntactic_tail_call_mode_; - } - BailoutId ReturnId() const { return return_id_; } - int inlining_id() const { return inlining_id_; } - void set_inlining_id(int inlining_id) { inlining_id_ = inlining_id; } - - Representation RequiredInputRepresentation(int index) override { - return Representation::None(); - } - - Variable* arguments_var() { return arguments_var_; } - HArgumentsObject* arguments_object() { return arguments_object_; } - - DECLARE_CONCRETE_INSTRUCTION(EnterInlined) - - private: - HEnterInlined(BailoutId return_id, Handle closure, - HConstant* closure_context, int arguments_count, - FunctionLiteral* function, InliningKind inlining_kind, - Variable* arguments_var, HArgumentsObject* arguments_object, - TailCallMode syntactic_tail_call_mode, Zone* zone) - : return_id_(return_id), - shared_(handle(closure->shared())), - closure_(closure), - closure_context_(closure_context), - arguments_count_(arguments_count), - arguments_pushed_(false), - function_(function), - inlining_kind_(inlining_kind), - syntactic_tail_call_mode_(syntactic_tail_call_mode), - inlining_id_(-1), - arguments_var_(arguments_var), - arguments_object_(arguments_object), - return_targets_(2, zone) {} - - BailoutId return_id_; - Handle shared_; - Handle closure_; - HConstant* closure_context_; - int arguments_count_; - 
bool arguments_pushed_; - FunctionLiteral* function_; - InliningKind inlining_kind_; - TailCallMode syntactic_tail_call_mode_; - int inlining_id_; - Variable* arguments_var_; - HArgumentsObject* arguments_object_; - ZoneList return_targets_; -}; - - -class HLeaveInlined final : public HTemplateInstruction<0> { - public: - HLeaveInlined(HEnterInlined* entry, - int drop_count) - : entry_(entry), - drop_count_(drop_count) { } - - Representation RequiredInputRepresentation(int index) override { - return Representation::None(); - } - - int argument_delta() const override { - return entry_->arguments_pushed() ? -drop_count_ : 0; - } - - DECLARE_CONCRETE_INSTRUCTION(LeaveInlined) - - private: - HEnterInlined* entry_; - int drop_count_; -}; - - -class HPushArguments final : public HInstruction { - public: - static HPushArguments* New(Isolate* isolate, Zone* zone, HValue* context) { - return new(zone) HPushArguments(zone); - } - static HPushArguments* New(Isolate* isolate, Zone* zone, HValue* context, - HValue* arg1) { - HPushArguments* instr = new(zone) HPushArguments(zone); - instr->AddInput(arg1); - return instr; - } - static HPushArguments* New(Isolate* isolate, Zone* zone, HValue* context, - HValue* arg1, HValue* arg2) { - HPushArguments* instr = new(zone) HPushArguments(zone); - instr->AddInput(arg1); - instr->AddInput(arg2); - return instr; - } - static HPushArguments* New(Isolate* isolate, Zone* zone, HValue* context, - HValue* arg1, HValue* arg2, HValue* arg3) { - HPushArguments* instr = new(zone) HPushArguments(zone); - instr->AddInput(arg1); - instr->AddInput(arg2); - instr->AddInput(arg3); - return instr; - } - static HPushArguments* New(Isolate* isolate, Zone* zone, HValue* context, - HValue* arg1, HValue* arg2, HValue* arg3, - HValue* arg4) { - HPushArguments* instr = new(zone) HPushArguments(zone); - instr->AddInput(arg1); - instr->AddInput(arg2); - instr->AddInput(arg3); - instr->AddInput(arg4); - return instr; - } - - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - int argument_delta() const override { return inputs_.length(); } - HValue* argument(int i) { return OperandAt(i); } - - int OperandCount() const final { return inputs_.length(); } - HValue* OperandAt(int i) const final { return inputs_[i]; } - - void AddInput(HValue* value); - - DECLARE_CONCRETE_INSTRUCTION(PushArguments) - - protected: - void InternalSetOperandAt(int i, HValue* value) final { inputs_[i] = value; } - - private: - explicit HPushArguments(Zone* zone) - : HInstruction(HType::Tagged()), inputs_(4, zone) { - set_representation(Representation::Tagged()); - } - - ZoneList inputs_; -}; - - -class HThisFunction final : public HTemplateInstruction<0> { - public: - DECLARE_INSTRUCTION_FACTORY_P0(HThisFunction); - - Representation RequiredInputRepresentation(int index) override { - return Representation::None(); - } - - DECLARE_CONCRETE_INSTRUCTION(ThisFunction) - - protected: - bool DataEquals(HValue* other) override { return true; } - - private: - HThisFunction() { - set_representation(Representation::Tagged()); - SetFlag(kUseGVN); - } - - bool IsDeletable() const override { return true; } -}; - - -class HDeclareGlobals final : public HUnaryOperation { - public: - DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HDeclareGlobals, - Handle, int, - Handle); - - HValue* context() { return OperandAt(0); } - Handle declarations() const { return declarations_; } - int flags() const { return flags_; } - Handle feedback_vector() const { return feedback_vector_; } - - 
DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals) - - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - private: - HDeclareGlobals(HValue* context, Handle declarations, int flags, - Handle feedback_vector) - : HUnaryOperation(context), - declarations_(declarations), - feedback_vector_(feedback_vector), - flags_(flags) { - set_representation(Representation::Tagged()); - SetAllSideEffects(); - } - - Handle declarations_; - Handle feedback_vector_; - int flags_; -}; - - -template -class HCall : public HTemplateInstruction { - public: - // The argument count includes the receiver. - explicit HCall(int argument_count) : argument_count_(argument_count) { - this->set_representation(Representation::Tagged()); - this->SetAllSideEffects(); - } - - virtual int argument_count() const { - return argument_count_; - } - - int argument_delta() const override { return -argument_count(); } - - private: - int argument_count_; -}; - - -class HUnaryCall : public HCall<1> { - public: - HUnaryCall(HValue* value, int argument_count) - : HCall<1>(argument_count) { - SetOperandAt(0, value); - } - - Representation RequiredInputRepresentation(int index) final { - return Representation::Tagged(); - } - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - HValue* value() const { return OperandAt(0); } -}; - - -class HBinaryCall : public HCall<2> { - public: - HBinaryCall(HValue* first, HValue* second, int argument_count) - : HCall<2>(argument_count) { - SetOperandAt(0, first); - SetOperandAt(1, second); - } - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - Representation RequiredInputRepresentation(int index) final { - return Representation::Tagged(); - } - - HValue* first() const { return OperandAt(0); } - HValue* second() const { return OperandAt(1); } -}; - - -class HCallWithDescriptor final : public HInstruction { - public: - static HCallWithDescriptor* New( - Isolate* isolate, Zone* zone, HValue* context, HValue* target, - int argument_count, CallInterfaceDescriptor descriptor, - const Vector& operands, - TailCallMode syntactic_tail_call_mode = TailCallMode::kDisallow, - TailCallMode tail_call_mode = TailCallMode::kDisallow) { - HCallWithDescriptor* res = new (zone) HCallWithDescriptor( - Code::STUB, context, target, argument_count, descriptor, operands, - syntactic_tail_call_mode, tail_call_mode, zone); - return res; - } - - static HCallWithDescriptor* New( - Isolate* isolate, Zone* zone, HValue* context, Code::Kind kind, - HValue* target, int argument_count, CallInterfaceDescriptor descriptor, - const Vector& operands, - TailCallMode syntactic_tail_call_mode = TailCallMode::kDisallow, - TailCallMode tail_call_mode = TailCallMode::kDisallow) { - HCallWithDescriptor* res = new (zone) HCallWithDescriptor( - kind, context, target, argument_count, descriptor, operands, - syntactic_tail_call_mode, tail_call_mode, zone); - return res; - } - - int OperandCount() const final { return values_.length(); } - HValue* OperandAt(int index) const final { return values_[index]; } - - Representation RequiredInputRepresentation(int index) final { - if (index == 0 || index == 1) { - // Target + context - return Representation::Tagged(); - } else { - int par_index = index - 2; - DCHECK(par_index < GetParameterCount()); - return RepresentationFromMachineType( - descriptor_.GetParameterType(par_index)); - } - } - - DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor) - - // Defines whether this instruction corresponds to a JS call at 
tail position. - TailCallMode syntactic_tail_call_mode() const { - return SyntacticTailCallModeField::decode(bit_field_); - } - - // Defines whether this call should be generated as a tail call. - TailCallMode tail_call_mode() const { - return TailCallModeField::decode(bit_field_); - } - bool IsTailCall() const { return tail_call_mode() == TailCallMode::kAllow; } - - Code::Kind kind() const { return KindField::decode(bit_field_); } - - virtual int argument_count() const { - return argument_count_; - } - - int argument_delta() const override { return -argument_count_; } - - CallInterfaceDescriptor descriptor() const { return descriptor_; } - - HValue* target() { return OperandAt(0); } - HValue* context() { return OperandAt(1); } - HValue* parameter(int index) { - DCHECK_LT(index, GetParameterCount()); - return OperandAt(index + 2); - } - - HValue* Canonicalize() override; - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - private: - // The argument count includes the receiver. - HCallWithDescriptor(Code::Kind kind, HValue* context, HValue* target, - int argument_count, CallInterfaceDescriptor descriptor, - const Vector& operands, - TailCallMode syntactic_tail_call_mode, - TailCallMode tail_call_mode, Zone* zone) - : descriptor_(descriptor), - values_(GetParameterCount() + 2, zone), // +2 for context and target. - argument_count_(argument_count), - bit_field_( - TailCallModeField::encode(tail_call_mode) | - SyntacticTailCallModeField::encode(syntactic_tail_call_mode) | - KindField::encode(kind)) { - DCHECK_EQ(operands.length(), GetParameterCount()); - // We can only tail call without any stack arguments. - DCHECK(tail_call_mode != TailCallMode::kAllow || argument_count == 0); - AddOperand(target, zone); - AddOperand(context, zone); - for (int i = 0; i < operands.length(); i++) { - AddOperand(operands[i], zone); - } - this->set_representation(Representation::Tagged()); - this->SetAllSideEffects(); - } - - void AddOperand(HValue* v, Zone* zone) { - values_.Add(NULL, zone); - SetOperandAt(values_.length() - 1, v); - } - - int GetParameterCount() const { return descriptor_.GetParameterCount(); } - - void InternalSetOperandAt(int index, HValue* value) final { - values_[index] = value; - } - - CallInterfaceDescriptor descriptor_; - ZoneList values_; - int argument_count_; - class TailCallModeField : public BitField {}; - class SyntacticTailCallModeField - : public BitField {}; - class KindField - : public BitField {}; - uint32_t bit_field_; -}; - - -class HInvokeFunction final : public HBinaryCall { - public: - DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P5(HInvokeFunction, HValue*, - Handle, int, - TailCallMode, TailCallMode); - - HValue* context() { return first(); } - HValue* function() { return second(); } - Handle known_function() { return known_function_; } - int formal_parameter_count() const { return formal_parameter_count_; } - - bool HasStackCheck() final { return HasStackCheckField::decode(bit_field_); } - - // Defines whether this instruction corresponds to a JS call at tail position. - TailCallMode syntactic_tail_call_mode() const { - return SyntacticTailCallModeField::decode(bit_field_); - } - - // Defines whether this call should be generated as a tail call. 
-  TailCallMode tail_call_mode() const {
-    return TailCallModeField::decode(bit_field_);
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction)
-
-  std::ostream& PrintTo(std::ostream& os) const override;  // NOLINT
-  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
-
- private:
-  void set_has_stack_check(bool has_stack_check) {
-    bit_field_ = HasStackCheckField::update(bit_field_, has_stack_check);
-  }
-
-  HInvokeFunction(HValue* context, HValue* function,
-                  Handle known_function, int argument_count,
-                  TailCallMode syntactic_tail_call_mode,
-                  TailCallMode tail_call_mode)
-      : HBinaryCall(context, function, argument_count),
-        known_function_(known_function),
-        bit_field_(
-            TailCallModeField::encode(tail_call_mode) |
-            SyntacticTailCallModeField::encode(syntactic_tail_call_mode)) {
-    DCHECK(tail_call_mode != TailCallMode::kAllow ||
-           syntactic_tail_call_mode == TailCallMode::kAllow);
-    formal_parameter_count_ =
-        known_function.is_null()
-            ? 0
-            : known_function->shared()->internal_formal_parameter_count();
-    set_has_stack_check(
-        !known_function.is_null() &&
-        (known_function->code()->kind() == Code::FUNCTION ||
-         known_function->code()->kind() == Code::OPTIMIZED_FUNCTION));
-  }
-
-  Handle known_function_;
-  int formal_parameter_count_;
-
-  class HasStackCheckField : public BitField {};
-  class TailCallModeField
-      : public BitField {};
-  class SyntacticTailCallModeField
-      : public BitField {};
-  uint32_t bit_field_;
-};
-
-class HCallNewArray final : public HBinaryCall {
- public:
-  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HCallNewArray, HValue*, int,
-                                              ElementsKind,
-                                              Handle);
-
-  HValue* context() { return first(); }
-  HValue* constructor() { return second(); }
-
-  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
-
-  ElementsKind elements_kind() const { return elements_kind_; }
-  Handle site() const { return site_; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallNewArray)
-
- private:
-  HCallNewArray(HValue* context, HValue* constructor, int argument_count,
-                ElementsKind elements_kind, Handle site)
-      : HBinaryCall(context, constructor, argument_count),
-        elements_kind_(elements_kind),
-        site_(site) {}
-
-  ElementsKind elements_kind_;
-  Handle site_;
-};
-
-class HCallRuntime final : public HCall<1> {
- public:
-  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallRuntime,
-                                              const Runtime::Function*, int);
-
-  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
-
-  HValue* context() { return OperandAt(0); }
-  const Runtime::Function* function() const { return c_function_; }
-  SaveFPRegsMode save_doubles() const { return save_doubles_; }
-  void set_save_doubles(SaveFPRegsMode save_doubles) {
-    save_doubles_ = save_doubles;
-  }
-
-  Representation RequiredInputRepresentation(int index) override {
-    return Representation::Tagged();
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallRuntime)
-
- private:
-  HCallRuntime(HValue* context, const Runtime::Function* c_function,
-               int argument_count)
-      : HCall<1>(argument_count),
-        c_function_(c_function),
-        save_doubles_(kDontSaveFPRegs) {
-    SetOperandAt(0, context);
-  }
-
-  const Runtime::Function* c_function_;
-  SaveFPRegsMode save_doubles_;
-};
-
-class HUnaryMathOperation final : public HTemplateInstruction<2> {
- public:
-  static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
-                           HValue* value, BuiltinFunctionId op);
-
-  HValue* context() const { return OperandAt(0); }
-  HValue* value() const { return OperandAt(1); }
-
-  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
-
-  Representation RequiredInputRepresentation(int index) override {
-    if (index == 0) {
-      return Representation::Tagged();
-    } else {
-      switch (op_) {
-        case kMathCos:
-        case kMathFloor:
-        case kMathRound:
-        case kMathFround:
-        case kMathSin:
-        case kMathSqrt:
-        case kMathPowHalf:
-        case kMathLog:
-        case kMathExp:
-          return Representation::Double();
-        case kMathAbs:
-          return representation();
-        case kMathClz32:
-          return Representation::Integer32();
-        default:
-          UNREACHABLE();
-      }
-    }
-  }
-
-  Range* InferRange(Zone* zone) override;
-
-  HValue* Canonicalize() override;
-  Representation RepresentationFromUses() override;
-  Representation RepresentationFromInputs() override;
-
-  BuiltinFunctionId op() const { return op_; }
-  const char* OpName() const;
-
-  DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation)
-
- protected:
-  bool DataEquals(HValue* other) override {
-    HUnaryMathOperation* b = HUnaryMathOperation::cast(other);
-    return op_ == b->op();
-  }
-
- private:
-  // Indicates if we support a double (and int32) output for Math.floor and
-  // Math.round.
-  bool SupportsFlexibleFloorAndRound() const {
-#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
-    return true;
-#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
-    return CpuFeatures::IsSupported(SSE4_1);
-#else
-    return false;
-#endif
-  }
-  HUnaryMathOperation(HValue* context, HValue* value, BuiltinFunctionId op)
-      : HTemplateInstruction<2>(HType::TaggedNumber()), op_(op) {
-    SetOperandAt(0, context);
-    SetOperandAt(1, value);
-    switch (op) {
-      case kMathFloor:
-      case kMathRound:
-        if (SupportsFlexibleFloorAndRound()) {
-          SetFlag(kFlexibleRepresentation);
-        } else {
-          set_representation(Representation::Integer32());
-        }
-        break;
-      case kMathClz32:
-        set_representation(Representation::Integer32());
-        break;
-      case kMathAbs:
-        // Not setting representation here: it is None intentionally.
-        SetFlag(kFlexibleRepresentation);
-        // TODO(svenpanne) This flag is actually only needed if representation()
-        // is tagged, and not when it is an unboxed double or unboxed integer.
-        SetChangesFlag(kNewSpacePromotion);
-        break;
-      case kMathCos:
-      case kMathFround:
-      case kMathLog:
-      case kMathExp:
-      case kMathSin:
-      case kMathSqrt:
-      case kMathPowHalf:
-        set_representation(Representation::Double());
-        break;
-      default:
-        UNREACHABLE();
-    }
-    SetFlag(kUseGVN);
-    SetFlag(kTruncatingToNumber);
-  }
-
-  bool IsDeletable() const override {
-    // TODO(crankshaft): This should be true, however the semantics of this
-    // instruction also include the ToNumber conversion that is mentioned in the
-    // spec, which is of course observable.
- return false; - } - - HValue* SimplifiedDividendForMathFloorOfDiv(HDiv* hdiv); - HValue* SimplifiedDivisorForMathFloorOfDiv(HDiv* hdiv); - - BuiltinFunctionId op_; -}; - - -class HLoadRoot final : public HTemplateInstruction<0> { - public: - DECLARE_INSTRUCTION_FACTORY_P1(HLoadRoot, Heap::RootListIndex); - DECLARE_INSTRUCTION_FACTORY_P2(HLoadRoot, Heap::RootListIndex, HType); - - Representation RequiredInputRepresentation(int index) override { - return Representation::None(); - } - - Heap::RootListIndex index() const { return index_; } - - DECLARE_CONCRETE_INSTRUCTION(LoadRoot) - - protected: - bool DataEquals(HValue* other) override { - HLoadRoot* b = HLoadRoot::cast(other); - return index_ == b->index_; - } - - private: - explicit HLoadRoot(Heap::RootListIndex index, HType type = HType::Tagged()) - : HTemplateInstruction<0>(type), index_(index) { - SetFlag(kUseGVN); - // TODO(bmeurer): We'll need kDependsOnRoots once we add the - // corresponding HStoreRoot instruction. - SetDependsOnFlag(kCalls); - set_representation(Representation::Tagged()); - } - - bool IsDeletable() const override { return true; } - - const Heap::RootListIndex index_; -}; - - -class HCheckMaps final : public HTemplateInstruction<2> { - public: - static HCheckMaps* New(Isolate* isolate, Zone* zone, HValue* context, - HValue* value, Handle map, - HValue* typecheck = NULL) { - return new(zone) HCheckMaps(value, new(zone) UniqueSet( - Unique::CreateImmovable(map), zone), typecheck); - } - static HCheckMaps* New(Isolate* isolate, Zone* zone, HValue* context, - HValue* value, SmallMapList* map_list, - HValue* typecheck = NULL) { - UniqueSet* maps = new(zone) UniqueSet(map_list->length(), zone); - for (int i = 0; i < map_list->length(); ++i) { - maps->Add(Unique::CreateImmovable(map_list->at(i)), zone); - } - return new(zone) HCheckMaps(value, maps, typecheck); - } - - bool IsStabilityCheck() const { - return IsStabilityCheckField::decode(bit_field_); - } - void MarkAsStabilityCheck() { - bit_field_ = MapsAreStableField::encode(true) | - HasMigrationTargetField::encode(false) | - IsStabilityCheckField::encode(true); - ClearChangesFlag(kNewSpacePromotion); - ClearDependsOnFlag(kElementsKind); - ClearDependsOnFlag(kMaps); - } - - bool HasEscapingOperandAt(int index) override { return false; } - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - HType CalculateInferredType() override { - if (value()->type().IsHeapObject()) return value()->type(); - return HType::HeapObject(); - } - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - HValue* value() const { return OperandAt(0); } - HValue* typecheck() const { return OperandAt(1); } - - const UniqueSet* maps() const { return maps_; } - void set_maps(const UniqueSet* maps) { maps_ = maps; } - - bool maps_are_stable() const { - return MapsAreStableField::decode(bit_field_); - } - - bool HasMigrationTarget() const { - return HasMigrationTargetField::decode(bit_field_); - } - - HValue* Canonicalize() override; - - static HCheckMaps* CreateAndInsertAfter(Zone* zone, - HValue* value, - Unique map, - bool map_is_stable, - HInstruction* instr) { - return instr->Append(new(zone) HCheckMaps( - value, new(zone) UniqueSet(map, zone), map_is_stable)); - } - - static HCheckMaps* CreateAndInsertBefore(Zone* zone, - HValue* value, - const UniqueSet* maps, - bool maps_are_stable, - HInstruction* instr) { - return instr->Prepend(new(zone) HCheckMaps(value, maps, maps_are_stable)); - } - - 
DECLARE_CONCRETE_INSTRUCTION(CheckMaps) - - protected: - bool DataEquals(HValue* other) override { - return this->maps()->Equals(HCheckMaps::cast(other)->maps()); - } - - int RedefinedOperandIndex() override { return 0; } - - private: - HCheckMaps(HValue* value, const UniqueSet* maps, bool maps_are_stable) - : HTemplateInstruction<2>(HType::HeapObject()), - maps_(maps), - bit_field_(HasMigrationTargetField::encode(false) | - IsStabilityCheckField::encode(false) | - MapsAreStableField::encode(maps_are_stable)) { - DCHECK_NE(0, maps->size()); - SetOperandAt(0, value); - // Use the object value for the dependency. - SetOperandAt(1, value); - set_representation(Representation::Tagged()); - SetFlag(kUseGVN); - SetDependsOnFlag(kMaps); - SetDependsOnFlag(kElementsKind); - } - - HCheckMaps(HValue* value, const UniqueSet* maps, HValue* typecheck) - : HTemplateInstruction<2>(HType::HeapObject()), - maps_(maps), - bit_field_(HasMigrationTargetField::encode(false) | - IsStabilityCheckField::encode(false) | - MapsAreStableField::encode(true)) { - DCHECK_NE(0, maps->size()); - SetOperandAt(0, value); - // Use the object value for the dependency if NULL is passed. - SetOperandAt(1, typecheck ? typecheck : value); - set_representation(Representation::Tagged()); - SetFlag(kUseGVN); - SetDependsOnFlag(kMaps); - SetDependsOnFlag(kElementsKind); - for (int i = 0; i < maps->size(); ++i) { - Handle map = maps->at(i).handle(); - if (map->is_migration_target()) { - bit_field_ = HasMigrationTargetField::update(bit_field_, true); - } - if (!map->is_stable()) { - bit_field_ = MapsAreStableField::update(bit_field_, false); - } - } - if (HasMigrationTarget()) SetChangesFlag(kNewSpacePromotion); - } - - class HasMigrationTargetField : public BitField {}; - class IsStabilityCheckField : public BitField {}; - class MapsAreStableField : public BitField {}; - - const UniqueSet* maps_; - uint32_t bit_field_; -}; - - -class HCheckValue final : public HUnaryOperation { - public: - static HCheckValue* New(Isolate* isolate, Zone* zone, HValue* context, - HValue* value, Handle func) { - bool in_new_space = isolate->heap()->InNewSpace(*func); - // NOTE: We create an uninitialized Unique and initialize it later. - // This is because a JSFunction can move due to GC during graph creation. 
- Unique target = Unique::CreateUninitialized(func); - HCheckValue* check = new(zone) HCheckValue(value, target, in_new_space); - return check; - } - static HCheckValue* New(Isolate* isolate, Zone* zone, HValue* context, - HValue* value, Unique target, - bool object_in_new_space) { - return new(zone) HCheckValue(value, target, object_in_new_space); - } - - void FinalizeUniqueness() override { - object_ = Unique(object_.handle()); - } - - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - HValue* Canonicalize() override; - -#ifdef DEBUG - void Verify() override; -#endif - - Unique object() const { return object_; } - bool object_in_new_space() const { return object_in_new_space_; } - - DECLARE_CONCRETE_INSTRUCTION(CheckValue) - - protected: - bool DataEquals(HValue* other) override { - HCheckValue* b = HCheckValue::cast(other); - return object_ == b->object_; - } - - private: - HCheckValue(HValue* value, Unique object, - bool object_in_new_space) - : HUnaryOperation(value, value->type()), - object_(object), - object_in_new_space_(object_in_new_space) { - set_representation(Representation::Tagged()); - SetFlag(kUseGVN); - } - - Unique object_; - bool object_in_new_space_; -}; - - -class HCheckInstanceType final : public HUnaryOperation { - public: - enum Check { - IS_JS_RECEIVER, - IS_JS_ARRAY, - IS_JS_FUNCTION, - IS_JS_DATE, - IS_STRING, - IS_INTERNALIZED_STRING, - LAST_INTERVAL_CHECK = IS_JS_DATE - }; - - DECLARE_INSTRUCTION_FACTORY_P2(HCheckInstanceType, HValue*, Check); - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - HType CalculateInferredType() override { - switch (check_) { - case IS_JS_RECEIVER: return HType::JSReceiver(); - case IS_JS_ARRAY: return HType::JSArray(); - case IS_JS_FUNCTION: - return HType::JSObject(); - case IS_JS_DATE: return HType::JSObject(); - case IS_STRING: return HType::String(); - case IS_INTERNALIZED_STRING: return HType::String(); - } - UNREACHABLE(); - } - - HValue* Canonicalize() override; - - bool is_interval_check() const { return check_ <= LAST_INTERVAL_CHECK; } - void GetCheckInterval(InstanceType* first, InstanceType* last); - void GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag); - - Check check() const { return check_; } - - DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType) - - protected: - // TODO(ager): It could be nice to allow the ommision of instance - // type checks if we have already performed an instance type check - // with a larger range. 
- bool DataEquals(HValue* other) override { - HCheckInstanceType* b = HCheckInstanceType::cast(other); - return check_ == b->check_; - } - - int RedefinedOperandIndex() override { return 0; } - - private: - const char* GetCheckName() const; - - HCheckInstanceType(HValue* value, Check check) - : HUnaryOperation(value, HType::HeapObject()), check_(check) { - set_representation(Representation::Tagged()); - SetFlag(kUseGVN); - } - - const Check check_; -}; - - -class HCheckSmi final : public HUnaryOperation { - public: - DECLARE_INSTRUCTION_FACTORY_P1(HCheckSmi, HValue*); - - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - HValue* Canonicalize() override { - HType value_type = value()->type(); - if (value_type.IsSmi()) { - return NULL; - } - return this; - } - - DECLARE_CONCRETE_INSTRUCTION(CheckSmi) - - protected: - bool DataEquals(HValue* other) override { return true; } - - private: - explicit HCheckSmi(HValue* value) : HUnaryOperation(value, HType::Smi()) { - set_representation(Representation::Smi()); - SetFlag(kUseGVN); - } -}; - - -class HCheckArrayBufferNotNeutered final : public HUnaryOperation { - public: - DECLARE_INSTRUCTION_FACTORY_P1(HCheckArrayBufferNotNeutered, HValue*); - - bool HasEscapingOperandAt(int index) override { return false; } - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - HType CalculateInferredType() override { - if (value()->type().IsHeapObject()) return value()->type(); - return HType::HeapObject(); - } - - DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered) - - protected: - bool DataEquals(HValue* other) override { return true; } - int RedefinedOperandIndex() override { return 0; } - - private: - explicit HCheckArrayBufferNotNeutered(HValue* value) - : HUnaryOperation(value) { - set_representation(Representation::Tagged()); - SetFlag(kUseGVN); - SetDependsOnFlag(kCalls); - } -}; - - -class HCheckHeapObject final : public HUnaryOperation { - public: - DECLARE_INSTRUCTION_FACTORY_P1(HCheckHeapObject, HValue*); - - bool HasEscapingOperandAt(int index) override { return false; } - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - HType CalculateInferredType() override { - if (value()->type().IsHeapObject()) return value()->type(); - return HType::HeapObject(); - } - -#ifdef DEBUG - void Verify() override; -#endif - - HValue* Canonicalize() override { - return value()->type().IsHeapObject() ? 
NULL : this; - } - - DECLARE_CONCRETE_INSTRUCTION(CheckHeapObject) - - protected: - bool DataEquals(HValue* other) override { return true; } - - private: - explicit HCheckHeapObject(HValue* value) : HUnaryOperation(value) { - set_representation(Representation::Tagged()); - SetFlag(kUseGVN); - } -}; - - -class HPhi final : public HValue { - public: - HPhi(int merged_index, Zone* zone) - : inputs_(2, zone), merged_index_(merged_index) { - DCHECK(merged_index >= 0 || merged_index == kInvalidMergedIndex); - SetFlag(kFlexibleRepresentation); - } - - Representation RepresentationFromInputs() override; - - Range* InferRange(Zone* zone) override; - void InferRepresentation(HInferRepresentationPhase* h_infer) override; - Representation RequiredInputRepresentation(int index) override { - return representation(); - } - Representation KnownOptimalRepresentation() override { - return representation(); - } - HType CalculateInferredType() override; - int OperandCount() const override { return inputs_.length(); } - HValue* OperandAt(int index) const override { return inputs_[index]; } - HValue* GetRedundantReplacement(); - void AddInput(HValue* value); - bool HasRealUses(); - - bool IsReceiver() const { return merged_index_ == 0; } - bool HasMergedIndex() const { return merged_index_ != kInvalidMergedIndex; } - - SourcePosition position() const override; - - int merged_index() const { return merged_index_; } - - std::ostream& PrintTo(std::ostream& os) const override; // NOLINT - -#ifdef DEBUG - void Verify() override; -#endif - - void InitRealUses(int id); - void AddNonPhiUsesFrom(HPhi* other); - - Representation representation_from_indirect_uses() const { - return representation_from_indirect_uses_; - } - - bool has_type_feedback_from_uses() const { - return has_type_feedback_from_uses_; - } - - int phi_id() { return phi_id_; } - - static HPhi* cast(HValue* value) { - DCHECK(value->IsPhi()); - return reinterpret_cast(value); - } - Opcode opcode() const override { return HValue::kPhi; } - - void SimplifyConstantInputs(); - - // Marker value representing an invalid merge index. - static const int kInvalidMergedIndex = -1; - - protected: - void DeleteFromGraph() override; - void InternalSetOperandAt(int index, HValue* value) override { - inputs_[index] = value; - } - - private: - Representation representation_from_non_phi_uses() const { - return representation_from_non_phi_uses_; - } - - ZoneList inputs_; - int merged_index_ = 0; - - int phi_id_ = -1; - - Representation representation_from_indirect_uses_ = Representation::None(); - Representation representation_from_non_phi_uses_ = Representation::None(); - bool has_type_feedback_from_uses_ = false; - - bool IsDeletable() const override { return !IsReceiver(); } -}; - - -// Common base class for HArgumentsObject and HCapturedObject. -class HDematerializedObject : public HInstruction { - public: - HDematerializedObject(int count, Zone* zone) : values_(count, zone) {} - - int OperandCount() const final { return values_.length(); } - HValue* OperandAt(int index) const final { return values_[index]; } - - bool HasEscapingOperandAt(int index) final { return false; } - Representation RequiredInputRepresentation(int index) final { - return Representation::None(); - } - - protected: - void InternalSetOperandAt(int index, HValue* value) final { - values_[index] = value; - } - - // List of values tracked by this marker. 
-  ZoneList values_;
-};
-
-class HArgumentsObject final : public HDematerializedObject {
- public:
-  static HArgumentsObject* New(Isolate* isolate, Zone* zone, HValue* context,
-                               int count) {
-    return new(zone) HArgumentsObject(count, zone);
-  }
-
-  // The values contain a list of all elements in the arguments object
-  // including the receiver object, which is skipped when materializing.
-  const ZoneList* arguments_values() const { return &values_; }
-  int arguments_count() const { return values_.length(); }
-
-  void AddArgument(HValue* argument, Zone* zone) {
-    values_.Add(NULL, zone);  // Resize list.
-    SetOperandAt(values_.length() - 1, argument);
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(ArgumentsObject)
-
- private:
-  HArgumentsObject(int count, Zone* zone)
-      : HDematerializedObject(count, zone) {
-    set_representation(Representation::Tagged());
-    SetFlag(kIsArguments);
-  }
-};
-
-class HCapturedObject final : public HDematerializedObject {
- public:
-  HCapturedObject(int length, int id, Zone* zone)
-      : HDematerializedObject(length, zone), capture_id_(id) {
-    set_representation(Representation::Tagged());
-    values_.AddBlock(NULL, length, zone);  // Resize list.
-  }
-
-  // The values contain a list of all in-object properties inside the
-  // captured object and is index by field index. Properties in the
-  // properties or elements backing store are not tracked here.
-  const ZoneList* values() const { return &values_; }
-  int length() const { return values_.length(); }
-  int capture_id() const { return capture_id_; }
-
-  // Shortcut for the map value of this captured object.
-  HValue* map_value() const { return values()->first(); }
-
-  void ReuseSideEffectsFromStore(HInstruction* store) {
-    DCHECK(store->HasObservableSideEffects());
-    DCHECK(store->IsStoreNamedField());
-    changes_flags_.Add(store->ChangesFlags());
-  }
-
-  // Replay effects of this instruction on the given environment.
-  void ReplayEnvironment(HEnvironment* env);
-
-  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
-
-  DECLARE_CONCRETE_INSTRUCTION(CapturedObject)
-
- private:
-  int capture_id_;
-
-  // Note that we cannot DCE captured objects as they are used to replay
-  // the environment. This method is here as an explicit reminder.
-  // TODO(mstarzinger): Turn HSimulates into full snapshots maybe?
- bool IsDeletable() const final { return false; } -}; - - -class HConstant final : public HTemplateInstruction<0> { - public: - enum Special { kHoleNaN }; - - DECLARE_INSTRUCTION_FACTORY_P1(HConstant, Special); - DECLARE_INSTRUCTION_FACTORY_P1(HConstant, int32_t); - DECLARE_INSTRUCTION_FACTORY_P2(HConstant, int32_t, Representation); - DECLARE_INSTRUCTION_FACTORY_P1(HConstant, double); - DECLARE_INSTRUCTION_FACTORY_P1(HConstant, Handle); - DECLARE_INSTRUCTION_FACTORY_P1(HConstant, ExternalReference); - - static HConstant* CreateAndInsertAfter(Isolate* isolate, Zone* zone, - HValue* context, int32_t value, - Representation representation, - HInstruction* instruction) { - return instruction->Append( - HConstant::New(isolate, zone, context, value, representation)); - } - - Handle GetMonomorphicJSObjectMap() override { - Handle object = object_.handle(); - if (!object.is_null() && object->IsHeapObject()) { - return v8::internal::handle(HeapObject::cast(*object)->map()); - } - return Handle(); - } - - static HConstant* CreateAndInsertBefore(Isolate* isolate, Zone* zone, - HValue* context, int32_t value, - Representation representation, - HInstruction* instruction) { - return instruction->Prepend( - HConstant::New(isolate, zone, context, value, representation)); - } - - static HConstant* CreateAndInsertBefore(Zone* zone, - Unique map, - bool map_is_stable, - HInstruction* instruction) { - return instruction->Prepend(new(zone) HConstant( - map, Unique(Handle::null()), map_is_stable, - Representation::Tagged(), HType::HeapObject(), true, - false, false, MAP_TYPE)); - } - - static HConstant* CreateAndInsertAfter(Zone* zone, - Unique map, - bool map_is_stable, - HInstruction* instruction) { - return instruction->Append(new(zone) HConstant( - map, Unique(Handle::null()), map_is_stable, - Representation::Tagged(), HType::HeapObject(), true, - false, false, MAP_TYPE)); - } - - Handle handle(Isolate* isolate) { - if (object_.handle().is_null()) { - // Default arguments to is_not_in_new_space depend on this heap number - // to be tenured so that it's guaranteed not to be located in new space. 
- object_ = Unique::CreateUninitialized( - isolate->factory()->NewNumber(double_value_, TENURED)); - } - AllowDeferredHandleDereference smi_check; - DCHECK(HasInteger32Value() || !object_.handle()->IsSmi()); - return object_.handle(); - } - - bool IsSpecialDouble() const { - return HasDoubleValue() && - (bit_cast(double_value_) == bit_cast(-0.0) || - std::isnan(double_value_)); - } - - bool NotInNewSpace() const { - return IsNotInNewSpaceField::decode(bit_field_); - } - - bool ImmortalImmovable() const; - - bool IsCell() const { - InstanceType instance_type = GetInstanceType(); - return instance_type == CELL_TYPE; - } - - Representation RequiredInputRepresentation(int index) override { - return Representation::None(); - } - - Representation KnownOptimalRepresentation() override { - if (HasSmiValue() && SmiValuesAre31Bits()) return Representation::Smi(); - if (HasInteger32Value()) return Representation::Integer32(); - if (HasNumberValue()) return Representation::Double(); - if (HasExternalReferenceValue()) return Representation::External(); - return Representation::Tagged(); - } - - bool EmitAtUses() override; - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - HConstant* CopyToRepresentation(Representation r, Zone* zone) const; - Maybe CopyToTruncatedInt32(Zone* zone); - Maybe CopyToTruncatedNumber(Isolate* isolate, Zone* zone); - bool HasInteger32Value() const { - return HasInt32ValueField::decode(bit_field_); - } - int32_t Integer32Value() const { - DCHECK(HasInteger32Value()); - return int32_value_; - } - bool HasSmiValue() const { return HasSmiValueField::decode(bit_field_); } - bool HasDoubleValue() const { - return HasDoubleValueField::decode(bit_field_); - } - double DoubleValue() const { - DCHECK(HasDoubleValue()); - return double_value_; - } - uint64_t DoubleValueAsBits() const { - DCHECK(HasDoubleValue()); - return bit_cast(double_value_); - } - bool IsTheHole() const { - if (HasDoubleValue() && DoubleValueAsBits() == kHoleNanInt64) { - return true; - } - return object_.IsInitialized() && - object_.IsKnownGlobal(isolate()->heap()->the_hole_value()); - } - bool HasNumberValue() const { return HasDoubleValue(); } - int32_t NumberValueAsInteger32() const { - DCHECK(HasNumberValue()); - // Irrespective of whether a numeric HConstant can be safely - // represented as an int32, we store the (in some cases lossy) - // representation of the number in int32_value_. 
- return int32_value_; - } - bool HasStringValue() const { - if (HasNumberValue()) return false; - DCHECK(!object_.handle().is_null()); - return GetInstanceType() < FIRST_NONSTRING_TYPE; - } - Handle StringValue() const { - DCHECK(HasStringValue()); - return Handle::cast(object_.handle()); - } - bool HasInternalizedStringValue() const { - return HasStringValue() && StringShape(GetInstanceType()).IsInternalized(); - } - - bool HasExternalReferenceValue() const { - return HasExternalReferenceValueField::decode(bit_field_); - } - ExternalReference ExternalReferenceValue() const { - return external_reference_value_; - } - - bool HasBooleanValue() const { return type_.IsBoolean(); } - bool BooleanValue() const { return BooleanValueField::decode(bit_field_); } - bool IsCallable() const { return IsCallableField::decode(bit_field_); } - bool IsUndetectable() const { - return IsUndetectableField::decode(bit_field_); - } - InstanceType GetInstanceType() const { - return InstanceTypeField::decode(bit_field_); - } - - bool HasMapValue() const { return GetInstanceType() == MAP_TYPE; } - Unique MapValue() const { - DCHECK(HasMapValue()); - return Unique::cast(GetUnique()); - } - bool HasStableMapValue() const { - DCHECK(HasMapValue() || !HasStableMapValueField::decode(bit_field_)); - return HasStableMapValueField::decode(bit_field_); - } - - bool HasObjectMap() const { return !object_map_.IsNull(); } - Unique ObjectMap() const { - DCHECK(HasObjectMap()); - return object_map_; - } - - intptr_t Hashcode() override { - if (HasInteger32Value()) { - return static_cast(int32_value_); - } else if (HasDoubleValue()) { - uint64_t bits = DoubleValueAsBits(); - if (sizeof(bits) > sizeof(intptr_t)) { - bits ^= (bits >> 32); - } - return static_cast(bits); - } else if (HasExternalReferenceValue()) { - return reinterpret_cast(external_reference_value_.address()); - } else { - DCHECK(!object_.handle().is_null()); - return object_.Hashcode(); - } - } - - void FinalizeUniqueness() override { - if (!HasDoubleValue() && !HasExternalReferenceValue()) { - DCHECK(!object_.handle().is_null()); - object_ = Unique(object_.handle()); - } - } - - Unique GetUnique() const { - return object_; - } - - bool EqualsUnique(Unique other) const { - return object_.IsInitialized() && object_ == other; - } - - bool DataEquals(HValue* other) override { - HConstant* other_constant = HConstant::cast(other); - if (HasInteger32Value()) { - return other_constant->HasInteger32Value() && - int32_value_ == other_constant->int32_value_; - } else if (HasDoubleValue()) { - return other_constant->HasDoubleValue() && - std::memcmp(&double_value_, &other_constant->double_value_, - sizeof(double_value_)) == 0; - } else if (HasExternalReferenceValue()) { - return other_constant->HasExternalReferenceValue() && - external_reference_value_ == - other_constant->external_reference_value_; - } else { - if (other_constant->HasInteger32Value() || - other_constant->HasDoubleValue() || - other_constant->HasExternalReferenceValue()) { - return false; - } - DCHECK(!object_.handle().is_null()); - return other_constant->object_ == object_; - } - } - -#ifdef DEBUG - void Verify() override {} -#endif - - DECLARE_CONCRETE_INSTRUCTION(Constant) - - protected: - Range* InferRange(Zone* zone) override; - - private: - friend class HGraph; - explicit HConstant(Special special); - explicit HConstant(Handle handle, - Representation r = Representation::None()); - HConstant(int32_t value, - Representation r = Representation::None(), - bool is_not_in_new_space = true, - Unique 
optional = Unique(Handle::null())); - HConstant(double value, - Representation r = Representation::None(), - bool is_not_in_new_space = true, - Unique optional = Unique(Handle::null())); - HConstant(Unique object, - Unique object_map, - bool has_stable_map_value, - Representation r, - HType type, - bool is_not_in_new_space, - bool boolean_value, - bool is_undetectable, - InstanceType instance_type); - - explicit HConstant(ExternalReference reference); - - void Initialize(Representation r); - - bool IsDeletable() const override { return true; } - - // If object_ is a map, this indicates whether the map is stable. - class HasStableMapValueField : public BitField {}; - - // We store the HConstant in the most specific form safely possible. - // These flags tell us if the respective member fields hold valid, safe - // representations of the constant. More specific flags imply more general - // flags, but not the converse (i.e. smi => int32 => double). - class HasSmiValueField : public BitField {}; - class HasInt32ValueField : public BitField {}; - class HasDoubleValueField : public BitField {}; - - class HasExternalReferenceValueField : public BitField {}; - class IsNotInNewSpaceField : public BitField {}; - class BooleanValueField : public BitField {}; - class IsUndetectableField : public BitField {}; - class IsCallableField : public BitField {}; - - static const InstanceType kUnknownInstanceType = FILLER_TYPE; - class InstanceTypeField : public BitField {}; - - // If this is a numerical constant, object_ either points to the - // HeapObject the constant originated from or is null. If the - // constant is non-numeric, object_ always points to a valid - // constant HeapObject. - Unique object_; - - // If object_ is a heap object, this points to the stable map of the object. - Unique object_map_; - - uint32_t bit_field_; - - int32_t int32_value_; - double double_value_; - ExternalReference external_reference_value_; -}; - - -class HBinaryOperation : public HTemplateInstruction<3> { - public: - HBinaryOperation(HValue* context, HValue* left, HValue* right, - HType type = HType::Tagged()) - : HTemplateInstruction<3>(type), - observed_output_representation_(Representation::None()) { - DCHECK(left != NULL && right != NULL); - SetOperandAt(0, context); - SetOperandAt(1, left); - SetOperandAt(2, right); - observed_input_representation_[0] = Representation::None(); - observed_input_representation_[1] = Representation::None(); - } - - HValue* context() const { return OperandAt(0); } - HValue* left() const { return OperandAt(1); } - HValue* right() const { return OperandAt(2); } - - // True if switching left and right operands likely generates better code. - bool AreOperandsBetterSwitched() { - if (!IsCommutative()) return false; - - // Constant operands are better off on the right, they can be inlined in - // many situations on most platforms. - if (left()->IsConstant()) return true; - if (right()->IsConstant()) return false; - - // Otherwise, if there is only one use of the right operand, it would be - // better off on the left for platforms that only have 2-arg arithmetic - // ops (e.g ia32, x64) that clobber the left operand. - return right()->HasOneUse(); - } - - HValue* BetterLeftOperand() { - return AreOperandsBetterSwitched() ? right() : left(); - } - - HValue* BetterRightOperand() { - return AreOperandsBetterSwitched() ? 
left() : right(); - } - - void set_observed_input_representation(int index, Representation rep) { - DCHECK(index >= 1 && index <= 2); - observed_input_representation_[index - 1] = rep; - } - - virtual void initialize_output_representation(Representation observed) { - observed_output_representation_ = observed; - } - - Representation observed_input_representation(int index) override { - if (index == 0) return Representation::Tagged(); - return observed_input_representation_[index - 1]; - } - - void UpdateRepresentation(Representation new_rep, - HInferRepresentationPhase* h_infer, - const char* reason) override { - Representation rep = !FLAG_smi_binop && new_rep.IsSmi() - ? Representation::Integer32() : new_rep; - HValue::UpdateRepresentation(rep, h_infer, reason); - } - - void InferRepresentation(HInferRepresentationPhase* h_infer) override; - Representation RepresentationFromInputs() override; - Representation RepresentationFromOutput(); - void AssumeRepresentation(Representation r) override; - - virtual bool IsCommutative() const { return false; } - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - Representation RequiredInputRepresentation(int index) override { - if (index == 0) return Representation::Tagged(); - return representation(); - } - - bool RightIsPowerOf2() { - if (!right()->IsInteger32Constant()) return false; - int32_t value = right()->GetInteger32Constant(); - if (value < 0) { - return base::bits::IsPowerOfTwo32(static_cast(-value)); - } - return base::bits::IsPowerOfTwo32(static_cast(value)); - } - - DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation) - - private: - bool IgnoreObservedOutputRepresentation(Representation current_rep); - - Representation observed_input_representation_[2]; - Representation observed_output_representation_; -}; - - -class HWrapReceiver final : public HTemplateInstruction<2> { - public: - DECLARE_INSTRUCTION_FACTORY_P2(HWrapReceiver, HValue*, HValue*); - - bool DataEquals(HValue* other) override { return true; } - - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - HValue* receiver() const { return OperandAt(0); } - HValue* function() const { return OperandAt(1); } - - HValue* Canonicalize() override; - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - bool known_function() const { return known_function_; } - - DECLARE_CONCRETE_INSTRUCTION(WrapReceiver) - - private: - HWrapReceiver(HValue* receiver, HValue* function) { - known_function_ = function->IsConstant() && - HConstant::cast(function)->handle(function->isolate())->IsJSFunction(); - set_representation(Representation::Tagged()); - SetOperandAt(0, receiver); - SetOperandAt(1, function); - SetFlag(kUseGVN); - } - - bool known_function_; -}; - - -class HApplyArguments final : public HTemplateInstruction<4> { - public: - DECLARE_INSTRUCTION_FACTORY_P5(HApplyArguments, HValue*, HValue*, HValue*, - HValue*, TailCallMode); - - Representation RequiredInputRepresentation(int index) override { - // The length is untagged, all other inputs are tagged. - return (index == 2) - ? 
Representation::Integer32() - : Representation::Tagged(); - } - - HValue* function() { return OperandAt(0); } - HValue* receiver() { return OperandAt(1); } - HValue* length() { return OperandAt(2); } - HValue* elements() { return OperandAt(3); } - - TailCallMode tail_call_mode() const { - return TailCallModeField::decode(bit_field_); - } - - DECLARE_CONCRETE_INSTRUCTION(ApplyArguments) - - private: - HApplyArguments(HValue* function, HValue* receiver, HValue* length, - HValue* elements, TailCallMode tail_call_mode) - : bit_field_(TailCallModeField::encode(tail_call_mode)) { - set_representation(Representation::Tagged()); - SetOperandAt(0, function); - SetOperandAt(1, receiver); - SetOperandAt(2, length); - SetOperandAt(3, elements); - SetAllSideEffects(); - } - - class TailCallModeField : public BitField {}; - uint32_t bit_field_; -}; - - -class HArgumentsElements final : public HTemplateInstruction<0> { - public: - DECLARE_INSTRUCTION_FACTORY_P1(HArgumentsElements, bool); - DECLARE_INSTRUCTION_FACTORY_P2(HArgumentsElements, bool, bool); - - DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements) - - Representation RequiredInputRepresentation(int index) override { - return Representation::None(); - } - - bool from_inlined() const { return from_inlined_; } - bool arguments_adaptor() const { return arguments_adaptor_; } - - protected: - bool DataEquals(HValue* other) override { return true; } - - private: - explicit HArgumentsElements(bool from_inlined, bool arguments_adaptor = true) - : from_inlined_(from_inlined), arguments_adaptor_(arguments_adaptor) { - // The value produced by this instruction is a pointer into the stack - // that looks as if it was a smi because of alignment. - set_representation(Representation::Tagged()); - SetFlag(kUseGVN); - } - - bool IsDeletable() const override { return true; } - - bool from_inlined_; - bool arguments_adaptor_; -}; - - -class HArgumentsLength final : public HUnaryOperation { - public: - DECLARE_INSTRUCTION_FACTORY_P1(HArgumentsLength, HValue*); - - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength) - - protected: - bool DataEquals(HValue* other) override { return true; } - - private: - explicit HArgumentsLength(HValue* value) : HUnaryOperation(value) { - set_representation(Representation::Integer32()); - SetFlag(kUseGVN); - } - - bool IsDeletable() const override { return true; } -}; - - -class HAccessArgumentsAt final : public HTemplateInstruction<3> { - public: - DECLARE_INSTRUCTION_FACTORY_P3(HAccessArgumentsAt, HValue*, HValue*, HValue*); - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - Representation RequiredInputRepresentation(int index) override { - // The arguments elements is considered tagged. - return index == 0 - ? 
Representation::Tagged() - : Representation::Integer32(); - } - - HValue* arguments() const { return OperandAt(0); } - HValue* length() const { return OperandAt(1); } - HValue* index() const { return OperandAt(2); } - - DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt) - - private: - HAccessArgumentsAt(HValue* arguments, HValue* length, HValue* index) { - set_representation(Representation::Tagged()); - SetFlag(kUseGVN); - SetOperandAt(0, arguments); - SetOperandAt(1, length); - SetOperandAt(2, index); - } - - bool DataEquals(HValue* other) override { return true; } -}; - - -class HBoundsCheck final : public HTemplateInstruction<2> { - public: - DECLARE_INSTRUCTION_FACTORY_P2(HBoundsCheck, HValue*, HValue*); - - bool skip_check() const { return skip_check_; } - void set_skip_check() { skip_check_ = true; } - - HValue* base() const { return base_; } - int offset() const { return offset_; } - int scale() const { return scale_; } - - Representation RequiredInputRepresentation(int index) override { - return representation(); - } - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - void InferRepresentation(HInferRepresentationPhase* h_infer) override; - - HValue* index() const { return OperandAt(0); } - HValue* length() const { return OperandAt(1); } - bool allow_equality() const { return allow_equality_; } - void set_allow_equality(bool v) { allow_equality_ = v; } - - int RedefinedOperandIndex() override { return 0; } - bool IsPurelyInformativeDefinition() override { return skip_check(); } - - DECLARE_CONCRETE_INSTRUCTION(BoundsCheck) - - protected: - Range* InferRange(Zone* zone) override; - - bool DataEquals(HValue* other) override { return true; } - bool skip_check_; - HValue* base_; - int offset_; - int scale_; - bool allow_equality_; - - private: - // Normally HBoundsCheck should be created using the - // HGraphBuilder::AddBoundsCheck() helper. - // However when building stubs, where we know that the arguments are Int32, - // it makes sense to invoke this constructor directly. - HBoundsCheck(HValue* index, HValue* length) - : skip_check_(false), - base_(NULL), offset_(0), scale_(0), - allow_equality_(false) { - SetOperandAt(0, index); - SetOperandAt(1, length); - SetFlag(kFlexibleRepresentation); - SetFlag(kUseGVN); - } - - bool IsDeletable() const override { return skip_check() && !FLAG_debug_code; } -}; - - -class HBitwiseBinaryOperation : public HBinaryOperation { - public: - HBitwiseBinaryOperation(HValue* context, HValue* left, HValue* right, - HType type = HType::TaggedNumber()) - : HBinaryOperation(context, left, right, type) { - SetFlag(kFlexibleRepresentation); - SetFlag(kTruncatingToInt32); - SetFlag(kTruncatingToNumber); - SetAllSideEffects(); - } - - void RepresentationChanged(Representation to) override { - if (to.IsTagged() && - (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) { - SetAllSideEffects(); - ClearFlag(kUseGVN); - } else { - ClearAllSideEffects(); - SetFlag(kUseGVN); - } - if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion); - } - - void UpdateRepresentation(Representation new_rep, - HInferRepresentationPhase* h_infer, - const char* reason) override { - // We only generate either int32 or generic tagged bitwise operations. 
- if (new_rep.IsDouble()) new_rep = Representation::Integer32(); - HBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason); - } - - Representation observed_input_representation(int index) override { - Representation r = HBinaryOperation::observed_input_representation(index); - if (r.IsDouble()) return Representation::Integer32(); - return r; - } - - void initialize_output_representation(Representation observed) override { - if (observed.IsDouble()) observed = Representation::Integer32(); - HBinaryOperation::initialize_output_representation(observed); - } - - DECLARE_ABSTRACT_INSTRUCTION(BitwiseBinaryOperation) - - private: - bool IsDeletable() const override { return true; } -}; - - -class HMathFloorOfDiv final : public HBinaryOperation { - public: - DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HMathFloorOfDiv, - HValue*, - HValue*); - - DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv) - - protected: - bool DataEquals(HValue* other) override { return true; } - - private: - HMathFloorOfDiv(HValue* context, HValue* left, HValue* right) - : HBinaryOperation(context, left, right) { - set_representation(Representation::Integer32()); - SetFlag(kUseGVN); - SetFlag(kCanOverflow); - SetFlag(kCanBeDivByZero); - SetFlag(kLeftCanBeMinInt); - SetFlag(kLeftCanBeNegative); - SetFlag(kLeftCanBePositive); - SetFlag(kTruncatingToNumber); - } - - Range* InferRange(Zone* zone) override; - - bool IsDeletable() const override { return true; } -}; - - -class HArithmeticBinaryOperation : public HBinaryOperation { - public: - HArithmeticBinaryOperation(HValue* context, HValue* left, HValue* right, - HType type = HType::TaggedNumber()) - : HBinaryOperation(context, left, right, type) { - SetAllSideEffects(); - SetFlag(kFlexibleRepresentation); - SetFlag(kTruncatingToNumber); - } - - void RepresentationChanged(Representation to) override { - if (to.IsTagged() && - (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) { - SetAllSideEffects(); - ClearFlag(kUseGVN); - } else { - ClearAllSideEffects(); - SetFlag(kUseGVN); - } - if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion); - } - - DECLARE_ABSTRACT_INSTRUCTION(ArithmeticBinaryOperation) - - private: - bool IsDeletable() const override { return true; } -}; - - -class HCompareGeneric final : public HBinaryOperation { - public: - static HCompareGeneric* New(Isolate* isolate, Zone* zone, HValue* context, - HValue* left, HValue* right, Token::Value token) { - return new (zone) HCompareGeneric(context, left, right, token); - } - - Representation RequiredInputRepresentation(int index) override { - return index == 0 - ? 
Representation::Tagged() - : representation(); - } - - Token::Value token() const { return token_; } - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - DECLARE_CONCRETE_INSTRUCTION(CompareGeneric) - - private: - HCompareGeneric(HValue* context, HValue* left, HValue* right, - Token::Value token) - : HBinaryOperation(context, left, right, HType::Boolean()), - token_(token) { - DCHECK(Token::IsCompareOp(token)); - set_representation(Representation::Tagged()); - SetAllSideEffects(); - } - - Token::Value token_; -}; - - -class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> { - public: - static HCompareNumericAndBranch* New(Isolate* isolate, Zone* zone, - HValue* context, HValue* left, - HValue* right, Token::Value token, - HBasicBlock* true_target = NULL, - HBasicBlock* false_target = NULL) { - return new (zone) - HCompareNumericAndBranch(left, right, token, true_target, false_target); - } - - HValue* left() const { return OperandAt(0); } - HValue* right() const { return OperandAt(1); } - Token::Value token() const { return token_; } - - void set_observed_input_representation(Representation left, - Representation right) { - observed_input_representation_[0] = left; - observed_input_representation_[1] = right; - } - - void InferRepresentation(HInferRepresentationPhase* h_infer) override; - - Representation RequiredInputRepresentation(int index) override { - return representation(); - } - Representation observed_input_representation(int index) override { - return observed_input_representation_[index]; - } - - bool KnownSuccessorBlock(HBasicBlock** block) override; - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch) - - private: - HCompareNumericAndBranch(HValue* left, HValue* right, Token::Value token, - HBasicBlock* true_target, HBasicBlock* false_target) - : token_(token) { - SetFlag(kFlexibleRepresentation); - DCHECK(Token::IsCompareOp(token)); - SetOperandAt(0, left); - SetOperandAt(1, right); - SetSuccessorAt(0, true_target); - SetSuccessorAt(1, false_target); - } - - Representation observed_input_representation_[2]; - Token::Value token_; -}; - - -class HCompareHoleAndBranch final : public HUnaryControlInstruction { - public: - DECLARE_INSTRUCTION_FACTORY_P1(HCompareHoleAndBranch, HValue*); - DECLARE_INSTRUCTION_FACTORY_P3(HCompareHoleAndBranch, HValue*, - HBasicBlock*, HBasicBlock*); - - void InferRepresentation(HInferRepresentationPhase* h_infer) override; - - Representation RequiredInputRepresentation(int index) override { - return representation(); - } - - DECLARE_CONCRETE_INSTRUCTION(CompareHoleAndBranch) - - private: - HCompareHoleAndBranch(HValue* value, - HBasicBlock* true_target = NULL, - HBasicBlock* false_target = NULL) - : HUnaryControlInstruction(value, true_target, false_target) { - SetFlag(kFlexibleRepresentation); - } -}; - - -class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> { - public: - DECLARE_INSTRUCTION_FACTORY_P2(HCompareObjectEqAndBranch, HValue*, HValue*); - DECLARE_INSTRUCTION_FACTORY_P4(HCompareObjectEqAndBranch, HValue*, HValue*, - HBasicBlock*, HBasicBlock*); - - bool KnownSuccessorBlock(HBasicBlock** block) override; - - static const int kNoKnownSuccessorIndex = -1; - int known_successor_index() const { return known_successor_index_; } - void set_known_successor_index(int known_successor_index) { - known_successor_index_ = known_successor_index; - } - - HValue* left() const { return OperandAt(0); } - HValue* 
right() const { return OperandAt(1); } - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - Representation observed_input_representation(int index) override { - return Representation::Tagged(); - } - - DECLARE_CONCRETE_INSTRUCTION(CompareObjectEqAndBranch) - - private: - HCompareObjectEqAndBranch(HValue* left, - HValue* right, - HBasicBlock* true_target = NULL, - HBasicBlock* false_target = NULL) - : known_successor_index_(kNoKnownSuccessorIndex) { - SetOperandAt(0, left); - SetOperandAt(1, right); - SetSuccessorAt(0, true_target); - SetSuccessorAt(1, false_target); - } - - int known_successor_index_; -}; - - -class HIsStringAndBranch final : public HUnaryControlInstruction { - public: - DECLARE_INSTRUCTION_FACTORY_P1(HIsStringAndBranch, HValue*); - DECLARE_INSTRUCTION_FACTORY_P3(HIsStringAndBranch, HValue*, - HBasicBlock*, HBasicBlock*); - - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - bool KnownSuccessorBlock(HBasicBlock** block) override; - - static const int kNoKnownSuccessorIndex = -1; - int known_successor_index() const { return known_successor_index_; } - void set_known_successor_index(int known_successor_index) { - known_successor_index_ = known_successor_index; - } - - DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch) - - protected: - int RedefinedOperandIndex() override { return 0; } - - private: - HIsStringAndBranch(HValue* value, HBasicBlock* true_target = NULL, - HBasicBlock* false_target = NULL) - : HUnaryControlInstruction(value, true_target, false_target), - known_successor_index_(kNoKnownSuccessorIndex) { - set_representation(Representation::Tagged()); - } - - int known_successor_index_; -}; - - -class HIsSmiAndBranch final : public HUnaryControlInstruction { - public: - DECLARE_INSTRUCTION_FACTORY_P1(HIsSmiAndBranch, HValue*); - DECLARE_INSTRUCTION_FACTORY_P3(HIsSmiAndBranch, HValue*, - HBasicBlock*, HBasicBlock*); - - DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch) - - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - protected: - bool DataEquals(HValue* other) override { return true; } - int RedefinedOperandIndex() override { return 0; } - - private: - HIsSmiAndBranch(HValue* value, - HBasicBlock* true_target = NULL, - HBasicBlock* false_target = NULL) - : HUnaryControlInstruction(value, true_target, false_target) { - set_representation(Representation::Tagged()); - } -}; - - -class HIsUndetectableAndBranch final : public HUnaryControlInstruction { - public: - DECLARE_INSTRUCTION_FACTORY_P1(HIsUndetectableAndBranch, HValue*); - DECLARE_INSTRUCTION_FACTORY_P3(HIsUndetectableAndBranch, HValue*, - HBasicBlock*, HBasicBlock*); - - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - bool KnownSuccessorBlock(HBasicBlock** block) override; - - DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch) - - private: - HIsUndetectableAndBranch(HValue* value, - HBasicBlock* true_target = NULL, - HBasicBlock* false_target = NULL) - : HUnaryControlInstruction(value, true_target, false_target) {} -}; - - -class HStringCompareAndBranch final : public HTemplateControlInstruction<2, 3> { - public: - DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HStringCompareAndBranch, - HValue*, - HValue*, - Token::Value); - - HValue* context() const { return OperandAt(0); } - HValue* left() const { return 
OperandAt(1); } - HValue* right() const { return OperandAt(2); } - Token::Value token() const { return token_; } - - std::ostream& PrintDataTo(std::ostream& os) const final; // NOLINT - - Representation RequiredInputRepresentation(int index) final { - return Representation::Tagged(); - } - - DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch) - - private: - HStringCompareAndBranch(HValue* context, HValue* left, HValue* right, - Token::Value token) - : token_(token) { - DCHECK(Token::IsCompareOp(token)); - SetOperandAt(0, context); - SetOperandAt(1, left); - SetOperandAt(2, right); - set_representation(Representation::Tagged()); - SetChangesFlag(kNewSpacePromotion); - SetDependsOnFlag(kStringChars); - SetDependsOnFlag(kStringLengths); - } - - Token::Value const token_; -}; - - -class HHasInstanceTypeAndBranch final : public HUnaryControlInstruction { - public: - DECLARE_INSTRUCTION_FACTORY_P2( - HHasInstanceTypeAndBranch, HValue*, InstanceType); - DECLARE_INSTRUCTION_FACTORY_P3( - HHasInstanceTypeAndBranch, HValue*, InstanceType, InstanceType); - - InstanceType from() { return from_; } - InstanceType to() { return to_; } - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - bool KnownSuccessorBlock(HBasicBlock** block) override; - - DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch) - - private: - HHasInstanceTypeAndBranch(HValue* value, InstanceType type) - : HUnaryControlInstruction(value, NULL, NULL), from_(type), to_(type) { } - HHasInstanceTypeAndBranch(HValue* value, InstanceType from, InstanceType to) - : HUnaryControlInstruction(value, NULL, NULL), from_(from), to_(to) { - DCHECK(to == LAST_TYPE); // Others not implemented yet in backend. - } - - InstanceType from_; - InstanceType to_; // Inclusive range, not all combinations work. 
-}; - -class HClassOfTestAndBranch final : public HUnaryControlInstruction { - public: - DECLARE_INSTRUCTION_FACTORY_P2(HClassOfTestAndBranch, HValue*, - Handle); - - DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch) - - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - Handle class_name() const { return class_name_; } - - private: - HClassOfTestAndBranch(HValue* value, Handle class_name) - : HUnaryControlInstruction(value, NULL, NULL), class_name_(class_name) {} - - Handle class_name_; -}; - -class HTypeofIsAndBranch final : public HUnaryControlInstruction { - public: - DECLARE_INSTRUCTION_FACTORY_P2(HTypeofIsAndBranch, HValue*, Handle); - - Handle type_literal() const { return type_literal_.handle(); } - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch) - - Representation RequiredInputRepresentation(int index) override { - return Representation::None(); - } - - bool KnownSuccessorBlock(HBasicBlock** block) override; - - void FinalizeUniqueness() override { - type_literal_ = Unique(type_literal_.handle()); - } - - private: - HTypeofIsAndBranch(HValue* value, Handle type_literal) - : HUnaryControlInstruction(value, NULL, NULL), - type_literal_(Unique::CreateUninitialized(type_literal)) { } - - Unique type_literal_; -}; - - -class HHasInPrototypeChainAndBranch final - : public HTemplateControlInstruction<2, 2> { - public: - DECLARE_INSTRUCTION_FACTORY_P2(HHasInPrototypeChainAndBranch, HValue*, - HValue*); - - HValue* object() const { return OperandAt(0); } - HValue* prototype() const { return OperandAt(1); } - - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - bool ObjectNeedsSmiCheck() const { - return !object()->type().IsHeapObject() && - !object()->representation().IsHeapObject(); - } - - DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch) - - private: - HHasInPrototypeChainAndBranch(HValue* object, HValue* prototype) { - SetOperandAt(0, object); - SetOperandAt(1, prototype); - SetDependsOnFlag(kCalls); - } -}; - - -class HPower final : public HTemplateInstruction<2> { - public: - static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context, - HValue* left, HValue* right); - - HValue* left() { return OperandAt(0); } - HValue* right() const { return OperandAt(1); } - - Representation RequiredInputRepresentation(int index) override { - return index == 0 - ? 
Representation::Double() - : Representation::None(); - } - Representation observed_input_representation(int index) override { - return RequiredInputRepresentation(index); - } - - DECLARE_CONCRETE_INSTRUCTION(Power) - - protected: - bool DataEquals(HValue* other) override { return true; } - - private: - HPower(HValue* left, HValue* right) { - SetOperandAt(0, left); - SetOperandAt(1, right); - set_representation(Representation::Double()); - SetFlag(kUseGVN); - SetChangesFlag(kNewSpacePromotion); - } - - bool IsDeletable() const override { - return !right()->representation().IsTagged(); - } -}; - - -enum ExternalAddType { - AddOfExternalAndTagged, - AddOfExternalAndInt32, - NoExternalAdd -}; - - -class HAdd final : public HArithmeticBinaryOperation { - public: - static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context, - HValue* left, HValue* right); - static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context, - HValue* left, HValue* right, - ExternalAddType external_add_type); - - // Add is only commutative if two integer values are added and not if two - // tagged values are added (because it might be a String concatenation). - // We also do not commute (pointer + offset). - bool IsCommutative() const override { - return !representation().IsTagged() && !representation().IsExternal(); - } - - HValue* Canonicalize() override; - - void RepresentationChanged(Representation to) override { - if (to.IsTagged() && - (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved() || - left()->ToStringCanBeObserved() || right()->ToStringCanBeObserved())) { - SetAllSideEffects(); - ClearFlag(kUseGVN); - } else { - ClearAllSideEffects(); - SetFlag(kUseGVN); - } - if (to.IsTagged()) { - SetChangesFlag(kNewSpacePromotion); - ClearFlag(kTruncatingToNumber); - } - if (!right()->type().IsTaggedNumber() && - !right()->representation().IsDouble() && - !right()->representation().IsSmiOrInteger32()) { - ClearFlag(kTruncatingToNumber); - } - } - - Representation RepresentationFromInputs() override; - - Representation RequiredInputRepresentation(int index) override; - - bool IsConsistentExternalRepresentation() { - return left()->representation().IsExternal() && - ((external_add_type_ == AddOfExternalAndInt32 && - right()->representation().IsInteger32()) || - (external_add_type_ == AddOfExternalAndTagged && - right()->representation().IsTagged())); - } - - ExternalAddType external_add_type() const { return external_add_type_; } - - DECLARE_CONCRETE_INSTRUCTION(Add) - - protected: - bool DataEquals(HValue* other) override { return true; } - - Range* InferRange(Zone* zone) override; - - private: - HAdd(HValue* context, HValue* left, HValue* right, - ExternalAddType external_add_type = NoExternalAdd) - : HArithmeticBinaryOperation(context, left, right, HType::Tagged()), - external_add_type_(external_add_type) { - SetFlag(kCanOverflow); - switch (external_add_type_) { - case AddOfExternalAndTagged: - DCHECK(left->representation().IsExternal()); - DCHECK(right->representation().IsTagged()); - SetDependsOnFlag(kNewSpacePromotion); - ClearFlag(HValue::kCanOverflow); - SetFlag(kHasNoObservableSideEffects); - break; - - case NoExternalAdd: - // This is a bit of a hack: The call to this constructor is generated - // by a macro that also supports sub and mul, so it doesn't pass in - // a value for external_add_type but uses the default. 
- if (left->representation().IsExternal()) { - external_add_type_ = AddOfExternalAndInt32; - } - break; - - case AddOfExternalAndInt32: - // See comment above. - UNREACHABLE(); - break; - } - } - - ExternalAddType external_add_type_; -}; - - -class HSub final : public HArithmeticBinaryOperation { - public: - static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context, - HValue* left, HValue* right); - - HValue* Canonicalize() override; - - DECLARE_CONCRETE_INSTRUCTION(Sub) - - protected: - bool DataEquals(HValue* other) override { return true; } - - Range* InferRange(Zone* zone) override; - - private: - HSub(HValue* context, HValue* left, HValue* right) - : HArithmeticBinaryOperation(context, left, right) { - SetFlag(kCanOverflow); - } -}; - - -class HMul final : public HArithmeticBinaryOperation { - public: - static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context, - HValue* left, HValue* right); - - static HInstruction* NewImul(Isolate* isolate, Zone* zone, HValue* context, - HValue* left, HValue* right) { - HInstruction* instr = HMul::New(isolate, zone, context, left, right); - if (!instr->IsMul()) return instr; - HMul* mul = HMul::cast(instr); - // TODO(mstarzinger): Prevent bailout on minus zero for imul. - mul->AssumeRepresentation(Representation::Integer32()); - mul->ClearFlag(HValue::kCanOverflow); - return mul; - } - - HValue* Canonicalize() override; - - // Only commutative if it is certain that not two objects are multiplicated. - bool IsCommutative() const override { return !representation().IsTagged(); } - - void UpdateRepresentation(Representation new_rep, - HInferRepresentationPhase* h_infer, - const char* reason) override { - HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason); - } - - bool MulMinusOne(); - - DECLARE_CONCRETE_INSTRUCTION(Mul) - - protected: - bool DataEquals(HValue* other) override { return true; } - - Range* InferRange(Zone* zone) override; - - private: - HMul(HValue* context, HValue* left, HValue* right) - : HArithmeticBinaryOperation(context, left, right) { - SetFlag(kCanOverflow); - } -}; - - -class HMod final : public HArithmeticBinaryOperation { - public: - static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context, - HValue* left, HValue* right); - - HValue* Canonicalize() override; - - void UpdateRepresentation(Representation new_rep, - HInferRepresentationPhase* h_infer, - const char* reason) override { - if (new_rep.IsSmi()) new_rep = Representation::Integer32(); - HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason); - } - - DECLARE_CONCRETE_INSTRUCTION(Mod) - - protected: - bool DataEquals(HValue* other) override { return true; } - - Range* InferRange(Zone* zone) override; - - private: - HMod(HValue* context, HValue* left, HValue* right) - : HArithmeticBinaryOperation(context, left, right) { - SetFlag(kCanBeDivByZero); - SetFlag(kCanOverflow); - SetFlag(kLeftCanBeNegative); - } -}; - - -class HDiv final : public HArithmeticBinaryOperation { - public: - static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context, - HValue* left, HValue* right); - - HValue* Canonicalize() override; - - void UpdateRepresentation(Representation new_rep, - HInferRepresentationPhase* h_infer, - const char* reason) override { - if (new_rep.IsSmi()) new_rep = Representation::Integer32(); - HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason); - } - - DECLARE_CONCRETE_INSTRUCTION(Div) - - protected: - bool DataEquals(HValue* other) override { return true; } 
- - Range* InferRange(Zone* zone) override; - - private: - HDiv(HValue* context, HValue* left, HValue* right) - : HArithmeticBinaryOperation(context, left, right) { - SetFlag(kCanBeDivByZero); - SetFlag(kCanOverflow); - } -}; - - -class HMathMinMax final : public HArithmeticBinaryOperation { - public: - enum Operation { kMathMin, kMathMax }; - - static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context, - HValue* left, HValue* right, Operation op); - - Representation observed_input_representation(int index) override { - return RequiredInputRepresentation(index); - } - - void InferRepresentation(HInferRepresentationPhase* h_infer) override; - - Representation RepresentationFromInputs() override { - Representation left_rep = left()->representation(); - Representation right_rep = right()->representation(); - Representation result = Representation::Smi(); - result = result.generalize(left_rep); - result = result.generalize(right_rep); - if (result.IsTagged()) return Representation::Double(); - return result; - } - - bool IsCommutative() const override { return true; } - - Operation operation() { return operation_; } - - DECLARE_CONCRETE_INSTRUCTION(MathMinMax) - - protected: - bool DataEquals(HValue* other) override { - return other->IsMathMinMax() && - HMathMinMax::cast(other)->operation_ == operation_; - } - - Range* InferRange(Zone* zone) override; - - private: - HMathMinMax(HValue* context, HValue* left, HValue* right, Operation op) - : HArithmeticBinaryOperation(context, left, right), operation_(op) {} - - Operation operation_; -}; - - -class HBitwise final : public HBitwiseBinaryOperation { - public: - static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context, - Token::Value op, HValue* left, HValue* right); - - Token::Value op() const { return op_; } - - bool IsCommutative() const override { return true; } - - HValue* Canonicalize() override; - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - DECLARE_CONCRETE_INSTRUCTION(Bitwise) - - protected: - bool DataEquals(HValue* other) override { - return op() == HBitwise::cast(other)->op(); - } - - Range* InferRange(Zone* zone) override; - - private: - HBitwise(HValue* context, Token::Value op, HValue* left, HValue* right) - : HBitwiseBinaryOperation(context, left, right), op_(op) { - DCHECK(op == Token::BIT_AND || op == Token::BIT_OR || op == Token::BIT_XOR); - // BIT_AND with a smi-range positive value will always unset the - // entire sign-extension of the smi-sign. - if (op == Token::BIT_AND && - ((left->IsConstant() && - left->representation().IsSmi() && - HConstant::cast(left)->Integer32Value() >= 0) || - (right->IsConstant() && - right->representation().IsSmi() && - HConstant::cast(right)->Integer32Value() >= 0))) { - SetFlag(kTruncatingToSmi); - SetFlag(kTruncatingToInt32); - // BIT_OR with a smi-range negative value will always set the entire - // sign-extension of the smi-sign. 
- } else if (op == Token::BIT_OR && - ((left->IsConstant() && - left->representation().IsSmi() && - HConstant::cast(left)->Integer32Value() < 0) || - (right->IsConstant() && - right->representation().IsSmi() && - HConstant::cast(right)->Integer32Value() < 0))) { - SetFlag(kTruncatingToSmi); - SetFlag(kTruncatingToInt32); - } - } - - Token::Value op_; -}; - - -class HShl final : public HBitwiseBinaryOperation { - public: - static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context, - HValue* left, HValue* right); - - Range* InferRange(Zone* zone) override; - - void UpdateRepresentation(Representation new_rep, - HInferRepresentationPhase* h_infer, - const char* reason) override { - if (new_rep.IsSmi() && - !(right()->IsInteger32Constant() && - right()->GetInteger32Constant() >= 0)) { - new_rep = Representation::Integer32(); - } - HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason); - } - - DECLARE_CONCRETE_INSTRUCTION(Shl) - - protected: - bool DataEquals(HValue* other) override { return true; } - - private: - HShl(HValue* context, HValue* left, HValue* right) - : HBitwiseBinaryOperation(context, left, right) {} -}; - - -class HShr final : public HBitwiseBinaryOperation { - public: - static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context, - HValue* left, HValue* right); - - Range* InferRange(Zone* zone) override; - - void UpdateRepresentation(Representation new_rep, - HInferRepresentationPhase* h_infer, - const char* reason) override { - if (new_rep.IsSmi()) new_rep = Representation::Integer32(); - HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason); - } - - DECLARE_CONCRETE_INSTRUCTION(Shr) - - protected: - bool DataEquals(HValue* other) override { return true; } - - private: - HShr(HValue* context, HValue* left, HValue* right) - : HBitwiseBinaryOperation(context, left, right) {} -}; - - -class HSar final : public HBitwiseBinaryOperation { - public: - static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context, - HValue* left, HValue* right); - - Range* InferRange(Zone* zone) override; - - void UpdateRepresentation(Representation new_rep, - HInferRepresentationPhase* h_infer, - const char* reason) override { - if (new_rep.IsSmi()) new_rep = Representation::Integer32(); - HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason); - } - - DECLARE_CONCRETE_INSTRUCTION(Sar) - - protected: - bool DataEquals(HValue* other) override { return true; } - - private: - HSar(HValue* context, HValue* left, HValue* right) - : HBitwiseBinaryOperation(context, left, right) {} -}; - - -class HRor final : public HBitwiseBinaryOperation { - public: - static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context, - HValue* left, HValue* right) { - return new (zone) HRor(context, left, right); - } - - void UpdateRepresentation(Representation new_rep, - HInferRepresentationPhase* h_infer, - const char* reason) override { - if (new_rep.IsSmi()) new_rep = Representation::Integer32(); - HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason); - } - - DECLARE_CONCRETE_INSTRUCTION(Ror) - - protected: - bool DataEquals(HValue* other) override { return true; } - - private: - HRor(HValue* context, HValue* left, HValue* right) - : HBitwiseBinaryOperation(context, left, right) { - ChangeRepresentation(Representation::Integer32()); - } -}; - - -class HOsrEntry final : public HTemplateInstruction<0> { - public: - DECLARE_INSTRUCTION_FACTORY_P1(HOsrEntry, BailoutId); - - BailoutId ast_id() const { return 
ast_id_; } - - Representation RequiredInputRepresentation(int index) override { - return Representation::None(); - } - - DECLARE_CONCRETE_INSTRUCTION(OsrEntry) - - private: - explicit HOsrEntry(BailoutId ast_id) : ast_id_(ast_id) { - SetChangesFlag(kOsrEntries); - SetChangesFlag(kNewSpacePromotion); - } - - BailoutId ast_id_; -}; - - -class HParameter final : public HTemplateInstruction<0> { - public: - enum ParameterKind { - STACK_PARAMETER, - REGISTER_PARAMETER - }; - - DECLARE_INSTRUCTION_FACTORY_P1(HParameter, unsigned); - DECLARE_INSTRUCTION_FACTORY_P2(HParameter, unsigned, ParameterKind); - DECLARE_INSTRUCTION_FACTORY_P3(HParameter, unsigned, ParameterKind, - Representation); - - unsigned index() const { return index_; } - ParameterKind kind() const { return kind_; } - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - Representation RequiredInputRepresentation(int index) override { - return Representation::None(); - } - - Representation KnownOptimalRepresentation() override { - // If a parameter is an input to a phi, that phi should not - // choose any more optimistic representation than Tagged. - return Representation::Tagged(); - } - - DECLARE_CONCRETE_INSTRUCTION(Parameter) - - private: - explicit HParameter(unsigned index, - ParameterKind kind = STACK_PARAMETER) - : index_(index), - kind_(kind) { - set_representation(Representation::Tagged()); - } - - explicit HParameter(unsigned index, - ParameterKind kind, - Representation r) - : index_(index), - kind_(kind) { - set_representation(r); - } - - unsigned index_; - ParameterKind kind_; -}; - - -class HUnknownOSRValue final : public HTemplateInstruction<0> { - public: - DECLARE_INSTRUCTION_FACTORY_P2(HUnknownOSRValue, HEnvironment*, int); - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - Representation RequiredInputRepresentation(int index) override { - return Representation::None(); - } - - void set_incoming_value(HPhi* value) { incoming_value_ = value; } - HPhi* incoming_value() { return incoming_value_; } - HEnvironment *environment() { return environment_; } - int index() { return index_; } - - Representation KnownOptimalRepresentation() override { - if (incoming_value_ == NULL) return Representation::None(); - return incoming_value_->KnownOptimalRepresentation(); - } - - DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue) - - private: - HUnknownOSRValue(HEnvironment* environment, int index) - : environment_(environment), - index_(index), - incoming_value_(NULL) { - set_representation(Representation::Tagged()); - } - - HEnvironment* environment_; - int index_; - HPhi* incoming_value_; -}; - -class HAllocate final : public HTemplateInstruction<3> { - public: - static bool CompatibleInstanceTypes(InstanceType type1, - InstanceType type2) { - return ComputeFlags(TENURED, type1) == ComputeFlags(TENURED, type2) && - ComputeFlags(NOT_TENURED, type1) == ComputeFlags(NOT_TENURED, type2); - } - - static HAllocate* New( - Isolate* isolate, Zone* zone, HValue* context, HValue* size, HType type, - PretenureFlag pretenure_flag, InstanceType instance_type, - HValue* dominator, - Handle allocation_site = Handle::null()) { - return new (zone) HAllocate(context, size, type, pretenure_flag, - instance_type, dominator, allocation_site); - } - - // Maximum instance size for which allocations will be inlined. 
- static const int kMaxInlineSize = 64 * kPointerSize; - - HValue* context() const { return OperandAt(0); } - HValue* size() const { return OperandAt(1); } - HValue* allocation_folding_dominator() const { return OperandAt(2); } - - Representation RequiredInputRepresentation(int index) override { - if (index == 0) { - return Representation::Tagged(); - } else { - return Representation::Integer32(); - } - } - - Handle GetMonomorphicJSObjectMap() override { - return known_initial_map_; - } - - void set_known_initial_map(Handle known_initial_map) { - known_initial_map_ = known_initial_map; - } - - bool IsNewSpaceAllocation() const { - return (flags_ & ALLOCATE_IN_NEW_SPACE) != 0; - } - - bool IsOldSpaceAllocation() const { - return (flags_ & ALLOCATE_IN_OLD_SPACE) != 0; - } - - bool MustAllocateDoubleAligned() const { - return (flags_ & ALLOCATE_DOUBLE_ALIGNED) != 0; - } - - bool MustPrefillWithFiller() const { - return (flags_ & PREFILL_WITH_FILLER) != 0; - } - - void MakePrefillWithFiller() { - flags_ = static_cast(flags_ | PREFILL_WITH_FILLER); - } - - void MakeDoubleAligned() { - flags_ = static_cast(flags_ | ALLOCATE_DOUBLE_ALIGNED); - } - - void MakeAllocationFoldingDominator() { - flags_ = - static_cast(flags_ | ALLOCATION_FOLDING_DOMINATOR); - } - - bool IsAllocationFoldingDominator() const { - return (flags_ & ALLOCATION_FOLDING_DOMINATOR) != 0; - } - - void MakeFoldedAllocation(HAllocate* dominator) { - flags_ = static_cast(flags_ | ALLOCATION_FOLDED); - ClearFlag(kTrackSideEffectDominators); - ClearChangesFlag(kNewSpacePromotion); - SetOperandAt(2, dominator); - } - - bool IsAllocationFolded() const { return (flags_ & ALLOCATION_FOLDED) != 0; } - - bool HandleSideEffectDominator(GVNFlag side_effect, - HValue* dominator) override; - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - DECLARE_CONCRETE_INSTRUCTION(Allocate) - - private: - enum Flags { - ALLOCATE_IN_NEW_SPACE = 1 << 0, - ALLOCATE_IN_OLD_SPACE = 1 << 2, - ALLOCATE_DOUBLE_ALIGNED = 1 << 3, - PREFILL_WITH_FILLER = 1 << 4, - ALLOCATION_FOLDING_DOMINATOR = 1 << 5, - ALLOCATION_FOLDED = 1 << 6 - }; - - HAllocate( - HValue* context, HValue* size, HType type, PretenureFlag pretenure_flag, - InstanceType instance_type, HValue* dominator, - Handle allocation_site = Handle::null()) - : HTemplateInstruction<3>(type), - flags_(ComputeFlags(pretenure_flag, instance_type)) { - SetOperandAt(0, context); - UpdateSize(size); - SetOperandAt(2, dominator); - set_representation(Representation::Tagged()); - SetFlag(kTrackSideEffectDominators); - SetChangesFlag(kNewSpacePromotion); - SetDependsOnFlag(kNewSpacePromotion); - - if (FLAG_trace_pretenuring) { - PrintF("HAllocate with AllocationSite %p %s\n", - allocation_site.is_null() - ? static_cast(NULL) - : static_cast(*allocation_site), - pretenure_flag == TENURED ? "tenured" : "not tenured"); - } - } - - static Flags ComputeFlags(PretenureFlag pretenure_flag, - InstanceType instance_type) { - Flags flags = pretenure_flag == TENURED ? ALLOCATE_IN_OLD_SPACE - : ALLOCATE_IN_NEW_SPACE; - if (instance_type == FIXED_DOUBLE_ARRAY_TYPE) { - flags = static_cast(flags | ALLOCATE_DOUBLE_ALIGNED); - } - // We have to fill the allocated object with one word fillers if we do - // not use allocation folding since some allocations may depend on each - // other, i.e., have a pointer to each other. A GC in between these - // allocations may leave such objects behind in a not completely initialized - // state. 
- if (!FLAG_use_gvn || !FLAG_use_allocation_folding) { - flags = static_cast(flags | PREFILL_WITH_FILLER); - } - return flags; - } - - void UpdateSize(HValue* size) { - SetOperandAt(1, size); - } - - bool IsFoldable(HAllocate* allocate) { - return (IsNewSpaceAllocation() && allocate->IsNewSpaceAllocation()) || - (IsOldSpaceAllocation() && allocate->IsOldSpaceAllocation()); - } - - Flags flags_; - Handle known_initial_map_; -}; - - -class HStoreCodeEntry final : public HTemplateInstruction<2> { - public: - static HStoreCodeEntry* New(Isolate* isolate, Zone* zone, HValue* context, - HValue* function, HValue* code) { - return new(zone) HStoreCodeEntry(function, code); - } - - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - HValue* function() { return OperandAt(0); } - HValue* code_object() { return OperandAt(1); } - - DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry) - - private: - HStoreCodeEntry(HValue* function, HValue* code) { - SetOperandAt(0, function); - SetOperandAt(1, code); - } -}; - - -class HInnerAllocatedObject final : public HTemplateInstruction<2> { - public: - static HInnerAllocatedObject* New(Isolate* isolate, Zone* zone, - HValue* context, HValue* value, - HValue* offset, HType type) { - return new(zone) HInnerAllocatedObject(value, offset, type); - } - - HValue* base_object() const { return OperandAt(0); } - HValue* offset() const { return OperandAt(1); } - - Representation RequiredInputRepresentation(int index) override { - return index == 0 ? Representation::Tagged() : Representation::Integer32(); - } - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject) - - private: - HInnerAllocatedObject(HValue* value, - HValue* offset, - HType type) : HTemplateInstruction<2>(type) { - DCHECK(value->IsAllocate()); - DCHECK(type.IsHeapObject()); - SetOperandAt(0, value); - SetOperandAt(1, offset); - set_representation(Representation::Tagged()); - } -}; - - -inline bool StoringValueNeedsWriteBarrier(HValue* value) { - return !value->type().IsSmi() - && !value->type().IsNull() - && !value->type().IsBoolean() - && !value->type().IsUndefined() - && !(value->IsConstant() && HConstant::cast(value)->ImmortalImmovable()); -} - - -inline bool ReceiverObjectNeedsWriteBarrier(HValue* object, - HValue* value, - HValue* dominator) { - // There may be multiple inner allocates dominated by one allocate. - while (object->IsInnerAllocatedObject()) { - object = HInnerAllocatedObject::cast(object)->base_object(); - } - - if (object->IsAllocate()) { - HAllocate* allocate = HAllocate::cast(object); - if (allocate->IsAllocationFolded()) { - HValue* dominator = allocate->allocation_folding_dominator(); - // There is no guarantee that all allocations are folded together because - // GVN performs a fixpoint. - if (HAllocate::cast(dominator)->IsAllocationFoldingDominator()) { - object = dominator; - } - } - } - - if (object->IsConstant() && - HConstant::cast(object)->HasExternalReferenceValue()) { - // Stores to external references require no write barriers - return false; - } - // We definitely need a write barrier unless the object is the allocation - // dominator. - if (object == dominator && object->IsAllocate()) { - // Stores to new space allocations require no write barriers. 
- if (HAllocate::cast(object)->IsNewSpaceAllocation()) { - return false; - } - } - return true; -} - - -inline PointersToHereCheck PointersToHereCheckForObject(HValue* object, - HValue* dominator) { - while (object->IsInnerAllocatedObject()) { - object = HInnerAllocatedObject::cast(object)->base_object(); - } - if (object == dominator && - object->IsAllocate() && - HAllocate::cast(object)->IsNewSpaceAllocation()) { - return kPointersToHereAreAlwaysInteresting; - } - return kPointersToHereMaybeInteresting; -} - - -class HLoadContextSlot final : public HUnaryOperation { - public: - enum Mode { - // Perform a normal load of the context slot without checking its value. - kNoCheck, - // Load and check the value of the context slot. Deoptimize if it's the - // hole value. This is used for checking for loading of uninitialized - // harmony bindings where we deoptimize into full-codegen generated code - // which will subsequently throw a reference error. - kCheckDeoptimize - }; - - HLoadContextSlot(HValue* context, int slot_index, Mode mode) - : HUnaryOperation(context), slot_index_(slot_index), mode_(mode) { - set_representation(Representation::Tagged()); - SetFlag(kUseGVN); - SetDependsOnFlag(kContextSlots); - } - - int slot_index() const { return slot_index_; } - Mode mode() const { return mode_; } - - bool DeoptimizesOnHole() { - return mode_ == kCheckDeoptimize; - } - - bool RequiresHoleCheck() const { - return mode_ != kNoCheck; - } - - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot) - - protected: - bool DataEquals(HValue* other) override { - HLoadContextSlot* b = HLoadContextSlot::cast(other); - return (slot_index() == b->slot_index()); - } - - private: - bool IsDeletable() const override { return !RequiresHoleCheck(); } - - int slot_index_; - Mode mode_; -}; - - -class HStoreContextSlot final : public HTemplateInstruction<2> { - public: - enum Mode { - // Perform a normal store to the context slot without checking its previous - // value. - kNoCheck, - // Check the previous value of the context slot and deoptimize if it's the - // hole value. This is used for checking for assignments to uninitialized - // harmony bindings where we deoptimize into full-codegen generated code - // which will subsequently throw a reference error. 
- kCheckDeoptimize - }; - - DECLARE_INSTRUCTION_FACTORY_P4(HStoreContextSlot, HValue*, int, - Mode, HValue*); - - HValue* context() const { return OperandAt(0); } - HValue* value() const { return OperandAt(1); } - int slot_index() const { return slot_index_; } - Mode mode() const { return mode_; } - - bool NeedsWriteBarrier() { - return StoringValueNeedsWriteBarrier(value()); - } - - bool DeoptimizesOnHole() { - return mode_ == kCheckDeoptimize; - } - - bool RequiresHoleCheck() { - return mode_ != kNoCheck; - } - - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot) - - private: - HStoreContextSlot(HValue* context, int slot_index, Mode mode, HValue* value) - : slot_index_(slot_index), mode_(mode) { - SetOperandAt(0, context); - SetOperandAt(1, value); - SetChangesFlag(kContextSlots); - } - - int slot_index_; - Mode mode_; -}; - - -// Represents an access to a portion of an object, such as the map pointer, -// array elements pointer, etc, but not accesses to array elements themselves. -class HObjectAccess final { - public: - inline bool IsInobject() const { - return portion() != kBackingStore && portion() != kExternalMemory; - } - - inline bool IsExternalMemory() const { - return portion() == kExternalMemory; - } - - inline bool IsStringLength() const { - return portion() == kStringLengths; - } - - inline bool IsMap() const { - return portion() == kMaps; - } - - inline int offset() const { - return OffsetField::decode(value_); - } - - inline Representation representation() const { - return Representation::FromKind(RepresentationField::decode(value_)); - } - - inline Handle name() const { return name_; } - - inline bool immutable() const { - return ImmutableField::decode(value_); - } - - // Returns true if access is being made to an in-object property that - // was already added to the object. - inline bool existing_inobject_property() const { - return ExistingInobjectPropertyField::decode(value_); - } - - inline HObjectAccess WithRepresentation(Representation representation) { - return HObjectAccess(portion(), offset(), representation, name(), - immutable(), existing_inobject_property()); - } - - static HObjectAccess ForHeapNumberValue() { - return HObjectAccess( - kDouble, HeapNumber::kValueOffset, Representation::Double()); - } - - static HObjectAccess ForHeapNumberValueLowestBits() { - return HObjectAccess(kDouble, - HeapNumber::kValueOffset, - Representation::Integer32()); - } - - static HObjectAccess ForHeapNumberValueHighestBits() { - return HObjectAccess(kDouble, - HeapNumber::kValueOffset + kIntSize, - Representation::Integer32()); - } - - static HObjectAccess ForOddballToNumber( - Representation representation = Representation::Tagged()) { - return HObjectAccess(kInobject, Oddball::kToNumberOffset, representation); - } - - static HObjectAccess ForOddballTypeOf() { - return HObjectAccess(kInobject, Oddball::kTypeOfOffset, - Representation::HeapObject()); - } - - static HObjectAccess ForElementsPointer() { - return HObjectAccess(kElementsPointer, JSObject::kElementsOffset); - } - - static HObjectAccess ForNextFunctionLinkPointer() { - return HObjectAccess(kInobject, JSFunction::kNextFunctionLinkOffset); - } - - static HObjectAccess ForArrayLength(ElementsKind elements_kind) { - return HObjectAccess( - kArrayLengths, - JSArray::kLengthOffset, - IsFastElementsKind(elements_kind) - ? 
Representation::Smi() : Representation::Tagged()); - } - - static HObjectAccess ForAllocationSiteOffset(int offset); - - static HObjectAccess ForAllocationSiteList() { - return HObjectAccess(kExternalMemory, 0, Representation::Tagged(), - Handle::null(), false, false); - } - - static HObjectAccess ForFixedArrayLength() { - return HObjectAccess( - kArrayLengths, - FixedArray::kLengthOffset, - Representation::Smi()); - } - - static HObjectAccess ForFixedTypedArrayBaseBasePointer() { - return HObjectAccess(kInobject, FixedTypedArrayBase::kBasePointerOffset, - Representation::Tagged()); - } - - static HObjectAccess ForFixedTypedArrayBaseExternalPointer() { - return HObjectAccess::ForObservableJSObjectOffset( - FixedTypedArrayBase::kExternalPointerOffset, - Representation::External()); - } - - static HObjectAccess ForStringHashField() { - return HObjectAccess(kInobject, - String::kHashFieldOffset, - Representation::Integer32()); - } - - static HObjectAccess ForStringLength() { - STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue); - return HObjectAccess( - kStringLengths, - String::kLengthOffset, - Representation::Smi()); - } - - static HObjectAccess ForConsStringFirst() { - return HObjectAccess(kInobject, ConsString::kFirstOffset); - } - - static HObjectAccess ForConsStringSecond() { - return HObjectAccess(kInobject, ConsString::kSecondOffset); - } - - static HObjectAccess ForPropertiesPointer() { - return HObjectAccess(kInobject, JSObject::kPropertiesOffset); - } - - static HObjectAccess ForPrototypeOrInitialMap() { - return HObjectAccess(kInobject, JSFunction::kPrototypeOrInitialMapOffset); - } - - static HObjectAccess ForSharedFunctionInfoPointer() { - return HObjectAccess(kInobject, JSFunction::kSharedFunctionInfoOffset); - } - - static HObjectAccess ForCodeEntryPointer() { - return HObjectAccess(kInobject, JSFunction::kCodeEntryOffset); - } - - static HObjectAccess ForCodeOffset() { - return HObjectAccess(kInobject, SharedFunctionInfo::kCodeOffset); - } - - static HObjectAccess ForFunctionContextPointer() { - return HObjectAccess(kInobject, JSFunction::kContextOffset); - } - - static HObjectAccess ForMap() { - return HObjectAccess(kMaps, JSObject::kMapOffset); - } - - static HObjectAccess ForPrototype() { - return HObjectAccess(kMaps, Map::kPrototypeOffset); - } - - static HObjectAccess ForMapAsInteger32() { - return HObjectAccess(kMaps, JSObject::kMapOffset, - Representation::Integer32()); - } - - static HObjectAccess ForMapInObjectPropertiesOrConstructorFunctionIndex() { - return HObjectAccess( - kInobject, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset, - Representation::UInteger8()); - } - - static HObjectAccess ForMapInstanceType() { - return HObjectAccess(kInobject, - Map::kInstanceTypeOffset, - Representation::UInteger8()); - } - - static HObjectAccess ForMapInstanceSize() { - return HObjectAccess(kInobject, - Map::kInstanceSizeOffset, - Representation::UInteger8()); - } - - static HObjectAccess ForMapBitField() { - return HObjectAccess(kInobject, - Map::kBitFieldOffset, - Representation::UInteger8()); - } - - static HObjectAccess ForMapBitField2() { - return HObjectAccess(kInobject, - Map::kBitField2Offset, - Representation::UInteger8()); - } - - static HObjectAccess ForMapBitField3() { - return HObjectAccess(kInobject, Map::kBitField3Offset, - Representation::Integer32()); - } - - static HObjectAccess ForMapDescriptors() { - return HObjectAccess(kInobject, Map::kDescriptorsOffset); - } - - static HObjectAccess ForNameHashField() { - return HObjectAccess(kInobject, - 
Name::kHashFieldOffset, - Representation::Integer32()); - } - - static HObjectAccess ForMapInstanceTypeAndBitField() { - STATIC_ASSERT((Map::kInstanceTypeAndBitFieldOffset & 1) == 0); - // Ensure the two fields share one 16-bit word, endian-independent. - STATIC_ASSERT((Map::kBitFieldOffset & ~1) == - (Map::kInstanceTypeOffset & ~1)); - return HObjectAccess(kInobject, - Map::kInstanceTypeAndBitFieldOffset, - Representation::UInteger16()); - } - - static HObjectAccess ForPropertyCellValue() { - return HObjectAccess(kInobject, PropertyCell::kValueOffset); - } - - static HObjectAccess ForPropertyCellDetails() { - return HObjectAccess(kInobject, PropertyCell::kDetailsOffset, - Representation::Smi()); - } - - static HObjectAccess ForCellValue() { - return HObjectAccess(kInobject, Cell::kValueOffset); - } - - static HObjectAccess ForWeakCellValue() { - return HObjectAccess(kInobject, WeakCell::kValueOffset); - } - - static HObjectAccess ForWeakCellNext() { - return HObjectAccess(kInobject, WeakCell::kNextOffset); - } - - static HObjectAccess ForAllocationMementoSite() { - return HObjectAccess(kInobject, AllocationMemento::kAllocationSiteOffset); - } - - static HObjectAccess ForCounter() { - return HObjectAccess(kExternalMemory, 0, Representation::Integer32(), - Handle::null(), false, false); - } - - static HObjectAccess ForExternalUInteger8() { - return HObjectAccess(kExternalMemory, 0, Representation::UInteger8(), - Handle::null(), false, false); - } - - static HObjectAccess ForBoundTargetFunction() { - return HObjectAccess(kInobject, - JSBoundFunction::kBoundTargetFunctionOffset); - } - - static HObjectAccess ForBoundThis() { - return HObjectAccess(kInobject, JSBoundFunction::kBoundThisOffset); - } - - static HObjectAccess ForBoundArguments() { - return HObjectAccess(kInobject, JSBoundFunction::kBoundArgumentsOffset); - } - - // Create an access to an offset in a fixed array header. - static HObjectAccess ForFixedArrayHeader(int offset); - - // Create an access to an in-object property in a JSObject. - // This kind of access must be used when the object |map| is known and - // in-object properties are being accessed. Accesses of the in-object - // properties can have different semantics depending on whether corresponding - // property was added to the map or not. - static HObjectAccess ForMapAndOffset(Handle map, int offset, - Representation representation = Representation::Tagged()); - - // Create an access to an in-object property in a JSObject. - // This kind of access can be used for accessing object header fields or - // in-object properties if the map of the object is not known. - static HObjectAccess ForObservableJSObjectOffset(int offset, - Representation representation = Representation::Tagged()) { - return ForMapAndOffset(Handle::null(), offset, representation); - } - - // Create an access to an in-object property in a JSArray. - static HObjectAccess ForJSArrayOffset(int offset); - - static HObjectAccess ForContextSlot(int index); - - static HObjectAccess ForScriptContext(int index); - - // Create an access to the backing store of an object. - static HObjectAccess ForBackingStoreOffset(int offset, - Representation representation = Representation::Tagged()); - - // Create an access to a resolved field (in-object or backing store). 
-  static HObjectAccess ForField(Handle<Map> map, int index,
-                                Representation representation,
-                                Handle<Name> name);
-
-  static HObjectAccess ForJSTypedArrayLength() {
-    return HObjectAccess::ForObservableJSObjectOffset(
-        JSTypedArray::kLengthOffset);
-  }
-
-  static HObjectAccess ForJSArrayBufferBackingStore() {
-    return HObjectAccess::ForObservableJSObjectOffset(
-        JSArrayBuffer::kBackingStoreOffset, Representation::External());
-  }
-
-  static HObjectAccess ForJSArrayBufferByteLength() {
-    return HObjectAccess::ForObservableJSObjectOffset(
-        JSArrayBuffer::kByteLengthOffset, Representation::Tagged());
-  }
-
-  static HObjectAccess ForJSArrayBufferBitField() {
-    return HObjectAccess::ForObservableJSObjectOffset(
-        JSArrayBuffer::kBitFieldOffset, Representation::Integer32());
-  }
-
-  static HObjectAccess ForJSArrayBufferBitFieldSlot() {
-    return HObjectAccess::ForObservableJSObjectOffset(
-        JSArrayBuffer::kBitFieldSlot, Representation::Smi());
-  }
-
-  static HObjectAccess ForJSArrayBufferViewBuffer() {
-    return HObjectAccess::ForObservableJSObjectOffset(
-        JSArrayBufferView::kBufferOffset);
-  }
-
-  static HObjectAccess ForJSArrayBufferViewByteOffset() {
-    return HObjectAccess::ForObservableJSObjectOffset(
-        JSArrayBufferView::kByteOffsetOffset);
-  }
-
-  static HObjectAccess ForJSArrayBufferViewByteLength() {
-    return HObjectAccess::ForObservableJSObjectOffset(
-        JSArrayBufferView::kByteLengthOffset);
-  }
-
-  static HObjectAccess ForJSGlobalObjectNativeContext() {
-    return HObjectAccess(kInobject, JSGlobalObject::kNativeContextOffset);
-  }
-
-  static HObjectAccess ForJSRegExpFlags() {
-    return HObjectAccess(kInobject, JSRegExp::kFlagsOffset);
-  }
-
-  static HObjectAccess ForJSRegExpSource() {
-    return HObjectAccess(kInobject, JSRegExp::kSourceOffset);
-  }
-
-  static HObjectAccess ForJSCollectionTable() {
-    return HObjectAccess::ForObservableJSObjectOffset(
-        JSCollection::kTableOffset);
-  }
-
-  template <typename CollectionType>
-  static HObjectAccess ForOrderedHashTableNumberOfBuckets() {
-    return HObjectAccess(kInobject, CollectionType::kNumberOfBucketsOffset,
-                         Representation::Smi());
-  }
-
-  template <typename CollectionType>
-  static HObjectAccess ForOrderedHashTableNumberOfElements() {
-    return HObjectAccess(kInobject, CollectionType::kNumberOfElementsOffset,
-                         Representation::Smi());
-  }
-
-  template <typename CollectionType>
-  static HObjectAccess ForOrderedHashTableNumberOfDeletedElements() {
-    return HObjectAccess(kInobject,
-                         CollectionType::kNumberOfDeletedElementsOffset,
-                         Representation::Smi());
-  }
-
-  template <typename CollectionType>
-  static HObjectAccess ForOrderedHashTableNextTable() {
-    return HObjectAccess(kInobject, CollectionType::kNextTableOffset);
-  }
-
-  template <typename CollectionType>
-  static HObjectAccess ForOrderedHashTableBucket(int bucket) {
-    return HObjectAccess(kInobject, CollectionType::kHashTableStartOffset +
-                                        (bucket * kPointerSize),
-                         Representation::Smi());
-  }
-
-  // Access into the data table of an OrderedHashTable with a
-  // known-at-compile-time bucket count.
- template - static HObjectAccess ForOrderedHashTableDataTableIndex(int index) { - return HObjectAccess(kInobject, CollectionType::kHashTableStartOffset + - (kBucketCount * kPointerSize) + - (index * kPointerSize)); - } - - inline bool Equals(HObjectAccess that) const { - return value_ == that.value_; // portion and offset must match - } - - protected: - void SetGVNFlags(HValue *instr, PropertyAccessType access_type); - - private: - // internal use only; different parts of an object or array - enum Portion { - kMaps, // map of an object - kArrayLengths, // the length of an array - kStringLengths, // the length of a string - kElementsPointer, // elements pointer - kBackingStore, // some field in the backing store - kDouble, // some double field - kInobject, // some other in-object field - kExternalMemory // some field in external memory - }; - - HObjectAccess() : value_(0) {} - - HObjectAccess(Portion portion, int offset, - Representation representation = Representation::Tagged(), - Handle name = Handle::null(), - bool immutable = false, bool existing_inobject_property = true) - : value_(PortionField::encode(portion) | - RepresentationField::encode(representation.kind()) | - ImmutableField::encode(immutable ? 1 : 0) | - ExistingInobjectPropertyField::encode( - existing_inobject_property ? 1 : 0) | - OffsetField::encode(offset)), - name_(name) { - // assert that the fields decode correctly - DCHECK(this->offset() == offset); - DCHECK(this->portion() == portion); - DCHECK(this->immutable() == immutable); - DCHECK(this->existing_inobject_property() == existing_inobject_property); - DCHECK(RepresentationField::decode(value_) == representation.kind()); - DCHECK(!this->existing_inobject_property() || IsInobject()); - } - - class PortionField : public BitField {}; - class RepresentationField : public BitField {}; - class ImmutableField : public BitField {}; - class ExistingInobjectPropertyField : public BitField {}; - class OffsetField : public BitField {}; - - uint32_t value_; // encodes portion, representation, immutable, and offset - Handle name_; - - friend class HLoadNamedField; - friend class HStoreNamedField; - friend class SideEffectsTracker; - friend std::ostream& operator<<(std::ostream& os, - const HObjectAccess& access); - - inline Portion portion() const { - return PortionField::decode(value_); - } -}; - - -std::ostream& operator<<(std::ostream& os, const HObjectAccess& access); - - -class HLoadNamedField final : public HTemplateInstruction<2> { - public: - DECLARE_INSTRUCTION_FACTORY_P3(HLoadNamedField, HValue*, - HValue*, HObjectAccess); - DECLARE_INSTRUCTION_FACTORY_P5(HLoadNamedField, HValue*, HValue*, - HObjectAccess, const UniqueSet*, HType); - - HValue* object() const { return OperandAt(0); } - HValue* dependency() const { - DCHECK(HasDependency()); - return OperandAt(1); - } - bool HasDependency() const { return OperandAt(0) != OperandAt(1); } - HObjectAccess access() const { return access_; } - Representation field_representation() const { - return access_.representation(); - } - - const UniqueSet* maps() const { return maps_; } - - bool HasEscapingOperandAt(int index) override { return false; } - bool HasOutOfBoundsAccess(int size) override { - return !access().IsInobject() || access().offset() >= size; - } - Representation RequiredInputRepresentation(int index) override { - if (index == 0) { - // object must be external in case of external memory access - return access().IsExternalMemory() ? 
Representation::External() - : Representation::Tagged(); - } - DCHECK(index == 1); - return Representation::None(); - } - Range* InferRange(Zone* zone) override; - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - bool CanBeReplacedWith(HValue* other) const { - if (!CheckFlag(HValue::kCantBeReplaced)) return false; - if (!type().Equals(other->type())) return false; - if (!representation().Equals(other->representation())) return false; - if (!other->IsLoadNamedField()) return true; - HLoadNamedField* that = HLoadNamedField::cast(other); - if (this->maps_ == that->maps_) return true; - if (this->maps_ == NULL || that->maps_ == NULL) return false; - return this->maps_->IsSubset(that->maps_); - } - - DECLARE_CONCRETE_INSTRUCTION(LoadNamedField) - - protected: - bool DataEquals(HValue* other) override { - HLoadNamedField* that = HLoadNamedField::cast(other); - if (!this->access_.Equals(that->access_)) return false; - if (this->maps_ == that->maps_) return true; - return (this->maps_ != NULL && - that->maps_ != NULL && - this->maps_->Equals(that->maps_)); - } - - private: - HLoadNamedField(HValue* object, - HValue* dependency, - HObjectAccess access) - : access_(access), maps_(NULL) { - DCHECK_NOT_NULL(object); - SetOperandAt(0, object); - SetOperandAt(1, dependency ? dependency : object); - - Representation representation = access.representation(); - if (representation.IsInteger8() || - representation.IsUInteger8() || - representation.IsInteger16() || - representation.IsUInteger16()) { - set_representation(Representation::Integer32()); - } else if (representation.IsSmi()) { - set_type(HType::Smi()); - if (SmiValuesAre32Bits()) { - set_representation(Representation::Integer32()); - } else { - set_representation(representation); - } - } else if (representation.IsDouble() || - representation.IsExternal() || - representation.IsInteger32()) { - set_representation(representation); - } else if (representation.IsHeapObject()) { - set_type(HType::HeapObject()); - set_representation(Representation::Tagged()); - } else { - set_representation(Representation::Tagged()); - } - access.SetGVNFlags(this, LOAD); - } - - HLoadNamedField(HValue* object, - HValue* dependency, - HObjectAccess access, - const UniqueSet* maps, - HType type) - : HTemplateInstruction<2>(type), access_(access), maps_(maps) { - DCHECK_NOT_NULL(maps); - DCHECK_NE(0, maps->size()); - - DCHECK_NOT_NULL(object); - SetOperandAt(0, object); - SetOperandAt(1, dependency ? 
dependency : object); - - DCHECK(access.representation().IsHeapObject()); - DCHECK(type.IsHeapObject()); - set_representation(Representation::Tagged()); - - access.SetGVNFlags(this, LOAD); - } - - bool IsDeletable() const override { return true; } - - HObjectAccess access_; - const UniqueSet* maps_; -}; - - -class HLoadFunctionPrototype final : public HUnaryOperation { - public: - DECLARE_INSTRUCTION_FACTORY_P1(HLoadFunctionPrototype, HValue*); - - HValue* function() { return OperandAt(0); } - - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype) - - protected: - bool DataEquals(HValue* other) override { return true; } - - private: - explicit HLoadFunctionPrototype(HValue* function) - : HUnaryOperation(function) { - set_representation(Representation::Tagged()); - SetFlag(kUseGVN); - SetDependsOnFlag(kCalls); - } -}; - -class ArrayInstructionInterface { - public: - virtual HValue* GetKey() = 0; - virtual void SetKey(HValue* key) = 0; - virtual ElementsKind elements_kind() const = 0; - // TryIncreaseBaseOffset returns false if overflow would result. - virtual bool TryIncreaseBaseOffset(uint32_t increase_by_value) = 0; - virtual bool IsDehoisted() const = 0; - virtual void SetDehoisted(bool is_dehoisted) = 0; - virtual ~ArrayInstructionInterface() { } - - static Representation KeyedAccessIndexRequirement(Representation r) { - return r.IsInteger32() || SmiValuesAre32Bits() - ? Representation::Integer32() : Representation::Smi(); - } -}; - - -static const int kDefaultKeyedHeaderOffsetSentinel = -1; - -enum LoadKeyedHoleMode { - NEVER_RETURN_HOLE, - ALLOW_RETURN_HOLE, - CONVERT_HOLE_TO_UNDEFINED -}; - - -class HLoadKeyed final : public HTemplateInstruction<4>, - public ArrayInstructionInterface { - public: - DECLARE_INSTRUCTION_FACTORY_P5(HLoadKeyed, HValue*, HValue*, HValue*, HValue*, - ElementsKind); - DECLARE_INSTRUCTION_FACTORY_P6(HLoadKeyed, HValue*, HValue*, HValue*, HValue*, - ElementsKind, LoadKeyedHoleMode); - DECLARE_INSTRUCTION_FACTORY_P7(HLoadKeyed, HValue*, HValue*, HValue*, HValue*, - ElementsKind, LoadKeyedHoleMode, int); - - bool is_fixed_typed_array() const { - return IsFixedTypedArrayElementsKind(elements_kind()); - } - HValue* elements() const { return OperandAt(0); } - HValue* key() const { return OperandAt(1); } - HValue* dependency() const { - DCHECK(HasDependency()); - return OperandAt(2); - } - bool HasDependency() const { return OperandAt(0) != OperandAt(2); } - HValue* backing_store_owner() const { - DCHECK(HasBackingStoreOwner()); - return OperandAt(3); - } - bool HasBackingStoreOwner() const { return OperandAt(0) != OperandAt(3); } - uint32_t base_offset() const { return BaseOffsetField::decode(bit_field_); } - bool TryIncreaseBaseOffset(uint32_t increase_by_value) override; - HValue* GetKey() override { return key(); } - void SetKey(HValue* key) override { SetOperandAt(1, key); } - bool IsDehoisted() const override { - return IsDehoistedField::decode(bit_field_); - } - void SetDehoisted(bool is_dehoisted) override { - bit_field_ = IsDehoistedField::update(bit_field_, is_dehoisted); - } - ElementsKind elements_kind() const override { - return ElementsKindField::decode(bit_field_); - } - LoadKeyedHoleMode hole_mode() const { - return HoleModeField::decode(bit_field_); - } - - Representation RequiredInputRepresentation(int index) override { - // kind_fast: tagged[int32] (none) - // kind_double: tagged[int32] (none) - // kind_fixed_typed_array: external[int32] 
(none) - // kind_external: external[int32] (none) - if (index == 0) { - return is_fixed_typed_array() ? Representation::External() - : Representation::Tagged(); - } - if (index == 1) { - return ArrayInstructionInterface::KeyedAccessIndexRequirement( - OperandAt(1)->representation()); - } - if (index == 2) { - return Representation::None(); - } - DCHECK_EQ(3, index); - return HasBackingStoreOwner() ? Representation::Tagged() - : Representation::None(); - } - - Representation observed_input_representation(int index) override { - return RequiredInputRepresentation(index); - } - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - bool UsesMustHandleHole() const; - bool AllUsesCanTreatHoleAsNaN() const; - bool RequiresHoleCheck() const; - - Range* InferRange(Zone* zone) override; - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyed) - - protected: - bool DataEquals(HValue* other) override { - if (!other->IsLoadKeyed()) return false; - HLoadKeyed* other_load = HLoadKeyed::cast(other); - - if (base_offset() != other_load->base_offset()) return false; - return elements_kind() == other_load->elements_kind(); - } - - private: - HLoadKeyed(HValue* obj, HValue* key, HValue* dependency, - HValue* backing_store_owner, ElementsKind elements_kind, - LoadKeyedHoleMode mode = NEVER_RETURN_HOLE, - int offset = kDefaultKeyedHeaderOffsetSentinel) - : bit_field_(0) { - offset = offset == kDefaultKeyedHeaderOffsetSentinel - ? GetDefaultHeaderSizeForElementsKind(elements_kind) - : offset; - bit_field_ = ElementsKindField::encode(elements_kind) | - HoleModeField::encode(mode) | - BaseOffsetField::encode(offset); - - SetOperandAt(0, obj); - SetOperandAt(1, key); - SetOperandAt(2, dependency != nullptr ? dependency : obj); - SetOperandAt(3, backing_store_owner != nullptr ? backing_store_owner : obj); - DCHECK_EQ(HasBackingStoreOwner(), is_fixed_typed_array()); - - if (!is_fixed_typed_array()) { - // I can detect the case between storing double (holey and fast) and - // smi/object by looking at elements_kind_. - DCHECK(IsFastSmiOrObjectElementsKind(elements_kind) || - IsFastDoubleElementsKind(elements_kind)); - - if (IsFastSmiOrObjectElementsKind(elements_kind)) { - if (IsFastSmiElementsKind(elements_kind) && - (!IsHoleyElementsKind(elements_kind) || - mode == NEVER_RETURN_HOLE)) { - set_type(HType::Smi()); - if (SmiValuesAre32Bits() && !RequiresHoleCheck()) { - set_representation(Representation::Integer32()); - } else { - set_representation(Representation::Smi()); - } - } else { - set_representation(Representation::Tagged()); - } - - SetDependsOnFlag(kArrayElements); - } else { - set_representation(Representation::Double()); - SetDependsOnFlag(kDoubleArrayElements); - } - } else { - if (elements_kind == FLOAT32_ELEMENTS || - elements_kind == FLOAT64_ELEMENTS) { - set_representation(Representation::Double()); - } else { - set_representation(Representation::Integer32()); - } - - if (is_fixed_typed_array()) { - SetDependsOnFlag(kExternalMemory); - SetDependsOnFlag(kTypedArrayElements); - } else { - UNREACHABLE(); - } - // Native code could change the specialized array. 
-      SetDependsOnFlag(kCalls);
-    }
-
-    SetFlag(kUseGVN);
-  }
-
-  bool IsDeletable() const override { return !RequiresHoleCheck(); }
-
-  // Establish some checks around our packed fields
-  enum LoadKeyedBits {
-    kBitsForElementsKind = 5,
-    kBitsForHoleMode = 2,
-    kBitsForBaseOffset = 24,
-    kBitsForIsDehoisted = 1,
-
-    kStartElementsKind = 0,
-    kStartHoleMode = kStartElementsKind + kBitsForElementsKind,
-    kStartBaseOffset = kStartHoleMode + kBitsForHoleMode,
-    kStartIsDehoisted = kStartBaseOffset + kBitsForBaseOffset
-  };
-
-  STATIC_ASSERT((kBitsForElementsKind + kBitsForHoleMode + kBitsForBaseOffset +
-                 kBitsForIsDehoisted) <= sizeof(uint32_t) * 8);
-  STATIC_ASSERT(kElementsKindCount <= (1 << kBitsForElementsKind));
-  class ElementsKindField:
-    public BitField<ElementsKind, kStartElementsKind, kBitsForElementsKind>
-    {};  // NOLINT
-  class HoleModeField:
-    public BitField<LoadKeyedHoleMode, kStartHoleMode, kBitsForHoleMode>
-    {};  // NOLINT
-  class BaseOffsetField:
-    public BitField<uint32_t, kStartBaseOffset, kBitsForBaseOffset>
-    {};  // NOLINT
-  class IsDehoistedField:
-    public BitField<bool, kStartIsDehoisted, kBitsForIsDehoisted>
-    {};  // NOLINT
-  uint32_t bit_field_;
-};
-
-
-// Indicates whether the store is a store to an entry that was previously
-// initialized or not.
-enum StoreFieldOrKeyedMode {
-  // The entry could be either previously initialized or not.
-  INITIALIZING_STORE,
-  // At the time of this store it is guaranteed that the entry is already
-  // initialized.
-  STORE_TO_INITIALIZED_ENTRY
-};
-
-
-class HStoreNamedField final : public HTemplateInstruction<3> {
- public:
-  DECLARE_INSTRUCTION_FACTORY_P3(HStoreNamedField, HValue*,
-                                 HObjectAccess, HValue*);
-  DECLARE_INSTRUCTION_FACTORY_P4(HStoreNamedField, HValue*,
-                                 HObjectAccess, HValue*, StoreFieldOrKeyedMode);
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
-
-  bool HasEscapingOperandAt(int index) override { return index == 1; }
-  bool HasOutOfBoundsAccess(int size) override {
-    return !access().IsInobject() || access().offset() >= size;
-  }
-  Representation RequiredInputRepresentation(int index) override {
-    if (index == 0 && access().IsExternalMemory()) {
-      // object must be external in case of external memory access
-      return Representation::External();
-    } else if (index == 1) {
-      if (field_representation().IsInteger8() ||
-          field_representation().IsUInteger8() ||
-          field_representation().IsInteger16() ||
-          field_representation().IsUInteger16() ||
-          field_representation().IsInteger32()) {
-        return Representation::Integer32();
-      } else if (field_representation().IsDouble()) {
-        return field_representation();
-      } else if (field_representation().IsSmi()) {
-        if (SmiValuesAre32Bits() &&
-            store_mode() == STORE_TO_INITIALIZED_ENTRY) {
-          return Representation::Integer32();
-        }
-        return field_representation();
-      } else if (field_representation().IsExternal()) {
-        return Representation::External();
-      }
-    }
-    return Representation::Tagged();
-  }
-  bool HandleSideEffectDominator(GVNFlag side_effect,
-                                 HValue* dominator) override {
-    DCHECK(side_effect == kNewSpacePromotion);
-    if (!FLAG_use_write_barrier_elimination) return false;
-    dominator_ = dominator;
-    return false;
-  }
-  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
-
-  HValue* object() const { return OperandAt(0); }
-  HValue* value() const { return OperandAt(1); }
-  HValue* transition() const { return OperandAt(2); }
-
-  HObjectAccess access() const { return access_; }
-  HValue* dominator() const { return dominator_; }
-  bool has_transition() const { return HasTransitionField::decode(bit_field_); }
-  StoreFieldOrKeyedMode store_mode() const {
-    return StoreModeField::decode(bit_field_);
-  }
-
-  Handle<Map> transition_map() const {
-    if (has_transition()) {
-      return Handle<Map>::cast(
-          HConstant::cast(transition())->handle(isolate()));
-    } else {
-      return Handle<Map>();
-    }
-  }
-
-  void SetTransition(HConstant* transition) {
-    DCHECK(!has_transition());  // Only set once.
-    SetOperandAt(2, transition);
-    bit_field_ = HasTransitionField::update(bit_field_, true);
-    SetChangesFlag(kMaps);
-  }
-
-  bool NeedsWriteBarrier() const {
-    DCHECK(!field_representation().IsDouble() ||
-           (FLAG_unbox_double_fields && access_.IsInobject()) ||
-           !has_transition());
-    if (field_representation().IsDouble()) return false;
-    if (field_representation().IsSmi()) return false;
-    if (field_representation().IsInteger32()) return false;
-    if (field_representation().IsExternal()) return false;
-    return StoringValueNeedsWriteBarrier(value()) &&
-           ReceiverObjectNeedsWriteBarrier(object(), value(), dominator());
-  }
-
-  bool NeedsWriteBarrierForMap() {
-    return ReceiverObjectNeedsWriteBarrier(object(), transition(),
-                                           dominator());
-  }
-
-  SmiCheck SmiCheckForWriteBarrier() const {
-    if (field_representation().IsHeapObject()) return OMIT_SMI_CHECK;
-    if (value()->type().IsHeapObject()) return OMIT_SMI_CHECK;
-    return INLINE_SMI_CHECK;
-  }
-
-  PointersToHereCheck PointersToHereCheckForValue() const {
-    return PointersToHereCheckForObject(value(), dominator());
-  }
-
-  Representation field_representation() const {
-    return access_.representation();
-  }
-
-  void UpdateValue(HValue* value) {
-    SetOperandAt(1, value);
-  }
-
-  bool CanBeReplacedWith(HStoreNamedField* that) const {
-    if (!this->access().Equals(that->access())) return false;
-    if (SmiValuesAre32Bits() &&
-        this->field_representation().IsSmi() &&
-        this->store_mode() == INITIALIZING_STORE &&
-        that->store_mode() == STORE_TO_INITIALIZED_ENTRY) {
-      // We cannot replace an initializing store to a smi field with a store to
-      // an initialized entry on 64-bit architectures (with 32-bit smis).
-      return false;
-    }
-    return true;
-  }
-
- private:
-  HStoreNamedField(HValue* obj, HObjectAccess access, HValue* val,
-                   StoreFieldOrKeyedMode store_mode = INITIALIZING_STORE)
-      : access_(access),
-        dominator_(NULL),
-        bit_field_(HasTransitionField::encode(false) |
-                   StoreModeField::encode(store_mode)) {
-    // Stores to a non existing in-object property are allowed only to the
-    // newly allocated objects (via HAllocate or HInnerAllocatedObject).
- DCHECK(!access.IsInobject() || access.existing_inobject_property() || - obj->IsAllocate() || obj->IsInnerAllocatedObject()); - SetOperandAt(0, obj); - SetOperandAt(1, val); - SetOperandAt(2, obj); - access.SetGVNFlags(this, STORE); - } - - class HasTransitionField : public BitField {}; - class StoreModeField : public BitField {}; - - HObjectAccess access_; - HValue* dominator_; - uint32_t bit_field_; -}; - -class HStoreKeyed final : public HTemplateInstruction<4>, - public ArrayInstructionInterface { - public: - DECLARE_INSTRUCTION_FACTORY_P5(HStoreKeyed, HValue*, HValue*, HValue*, - HValue*, ElementsKind); - DECLARE_INSTRUCTION_FACTORY_P6(HStoreKeyed, HValue*, HValue*, HValue*, - HValue*, ElementsKind, StoreFieldOrKeyedMode); - DECLARE_INSTRUCTION_FACTORY_P7(HStoreKeyed, HValue*, HValue*, HValue*, - HValue*, ElementsKind, StoreFieldOrKeyedMode, - int); - - Representation RequiredInputRepresentation(int index) override { - // kind_fast: tagged[int32] = tagged - // kind_double: tagged[int32] = double - // kind_smi : tagged[int32] = smi - // kind_fixed_typed_array: tagged[int32] = (double | int32) - // kind_external: external[int32] = (double | int32) - if (index == 0) { - return is_fixed_typed_array() ? Representation::External() - : Representation::Tagged(); - } else if (index == 1) { - return ArrayInstructionInterface::KeyedAccessIndexRequirement( - OperandAt(1)->representation()); - } else if (index == 2) { - return RequiredValueRepresentation(elements_kind(), store_mode()); - } - - DCHECK_EQ(3, index); - return HasBackingStoreOwner() ? Representation::Tagged() - : Representation::None(); - } - - static Representation RequiredValueRepresentation( - ElementsKind kind, StoreFieldOrKeyedMode mode) { - if (IsDoubleOrFloatElementsKind(kind)) { - return Representation::Double(); - } - - if (kind == FAST_SMI_ELEMENTS && SmiValuesAre32Bits() && - mode == STORE_TO_INITIALIZED_ENTRY) { - return Representation::Integer32(); - } - - if (IsFastSmiElementsKind(kind)) { - return Representation::Smi(); - } - - if (IsFixedTypedArrayElementsKind(kind)) { - return Representation::Integer32(); - } - return Representation::Tagged(); - } - - bool is_fixed_typed_array() const { - return IsFixedTypedArrayElementsKind(elements_kind()); - } - - Representation observed_input_representation(int index) override { - if (index != 2) return RequiredInputRepresentation(index); - if (IsUninitialized()) { - return Representation::None(); - } - Representation r = - RequiredValueRepresentation(elements_kind(), store_mode()); - // For fast object elements kinds, don't assume anything. 
- if (r.IsTagged()) return Representation::None(); - return r; - } - - HValue* elements() const { return OperandAt(0); } - HValue* key() const { return OperandAt(1); } - HValue* value() const { return OperandAt(2); } - HValue* backing_store_owner() const { - DCHECK(HasBackingStoreOwner()); - return OperandAt(3); - } - bool HasBackingStoreOwner() const { return OperandAt(0) != OperandAt(3); } - bool value_is_smi() const { return IsFastSmiElementsKind(elements_kind()); } - StoreFieldOrKeyedMode store_mode() const { - return StoreModeField::decode(bit_field_); - } - ElementsKind elements_kind() const override { - return ElementsKindField::decode(bit_field_); - } - uint32_t base_offset() const { return base_offset_; } - bool TryIncreaseBaseOffset(uint32_t increase_by_value) override; - HValue* GetKey() override { return key(); } - void SetKey(HValue* key) override { SetOperandAt(1, key); } - bool IsDehoisted() const override { - return IsDehoistedField::decode(bit_field_); - } - void SetDehoisted(bool is_dehoisted) override { - bit_field_ = IsDehoistedField::update(bit_field_, is_dehoisted); - } - bool IsUninitialized() { return IsUninitializedField::decode(bit_field_); } - void SetUninitialized(bool is_uninitialized) { - bit_field_ = IsUninitializedField::update(bit_field_, is_uninitialized); - } - - bool IsConstantHoleStore() { - return value()->IsConstant() && HConstant::cast(value())->IsTheHole(); - } - - bool HandleSideEffectDominator(GVNFlag side_effect, - HValue* dominator) override { - DCHECK(side_effect == kNewSpacePromotion); - dominator_ = dominator; - return false; - } - - HValue* dominator() const { return dominator_; } - - bool NeedsWriteBarrier() { - if (value_is_smi()) { - return false; - } else { - return StoringValueNeedsWriteBarrier(value()) && - ReceiverObjectNeedsWriteBarrier(elements(), value(), dominator()); - } - } - - PointersToHereCheck PointersToHereCheckForValue() const { - return PointersToHereCheckForObject(value(), dominator()); - } - - bool NeedsCanonicalization(); - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - DECLARE_CONCRETE_INSTRUCTION(StoreKeyed) - - private: - HStoreKeyed(HValue* obj, HValue* key, HValue* val, - HValue* backing_store_owner, ElementsKind elements_kind, - StoreFieldOrKeyedMode store_mode = INITIALIZING_STORE, - int offset = kDefaultKeyedHeaderOffsetSentinel) - : base_offset_(offset == kDefaultKeyedHeaderOffsetSentinel - ? GetDefaultHeaderSizeForElementsKind(elements_kind) - : offset), - bit_field_(IsDehoistedField::encode(false) | - IsUninitializedField::encode(false) | - StoreModeField::encode(store_mode) | - ElementsKindField::encode(elements_kind)), - dominator_(NULL) { - SetOperandAt(0, obj); - SetOperandAt(1, key); - SetOperandAt(2, val); - SetOperandAt(3, backing_store_owner != nullptr ? backing_store_owner : obj); - DCHECK_EQ(HasBackingStoreOwner(), is_fixed_typed_array()); - - if (IsFastObjectElementsKind(elements_kind)) { - SetFlag(kTrackSideEffectDominators); - SetDependsOnFlag(kNewSpacePromotion); - } - if (IsFastDoubleElementsKind(elements_kind)) { - SetChangesFlag(kDoubleArrayElements); - } else if (IsFastSmiElementsKind(elements_kind)) { - SetChangesFlag(kArrayElements); - } else if (is_fixed_typed_array()) { - SetChangesFlag(kTypedArrayElements); - SetChangesFlag(kExternalMemory); - SetFlag(kTruncatingToNumber); - } else { - SetChangesFlag(kArrayElements); - } - - // {UNSIGNED_,}{BYTE,SHORT,INT}_ELEMENTS are truncating. 
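As RequiredValueRepresentation above spells out, the representation demanded of a keyed store's value follows directly from the elements kind, and the truncation flags set in the constructor follow the same classification. A standalone sketch of that mapping, with hypothetical simplified enums and the 32-bit-smi special case left out:

#include <cassert>

// Hypothetical simplified enums; the real ElementsKind list is much longer.
enum class Kind { kFastSmi, kFastObject, kFastDouble, kTypedInt, kTypedFloat };
enum class Rep { kSmi, kTagged, kDouble, kInteger32 };

Rep RequiredValueRep(Kind kind) {
  switch (kind) {
    case Kind::kFastDouble:
    case Kind::kTypedFloat:
      return Rep::kDouble;     // Unboxed double storage.
    case Kind::kFastSmi:
      return Rep::kSmi;        // Small integers stay smis.
    case Kind::kTypedInt:
      return Rep::kInteger32;  // Typed arrays store raw 32-bit lanes.
    case Kind::kFastObject:
      return Rep::kTagged;     // Arbitrary JS values.
  }
  return Rep::kTagged;
}

int main() {
  assert(RequiredValueRep(Kind::kFastDouble) == Rep::kDouble);
  assert(RequiredValueRep(Kind::kTypedInt) == Rep::kInteger32);
}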
- if (elements_kind >= UINT8_ELEMENTS && elements_kind <= INT32_ELEMENTS) { - SetFlag(kTruncatingToInt32); - } - } - - class IsDehoistedField : public BitField {}; - class IsUninitializedField : public BitField {}; - class StoreModeField : public BitField {}; - class ElementsKindField : public BitField {}; - - uint32_t base_offset_; - uint32_t bit_field_; - HValue* dominator_; -}; - -class HTransitionElementsKind final : public HTemplateInstruction<2> { - public: - inline static HTransitionElementsKind* New(Isolate* isolate, Zone* zone, - HValue* context, HValue* object, - Handle original_map, - Handle transitioned_map) { - return new(zone) HTransitionElementsKind(context, object, - original_map, transitioned_map); - } - - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - HValue* object() const { return OperandAt(0); } - HValue* context() const { return OperandAt(1); } - Unique original_map() const { return original_map_; } - Unique transitioned_map() const { return transitioned_map_; } - ElementsKind from_kind() const { - return FromElementsKindField::decode(bit_field_); - } - ElementsKind to_kind() const { - return ToElementsKindField::decode(bit_field_); - } - bool map_is_stable() const { return MapIsStableField::decode(bit_field_); } - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind) - - protected: - bool DataEquals(HValue* other) override { - HTransitionElementsKind* instr = HTransitionElementsKind::cast(other); - return original_map_ == instr->original_map_ && - transitioned_map_ == instr->transitioned_map_; - } - - int RedefinedOperandIndex() override { return 0; } - - private: - HTransitionElementsKind(HValue* context, HValue* object, - Handle original_map, - Handle transitioned_map) - : original_map_(Unique(original_map)), - transitioned_map_(Unique(transitioned_map)), - bit_field_( - FromElementsKindField::encode(original_map->elements_kind()) | - ToElementsKindField::encode(transitioned_map->elements_kind()) | - MapIsStableField::encode(transitioned_map->is_stable())) { - SetOperandAt(0, object); - SetOperandAt(1, context); - SetFlag(kUseGVN); - SetChangesFlag(kElementsKind); - if (!IsSimpleMapChangeTransition(from_kind(), to_kind())) { - SetChangesFlag(kElementsPointer); - SetChangesFlag(kNewSpacePromotion); - } - set_representation(Representation::Tagged()); - } - - class FromElementsKindField : public BitField {}; - class ToElementsKindField : public BitField {}; - class MapIsStableField : public BitField {}; - - Unique original_map_; - Unique transitioned_map_; - uint32_t bit_field_; -}; - - -class HStringAdd final : public HBinaryOperation { - public: - static HInstruction* New( - Isolate* isolate, Zone* zone, HValue* context, HValue* left, - HValue* right, PretenureFlag pretenure_flag = NOT_TENURED, - StringAddFlags flags = STRING_ADD_CHECK_BOTH, - Handle allocation_site = Handle::null()); - - StringAddFlags flags() const { return flags_; } - PretenureFlag pretenure_flag() const { return pretenure_flag_; } - - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - DECLARE_CONCRETE_INSTRUCTION(StringAdd) - - protected: - bool DataEquals(HValue* other) override { - return flags_ == HStringAdd::cast(other)->flags_ && - pretenure_flag_ == HStringAdd::cast(other)->pretenure_flag_; - } - - private: - HStringAdd(HValue* 
context, HValue* left, HValue* right, - PretenureFlag pretenure_flag, StringAddFlags flags, - Handle allocation_site) - : HBinaryOperation(context, left, right, HType::String()), - flags_(flags), - pretenure_flag_(pretenure_flag) { - set_representation(Representation::Tagged()); - if ((flags & STRING_ADD_CONVERT) == STRING_ADD_CONVERT) { - SetAllSideEffects(); - ClearFlag(kUseGVN); - } else { - SetChangesFlag(kNewSpacePromotion); - SetFlag(kUseGVN); - } - SetDependsOnFlag(kMaps); - if (FLAG_trace_pretenuring) { - PrintF("HStringAdd with AllocationSite %p %s\n", - allocation_site.is_null() - ? static_cast(NULL) - : static_cast(*allocation_site), - pretenure_flag == TENURED ? "tenured" : "not tenured"); - } - } - - bool IsDeletable() const final { - return (flags_ & STRING_ADD_CONVERT) != STRING_ADD_CONVERT; - } - - const StringAddFlags flags_; - const PretenureFlag pretenure_flag_; -}; - - -class HStringCharCodeAt final : public HTemplateInstruction<3> { - public: - DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HStringCharCodeAt, - HValue*, - HValue*); - - Representation RequiredInputRepresentation(int index) override { - // The index is supposed to be Integer32. - return index == 2 - ? Representation::Integer32() - : Representation::Tagged(); - } - - HValue* context() const { return OperandAt(0); } - HValue* string() const { return OperandAt(1); } - HValue* index() const { return OperandAt(2); } - - DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt) - - protected: - bool DataEquals(HValue* other) override { return true; } - - Range* InferRange(Zone* zone) override { - return new(zone) Range(0, String::kMaxUtf16CodeUnit); - } - - private: - HStringCharCodeAt(HValue* context, HValue* string, HValue* index) { - SetOperandAt(0, context); - SetOperandAt(1, string); - SetOperandAt(2, index); - set_representation(Representation::Integer32()); - SetFlag(kUseGVN); - SetDependsOnFlag(kMaps); - SetDependsOnFlag(kStringChars); - SetChangesFlag(kNewSpacePromotion); - } - - // No side effects: runtime function assumes string + number inputs. - bool IsDeletable() const override { return true; } -}; - - -class HStringCharFromCode final : public HTemplateInstruction<2> { - public: - static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context, - HValue* char_code); - - Representation RequiredInputRepresentation(int index) override { - return index == 0 - ? 
Representation::Tagged() - : Representation::Integer32(); - } - - HValue* context() const { return OperandAt(0); } - HValue* value() const { return OperandAt(1); } - - bool DataEquals(HValue* other) override { return true; } - - DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode) - - private: - HStringCharFromCode(HValue* context, HValue* char_code) - : HTemplateInstruction<2>(HType::String()) { - SetOperandAt(0, context); - SetOperandAt(1, char_code); - set_representation(Representation::Tagged()); - SetFlag(kUseGVN); - SetChangesFlag(kNewSpacePromotion); - } - - bool IsDeletable() const override { - return !value()->ToNumberCanBeObserved(); - } -}; - - -class HTypeof final : public HTemplateInstruction<2> { - public: - DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HTypeof, HValue*); - - HValue* context() const { return OperandAt(0); } - HValue* value() const { return OperandAt(1); } - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - DECLARE_CONCRETE_INSTRUCTION(Typeof) - - private: - explicit HTypeof(HValue* context, HValue* value) { - SetOperandAt(0, context); - SetOperandAt(1, value); - set_representation(Representation::Tagged()); - } - - bool IsDeletable() const override { return true; } -}; - - -class HTrapAllocationMemento final : public HTemplateInstruction<1> { - public: - DECLARE_INSTRUCTION_FACTORY_P1(HTrapAllocationMemento, HValue*); - - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - HValue* object() { return OperandAt(0); } - - DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento) - - private: - explicit HTrapAllocationMemento(HValue* obj) { - SetOperandAt(0, obj); - } -}; - - -class HMaybeGrowElements final : public HTemplateInstruction<5> { - public: - DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P6(HMaybeGrowElements, HValue*, - HValue*, HValue*, HValue*, bool, - ElementsKind); - - Representation RequiredInputRepresentation(int index) override { - if (index < 3) { - return Representation::Tagged(); - } - DCHECK(index == 3 || index == 4); - return Representation::Integer32(); - } - - HValue* context() const { return OperandAt(0); } - HValue* object() const { return OperandAt(1); } - HValue* elements() const { return OperandAt(2); } - HValue* key() const { return OperandAt(3); } - HValue* current_capacity() const { return OperandAt(4); } - - bool is_js_array() const { return is_js_array_; } - ElementsKind kind() const { return kind_; } - - DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements) - - protected: - bool DataEquals(HValue* other) override { return true; } - - private: - explicit HMaybeGrowElements(HValue* context, HValue* object, HValue* elements, - HValue* key, HValue* current_capacity, - bool is_js_array, ElementsKind kind) { - is_js_array_ = is_js_array; - kind_ = kind; - - SetOperandAt(0, context); - SetOperandAt(1, object); - SetOperandAt(2, elements); - SetOperandAt(3, key); - SetOperandAt(4, current_capacity); - - SetFlag(kUseGVN); - SetChangesFlag(kElementsPointer); - SetChangesFlag(kNewSpacePromotion); - set_representation(Representation::Tagged()); - } - - bool is_js_array_; - ElementsKind kind_; -}; - - -class HSeqStringGetChar final : public HTemplateInstruction<2> { - public: - static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context, - String::Encoding encoding, HValue* string, - HValue* index); - - Representation RequiredInputRepresentation(int index) override 
{ - return (index == 0) ? Representation::Tagged() - : Representation::Integer32(); - } - - String::Encoding encoding() const { return encoding_; } - HValue* string() const { return OperandAt(0); } - HValue* index() const { return OperandAt(1); } - - DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar) - - protected: - bool DataEquals(HValue* other) override { - return encoding() == HSeqStringGetChar::cast(other)->encoding(); - } - - Range* InferRange(Zone* zone) override { - if (encoding() == String::ONE_BYTE_ENCODING) { - return new(zone) Range(0, String::kMaxOneByteCharCode); - } else { - DCHECK_EQ(String::TWO_BYTE_ENCODING, encoding()); - return new(zone) Range(0, String::kMaxUtf16CodeUnit); - } - } - - private: - HSeqStringGetChar(String::Encoding encoding, - HValue* string, - HValue* index) : encoding_(encoding) { - SetOperandAt(0, string); - SetOperandAt(1, index); - set_representation(Representation::Integer32()); - SetFlag(kUseGVN); - SetDependsOnFlag(kStringChars); - } - - bool IsDeletable() const override { return true; } - - String::Encoding encoding_; -}; - - -class HSeqStringSetChar final : public HTemplateInstruction<4> { - public: - DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4( - HSeqStringSetChar, String::Encoding, - HValue*, HValue*, HValue*); - - String::Encoding encoding() { return encoding_; } - HValue* context() { return OperandAt(0); } - HValue* string() { return OperandAt(1); } - HValue* index() { return OperandAt(2); } - HValue* value() { return OperandAt(3); } - - Representation RequiredInputRepresentation(int index) override { - return (index <= 1) ? Representation::Tagged() - : Representation::Integer32(); - } - - DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar) - - private: - HSeqStringSetChar(HValue* context, - String::Encoding encoding, - HValue* string, - HValue* index, - HValue* value) : encoding_(encoding) { - SetOperandAt(0, context); - SetOperandAt(1, string); - SetOperandAt(2, index); - SetOperandAt(3, value); - set_representation(Representation::Tagged()); - SetChangesFlag(kStringChars); - } - - String::Encoding encoding_; -}; - - -class HCheckMapValue final : public HTemplateInstruction<2> { - public: - DECLARE_INSTRUCTION_FACTORY_P2(HCheckMapValue, HValue*, HValue*); - - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - HType CalculateInferredType() override { - if (value()->type().IsHeapObject()) return value()->type(); - return HType::HeapObject(); - } - - HValue* value() const { return OperandAt(0); } - HValue* map() const { return OperandAt(1); } - - HValue* Canonicalize() override; - - DECLARE_CONCRETE_INSTRUCTION(CheckMapValue) - - protected: - int RedefinedOperandIndex() override { return 0; } - - bool DataEquals(HValue* other) override { return true; } - - private: - HCheckMapValue(HValue* value, HValue* map) - : HTemplateInstruction<2>(HType::HeapObject()) { - SetOperandAt(0, value); - SetOperandAt(1, map); - set_representation(Representation::Tagged()); - SetFlag(kUseGVN); - SetDependsOnFlag(kMaps); - SetDependsOnFlag(kElementsKind); - } -}; - - -class HForInPrepareMap final : public HTemplateInstruction<2> { - public: - DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HForInPrepareMap, HValue*); - - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - HValue* context() const { return OperandAt(0); } - HValue* enumerable() const { return OperandAt(1); } - - std::ostream& 
PrintDataTo(std::ostream& os) const override; // NOLINT - - HType CalculateInferredType() override { return HType::Tagged(); } - - DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap); - - private: - HForInPrepareMap(HValue* context, - HValue* object) { - SetOperandAt(0, context); - SetOperandAt(1, object); - set_representation(Representation::Tagged()); - SetAllSideEffects(); - } -}; - - -class HForInCacheArray final : public HTemplateInstruction<2> { - public: - DECLARE_INSTRUCTION_FACTORY_P3(HForInCacheArray, HValue*, HValue*, int); - - Representation RequiredInputRepresentation(int index) override { - return Representation::Tagged(); - } - - HValue* enumerable() const { return OperandAt(0); } - HValue* map() const { return OperandAt(1); } - int idx() const { return idx_; } - - HForInCacheArray* index_cache() { - return index_cache_; - } - - void set_index_cache(HForInCacheArray* index_cache) { - index_cache_ = index_cache; - } - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - HType CalculateInferredType() override { return HType::Tagged(); } - - DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray); - - private: - HForInCacheArray(HValue* enumerable, - HValue* keys, - int idx) : idx_(idx) { - SetOperandAt(0, enumerable); - SetOperandAt(1, keys); - set_representation(Representation::Tagged()); - } - - int idx_; - HForInCacheArray* index_cache_; -}; - - -class HLoadFieldByIndex final : public HTemplateInstruction<2> { - public: - DECLARE_INSTRUCTION_FACTORY_P2(HLoadFieldByIndex, HValue*, HValue*); - - HLoadFieldByIndex(HValue* object, - HValue* index) { - SetOperandAt(0, object); - SetOperandAt(1, index); - SetChangesFlag(kNewSpacePromotion); - set_representation(Representation::Tagged()); - } - - Representation RequiredInputRepresentation(int index) override { - if (index == 1) { - return Representation::Smi(); - } else { - return Representation::Tagged(); - } - } - - HValue* object() const { return OperandAt(0); } - HValue* index() const { return OperandAt(1); } - - std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT - - HType CalculateInferredType() override { return HType::Tagged(); } - - DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex); - - private: - bool IsDeletable() const override { return true; } -}; - -#undef DECLARE_INSTRUCTION -#undef DECLARE_CONCRETE_INSTRUCTION - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_HYDROGEN_INSTRUCTIONS_H_ diff --git a/src/crankshaft/hydrogen-load-elimination.cc b/src/crankshaft/hydrogen-load-elimination.cc deleted file mode 100644 index 99f4947a84..0000000000 --- a/src/crankshaft/hydrogen-load-elimination.cc +++ /dev/null @@ -1,512 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/hydrogen-load-elimination.h" - -#include "src/crankshaft/hydrogen-alias-analysis.h" -#include "src/crankshaft/hydrogen-flow-engine.h" -#include "src/crankshaft/hydrogen-instructions.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - -#define GLOBAL true -#define TRACE(x) if (FLAG_trace_load_elimination) PrintF x - -static const int kMaxTrackedFields = 16; -static const int kMaxTrackedObjects = 5; - -// An element in the field approximation list. -class HFieldApproximation : public ZoneObject { - public: // Just a data blob. 
- HValue* object_; - HValue* last_value_; - HFieldApproximation* next_; - - // Recursively copy the entire linked list of field approximations. - HFieldApproximation* Copy(Zone* zone) { - HFieldApproximation* copy = new(zone) HFieldApproximation(); - copy->object_ = this->object_; - copy->last_value_ = this->last_value_; - copy->next_ = this->next_ == NULL ? NULL : this->next_->Copy(zone); - return copy; - } -}; - - -// The main datastructure used during load/store elimination. Each in-object -// field is tracked separately. For each field, store a list of known field -// values for known objects. -class HLoadEliminationTable : public ZoneObject { - public: - HLoadEliminationTable(Zone* zone, HAliasAnalyzer* aliasing) - : zone_(zone), fields_(kMaxTrackedFields, zone), aliasing_(aliasing) { } - - // The main processing of instructions. - HLoadEliminationTable* Process(HInstruction* instr, Zone* zone) { - switch (instr->opcode()) { - case HValue::kLoadNamedField: { - HLoadNamedField* l = HLoadNamedField::cast(instr); - TRACE((" process L%d field %d (o%d)\n", - instr->id(), - FieldOf(l->access()), - l->object()->ActualValue()->id())); - HValue* result = load(l); - if (result != instr && l->CanBeReplacedWith(result)) { - // The load can be replaced with a previous load or a value. - TRACE((" replace L%d -> v%d\n", instr->id(), result->id())); - instr->DeleteAndReplaceWith(result); - } - break; - } - case HValue::kStoreNamedField: { - HStoreNamedField* s = HStoreNamedField::cast(instr); - TRACE((" process S%d field %d (o%d) = v%d\n", - instr->id(), - FieldOf(s->access()), - s->object()->ActualValue()->id(), - s->value()->id())); - HValue* result = store(s); - if (result == NULL) { - // The store is redundant. Remove it. - TRACE((" remove S%d\n", instr->id())); - instr->DeleteAndReplaceWith(NULL); - } - break; - } - case HValue::kTransitionElementsKind: { - HTransitionElementsKind* t = HTransitionElementsKind::cast(instr); - HValue* object = t->object()->ActualValue(); - KillFieldInternal(object, FieldOf(JSArray::kElementsOffset), NULL); - KillFieldInternal(object, FieldOf(JSObject::kMapOffset), NULL); - break; - } - default: { - if (instr->CheckChangesFlag(kInobjectFields)) { - TRACE((" kill-all i%d\n", instr->id())); - Kill(); - break; - } - if (instr->CheckChangesFlag(kMaps)) { - TRACE((" kill-maps i%d\n", instr->id())); - KillOffset(JSObject::kMapOffset); - } - if (instr->CheckChangesFlag(kElementsKind)) { - TRACE((" kill-elements-kind i%d\n", instr->id())); - KillOffset(JSObject::kMapOffset); - KillOffset(JSObject::kElementsOffset); - } - if (instr->CheckChangesFlag(kElementsPointer)) { - TRACE((" kill-elements i%d\n", instr->id())); - KillOffset(JSObject::kElementsOffset); - } - if (instr->CheckChangesFlag(kOsrEntries)) { - TRACE((" kill-osr i%d\n", instr->id())); - Kill(); - } - } - // Improvements possible: - // - learn from HCheckMaps for field 0 - // - remove unobservable stores (write-after-write) - // - track cells - // - track globals - // - track roots - } - return this; - } - - // Support for global analysis with HFlowEngine: Merge given state with - // the other incoming state. 
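At its core the table remembers, per tracked field, the last value known to live in that field of each object; a load is redundant when the same (object, field) pair already has a remembered value, and a store is redundant when it writes the value already remembered. A compact standalone sketch of that bookkeeping, with aliasing approximated by pointer identity (the real pass uses HAliasAnalyzer, dominance checks, and a bounded per-field list):

#include <cassert>
#include <map>

struct Value {};  // Hypothetical stand-in for an HValue*.

class FieldTable {
 public:
  // Returns the remembered value for a load, or records `load_result`
  // (the load itself) and returns it when nothing is known yet.
  Value* Load(Value* object, int field, Value* load_result) {
    Value*& slot = last_known_[object][field];
    if (slot != nullptr) return slot;  // Redundant load: forward known value.
    slot = load_result;
    return load_result;
  }

  // Returns false when the store is redundant (same value already there).
  bool Store(Value* object, int field, Value* value) {
    Value*& slot = last_known_[object][field];
    if (slot == value) return false;
    slot = value;
    return true;
  }

  void KillAll() { last_known_.clear(); }

 private:
  std::map<Value*, std::map<int, Value*>> last_known_;
};

int main() {
  Value obj, v, load;
  FieldTable table;
  assert(table.Store(&obj, 0, &v));          // First store is kept.
  assert(!table.Store(&obj, 0, &v));         // Same value again: redundant.
  assert(table.Load(&obj, 0, &load) == &v);  // Load forwards the stored value.
}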
- static HLoadEliminationTable* Merge(HLoadEliminationTable* succ_state, - HBasicBlock* succ_block, - HLoadEliminationTable* pred_state, - HBasicBlock* pred_block, - Zone* zone) { - DCHECK(pred_state != NULL); - if (succ_state == NULL) { - return pred_state->Copy(succ_block, pred_block, zone); - } else { - return succ_state->Merge(succ_block, pred_state, pred_block, zone); - } - } - - // Support for global analysis with HFlowEngine: Given state merged with all - // the other incoming states, prepare it for use. - static HLoadEliminationTable* Finish(HLoadEliminationTable* state, - HBasicBlock* block, - Zone* zone) { - DCHECK(state != NULL); - return state; - } - - private: - // Copy state to successor block. - HLoadEliminationTable* Copy(HBasicBlock* succ, HBasicBlock* from_block, - Zone* zone) { - HLoadEliminationTable* copy = - new(zone) HLoadEliminationTable(zone, aliasing_); - copy->EnsureFields(fields_.length()); - for (int i = 0; i < fields_.length(); i++) { - copy->fields_[i] = fields_[i] == NULL ? NULL : fields_[i]->Copy(zone); - } - if (FLAG_trace_load_elimination) { - TRACE((" copy-to B%d\n", succ->block_id())); - copy->Print(); - } - return copy; - } - - // Merge this state with the other incoming state. - HLoadEliminationTable* Merge(HBasicBlock* succ, HLoadEliminationTable* that, - HBasicBlock* that_block, Zone* zone) { - if (that->fields_.length() < fields_.length()) { - // Drop fields not in the other table. - fields_.Rewind(that->fields_.length()); - } - for (int i = 0; i < fields_.length(); i++) { - // Merge the field approximations for like fields. - HFieldApproximation* approx = fields_[i]; - HFieldApproximation* prev = NULL; - while (approx != NULL) { - // TODO(titzer): Merging is O(N * M); sort? - HFieldApproximation* other = that->Find(approx->object_, i); - if (other == NULL || !Equal(approx->last_value_, other->last_value_)) { - // Kill an entry that doesn't agree with the other value. - if (prev != NULL) { - prev->next_ = approx->next_; - } else { - fields_[i] = approx->next_; - } - approx = approx->next_; - continue; - } - prev = approx; - approx = approx->next_; - } - } - if (FLAG_trace_load_elimination) { - TRACE((" merge-to B%d\n", succ->block_id())); - Print(); - } - return this; - } - - friend class HLoadEliminationEffects; // Calls Kill() and others. - friend class HLoadEliminationPhase; - - private: - // Process a load instruction, updating internal table state. If a previous - // load or store for this object and field exists, return the new value with - // which the load should be replaced. Otherwise, return {instr}. - HValue* load(HLoadNamedField* instr) { - // There must be no loads from non observable in-object properties. - DCHECK(!instr->access().IsInobject() || - instr->access().existing_inobject_property()); - - int field = FieldOf(instr->access()); - if (field < 0) return instr; - - HValue* object = instr->object()->ActualValue(); - HFieldApproximation* approx = FindOrCreate(object, field); - - if (approx->last_value_ == NULL) { - // Load is not redundant. Fill out a new entry. - approx->last_value_ = instr; - return instr; - } else if (approx->last_value_->block()->EqualToOrDominates( - instr->block())) { - // Eliminate the load. Reuse previously stored value or load instruction. - return approx->last_value_; - } else { - return instr; - } - } - - // Process a store instruction, updating internal table state. If a previous - // store to the same object and field makes this store redundant (e.g. 
because - // the stored values are the same), return NULL indicating that this store - // instruction is redundant. Otherwise, return {instr}. - HValue* store(HStoreNamedField* instr) { - if (instr->access().IsInobject() && - !instr->access().existing_inobject_property()) { - TRACE((" skipping non existing property initialization store\n")); - return instr; - } - - int field = FieldOf(instr->access()); - if (field < 0) return KillIfMisaligned(instr); - - HValue* object = instr->object()->ActualValue(); - HValue* value = instr->value(); - - if (instr->has_transition()) { - // A transition introduces a new field and alters the map of the object. - // Since the field in the object is new, it cannot alias existing entries. - KillFieldInternal(object, FieldOf(JSObject::kMapOffset), NULL); - } else { - // Kill non-equivalent may-alias entries. - KillFieldInternal(object, field, value); - } - HFieldApproximation* approx = FindOrCreate(object, field); - - if (Equal(approx->last_value_, value)) { - // The store is redundant because the field already has this value. - return NULL; - } else { - // The store is not redundant. Update the entry. - approx->last_value_ = value; - return instr; - } - } - - // Kill everything in this table. - void Kill() { - fields_.Rewind(0); - } - - // Kill all entries matching the given offset. - void KillOffset(int offset) { - int field = FieldOf(offset); - if (field >= 0 && field < fields_.length()) { - fields_[field] = NULL; - } - } - - // Kill all entries aliasing the given store. - void KillStore(HStoreNamedField* s) { - int field = FieldOf(s->access()); - if (field >= 0) { - KillFieldInternal(s->object()->ActualValue(), field, s->value()); - } else { - KillIfMisaligned(s); - } - } - - // Kill multiple entries in the case of a misaligned store. - HValue* KillIfMisaligned(HStoreNamedField* instr) { - HObjectAccess access = instr->access(); - if (access.IsInobject()) { - int offset = access.offset(); - if ((offset % kPointerSize) != 0) { - // Kill the field containing the first word of the access. - HValue* object = instr->object()->ActualValue(); - int field = offset / kPointerSize; - KillFieldInternal(object, field, NULL); - - // Kill the next field in case of overlap. - int size = access.representation().size(); - int next_field = (offset + size - 1) / kPointerSize; - if (next_field != field) KillFieldInternal(object, next_field, NULL); - } - } - return instr; - } - - // Find an entry for the given object and field pair. - HFieldApproximation* Find(HValue* object, int field) { - // Search for a field approximation for this object. - HFieldApproximation* approx = fields_[field]; - while (approx != NULL) { - if (aliasing_->MustAlias(object, approx->object_)) return approx; - approx = approx->next_; - } - return NULL; - } - - // Find or create an entry for the given object and field pair. - HFieldApproximation* FindOrCreate(HValue* object, int field) { - EnsureFields(field + 1); - - // Search for a field approximation for this object. - HFieldApproximation* approx = fields_[field]; - int count = 0; - while (approx != NULL) { - if (aliasing_->MustAlias(object, approx->object_)) return approx; - count++; - approx = approx->next_; - } - - if (count >= kMaxTrackedObjects) { - // Pull the last entry off the end and repurpose it for this object. - approx = ReuseLastApproximation(field); - } else { - // Allocate a new entry. - approx = new(zone_) HFieldApproximation(); - } - - // Insert the entry at the head of the list. 
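Fields are tracked by word index, so an in-object byte offset maps to offset / kPointerSize, and a misaligned store can touch two adjacent words, both of which must be invalidated. A standalone sketch of that index arithmetic, with the word size hard-coded to 8 purely for illustration:

#include <cassert>
#include <utility>

constexpr int kPointerSize = 8;        // Assumed 64-bit word size.
constexpr int kMaxTrackedFields = 16;

// -1 means "not tracked": out of range or not word-aligned.
int FieldOf(int offset) {
  if (offset >= kMaxTrackedFields * kPointerSize) return -1;
  if (offset % kPointerSize != 0) return -1;
  return offset / kPointerSize;
}

// First and last word indexes overlapped by a store of `size` bytes at
// `offset`; a misaligned store must kill every word it straddles.
std::pair<int, int> WordsTouched(int offset, int size) {
  return {offset / kPointerSize, (offset + size - 1) / kPointerSize};
}

int main() {
  assert(FieldOf(16) == 2);
  assert(FieldOf(12) == -1);  // Misaligned: not tracked.
  assert(WordsTouched(12, 8) == std::make_pair(1, 2));  // Straddles two words.
}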
- approx->object_ = object; - approx->last_value_ = NULL; - approx->next_ = fields_[field]; - fields_[field] = approx; - - return approx; - } - - // Kill all entries for a given field that _may_ alias the given object - // and do _not_ have the given value. - void KillFieldInternal(HValue* object, int field, HValue* value) { - if (field >= fields_.length()) return; // Nothing to do. - - HFieldApproximation* approx = fields_[field]; - HFieldApproximation* prev = NULL; - while (approx != NULL) { - if (aliasing_->MayAlias(object, approx->object_)) { - if (!Equal(approx->last_value_, value)) { - // Kill an aliasing entry that doesn't agree on the value. - if (prev != NULL) { - prev->next_ = approx->next_; - } else { - fields_[field] = approx->next_; - } - approx = approx->next_; - continue; - } - } - prev = approx; - approx = approx->next_; - } - } - - bool Equal(HValue* a, HValue* b) { - if (a == b) return true; - if (a != NULL && b != NULL && a->CheckFlag(HValue::kUseGVN)) { - return a->Equals(b); - } - return false; - } - - // Remove the last approximation for a field so that it can be reused. - // We reuse the last entry because it was the first inserted and is thus - // farthest away from the current instruction. - HFieldApproximation* ReuseLastApproximation(int field) { - HFieldApproximation* approx = fields_[field]; - DCHECK(approx != NULL); - - HFieldApproximation* prev = NULL; - while (approx->next_ != NULL) { - prev = approx; - approx = approx->next_; - } - if (prev != NULL) prev->next_ = NULL; - return approx; - } - - // Compute the field index for the given object access; -1 if not tracked. - int FieldOf(HObjectAccess access) { - return access.IsInobject() ? FieldOf(access.offset()) : -1; - } - - // Compute the field index for the given in-object offset; -1 if not tracked. - int FieldOf(int offset) { - if (offset >= kMaxTrackedFields * kPointerSize) return -1; - if ((offset % kPointerSize) != 0) return -1; // Ignore misaligned accesses. - return offset / kPointerSize; - } - - // Ensure internal storage for the given number of fields. - void EnsureFields(int num_fields) { - if (fields_.length() < num_fields) { - fields_.AddBlock(NULL, num_fields - fields_.length(), zone_); - } - } - - // Print this table to stdout. - void Print() { - for (int i = 0; i < fields_.length(); i++) { - PrintF(" field %d: ", i); - for (HFieldApproximation* a = fields_[i]; a != NULL; a = a->next_) { - PrintF("[o%d =", a->object_->id()); - if (a->last_value_ != NULL) PrintF(" v%d", a->last_value_->id()); - PrintF("] "); - } - PrintF("\n"); - } - } - - Zone* zone_; - ZoneList fields_; - HAliasAnalyzer* aliasing_; -}; - - -// Support for HFlowEngine: collect store effects within loops. -class HLoadEliminationEffects : public ZoneObject { - public: - explicit HLoadEliminationEffects(Zone* zone) - : zone_(zone), stores_(5, zone) { } - - inline bool Disabled() { - return false; // Effects are _not_ disabled. - } - - // Process a possibly side-effecting instruction. - void Process(HInstruction* instr, Zone* zone) { - if (instr->IsStoreNamedField()) { - stores_.Add(HStoreNamedField::cast(instr), zone_); - } else { - flags_.Add(instr->ChangesFlags()); - } - } - - // Apply these effects to the given load elimination table. - void Apply(HLoadEliminationTable* table) { - // Loads must not be hoisted past the OSR entry, therefore we kill - // everything if we see an OSR entry. 
- if (flags_.Contains(kInobjectFields) || flags_.Contains(kOsrEntries)) { - table->Kill(); - return; - } - if (flags_.Contains(kElementsKind) || flags_.Contains(kMaps)) { - table->KillOffset(JSObject::kMapOffset); - } - if (flags_.Contains(kElementsKind) || flags_.Contains(kElementsPointer)) { - table->KillOffset(JSObject::kElementsOffset); - } - - // Kill non-agreeing fields for each store contained in these effects. - for (int i = 0; i < stores_.length(); i++) { - table->KillStore(stores_[i]); - } - } - - // Union these effects with the other effects. - void Union(HLoadEliminationEffects* that, Zone* zone) { - flags_.Add(that->flags_); - for (int i = 0; i < that->stores_.length(); i++) { - stores_.Add(that->stores_[i], zone); - } - } - - private: - Zone* zone_; - GVNFlagSet flags_; - ZoneList stores_; -}; - - -// The main routine of the analysis phase. Use the HFlowEngine for either a -// local or a global analysis. -void HLoadEliminationPhase::Run() { - HFlowEngine - engine(graph(), zone()); - HAliasAnalyzer aliasing; - HLoadEliminationTable* table = - new(zone()) HLoadEliminationTable(zone(), &aliasing); - - if (GLOBAL) { - // Perform a global analysis. - engine.AnalyzeDominatedBlocks(graph()->blocks()->at(0), table); - } else { - // Perform only local analysis. - for (int i = 0; i < graph()->blocks()->length(); i++) { - table->Kill(); - engine.AnalyzeOneBlock(graph()->blocks()->at(i), table); - } - } -} - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/hydrogen-load-elimination.h b/src/crankshaft/hydrogen-load-elimination.h deleted file mode 100644 index e5656459c9..0000000000 --- a/src/crankshaft/hydrogen-load-elimination.h +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_HYDROGEN_LOAD_ELIMINATION_H_ -#define V8_CRANKSHAFT_HYDROGEN_LOAD_ELIMINATION_H_ - -#include "src/crankshaft/hydrogen.h" - -namespace v8 { -namespace internal { - -class HLoadEliminationPhase : public HPhase { - public: - explicit HLoadEliminationPhase(HGraph* graph) - : HPhase("H_Load elimination", graph) { } - - void Run(); - - private: - void EliminateLoads(HBasicBlock* block); -}; - - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_HYDROGEN_LOAD_ELIMINATION_H_ diff --git a/src/crankshaft/hydrogen-mark-unreachable.cc b/src/crankshaft/hydrogen-mark-unreachable.cc deleted file mode 100644 index 2393b5a8a4..0000000000 --- a/src/crankshaft/hydrogen-mark-unreachable.cc +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/hydrogen-mark-unreachable.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - - -void HMarkUnreachableBlocksPhase::MarkUnreachableBlocks() { - // If there is unreachable code in the graph, propagate the unreachable marks - // using a fixed-point iteration. 
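The unreachable-block pass is a plain fixed point: a block is reachable if it is the entry block or has a reachable predecessor that can transfer control to it, and the sweep repeats until no mark changes. A standalone sketch of that iteration over a small adjacency-list CFG, with the deoptimizing-predecessor and known-successor filtering omitted:

#include <cassert>
#include <vector>

// preds[b] lists the predecessors of block b; block 0 is the entry.
std::vector<bool> ReachableBlocks(const std::vector<std::vector<int>>& preds) {
  std::vector<bool> reachable(preds.size(), false);
  reachable[0] = true;  // The entry block is always reachable.
  bool changed = true;
  while (changed) {     // Fixed-point iteration.
    changed = false;
    for (size_t b = 1; b < preds.size(); ++b) {
      if (reachable[b]) continue;
      for (int p : preds[b]) {
        if (reachable[p]) {
          reachable[b] = true;
          changed = true;
          break;
        }
      }
    }
  }
  return reachable;
}

int main() {
  // 0 -> 1 -> 2; block 3 is only reached from the unreachable block 4.
  std::vector<std::vector<int>> preds = {{}, {0}, {1}, {4}, {}};
  std::vector<bool> r = ReachableBlocks(preds);
  assert(r[2] && !r[3] && !r[4]);
}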
- bool changed = true; - const ZoneList* blocks = graph()->blocks(); - while (changed) { - changed = false; - for (int i = 0; i < blocks->length(); i++) { - HBasicBlock* block = blocks->at(i); - if (!block->IsReachable()) continue; - bool is_reachable = blocks->at(0) == block; - for (HPredecessorIterator it(block); !it.Done(); it.Advance()) { - HBasicBlock* predecessor = it.Current(); - // A block is reachable if one of its predecessors is reachable, - // doesn't deoptimize and either is known to transfer control to the - // block or has a control flow instruction for which the next block - // cannot be determined. - if (predecessor->IsReachable() && !predecessor->IsDeoptimizing()) { - HBasicBlock* pred_succ; - bool known_pred_succ = - predecessor->end()->KnownSuccessorBlock(&pred_succ); - if (!known_pred_succ || pred_succ == block) { - is_reachable = true; - break; - } - } - if (block->is_osr_entry()) { - is_reachable = true; - } - } - if (!is_reachable) { - block->MarkUnreachable(); - changed = true; - } - } - } -} - - -void HMarkUnreachableBlocksPhase::Run() { - MarkUnreachableBlocks(); -} - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/hydrogen-mark-unreachable.h b/src/crankshaft/hydrogen-mark-unreachable.h deleted file mode 100644 index 1243b1fcbe..0000000000 --- a/src/crankshaft/hydrogen-mark-unreachable.h +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_HYDROGEN_MARK_UNREACHABLE_H_ -#define V8_CRANKSHAFT_HYDROGEN_MARK_UNREACHABLE_H_ - -#include "src/crankshaft/hydrogen.h" - -namespace v8 { -namespace internal { - - -class HMarkUnreachableBlocksPhase : public HPhase { - public: - explicit HMarkUnreachableBlocksPhase(HGraph* graph) - : HPhase("H_Mark unreachable blocks", graph) { } - - void Run(); - - private: - void MarkUnreachableBlocks(); - - DISALLOW_COPY_AND_ASSIGN(HMarkUnreachableBlocksPhase); -}; - - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_HYDROGEN_MARK_UNREACHABLE_H_ diff --git a/src/crankshaft/hydrogen-range-analysis.cc b/src/crankshaft/hydrogen-range-analysis.cc deleted file mode 100644 index 50592d32ca..0000000000 --- a/src/crankshaft/hydrogen-range-analysis.cc +++ /dev/null @@ -1,286 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/hydrogen-range-analysis.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - - -class Pending { - public: - Pending(HBasicBlock* block, int last_changed_range) - : block_(block), last_changed_range_(last_changed_range) {} - - HBasicBlock* block() const { return block_; } - int last_changed_range() const { return last_changed_range_; } - - private: - HBasicBlock* block_; - int last_changed_range_; -}; - - -void HRangeAnalysisPhase::TraceRange(const char* msg, ...) { - if (FLAG_trace_range) { - va_list arguments; - va_start(arguments, msg); - base::OS::VPrint(msg, arguments); - va_end(arguments); - } -} - - -void HRangeAnalysisPhase::Run() { - HBasicBlock* block(graph()->entry_block()); - ZoneList stack(graph()->blocks()->length(), zone()); - while (block != NULL) { - TraceRange("Analyzing block B%d\n", block->block_id()); - - // Infer range based on control flow. 
- if (block->predecessors()->length() == 1) { - HBasicBlock* pred = block->predecessors()->first(); - if (pred->end()->IsCompareNumericAndBranch()) { - InferControlFlowRange(HCompareNumericAndBranch::cast(pred->end()), - block); - } - } - - // Process phi instructions. - for (int i = 0; i < block->phis()->length(); ++i) { - HPhi* phi = block->phis()->at(i); - InferRange(phi); - } - - // Go through all instructions of the current block. - for (HInstructionIterator it(block); !it.Done(); it.Advance()) { - HValue* value = it.Current(); - InferRange(value); - - // Compute the bailout-on-minus-zero flag. - if (value->IsChange()) { - HChange* instr = HChange::cast(value); - // Propagate flags for negative zero checks upwards from conversions - // int32-to-tagged and int32-to-double. - Representation from = instr->value()->representation(); - DCHECK(from.Equals(instr->from())); - if (from.IsSmiOrInteger32()) { - DCHECK(instr->to().IsTagged() || - instr->to().IsDouble() || - instr->to().IsSmiOrInteger32()); - PropagateMinusZeroChecks(instr->value()); - } - } - } - - // Continue analysis in all dominated blocks. - const ZoneList* dominated_blocks(block->dominated_blocks()); - if (!dominated_blocks->is_empty()) { - // Continue with first dominated block, and push the - // remaining blocks on the stack (in reverse order). - int last_changed_range = changed_ranges_.length(); - for (int i = dominated_blocks->length() - 1; i > 0; --i) { - stack.Add(Pending(dominated_blocks->at(i), last_changed_range), zone()); - } - block = dominated_blocks->at(0); - } else if (!stack.is_empty()) { - // Pop next pending block from stack. - Pending pending = stack.RemoveLast(); - RollBackTo(pending.last_changed_range()); - block = pending.block(); - } else { - // All blocks done. - block = NULL; - } - } - - // The ranges are not valid anymore due to SSI vs. SSA! - PoisonRanges(); -} - - -void HRangeAnalysisPhase::PoisonRanges() { -#ifdef DEBUG - for (int i = 0; i < graph()->blocks()->length(); ++i) { - HBasicBlock* block = graph()->blocks()->at(i); - for (HInstructionIterator it(block); !it.Done(); it.Advance()) { - HInstruction* instr = it.Current(); - if (instr->HasRange()) instr->PoisonRange(); - } - } -#endif -} - - -void HRangeAnalysisPhase::InferControlFlowRange(HCompareNumericAndBranch* test, - HBasicBlock* dest) { - DCHECK((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest)); - if (test->representation().IsSmiOrInteger32()) { - Token::Value op = test->token(); - if (test->SecondSuccessor() == dest) { - op = Token::NegateCompareOp(op); - } - Token::Value inverted_op = Token::ReverseCompareOp(op); - UpdateControlFlowRange(op, test->left(), test->right()); - UpdateControlFlowRange(inverted_op, test->right(), test->left()); - } -} - - -// We know that value [op] other. Use this information to update the range on -// value. -void HRangeAnalysisPhase::UpdateControlFlowRange(Token::Value op, - HValue* value, - HValue* other) { - Range temp_range; - Range* range = other->range() != NULL ? other->range() : &temp_range; - Range* new_range = NULL; - - TraceRange("Control flow range infer %d %s %d\n", - value->id(), - Token::Name(op), - other->id()); - - if (op == Token::EQ || op == Token::EQ_STRICT) { - // The same range has to apply for value. 
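When a single comparison guards the block, the left operand's range can be narrowed from the right operand's range; on the true edge of x < y, for example, the upper bound of x drops to upper(y) - 1. A standalone sketch of that refinement for inclusive integer ranges, using a hypothetical Range struct and ignoring overflow:

#include <algorithm>
#include <cassert>

struct Range { int lower; int upper; };  // Inclusive integer range.

// Refine the range of `value` given that `value < other` held on this edge.
Range RefineOnLessThan(Range value, Range other) {
  value.upper = std::min(value.upper, other.upper - 1);
  return value;
}

// Refine the range of `value` given that `value > other` held on this edge.
Range RefineOnGreaterThan(Range value, Range other) {
  value.lower = std::max(value.lower, other.lower + 1);
  return value;
}

int main() {
  Range x{0, 100}, y{0, 10};
  Range r = RefineOnLessThan(x, y);   // x < y  =>  x <= 9.
  assert(r.lower == 0 && r.upper == 9);
  r = RefineOnGreaterThan(x, y);      // x > y  =>  x >= 1.
  assert(r.lower == 1 && r.upper == 100);
}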
- new_range = range->Copy(graph()->zone()); - } else if (op == Token::LT || op == Token::LTE) { - new_range = range->CopyClearLower(graph()->zone()); - if (op == Token::LT) { - new_range->AddConstant(-1); - } - } else if (op == Token::GT || op == Token::GTE) { - new_range = range->CopyClearUpper(graph()->zone()); - if (op == Token::GT) { - new_range->AddConstant(1); - } - } - - if (new_range != NULL && !new_range->IsMostGeneric()) { - AddRange(value, new_range); - } -} - - -void HRangeAnalysisPhase::InferRange(HValue* value) { - DCHECK(!value->HasRange()); - if (!value->representation().IsNone()) { - value->ComputeInitialRange(graph()->zone()); - Range* range = value->range(); - TraceRange("Initial inferred range of %d (%s) set to [%d,%d]\n", - value->id(), - value->Mnemonic(), - range->lower(), - range->upper()); - } -} - - -void HRangeAnalysisPhase::RollBackTo(int index) { - DCHECK(index <= changed_ranges_.length()); - for (int i = index; i < changed_ranges_.length(); ++i) { - changed_ranges_[i]->RemoveLastAddedRange(); - } - changed_ranges_.Rewind(index); -} - - -void HRangeAnalysisPhase::AddRange(HValue* value, Range* range) { - Range* original_range = value->range(); - value->AddNewRange(range, graph()->zone()); - changed_ranges_.Add(value, zone()); - Range* new_range = value->range(); - TraceRange("Updated range of %d set to [%d,%d]\n", - value->id(), - new_range->lower(), - new_range->upper()); - if (original_range != NULL) { - TraceRange("Original range was [%d,%d]\n", - original_range->lower(), - original_range->upper()); - } - TraceRange("New information was [%d,%d]\n", - range->lower(), - range->upper()); -} - - -void HRangeAnalysisPhase::PropagateMinusZeroChecks(HValue* value) { - DCHECK(worklist_.is_empty()); - DCHECK(in_worklist_.IsEmpty()); - - AddToWorklist(value); - while (!worklist_.is_empty()) { - value = worklist_.RemoveLast(); - - if (value->IsPhi()) { - // For phis, we must propagate the check to all of its inputs. 
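The minus-zero propagation below is a classic worklist walk: pop a value, mark it, and push the operands through which the property could flow, with a visited set preventing re-queuing. A generic standalone sketch of that traversal shape, with a simplified node type and the per-opcode flag logic left out:

#include <cassert>
#include <unordered_set>
#include <vector>

struct Node {
  int id;
  std::vector<Node*> operands;  // Inputs through which the property may flow.
  bool flagged;
};

// Flag `start` and every node transitively reachable through operands.
void Propagate(Node* start) {
  std::vector<Node*> worklist{start};
  std::unordered_set<int> in_worklist{start->id};
  while (!worklist.empty()) {
    Node* n = worklist.back();
    worklist.pop_back();
    n->flagged = true;  // In the real pass: SetFlag(kBailoutOnMinusZero).
    for (Node* op : n->operands) {
      if (in_worklist.insert(op->id).second) worklist.push_back(op);
    }
  }
}

int main() {
  Node a{0, {}, false}, b{1, {}, false}, c{2, {}, false};
  a.operands = {&b};
  b.operands = {&c, &a};  // Cycle back to a: the visited set breaks it.
  Propagate(&a);
  assert(a.flagged && b.flagged && c.flagged);
}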
- HPhi* phi = HPhi::cast(value); - for (int i = 0; i < phi->OperandCount(); ++i) { - AddToWorklist(phi->OperandAt(i)); - } - } else if (value->IsUnaryMathOperation()) { - HUnaryMathOperation* instr = HUnaryMathOperation::cast(value); - if (instr->representation().IsSmiOrInteger32() && - !instr->value()->representation().Equals(instr->representation())) { - if (instr->value()->range() == NULL || - instr->value()->range()->CanBeMinusZero()) { - instr->SetFlag(HValue::kBailoutOnMinusZero); - } - } - if (instr->RequiredInputRepresentation(0).IsSmiOrInteger32() && - instr->representation().Equals( - instr->RequiredInputRepresentation(0))) { - AddToWorklist(instr->value()); - } - } else if (value->IsChange()) { - HChange* instr = HChange::cast(value); - if (!instr->from().IsSmiOrInteger32() && - !instr->CanTruncateToInt32() && - (instr->value()->range() == NULL || - instr->value()->range()->CanBeMinusZero())) { - instr->SetFlag(HValue::kBailoutOnMinusZero); - } - } else if (value->IsForceRepresentation()) { - HForceRepresentation* instr = HForceRepresentation::cast(value); - AddToWorklist(instr->value()); - } else if (value->IsMod()) { - HMod* instr = HMod::cast(value); - if (instr->range() == NULL || instr->range()->CanBeMinusZero()) { - instr->SetFlag(HValue::kBailoutOnMinusZero); - AddToWorklist(instr->left()); - } - } else if (value->IsDiv() || value->IsMul()) { - HBinaryOperation* instr = HBinaryOperation::cast(value); - if (instr->range() == NULL || instr->range()->CanBeMinusZero()) { - instr->SetFlag(HValue::kBailoutOnMinusZero); - } - AddToWorklist(instr->right()); - AddToWorklist(instr->left()); - } else if (value->IsMathFloorOfDiv()) { - HMathFloorOfDiv* instr = HMathFloorOfDiv::cast(value); - instr->SetFlag(HValue::kBailoutOnMinusZero); - } else if (value->IsAdd() || value->IsSub()) { - HBinaryOperation* instr = HBinaryOperation::cast(value); - if (instr->range() == NULL || instr->range()->CanBeMinusZero()) { - // Propagate to the left argument. If the left argument cannot be -0, - // then the result of the add/sub operation cannot be either. - AddToWorklist(instr->left()); - } - } else if (value->IsMathMinMax()) { - HMathMinMax* instr = HMathMinMax::cast(value); - AddToWorklist(instr->right()); - AddToWorklist(instr->left()); - } - } - - in_worklist_.Clear(); - DCHECK(in_worklist_.IsEmpty()); - DCHECK(worklist_.is_empty()); -} - - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/hydrogen-range-analysis.h b/src/crankshaft/hydrogen-range-analysis.h deleted file mode 100644 index eeac690e62..0000000000 --- a/src/crankshaft/hydrogen-range-analysis.h +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#ifndef V8_CRANKSHAFT_HYDROGEN_RANGE_ANALYSIS_H_ -#define V8_CRANKSHAFT_HYDROGEN_RANGE_ANALYSIS_H_ - -#include "src/base/compiler-specific.h" -#include "src/crankshaft/hydrogen.h" - -namespace v8 { -namespace internal { - - -class HRangeAnalysisPhase : public HPhase { - public: - explicit HRangeAnalysisPhase(HGraph* graph) - : HPhase("H_Range analysis", graph), changed_ranges_(16, zone()), - in_worklist_(graph->GetMaximumValueID(), zone()), - worklist_(32, zone()) {} - - void Run(); - - private: - PRINTF_FORMAT(2, 3) void TraceRange(const char* msg, ...); - void InferControlFlowRange(HCompareNumericAndBranch* test, - HBasicBlock* dest); - void UpdateControlFlowRange(Token::Value op, HValue* value, HValue* other); - void InferRange(HValue* value); - void RollBackTo(int index); - void AddRange(HValue* value, Range* range); - void AddToWorklist(HValue* value) { - if (in_worklist_.Contains(value->id())) return; - in_worklist_.Add(value->id()); - worklist_.Add(value, zone()); - } - void PropagateMinusZeroChecks(HValue* value); - void PoisonRanges(); - - ZoneList changed_ranges_; - - BitVector in_worklist_; - ZoneList worklist_; - - DISALLOW_COPY_AND_ASSIGN(HRangeAnalysisPhase); -}; - - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_HYDROGEN_RANGE_ANALYSIS_H_ diff --git a/src/crankshaft/hydrogen-redundant-phi.cc b/src/crankshaft/hydrogen-redundant-phi.cc deleted file mode 100644 index 08644c874c..0000000000 --- a/src/crankshaft/hydrogen-redundant-phi.cc +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/hydrogen-redundant-phi.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - -void HRedundantPhiEliminationPhase::Run() { - // Gather all phis from all blocks first. - const ZoneList* blocks(graph()->blocks()); - ZoneList all_phis(blocks->length(), zone()); - for (int i = 0; i < blocks->length(); ++i) { - HBasicBlock* block = blocks->at(i); - for (int j = 0; j < block->phis()->length(); j++) { - all_phis.Add(block->phis()->at(j), zone()); - } - } - - // Iteratively reduce all phis in the list. - ProcessPhis(&all_phis); - -#if DEBUG - // Make sure that we *really* removed all redundant phis. - for (int i = 0; i < blocks->length(); ++i) { - for (int j = 0; j < blocks->at(i)->phis()->length(); j++) { - DCHECK(blocks->at(i)->phis()->at(j)->GetRedundantReplacement() == NULL); - } - } -#endif -} - - -void HRedundantPhiEliminationPhase::ProcessBlock(HBasicBlock* block) { - ProcessPhis(block->phis()); -} - - -void HRedundantPhiEliminationPhase::ProcessPhis(const ZoneList* phis) { - bool updated; - do { - // Iterately replace all redundant phis in the given list. - updated = false; - for (int i = 0; i < phis->length(); i++) { - HPhi* phi = phis->at(i); - if (phi->CheckFlag(HValue::kIsDead)) continue; // Already replaced. - - HValue* replacement = phi->GetRedundantReplacement(); - if (replacement != NULL) { - phi->SetFlag(HValue::kIsDead); - for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) { - HValue* value = it.value(); - value->SetOperandAt(it.index(), replacement); - // Iterate again if used in another non-dead phi. 
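A phi is redundant when, ignoring references to itself, all of its inputs are the same value; it can then be replaced by that value, and the replacement may make further phis redundant, hence the repeat-until-stable loop. A standalone sketch of the redundancy test in its simplest form, with a hypothetical minimal value type:

#include <cassert>
#include <vector>

struct Val { std::vector<Val*> inputs; };

// Returns the single non-self input if the phi is redundant, else nullptr.
Val* RedundantReplacement(Val* phi) {
  Val* replacement = nullptr;
  for (Val* in : phi->inputs) {
    if (in == phi) continue;                       // Self-reference (back edge).
    if (replacement == nullptr) replacement = in;
    else if (replacement != in) return nullptr;    // Two distinct inputs.
  }
  return replacement;
}

int main() {
  Val x, y, phi;
  phi.inputs = {&x, &phi, &x};  // phi(x, phi, x) collapses to x.
  assert(RedundantReplacement(&phi) == &x);
  phi.inputs = {&x, &y};        // Genuinely merges two values: keep it.
  assert(RedundantReplacement(&phi) == nullptr);
}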
- updated |= value->IsPhi() && !value->CheckFlag(HValue::kIsDead); - } - phi->block()->RemovePhi(phi); - } - } - } while (updated); -} - - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/hydrogen-redundant-phi.h b/src/crankshaft/hydrogen-redundant-phi.h deleted file mode 100644 index e8735c82d3..0000000000 --- a/src/crankshaft/hydrogen-redundant-phi.h +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_HYDROGEN_REDUNDANT_PHI_H_ -#define V8_CRANKSHAFT_HYDROGEN_REDUNDANT_PHI_H_ - -#include "src/crankshaft/hydrogen.h" - -namespace v8 { -namespace internal { - - -// Replace all phis consisting of a single non-loop operand plus any number of -// loop operands by that single non-loop operand. -class HRedundantPhiEliminationPhase : public HPhase { - public: - explicit HRedundantPhiEliminationPhase(HGraph* graph) - : HPhase("H_Redundant phi elimination", graph) { } - - void Run(); - void ProcessBlock(HBasicBlock* block); - - private: - void ProcessPhis(const ZoneList* phis); - - DISALLOW_COPY_AND_ASSIGN(HRedundantPhiEliminationPhase); -}; - - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_HYDROGEN_REDUNDANT_PHI_H_ diff --git a/src/crankshaft/hydrogen-removable-simulates.cc b/src/crankshaft/hydrogen-removable-simulates.cc deleted file mode 100644 index e68168cf9c..0000000000 --- a/src/crankshaft/hydrogen-removable-simulates.cc +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/hydrogen-removable-simulates.h" - -#include "src/crankshaft/hydrogen-flow-engine.h" -#include "src/crankshaft/hydrogen-instructions.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - -class State : public ZoneObject { - public: - explicit State(Zone* zone) - : zone_(zone), mergelist_(2, zone), first_(true), mode_(NORMAL) { } - - State* Process(HInstruction* instr, Zone* zone) { - if (FLAG_trace_removable_simulates) { - PrintF("[%s with state %p in B%d: #%d %s]\n", - mode_ == NORMAL ? "processing" : "collecting", - reinterpret_cast(this), instr->block()->block_id(), - instr->id(), instr->Mnemonic()); - } - // Forward-merge "trains" of simulates after an instruction with observable - // side effects to keep live ranges short. - if (mode_ == COLLECT_CONSECUTIVE_SIMULATES) { - if (instr->IsSimulate()) { - HSimulate* current_simulate = HSimulate::cast(instr); - if (current_simulate->is_candidate_for_removal() && - !current_simulate->ast_id().IsNone()) { - Remember(current_simulate); - return this; - } - } - FlushSimulates(); - mode_ = NORMAL; - } - // Ensure there's a non-foldable HSimulate before an HEnterInlined to avoid - // folding across HEnterInlined. - DCHECK(!(instr->IsEnterInlined() && - HSimulate::cast(instr->previous())->is_candidate_for_removal())); - if (instr->IsLeaveInlined() || instr->IsReturn()) { - // Never fold simulates from inlined environments into simulates in the - // outer environment. Simply remove all accumulated simulates without - // merging. This is safe because simulates after instructions with side - // effects are never added to the merge list. The same reasoning holds for - // return instructions. 
- RemoveSimulates(); - return this; - } - if (instr->IsControlInstruction()) { - // Merge the accumulated simulates at the end of the block. - FlushSimulates(); - return this; - } - if (instr->IsCapturedObject()) { - // Do not merge simulates across captured objects - captured objects - // change environments during environment replay, and such changes - // would not be reflected in the simulate. - FlushSimulates(); - return this; - } - // Skip the non-simulates and the first simulate. - if (!instr->IsSimulate()) return this; - if (first_) { - first_ = false; - return this; - } - HSimulate* current_simulate = HSimulate::cast(instr); - if (!current_simulate->is_candidate_for_removal()) { - Remember(current_simulate); - FlushSimulates(); - } else if (current_simulate->ast_id().IsNone()) { - DCHECK(current_simulate->next()->IsEnterInlined()); - FlushSimulates(); - } else if (current_simulate->previous()->HasObservableSideEffects()) { - Remember(current_simulate); - mode_ = COLLECT_CONSECUTIVE_SIMULATES; - } else { - Remember(current_simulate); - } - - return this; - } - - static State* Merge(State* succ_state, - HBasicBlock* succ_block, - State* pred_state, - HBasicBlock* pred_block, - Zone* zone) { - return (succ_state == NULL) - ? pred_state->Copy(succ_block, pred_block, zone) - : succ_state->Merge(succ_block, pred_state, pred_block, zone); - } - - static State* Finish(State* state, HBasicBlock* block, Zone* zone) { - if (FLAG_trace_removable_simulates) { - PrintF("[preparing state %p for B%d]\n", reinterpret_cast(state), - block->block_id()); - } - // For our current local analysis, we should not remember simulates across - // block boundaries. - DCHECK(!state->HasRememberedSimulates()); - // Nasty heuristic: Never remove the first simulate in a block. This - // just so happens to have a beneficial effect on register allocation. - state->first_ = true; - return state; - } - - private: - explicit State(const State& other) - : zone_(other.zone_), - mergelist_(other.mergelist_, other.zone_), - first_(other.first_), - mode_(other.mode_) { } - - enum Mode { NORMAL, COLLECT_CONSECUTIVE_SIMULATES }; - - bool HasRememberedSimulates() const { return !mergelist_.is_empty(); } - - void Remember(HSimulate* sim) { - mergelist_.Add(sim, zone_); - } - - void FlushSimulates() { - if (HasRememberedSimulates()) { - mergelist_.RemoveLast()->MergeWith(&mergelist_); - } - } - - void RemoveSimulates() { - while (HasRememberedSimulates()) { - mergelist_.RemoveLast()->DeleteAndReplaceWith(NULL); - } - } - - State* Copy(HBasicBlock* succ_block, HBasicBlock* pred_block, Zone* zone) { - State* copy = new(zone) State(*this); - if (FLAG_trace_removable_simulates) { - PrintF("[copy state %p from B%d to new state %p for B%d]\n", - reinterpret_cast(this), pred_block->block_id(), - reinterpret_cast(copy), succ_block->block_id()); - } - return copy; - } - - State* Merge(HBasicBlock* succ_block, - State* pred_state, - HBasicBlock* pred_block, - Zone* zone) { - // For our current local analysis, we should not remember simulates across - // block boundaries. - DCHECK(!pred_state->HasRememberedSimulates()); - DCHECK(!HasRememberedSimulates()); - if (FLAG_trace_removable_simulates) { - PrintF("[merge state %p from B%d into %p for B%d]\n", - reinterpret_cast(pred_state), pred_block->block_id(), - reinterpret_cast(this), succ_block->block_id()); - } - return this; - } - - Zone* zone_; - ZoneList mergelist_; - bool first_; - Mode mode_; -}; - - -// We don't use effects here. 
-class Effects : public ZoneObject { - public: - explicit Effects(Zone* zone) { } - bool Disabled() { return true; } - void Process(HInstruction* instr, Zone* zone) { } - void Apply(State* state) { } - void Union(Effects* that, Zone* zone) { } -}; - - -void HMergeRemovableSimulatesPhase::Run() { - HFlowEngine engine(graph(), zone()); - State* state = new(zone()) State(zone()); - engine.AnalyzeDominatedBlocks(graph()->blocks()->at(0), state); -} - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/hydrogen-removable-simulates.h b/src/crankshaft/hydrogen-removable-simulates.h deleted file mode 100644 index 34500012cb..0000000000 --- a/src/crankshaft/hydrogen-removable-simulates.h +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_HYDROGEN_REMOVABLE_SIMULATES_H_ -#define V8_CRANKSHAFT_HYDROGEN_REMOVABLE_SIMULATES_H_ - -#include "src/crankshaft/hydrogen.h" - -namespace v8 { -namespace internal { - - -class HMergeRemovableSimulatesPhase : public HPhase { - public: - explicit HMergeRemovableSimulatesPhase(HGraph* graph) - : HPhase("H_Merge removable simulates", graph) { } - - void Run(); - - private: - DISALLOW_COPY_AND_ASSIGN(HMergeRemovableSimulatesPhase); -}; - - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_HYDROGEN_REMOVABLE_SIMULATES_H_ diff --git a/src/crankshaft/hydrogen-representation-changes.cc b/src/crankshaft/hydrogen-representation-changes.cc deleted file mode 100644 index 5fd72618fa..0000000000 --- a/src/crankshaft/hydrogen-representation-changes.cc +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/hydrogen-representation-changes.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - -void HRepresentationChangesPhase::InsertRepresentationChangeForUse( - HValue* value, HValue* use_value, int use_index, Representation to) { - // Insert the representation change right before its use. For phi-uses we - // insert at the end of the corresponding predecessor. - HInstruction* next = NULL; - if (use_value->IsPhi()) { - next = use_value->block()->predecessors()->at(use_index)->end(); - } else { - next = HInstruction::cast(use_value); - } - // For constants we try to make the representation change at compile - // time. When a representation change is not possible without loss of - // information we treat constants like normal instructions and insert the - // change instructions for them. - HInstruction* new_value = NULL; - bool is_truncating_to_smi = use_value->CheckFlag(HValue::kTruncatingToSmi); - bool is_truncating_to_int = use_value->CheckFlag(HValue::kTruncatingToInt32); - bool is_truncating_to_number = - use_value->CheckFlag(HValue::kTruncatingToNumber); - if (value->IsConstant()) { - HConstant* constant = HConstant::cast(value); - // Try to create a new copy of the constant with the new representation. 
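The constant case just introduced is about avoiding a runtime conversion when the constant can simply be re-materialized in the target representation; only when that would lose information does the pass fall back to inserting an explicit change instruction. A rough standalone sketch of the lossless part of that decision is below, with invented Rep/Value types; the truncating variant that the real pass also supports (CopyToTruncatedInt32) is not modeled.

// Convert a double constant to int32 at compile time only when the
// conversion is exact; otherwise signal that a runtime change is needed.
#include <cmath>
#include <cstdint>
#include <optional>

enum class Rep { kInt32, kDouble };

struct Value {
  Rep rep = Rep::kDouble;
  double number = 0.0;
};

std::optional<Value> TryCompileTimeChange(const Value& constant, Rep to) {
  if (to == Rep::kDouble) {
    return Value{Rep::kDouble, constant.number};  // always exact
  }
  double n = constant.number;
  if (n < -2147483648.0 || n > 2147483647.0) return std::nullopt;   // out of int32 range
  if (static_cast<double>(static_cast<int32_t>(n)) != n) return std::nullopt;  // fractional part
  if (n == 0.0 && std::signbit(n)) return std::nullopt;             // -0 has no int32 form
  return Value{Rep::kInt32, n};
}

// A caller mirrors the pass above: if TryCompileTimeChange() yields a value,
// the folded constant is used directly; otherwise an explicit HChange-like
// conversion is inserted right before the use.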
- if (is_truncating_to_int && to.IsInteger32()) { - Maybe res = constant->CopyToTruncatedInt32(graph()->zone()); - if (res.IsJust()) new_value = res.FromJust(); - } else { - new_value = constant->CopyToRepresentation(to, graph()->zone()); - } - } - - if (new_value == NULL) { - new_value = new (graph()->zone()) - HChange(value, to, is_truncating_to_smi, is_truncating_to_int, - is_truncating_to_number); - } - - new_value->InsertBefore(next); - use_value->SetOperandAt(use_index, new_value); -} - - -static bool IsNonDeoptingIntToSmiChange(HChange* change) { - Representation from_rep = change->from(); - Representation to_rep = change->to(); - // Flags indicating Uint32 operations are set in a later Hydrogen phase. - DCHECK(!change->CheckFlag(HValue::kUint32)); - return from_rep.IsInteger32() && to_rep.IsSmi() && SmiValuesAre32Bits(); -} - - -void HRepresentationChangesPhase::InsertRepresentationChangesForValue( - HValue* value) { - Representation r = value->representation(); - if (r.IsNone()) { -#ifdef DEBUG - for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) { - HValue* use_value = it.value(); - int use_index = it.index(); - Representation req = use_value->RequiredInputRepresentation(use_index); - DCHECK(req.IsNone()); - } -#endif - return; - } - if (value->HasNoUses()) { - if (value->IsForceRepresentation()) value->DeleteAndReplaceWith(NULL); - return; - } - - for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) { - HValue* use_value = it.value(); - int use_index = it.index(); - Representation req = use_value->RequiredInputRepresentation(use_index); - if (req.IsNone() || req.Equals(r)) continue; - - // If this is an HForceRepresentation instruction, and an HChange has been - // inserted above it, examine the input representation of the HChange. If - // that's int32, and this HForceRepresentation use is int32, and int32 to - // smi changes can't cause deoptimisation, set the input of the use to the - // input of the HChange. - if (value->IsForceRepresentation()) { - HValue* input = HForceRepresentation::cast(value)->value(); - if (input->IsChange()) { - HChange* change = HChange::cast(input); - if (change->from().Equals(req) && IsNonDeoptingIntToSmiChange(change)) { - use_value->SetOperandAt(use_index, change->value()); - continue; - } - } - } - InsertRepresentationChangeForUse(value, use_value, use_index, req); - } - if (value->HasNoUses()) { - DCHECK(value->IsConstant() || value->IsForceRepresentation()); - value->DeleteAndReplaceWith(NULL); - } else { - // The only purpose of a HForceRepresentation is to represent the value - // after the (possible) HChange instruction. We make it disappear. - if (value->IsForceRepresentation()) { - value->DeleteAndReplaceWith(HForceRepresentation::cast(value)->value()); - } - } -} - - -void HRepresentationChangesPhase::Run() { - // Compute truncation flag for phis: - // - // - Initially assume that all phis allow truncation to number and iteratively - // remove the ones that are used in an operation that not do an implicit - // ToNumber conversion. - // - Also assume that all Integer32 phis allow ToInt32 truncation and all - // Smi phis allow truncation to Smi. 
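The comment block above, together with the worklist code that follows it, describes a standard optimistic fixed point: every phi starts out marked as truncating, phis with a use that does not truncate seed a worklist, and the flag is then cleared transitively through phi operands until nothing changes. A generic sketch of that shape, detached from the specific kTruncatingToNumber/Int32/Smi flags and using invented types:

// Optimistic flag propagation over phis; not V8 code.
#include <vector>

struct Phi {
  std::vector<Phi*> operands;           // only phi operands matter here
  bool has_non_truncating_use = false;  // computed from the non-phi uses
  bool truncating = true;               // optimistic initial assumption
};

void PropagateTruncationFlags(std::vector<Phi*>& phis) {
  std::vector<Phi*> worklist;
  for (Phi* phi : phis) {
    if (phi->has_non_truncating_use) {
      phi->truncating = false;   // pessimize phis with an unsafe use
      worklist.push_back(phi);
    }
  }
  while (!worklist.empty()) {
    Phi* current = worklist.back();
    worklist.pop_back();
    // Every phi operand of a non-truncating phi must not truncate either,
    // because its value flows unmodified into the non-truncating use.
    for (Phi* input : current->operands) {
      if (input->truncating) {
        input->truncating = false;
        worklist.push_back(input);
      }
    }
  }
}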
- // - ZoneList number_worklist(8, zone()); - ZoneList int_worklist(8, zone()); - ZoneList smi_worklist(8, zone()); - - const ZoneList* phi_list(graph()->phi_list()); - for (int i = 0; i < phi_list->length(); i++) { - HPhi* phi = phi_list->at(i); - if (phi->representation().IsInteger32()) { - phi->SetFlag(HValue::kTruncatingToInt32); - } else if (phi->representation().IsSmi()) { - phi->SetFlag(HValue::kTruncatingToSmi); - phi->SetFlag(HValue::kTruncatingToInt32); - } - phi->SetFlag(HValue::kTruncatingToNumber); - } - - for (int i = 0; i < phi_list->length(); i++) { - HPhi* phi = phi_list->at(i); - HValue* value = NULL; - - if (phi->CheckFlag(HValue::kTruncatingToNumber) && - !phi->CheckUsesForFlag(HValue::kTruncatingToNumber, &value)) { - number_worklist.Add(phi, zone()); - phi->ClearFlag(HValue::kTruncatingToNumber); - phi->ClearFlag(HValue::kTruncatingToInt32); - phi->ClearFlag(HValue::kTruncatingToSmi); - if (FLAG_trace_representation) { - PrintF("#%d Phi is not truncating Number because of #%d %s\n", - phi->id(), value->id(), value->Mnemonic()); - } - } else if (phi->representation().IsSmiOrInteger32() && - !phi->CheckUsesForFlag(HValue::kTruncatingToInt32, &value)) { - int_worklist.Add(phi, zone()); - phi->ClearFlag(HValue::kTruncatingToInt32); - phi->ClearFlag(HValue::kTruncatingToSmi); - if (FLAG_trace_representation) { - PrintF("#%d Phi is not truncating Int32 because of #%d %s\n", - phi->id(), value->id(), value->Mnemonic()); - } - } else if (phi->representation().IsSmi() && - !phi->CheckUsesForFlag(HValue::kTruncatingToSmi, &value)) { - smi_worklist.Add(phi, zone()); - phi->ClearFlag(HValue::kTruncatingToSmi); - if (FLAG_trace_representation) { - PrintF("#%d Phi is not truncating Smi because of #%d %s\n", - phi->id(), value->id(), value->Mnemonic()); - } - } - } - - while (!number_worklist.is_empty()) { - HPhi* current = number_worklist.RemoveLast(); - for (int i = current->OperandCount() - 1; i >= 0; --i) { - HValue* input = current->OperandAt(i); - if (input->IsPhi() && input->CheckFlag(HValue::kTruncatingToNumber)) { - if (FLAG_trace_representation) { - PrintF("#%d Phi is not truncating Number because of #%d %s\n", - input->id(), current->id(), current->Mnemonic()); - } - input->ClearFlag(HValue::kTruncatingToNumber); - input->ClearFlag(HValue::kTruncatingToInt32); - input->ClearFlag(HValue::kTruncatingToSmi); - number_worklist.Add(HPhi::cast(input), zone()); - } - } - } - - while (!int_worklist.is_empty()) { - HPhi* current = int_worklist.RemoveLast(); - for (int i = 0; i < current->OperandCount(); ++i) { - HValue* input = current->OperandAt(i); - if (input->IsPhi() && - input->representation().IsSmiOrInteger32() && - input->CheckFlag(HValue::kTruncatingToInt32)) { - if (FLAG_trace_representation) { - PrintF("#%d Phi is not truncating Int32 because of #%d %s\n", - input->id(), current->id(), current->Mnemonic()); - } - input->ClearFlag(HValue::kTruncatingToInt32); - int_worklist.Add(HPhi::cast(input), zone()); - } - } - } - - while (!smi_worklist.is_empty()) { - HPhi* current = smi_worklist.RemoveLast(); - for (int i = 0; i < current->OperandCount(); ++i) { - HValue* input = current->OperandAt(i); - if (input->IsPhi() && - input->representation().IsSmi() && - input->CheckFlag(HValue::kTruncatingToSmi)) { - if (FLAG_trace_representation) { - PrintF("#%d Phi is not truncating Smi because of #%d %s\n", - input->id(), current->id(), current->Mnemonic()); - } - input->ClearFlag(HValue::kTruncatingToSmi); - smi_worklist.Add(HPhi::cast(input), zone()); - } - } - } - - const 
ZoneList* blocks(graph()->blocks()); - for (int i = 0; i < blocks->length(); ++i) { - // Process phi instructions first. - const HBasicBlock* block(blocks->at(i)); - const ZoneList* phis = block->phis(); - for (int j = 0; j < phis->length(); j++) { - InsertRepresentationChangesForValue(phis->at(j)); - } - - // Process normal instructions. - for (HInstruction* current = block->first(); current != NULL; ) { - HInstruction* next = current->next(); - InsertRepresentationChangesForValue(current); - current = next; - } - } -} - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/hydrogen-representation-changes.h b/src/crankshaft/hydrogen-representation-changes.h deleted file mode 100644 index d8403947c3..0000000000 --- a/src/crankshaft/hydrogen-representation-changes.h +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_HYDROGEN_REPRESENTATION_CHANGES_H_ -#define V8_CRANKSHAFT_HYDROGEN_REPRESENTATION_CHANGES_H_ - -#include "src/crankshaft/hydrogen.h" - -namespace v8 { -namespace internal { - - -class HRepresentationChangesPhase : public HPhase { - public: - explicit HRepresentationChangesPhase(HGraph* graph) - : HPhase("H_Representation changes", graph) { } - - void Run(); - - private: - void InsertRepresentationChangeForUse(HValue* value, - HValue* use_value, - int use_index, - Representation to); - void InsertRepresentationChangesForValue(HValue* value); -}; - - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_HYDROGEN_REPRESENTATION_CHANGES_H_ diff --git a/src/crankshaft/hydrogen-sce.cc b/src/crankshaft/hydrogen-sce.cc deleted file mode 100644 index a08190de3e..0000000000 --- a/src/crankshaft/hydrogen-sce.cc +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/hydrogen-sce.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - -void HStackCheckEliminationPhase::Run() { - // For each loop block walk the dominator tree from the backwards branch to - // the loop header. If a call instruction is encountered the backwards branch - // is dominated by a call and the stack check in the backwards branch can be - // removed. - for (int i = 0; i < graph()->blocks()->length(); i++) { - HBasicBlock* block = graph()->blocks()->at(i); - if (block->IsLoopHeader()) { - HBasicBlock* back_edge = block->loop_information()->GetLastBackEdge(); - HBasicBlock* dominator = back_edge; - while (true) { - for (HInstructionIterator it(dominator); !it.Done(); it.Advance()) { - if (it.Current()->HasStackCheck()) { - block->loop_information()->stack_check()->Eliminate(); - break; - } - } - - // Done when the loop header is processed. - if (dominator == block) break; - - // Move up the dominator tree. - dominator = dominator->dominator(); - } - } - } -} - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/hydrogen-sce.h b/src/crankshaft/hydrogen-sce.h deleted file mode 100644 index bb896bad6b..0000000000 --- a/src/crankshaft/hydrogen-sce.h +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
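The stack-check elimination pass whose implementation appears just above rests on one observation: if every path through the loop body to the back edge already executes a call (which performs its own stack check), the loop's explicit stack check is redundant, and walking the dominator chain from the last back edge up to the loop header visits exactly the blocks that every such path must pass through. A stand-in sketch of that walk, where BasicBlock and contains_call are invented rather than V8's HBasicBlock:

// Walk the dominator chain from the back edge up to the loop header; a call
// found along the way dominates the back edge, so the stack is already
// checked on every iteration that reaches the back edge.
struct BasicBlock {
  BasicBlock* dominator = nullptr;
  bool contains_call = false;
};

bool CallDominatesBackEdge(BasicBlock* back_edge, BasicBlock* header) {
  // The header dominates its back edges, so the walk terminates there.
  for (BasicBlock* block = back_edge; block != nullptr; block = block->dominator) {
    if (block->contains_call) return true;
    if (block == header) break;  // reached the header without seeing a call
  }
  return false;
}

// When CallDominatesBackEdge() is true, the loop's own stack check can be
// removed, which is what the pass above does via Eliminate().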
- -#ifndef V8_CRANKSHAFT_HYDROGEN_SCE_H_ -#define V8_CRANKSHAFT_HYDROGEN_SCE_H_ - -#include "src/crankshaft/hydrogen.h" - -namespace v8 { -namespace internal { - - -class HStackCheckEliminationPhase : public HPhase { - public: - explicit HStackCheckEliminationPhase(HGraph* graph) - : HPhase("H_Stack check elimination", graph) { } - - void Run(); -}; - - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_HYDROGEN_SCE_H_ diff --git a/src/crankshaft/hydrogen-store-elimination.cc b/src/crankshaft/hydrogen-store-elimination.cc deleted file mode 100644 index b081c21984..0000000000 --- a/src/crankshaft/hydrogen-store-elimination.cc +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/hydrogen-store-elimination.h" - -#include "src/crankshaft/hydrogen-instructions.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - -#define TRACE(x) if (FLAG_trace_store_elimination) PrintF x - -// Performs a block-by-block local analysis for removable stores. -void HStoreEliminationPhase::Run() { - GVNFlagSet flags; // Use GVN flags as an approximation for some instructions. - flags.RemoveAll(); - - flags.Add(kArrayElements); - flags.Add(kArrayLengths); - flags.Add(kStringLengths); - flags.Add(kBackingStoreFields); - flags.Add(kDoubleArrayElements); - flags.Add(kDoubleFields); - flags.Add(kElementsPointer); - flags.Add(kInobjectFields); - flags.Add(kExternalMemory); - flags.Add(kStringChars); - flags.Add(kTypedArrayElements); - - for (int i = 0; i < graph()->blocks()->length(); i++) { - unobserved_.Rewind(0); - HBasicBlock* block = graph()->blocks()->at(i); - if (!block->IsReachable()) continue; - for (HInstructionIterator it(block); !it.Done(); it.Advance()) { - HInstruction* instr = it.Current(); - if (instr->CheckFlag(HValue::kIsDead)) continue; - - switch (instr->opcode()) { - case HValue::kStoreNamedField: - // Remove any unobserved stores overwritten by this store. - ProcessStore(HStoreNamedField::cast(instr)); - break; - case HValue::kLoadNamedField: - // Observe any unobserved stores on this object + field. - ProcessLoad(HLoadNamedField::cast(instr)); - break; - default: - ProcessInstr(instr, flags); - break; - } - } - } -} - - -void HStoreEliminationPhase::ProcessStore(HStoreNamedField* store) { - HValue* object = store->object()->ActualValue(); - int i = 0; - while (i < unobserved_.length()) { - HStoreNamedField* prev = unobserved_.at(i); - if (aliasing_->MustAlias(object, prev->object()->ActualValue()) && - prev->CanBeReplacedWith(store)) { - // This store is guaranteed to overwrite the previous store. - prev->DeleteAndReplaceWith(NULL); - TRACE(("++ Unobserved store S%d overwritten by S%d\n", - prev->id(), store->id())); - unobserved_.Remove(i); - } else { - i++; - } - } - // Only non-transitioning stores are removable. 
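ProcessStore above implements the "last store wins" step of local dead-store elimination: stores whose values have not yet been observed are remembered, and a later store that must alias the same field kills the earlier one. A toy version follows, using plain (object, field) keys in place of HObjectAccess plus the alias analyzer, and ignoring the transition and replaceability checks of the real pass.

// Remove stores that are overwritten before anyone can observe them.
#include <map>
#include <utility>
#include <vector>

struct Store {
  int object;        // stand-in for the aliased base object
  int field;         // stand-in for the field/offset being written
  bool dead = false;
};

void EliminateOverwrittenStores(std::vector<Store>& stores) {
  std::map<std::pair<int, int>, Store*> unobserved;  // (object, field) -> last store
  for (Store& store : stores) {
    auto key = std::make_pair(store.object, store.field);
    auto it = unobserved.find(key);
    if (it != unobserved.end()) {
      it->second->dead = true;   // overwritten before any read: remove it
    }
    unobserved[key] = &store;    // this store is now the unobserved one
    // Loads, deopt points and GC-triggering instructions would clear or
    // shrink this map; that part of the pass (ProcessLoad/ProcessInstr)
    // appears below.
  }
}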
- if (!store->has_transition()) { - TRACE(("-- Might remove store S%d\n", store->id())); - unobserved_.Add(store, zone()); - } -} - - -void HStoreEliminationPhase::ProcessLoad(HLoadNamedField* load) { - HValue* object = load->object()->ActualValue(); - int i = 0; - while (i < unobserved_.length()) { - HStoreNamedField* prev = unobserved_.at(i); - if (aliasing_->MayAlias(object, prev->object()->ActualValue()) && - load->access().Equals(prev->access())) { - TRACE(("-- Observed store S%d by load L%d\n", prev->id(), load->id())); - unobserved_.Remove(i); - } else { - i++; - } - } -} - - -void HStoreEliminationPhase::ProcessInstr(HInstruction* instr, - GVNFlagSet flags) { - if (unobserved_.length() == 0) return; // Nothing to do. - if (instr->CanDeoptimize()) { - TRACE(("-- Observed stores at I%d (%s might deoptimize)\n", - instr->id(), instr->Mnemonic())); - unobserved_.Rewind(0); - return; - } - if (instr->CheckChangesFlag(kNewSpacePromotion)) { - TRACE(("-- Observed stores at I%d (%s might GC)\n", - instr->id(), instr->Mnemonic())); - unobserved_.Rewind(0); - return; - } - if (instr->DependsOnFlags().ContainsAnyOf(flags)) { - TRACE(("-- Observed stores at I%d (GVN flags of %s)\n", - instr->id(), instr->Mnemonic())); - unobserved_.Rewind(0); - return; - } -} - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/hydrogen-store-elimination.h b/src/crankshaft/hydrogen-store-elimination.h deleted file mode 100644 index 2a9e0c1488..0000000000 --- a/src/crankshaft/hydrogen-store-elimination.h +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_HYDROGEN_STORE_ELIMINATION_H_ -#define V8_CRANKSHAFT_HYDROGEN_STORE_ELIMINATION_H_ - -#include "src/crankshaft/hydrogen.h" -#include "src/crankshaft/hydrogen-alias-analysis.h" - -namespace v8 { -namespace internal { - -class HStoreEliminationPhase : public HPhase { - public: - explicit HStoreEliminationPhase(HGraph* graph) - : HPhase("H_Store elimination", graph), - unobserved_(10, zone()), - aliasing_() { } - - void Run(); - private: - ZoneList unobserved_; - HAliasAnalyzer* aliasing_; - - void ProcessStore(HStoreNamedField* store); - void ProcessLoad(HLoadNamedField* load); - void ProcessInstr(HInstruction* instr, GVNFlagSet flags); -}; - - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_HYDROGEN_STORE_ELIMINATION_H_ diff --git a/src/crankshaft/hydrogen-types.cc b/src/crankshaft/hydrogen-types.cc deleted file mode 100644 index 0d33413641..0000000000 --- a/src/crankshaft/hydrogen-types.cc +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
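To complete the toy model started above, these are the events that turn remembered stores back into observed ones: a load that may alias a remembered field observes that one store, while anything that can deoptimize, allocate (and thus trigger GC), or read the tracked memory regions observes all of them, which is what ProcessInstr approximates with GVN flags. Exact (object, field) equality stands in here for the may-alias query of the real pass.

// Companion sketch: invalidating remembered stores. Invented types.
#include <map>
#include <utility>

struct TrackedStores {
  std::map<std::pair<int, int>, int> unobserved;  // (object, field) -> store id

  void OnLoad(int object, int field) {
    unobserved.erase(std::make_pair(object, field));  // that store is now observed
  }

  void OnDeoptOrGCOrDependentRead() {
    unobserved.clear();  // any remembered store may now be observed
  }
};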
- -#include "src/crankshaft/hydrogen-types.h" - -#include "src/field-type.h" -#include "src/handles-inl.h" -#include "src/objects-inl.h" -#include "src/ostreams.h" - -namespace v8 { -namespace internal { - -// static -HType HType::FromType(AstType* type) { - if (AstType::Any()->Is(type)) return HType::Any(); - if (!type->IsInhabited()) return HType::None(); - if (type->Is(AstType::SignedSmall())) return HType::Smi(); - if (type->Is(AstType::Number())) return HType::TaggedNumber(); - if (type->Is(AstType::Null())) return HType::Null(); - if (type->Is(AstType::String())) return HType::String(); - if (type->Is(AstType::Boolean())) return HType::Boolean(); - if (type->Is(AstType::Undefined())) return HType::Undefined(); - if (type->Is(AstType::Object())) return HType::JSObject(); - if (type->Is(AstType::DetectableReceiver())) return HType::JSReceiver(); - return HType::Tagged(); -} - - -// static -HType HType::FromFieldType(Handle type, Zone* temp_zone) { - return FromType(type->Convert(temp_zone)); -} - -// static -HType HType::FromValue(Handle value) { - Object* raw_value = *value; - if (raw_value->IsSmi()) return HType::Smi(); - DCHECK(raw_value->IsHeapObject()); - Isolate* isolate = HeapObject::cast(*value)->GetIsolate(); - if (raw_value->IsNull(isolate)) return HType::Null(); - if (raw_value->IsHeapNumber()) { - double n = Handle::cast(value)->value(); - return IsSmiDouble(n) ? HType::Smi() : HType::HeapNumber(); - } - if (raw_value->IsString()) return HType::String(); - if (raw_value->IsBoolean()) return HType::Boolean(); - if (raw_value->IsUndefined(isolate)) return HType::Undefined(); - if (raw_value->IsJSArray()) { - DCHECK(!raw_value->IsUndetectable()); - return HType::JSArray(); - } - if (raw_value->IsJSObject() && !raw_value->IsUndetectable()) { - return HType::JSObject(); - } - return HType::HeapObject(); -} - - -std::ostream& operator<<(std::ostream& os, const HType& t) { - // Note: The c1visualizer syntax for locals allows only a sequence of the - // following characters: A-Za-z0-9_-|: - switch (t.kind_) { -#define DEFINE_CASE(Name, mask) \ - case HType::k##Name: \ - return os << #Name; - HTYPE_LIST(DEFINE_CASE) -#undef DEFINE_CASE - } - UNREACHABLE(); -} - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/hydrogen-types.h b/src/crankshaft/hydrogen-types.h deleted file mode 100644 index 3e68872924..0000000000 --- a/src/crankshaft/hydrogen-types.h +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_HYDROGEN_TYPES_H_ -#define V8_CRANKSHAFT_HYDROGEN_TYPES_H_ - -#include -#include - -#include "src/ast/ast-types.h" -#include "src/base/macros.h" - -namespace v8 { -namespace internal { - -// Forward declarations. 
-template class Handle; -class FieldType; -class Object; - -#define HTYPE_LIST(V) \ - V(Any, 0x0) /* 0000 0000 0000 0000 */ \ - V(Tagged, 0x1) /* 0000 0000 0000 0001 */ \ - V(TaggedPrimitive, 0x5) /* 0000 0000 0000 0101 */ \ - V(TaggedNumber, 0xd) /* 0000 0000 0000 1101 */ \ - V(Smi, 0x1d) /* 0000 0000 0001 1101 */ \ - V(HeapObject, 0x21) /* 0000 0000 0010 0001 */ \ - V(HeapPrimitive, 0x25) /* 0000 0000 0010 0101 */ \ - V(Null, 0x27) /* 0000 0000 0010 0111 */ \ - V(HeapNumber, 0x2d) /* 0000 0000 0010 1101 */ \ - V(String, 0x65) /* 0000 0000 0110 0101 */ \ - V(Boolean, 0xa5) /* 0000 0000 1010 0101 */ \ - V(Undefined, 0x125) /* 0000 0001 0010 0101 */ \ - V(JSReceiver, 0x221) /* 0000 0010 0010 0001 */ \ - V(JSObject, 0x621) /* 0000 0110 0010 0001 */ \ - V(JSArray, 0xe21) /* 0000 1110 0010 0001 */ \ - V(None, 0xfff) /* 0000 1111 1111 1111 */ - -class HType final { - public: - #define DECLARE_CONSTRUCTOR(Name, mask) \ - static HType Name() WARN_UNUSED_RESULT { return HType(k##Name); } - HTYPE_LIST(DECLARE_CONSTRUCTOR) - #undef DECLARE_CONSTRUCTOR - - // Return the weakest (least precise) common type. - HType Combine(HType other) const WARN_UNUSED_RESULT { - return HType(static_cast(kind_ & other.kind_)); - } - - bool Equals(HType other) const WARN_UNUSED_RESULT { - return kind_ == other.kind_; - } - - bool IsSubtypeOf(HType other) const WARN_UNUSED_RESULT { - return Combine(other).Equals(other); - } - - #define DECLARE_IS_TYPE(Name, mask) \ - bool Is##Name() const WARN_UNUSED_RESULT { \ - return IsSubtypeOf(HType::Name()); \ - } - HTYPE_LIST(DECLARE_IS_TYPE) - #undef DECLARE_IS_TYPE - - static HType FromType(AstType* type) WARN_UNUSED_RESULT; - static HType FromFieldType(Handle type, - Zone* temp_zone) WARN_UNUSED_RESULT; - static HType FromValue(Handle value) WARN_UNUSED_RESULT; - - friend std::ostream& operator<<(std::ostream& os, const HType& t); - - private: - enum Kind { - #define DECLARE_TYPE(Name, mask) k##Name = mask, - HTYPE_LIST(DECLARE_TYPE) - #undef DECLARE_TYPE - LAST_KIND = kNone - }; - - // Make sure type fits in int16. - STATIC_ASSERT(LAST_KIND < (1 << (CHAR_BIT * sizeof(int16_t)))); - - explicit HType(Kind kind) : kind_(kind) { } - - int16_t kind_; -}; - - -std::ostream& operator<<(std::ostream& os, const HType& t); -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_HYDROGEN_TYPES_H_ diff --git a/src/crankshaft/hydrogen-uint32-analysis.cc b/src/crankshaft/hydrogen-uint32-analysis.cc deleted file mode 100644 index de31a616c1..0000000000 --- a/src/crankshaft/hydrogen-uint32-analysis.cc +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/hydrogen-uint32-analysis.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - - -static bool IsUnsignedLoad(HLoadKeyed* instr) { - switch (instr->elements_kind()) { - case UINT8_ELEMENTS: - case UINT16_ELEMENTS: - case UINT32_ELEMENTS: - case UINT8_CLAMPED_ELEMENTS: - return true; - default: - return false; - } -} - - -static bool IsUint32Operation(HValue* instr) { - return instr->IsShr() || - (instr->IsLoadKeyed() && IsUnsignedLoad(HLoadKeyed::cast(instr))) || - (instr->IsInteger32Constant() && instr->GetInteger32Constant() >= 0); -} - - -bool HUint32AnalysisPhase::IsSafeUint32Use(HValue* val, HValue* use) { - // Operations that operate on bits are safe. 
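The HTYPE_LIST bit masks defined a little above encode the type lattice so that both HType operations are single machine instructions: Combine (the weakest common type) is a bitwise AND of the masks, and IsSubtypeOf reduces to checking that the subtype's mask contains every bit of the supertype's mask. A small self-contained check using the mask values from that list:

// Subtyping over the bit-encoded HType lattice: A is a subtype of B exactly
// when A's mask contains all of B's bits, i.e. (A & B) == B. Any (0x0) has
// the fewest bits and is the least precise type; None (0xfff) has them all.
#include <cassert>
#include <cstdint>

int main() {
  const uint16_t kTagged = 0x1, kTaggedPrimitive = 0x5, kTaggedNumber = 0xd;
  const uint16_t kSmi = 0x1d, kHeapNumber = 0x2d, kString = 0x65;

  auto is_subtype = [](uint16_t a, uint16_t b) { return (a & b) == b; };

  assert(is_subtype(kSmi, kTaggedNumber));         // Smi is a tagged number
  assert(is_subtype(kHeapNumber, kTaggedNumber));  // so is a heap number
  assert(is_subtype(kTaggedNumber, kTagged));      // all of them are tagged
  assert(is_subtype(kString, kTaggedPrimitive));   // strings are primitives
  assert(!is_subtype(kString, kTaggedNumber));     // but not numbers
  // Combine, the weakest common type, is simply the bitwise AND:
  assert((kSmi & kHeapNumber) == kTaggedNumber);   // Smi combined with HeapNumber
  return 0;
}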
- if (use->IsBitwise() || use->IsShl() || use->IsSar() || use->IsShr()) { - return true; - } else if (use->IsSimulate() || use->IsArgumentsObject()) { - // Deoptimization has special support for uint32. - return true; - } else if (use->IsChange()) { - // Conversions have special support for uint32. - // This DCHECK guards that the conversion in question is actually - // implemented. Do not extend the whitelist without adding - // support to LChunkBuilder::DoChange(). - DCHECK(HChange::cast(use)->to().IsDouble() || - HChange::cast(use)->to().IsSmi() || - HChange::cast(use)->to().IsTagged()); - return true; - } else if (use->IsStoreKeyed()) { - HStoreKeyed* store = HStoreKeyed::cast(use); - if (store->is_fixed_typed_array()) { - // Storing a value into an external integer array is a bit level - // operation. - if (store->value() == val) { - // Clamping or a conversion to double should have beed inserted. - DCHECK(store->elements_kind() != UINT8_CLAMPED_ELEMENTS); - DCHECK(store->elements_kind() != FLOAT32_ELEMENTS); - DCHECK(store->elements_kind() != FLOAT64_ELEMENTS); - return true; - } - } - } else if (use->IsCompareNumericAndBranch()) { - HCompareNumericAndBranch* c = HCompareNumericAndBranch::cast(use); - return IsUint32Operation(c->left()) && IsUint32Operation(c->right()); - } - - return false; -} - - -// Iterate over all uses and verify that they are uint32 safe: either don't -// distinguish between int32 and uint32 due to their bitwise nature or -// have special support for uint32 values. -// Encountered phis are optimistically treated as safe uint32 uses, -// marked with kUint32 flag and collected in the phis_ list. A separate -// pass will be performed later by UnmarkUnsafePhis to clear kUint32 from -// phis that are not actually uint32-safe (it requires fix point iteration). -bool HUint32AnalysisPhase::Uint32UsesAreSafe(HValue* uint32val) { - bool collect_phi_uses = false; - for (HUseIterator it(uint32val->uses()); !it.Done(); it.Advance()) { - HValue* use = it.value(); - - if (use->IsPhi()) { - if (!use->CheckFlag(HInstruction::kUint32)) { - // There is a phi use of this value from a phi that is not yet - // collected in phis_ array. Separate pass is required. - collect_phi_uses = true; - } - - // Optimistically treat phis as uint32 safe. - continue; - } - - if (!IsSafeUint32Use(uint32val, use)) { - return false; - } - } - - if (collect_phi_uses) { - for (HUseIterator it(uint32val->uses()); !it.Done(); it.Advance()) { - HValue* use = it.value(); - - // There is a phi use of this value from a phi that is not yet - // collected in phis_ array. Separate pass is required. - if (use->IsPhi() && !use->CheckFlag(HInstruction::kUint32)) { - use->SetFlag(HInstruction::kUint32); - phis_.Add(HPhi::cast(use), zone()); - } - } - } - - return true; -} - - -// Check if all operands to the given phi are marked with kUint32 flag. -bool HUint32AnalysisPhase::CheckPhiOperands(HPhi* phi) { - if (!phi->CheckFlag(HInstruction::kUint32)) { - // This phi is not uint32 safe. No need to check operands. - return false; - } - - for (int j = 0; j < phi->OperandCount(); j++) { - HValue* operand = phi->OperandAt(j); - if (!operand->CheckFlag(HInstruction::kUint32)) { - // Lazily mark constants that fit into uint32 range with kUint32 flag. - if (operand->IsInteger32Constant() && - operand->GetInteger32Constant() >= 0) { - operand->SetFlag(HInstruction::kUint32); - continue; - } - - // This phi is not safe, some operands are not uint32 values. 
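The use classification above rests on the fact that a 32-bit pattern means the same thing to bit-level operators regardless of whether it is later interpreted as int32 or uint32, while value-dependent uses (numeric comparison, conversion to double, and so on) need to know the intended interpretation and therefore require explicit support. A small demonstration:

// Why "bit level" uses are uint32-safe while value-dependent uses are not.
#include <cassert>
#include <cstdint>

int main() {
  uint32_t u = 0xFFFFFFFFu;             // 4294967295 when read as uint32
  int32_t s = static_cast<int32_t>(u);  // the same bit pattern read as -1

  // Bitwise operators only look at bits: identical results either way.
  assert(static_cast<uint32_t>(s & 0xFF) == (u & 0xFF));
  assert(static_cast<uint32_t>(s ^ 0x1) == (u ^ 0x1));

  // A numeric comparison depends on the interpretation: -1 is negative but
  // 4294967295 is not, which is why general uses need explicit uint32 support.
  assert(s < 0);
  assert(u > 0u);
  return 0;
}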
- return false; - } - } - - return true; -} - - -// Remove kUint32 flag from the phi itself and its operands. If any operand -// was a phi marked with kUint32 place it into a worklist for -// transitive clearing of kUint32 flag. -void HUint32AnalysisPhase::UnmarkPhi(HPhi* phi, ZoneList* worklist) { - phi->ClearFlag(HInstruction::kUint32); - for (int j = 0; j < phi->OperandCount(); j++) { - HValue* operand = phi->OperandAt(j); - if (operand->CheckFlag(HInstruction::kUint32)) { - operand->ClearFlag(HInstruction::kUint32); - if (operand->IsPhi()) { - worklist->Add(HPhi::cast(operand), zone()); - } - } - } -} - - -void HUint32AnalysisPhase::UnmarkUnsafePhis() { - // No phis were collected. Nothing to do. - if (phis_.length() == 0) return; - - // Worklist used to transitively clear kUint32 from phis that - // are used as arguments to other phis. - ZoneList worklist(phis_.length(), zone()); - - // Phi can be used as a uint32 value if and only if - // all its operands are uint32 values and all its - // uses are uint32 safe. - - // Iterate over collected phis and unmark those that - // are unsafe. When unmarking phi unmark its operands - // and add it to the worklist if it is a phi as well. - // Phis that are still marked as safe are shifted down - // so that all safe phis form a prefix of the phis_ array. - int phi_count = 0; - for (int i = 0; i < phis_.length(); i++) { - HPhi* phi = phis_[i]; - - if (CheckPhiOperands(phi) && Uint32UsesAreSafe(phi)) { - phis_[phi_count++] = phi; - } else { - UnmarkPhi(phi, &worklist); - } - } - - // Now phis array contains only those phis that have safe - // non-phi uses. Start transitively clearing kUint32 flag - // from phi operands of discovered non-safe phis until - // only safe phis are left. - while (!worklist.is_empty()) { - while (!worklist.is_empty()) { - HPhi* phi = worklist.RemoveLast(); - UnmarkPhi(phi, &worklist); - } - - // Check if any operands to safe phis were unmarked - // turning a safe phi into unsafe. The same value - // can flow into several phis. - int new_phi_count = 0; - for (int i = 0; i < phi_count; i++) { - HPhi* phi = phis_[i]; - - if (CheckPhiOperands(phi)) { - phis_[new_phi_count++] = phi; - } else { - UnmarkPhi(phi, &worklist); - } - } - phi_count = new_phi_count; - } -} - - -void HUint32AnalysisPhase::Run() { - if (!graph()->has_uint32_instructions()) return; - - ZoneList* uint32_instructions = graph()->uint32_instructions(); - for (int i = 0; i < uint32_instructions->length(); ++i) { - // Analyze instruction and mark it with kUint32 if all - // its uses are uint32 safe. - HInstruction* current = uint32_instructions->at(i); - if (current->IsLinked() && - current->representation().IsInteger32() && - Uint32UsesAreSafe(current)) { - current->SetFlag(HInstruction::kUint32); - } - } - - // Some phis might have been optimistically marked with kUint32 flag. - // Remove this flag from those phis that are unsafe and propagate - // this information transitively potentially clearing kUint32 flag - // from some non-phi operations that are used as operands to unsafe phis. - UnmarkUnsafePhis(); -} - - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/hydrogen-uint32-analysis.h b/src/crankshaft/hydrogen-uint32-analysis.h deleted file mode 100644 index 0d959b5953..0000000000 --- a/src/crankshaft/hydrogen-uint32-analysis.h +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#ifndef V8_CRANKSHAFT_HYDROGEN_UINT32_ANALYSIS_H_ -#define V8_CRANKSHAFT_HYDROGEN_UINT32_ANALYSIS_H_ - -#include "src/crankshaft/hydrogen.h" - -namespace v8 { -namespace internal { - - -// Discover instructions that can be marked with kUint32 flag allowing -// them to produce full range uint32 values. -class HUint32AnalysisPhase : public HPhase { - public: - explicit HUint32AnalysisPhase(HGraph* graph) - : HPhase("H_Compute safe UInt32 operations", graph), phis_(4, zone()) { } - - void Run(); - - private: - INLINE(bool IsSafeUint32Use(HValue* val, HValue* use)); - INLINE(bool Uint32UsesAreSafe(HValue* uint32val)); - INLINE(bool CheckPhiOperands(HPhi* phi)); - INLINE(void UnmarkPhi(HPhi* phi, ZoneList* worklist)); - INLINE(void UnmarkUnsafePhis()); - - ZoneList phis_; -}; - - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_HYDROGEN_UINT32_ANALYSIS_H_ diff --git a/src/crankshaft/hydrogen.cc b/src/crankshaft/hydrogen.cc deleted file mode 100644 index a6b77ebc64..0000000000 --- a/src/crankshaft/hydrogen.cc +++ /dev/null @@ -1,4263 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/hydrogen.h" - -#include -#include - -#include "src/allocation-site-scopes.h" -#include "src/ast/ast-numbering.h" -#include "src/ast/compile-time-value.h" -#include "src/ast/scopes.h" -#include "src/code-factory.h" -#include "src/crankshaft/hydrogen-bce.h" -#include "src/crankshaft/hydrogen-canonicalize.h" -#include "src/crankshaft/hydrogen-check-elimination.h" -#include "src/crankshaft/hydrogen-dce.h" -#include "src/crankshaft/hydrogen-dehoist.h" -#include "src/crankshaft/hydrogen-environment-liveness.h" -#include "src/crankshaft/hydrogen-escape-analysis.h" -#include "src/crankshaft/hydrogen-gvn.h" -#include "src/crankshaft/hydrogen-infer-representation.h" -#include "src/crankshaft/hydrogen-infer-types.h" -#include "src/crankshaft/hydrogen-load-elimination.h" -#include "src/crankshaft/hydrogen-mark-unreachable.h" -#include "src/crankshaft/hydrogen-range-analysis.h" -#include "src/crankshaft/hydrogen-redundant-phi.h" -#include "src/crankshaft/hydrogen-removable-simulates.h" -#include "src/crankshaft/hydrogen-representation-changes.h" -#include "src/crankshaft/hydrogen-sce.h" -#include "src/crankshaft/hydrogen-store-elimination.h" -#include "src/crankshaft/hydrogen-uint32-analysis.h" -#include "src/crankshaft/lithium-allocator.h" -#include "src/field-type.h" -#include "src/full-codegen/full-codegen.h" -#include "src/globals.h" -#include "src/ic/call-optimization.h" -#include "src/ic/ic.h" -// GetRootConstructor -#include "src/ic/ic-inl.h" -#include "src/isolate-inl.h" -#include "src/objects/map.h" -#include "src/runtime/runtime.h" - -#if V8_TARGET_ARCH_IA32 -#include "src/crankshaft/ia32/lithium-codegen-ia32.h" // NOLINT -#elif V8_TARGET_ARCH_X64 -#include "src/crankshaft/x64/lithium-codegen-x64.h" // NOLINT -#elif V8_TARGET_ARCH_ARM64 -#include "src/crankshaft/arm64/lithium-codegen-arm64.h" // NOLINT -#elif V8_TARGET_ARCH_ARM -#include "src/crankshaft/arm/lithium-codegen-arm.h" // NOLINT -#elif V8_TARGET_ARCH_PPC -#include "src/crankshaft/ppc/lithium-codegen-ppc.h" // NOLINT -#elif V8_TARGET_ARCH_MIPS -#include "src/crankshaft/mips/lithium-codegen-mips.h" // NOLINT -#elif V8_TARGET_ARCH_MIPS64 -#include "src/crankshaft/mips64/lithium-codegen-mips64.h" // NOLINT -#elif V8_TARGET_ARCH_S390 -#include 
"src/crankshaft/s390/lithium-codegen-s390.h" // NOLINT -#elif V8_TARGET_ARCH_X87 -#include "src/crankshaft/x87/lithium-codegen-x87.h" // NOLINT -#else -#error Unsupported target architecture. -#endif - -namespace v8 { -namespace internal { - -const auto GetRegConfig = RegisterConfiguration::Crankshaft; - -HBasicBlock::HBasicBlock(HGraph* graph) - : block_id_(graph->GetNextBlockID()), - graph_(graph), - phis_(4, graph->zone()), - first_(NULL), - last_(NULL), - end_(NULL), - loop_information_(NULL), - predecessors_(2, graph->zone()), - dominator_(NULL), - dominated_blocks_(4, graph->zone()), - last_environment_(NULL), - argument_count_(-1), - first_instruction_index_(-1), - last_instruction_index_(-1), - deleted_phis_(4, graph->zone()), - parent_loop_header_(NULL), - is_reachable_(true), - dominates_loop_successors_(false), - is_osr_entry_(false), - is_ordered_(false) { } - - -Isolate* HBasicBlock::isolate() const { - return graph_->isolate(); -} - - -void HBasicBlock::MarkUnreachable() { - is_reachable_ = false; -} - - -void HBasicBlock::AttachLoopInformation() { - DCHECK(!IsLoopHeader()); - loop_information_ = new(zone()) HLoopInformation(this, zone()); -} - - -void HBasicBlock::DetachLoopInformation() { - DCHECK(IsLoopHeader()); - loop_information_ = NULL; -} - - -void HBasicBlock::AddPhi(HPhi* phi) { - DCHECK(!IsStartBlock()); - phis_.Add(phi, zone()); - phi->SetBlock(this); -} - - -void HBasicBlock::RemovePhi(HPhi* phi) { - DCHECK(phi->block() == this); - DCHECK(phis_.Contains(phi)); - phi->Kill(); - phis_.RemoveElement(phi); - phi->SetBlock(NULL); -} - - -void HBasicBlock::AddInstruction(HInstruction* instr, SourcePosition position) { - DCHECK(!IsStartBlock() || !IsFinished()); - DCHECK(!instr->IsLinked()); - DCHECK(!IsFinished()); - - if (position.IsKnown()) { - instr->set_position(position); - } - if (first_ == NULL) { - DCHECK(last_environment() != NULL); - DCHECK(!last_environment()->ast_id().IsNone()); - HBlockEntry* entry = new(zone()) HBlockEntry(); - entry->InitializeAsFirst(this); - if (position.IsKnown()) { - entry->set_position(position); - } else { - DCHECK(!FLAG_hydrogen_track_positions || - !graph()->info()->IsOptimizing() || instr->IsAbnormalExit()); - } - first_ = last_ = entry; - } - instr->InsertAfter(last_); -} - - -HPhi* HBasicBlock::AddNewPhi(int merged_index) { - if (graph()->IsInsideNoSideEffectsScope()) { - merged_index = HPhi::kInvalidMergedIndex; - } - HPhi* phi = new(zone()) HPhi(merged_index, zone()); - AddPhi(phi); - return phi; -} - - -HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id, - RemovableSimulate removable) { - DCHECK(HasEnvironment()); - HEnvironment* environment = last_environment(); - DCHECK(ast_id.IsNone() || ast_id == BailoutId::StubEntry()); - - int push_count = environment->push_count(); - int pop_count = environment->pop_count(); - - HSimulate* instr = - new(zone()) HSimulate(ast_id, pop_count, zone(), removable); -#ifdef DEBUG - instr->set_closure(environment->closure()); -#endif - // Order of pushed values: newest (top of stack) first. This allows - // HSimulate::MergeWith() to easily append additional pushed values - // that are older (from further down the stack). 
- for (int i = 0; i < push_count; ++i) { - instr->AddPushedValue(environment->ExpressionStackAt(i)); - } - for (GrowableBitVector::Iterator it(environment->assigned_variables(), - zone()); - !it.Done(); - it.Advance()) { - int index = it.Current(); - instr->AddAssignedValue(index, environment->Lookup(index)); - } - environment->ClearHistory(); - return instr; -} - - -void HBasicBlock::Finish(HControlInstruction* end, SourcePosition position) { - DCHECK(!IsFinished()); - AddInstruction(end, position); - end_ = end; - for (HSuccessorIterator it(end); !it.Done(); it.Advance()) { - it.Current()->RegisterPredecessor(this); - } -} - - -void HBasicBlock::Goto(HBasicBlock* block, SourcePosition position, - bool add_simulate) { - if (add_simulate) AddNewSimulate(BailoutId::None(), position); - HGoto* instr = new(zone()) HGoto(block); - Finish(instr, position); -} - - -void HBasicBlock::SetInitialEnvironment(HEnvironment* env) { - DCHECK(!HasEnvironment()); - DCHECK(first() == NULL); - UpdateEnvironment(env); -} - - -void HBasicBlock::UpdateEnvironment(HEnvironment* env) { - last_environment_ = env; - graph()->update_maximum_environment_size(env->first_expression_index()); -} - - -void HBasicBlock::SetJoinId(BailoutId ast_id) { - int length = predecessors_.length(); - DCHECK(length > 0); - for (int i = 0; i < length; i++) { - HBasicBlock* predecessor = predecessors_[i]; - DCHECK(predecessor->end()->IsGoto()); - HSimulate* simulate = HSimulate::cast(predecessor->end()->previous()); - simulate->set_ast_id(ast_id); - predecessor->last_environment()->set_ast_id(ast_id); - } -} - - -bool HBasicBlock::Dominates(HBasicBlock* other) const { - HBasicBlock* current = other->dominator(); - while (current != NULL) { - if (current == this) return true; - current = current->dominator(); - } - return false; -} - - -bool HBasicBlock::EqualToOrDominates(HBasicBlock* other) const { - if (this == other) return true; - return Dominates(other); -} - - -int HBasicBlock::LoopNestingDepth() const { - const HBasicBlock* current = this; - int result = (current->IsLoopHeader()) ? 1 : 0; - while (current->parent_loop_header() != NULL) { - current = current->parent_loop_header(); - result++; - } - return result; -} - - -void HBasicBlock::MarkSuccEdgeUnreachable(int succ) { - DCHECK(IsFinished()); - HBasicBlock* succ_block = end()->SuccessorAt(succ); - - DCHECK(succ_block->predecessors()->length() == 1); - succ_block->MarkUnreachable(); -} - - -void HBasicBlock::RegisterPredecessor(HBasicBlock* pred) { - if (HasPredecessor()) { - // Only loop header blocks can have a predecessor added after - // instructions have been added to the block (they have phis for all - // values in the environment, these phis may be eliminated later). - DCHECK(IsLoopHeader() || first_ == NULL); - HEnvironment* incoming_env = pred->last_environment(); - if (IsLoopHeader()) { - DCHECK_EQ(phis()->length(), incoming_env->length()); - for (int i = 0; i < phis_.length(); ++i) { - phis_[i]->AddInput(incoming_env->values()->at(i)); - } - } else { - last_environment()->AddIncomingEdge(this, pred->last_environment()); - } - } else if (!HasEnvironment() && !IsFinished()) { - DCHECK(!IsLoopHeader()); - SetInitialEnvironment(pred->last_environment()->Copy()); - } - - predecessors_.Add(pred, zone()); -} - - -void HBasicBlock::AddDominatedBlock(HBasicBlock* block) { - DCHECK(!dominated_blocks_.Contains(block)); - // Keep the list of dominated blocks sorted such that if there is two - // succeeding block in this list, the predecessor is before the successor. 
- int index = 0; - while (index < dominated_blocks_.length() && - dominated_blocks_[index]->block_id() < block->block_id()) { - ++index; - } - dominated_blocks_.InsertAt(index, block, zone()); -} - - -void HBasicBlock::AssignCommonDominator(HBasicBlock* other) { - if (dominator_ == NULL) { - dominator_ = other; - other->AddDominatedBlock(this); - } else if (other->dominator() != NULL) { - HBasicBlock* first = dominator_; - HBasicBlock* second = other; - - while (first != second) { - if (first->block_id() > second->block_id()) { - first = first->dominator(); - } else { - second = second->dominator(); - } - DCHECK(first != NULL && second != NULL); - } - - if (dominator_ != first) { - DCHECK(dominator_->dominated_blocks_.Contains(this)); - dominator_->dominated_blocks_.RemoveElement(this); - dominator_ = first; - first->AddDominatedBlock(this); - } - } -} - - -void HBasicBlock::AssignLoopSuccessorDominators() { - // Mark blocks that dominate all subsequent reachable blocks inside their - // loop. Exploit the fact that blocks are sorted in reverse post order. When - // the loop is visited in increasing block id order, if the number of - // non-loop-exiting successor edges at the dominator_candidate block doesn't - // exceed the number of previously encountered predecessor edges, there is no - // path from the loop header to any block with higher id that doesn't go - // through the dominator_candidate block. In this case, the - // dominator_candidate block is guaranteed to dominate all blocks reachable - // from it with higher ids. - HBasicBlock* last = loop_information()->GetLastBackEdge(); - int outstanding_successors = 1; // one edge from the pre-header - // Header always dominates everything. - MarkAsLoopSuccessorDominator(); - for (int j = block_id(); j <= last->block_id(); ++j) { - HBasicBlock* dominator_candidate = graph_->blocks()->at(j); - for (HPredecessorIterator it(dominator_candidate); !it.Done(); - it.Advance()) { - HBasicBlock* predecessor = it.Current(); - // Don't count back edges. - if (predecessor->block_id() < dominator_candidate->block_id()) { - outstanding_successors--; - } - } - - // If more successors than predecessors have been seen in the loop up to - // now, it's not possible to guarantee that the current block dominates - // all of the blocks with higher IDs. In this case, assume conservatively - // that those paths through loop that don't go through the current block - // contain all of the loop's dependencies. Also be careful to record - // dominator information about the current loop that's being processed, - // and not nested loops, which will be processed when - // AssignLoopSuccessorDominators gets called on their header. - DCHECK(outstanding_successors >= 0); - HBasicBlock* parent_loop_header = dominator_candidate->parent_loop_header(); - if (outstanding_successors == 0 && - (parent_loop_header == this && !dominator_candidate->IsLoopHeader())) { - dominator_candidate->MarkAsLoopSuccessorDominator(); - } - HControlInstruction* end = dominator_candidate->end(); - for (HSuccessorIterator it(end); !it.Done(); it.Advance()) { - HBasicBlock* successor = it.Current(); - // Only count successors that remain inside the loop and don't loop back - // to a loop header. - if (successor->block_id() > dominator_candidate->block_id() && - successor->block_id() <= last->block_id()) { - // Backwards edges must land on loop headers. 
- DCHECK(successor->block_id() > dominator_candidate->block_id() || - successor->IsLoopHeader()); - outstanding_successors++; - } - } - } -} - - -int HBasicBlock::PredecessorIndexOf(HBasicBlock* predecessor) const { - for (int i = 0; i < predecessors_.length(); ++i) { - if (predecessors_[i] == predecessor) return i; - } - UNREACHABLE(); -} - - -#ifdef DEBUG -void HBasicBlock::Verify() { - // Check that every block is finished. - DCHECK(IsFinished()); - DCHECK(block_id() >= 0); - - // Check that the incoming edges are in edge split form. - if (predecessors_.length() > 1) { - for (int i = 0; i < predecessors_.length(); ++i) { - DCHECK(predecessors_[i]->end()->SecondSuccessor() == NULL); - } - } -} -#endif - - -void HLoopInformation::RegisterBackEdge(HBasicBlock* block) { - this->back_edges_.Add(block, block->zone()); - AddBlock(block); -} - - -HBasicBlock* HLoopInformation::GetLastBackEdge() const { - int max_id = -1; - HBasicBlock* result = NULL; - for (int i = 0; i < back_edges_.length(); ++i) { - HBasicBlock* cur = back_edges_[i]; - if (cur->block_id() > max_id) { - max_id = cur->block_id(); - result = cur; - } - } - return result; -} - - -void HLoopInformation::AddBlock(HBasicBlock* block) { - if (block == loop_header()) return; - if (block->parent_loop_header() == loop_header()) return; - if (block->parent_loop_header() != NULL) { - AddBlock(block->parent_loop_header()); - } else { - block->set_parent_loop_header(loop_header()); - blocks_.Add(block, block->zone()); - for (int i = 0; i < block->predecessors()->length(); ++i) { - AddBlock(block->predecessors()->at(i)); - } - } -} - - -#ifdef DEBUG - -// Checks reachability of the blocks in this graph and stores a bit in -// the BitVector "reachable()" for every block that can be reached -// from the start block of the graph. If "dont_visit" is non-null, the given -// block is treated as if it would not be part of the graph. "visited_count()" -// returns the number of reachable blocks. -class ReachabilityAnalyzer BASE_EMBEDDED { - public: - ReachabilityAnalyzer(HBasicBlock* entry_block, - int block_count, - HBasicBlock* dont_visit) - : visited_count_(0), - stack_(16, entry_block->zone()), - reachable_(block_count, entry_block->zone()), - dont_visit_(dont_visit) { - PushBlock(entry_block); - Analyze(); - } - - int visited_count() const { return visited_count_; } - const BitVector* reachable() const { return &reachable_; } - - private: - void PushBlock(HBasicBlock* block) { - if (block != NULL && block != dont_visit_ && - !reachable_.Contains(block->block_id())) { - reachable_.Add(block->block_id()); - stack_.Add(block, block->zone()); - visited_count_++; - } - } - - void Analyze() { - while (!stack_.is_empty()) { - HControlInstruction* end = stack_.RemoveLast()->end(); - for (HSuccessorIterator it(end); !it.Done(); it.Advance()) { - PushBlock(it.Current()); - } - } - } - - int visited_count_; - ZoneList stack_; - BitVector reachable_; - HBasicBlock* dont_visit_; -}; - - -void HGraph::Verify(bool do_full_verify) const { - base::LockGuard guard(isolate()->heap()->relocation_mutex()); - AllowHandleDereference allow_deref; - AllowDeferredHandleDereference allow_deferred_deref; - for (int i = 0; i < blocks_.length(); i++) { - HBasicBlock* block = blocks_.at(i); - - block->Verify(); - - // Check that every block contains at least one node and that only the last - // node is a control instruction. 
- HInstruction* current = block->first(); - DCHECK(current != NULL && current->IsBlockEntry()); - while (current != NULL) { - DCHECK((current->next() == NULL) == current->IsControlInstruction()); - DCHECK(current->block() == block); - current->Verify(); - current = current->next(); - } - - // Check that successors are correctly set. - HBasicBlock* first = block->end()->FirstSuccessor(); - HBasicBlock* second = block->end()->SecondSuccessor(); - DCHECK(second == NULL || first != NULL); - - // Check that the predecessor array is correct. - if (first != NULL) { - DCHECK(first->predecessors()->Contains(block)); - if (second != NULL) { - DCHECK(second->predecessors()->Contains(block)); - } - } - - // Check that phis have correct arguments. - for (int j = 0; j < block->phis()->length(); j++) { - HPhi* phi = block->phis()->at(j); - phi->Verify(); - } - - // Check that all join blocks have predecessors that end with an - // unconditional goto and agree on their environment node id. - if (block->predecessors()->length() >= 2) { - BailoutId id = - block->predecessors()->first()->last_environment()->ast_id(); - for (int k = 0; k < block->predecessors()->length(); k++) { - HBasicBlock* predecessor = block->predecessors()->at(k); - DCHECK(predecessor->end()->IsGoto() || - predecessor->end()->IsDeoptimize()); - DCHECK(predecessor->last_environment()->ast_id() == id); - } - } - } - - // Check special property of first block to have no predecessors. - DCHECK(blocks_.at(0)->predecessors()->is_empty()); - - if (do_full_verify) { - // Check that the graph is fully connected. - ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL); - DCHECK(analyzer.visited_count() == blocks_.length()); - - // Check that entry block dominator is NULL. - DCHECK(entry_block_->dominator() == NULL); - - // Check dominators. - for (int i = 0; i < blocks_.length(); ++i) { - HBasicBlock* block = blocks_.at(i); - if (block->dominator() == NULL) { - // Only start block may have no dominator assigned to. - DCHECK(i == 0); - } else { - // Assert that block is unreachable if dominator must not be visited. - ReachabilityAnalyzer dominator_analyzer(entry_block_, - blocks_.length(), - block->dominator()); - DCHECK(!dominator_analyzer.reachable()->Contains(block->block_id())); - } - } - } -} - -#endif - - -HConstant* HGraph::GetConstant(SetOncePointer* pointer, - int32_t value) { - if (!pointer->is_set()) { - // Can't pass GetInvalidContext() to HConstant::New, because that will - // recursively call GetConstant - HConstant* constant = HConstant::New(isolate(), zone(), NULL, value); - constant->InsertAfter(entry_block()->first()); - pointer->set(constant); - return constant; - } - return ReinsertConstantIfNecessary(pointer->get()); -} - - -HConstant* HGraph::ReinsertConstantIfNecessary(HConstant* constant) { - if (!constant->IsLinked()) { - // The constant was removed from the graph. Reinsert. - constant->ClearFlag(HValue::kIsDead); - constant->InsertAfter(entry_block()->first()); - } - return constant; -} - - -HConstant* HGraph::GetConstant0() { - return GetConstant(&constant_0_, 0); -} - - -HConstant* HGraph::GetConstant1() { - return GetConstant(&constant_1_, 1); -} - - -HConstant* HGraph::GetConstantMinus1() { - return GetConstant(&constant_minus1_, -1); -} - - -HConstant* HGraph::GetConstantBool(bool value) { - return value ? 
GetConstantTrue() : GetConstantFalse(); -} - -#define DEFINE_GET_CONSTANT(Name, name, constant, type, htype, boolean_value, \ - undetectable) \ - HConstant* HGraph::GetConstant##Name() { \ - if (!constant_##name##_.is_set()) { \ - HConstant* constant = new (zone()) HConstant( \ - Unique::CreateImmovable(isolate()->factory()->constant()), \ - Unique::CreateImmovable(isolate()->factory()->type##_map()), \ - false, Representation::Tagged(), htype, true, boolean_value, \ - undetectable, ODDBALL_TYPE); \ - constant->InsertAfter(entry_block()->first()); \ - constant_##name##_.set(constant); \ - } \ - return ReinsertConstantIfNecessary(constant_##name##_.get()); \ - } - -DEFINE_GET_CONSTANT(Undefined, undefined, undefined_value, undefined, - HType::Undefined(), false, true) -DEFINE_GET_CONSTANT(True, true, true_value, boolean, HType::Boolean(), true, - false) -DEFINE_GET_CONSTANT(False, false, false_value, boolean, HType::Boolean(), false, - false) -DEFINE_GET_CONSTANT(Hole, the_hole, the_hole_value, the_hole, HType::None(), - false, false) -DEFINE_GET_CONSTANT(Null, null, null_value, null, HType::Null(), false, true) -DEFINE_GET_CONSTANT(OptimizedOut, optimized_out, optimized_out, optimized_out, - HType::None(), false, false) - -#undef DEFINE_GET_CONSTANT - -#define DEFINE_IS_CONSTANT(Name, name) \ -bool HGraph::IsConstant##Name(HConstant* constant) { \ - return constant_##name##_.is_set() && constant == constant_##name##_.get(); \ -} -DEFINE_IS_CONSTANT(Undefined, undefined) -DEFINE_IS_CONSTANT(0, 0) -DEFINE_IS_CONSTANT(1, 1) -DEFINE_IS_CONSTANT(Minus1, minus1) -DEFINE_IS_CONSTANT(True, true) -DEFINE_IS_CONSTANT(False, false) -DEFINE_IS_CONSTANT(Hole, the_hole) -DEFINE_IS_CONSTANT(Null, null) - -#undef DEFINE_IS_CONSTANT - - -HConstant* HGraph::GetInvalidContext() { - return GetConstant(&constant_invalid_context_, 0xFFFFC0C7); -} - - -bool HGraph::IsStandardConstant(HConstant* constant) { - if (IsConstantUndefined(constant)) return true; - if (IsConstant0(constant)) return true; - if (IsConstant1(constant)) return true; - if (IsConstantMinus1(constant)) return true; - if (IsConstantTrue(constant)) return true; - if (IsConstantFalse(constant)) return true; - if (IsConstantHole(constant)) return true; - if (IsConstantNull(constant)) return true; - return false; -} - - -HGraphBuilder::IfBuilder::IfBuilder() : builder_(NULL), needs_compare_(true) {} - - -HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder) - : needs_compare_(true) { - Initialize(builder); -} - - -HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder, - HIfContinuation* continuation) - : needs_compare_(false), first_true_block_(NULL), first_false_block_(NULL) { - InitializeDontCreateBlocks(builder); - continuation->Continue(&first_true_block_, &first_false_block_); -} - - -void HGraphBuilder::IfBuilder::InitializeDontCreateBlocks( - HGraphBuilder* builder) { - builder_ = builder; - finished_ = false; - did_then_ = false; - did_else_ = false; - did_else_if_ = false; - did_and_ = false; - did_or_ = false; - captured_ = false; - pending_merge_block_ = false; - split_edge_merge_block_ = NULL; - merge_at_join_blocks_ = NULL; - normal_merge_at_join_block_count_ = 0; - deopt_merge_at_join_block_count_ = 0; -} - - -void HGraphBuilder::IfBuilder::Initialize(HGraphBuilder* builder) { - InitializeDontCreateBlocks(builder); - HEnvironment* env = builder->environment(); - first_true_block_ = builder->CreateBasicBlock(env->Copy()); - first_false_block_ = builder->CreateBasicBlock(env->Copy()); -} - - -HControlInstruction* 
HGraphBuilder::IfBuilder::AddCompare( - HControlInstruction* compare) { - DCHECK(did_then_ == did_else_); - if (did_else_) { - // Handle if-then-elseif - did_else_if_ = true; - did_else_ = false; - did_then_ = false; - did_and_ = false; - did_or_ = false; - pending_merge_block_ = false; - split_edge_merge_block_ = NULL; - HEnvironment* env = builder()->environment(); - first_true_block_ = builder()->CreateBasicBlock(env->Copy()); - first_false_block_ = builder()->CreateBasicBlock(env->Copy()); - } - if (split_edge_merge_block_ != NULL) { - HEnvironment* env = first_false_block_->last_environment(); - HBasicBlock* split_edge = builder()->CreateBasicBlock(env->Copy()); - if (did_or_) { - compare->SetSuccessorAt(0, split_edge); - compare->SetSuccessorAt(1, first_false_block_); - } else { - compare->SetSuccessorAt(0, first_true_block_); - compare->SetSuccessorAt(1, split_edge); - } - builder()->GotoNoSimulate(split_edge, split_edge_merge_block_); - } else { - compare->SetSuccessorAt(0, first_true_block_); - compare->SetSuccessorAt(1, first_false_block_); - } - builder()->FinishCurrentBlock(compare); - needs_compare_ = false; - return compare; -} - - -void HGraphBuilder::IfBuilder::Or() { - DCHECK(!needs_compare_); - DCHECK(!did_and_); - did_or_ = true; - HEnvironment* env = first_false_block_->last_environment(); - if (split_edge_merge_block_ == NULL) { - split_edge_merge_block_ = builder()->CreateBasicBlock(env->Copy()); - builder()->GotoNoSimulate(first_true_block_, split_edge_merge_block_); - first_true_block_ = split_edge_merge_block_; - } - builder()->set_current_block(first_false_block_); - first_false_block_ = builder()->CreateBasicBlock(env->Copy()); -} - - -void HGraphBuilder::IfBuilder::And() { - DCHECK(!needs_compare_); - DCHECK(!did_or_); - did_and_ = true; - HEnvironment* env = first_false_block_->last_environment(); - if (split_edge_merge_block_ == NULL) { - split_edge_merge_block_ = builder()->CreateBasicBlock(env->Copy()); - builder()->GotoNoSimulate(first_false_block_, split_edge_merge_block_); - first_false_block_ = split_edge_merge_block_; - } - builder()->set_current_block(first_true_block_); - first_true_block_ = builder()->CreateBasicBlock(env->Copy()); -} - - -void HGraphBuilder::IfBuilder::CaptureContinuation( - HIfContinuation* continuation) { - DCHECK(!did_else_if_); - DCHECK(!finished_); - DCHECK(!captured_); - - HBasicBlock* true_block = NULL; - HBasicBlock* false_block = NULL; - Finish(&true_block, &false_block); - DCHECK(true_block != NULL); - DCHECK(false_block != NULL); - continuation->Capture(true_block, false_block); - captured_ = true; - builder()->set_current_block(NULL); - End(); -} - - -void HGraphBuilder::IfBuilder::JoinContinuation(HIfContinuation* continuation) { - DCHECK(!did_else_if_); - DCHECK(!finished_); - DCHECK(!captured_); - HBasicBlock* true_block = NULL; - HBasicBlock* false_block = NULL; - Finish(&true_block, &false_block); - merge_at_join_blocks_ = NULL; - if (true_block != NULL && !true_block->IsFinished()) { - DCHECK(continuation->IsTrueReachable()); - builder()->GotoNoSimulate(true_block, continuation->true_branch()); - } - if (false_block != NULL && !false_block->IsFinished()) { - DCHECK(continuation->IsFalseReachable()); - builder()->GotoNoSimulate(false_block, continuation->false_branch()); - } - captured_ = true; - End(); -} - - -void HGraphBuilder::IfBuilder::Then() { - DCHECK(!captured_); - DCHECK(!finished_); - did_then_ = true; - if (needs_compare_) { - // Handle if's without any expressions, they jump directly to the "else" - // 
branch. However, we must pretend that the "then" branch is reachable, - // so that the graph builder visits it and sees any live range extending - // constructs within it. - HConstant* constant_false = builder()->graph()->GetConstantFalse(); - ToBooleanHints boolean_type = ToBooleanHint::kBoolean; - HBranch* branch = builder()->New( - constant_false, boolean_type, first_true_block_, first_false_block_); - builder()->FinishCurrentBlock(branch); - } - builder()->set_current_block(first_true_block_); - pending_merge_block_ = true; -} - - -void HGraphBuilder::IfBuilder::Else() { - DCHECK(did_then_); - DCHECK(!captured_); - DCHECK(!finished_); - AddMergeAtJoinBlock(false); - builder()->set_current_block(first_false_block_); - pending_merge_block_ = true; - did_else_ = true; -} - -void HGraphBuilder::IfBuilder::Deopt(DeoptimizeReason reason) { - DCHECK(did_then_); - builder()->Add(reason, Deoptimizer::EAGER); - AddMergeAtJoinBlock(true); -} - - -void HGraphBuilder::IfBuilder::Return(HValue* value) { - HValue* parameter_count = builder()->graph()->GetConstantMinus1(); - builder()->FinishExitCurrentBlock( - builder()->New(value, parameter_count)); - AddMergeAtJoinBlock(false); -} - - -void HGraphBuilder::IfBuilder::AddMergeAtJoinBlock(bool deopt) { - if (!pending_merge_block_) return; - HBasicBlock* block = builder()->current_block(); - DCHECK(block == NULL || !block->IsFinished()); - MergeAtJoinBlock* record = new (builder()->zone()) - MergeAtJoinBlock(block, deopt, merge_at_join_blocks_); - merge_at_join_blocks_ = record; - if (block != NULL) { - DCHECK(block->end() == NULL); - if (deopt) { - normal_merge_at_join_block_count_++; - } else { - deopt_merge_at_join_block_count_++; - } - } - builder()->set_current_block(NULL); - pending_merge_block_ = false; -} - - -void HGraphBuilder::IfBuilder::Finish() { - DCHECK(!finished_); - if (!did_then_) { - Then(); - } - AddMergeAtJoinBlock(false); - if (!did_else_) { - Else(); - AddMergeAtJoinBlock(false); - } - finished_ = true; -} - - -void HGraphBuilder::IfBuilder::Finish(HBasicBlock** then_continuation, - HBasicBlock** else_continuation) { - Finish(); - - MergeAtJoinBlock* else_record = merge_at_join_blocks_; - if (else_continuation != NULL) { - *else_continuation = else_record->block_; - } - MergeAtJoinBlock* then_record = else_record->next_; - if (then_continuation != NULL) { - *then_continuation = then_record->block_; - } - DCHECK(then_record->next_ == NULL); -} - - -void HGraphBuilder::IfBuilder::EndUnreachable() { - if (captured_) return; - Finish(); - builder()->set_current_block(nullptr); -} - - -void HGraphBuilder::IfBuilder::End() { - if (captured_) return; - Finish(); - - int total_merged_blocks = normal_merge_at_join_block_count_ + - deopt_merge_at_join_block_count_; - DCHECK(total_merged_blocks >= 1); - HBasicBlock* merge_block = - total_merged_blocks == 1 ? NULL : builder()->graph()->CreateBasicBlock(); - - // Merge non-deopt blocks first to ensure environment has right size for - // padding. - MergeAtJoinBlock* current = merge_at_join_blocks_; - while (current != NULL) { - if (!current->deopt_ && current->block_ != NULL) { - // If there is only one block that makes it through to the end of the - // if, then just set it as the current block and continue rather then - // creating an unnecessary merge block. 
- if (total_merged_blocks == 1) { - builder()->set_current_block(current->block_); - return; - } - builder()->GotoNoSimulate(current->block_, merge_block); - } - current = current->next_; - } - - // Merge deopt blocks, padding when necessary. - current = merge_at_join_blocks_; - while (current != NULL) { - if (current->deopt_ && current->block_ != NULL) { - current->block_->FinishExit( - HAbnormalExit::New(builder()->isolate(), builder()->zone(), NULL), - SourcePosition::Unknown()); - } - current = current->next_; - } - builder()->set_current_block(merge_block); -} - - -HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder) { - Initialize(builder, NULL, kWhileTrue, NULL); -} - - -HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder, HValue* context, - LoopBuilder::Direction direction) { - Initialize(builder, context, direction, builder->graph()->GetConstant1()); -} - - -HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder, HValue* context, - LoopBuilder::Direction direction, - HValue* increment_amount) { - Initialize(builder, context, direction, increment_amount); - increment_amount_ = increment_amount; -} - - -void HGraphBuilder::LoopBuilder::Initialize(HGraphBuilder* builder, - HValue* context, - Direction direction, - HValue* increment_amount) { - builder_ = builder; - context_ = context; - direction_ = direction; - increment_amount_ = increment_amount; - - finished_ = false; - header_block_ = builder->CreateLoopHeaderBlock(); - body_block_ = NULL; - exit_block_ = NULL; - exit_trampoline_block_ = NULL; -} - - -HValue* HGraphBuilder::LoopBuilder::BeginBody( - HValue* initial, - HValue* terminating, - Token::Value token) { - DCHECK(direction_ != kWhileTrue); - HEnvironment* env = builder_->environment(); - phi_ = header_block_->AddNewPhi(env->values()->length()); - phi_->AddInput(initial); - env->Push(initial); - builder_->GotoNoSimulate(header_block_); - - HEnvironment* body_env = env->Copy(); - HEnvironment* exit_env = env->Copy(); - // Remove the phi from the expression stack - body_env->Pop(); - exit_env->Pop(); - body_block_ = builder_->CreateBasicBlock(body_env); - exit_block_ = builder_->CreateBasicBlock(exit_env); - - builder_->set_current_block(header_block_); - env->Pop(); - builder_->FinishCurrentBlock(builder_->New( - phi_, terminating, token, body_block_, exit_block_)); - - builder_->set_current_block(body_block_); - if (direction_ == kPreIncrement || direction_ == kPreDecrement) { - Isolate* isolate = builder_->isolate(); - HValue* one = builder_->graph()->GetConstant1(); - if (direction_ == kPreIncrement) { - increment_ = HAdd::New(isolate, zone(), context_, phi_, one); - } else { - increment_ = HSub::New(isolate, zone(), context_, phi_, one); - } - increment_->ClearFlag(HValue::kCanOverflow); - builder_->AddInstruction(increment_); - return increment_; - } else { - return phi_; - } -} - - -void HGraphBuilder::LoopBuilder::BeginBody(int drop_count) { - DCHECK(direction_ == kWhileTrue); - HEnvironment* env = builder_->environment(); - builder_->GotoNoSimulate(header_block_); - builder_->set_current_block(header_block_); - env->Drop(drop_count); -} - - -void HGraphBuilder::LoopBuilder::Break() { - if (exit_trampoline_block_ == NULL) { - // Its the first time we saw a break. 
- if (direction_ == kWhileTrue) { - HEnvironment* env = builder_->environment()->Copy(); - exit_trampoline_block_ = builder_->CreateBasicBlock(env); - } else { - HEnvironment* env = exit_block_->last_environment()->Copy(); - exit_trampoline_block_ = builder_->CreateBasicBlock(env); - builder_->GotoNoSimulate(exit_block_, exit_trampoline_block_); - } - } - - builder_->GotoNoSimulate(exit_trampoline_block_); - builder_->set_current_block(NULL); -} - - -void HGraphBuilder::LoopBuilder::EndBody() { - DCHECK(!finished_); - - if (direction_ == kPostIncrement || direction_ == kPostDecrement) { - Isolate* isolate = builder_->isolate(); - if (direction_ == kPostIncrement) { - increment_ = - HAdd::New(isolate, zone(), context_, phi_, increment_amount_); - } else { - increment_ = - HSub::New(isolate, zone(), context_, phi_, increment_amount_); - } - increment_->ClearFlag(HValue::kCanOverflow); - builder_->AddInstruction(increment_); - } - - if (direction_ != kWhileTrue) { - // Push the new increment value on the expression stack to merge into - // the phi. - builder_->environment()->Push(increment_); - } - HBasicBlock* last_block = builder_->current_block(); - builder_->GotoNoSimulate(last_block, header_block_); - header_block_->loop_information()->RegisterBackEdge(last_block); - - if (exit_trampoline_block_ != NULL) { - builder_->set_current_block(exit_trampoline_block_); - } else { - builder_->set_current_block(exit_block_); - } - finished_ = true; -} - - -HGraph* HGraphBuilder::CreateGraph() { - DCHECK(!FLAG_minimal); - graph_ = new (zone()) HGraph(info_, descriptor_); - if (FLAG_hydrogen_stats) isolate()->GetHStatistics()->Initialize(info_); - CompilationPhase phase("H_Block building", info_); - set_current_block(graph()->entry_block()); - if (!BuildGraph()) return NULL; - graph()->FinalizeUniqueness(); - return graph_; -} - - -HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) { - DCHECK(current_block() != NULL); - DCHECK(!FLAG_hydrogen_track_positions || position_.IsKnown() || - !info_->IsOptimizing()); - current_block()->AddInstruction(instr, source_position()); - if (graph()->IsInsideNoSideEffectsScope()) { - instr->SetFlag(HValue::kHasNoObservableSideEffects); - } - return instr; -} - - -void HGraphBuilder::FinishCurrentBlock(HControlInstruction* last) { - DCHECK(!FLAG_hydrogen_track_positions || !info_->IsOptimizing() || - position_.IsKnown()); - current_block()->Finish(last, source_position()); - if (last->IsReturn() || last->IsAbnormalExit()) { - set_current_block(NULL); - } -} - - -void HGraphBuilder::FinishExitCurrentBlock(HControlInstruction* instruction) { - DCHECK(!FLAG_hydrogen_track_positions || !info_->IsOptimizing() || - position_.IsKnown()); - current_block()->FinishExit(instruction, source_position()); - if (instruction->IsReturn() || instruction->IsAbnormalExit()) { - set_current_block(NULL); - } -} - - -void HGraphBuilder::AddIncrementCounter(StatsCounter* counter) { - if (FLAG_native_code_counters && counter->Enabled()) { - HValue* reference = Add(ExternalReference(counter)); - HValue* old_value = - Add(reference, nullptr, HObjectAccess::ForCounter()); - HValue* new_value = AddUncasted(old_value, graph()->GetConstant1()); - new_value->ClearFlag(HValue::kCanOverflow); // Ignore counter overflow - Add(reference, HObjectAccess::ForCounter(), - new_value, STORE_TO_INITIALIZED_ENTRY); - } -} - - -void HGraphBuilder::AddSimulate(BailoutId id, - RemovableSimulate removable) { - DCHECK(current_block() != NULL); - DCHECK(!graph()->IsInsideNoSideEffectsScope()); - 
current_block()->AddNewSimulate(id, source_position(), removable); -} - - -HBasicBlock* HGraphBuilder::CreateBasicBlock(HEnvironment* env) { - HBasicBlock* b = graph()->CreateBasicBlock(); - b->SetInitialEnvironment(env); - return b; -} - - -HBasicBlock* HGraphBuilder::CreateLoopHeaderBlock() { - HBasicBlock* header = graph()->CreateBasicBlock(); - HEnvironment* entry_env = environment()->CopyAsLoopHeader(header); - header->SetInitialEnvironment(entry_env); - header->AttachLoopInformation(); - return header; -} - - -HValue* HGraphBuilder::BuildGetElementsKind(HValue* object) { - HValue* map = Add(object, nullptr, HObjectAccess::ForMap()); - - HValue* bit_field2 = - Add(map, nullptr, HObjectAccess::ForMapBitField2()); - return BuildDecodeField(bit_field2); -} - - -HValue* HGraphBuilder::BuildEnumLength(HValue* map) { - NoObservableSideEffectsScope scope(this); - HValue* bit_field3 = - Add(map, nullptr, HObjectAccess::ForMapBitField3()); - return BuildDecodeField(bit_field3); -} - - -HValue* HGraphBuilder::BuildCheckHeapObject(HValue* obj) { - if (obj->type().IsHeapObject()) return obj; - return Add(obj); -} - -void HGraphBuilder::FinishExitWithHardDeoptimization(DeoptimizeReason reason) { - Add(reason, Deoptimizer::EAGER); - FinishExitCurrentBlock(New()); -} - - -HValue* HGraphBuilder::BuildCheckString(HValue* string) { - if (!string->type().IsString()) { - DCHECK(!string->IsConstant() || - !HConstant::cast(string)->HasStringValue()); - BuildCheckHeapObject(string); - return Add(string, HCheckInstanceType::IS_STRING); - } - return string; -} - -HValue* HGraphBuilder::BuildWrapReceiver(HValue* object, HValue* checked) { - if (object->type().IsJSObject()) return object; - HValue* function = checked->ActualValue(); - if (function->IsConstant() && - HConstant::cast(function)->handle(isolate())->IsJSFunction()) { - Handle f = Handle::cast( - HConstant::cast(function)->handle(isolate())); - SharedFunctionInfo* shared = f->shared(); - if (is_strict(shared->language_mode()) || shared->native()) return object; - } - return Add(object, checked); -} - - -HValue* HGraphBuilder::BuildCheckAndGrowElementsCapacity( - HValue* object, HValue* elements, ElementsKind kind, HValue* length, - HValue* capacity, HValue* key) { - HValue* max_gap = Add(static_cast(JSObject::kMaxGap)); - HValue* max_capacity = AddUncasted(capacity, max_gap); - Add(key, max_capacity); - - HValue* new_capacity = BuildNewElementsCapacity(key); - HValue* new_elements = BuildGrowElementsCapacity(object, elements, kind, kind, - length, new_capacity); - return new_elements; -} - - -HValue* HGraphBuilder::BuildCheckForCapacityGrow( - HValue* object, - HValue* elements, - ElementsKind kind, - HValue* length, - HValue* key, - bool is_js_array, - PropertyAccessType access_type) { - IfBuilder length_checker(this); - - Token::Value token = IsHoleyElementsKind(kind) ? 
Token::GTE : Token::EQ; - length_checker.If(key, length, token); - - length_checker.Then(); - - HValue* current_capacity = AddLoadFixedArrayLength(elements); - - if (top_info()->IsStub()) { - IfBuilder capacity_checker(this); - capacity_checker.If(key, current_capacity, - Token::GTE); - capacity_checker.Then(); - HValue* new_elements = BuildCheckAndGrowElementsCapacity( - object, elements, kind, length, current_capacity, key); - environment()->Push(new_elements); - capacity_checker.Else(); - environment()->Push(elements); - capacity_checker.End(); - } else { - HValue* result = Add( - object, elements, key, current_capacity, is_js_array, kind); - environment()->Push(result); - } - - if (is_js_array) { - HValue* new_length = AddUncasted(key, graph_->GetConstant1()); - new_length->ClearFlag(HValue::kCanOverflow); - - Add(object, HObjectAccess::ForArrayLength(kind), - new_length); - } - - if (access_type == STORE && kind == FAST_SMI_ELEMENTS) { - HValue* checked_elements = environment()->Top(); - - // Write zero to ensure that the new element is initialized with some smi. - Add(checked_elements, key, graph()->GetConstant0(), nullptr, - kind); - } - - length_checker.Else(); - Add(key, length); - - environment()->Push(elements); - length_checker.End(); - - return environment()->Pop(); -} - - -HValue* HGraphBuilder::BuildCopyElementsOnWrite(HValue* object, - HValue* elements, - ElementsKind kind, - HValue* length) { - Factory* factory = isolate()->factory(); - - IfBuilder cow_checker(this); - - cow_checker.If(elements, factory->fixed_cow_array_map()); - cow_checker.Then(); - - HValue* capacity = AddLoadFixedArrayLength(elements); - - HValue* new_elements = BuildGrowElementsCapacity(object, elements, kind, - kind, length, capacity); - - environment()->Push(new_elements); - - cow_checker.Else(); - - environment()->Push(elements); - - cow_checker.End(); - - return environment()->Pop(); -} - -HValue* HGraphBuilder::BuildNumberToString(HValue* object, AstType* type) { - NoObservableSideEffectsScope scope(this); - - // Convert constant numbers at compile time. - if (object->IsConstant() && HConstant::cast(object)->HasNumberValue()) { - Handle number = HConstant::cast(object)->handle(isolate()); - Handle result = isolate()->factory()->NumberToString(number); - return Add(result); - } - - // Create a joinable continuation. - HIfContinuation found(graph()->CreateBasicBlock(), - graph()->CreateBasicBlock()); - - // Load the number string cache. - HValue* number_string_cache = - Add(Heap::kNumberStringCacheRootIndex); - - // Make the hash mask from the length of the number string cache. It - // contains two elements (number and string) for each cache entry. - HValue* mask = AddLoadFixedArrayLength(number_string_cache); - mask->set_type(HType::Smi()); - mask = AddUncasted(mask, graph()->GetConstant1()); - mask = AddUncasted(mask, graph()->GetConstant1()); - - // Check whether object is a smi. - IfBuilder if_objectissmi(this); - if_objectissmi.If(object); - if_objectissmi.Then(); - { - // Compute hash for smi similar to smi_get_hash(). - HValue* hash = AddUncasted(Token::BIT_AND, object, mask); - - // Load the key. - HValue* key_index = AddUncasted(hash, graph()->GetConstant1()); - HValue* key = Add(number_string_cache, key_index, nullptr, - nullptr, FAST_ELEMENTS, ALLOW_RETURN_HOLE); - - // Check if object == key. - IfBuilder if_objectiskey(this); - if_objectiskey.If(object, key); - if_objectiskey.Then(); - { - // Make the key_index available. 
- Push(key_index); - } - if_objectiskey.JoinContinuation(&found); - } - if_objectissmi.Else(); - { - if (type->Is(AstType::SignedSmall())) { - if_objectissmi.Deopt(DeoptimizeReason::kExpectedSmi); - } else { - // Check if the object is a heap number. - IfBuilder if_objectisnumber(this); - HValue* objectisnumber = if_objectisnumber.If( - object, isolate()->factory()->heap_number_map()); - if_objectisnumber.Then(); - { - // Compute hash for heap number similar to double_get_hash(). - HValue* low = Add( - object, objectisnumber, - HObjectAccess::ForHeapNumberValueLowestBits()); - HValue* high = Add( - object, objectisnumber, - HObjectAccess::ForHeapNumberValueHighestBits()); - HValue* hash = AddUncasted(Token::BIT_XOR, low, high); - hash = AddUncasted(Token::BIT_AND, hash, mask); - - // Load the key. - HValue* key_index = AddUncasted(hash, graph()->GetConstant1()); - HValue* key = - Add(number_string_cache, key_index, nullptr, nullptr, - FAST_ELEMENTS, ALLOW_RETURN_HOLE); - - // Check if the key is a heap number and compare it with the object. - IfBuilder if_keyisnotsmi(this); - HValue* keyisnotsmi = if_keyisnotsmi.IfNot(key); - if_keyisnotsmi.Then(); - { - IfBuilder if_keyisheapnumber(this); - if_keyisheapnumber.If( - key, isolate()->factory()->heap_number_map()); - if_keyisheapnumber.Then(); - { - // Check if values of key and object match. - IfBuilder if_keyeqobject(this); - if_keyeqobject.If( - Add(key, keyisnotsmi, - HObjectAccess::ForHeapNumberValue()), - Add(object, objectisnumber, - HObjectAccess::ForHeapNumberValue()), - Token::EQ); - if_keyeqobject.Then(); - { - // Make the key_index available. - Push(key_index); - } - if_keyeqobject.JoinContinuation(&found); - } - if_keyisheapnumber.JoinContinuation(&found); - } - if_keyisnotsmi.JoinContinuation(&found); - } - if_objectisnumber.Else(); - { - if (type->Is(AstType::Number())) { - if_objectisnumber.Deopt(DeoptimizeReason::kExpectedHeapNumber); - } - } - if_objectisnumber.JoinContinuation(&found); - } - } - if_objectissmi.JoinContinuation(&found); - - // Check for cache hit. - IfBuilder if_found(this, &found); - if_found.Then(); - { - // Count number to string operation in native code. - AddIncrementCounter(isolate()->counters()->number_to_string_native()); - - // Load the value in case of cache hit. - HValue* key_index = Pop(); - HValue* value_index = AddUncasted(key_index, graph()->GetConstant1()); - Push(Add(number_string_cache, value_index, nullptr, nullptr, - FAST_ELEMENTS, ALLOW_RETURN_HOLE)); - } - if_found.Else(); - { - // Cache miss, fallback to runtime. - Add(object); - Push(Add( - Runtime::FunctionForId(Runtime::kNumberToStringSkipCache), - 1)); - } - if_found.End(); - - return Pop(); -} - -HValue* HGraphBuilder::BuildToNumber(HValue* input) { - if (input->type().IsTaggedNumber() || - input->representation().IsSpecialization()) { - return input; - } - Callable callable = Builtins::CallableFor(isolate(), Builtins::kToNumber); - HValue* stub = Add(callable.code()); - HValue* values[] = {input}; - HCallWithDescriptor* instr = Add( - stub, 0, callable.descriptor(), ArrayVector(values)); - instr->set_type(HType::TaggedNumber()); - return instr; -} - - -HValue* HGraphBuilder::BuildToObject(HValue* receiver) { - NoObservableSideEffectsScope scope(this); - - // Create a joinable continuation. 
- HIfContinuation wrap(graph()->CreateBasicBlock(), - graph()->CreateBasicBlock()); - - // Determine the proper global constructor function required to wrap - // {receiver} into a JSValue, unless {receiver} is already a {JSReceiver}, in - // which case we just return it. Deopts to Runtime::kToObject if {receiver} - // is undefined or null. - IfBuilder receiver_is_smi(this); - receiver_is_smi.If(receiver); - receiver_is_smi.Then(); - { - // Use global Number function. - Push(Add(Context::NUMBER_FUNCTION_INDEX)); - } - receiver_is_smi.Else(); - { - // Determine {receiver} map and instance type. - HValue* receiver_map = - Add(receiver, nullptr, HObjectAccess::ForMap()); - HValue* receiver_instance_type = Add( - receiver_map, nullptr, HObjectAccess::ForMapInstanceType()); - - // First check whether {receiver} is already a spec object (fast case). - IfBuilder receiver_is_not_spec_object(this); - receiver_is_not_spec_object.If( - receiver_instance_type, Add(FIRST_JS_RECEIVER_TYPE), - Token::LT); - receiver_is_not_spec_object.Then(); - { - // Load the constructor function index from the {receiver} map. - HValue* constructor_function_index = Add( - receiver_map, nullptr, - HObjectAccess::ForMapInObjectPropertiesOrConstructorFunctionIndex()); - - // Check if {receiver} has a constructor (null and undefined have no - // constructors, so we deoptimize to the runtime to throw an exception). - IfBuilder constructor_function_index_is_invalid(this); - constructor_function_index_is_invalid.If( - constructor_function_index, - Add(Map::kNoConstructorFunctionIndex), Token::EQ); - constructor_function_index_is_invalid.ThenDeopt( - DeoptimizeReason::kUndefinedOrNullInToObject); - constructor_function_index_is_invalid.End(); - - // Use the global constructor function. - Push(constructor_function_index); - } - receiver_is_not_spec_object.JoinContinuation(&wrap); - } - receiver_is_smi.JoinContinuation(&wrap); - - // Wrap the receiver if necessary. - IfBuilder if_wrap(this, &wrap); - if_wrap.Then(); - { - // Grab the constructor function index. - HValue* constructor_index = Pop(); - - // Load native context. - HValue* native_context = BuildGetNativeContext(); - - // Determine the initial map for the global constructor. - HValue* constructor = Add(native_context, constructor_index, - nullptr, nullptr, FAST_ELEMENTS); - HValue* constructor_initial_map = Add( - constructor, nullptr, HObjectAccess::ForPrototypeOrInitialMap()); - // Allocate and initialize a JSValue wrapper. - HValue* value = - BuildAllocate(Add(JSValue::kSize), HType::JSObject(), - JS_VALUE_TYPE, HAllocationMode()); - Add(value, HObjectAccess::ForMap(), - constructor_initial_map); - HValue* empty_fixed_array = Add(Heap::kEmptyFixedArrayRootIndex); - Add(value, HObjectAccess::ForPropertiesPointer(), - empty_fixed_array); - Add(value, HObjectAccess::ForElementsPointer(), - empty_fixed_array); - Add(value, HObjectAccess::ForObservableJSObjectOffset( - JSValue::kValueOffset), - receiver); - Push(value); - } - if_wrap.Else(); - { Push(receiver); } - if_wrap.End(); - return Pop(); -} - - -HAllocate* HGraphBuilder::BuildAllocate( - HValue* object_size, - HType type, - InstanceType instance_type, - HAllocationMode allocation_mode) { - // Compute the effective allocation size. - HValue* size = object_size; - if (allocation_mode.CreateAllocationMementos()) { - size = AddUncasted(size, Add(AllocationMemento::kSize)); - size->ClearFlag(HValue::kCanOverflow); - } - - // Perform the actual allocation. 
- HAllocate* object = Add( - size, type, allocation_mode.GetPretenureMode(), instance_type, - graph()->GetConstant0(), allocation_mode.feedback_site()); - - // Setup the allocation memento. - if (allocation_mode.CreateAllocationMementos()) { - BuildCreateAllocationMemento( - object, object_size, allocation_mode.current_site()); - } - - return object; -} - - -HValue* HGraphBuilder::BuildAddStringLengths(HValue* left_length, - HValue* right_length) { - // Compute the combined string length and check against max string length. - HValue* length = AddUncasted(left_length, right_length); - // Check that length <= kMaxLength <=> length < MaxLength + 1. - HValue* max_length = Add(String::kMaxLength + 1); - if (top_info()->IsStub() || !isolate()->IsStringLengthOverflowIntact()) { - // This is a mitigation for crbug.com/627934; the real fix - // will be to migrate the StringAddStub to TurboFan one day. - IfBuilder if_invalid(this); - if_invalid.If(length, max_length, Token::GT); - if_invalid.Then(); - { - Add( - Runtime::FunctionForId(Runtime::kThrowInvalidStringLength), 0); - } - if_invalid.End(); - } else { - graph()->MarkDependsOnStringLengthOverflow(); - Add(length, max_length); - } - return length; -} - - -HValue* HGraphBuilder::BuildCreateConsString( - HValue* length, - HValue* left, - HValue* right, - HAllocationMode allocation_mode) { - // Determine the string instance types. - HInstruction* left_instance_type = AddLoadStringInstanceType(left); - HInstruction* right_instance_type = AddLoadStringInstanceType(right); - - // Allocate the cons string object. HAllocate does not care whether we - // pass CONS_STRING_TYPE or CONS_ONE_BYTE_STRING_TYPE here, so we just use - // CONS_STRING_TYPE here. Below we decide whether the cons string is - // one-byte or two-byte and set the appropriate map. - DCHECK(HAllocate::CompatibleInstanceTypes(CONS_STRING_TYPE, - CONS_ONE_BYTE_STRING_TYPE)); - HAllocate* result = BuildAllocate(Add(ConsString::kSize), - HType::String(), CONS_STRING_TYPE, - allocation_mode); - - // Compute intersection and difference of instance types. - HValue* anded_instance_types = AddUncasted( - Token::BIT_AND, left_instance_type, right_instance_type); - HValue* xored_instance_types = AddUncasted( - Token::BIT_XOR, left_instance_type, right_instance_type); - - // We create a one-byte cons string if - // 1. both strings are one-byte, or - // 2. at least one of the strings is two-byte, but happens to contain only - // one-byte characters. - // To do this, we check - // 1. if both strings are one-byte, or if the one-byte data hint is set in - // both strings, or - // 2. if one of the strings has the one-byte data hint set and the other - // string is one-byte. - IfBuilder if_onebyte(this); - STATIC_ASSERT(kOneByteStringTag != 0); - STATIC_ASSERT(kOneByteDataHintMask != 0); - if_onebyte.If( - AddUncasted( - Token::BIT_AND, anded_instance_types, - Add(static_cast( - kStringEncodingMask | kOneByteDataHintMask))), - graph()->GetConstant0(), Token::NE); - if_onebyte.Or(); - STATIC_ASSERT(kOneByteStringTag != 0 && - kOneByteDataHintTag != 0 && - kOneByteDataHintTag != kOneByteStringTag); - if_onebyte.If( - AddUncasted( - Token::BIT_AND, xored_instance_types, - Add(static_cast( - kOneByteStringTag | kOneByteDataHintTag))), - Add(static_cast( - kOneByteStringTag | kOneByteDataHintTag)), Token::EQ); - if_onebyte.Then(); - { - // We can safely skip the write barrier for storing the map here. 
- Add( - result, HObjectAccess::ForMap(), - Add(isolate()->factory()->cons_one_byte_string_map())); - } - if_onebyte.Else(); - { - // We can safely skip the write barrier for storing the map here. - Add( - result, HObjectAccess::ForMap(), - Add(isolate()->factory()->cons_string_map())); - } - if_onebyte.End(); - - // Initialize the cons string fields. - Add(result, HObjectAccess::ForStringHashField(), - Add(String::kEmptyHashField)); - Add(result, HObjectAccess::ForStringLength(), length); - Add(result, HObjectAccess::ForConsStringFirst(), left); - Add(result, HObjectAccess::ForConsStringSecond(), right); - - // Count the native string addition. - AddIncrementCounter(isolate()->counters()->string_add_native()); - - return result; -} - - -void HGraphBuilder::BuildCopySeqStringChars(HValue* src, - HValue* src_offset, - String::Encoding src_encoding, - HValue* dst, - HValue* dst_offset, - String::Encoding dst_encoding, - HValue* length) { - DCHECK(dst_encoding != String::ONE_BYTE_ENCODING || - src_encoding == String::ONE_BYTE_ENCODING); - LoopBuilder loop(this, context(), LoopBuilder::kPostIncrement); - HValue* index = loop.BeginBody(graph()->GetConstant0(), length, Token::LT); - { - HValue* src_index = AddUncasted(src_offset, index); - HValue* value = - AddUncasted(src_encoding, src, src_index); - HValue* dst_index = AddUncasted(dst_offset, index); - Add(dst_encoding, dst, dst_index, value); - } - loop.EndBody(); -} - - -HValue* HGraphBuilder::BuildObjectSizeAlignment( - HValue* unaligned_size, int header_size) { - DCHECK((header_size & kObjectAlignmentMask) == 0); - HValue* size = AddUncasted( - unaligned_size, Add(static_cast( - header_size + kObjectAlignmentMask))); - size->ClearFlag(HValue::kCanOverflow); - return AddUncasted( - Token::BIT_AND, size, Add(static_cast( - ~kObjectAlignmentMask))); -} - - -HValue* HGraphBuilder::BuildUncheckedStringAdd( - HValue* left, - HValue* right, - HAllocationMode allocation_mode) { - // Determine the string lengths. - HValue* left_length = AddLoadStringLength(left); - HValue* right_length = AddLoadStringLength(right); - - // Compute the combined string length. - HValue* length = BuildAddStringLengths(left_length, right_length); - - // Do some manual constant folding here. - if (left_length->IsConstant()) { - HConstant* c_left_length = HConstant::cast(left_length); - DCHECK_NE(0, c_left_length->Integer32Value()); - if (c_left_length->Integer32Value() + 1 >= ConsString::kMinLength) { - // The right string contains at least one character. - return BuildCreateConsString(length, left, right, allocation_mode); - } - } else if (right_length->IsConstant()) { - HConstant* c_right_length = HConstant::cast(right_length); - DCHECK_NE(0, c_right_length->Integer32Value()); - if (c_right_length->Integer32Value() + 1 >= ConsString::kMinLength) { - // The left string contains at least one character. - return BuildCreateConsString(length, left, right, allocation_mode); - } - } - - // Check if we should create a cons string. - IfBuilder if_createcons(this); - if_createcons.If( - length, Add(ConsString::kMinLength), Token::GTE); - if_createcons.And(); - if_createcons.If( - length, Add(ConsString::kMaxLength), Token::LTE); - if_createcons.Then(); - { - // Create a cons string. - Push(BuildCreateConsString(length, left, right, allocation_mode)); - } - if_createcons.Else(); - { - // Determine the string instance types. 
- HValue* left_instance_type = AddLoadStringInstanceType(left); - HValue* right_instance_type = AddLoadStringInstanceType(right); - - // Compute union and difference of instance types. - HValue* ored_instance_types = AddUncasted( - Token::BIT_OR, left_instance_type, right_instance_type); - HValue* xored_instance_types = AddUncasted( - Token::BIT_XOR, left_instance_type, right_instance_type); - - // Check if both strings have the same encoding and both are - // sequential. - IfBuilder if_sameencodingandsequential(this); - if_sameencodingandsequential.If( - AddUncasted( - Token::BIT_AND, xored_instance_types, - Add(static_cast(kStringEncodingMask))), - graph()->GetConstant0(), Token::EQ); - if_sameencodingandsequential.And(); - STATIC_ASSERT(kSeqStringTag == 0); - if_sameencodingandsequential.If( - AddUncasted( - Token::BIT_AND, ored_instance_types, - Add(static_cast(kStringRepresentationMask))), - graph()->GetConstant0(), Token::EQ); - if_sameencodingandsequential.Then(); - { - HConstant* string_map = - Add(isolate()->factory()->string_map()); - HConstant* one_byte_string_map = - Add(isolate()->factory()->one_byte_string_map()); - - // Determine map and size depending on whether result is one-byte string. - IfBuilder if_onebyte(this); - STATIC_ASSERT(kOneByteStringTag != 0); - if_onebyte.If( - AddUncasted( - Token::BIT_AND, ored_instance_types, - Add(static_cast(kStringEncodingMask))), - graph()->GetConstant0(), Token::NE); - if_onebyte.Then(); - { - // Allocate sequential one-byte string object. - Push(length); - Push(one_byte_string_map); - } - if_onebyte.Else(); - { - // Allocate sequential two-byte string object. - HValue* size = AddUncasted(length, graph()->GetConstant1()); - size->ClearFlag(HValue::kCanOverflow); - size->SetFlag(HValue::kUint32); - Push(size); - Push(string_map); - } - if_onebyte.End(); - HValue* map = Pop(); - - // Calculate the number of bytes needed for the characters in the - // string while observing object alignment. - STATIC_ASSERT((SeqString::kHeaderSize & kObjectAlignmentMask) == 0); - HValue* size = BuildObjectSizeAlignment(Pop(), SeqString::kHeaderSize); - - IfBuilder if_size(this); - if_size.If( - size, Add(kMaxRegularHeapObjectSize), Token::LT); - if_size.Then(); - { - // Allocate the string object. HAllocate does not care whether we pass - // STRING_TYPE or ONE_BYTE_STRING_TYPE here, so we just use STRING_TYPE. - HAllocate* result = - BuildAllocate(size, HType::String(), STRING_TYPE, allocation_mode); - Add(result, HObjectAccess::ForMap(), map); - - // Initialize the string fields. - Add(result, HObjectAccess::ForStringHashField(), - Add(String::kEmptyHashField)); - Add(result, HObjectAccess::ForStringLength(), length); - - // Copy characters to the result string. - IfBuilder if_twobyte(this); - if_twobyte.If(map, string_map); - if_twobyte.Then(); - { - // Copy characters from the left string. - BuildCopySeqStringChars( - left, graph()->GetConstant0(), String::TWO_BYTE_ENCODING, result, - graph()->GetConstant0(), String::TWO_BYTE_ENCODING, left_length); - - // Copy characters from the right string. - BuildCopySeqStringChars( - right, graph()->GetConstant0(), String::TWO_BYTE_ENCODING, result, - left_length, String::TWO_BYTE_ENCODING, right_length); - } - if_twobyte.Else(); - { - // Copy characters from the left string. - BuildCopySeqStringChars( - left, graph()->GetConstant0(), String::ONE_BYTE_ENCODING, result, - graph()->GetConstant0(), String::ONE_BYTE_ENCODING, left_length); - - // Copy characters from the right string. 
- BuildCopySeqStringChars( - right, graph()->GetConstant0(), String::ONE_BYTE_ENCODING, result, - left_length, String::ONE_BYTE_ENCODING, right_length); - } - if_twobyte.End(); - - // Count the native string addition. - AddIncrementCounter(isolate()->counters()->string_add_native()); - - // Return the sequential string. - Push(result); - } - if_size.Else(); - { - // Fallback to the runtime to add the two strings. The string has to be - // allocated in LO space. - Add(left, right); - Push(Add(Runtime::FunctionForId(Runtime::kStringAdd), 2)); - } - if_size.End(); - } - if_sameencodingandsequential.Else(); - { - // Fallback to the runtime to add the two strings. - Add(left, right); - Push(Add(Runtime::FunctionForId(Runtime::kStringAdd), 2)); - } - if_sameencodingandsequential.End(); - } - if_createcons.End(); - - return Pop(); -} - - -HValue* HGraphBuilder::BuildStringAdd( - HValue* left, - HValue* right, - HAllocationMode allocation_mode) { - NoObservableSideEffectsScope no_effects(this); - - // Determine string lengths. - HValue* left_length = AddLoadStringLength(left); - HValue* right_length = AddLoadStringLength(right); - - // Check if left string is empty. - IfBuilder if_leftempty(this); - if_leftempty.If( - left_length, graph()->GetConstant0(), Token::EQ); - if_leftempty.Then(); - { - // Count the native string addition. - AddIncrementCounter(isolate()->counters()->string_add_native()); - - // Just return the right string. - Push(right); - } - if_leftempty.Else(); - { - // Check if right string is empty. - IfBuilder if_rightempty(this); - if_rightempty.If( - right_length, graph()->GetConstant0(), Token::EQ); - if_rightempty.Then(); - { - // Count the native string addition. - AddIncrementCounter(isolate()->counters()->string_add_native()); - - // Just return the left string. - Push(left); - } - if_rightempty.Else(); - { - // Add the two non-empty strings. - Push(BuildUncheckedStringAdd(left, right, allocation_mode)); - } - if_rightempty.End(); - } - if_leftempty.End(); - - return Pop(); -} - - -HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess( - HValue* checked_object, - HValue* key, - HValue* val, - bool is_js_array, - ElementsKind elements_kind, - PropertyAccessType access_type, - LoadKeyedHoleMode load_mode, - KeyedAccessStoreMode store_mode) { - DCHECK(top_info()->IsStub() || checked_object->IsCompareMap() || - checked_object->IsCheckMaps()); - DCHECK(!IsFixedTypedArrayElementsKind(elements_kind) || !is_js_array); - // No GVNFlag is necessary for ElementsKind if there is an explicit dependency - // on a HElementsTransition instruction. The flag can also be removed if the - // map to check has FAST_HOLEY_ELEMENTS, since there can be no further - // ElementsKind transitions. Finally, the dependency can be removed for stores - // for FAST_ELEMENTS, since a transition to HOLEY elements won't change the - // generated store code. 
- if ((elements_kind == FAST_HOLEY_ELEMENTS) || - (elements_kind == FAST_ELEMENTS && access_type == STORE)) { - checked_object->ClearDependsOnFlag(kElementsKind); - } - - bool fast_smi_only_elements = IsFastSmiElementsKind(elements_kind); - bool fast_elements = IsFastObjectElementsKind(elements_kind); - HValue* elements = AddLoadElements(checked_object); - if (access_type == STORE && (fast_elements || fast_smi_only_elements) && - store_mode != STORE_NO_TRANSITION_HANDLE_COW) { - HCheckMaps* check_cow_map = Add( - elements, isolate()->factory()->fixed_array_map()); - check_cow_map->ClearDependsOnFlag(kElementsKind); - } - HInstruction* length = NULL; - if (is_js_array) { - length = Add( - checked_object->ActualValue(), checked_object, - HObjectAccess::ForArrayLength(elements_kind)); - } else { - length = AddLoadFixedArrayLength(elements); - } - length->set_type(HType::Smi()); - HValue* checked_key = NULL; - if (IsFixedTypedArrayElementsKind(elements_kind)) { - checked_object = Add(checked_object); - - HValue* external_pointer = Add( - elements, nullptr, - HObjectAccess::ForFixedTypedArrayBaseExternalPointer()); - HValue* base_pointer = Add( - elements, nullptr, HObjectAccess::ForFixedTypedArrayBaseBasePointer()); - HValue* backing_store = AddUncasted(external_pointer, base_pointer, - AddOfExternalAndTagged); - - if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) { - NoObservableSideEffectsScope no_effects(this); - IfBuilder length_checker(this); - length_checker.If(key, length, Token::LT); - length_checker.Then(); - IfBuilder negative_checker(this); - HValue* bounds_check = negative_checker.If( - key, graph()->GetConstant0(), Token::GTE); - negative_checker.Then(); - HInstruction* result = AddElementAccess( - backing_store, key, val, bounds_check, checked_object->ActualValue(), - elements_kind, access_type); - negative_checker.ElseDeopt(DeoptimizeReason::kNegativeKeyEncountered); - negative_checker.End(); - length_checker.End(); - return result; - } else { - DCHECK(store_mode == STANDARD_STORE); - checked_key = Add(key, length); - return AddElementAccess(backing_store, checked_key, val, checked_object, - checked_object->ActualValue(), elements_kind, - access_type); - } - } - DCHECK(fast_smi_only_elements || - fast_elements || - IsFastDoubleElementsKind(elements_kind)); - - // In case val is stored into a fast smi array, assure that the value is a smi - // before manipulating the backing store. Otherwise the actual store may - // deopt, leaving the backing store in an invalid state. 
- if (access_type == STORE && IsFastSmiElementsKind(elements_kind) && - !val->type().IsSmi()) { - val = AddUncasted(val, Representation::Smi()); - } - - if (IsGrowStoreMode(store_mode)) { - NoObservableSideEffectsScope no_effects(this); - Representation representation = HStoreKeyed::RequiredValueRepresentation( - elements_kind, STORE_TO_INITIALIZED_ENTRY); - val = AddUncasted(val, representation); - elements = BuildCheckForCapacityGrow(checked_object, elements, - elements_kind, length, key, - is_js_array, access_type); - checked_key = key; - } else { - checked_key = Add(key, length); - - if (access_type == STORE && (fast_elements || fast_smi_only_elements)) { - if (store_mode == STORE_NO_TRANSITION_HANDLE_COW) { - NoObservableSideEffectsScope no_effects(this); - elements = BuildCopyElementsOnWrite(checked_object, elements, - elements_kind, length); - } else { - HCheckMaps* check_cow_map = Add( - elements, isolate()->factory()->fixed_array_map()); - check_cow_map->ClearDependsOnFlag(kElementsKind); - } - } - } - return AddElementAccess(elements, checked_key, val, checked_object, nullptr, - elements_kind, access_type, load_mode); -} - - -HValue* HGraphBuilder::BuildCalculateElementsSize(ElementsKind kind, - HValue* capacity) { - int elements_size = IsFastDoubleElementsKind(kind) - ? kDoubleSize - : kPointerSize; - - HConstant* elements_size_value = Add(elements_size); - HInstruction* mul = - HMul::NewImul(isolate(), zone(), context(), capacity->ActualValue(), - elements_size_value); - AddInstruction(mul); - mul->ClearFlag(HValue::kCanOverflow); - - STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize); - - HConstant* header_size = Add(FixedArray::kHeaderSize); - HValue* total_size = AddUncasted(mul, header_size); - total_size->ClearFlag(HValue::kCanOverflow); - return total_size; -} - - -HAllocate* HGraphBuilder::AllocateJSArrayObject(AllocationSiteMode mode) { - int base_size = JSArray::kSize; - if (mode == TRACK_ALLOCATION_SITE) { - base_size += AllocationMemento::kSize; - } - HConstant* size_in_bytes = Add(base_size); - return Add(size_in_bytes, HType::JSArray(), NOT_TENURED, - JS_OBJECT_TYPE, graph()->GetConstant0()); -} - - -HConstant* HGraphBuilder::EstablishElementsAllocationSize( - ElementsKind kind, - int capacity) { - int base_size = IsFastDoubleElementsKind(kind) - ? FixedDoubleArray::SizeFor(capacity) - : FixedArray::SizeFor(capacity); - - return Add(base_size); -} - - -HAllocate* HGraphBuilder::BuildAllocateElements(ElementsKind kind, - HValue* size_in_bytes) { - InstanceType instance_type = IsFastDoubleElementsKind(kind) - ? FIXED_DOUBLE_ARRAY_TYPE - : FIXED_ARRAY_TYPE; - - return Add(size_in_bytes, HType::HeapObject(), NOT_TENURED, - instance_type, graph()->GetConstant0()); -} - - -void HGraphBuilder::BuildInitializeElementsHeader(HValue* elements, - ElementsKind kind, - HValue* capacity) { - Factory* factory = isolate()->factory(); - Handle map = IsFastDoubleElementsKind(kind) - ? factory->fixed_double_array_map() - : factory->fixed_array_map(); - - Add(elements, HObjectAccess::ForMap(), Add(map)); - Add(elements, HObjectAccess::ForFixedArrayLength(), - capacity); -} - - -HValue* HGraphBuilder::BuildAllocateAndInitializeArray(ElementsKind kind, - HValue* capacity) { - // The HForceRepresentation is to prevent possible deopt on int-smi - // conversion after allocation but before the new object fields are set. 
- capacity = AddUncasted(capacity, Representation::Smi()); - HValue* size_in_bytes = BuildCalculateElementsSize(kind, capacity); - HValue* new_array = BuildAllocateElements(kind, size_in_bytes); - BuildInitializeElementsHeader(new_array, kind, capacity); - return new_array; -} - - -void HGraphBuilder::BuildJSArrayHeader(HValue* array, - HValue* array_map, - HValue* elements, - AllocationSiteMode mode, - ElementsKind elements_kind, - HValue* allocation_site_payload, - HValue* length_field) { - Add(array, HObjectAccess::ForMap(), array_map); - - HValue* empty_fixed_array = Add(Heap::kEmptyFixedArrayRootIndex); - - Add( - array, HObjectAccess::ForPropertiesPointer(), empty_fixed_array); - - Add(array, HObjectAccess::ForElementsPointer(), - elements != nullptr ? elements : empty_fixed_array); - - Add( - array, HObjectAccess::ForArrayLength(elements_kind), length_field); - - if (mode == TRACK_ALLOCATION_SITE) { - BuildCreateAllocationMemento( - array, Add(JSArray::kSize), allocation_site_payload); - } -} - - -HInstruction* HGraphBuilder::AddElementAccess( - HValue* elements, HValue* checked_key, HValue* val, HValue* dependency, - HValue* backing_store_owner, ElementsKind elements_kind, - PropertyAccessType access_type, LoadKeyedHoleMode load_mode) { - if (access_type == STORE) { - DCHECK(val != NULL); - if (elements_kind == UINT8_CLAMPED_ELEMENTS) { - val = Add(val); - } - return Add(elements, checked_key, val, backing_store_owner, - elements_kind, STORE_TO_INITIALIZED_ENTRY); - } - - DCHECK(access_type == LOAD); - DCHECK(val == NULL); - HLoadKeyed* load = - Add(elements, checked_key, dependency, backing_store_owner, - elements_kind, load_mode); - if (elements_kind == UINT32_ELEMENTS) { - graph()->RecordUint32Instruction(load); - } - return load; -} - - -HLoadNamedField* HGraphBuilder::AddLoadMap(HValue* object, - HValue* dependency) { - return Add(object, dependency, HObjectAccess::ForMap()); -} - - -HLoadNamedField* HGraphBuilder::AddLoadElements(HValue* object, - HValue* dependency) { - return Add( - object, dependency, HObjectAccess::ForElementsPointer()); -} - - -HLoadNamedField* HGraphBuilder::AddLoadFixedArrayLength( - HValue* array, - HValue* dependency) { - return Add( - array, dependency, HObjectAccess::ForFixedArrayLength()); -} - - -HLoadNamedField* HGraphBuilder::AddLoadArrayLength(HValue* array, - ElementsKind kind, - HValue* dependency) { - return Add( - array, dependency, HObjectAccess::ForArrayLength(kind)); -} - - -HValue* HGraphBuilder::BuildNewElementsCapacity(HValue* old_capacity) { - HValue* half_old_capacity = AddUncasted(old_capacity, - graph_->GetConstant1()); - - HValue* new_capacity = AddUncasted(half_old_capacity, old_capacity); - new_capacity->ClearFlag(HValue::kCanOverflow); - - HValue* min_growth = Add(16); - - new_capacity = AddUncasted(new_capacity, min_growth); - new_capacity->ClearFlag(HValue::kCanOverflow); - - return new_capacity; -} - - -HValue* HGraphBuilder::BuildGrowElementsCapacity(HValue* object, - HValue* elements, - ElementsKind kind, - ElementsKind new_kind, - HValue* length, - HValue* new_capacity) { - Add( - new_capacity, - Add((kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) >> - ElementsKindToShiftSize(new_kind))); - - HValue* new_elements = - BuildAllocateAndInitializeArray(new_kind, new_capacity); - - BuildCopyElements(elements, kind, new_elements, - new_kind, length, new_capacity); - - Add(object, HObjectAccess::ForElementsPointer(), - new_elements); - - return new_elements; -} - - -void HGraphBuilder::BuildFillElementsWithValue(HValue* 
elements, - ElementsKind elements_kind, - HValue* from, - HValue* to, - HValue* value) { - if (to == NULL) { - to = AddLoadFixedArrayLength(elements); - } - - // Special loop unfolding case - STATIC_ASSERT(JSArray::kPreallocatedArrayElements <= - kElementLoopUnrollThreshold); - int initial_capacity = -1; - if (from->IsInteger32Constant() && to->IsInteger32Constant()) { - int constant_from = from->GetInteger32Constant(); - int constant_to = to->GetInteger32Constant(); - - if (constant_from == 0 && constant_to <= kElementLoopUnrollThreshold) { - initial_capacity = constant_to; - } - } - - if (initial_capacity >= 0) { - for (int i = 0; i < initial_capacity; i++) { - HInstruction* key = Add(i); - Add(elements, key, value, nullptr, elements_kind); - } - } else { - // Carefully loop backwards so that the "from" remains live through the loop - // rather than the to. This often corresponds to keeping length live rather - // then capacity, which helps register allocation, since length is used more - // other than capacity after filling with holes. - LoopBuilder builder(this, context(), LoopBuilder::kPostDecrement); - - HValue* key = builder.BeginBody(to, from, Token::GT); - - HValue* adjusted_key = AddUncasted(key, graph()->GetConstant1()); - adjusted_key->ClearFlag(HValue::kCanOverflow); - - Add(elements, adjusted_key, value, nullptr, elements_kind); - - builder.EndBody(); - } -} - - -void HGraphBuilder::BuildFillElementsWithHole(HValue* elements, - ElementsKind elements_kind, - HValue* from, - HValue* to) { - // Fast elements kinds need to be initialized in case statements below cause a - // garbage collection. - - HValue* hole = IsFastSmiOrObjectElementsKind(elements_kind) - ? graph()->GetConstantHole() - : Add(HConstant::kHoleNaN); - - // Since we're about to store a hole value, the store instruction below must - // assume an elements kind that supports heap object values. 
- if (IsFastSmiOrObjectElementsKind(elements_kind)) { - elements_kind = FAST_HOLEY_ELEMENTS; - } - - BuildFillElementsWithValue(elements, elements_kind, from, to, hole); -} - - -void HGraphBuilder::BuildCopyProperties(HValue* from_properties, - HValue* to_properties, HValue* length, - HValue* capacity) { - ElementsKind kind = FAST_ELEMENTS; - - BuildFillElementsWithValue(to_properties, kind, length, capacity, - graph()->GetConstantUndefined()); - - LoopBuilder builder(this, context(), LoopBuilder::kPostDecrement); - - HValue* key = builder.BeginBody(length, graph()->GetConstant0(), Token::GT); - - key = AddUncasted(key, graph()->GetConstant1()); - key->ClearFlag(HValue::kCanOverflow); - - HValue* element = - Add(from_properties, key, nullptr, nullptr, kind); - - Add(to_properties, key, element, nullptr, kind); - - builder.EndBody(); -} - - -void HGraphBuilder::BuildCopyElements(HValue* from_elements, - ElementsKind from_elements_kind, - HValue* to_elements, - ElementsKind to_elements_kind, - HValue* length, - HValue* capacity) { - int constant_capacity = -1; - if (capacity != NULL && - capacity->IsConstant() && - HConstant::cast(capacity)->HasInteger32Value()) { - int constant_candidate = HConstant::cast(capacity)->Integer32Value(); - if (constant_candidate <= kElementLoopUnrollThreshold) { - constant_capacity = constant_candidate; - } - } - - bool pre_fill_with_holes = - IsFastDoubleElementsKind(from_elements_kind) && - IsFastObjectElementsKind(to_elements_kind); - if (pre_fill_with_holes) { - // If the copy might trigger a GC, make sure that the FixedArray is - // pre-initialized with holes to make sure that it's always in a - // consistent state. - BuildFillElementsWithHole(to_elements, to_elements_kind, - graph()->GetConstant0(), NULL); - } - - if (constant_capacity != -1) { - // Unroll the loop for small elements kinds. - for (int i = 0; i < constant_capacity; i++) { - HValue* key_constant = Add(i); - HInstruction* value = Add( - from_elements, key_constant, nullptr, nullptr, from_elements_kind); - Add(to_elements, key_constant, value, nullptr, - to_elements_kind); - } - } else { - if (!pre_fill_with_holes && - (capacity == NULL || !length->Equals(capacity))) { - BuildFillElementsWithHole(to_elements, to_elements_kind, - length, NULL); - } - - LoopBuilder builder(this, context(), LoopBuilder::kPostDecrement); - - HValue* key = builder.BeginBody(length, graph()->GetConstant0(), - Token::GT); - - key = AddUncasted(key, graph()->GetConstant1()); - key->ClearFlag(HValue::kCanOverflow); - - HValue* element = Add(from_elements, key, nullptr, nullptr, - from_elements_kind, ALLOW_RETURN_HOLE); - - ElementsKind kind = (IsHoleyElementsKind(from_elements_kind) && - IsFastSmiElementsKind(to_elements_kind)) - ? FAST_HOLEY_ELEMENTS : to_elements_kind; - - if (IsHoleyElementsKind(from_elements_kind) && - from_elements_kind != to_elements_kind) { - IfBuilder if_hole(this); - if_hole.If(element); - if_hole.Then(); - HConstant* hole_constant = IsFastDoubleElementsKind(to_elements_kind) - ? 
Add(HConstant::kHoleNaN) - : graph()->GetConstantHole(); - Add(to_elements, key, hole_constant, nullptr, kind); - if_hole.Else(); - HStoreKeyed* store = - Add(to_elements, key, element, nullptr, kind); - store->SetFlag(HValue::kTruncatingToNumber); - if_hole.End(); - } else { - HStoreKeyed* store = - Add(to_elements, key, element, nullptr, kind); - store->SetFlag(HValue::kTruncatingToNumber); - } - - builder.EndBody(); - } - - Counters* counters = isolate()->counters(); - AddIncrementCounter(counters->inlined_copied_elements()); -} - -void HGraphBuilder::BuildCreateAllocationMemento( - HValue* previous_object, - HValue* previous_object_size, - HValue* allocation_site) { - DCHECK(allocation_site != NULL); - HInnerAllocatedObject* allocation_memento = Add( - previous_object, previous_object_size, HType::HeapObject()); - AddStoreMapConstant( - allocation_memento, isolate()->factory()->allocation_memento_map()); - Add( - allocation_memento, - HObjectAccess::ForAllocationMementoSite(), - allocation_site); - if (FLAG_allocation_site_pretenuring) { - HValue* memento_create_count = - Add(allocation_site, nullptr, - HObjectAccess::ForAllocationSiteOffset( - AllocationSite::kPretenureCreateCountOffset)); - memento_create_count = AddUncasted( - memento_create_count, graph()->GetConstant1()); - // This smi value is reset to zero after every gc, overflow isn't a problem - // since the counter is bounded by the new space size. - memento_create_count->ClearFlag(HValue::kCanOverflow); - Add( - allocation_site, HObjectAccess::ForAllocationSiteOffset( - AllocationSite::kPretenureCreateCountOffset), memento_create_count); - } -} - - -HInstruction* HGraphBuilder::BuildGetNativeContext() { - return Add( - context(), nullptr, - HObjectAccess::ForContextSlot(Context::NATIVE_CONTEXT_INDEX)); -} - -HValue* HGraphBuilder::BuildArrayBufferViewFieldAccessor(HValue* object, - HValue* checked_object, - FieldIndex index) { - NoObservableSideEffectsScope scope(this); - HObjectAccess access = HObjectAccess::ForObservableJSObjectOffset( - index.offset(), Representation::Tagged()); - HInstruction* buffer = Add( - object, checked_object, HObjectAccess::ForJSArrayBufferViewBuffer()); - HInstruction* field = Add(object, checked_object, access); - - HInstruction* flags = Add( - buffer, nullptr, HObjectAccess::ForJSArrayBufferBitField()); - HValue* was_neutered_mask = - Add(1 << JSArrayBuffer::WasNeutered::kShift); - HValue* was_neutered_test = - AddUncasted(Token::BIT_AND, flags, was_neutered_mask); - - IfBuilder if_was_neutered(this); - if_was_neutered.If( - was_neutered_test, graph()->GetConstant0(), Token::NE); - if_was_neutered.Then(); - Push(graph()->GetConstant0()); - if_was_neutered.Else(); - Push(field); - if_was_neutered.End(); - - return Pop(); -} - - -void HBasicBlock::FinishExit(HControlInstruction* instruction, - SourcePosition position) { - Finish(instruction, position); - ClearEnvironment(); -} - - -std::ostream& operator<<(std::ostream& os, const HBasicBlock& b) { - return os << "B" << b.block_id(); -} - -HGraph::HGraph(CompilationInfo* info, CallInterfaceDescriptor descriptor) - : isolate_(info->isolate()), - next_block_id_(0), - entry_block_(NULL), - blocks_(8, info->zone()), - values_(16, info->zone()), - phi_list_(NULL), - uint32_instructions_(NULL), - info_(info), - descriptor_(descriptor), - zone_(info->zone()), - allow_code_motion_(false), - use_optimistic_licm_(false), - depends_on_empty_array_proto_elements_(false), - depends_on_string_length_overflow_(false), - type_change_checksum_(0), - 
maximum_environment_size_(0), - no_side_effects_scope_count_(0), - disallow_adding_new_values_(false) { - if (info->IsStub()) { - // For stubs, explicitly add the context to the environment. - start_environment_ = - new (zone_) HEnvironment(zone_, descriptor.GetParameterCount() + 1); - } else { - start_environment_ = - new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_); - } - start_environment_->set_ast_id(BailoutId::FunctionContext()); - entry_block_ = CreateBasicBlock(); - entry_block_->SetInitialEnvironment(start_environment_); -} - - -HBasicBlock* HGraph::CreateBasicBlock() { - HBasicBlock* result = new(zone()) HBasicBlock(this); - blocks_.Add(result, zone()); - return result; -} - - -void HGraph::FinalizeUniqueness() { - DisallowHeapAllocation no_gc; - for (int i = 0; i < blocks()->length(); ++i) { - for (HInstructionIterator it(blocks()->at(i)); !it.Done(); it.Advance()) { - it.Current()->FinalizeUniqueness(); - } - } -} - - -// Block ordering was implemented with two mutually recursive methods, -// HGraph::Postorder and HGraph::PostorderLoopBlocks. -// The recursion could lead to stack overflow so the algorithm has been -// implemented iteratively. -// At a high level the algorithm looks like this: -// -// Postorder(block, loop_header) : { -// if (block has already been visited or is of another loop) return; -// mark block as visited; -// if (block is a loop header) { -// VisitLoopMembers(block, loop_header); -// VisitSuccessorsOfLoopHeader(block); -// } else { -// VisitSuccessors(block) -// } -// put block in result list; -// } -// -// VisitLoopMembers(block, outer_loop_header) { -// foreach (block b in block loop members) { -// VisitSuccessorsOfLoopMember(b, outer_loop_header); -// if (b is loop header) VisitLoopMembers(b); -// } -// } -// -// VisitSuccessorsOfLoopMember(block, outer_loop_header) { -// foreach (block b in block successors) Postorder(b, outer_loop_header) -// } -// -// VisitSuccessorsOfLoopHeader(block) { -// foreach (block b in block successors) Postorder(b, block) -// } -// -// VisitSuccessors(block, loop_header) { -// foreach (block b in block successors) Postorder(b, loop_header) -// } -// -// The ordering is started calling Postorder(entry, NULL). -// -// Each instance of PostorderProcessor represents the "stack frame" of the -// recursion, and particularly keeps the state of the loop (iteration) of the -// "Visit..." function it represents. -// To recycle memory we keep all the frames in a double linked list but -// this means that we cannot use constructors to initialize the frames. -// -class PostorderProcessor : public ZoneObject { - public: - // Back link (towards the stack bottom). - PostorderProcessor* parent() {return father_; } - // Forward link (towards the stack top). 
- PostorderProcessor* child() {return child_; } - HBasicBlock* block() { return block_; } - HLoopInformation* loop() { return loop_; } - HBasicBlock* loop_header() { return loop_header_; } - - static PostorderProcessor* CreateEntryProcessor(Zone* zone, - HBasicBlock* block) { - PostorderProcessor* result = new(zone) PostorderProcessor(NULL); - return result->SetupSuccessors(zone, block, NULL); - } - - PostorderProcessor* PerformStep(Zone* zone, - ZoneList* order) { - PostorderProcessor* next = - PerformNonBacktrackingStep(zone, order); - if (next != NULL) { - return next; - } else { - return Backtrack(zone, order); - } - } - - private: - explicit PostorderProcessor(PostorderProcessor* father) - : father_(father), child_(NULL), successor_iterator(NULL) { } - - // Each enum value states the cycle whose state is kept by this instance. - enum LoopKind { - NONE, - SUCCESSORS, - SUCCESSORS_OF_LOOP_HEADER, - LOOP_MEMBERS, - SUCCESSORS_OF_LOOP_MEMBER - }; - - // Each "Setup..." method is like a constructor for a cycle state. - PostorderProcessor* SetupSuccessors(Zone* zone, - HBasicBlock* block, - HBasicBlock* loop_header) { - if (block == NULL || block->IsOrdered() || - block->parent_loop_header() != loop_header) { - kind_ = NONE; - block_ = NULL; - loop_ = NULL; - loop_header_ = NULL; - return this; - } else { - block_ = block; - loop_ = NULL; - block->MarkAsOrdered(); - - if (block->IsLoopHeader()) { - kind_ = SUCCESSORS_OF_LOOP_HEADER; - loop_header_ = block; - InitializeSuccessors(); - PostorderProcessor* result = Push(zone); - return result->SetupLoopMembers(zone, block, block->loop_information(), - loop_header); - } else { - DCHECK(block->IsFinished()); - kind_ = SUCCESSORS; - loop_header_ = loop_header; - InitializeSuccessors(); - return this; - } - } - } - - PostorderProcessor* SetupLoopMembers(Zone* zone, - HBasicBlock* block, - HLoopInformation* loop, - HBasicBlock* loop_header) { - kind_ = LOOP_MEMBERS; - block_ = block; - loop_ = loop; - loop_header_ = loop_header; - InitializeLoopMembers(); - return this; - } - - PostorderProcessor* SetupSuccessorsOfLoopMember( - HBasicBlock* block, - HLoopInformation* loop, - HBasicBlock* loop_header) { - kind_ = SUCCESSORS_OF_LOOP_MEMBER; - block_ = block; - loop_ = loop; - loop_header_ = loop_header; - InitializeSuccessors(); - return this; - } - - // This method "allocates" a new stack frame. - PostorderProcessor* Push(Zone* zone) { - if (child_ == NULL) { - child_ = new(zone) PostorderProcessor(this); - } - return child_; - } - - void ClosePostorder(ZoneList* order, Zone* zone) { - DCHECK(block_->end()->FirstSuccessor() == NULL || - order->Contains(block_->end()->FirstSuccessor()) || - block_->end()->FirstSuccessor()->IsLoopHeader()); - DCHECK(block_->end()->SecondSuccessor() == NULL || - order->Contains(block_->end()->SecondSuccessor()) || - block_->end()->SecondSuccessor()->IsLoopHeader()); - order->Add(block_, zone); - } - - // This method is the basic block to walk up the stack. - PostorderProcessor* Pop(Zone* zone, - ZoneList* order) { - switch (kind_) { - case SUCCESSORS: - case SUCCESSORS_OF_LOOP_HEADER: - ClosePostorder(order, zone); - return father_; - case LOOP_MEMBERS: - return father_; - case SUCCESSORS_OF_LOOP_MEMBER: - if (block()->IsLoopHeader() && block() != loop_->loop_header()) { - // In this case we need to perform a LOOP_MEMBERS cycle so we - // initialize it and return this instead of father. 
- return SetupLoopMembers(zone, block(), - block()->loop_information(), loop_header_); - } else { - return father_; - } - case NONE: - return father_; - } - UNREACHABLE(); - } - - // Walks up the stack. - PostorderProcessor* Backtrack(Zone* zone, - ZoneList* order) { - PostorderProcessor* parent = Pop(zone, order); - while (parent != NULL) { - PostorderProcessor* next = - parent->PerformNonBacktrackingStep(zone, order); - if (next != NULL) { - return next; - } else { - parent = parent->Pop(zone, order); - } - } - return NULL; - } - - PostorderProcessor* PerformNonBacktrackingStep( - Zone* zone, - ZoneList* order) { - HBasicBlock* next_block; - switch (kind_) { - case SUCCESSORS: - next_block = AdvanceSuccessors(); - if (next_block != NULL) { - PostorderProcessor* result = Push(zone); - return result->SetupSuccessors(zone, next_block, loop_header_); - } - break; - case SUCCESSORS_OF_LOOP_HEADER: - next_block = AdvanceSuccessors(); - if (next_block != NULL) { - PostorderProcessor* result = Push(zone); - return result->SetupSuccessors(zone, next_block, block()); - } - break; - case LOOP_MEMBERS: - next_block = AdvanceLoopMembers(); - if (next_block != NULL) { - PostorderProcessor* result = Push(zone); - return result->SetupSuccessorsOfLoopMember(next_block, - loop_, loop_header_); - } - break; - case SUCCESSORS_OF_LOOP_MEMBER: - next_block = AdvanceSuccessors(); - if (next_block != NULL) { - PostorderProcessor* result = Push(zone); - return result->SetupSuccessors(zone, next_block, loop_header_); - } - break; - case NONE: - return NULL; - } - return NULL; - } - - // The following two methods implement a "foreach b in successors" cycle. - void InitializeSuccessors() { - loop_index = 0; - loop_length = 0; - successor_iterator = HSuccessorIterator(block_->end()); - } - - HBasicBlock* AdvanceSuccessors() { - if (!successor_iterator.Done()) { - HBasicBlock* result = successor_iterator.Current(); - successor_iterator.Advance(); - return result; - } - return NULL; - } - - // The following two methods implement a "foreach b in loop members" cycle. - void InitializeLoopMembers() { - loop_index = 0; - loop_length = loop_->blocks()->length(); - } - - HBasicBlock* AdvanceLoopMembers() { - if (loop_index < loop_length) { - HBasicBlock* result = loop_->blocks()->at(loop_index); - loop_index++; - return result; - } else { - return NULL; - } - } - - LoopKind kind_; - PostorderProcessor* father_; - PostorderProcessor* child_; - HLoopInformation* loop_; - HBasicBlock* block_; - HBasicBlock* loop_header_; - int loop_index; - int loop_length; - HSuccessorIterator successor_iterator; -}; - - -void HGraph::OrderBlocks() { - CompilationPhase phase("H_Block ordering", info()); - -#ifdef DEBUG - // Initially the blocks must not be ordered. - for (int i = 0; i < blocks_.length(); ++i) { - DCHECK(!blocks_[i]->IsOrdered()); - } -#endif - - PostorderProcessor* postorder = - PostorderProcessor::CreateEntryProcessor(zone(), blocks_[0]); - blocks_.Rewind(0); - while (postorder) { - postorder = postorder->PerformStep(zone(), &blocks_); - } - -#ifdef DEBUG - // Now all blocks must be marked as ordered. - for (int i = 0; i < blocks_.length(); ++i) { - DCHECK(blocks_[i]->IsOrdered()); - } -#endif - - // Reverse block list and assign block IDs. 
- for (int i = 0, j = blocks_.length(); --j >= i; ++i) { - HBasicBlock* bi = blocks_[i]; - HBasicBlock* bj = blocks_[j]; - bi->set_block_id(j); - bj->set_block_id(i); - blocks_[i] = bj; - blocks_[j] = bi; - } -} - - -void HGraph::AssignDominators() { - HPhase phase("H_Assign dominators", this); - for (int i = 0; i < blocks_.length(); ++i) { - HBasicBlock* block = blocks_[i]; - if (block->IsLoopHeader()) { - // Only the first predecessor of a loop header is from outside the loop. - // All others are back edges, and thus cannot dominate the loop header. - block->AssignCommonDominator(block->predecessors()->first()); - block->AssignLoopSuccessorDominators(); - } else { - for (int j = blocks_[i]->predecessors()->length() - 1; j >= 0; --j) { - blocks_[i]->AssignCommonDominator(blocks_[i]->predecessors()->at(j)); - } - } - } -} - - -bool HGraph::CheckArgumentsPhiUses() { - int block_count = blocks_.length(); - for (int i = 0; i < block_count; ++i) { - for (int j = 0; j < blocks_[i]->phis()->length(); ++j) { - HPhi* phi = blocks_[i]->phis()->at(j); - // We don't support phi uses of arguments for now. - if (phi->CheckFlag(HValue::kIsArguments)) return false; - } - } - return true; -} - - -bool HGraph::CheckConstPhiUses() { - int block_count = blocks_.length(); - for (int i = 0; i < block_count; ++i) { - for (int j = 0; j < blocks_[i]->phis()->length(); ++j) { - HPhi* phi = blocks_[i]->phis()->at(j); - // Check for the hole value (from an uninitialized const). - for (int k = 0; k < phi->OperandCount(); k++) { - if (phi->OperandAt(k) == GetConstantHole()) return false; - } - } - } - return true; -} - - -void HGraph::CollectPhis() { - int block_count = blocks_.length(); - phi_list_ = new(zone()) ZoneList(block_count, zone()); - for (int i = 0; i < block_count; ++i) { - for (int j = 0; j < blocks_[i]->phis()->length(); ++j) { - HPhi* phi = blocks_[i]->phis()->at(j); - phi_list_->Add(phi, zone()); - } - } -} - - -bool HGraph::Optimize(BailoutReason* bailout_reason) { - OrderBlocks(); - AssignDominators(); - - // We need to create a HConstant "zero" now so that GVN will fold every - // zero-valued constant in the graph together. - // The constant is needed to make idef-based bounds check work: the pass - // evaluates relations with "zero" and that zero cannot be created after GVN. - GetConstant0(); - -#ifdef DEBUG - // Do a full verify after building the graph and computing dominators. - Verify(true); -#endif - - if (FLAG_analyze_environment_liveness && maximum_environment_size() != 0) { - Run(); - } - - if (!CheckConstPhiUses()) { - *bailout_reason = kUnsupportedPhiUseOfConstVariable; - return false; - } - Run(); - if (!CheckArgumentsPhiUses()) { - *bailout_reason = kUnsupportedPhiUseOfArguments; - return false; - } - - // Find and mark unreachable code to simplify optimizations, especially gvn, - // where unreachable code could unnecessarily defeat LICM. - Run(); - - if (FLAG_dead_code_elimination) Run(); - if (FLAG_use_escape_analysis) Run(); - - if (FLAG_load_elimination) Run(); - - CollectPhis(); - - Run(); - - // Remove HSimulate instructions that have turned out not to be needed - // after all by folding them into the following HSimulate. - // This must happen after inferring representations. - Run(); - - Run(); - - Run(); - - // Must be performed before canonicalization to ensure that Canonicalize - // will not remove semantically meaningful ToInt32 operations e.g. BIT_OR with - // zero. 
- Run(); - - if (FLAG_use_canonicalizing) Run(); - - if (FLAG_use_gvn) Run(); - - if (FLAG_check_elimination) Run(); - - if (FLAG_store_elimination) Run(); - - Run(); - - // Eliminate redundant stack checks on backwards branches. - Run(); - - if (FLAG_array_bounds_checks_elimination) Run(); - if (FLAG_array_index_dehoisting) Run(); - if (FLAG_dead_code_elimination) Run(); - - RestoreActualValues(); - - // Find unreachable code a second time, GVN and other optimizations may have - // made blocks unreachable that were previously reachable. - Run(); - - return true; -} - - -void HGraph::RestoreActualValues() { - HPhase phase("H_Restore actual values", this); - - for (int block_index = 0; block_index < blocks()->length(); block_index++) { - HBasicBlock* block = blocks()->at(block_index); - -#ifdef DEBUG - for (int i = 0; i < block->phis()->length(); i++) { - HPhi* phi = block->phis()->at(i); - DCHECK(phi->ActualValue() == phi); - } -#endif - - for (HInstructionIterator it(block); !it.Done(); it.Advance()) { - HInstruction* instruction = it.Current(); - if (instruction->ActualValue() == instruction) continue; - if (instruction->CheckFlag(HValue::kIsDead)) { - // The instruction was marked as deleted but left in the graph - // as a control flow dependency point for subsequent - // instructions. - instruction->DeleteAndReplaceWith(instruction->ActualValue()); - } else { - DCHECK(instruction->IsInformativeDefinition()); - if (instruction->IsPurelyInformativeDefinition()) { - instruction->DeleteAndReplaceWith(instruction->RedefinedOperand()); - } else { - instruction->ReplaceAllUsesWith(instruction->ActualValue()); - } - } - } - } -} - - -HInstruction* HGraphBuilder::AddLoadStringInstanceType(HValue* string) { - if (string->IsConstant()) { - HConstant* c_string = HConstant::cast(string); - if (c_string->HasStringValue()) { - return Add(c_string->StringValue()->map()->instance_type()); - } - } - return Add( - Add(string, nullptr, HObjectAccess::ForMap()), nullptr, - HObjectAccess::ForMapInstanceType()); -} - - -HInstruction* HGraphBuilder::AddLoadStringLength(HValue* string) { - return AddInstruction(BuildLoadStringLength(string)); -} - - -HInstruction* HGraphBuilder::BuildLoadStringLength(HValue* string) { - if (string->IsConstant()) { - HConstant* c_string = HConstant::cast(string); - if (c_string->HasStringValue()) { - return New(c_string->StringValue()->length()); - } - } - return New(string, nullptr, - HObjectAccess::ForStringLength()); -} - - -HInstruction* HGraphBuilder::BuildConstantMapCheck(Handle constant, - bool ensure_no_elements) { - HCheckMaps* check = Add( - Add(constant), handle(constant->map())); - check->ClearDependsOnFlag(kElementsKind); - if (ensure_no_elements) { - // TODO(ishell): remove this once we support NO_ELEMENTS elements kind. 
- HValue* elements = AddLoadElements(check, nullptr); - HValue* empty_elements = - Add(isolate()->factory()->empty_fixed_array()); - IfBuilder if_empty(this); - if_empty.IfNot(elements, empty_elements); - if_empty.ThenDeopt(DeoptimizeReason::kWrongMap); - if_empty.End(); - } - return check; -} - -HInstruction* HGraphBuilder::BuildCheckPrototypeMaps(Handle prototype, - Handle holder, - bool ensure_no_elements) { - PrototypeIterator iter(isolate(), prototype, kStartAtReceiver); - while (holder.is_null() || - !PrototypeIterator::GetCurrent(iter).is_identical_to(holder)) { - BuildConstantMapCheck(PrototypeIterator::GetCurrent(iter), - ensure_no_elements); - iter.Advance(); - if (iter.IsAtEnd()) { - return NULL; - } - } - return BuildConstantMapCheck(holder); -} - - -// Checks if the given shift amounts have following forms: -// (N1) and (N2) with N1 + N2 = 32; (sa) and (32 - sa). -static bool ShiftAmountsAllowReplaceByRotate(HValue* sa, - HValue* const32_minus_sa) { - if (sa->IsConstant() && const32_minus_sa->IsConstant()) { - const HConstant* c1 = HConstant::cast(sa); - const HConstant* c2 = HConstant::cast(const32_minus_sa); - return c1->HasInteger32Value() && c2->HasInteger32Value() && - (c1->Integer32Value() + c2->Integer32Value() == 32); - } - if (!const32_minus_sa->IsSub()) return false; - HSub* sub = HSub::cast(const32_minus_sa); - return sub->left()->EqualsInteger32Constant(32) && sub->right() == sa; -} - - -// Checks if the left and the right are shift instructions with the oposite -// directions that can be replaced by one rotate right instruction or not. -// Returns the operand and the shift amount for the rotate instruction in the -// former case. -bool HGraphBuilder::MatchRotateRight(HValue* left, - HValue* right, - HValue** operand, - HValue** shift_amount) { - HShl* shl; - HShr* shr; - if (left->IsShl() && right->IsShr()) { - shl = HShl::cast(left); - shr = HShr::cast(right); - } else if (left->IsShr() && right->IsShl()) { - shl = HShl::cast(right); - shr = HShr::cast(left); - } else { - return false; - } - if (shl->left() != shr->left()) return false; - - if (!ShiftAmountsAllowReplaceByRotate(shl->right(), shr->right()) && - !ShiftAmountsAllowReplaceByRotate(shr->right(), shl->right())) { - return false; - } - *operand = shr->left(); - *shift_amount = shr->right(); - return true; -} - - -bool CanBeZero(HValue* right) { - if (right->IsConstant()) { - HConstant* right_const = HConstant::cast(right); - if (right_const->HasInteger32Value() && - (right_const->Integer32Value() & 0x1f) != 0) { - return false; - } - } - return true; -} - -HValue* HGraphBuilder::EnforceNumberType(HValue* number, AstType* expected) { - if (expected->Is(AstType::SignedSmall())) { - return AddUncasted(number, Representation::Smi()); - } - if (expected->Is(AstType::Signed32())) { - return AddUncasted(number, - Representation::Integer32()); - } - return number; -} - -HValue* HGraphBuilder::TruncateToNumber(HValue* value, AstType** expected) { - if (value->IsConstant()) { - HConstant* constant = HConstant::cast(value); - Maybe number = - constant->CopyToTruncatedNumber(isolate(), zone()); - if (number.IsJust()) { - *expected = AstType::Number(); - return AddInstruction(number.FromJust()); - } - } - - // We put temporary values on the stack, which don't correspond to anything - // in baseline code. Since nothing is observable we avoid recording those - // pushes with a NoObservableSideEffectsScope. 
- NoObservableSideEffectsScope no_effects(this); - - AstType* expected_type = *expected; - - // Separate the number type from the rest. - AstType* expected_obj = - AstType::Intersect(expected_type, AstType::NonNumber(), zone()); - AstType* expected_number = - AstType::Intersect(expected_type, AstType::Number(), zone()); - - // We expect to get a number. - // (We need to check first, since AstType::None->Is(AstType::Any()) == true. - if (expected_obj->Is(AstType::None())) { - DCHECK(!expected_number->Is(AstType::None())); - return value; - } - - if (expected_obj->Is(AstType::Undefined())) { - // This is already done by HChange. - *expected = AstType::Union(expected_number, AstType::Number(), zone()); - return value; - } - - return value; -} - - -static Representation RepresentationFor(AstType* type) { - DisallowHeapAllocation no_allocation; - if (type->Is(AstType::None())) return Representation::None(); - if (type->Is(AstType::SignedSmall())) return Representation::Smi(); - if (type->Is(AstType::Signed32())) return Representation::Integer32(); - if (type->Is(AstType::Number())) return Representation::Double(); - return Representation::Tagged(); -} - - -HValue* HGraphBuilder::BuildBinaryOperation( - Token::Value op, HValue* left, HValue* right, AstType* left_type, - AstType* right_type, AstType* result_type, Maybe fixed_right_arg, - HAllocationMode allocation_mode, BailoutId opt_id) { - bool maybe_string_add = false; - if (op == Token::ADD) { - // If we are adding constant string with something for which we don't have - // a feedback yet, assume that it's also going to be a string and don't - // generate deopt instructions. - if (!left_type->IsInhabited() && right->IsConstant() && - HConstant::cast(right)->HasStringValue()) { - left_type = AstType::String(); - } - - if (!right_type->IsInhabited() && left->IsConstant() && - HConstant::cast(left)->HasStringValue()) { - right_type = AstType::String(); - } - - maybe_string_add = (left_type->Maybe(AstType::String()) || - left_type->Maybe(AstType::Receiver()) || - right_type->Maybe(AstType::String()) || - right_type->Maybe(AstType::Receiver())); - } - - Representation left_rep = RepresentationFor(left_type); - Representation right_rep = RepresentationFor(right_type); - - if (!left_type->IsInhabited()) { - Add( - DeoptimizeReason::kInsufficientTypeFeedbackForLHSOfBinaryOperation, - Deoptimizer::SOFT); - left_type = AstType::Any(); - left_rep = RepresentationFor(left_type); - maybe_string_add = op == Token::ADD; - } - - if (!right_type->IsInhabited()) { - Add( - DeoptimizeReason::kInsufficientTypeFeedbackForRHSOfBinaryOperation, - Deoptimizer::SOFT); - right_type = AstType::Any(); - right_rep = RepresentationFor(right_type); - maybe_string_add = op == Token::ADD; - } - - if (!maybe_string_add) { - left = TruncateToNumber(left, &left_type); - right = TruncateToNumber(right, &right_type); - } - - // Special case for string addition here. - if (op == Token::ADD && - (left_type->Is(AstType::String()) || right_type->Is(AstType::String()))) { - // Validate type feedback for left argument. - if (left_type->Is(AstType::String())) { - left = BuildCheckString(left); - } - - // Validate type feedback for right argument. - if (right_type->Is(AstType::String())) { - right = BuildCheckString(right); - } - - // Convert left argument as necessary. 
- if (left_type->Is(AstType::Number())) { - DCHECK(right_type->Is(AstType::String())); - left = BuildNumberToString(left, left_type); - } else if (!left_type->Is(AstType::String())) { - DCHECK(right_type->Is(AstType::String())); - return AddUncasted( - left, right, allocation_mode.GetPretenureMode(), - STRING_ADD_CONVERT_LEFT, allocation_mode.feedback_site()); - } - - // Convert right argument as necessary. - if (right_type->Is(AstType::Number())) { - DCHECK(left_type->Is(AstType::String())); - right = BuildNumberToString(right, right_type); - } else if (!right_type->Is(AstType::String())) { - DCHECK(left_type->Is(AstType::String())); - return AddUncasted( - left, right, allocation_mode.GetPretenureMode(), - STRING_ADD_CONVERT_RIGHT, allocation_mode.feedback_site()); - } - - // Fast paths for empty constant strings. - Handle left_string = - left->IsConstant() && HConstant::cast(left)->HasStringValue() - ? HConstant::cast(left)->StringValue() - : Handle(); - Handle right_string = - right->IsConstant() && HConstant::cast(right)->HasStringValue() - ? HConstant::cast(right)->StringValue() - : Handle(); - if (!left_string.is_null() && left_string->length() == 0) return right; - if (!right_string.is_null() && right_string->length() == 0) return left; - if (!left_string.is_null() && !right_string.is_null()) { - return AddUncasted( - left, right, allocation_mode.GetPretenureMode(), - STRING_ADD_CHECK_NONE, allocation_mode.feedback_site()); - } - - // Register the dependent code with the allocation site. - if (!allocation_mode.feedback_site().is_null()) { - DCHECK(!graph()->info()->IsStub()); - Handle site(allocation_mode.feedback_site()); - top_info()->dependencies()->AssumeTenuringDecision(site); - } - - // Inline the string addition into the stub when creating allocation - // mementos to gather allocation site feedback, or if we can statically - // infer that we're going to create a cons string. - if ((graph()->info()->IsStub() && - allocation_mode.CreateAllocationMementos()) || - (left->IsConstant() && - HConstant::cast(left)->HasStringValue() && - HConstant::cast(left)->StringValue()->length() + 1 >= - ConsString::kMinLength) || - (right->IsConstant() && - HConstant::cast(right)->HasStringValue() && - HConstant::cast(right)->StringValue()->length() + 1 >= - ConsString::kMinLength)) { - return BuildStringAdd(left, right, allocation_mode); - } - - // Fallback to using the string add stub. - return AddUncasted( - left, right, allocation_mode.GetPretenureMode(), STRING_ADD_CHECK_NONE, - allocation_mode.feedback_site()); - } - - // Special case for +x here. - if (op == Token::MUL) { - if (left->EqualsInteger32Constant(1)) { - return BuildToNumber(right); - } - if (right->EqualsInteger32Constant(1)) { - return BuildToNumber(left); - } - } - - if (graph()->info()->IsStub()) { - left = EnforceNumberType(left, left_type); - right = EnforceNumberType(right, right_type); - } - - Representation result_rep = RepresentationFor(result_type); - - bool is_non_primitive = (left_rep.IsTagged() && !left_rep.IsSmi()) || - (right_rep.IsTagged() && !right_rep.IsSmi()); - - HInstruction* instr = NULL; - // Only the stub is allowed to call into the runtime, since otherwise we would - // inline several instructions (including the two pushes) for every tagged - // operation in optimized code, which is more expensive, than a stub call. 
- if (graph()->info()->IsStub() && is_non_primitive) { - HValue* values[] = {left, right}; -#define GET_STUB(Name) \ - do { \ - Callable callable = Builtins::CallableFor(isolate(), Builtins::k##Name); \ - HValue* stub = Add(callable.code()); \ - instr = AddUncasted(stub, 0, callable.descriptor(), \ - ArrayVector(values)); \ - } while (false) - - switch (op) { - default: - UNREACHABLE(); - case Token::ADD: - GET_STUB(Add); - break; - case Token::SUB: - GET_STUB(Subtract); - break; - case Token::MUL: - GET_STUB(Multiply); - break; - case Token::DIV: - GET_STUB(Divide); - break; - case Token::MOD: - GET_STUB(Modulus); - break; - case Token::BIT_OR: - GET_STUB(BitwiseOr); - break; - case Token::BIT_AND: - GET_STUB(BitwiseAnd); - break; - case Token::BIT_XOR: - GET_STUB(BitwiseXor); - break; - case Token::SAR: - GET_STUB(ShiftRight); - break; - case Token::SHR: - GET_STUB(ShiftRightLogical); - break; - case Token::SHL: - GET_STUB(ShiftLeft); - break; - } -#undef GET_STUB - } else { - switch (op) { - case Token::ADD: - instr = AddUncasted(left, right); - break; - case Token::SUB: - instr = AddUncasted(left, right); - break; - case Token::MUL: - instr = AddUncasted(left, right); - break; - case Token::MOD: { - if (fixed_right_arg.IsJust() && - !right->EqualsInteger32Constant(fixed_right_arg.FromJust())) { - HConstant* fixed_right = - Add(static_cast(fixed_right_arg.FromJust())); - IfBuilder if_same(this); - if_same.If(right, fixed_right, Token::EQ); - if_same.Then(); - if_same.ElseDeopt(DeoptimizeReason::kUnexpectedRHSOfBinaryOperation); - right = fixed_right; - } - instr = AddUncasted(left, right); - break; - } - case Token::DIV: - instr = AddUncasted(left, right); - break; - case Token::BIT_XOR: - case Token::BIT_AND: - instr = AddUncasted(op, left, right); - break; - case Token::BIT_OR: { - HValue *operand, *shift_amount; - if (left_type->Is(AstType::Signed32()) && - right_type->Is(AstType::Signed32()) && - MatchRotateRight(left, right, &operand, &shift_amount)) { - instr = AddUncasted(operand, shift_amount); - } else { - instr = AddUncasted(op, left, right); - } - break; - } - case Token::SAR: - instr = AddUncasted(left, right); - break; - case Token::SHR: - instr = AddUncasted(left, right); - if (instr->IsShr() && CanBeZero(right)) { - graph()->RecordUint32Instruction(instr); - } - break; - case Token::SHL: - instr = AddUncasted(left, right); - break; - default: - UNREACHABLE(); - } - } - - if (instr->IsBinaryOperation()) { - HBinaryOperation* binop = HBinaryOperation::cast(instr); - binop->set_observed_input_representation(1, left_rep); - binop->set_observed_input_representation(2, right_rep); - binop->initialize_output_representation(result_rep); - if (graph()->info()->IsStub()) { - // Stub should not call into stub. - instr->SetFlag(HValue::kCannotBeTagged); - // And should truncate on HForceRepresentation already. 
- if (left->IsForceRepresentation()) { - left->CopyFlag(HValue::kTruncatingToSmi, instr); - left->CopyFlag(HValue::kTruncatingToInt32, instr); - } - if (right->IsForceRepresentation()) { - right->CopyFlag(HValue::kTruncatingToSmi, instr); - right->CopyFlag(HValue::kTruncatingToInt32, instr); - } - } - } - return instr; -} - - -HEnvironment::HEnvironment(HEnvironment* outer, - Scope* scope, - Handle closure, - Zone* zone) - : closure_(closure), - values_(0, zone), - frame_type_(JS_FUNCTION), - parameter_count_(0), - specials_count_(1), - local_count_(0), - outer_(outer), - entry_(NULL), - pop_count_(0), - push_count_(0), - ast_id_(BailoutId::None()), - zone_(zone) { - DeclarationScope* declaration_scope = scope->GetDeclarationScope(); - Initialize(declaration_scope->num_parameters() + 1, - declaration_scope->num_stack_slots(), 0); -} - - -HEnvironment::HEnvironment(Zone* zone, int parameter_count) - : values_(0, zone), - frame_type_(STUB), - parameter_count_(parameter_count), - specials_count_(1), - local_count_(0), - outer_(NULL), - entry_(NULL), - pop_count_(0), - push_count_(0), - ast_id_(BailoutId::None()), - zone_(zone) { - Initialize(parameter_count, 0, 0); -} - - -HEnvironment::HEnvironment(const HEnvironment* other, Zone* zone) - : values_(0, zone), - frame_type_(JS_FUNCTION), - parameter_count_(0), - specials_count_(0), - local_count_(0), - outer_(NULL), - entry_(NULL), - pop_count_(0), - push_count_(0), - ast_id_(other->ast_id()), - zone_(zone) { - Initialize(other); -} - - -HEnvironment::HEnvironment(HEnvironment* outer, - Handle closure, - FrameType frame_type, - int arguments, - Zone* zone) - : closure_(closure), - values_(arguments, zone), - frame_type_(frame_type), - parameter_count_(arguments), - specials_count_(0), - local_count_(0), - outer_(outer), - entry_(NULL), - pop_count_(0), - push_count_(0), - ast_id_(BailoutId::None()), - zone_(zone) { -} - - -void HEnvironment::Initialize(int parameter_count, - int local_count, - int stack_height) { - parameter_count_ = parameter_count; - local_count_ = local_count; - - // Avoid reallocating the temporaries' backing store on the first Push. - int total = parameter_count + specials_count_ + local_count + stack_height; - values_.Initialize(total + 4, zone()); - for (int i = 0; i < total; ++i) values_.Add(NULL, zone()); -} - - -void HEnvironment::Initialize(const HEnvironment* other) { - closure_ = other->closure(); - values_.AddAll(other->values_, zone()); - assigned_variables_.Union(other->assigned_variables_, zone()); - frame_type_ = other->frame_type_; - parameter_count_ = other->parameter_count_; - local_count_ = other->local_count_; - if (other->outer_ != NULL) outer_ = other->outer_->Copy(); // Deep copy. - entry_ = other->entry_; - pop_count_ = other->pop_count_; - push_count_ = other->push_count_; - specials_count_ = other->specials_count_; - ast_id_ = other->ast_id_; -} - - -void HEnvironment::AddIncomingEdge(HBasicBlock* block, HEnvironment* other) { - DCHECK(!block->IsLoopHeader()); - DCHECK(values_.length() == other->values_.length()); - - int length = values_.length(); - for (int i = 0; i < length; ++i) { - HValue* value = values_[i]; - if (value != NULL && value->IsPhi() && value->block() == block) { - // There is already a phi for the i'th value. - HPhi* phi = HPhi::cast(value); - // Assert index is correct and that we haven't missed an incoming edge. 
- DCHECK(phi->merged_index() == i || !phi->HasMergedIndex()); - DCHECK(phi->OperandCount() == block->predecessors()->length()); - phi->AddInput(other->values_[i]); - } else if (values_[i] != other->values_[i]) { - // There is a fresh value on the incoming edge, a phi is needed. - DCHECK(values_[i] != NULL && other->values_[i] != NULL); - HPhi* phi = block->AddNewPhi(i); - HValue* old_value = values_[i]; - for (int j = 0; j < block->predecessors()->length(); j++) { - phi->AddInput(old_value); - } - phi->AddInput(other->values_[i]); - this->values_[i] = phi; - } - } -} - - -void HEnvironment::Bind(int index, HValue* value) { - DCHECK(value != NULL); - assigned_variables_.Add(index, zone()); - values_[index] = value; -} - - -bool HEnvironment::HasExpressionAt(int index) const { - return index >= parameter_count_ + specials_count_ + local_count_; -} - - -bool HEnvironment::ExpressionStackIsEmpty() const { - DCHECK(length() >= first_expression_index()); - return length() == first_expression_index(); -} - - -void HEnvironment::SetExpressionStackAt(int index_from_top, HValue* value) { - int count = index_from_top + 1; - int index = values_.length() - count; - DCHECK(HasExpressionAt(index)); - // The push count must include at least the element in question or else - // the new value will not be included in this environment's history. - if (push_count_ < count) { - // This is the same effect as popping then re-pushing 'count' elements. - pop_count_ += (count - push_count_); - push_count_ = count; - } - values_[index] = value; -} - - -HValue* HEnvironment::RemoveExpressionStackAt(int index_from_top) { - int count = index_from_top + 1; - int index = values_.length() - count; - DCHECK(HasExpressionAt(index)); - // Simulate popping 'count' elements and then - // pushing 'count - 1' elements back. - pop_count_ += Max(count - push_count_, 0); - push_count_ = Max(push_count_ - count, 0) + (count - 1); - return values_.Remove(index); -} - - -void HEnvironment::Drop(int count) { - for (int i = 0; i < count; ++i) { - Pop(); - } -} - - -void HEnvironment::Print() const { - OFStream os(stdout); - os << *this << "\n"; -} - - -HEnvironment* HEnvironment::Copy() const { - return new(zone()) HEnvironment(this, zone()); -} - - -HEnvironment* HEnvironment::CopyWithoutHistory() const { - HEnvironment* result = Copy(); - result->ClearHistory(); - return result; -} - - -HEnvironment* HEnvironment::CopyAsLoopHeader(HBasicBlock* loop_header) const { - HEnvironment* new_env = Copy(); - for (int i = 0; i < values_.length(); ++i) { - HPhi* phi = loop_header->AddNewPhi(i); - phi->AddInput(values_[i]); - new_env->values_[i] = phi; - } - new_env->ClearHistory(); - return new_env; -} - - -HEnvironment* HEnvironment::CreateStubEnvironment(HEnvironment* outer, - Handle target, - FrameType frame_type, - int arguments) const { - HEnvironment* new_env = - new(zone()) HEnvironment(outer, target, frame_type, - arguments + 1, zone()); - for (int i = 0; i <= arguments; ++i) { // Include receiver. 
- new_env->Push(ExpressionStackAt(arguments - i)); - } - new_env->ClearHistory(); - return new_env; -} - -void HEnvironment::MarkAsTailCaller() { - DCHECK_EQ(JS_FUNCTION, frame_type()); - frame_type_ = TAIL_CALLER_FUNCTION; -} - -void HEnvironment::ClearTailCallerMark() { - DCHECK_EQ(TAIL_CALLER_FUNCTION, frame_type()); - frame_type_ = JS_FUNCTION; -} - -HEnvironment* HEnvironment::CopyForInlining( - Handle target, int arguments, FunctionLiteral* function, - HConstant* undefined, InliningKind inlining_kind, - TailCallMode syntactic_tail_call_mode) const { - DCHECK_EQ(JS_FUNCTION, frame_type()); - - // Outer environment is a copy of this one without the arguments. - int arity = function->scope()->num_parameters(); - - HEnvironment* outer = Copy(); - outer->Drop(arguments + 1); // Including receiver. - outer->ClearHistory(); - - if (syntactic_tail_call_mode == TailCallMode::kAllow) { - DCHECK_EQ(NORMAL_RETURN, inlining_kind); - outer->MarkAsTailCaller(); - } - - if (inlining_kind == CONSTRUCT_CALL_RETURN) { - // Create artificial constructor stub environment. The receiver should - // actually be the constructor function, but we pass the newly allocated - // object instead, DoComputeConstructStubFrame() relies on that. - outer = CreateStubEnvironment(outer, target, JS_CONSTRUCT, arguments); - } else if (inlining_kind == GETTER_CALL_RETURN) { - // We need an additional StackFrame::INTERNAL frame for restoring the - // correct context. - outer = CreateStubEnvironment(outer, target, JS_GETTER, arguments); - } else if (inlining_kind == SETTER_CALL_RETURN) { - // We need an additional StackFrame::INTERNAL frame for temporarily saving - // the argument of the setter, see StoreStubCompiler::CompileStoreViaSetter. - outer = CreateStubEnvironment(outer, target, JS_SETTER, arguments); - } - - if (arity != arguments) { - // Create artificial arguments adaptation environment. - outer = CreateStubEnvironment(outer, target, ARGUMENTS_ADAPTOR, arguments); - } - - HEnvironment* inner = - new(zone()) HEnvironment(outer, function->scope(), target, zone()); - // Get the argument values from the original environment. - for (int i = 0; i <= arity; ++i) { // Include receiver. - HValue* push = (i <= arguments) ? 
- ExpressionStackAt(arguments - i) : undefined; - inner->SetValueAt(i, push); - } - inner->SetValueAt(arity + 1, context()); - for (int i = arity + 2; i < inner->length(); ++i) { - inner->SetValueAt(i, undefined); - } - - inner->set_ast_id(BailoutId::FunctionEntry()); - return inner; -} - - -std::ostream& operator<<(std::ostream& os, const HEnvironment& env) { - for (int i = 0; i < env.length(); i++) { - if (i == 0) os << "parameters\n"; - if (i == env.parameter_count()) os << "specials\n"; - if (i == env.parameter_count() + env.specials_count()) os << "locals\n"; - if (i == env.parameter_count() + env.specials_count() + env.local_count()) { - os << "expressions\n"; - } - HValue* val = env.values()->at(i); - os << i << ": "; - if (val != NULL) { - os << val; - } else { - os << "NULL"; - } - os << "\n"; - } - return os << "\n"; -} - - -void HTracer::TraceCompilation(CompilationInfo* info) { - Tag tag(this, "compilation"); - std::string name; - if (info->parse_info()) { - Object* source_name = info->script()->name(); - if (source_name->IsString()) { - String* str = String::cast(source_name); - if (str->length() > 0) { - name.append(str->ToCString().get()); - name.append(":"); - } - } - } - std::unique_ptr method_name = info->GetDebugName(); - name.append(method_name.get()); - if (info->IsOptimizing()) { - PrintStringProperty("name", name.c_str()); - PrintIndent(); - trace_.Add("method \"%s:%d\"\n", method_name.get(), - info->optimization_id()); - } else { - PrintStringProperty("name", name.c_str()); - PrintStringProperty("method", "stub"); - } - PrintLongProperty("date", - static_cast(base::OS::TimeCurrentMillis())); -} - - -void HTracer::TraceLithium(const char* name, LChunk* chunk) { - DCHECK(!chunk->isolate()->concurrent_recompilation_enabled()); - AllowHandleDereference allow_deref; - AllowDeferredHandleDereference allow_deferred_deref; - Trace(name, chunk->graph(), chunk); -} - - -void HTracer::TraceHydrogen(const char* name, HGraph* graph) { - DCHECK(!graph->isolate()->concurrent_recompilation_enabled()); - AllowHandleDereference allow_deref; - AllowDeferredHandleDereference allow_deferred_deref; - Trace(name, graph, NULL); -} - - -void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) { - Tag tag(this, "cfg"); - PrintStringProperty("name", name); - const ZoneList* blocks = graph->blocks(); - for (int i = 0; i < blocks->length(); i++) { - HBasicBlock* current = blocks->at(i); - Tag block_tag(this, "block"); - PrintBlockProperty("name", current->block_id()); - PrintIntProperty("from_bci", -1); - PrintIntProperty("to_bci", -1); - - if (!current->predecessors()->is_empty()) { - PrintIndent(); - trace_.Add("predecessors"); - for (int j = 0; j < current->predecessors()->length(); ++j) { - trace_.Add(" \"B%d\"", current->predecessors()->at(j)->block_id()); - } - trace_.Add("\n"); - } else { - PrintEmptyProperty("predecessors"); - } - - if (current->end()->SuccessorCount() == 0) { - PrintEmptyProperty("successors"); - } else { - PrintIndent(); - trace_.Add("successors"); - for (HSuccessorIterator it(current->end()); !it.Done(); it.Advance()) { - trace_.Add(" \"B%d\"", it.Current()->block_id()); - } - trace_.Add("\n"); - } - - PrintEmptyProperty("xhandlers"); - - { - PrintIndent(); - trace_.Add("flags"); - if (current->IsLoopSuccessorDominator()) { - trace_.Add(" \"dom-loop-succ\""); - } - if (current->IsUnreachable()) { - trace_.Add(" \"dead\""); - } - if (current->is_osr_entry()) { - trace_.Add(" \"osr\""); - } - trace_.Add("\n"); - } - - if (current->dominator() != NULL) { - 
PrintBlockProperty("dominator", current->dominator()->block_id()); - } - - PrintIntProperty("loop_depth", current->LoopNestingDepth()); - - if (chunk != NULL) { - int first_index = current->first_instruction_index(); - int last_index = current->last_instruction_index(); - PrintIntProperty( - "first_lir_id", - LifetimePosition::FromInstructionIndex(first_index).Value()); - PrintIntProperty( - "last_lir_id", - LifetimePosition::FromInstructionIndex(last_index).Value()); - } - - { - Tag states_tag(this, "states"); - Tag locals_tag(this, "locals"); - int total = current->phis()->length(); - PrintIntProperty("size", current->phis()->length()); - PrintStringProperty("method", "None"); - for (int j = 0; j < total; ++j) { - HPhi* phi = current->phis()->at(j); - PrintIndent(); - std::ostringstream os; - os << phi->merged_index() << " " << NameOf(phi) << " " << *phi << "\n"; - trace_.Add(os.str().c_str()); - } - } - - { - Tag HIR_tag(this, "HIR"); - for (HInstructionIterator it(current); !it.Done(); it.Advance()) { - HInstruction* instruction = it.Current(); - int uses = instruction->UseCount(); - PrintIndent(); - std::ostringstream os; - os << "0 " << uses << " " << NameOf(instruction) << " " << *instruction; - if (instruction->has_position()) { - const SourcePosition pos = instruction->position(); - os << " pos:"; - if (pos.isInlined()) os << "inlining(" << pos.InliningId() << "),"; - os << pos.ScriptOffset(); - } - os << " <|@\n"; - trace_.Add(os.str().c_str()); - } - } - - - if (chunk != NULL) { - Tag LIR_tag(this, "LIR"); - int first_index = current->first_instruction_index(); - int last_index = current->last_instruction_index(); - if (first_index != -1 && last_index != -1) { - const ZoneList* instructions = chunk->instructions(); - for (int i = first_index; i <= last_index; ++i) { - LInstruction* linstr = instructions->at(i); - if (linstr != NULL) { - PrintIndent(); - trace_.Add("%d ", - LifetimePosition::FromInstructionIndex(i).Value()); - linstr->PrintTo(&trace_); - std::ostringstream os; - os << " [hir:" << NameOf(linstr->hydrogen_value()) << "] <|@\n"; - trace_.Add(os.str().c_str()); - } - } - } - } - } -} - - -void HTracer::TraceLiveRanges(const char* name, LAllocator* allocator) { - Tag tag(this, "intervals"); - PrintStringProperty("name", name); - - const Vector* fixed_d = allocator->fixed_double_live_ranges(); - for (int i = 0; i < fixed_d->length(); ++i) { - TraceLiveRange(fixed_d->at(i), "fixed", allocator->zone()); - } - - const Vector* fixed = allocator->fixed_live_ranges(); - for (int i = 0; i < fixed->length(); ++i) { - TraceLiveRange(fixed->at(i), "fixed", allocator->zone()); - } - - const ZoneList* live_ranges = allocator->live_ranges(); - for (int i = 0; i < live_ranges->length(); ++i) { - TraceLiveRange(live_ranges->at(i), "object", allocator->zone()); - } -} - - -void HTracer::TraceLiveRange(LiveRange* range, const char* type, - Zone* zone) { - if (range != NULL && !range->IsEmpty()) { - PrintIndent(); - trace_.Add("%d %s", range->id(), type); - if (range->HasRegisterAssigned()) { - LOperand* op = range->CreateAssignedOperand(zone); - int assigned_reg = op->index(); - if (op->IsDoubleRegister()) { - trace_.Add(" \"%s\"", - GetRegConfig()->GetDoubleRegisterName(assigned_reg)); - } else { - DCHECK(op->IsRegister()); - trace_.Add(" \"%s\"", - GetRegConfig()->GetGeneralRegisterName(assigned_reg)); - } - } else if (range->IsSpilled()) { - LOperand* op = range->TopLevel()->GetSpillOperand(); - if (op->IsDoubleStackSlot()) { - trace_.Add(" \"double_stack:%d\"", op->index()); - } 
else { - DCHECK(op->IsStackSlot()); - trace_.Add(" \"stack:%d\"", op->index()); - } - } - int parent_index = -1; - if (range->IsChild()) { - parent_index = range->parent()->id(); - } else { - parent_index = range->id(); - } - LOperand* op = range->FirstHint(); - int hint_index = -1; - if (op != NULL && op->IsUnallocated()) { - hint_index = LUnallocated::cast(op)->virtual_register(); - } - trace_.Add(" %d %d", parent_index, hint_index); - UseInterval* cur_interval = range->first_interval(); - while (cur_interval != NULL && range->Covers(cur_interval->start())) { - trace_.Add(" [%d, %d[", - cur_interval->start().Value(), - cur_interval->end().Value()); - cur_interval = cur_interval->next(); - } - - UsePosition* current_pos = range->first_pos(); - while (current_pos != NULL) { - if (current_pos->RegisterIsBeneficial() || FLAG_trace_all_uses) { - trace_.Add(" %d M", current_pos->pos().Value()); - } - current_pos = current_pos->next(); - } - - trace_.Add(" \"\"\n"); - } -} - - -void HTracer::FlushToFile() { - AppendChars(filename_.start(), trace_.ToCString().get(), trace_.length(), - false); - trace_.Reset(); -} - - -void HStatistics::Initialize(CompilationInfo* info) { - if (!info->has_shared_info()) return; - source_size_ += info->shared_info()->SourceSize(); -} - - -void HStatistics::Print() { - PrintF( - "\n" - "----------------------------------------" - "----------------------------------------\n" - "--- Hydrogen timing results:\n" - "----------------------------------------" - "----------------------------------------\n"); - base::TimeDelta sum; - for (int i = 0; i < times_.length(); ++i) { - sum += times_[i]; - } - - for (int i = 0; i < names_.length(); ++i) { - PrintF("%33s", names_[i]); - double ms = times_[i].InMillisecondsF(); - double percent = times_[i].PercentOf(sum); - PrintF(" %8.3f ms / %4.1f %% ", ms, percent); - - size_t size = sizes_[i]; - double size_percent = static_cast(size) * 100 / total_size_; - PrintF(" %9zu bytes / %4.1f %%\n", size, size_percent); - } - - PrintF( - "----------------------------------------" - "----------------------------------------\n"); - base::TimeDelta total = create_graph_ + optimize_graph_ + generate_code_; - PrintF("%33s %8.3f ms / %4.1f %% \n", "Create graph", - create_graph_.InMillisecondsF(), create_graph_.PercentOf(total)); - PrintF("%33s %8.3f ms / %4.1f %% \n", "Optimize graph", - optimize_graph_.InMillisecondsF(), optimize_graph_.PercentOf(total)); - PrintF("%33s %8.3f ms / %4.1f %% \n", "Generate and install code", - generate_code_.InMillisecondsF(), generate_code_.PercentOf(total)); - PrintF( - "----------------------------------------" - "----------------------------------------\n"); - PrintF("%33s %8.3f ms %9zu bytes\n", "Total", - total.InMillisecondsF(), total_size_); - PrintF("%33s (%.1f times slower than full code gen)\n", "", - total.TimesOf(full_code_gen_)); - - double source_size_in_kb = static_cast(source_size_) / 1024; - double normalized_time = source_size_in_kb > 0 - ? total.InMillisecondsF() / source_size_in_kb - : 0; - double normalized_size_in_kb = - source_size_in_kb > 0 - ? 
static_cast(total_size_) / 1024 / source_size_in_kb - : 0; - PrintF("%33s %8.3f ms %7.3f kB allocated\n", - "Average per kB source", normalized_time, normalized_size_in_kb); -} - - -void HStatistics::SaveTiming(const char* name, base::TimeDelta time, - size_t size) { - total_size_ += size; - for (int i = 0; i < names_.length(); ++i) { - if (strcmp(names_[i], name) == 0) { - times_[i] += time; - sizes_[i] += size; - return; - } - } - names_.Add(name); - times_.Add(time); - sizes_.Add(size); -} - - -HPhase::~HPhase() { - if (ShouldProduceTraceOutput()) { - isolate()->GetHTracer()->TraceHydrogen(name(), graph_); - } - -#ifdef DEBUG - graph_->Verify(false); // No full verify. -#endif -} - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/hydrogen.h b/src/crankshaft/hydrogen.h deleted file mode 100644 index 3f0c66d0ee..0000000000 --- a/src/crankshaft/hydrogen.h +++ /dev/null @@ -1,1864 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_HYDROGEN_H_ -#define V8_CRANKSHAFT_HYDROGEN_H_ - -#include "src/accessors.h" -#include "src/allocation.h" -#include "src/ast/ast-type-bounds.h" -#include "src/ast/scopes.h" -#include "src/bailout-reason.h" -#include "src/compilation-info.h" -#include "src/compiler.h" -#include "src/counters.h" -#include "src/crankshaft/compilation-phase.h" -#include "src/crankshaft/hydrogen-instructions.h" -#include "src/globals.h" -#include "src/parsing/parse-info.h" -#include "src/string-stream.h" -#include "src/transitions.h" -#include "src/zone/zone.h" - -namespace v8 { -namespace internal { - -// Forward declarations. -class BitVector; -class HEnvironment; -class HGraph; -class HLoopInformation; -class HTracer; -class LAllocator; -class LChunk; -class LiveRange; - -class HBasicBlock final : public ZoneObject { - public: - explicit HBasicBlock(HGraph* graph); - ~HBasicBlock() { } - - // Simple accessors. - int block_id() const { return block_id_; } - void set_block_id(int id) { block_id_ = id; } - HGraph* graph() const { return graph_; } - Isolate* isolate() const; - const ZoneList* phis() const { return &phis_; } - HInstruction* first() const { return first_; } - HInstruction* last() const { return last_; } - void set_last(HInstruction* instr) { last_ = instr; } - HControlInstruction* end() const { return end_; } - HLoopInformation* loop_information() const { return loop_information_; } - HLoopInformation* current_loop() const { - return IsLoopHeader() ? loop_information() - : (parent_loop_header() != NULL - ? 
parent_loop_header()->loop_information() : NULL); - } - const ZoneList* predecessors() const { return &predecessors_; } - bool HasPredecessor() const { return predecessors_.length() > 0; } - const ZoneList* dominated_blocks() const { - return &dominated_blocks_; - } - const ZoneList* deleted_phis() const { - return &deleted_phis_; - } - void RecordDeletedPhi(int merge_index) { - deleted_phis_.Add(merge_index, zone()); - } - HBasicBlock* dominator() const { return dominator_; } - HEnvironment* last_environment() const { return last_environment_; } - int argument_count() const { return argument_count_; } - void set_argument_count(int count) { argument_count_ = count; } - int first_instruction_index() const { return first_instruction_index_; } - void set_first_instruction_index(int index) { - first_instruction_index_ = index; - } - int last_instruction_index() const { return last_instruction_index_; } - void set_last_instruction_index(int index) { - last_instruction_index_ = index; - } - bool is_osr_entry() { return is_osr_entry_; } - void set_osr_entry() { is_osr_entry_ = true; } - - void AttachLoopInformation(); - void DetachLoopInformation(); - bool IsLoopHeader() const { return loop_information() != NULL; } - bool IsStartBlock() const { return block_id() == 0; } - - bool IsFinished() const { return end_ != NULL; } - void AddPhi(HPhi* phi); - void RemovePhi(HPhi* phi); - void AddInstruction(HInstruction* instr, SourcePosition position); - bool Dominates(HBasicBlock* other) const; - bool EqualToOrDominates(HBasicBlock* other) const; - int LoopNestingDepth() const; - - void SetInitialEnvironment(HEnvironment* env); - void ClearEnvironment() { - DCHECK(IsFinished()); - DCHECK(end()->SuccessorCount() == 0); - last_environment_ = NULL; - } - bool HasEnvironment() const { return last_environment_ != NULL; } - void UpdateEnvironment(HEnvironment* env); - HBasicBlock* parent_loop_header() const { return parent_loop_header_; } - - void set_parent_loop_header(HBasicBlock* block) { - DCHECK(parent_loop_header_ == NULL); - parent_loop_header_ = block; - } - - bool HasParentLoopHeader() const { return parent_loop_header_ != NULL; } - - void SetJoinId(BailoutId ast_id); - - int PredecessorIndexOf(HBasicBlock* predecessor) const; - HPhi* AddNewPhi(int merged_index); - HSimulate* AddNewSimulate(BailoutId ast_id, SourcePosition position, - RemovableSimulate removable = FIXED_SIMULATE) { - HSimulate* instr = CreateSimulate(ast_id, removable); - AddInstruction(instr, position); - return instr; - } - void AssignCommonDominator(HBasicBlock* other); - void AssignLoopSuccessorDominators(); - - bool IsDeoptimizing() const { - return end() != NULL && end()->IsDeoptimize(); - } - - void MarkUnreachable(); - bool IsUnreachable() const { return !is_reachable_; } - bool IsReachable() const { return is_reachable_; } - - bool IsLoopSuccessorDominator() const { - return dominates_loop_successors_; - } - void MarkAsLoopSuccessorDominator() { - dominates_loop_successors_ = true; - } - - bool IsOrdered() const { return is_ordered_; } - void MarkAsOrdered() { is_ordered_ = true; } - - void MarkSuccEdgeUnreachable(int succ); - - inline Zone* zone() const; - -#ifdef DEBUG - void Verify(); -#endif - - protected: - friend class HGraphBuilder; - - HSimulate* CreateSimulate(BailoutId ast_id, RemovableSimulate removable); - void Finish(HControlInstruction* last, SourcePosition position); - void FinishExit(HControlInstruction* instruction, SourcePosition position); - void Goto(HBasicBlock* block, SourcePosition position, - bool 
add_simulate = true); - void GotoNoSimulate(HBasicBlock* block, SourcePosition position) { - Goto(block, position, false); - } - - private: - void RegisterPredecessor(HBasicBlock* pred); - void AddDominatedBlock(HBasicBlock* block); - - int block_id_; - HGraph* graph_; - ZoneList phis_; - HInstruction* first_; - HInstruction* last_; - HControlInstruction* end_; - HLoopInformation* loop_information_; - ZoneList predecessors_; - HBasicBlock* dominator_; - ZoneList dominated_blocks_; - HEnvironment* last_environment_; - // Outgoing parameter count at block exit, set during lithium translation. - int argument_count_; - // Instruction indices into the lithium code stream. - int first_instruction_index_; - int last_instruction_index_; - ZoneList deleted_phis_; - HBasicBlock* parent_loop_header_; - bool is_reachable_ : 1; - bool dominates_loop_successors_ : 1; - bool is_osr_entry_ : 1; - bool is_ordered_ : 1; -}; - - -std::ostream& operator<<(std::ostream& os, const HBasicBlock& b); - - -class HPredecessorIterator final BASE_EMBEDDED { - public: - explicit HPredecessorIterator(HBasicBlock* block) - : predecessor_list_(block->predecessors()), current_(0) { } - - bool Done() { return current_ >= predecessor_list_->length(); } - HBasicBlock* Current() { return predecessor_list_->at(current_); } - void Advance() { current_++; } - - private: - const ZoneList* predecessor_list_; - int current_; -}; - - -class HInstructionIterator final BASE_EMBEDDED { - public: - explicit HInstructionIterator(HBasicBlock* block) - : instr_(block->first()) { - next_ = Done() ? NULL : instr_->next(); - } - - inline bool Done() const { return instr_ == NULL; } - inline HInstruction* Current() { return instr_; } - inline void Advance() { - instr_ = next_; - next_ = Done() ? NULL : instr_->next(); - } - - private: - HInstruction* instr_; - HInstruction* next_; -}; - - -class HLoopInformation final : public ZoneObject { - public: - HLoopInformation(HBasicBlock* loop_header, Zone* zone) - : back_edges_(4, zone), - loop_header_(loop_header), - blocks_(8, zone), - stack_check_(NULL) { - blocks_.Add(loop_header, zone); - } - ~HLoopInformation() {} - - const ZoneList* back_edges() const { return &back_edges_; } - const ZoneList* blocks() const { return &blocks_; } - HBasicBlock* loop_header() const { return loop_header_; } - HBasicBlock* GetLastBackEdge() const; - void RegisterBackEdge(HBasicBlock* block); - - HStackCheck* stack_check() const { return stack_check_; } - void set_stack_check(HStackCheck* stack_check) { - stack_check_ = stack_check; - } - - bool IsNestedInThisLoop(HLoopInformation* other) { - while (other != NULL) { - if (other == this) { - return true; - } - other = other->parent_loop(); - } - return false; - } - HLoopInformation* parent_loop() { - HBasicBlock* parent_header = loop_header()->parent_loop_header(); - return parent_header != NULL ? 
parent_header->loop_information() : NULL; - } - - private: - void AddBlock(HBasicBlock* block); - - ZoneList back_edges_; - HBasicBlock* loop_header_; - ZoneList blocks_; - HStackCheck* stack_check_; -}; - -class HGraph final : public ZoneObject { - public: - explicit HGraph(CompilationInfo* info, CallInterfaceDescriptor descriptor); - - Isolate* isolate() const { return isolate_; } - Zone* zone() const { return zone_; } - CompilationInfo* info() const { return info_; } - CallInterfaceDescriptor descriptor() const { return descriptor_; } - - const ZoneList* blocks() const { return &blocks_; } - const ZoneList* phi_list() const { return phi_list_; } - HBasicBlock* entry_block() const { return entry_block_; } - HEnvironment* start_environment() const { return start_environment_; } - - void FinalizeUniqueness(); - void OrderBlocks(); - void AssignDominators(); - void RestoreActualValues(); - - // Returns false if there are phi-uses of the arguments-object - // which are not supported by the optimizing compiler. - bool CheckArgumentsPhiUses(); - - // Returns false if there are phi-uses of an uninitialized const - // which are not supported by the optimizing compiler. - bool CheckConstPhiUses(); - - void CollectPhis(); - - HConstant* GetConstantUndefined(); - HConstant* GetConstant0(); - HConstant* GetConstant1(); - HConstant* GetConstantMinus1(); - HConstant* GetConstantTrue(); - HConstant* GetConstantFalse(); - HConstant* GetConstantBool(bool value); - HConstant* GetConstantHole(); - HConstant* GetConstantNull(); - HConstant* GetConstantOptimizedOut(); - HConstant* GetInvalidContext(); - - bool IsConstantUndefined(HConstant* constant); - bool IsConstant0(HConstant* constant); - bool IsConstant1(HConstant* constant); - bool IsConstantMinus1(HConstant* constant); - bool IsConstantTrue(HConstant* constant); - bool IsConstantFalse(HConstant* constant); - bool IsConstantHole(HConstant* constant); - bool IsConstantNull(HConstant* constant); - bool IsStandardConstant(HConstant* constant); - - HBasicBlock* CreateBasicBlock(); - - int GetMaximumValueID() const { return values_.length(); } - int GetNextBlockID() { return next_block_id_++; } - int GetNextValueID(HValue* value) { - DCHECK(!disallow_adding_new_values_); - values_.Add(value, zone()); - return values_.length() - 1; - } - HValue* LookupValue(int id) const { - if (id >= 0 && id < values_.length()) return values_[id]; - return NULL; - } - void DisallowAddingNewValues() { - disallow_adding_new_values_ = true; - } - - bool Optimize(BailoutReason* bailout_reason); - -#ifdef DEBUG - void Verify(bool do_full_verify) const; -#endif - - int update_type_change_checksum(int delta) { - type_change_checksum_ += delta; - return type_change_checksum_; - } - - void update_maximum_environment_size(int environment_size) { - if (environment_size > maximum_environment_size_) { - maximum_environment_size_ = environment_size; - } - } - int maximum_environment_size() { return maximum_environment_size_; } - - bool allow_code_motion() const { return allow_code_motion_; } - void set_allow_code_motion(bool value) { allow_code_motion_ = value; } - - bool use_optimistic_licm() const { return use_optimistic_licm_; } - void set_use_optimistic_licm(bool value) { use_optimistic_licm_ = value; } - - void MarkDependsOnEmptyArrayProtoElements() { - // Add map dependency if not already added. 
- if (depends_on_empty_array_proto_elements_) return; - info()->dependencies()->AssumePropertyCell( - isolate()->factory()->array_protector()); - depends_on_empty_array_proto_elements_ = true; - } - - bool depends_on_empty_array_proto_elements() { - return depends_on_empty_array_proto_elements_; - } - - void MarkDependsOnStringLengthOverflow() { - if (depends_on_string_length_overflow_) return; - info()->dependencies()->AssumePropertyCell( - isolate()->factory()->string_length_protector()); - depends_on_string_length_overflow_ = true; - } - - bool has_uint32_instructions() { - DCHECK(uint32_instructions_ == NULL || !uint32_instructions_->is_empty()); - return uint32_instructions_ != NULL; - } - - ZoneList* uint32_instructions() { - DCHECK(uint32_instructions_ == NULL || !uint32_instructions_->is_empty()); - return uint32_instructions_; - } - - void RecordUint32Instruction(HInstruction* instr) { - DCHECK(uint32_instructions_ == NULL || !uint32_instructions_->is_empty()); - if (uint32_instructions_ == NULL) { - uint32_instructions_ = new(zone()) ZoneList(4, zone()); - } - uint32_instructions_->Add(instr, zone()); - } - - void IncrementInNoSideEffectsScope() { no_side_effects_scope_count_++; } - void DecrementInNoSideEffectsScope() { no_side_effects_scope_count_--; } - bool IsInsideNoSideEffectsScope() { return no_side_effects_scope_count_ > 0; } - - private: - HConstant* ReinsertConstantIfNecessary(HConstant* constant); - HConstant* GetConstant(SetOncePointer* pointer, - int32_t integer_value); - - template - void Run() { - Phase phase(this); - phase.Run(); - } - - Isolate* isolate_; - int next_block_id_; - HBasicBlock* entry_block_; - HEnvironment* start_environment_; - ZoneList blocks_; - ZoneList values_; - ZoneList* phi_list_; - ZoneList* uint32_instructions_; - SetOncePointer constant_undefined_; - SetOncePointer constant_0_; - SetOncePointer constant_1_; - SetOncePointer constant_minus1_; - SetOncePointer constant_true_; - SetOncePointer constant_false_; - SetOncePointer constant_the_hole_; - SetOncePointer constant_null_; - SetOncePointer constant_optimized_out_; - SetOncePointer constant_invalid_context_; - - CompilationInfo* info_; - CallInterfaceDescriptor descriptor_; - Zone* zone_; - - bool allow_code_motion_; - bool use_optimistic_licm_; - bool depends_on_empty_array_proto_elements_; - bool depends_on_string_length_overflow_; - int type_change_checksum_; - int maximum_environment_size_; - int no_side_effects_scope_count_; - bool disallow_adding_new_values_; - - DISALLOW_COPY_AND_ASSIGN(HGraph); -}; - - -Zone* HBasicBlock::zone() const { return graph_->zone(); } - - -// Type of stack frame an environment might refer to. -enum FrameType { - JS_FUNCTION, - JS_CONSTRUCT, - JS_GETTER, - JS_SETTER, - ARGUMENTS_ADAPTOR, - TAIL_CALLER_FUNCTION, - STUB -}; - -class HEnvironment final : public ZoneObject { - public: - HEnvironment(HEnvironment* outer, - Scope* scope, - Handle closure, - Zone* zone); - - HEnvironment(Zone* zone, int parameter_count); - - HEnvironment* arguments_environment() { - return outer()->frame_type() == ARGUMENTS_ADAPTOR ? outer() : this; - } - - // Simple accessors. 
- Handle closure() const { return closure_; } - const ZoneList* values() const { return &values_; } - const GrowableBitVector* assigned_variables() const { - return &assigned_variables_; - } - FrameType frame_type() const { return frame_type_; } - int parameter_count() const { return parameter_count_; } - int specials_count() const { return specials_count_; } - int local_count() const { return local_count_; } - HEnvironment* outer() const { return outer_; } - int pop_count() const { return pop_count_; } - int push_count() const { return push_count_; } - - BailoutId ast_id() const { return ast_id_; } - void set_ast_id(BailoutId id) { ast_id_ = id; } - - HEnterInlined* entry() const { return entry_; } - void set_entry(HEnterInlined* entry) { entry_ = entry; } - - int length() const { return values_.length(); } - - int first_expression_index() const { - return parameter_count() + specials_count() + local_count(); - } - - int first_local_index() const { - return parameter_count() + specials_count(); - } - - void Bind(Variable* variable, HValue* value) { - Bind(IndexFor(variable), value); - } - - void Bind(int index, HValue* value); - - void BindContext(HValue* value) { - Bind(parameter_count(), value); - } - - HValue* Lookup(Variable* variable) const { - return Lookup(IndexFor(variable)); - } - - HValue* Lookup(int index) const { - HValue* result = values_[index]; - DCHECK(result != NULL); - return result; - } - - HValue* context() const { - // Return first special. - return Lookup(parameter_count()); - } - - void Push(HValue* value) { - DCHECK(value != NULL); - ++push_count_; - values_.Add(value, zone()); - } - - HValue* Pop() { - DCHECK(!ExpressionStackIsEmpty()); - if (push_count_ > 0) { - --push_count_; - } else { - ++pop_count_; - } - return values_.RemoveLast(); - } - - void Drop(int count); - - HValue* Top() const { return ExpressionStackAt(0); } - - bool ExpressionStackIsEmpty() const; - - HValue* ExpressionStackAt(int index_from_top) const { - int index = length() - index_from_top - 1; - DCHECK(HasExpressionAt(index)); - return values_[index]; - } - - void SetExpressionStackAt(int index_from_top, HValue* value); - HValue* RemoveExpressionStackAt(int index_from_top); - - void Print() const; - - HEnvironment* Copy() const; - HEnvironment* CopyWithoutHistory() const; - HEnvironment* CopyAsLoopHeader(HBasicBlock* block) const; - - // Create an "inlined version" of this environment, where the original - // environment is the outer environment but the top expression stack - // elements are moved to an inner environment as parameters. - HEnvironment* CopyForInlining(Handle target, int arguments, - FunctionLiteral* function, HConstant* undefined, - InliningKind inlining_kind, - TailCallMode syntactic_tail_call_mode) const; - - HEnvironment* DiscardInlined(bool drop_extra) { - HEnvironment* outer = outer_; - while (outer->frame_type() != JS_FUNCTION && - outer->frame_type() != TAIL_CALLER_FUNCTION) { - outer = outer->outer_; - } - if (drop_extra) outer->Drop(1); - if (outer->frame_type() == TAIL_CALLER_FUNCTION) { - outer->ClearTailCallerMark(); - } - return outer; - } - - void AddIncomingEdge(HBasicBlock* block, HEnvironment* other); - - void ClearHistory() { - pop_count_ = 0; - push_count_ = 0; - assigned_variables_.Clear(); - } - - void SetValueAt(int index, HValue* value) { - DCHECK(index < length()); - values_[index] = value; - } - - // Map a variable to an environment index. Parameter indices are shifted - // by 1 (receiver is parameter index -1 but environment index 0). 
- // Stack-allocated local indices are shifted by the number of parameters. - int IndexFor(Variable* variable) const { - DCHECK(variable->IsStackAllocated()); - int shift = variable->IsParameter() - ? 1 - : parameter_count_ + specials_count_; - return variable->index() + shift; - } - - bool is_local_index(int i) const { - return i >= first_local_index() && i < first_expression_index(); - } - - bool is_parameter_index(int i) const { - return i >= 0 && i < parameter_count(); - } - - bool is_special_index(int i) const { - return i >= parameter_count() && i < parameter_count() + specials_count(); - } - - Zone* zone() const { return zone_; } - - private: - HEnvironment(const HEnvironment* other, Zone* zone); - - HEnvironment(HEnvironment* outer, - Handle closure, - FrameType frame_type, - int arguments, - Zone* zone); - - // Create an artificial stub environment (e.g. for argument adaptor or - // constructor stub). - HEnvironment* CreateStubEnvironment(HEnvironment* outer, - Handle target, - FrameType frame_type, - int arguments) const; - - // Marks current environment as tail caller by setting frame type to - // TAIL_CALLER_FUNCTION. - void MarkAsTailCaller(); - void ClearTailCallerMark(); - - // True if index is included in the expression stack part of the environment. - bool HasExpressionAt(int index) const; - - void Initialize(int parameter_count, int local_count, int stack_height); - void Initialize(const HEnvironment* other); - - Handle closure_; - // Value array [parameters] [specials] [locals] [temporaries]. - ZoneList values_; - GrowableBitVector assigned_variables_; - FrameType frame_type_; - int parameter_count_; - int specials_count_; - int local_count_; - HEnvironment* outer_; - HEnterInlined* entry_; - int pop_count_; - int push_count_; - BailoutId ast_id_; - Zone* zone_; -}; - - -std::ostream& operator<<(std::ostream& os, const HEnvironment& env); - - -enum ArgumentsAllowedFlag { - ARGUMENTS_NOT_ALLOWED, - ARGUMENTS_ALLOWED, - ARGUMENTS_FAKED -}; - - -class HIfContinuation; - - -class HIfContinuation final { - public: - HIfContinuation() - : continuation_captured_(false), - true_branch_(NULL), - false_branch_(NULL) {} - HIfContinuation(HBasicBlock* true_branch, - HBasicBlock* false_branch) - : continuation_captured_(true), true_branch_(true_branch), - false_branch_(false_branch) {} - ~HIfContinuation() { DCHECK(!continuation_captured_); } - - void Capture(HBasicBlock* true_branch, - HBasicBlock* false_branch) { - DCHECK(!continuation_captured_); - true_branch_ = true_branch; - false_branch_ = false_branch; - continuation_captured_ = true; - } - - void Continue(HBasicBlock** true_branch, - HBasicBlock** false_branch) { - DCHECK(continuation_captured_); - *true_branch = true_branch_; - *false_branch = false_branch_; - continuation_captured_ = false; - } - - bool IsTrueReachable() { return true_branch_ != NULL; } - bool IsFalseReachable() { return false_branch_ != NULL; } - bool TrueAndFalseReachable() { - return IsTrueReachable() || IsFalseReachable(); - } - - HBasicBlock* true_branch() const { return true_branch_; } - HBasicBlock* false_branch() const { return false_branch_; } - - private: - bool continuation_captured_; - HBasicBlock* true_branch_; - HBasicBlock* false_branch_; -}; - - -class HAllocationMode final BASE_EMBEDDED { - public: - explicit HAllocationMode(Handle feedback_site) - : current_site_(NULL), feedback_site_(feedback_site), - pretenure_flag_(NOT_TENURED) {} - explicit HAllocationMode(HValue* current_site) - : current_site_(current_site), 
pretenure_flag_(NOT_TENURED) {} - explicit HAllocationMode(PretenureFlag pretenure_flag) - : current_site_(NULL), pretenure_flag_(pretenure_flag) {} - HAllocationMode() - : current_site_(NULL), pretenure_flag_(NOT_TENURED) {} - - HValue* current_site() const { return current_site_; } - Handle feedback_site() const { return feedback_site_; } - - bool CreateAllocationMementos() const WARN_UNUSED_RESULT { - return current_site() != NULL; - } - - PretenureFlag GetPretenureMode() const WARN_UNUSED_RESULT { - if (!feedback_site().is_null()) return feedback_site()->GetPretenureMode(); - return pretenure_flag_; - } - - private: - HValue* current_site_; - Handle feedback_site_; - PretenureFlag pretenure_flag_; -}; - - -class HGraphBuilder { - public: - explicit HGraphBuilder(CompilationInfo* info, - CallInterfaceDescriptor descriptor, - bool track_positions) - : info_(info), - descriptor_(descriptor), - graph_(NULL), - current_block_(NULL), - scope_(info->scope()), - position_(SourcePosition::Unknown()), - track_positions_(track_positions) {} - virtual ~HGraphBuilder() {} - - Scope* scope() const { return scope_; } - void set_scope(Scope* scope) { scope_ = scope; } - - HBasicBlock* current_block() const { return current_block_; } - void set_current_block(HBasicBlock* block) { current_block_ = block; } - HEnvironment* environment() const { - return current_block()->last_environment(); - } - Zone* zone() const { return info_->zone(); } - HGraph* graph() const { return graph_; } - Isolate* isolate() const { return graph_->isolate(); } - CompilationInfo* top_info() { return info_; } - - HGraph* CreateGraph(); - - // Bailout environment manipulation. - void Push(HValue* value) { environment()->Push(value); } - HValue* Pop() { return environment()->Pop(); } - - virtual HValue* context() = 0; - - // Adding instructions. - HInstruction* AddInstruction(HInstruction* instr); - void FinishCurrentBlock(HControlInstruction* last); - void FinishExitCurrentBlock(HControlInstruction* instruction); - - void Goto(HBasicBlock* from, - HBasicBlock* target, - bool add_simulate = true) { - from->Goto(target, source_position(), add_simulate); - } - void Goto(HBasicBlock* target, - bool add_simulate = true) { - Goto(current_block(), target, add_simulate); - } - void GotoNoSimulate(HBasicBlock* from, HBasicBlock* target) { - Goto(from, target, false); - } - void GotoNoSimulate(HBasicBlock* target) { Goto(target, false); } - - template - HInstruction* NewUncasted() { - return I::New(isolate(), zone(), context()); - } - - template - I* New() { - return I::New(isolate(), zone(), context()); - } - - template - HInstruction* AddUncasted() { return AddInstruction(NewUncasted());} - - template - I* Add() { return AddInstructionTyped(New());} - - template - HInstruction* NewUncasted(P1 p1) { - return I::New(isolate(), zone(), context(), p1); - } - - template - I* New(P1 p1) { - return I::New(isolate(), zone(), context(), p1); - } - - template - HInstruction* AddUncasted(P1 p1) { - HInstruction* result = AddInstruction(NewUncasted(p1)); - // Specializations must have their parameters properly casted - // to avoid landing here. - DCHECK(!result->IsReturn() && !result->IsSimulate() && - !result->IsDeoptimize()); - return result; - } - - template - I* Add(P1 p1) { - I* result = AddInstructionTyped(New(p1)); - // Specializations must have their parameters properly casted - // to avoid landing here. 
- DCHECK(!result->IsReturn() && !result->IsSimulate() && - !result->IsDeoptimize()); - return result; - } - - template - HInstruction* NewUncasted(P1 p1, P2 p2) { - return I::New(isolate(), zone(), context(), p1, p2); - } - - template - I* New(P1 p1, P2 p2) { - return I::New(isolate(), zone(), context(), p1, p2); - } - - template - HInstruction* AddUncasted(P1 p1, P2 p2) { - HInstruction* result = AddInstruction(NewUncasted(p1, p2)); - // Specializations must have their parameters properly casted - // to avoid landing here. - DCHECK(!result->IsSimulate()); - return result; - } - - template - I* Add(P1 p1, P2 p2) { - I* result = AddInstructionTyped(New(p1, p2)); - // Specializations must have their parameters properly casted - // to avoid landing here. - DCHECK(!result->IsSimulate()); - return result; - } - - template - HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3) { - return I::New(isolate(), zone(), context(), p1, p2, p3); - } - - template - I* New(P1 p1, P2 p2, P3 p3) { - return I::New(isolate(), zone(), context(), p1, p2, p3); - } - - template - HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3) { - return AddInstruction(NewUncasted(p1, p2, p3)); - } - - template - I* Add(P1 p1, P2 p2, P3 p3) { - return AddInstructionTyped(New(p1, p2, p3)); - } - - template - HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3, P4 p4) { - return I::New(isolate(), zone(), context(), p1, p2, p3, p4); - } - - template - I* New(P1 p1, P2 p2, P3 p3, P4 p4) { - return I::New(isolate(), zone(), context(), p1, p2, p3, p4); - } - - template - HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3, P4 p4) { - return AddInstruction(NewUncasted(p1, p2, p3, p4)); - } - - template - I* Add(P1 p1, P2 p2, P3 p3, P4 p4) { - return AddInstructionTyped(New(p1, p2, p3, p4)); - } - - template - HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) { - return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5); - } - - template - I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) { - return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5); - } - - template - HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) { - return AddInstruction(NewUncasted(p1, p2, p3, p4, p5)); - } - - template - I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) { - return AddInstructionTyped(New(p1, p2, p3, p4, p5)); - } - - template - HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) { - return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6); - } - - template - I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) { - return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6); - } - - template - HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) { - return AddInstruction(NewUncasted(p1, p2, p3, p4, p5, p6)); - } - - template - I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) { - return AddInstructionTyped(New(p1, p2, p3, p4, p5, p6)); - } - - template - HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) { - return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6, p7); - } - - template - I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) { - return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6, p7); - } - - template - HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) { - return AddInstruction(NewUncasted(p1, p2, p3, p4, p5, p6, p7)); - } - - template - I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) { - return AddInstructionTyped(New(p1, p2, p3, p4, p5, p6, p7)); - } - - template - HInstruction* NewUncasted(P1 p1, P2 
p2, P3 p3, P4 p4, - P5 p5, P6 p6, P7 p7, P8 p8) { - return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6, p7, p8); - } - - template - I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8) { - return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6, p7, p8); - } - - template - HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3, P4 p4, - P5 p5, P6 p6, P7 p7, P8 p8) { - return AddInstruction(NewUncasted(p1, p2, p3, p4, p5, p6, p7, p8)); - } - - template - I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8) { - return AddInstructionTyped(New(p1, p2, p3, p4, p5, p6, p7, p8)); - } - - template - I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8, P9 p9) { - return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6, p7, p8, - p9); - } - - template - HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, - P8 p8, P9 p9) { - return AddInstruction(NewUncasted(p1, p2, p3, p4, p5, p6, p7, p8, p9)); - } - - template - I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8, P9 p9) { - return AddInstructionTyped(New(p1, p2, p3, p4, p5, p6, p7, p8, p9)); - } - - void AddSimulate(BailoutId id, RemovableSimulate removable = FIXED_SIMULATE); - - // When initializing arrays, we'll unfold the loop if the number of elements - // is known at compile time and is <= kElementLoopUnrollThreshold. - static const int kElementLoopUnrollThreshold = 8; - - protected: - virtual bool BuildGraph() = 0; - - HBasicBlock* CreateBasicBlock(HEnvironment* env); - HBasicBlock* CreateLoopHeaderBlock(); - - template - HValue* BuildDecodeField(HValue* encoded_field) { - HValue* mask_value = Add(static_cast(BitFieldClass::kMask)); - HValue* masked_field = - AddUncasted(Token::BIT_AND, encoded_field, mask_value); - return AddUncasted(masked_field, - Add(static_cast(BitFieldClass::kShift))); - } - - HValue* BuildGetElementsKind(HValue* object); - - HValue* BuildEnumLength(HValue* map); - - HValue* BuildCheckHeapObject(HValue* object); - HValue* BuildCheckString(HValue* string); - HValue* BuildWrapReceiver(HValue* object, HValue* function); - - // Building common constructs - HValue* BuildCheckForCapacityGrow(HValue* object, - HValue* elements, - ElementsKind kind, - HValue* length, - HValue* key, - bool is_js_array, - PropertyAccessType access_type); - - HValue* BuildCheckAndGrowElementsCapacity(HValue* object, HValue* elements, - ElementsKind kind, HValue* length, - HValue* capacity, HValue* key); - - HValue* BuildCopyElementsOnWrite(HValue* object, - HValue* elements, - ElementsKind kind, - HValue* length); - - HValue* BuildNumberToString(HValue* object, AstType* type); - HValue* BuildToNumber(HValue* input); - HValue* BuildToObject(HValue* receiver); - - // Allocates a new object according with the given allocation properties. - HAllocate* BuildAllocate(HValue* object_size, - HType type, - InstanceType instance_type, - HAllocationMode allocation_mode); - // Computes the sum of two string lengths, taking care of overflow handling. - HValue* BuildAddStringLengths(HValue* left_length, HValue* right_length); - // Creates a cons string using the two input strings. - HValue* BuildCreateConsString(HValue* length, - HValue* left, - HValue* right, - HAllocationMode allocation_mode); - // Copies characters from one sequential string to another. 
- void BuildCopySeqStringChars(HValue* src, - HValue* src_offset, - String::Encoding src_encoding, - HValue* dst, - HValue* dst_offset, - String::Encoding dst_encoding, - HValue* length); - - // Align an object size to object alignment boundary - HValue* BuildObjectSizeAlignment(HValue* unaligned_size, int header_size); - - // Both operands are non-empty strings. - HValue* BuildUncheckedStringAdd(HValue* left, - HValue* right, - HAllocationMode allocation_mode); - // Add two strings using allocation mode, validating type feedback. - HValue* BuildStringAdd(HValue* left, - HValue* right, - HAllocationMode allocation_mode); - - HInstruction* BuildUncheckedMonomorphicElementAccess( - HValue* checked_object, - HValue* key, - HValue* val, - bool is_js_array, - ElementsKind elements_kind, - PropertyAccessType access_type, - LoadKeyedHoleMode load_mode, - KeyedAccessStoreMode store_mode); - - HInstruction* AddElementAccess( - HValue* elements, HValue* checked_key, HValue* val, HValue* dependency, - HValue* backing_store_owner, ElementsKind elements_kind, - PropertyAccessType access_type, - LoadKeyedHoleMode load_mode = NEVER_RETURN_HOLE); - - HInstruction* AddLoadStringInstanceType(HValue* string); - HInstruction* AddLoadStringLength(HValue* string); - HInstruction* BuildLoadStringLength(HValue* string); - HStoreNamedField* AddStoreMapConstant(HValue* object, Handle map) { - return Add(object, HObjectAccess::ForMap(), - Add(map)); - } - HLoadNamedField* AddLoadMap(HValue* object, - HValue* dependency = NULL); - HLoadNamedField* AddLoadElements(HValue* object, - HValue* dependency = NULL); - - bool MatchRotateRight(HValue* left, - HValue* right, - HValue** operand, - HValue** shift_amount); - - HValue* BuildBinaryOperation(Token::Value op, HValue* left, HValue* right, - AstType* left_type, AstType* right_type, - AstType* result_type, Maybe fixed_right_arg, - HAllocationMode allocation_mode, - BailoutId opt_id = BailoutId::None()); - - HLoadNamedField* AddLoadFixedArrayLength(HValue *object, - HValue *dependency = NULL); - - HLoadNamedField* AddLoadArrayLength(HValue *object, - ElementsKind kind, - HValue *dependency = NULL); - - HValue* EnforceNumberType(HValue* number, AstType* expected); - HValue* TruncateToNumber(HValue* value, AstType** expected); - - void FinishExitWithHardDeoptimization(DeoptimizeReason reason); - - void AddIncrementCounter(StatsCounter* counter); - - class IfBuilder final { - public: - // If using this constructor, Initialize() must be called explicitly! 
- IfBuilder(); - - explicit IfBuilder(HGraphBuilder* builder); - IfBuilder(HGraphBuilder* builder, - HIfContinuation* continuation); - - ~IfBuilder() { - if (!finished_) End(); - } - - void Initialize(HGraphBuilder* builder); - - template - Condition* If(HValue *p) { - Condition* compare = builder()->New(p); - AddCompare(compare); - return compare; - } - - template - Condition* If(HValue* p1, P2 p2) { - Condition* compare = builder()->New(p1, p2); - AddCompare(compare); - return compare; - } - - template - Condition* If(HValue* p1, P2 p2, P3 p3) { - Condition* compare = builder()->New(p1, p2, p3); - AddCompare(compare); - return compare; - } - - template - Condition* IfNot(HValue* p) { - Condition* compare = If(p); - compare->Not(); - return compare; - } - - template - Condition* IfNot(HValue* p1, P2 p2) { - Condition* compare = If(p1, p2); - compare->Not(); - return compare; - } - - template - Condition* IfNot(HValue* p1, P2 p2, P3 p3) { - Condition* compare = If(p1, p2, p3); - compare->Not(); - return compare; - } - - template - Condition* OrIf(HValue *p) { - Or(); - return If(p); - } - - template - Condition* OrIf(HValue* p1, P2 p2) { - Or(); - return If(p1, p2); - } - - template - Condition* OrIf(HValue* p1, P2 p2, P3 p3) { - Or(); - return If(p1, p2, p3); - } - - template - Condition* AndIf(HValue *p) { - And(); - return If(p); - } - - template - Condition* AndIf(HValue* p1, P2 p2) { - And(); - return If(p1, p2); - } - - template - Condition* AndIf(HValue* p1, P2 p2, P3 p3) { - And(); - return If(p1, p2, p3); - } - - void Or(); - void And(); - - // Captures the current state of this IfBuilder in the specified - // continuation and ends this IfBuilder. - void CaptureContinuation(HIfContinuation* continuation); - - // Joins the specified continuation from this IfBuilder and ends this - // IfBuilder. This appends a Goto instruction from the true branch of - // this IfBuilder to the true branch of the continuation unless the - // true branch of this IfBuilder is already finished. And vice versa - // for the false branch. - // - // The basic idea is as follows: You have several nested IfBuilder's - // that you want to join based on two possible outcomes (i.e. success - // and failure, or whatever). You can do this easily using this method - // now, for example: - // - // HIfContinuation cont(graph()->CreateBasicBlock(), - // graph()->CreateBasicBlock()); - // ... - // IfBuilder if_whatever(this); - // if_whatever.If(arg); - // if_whatever.Then(); - // ... - // if_whatever.Else(); - // ... - // if_whatever.JoinContinuation(&cont); - // ... - // IfBuilder if_something(this); - // if_something.If(arg1, arg2); - // if_something.Then(); - // ... - // if_something.Else(); - // ... - // if_something.JoinContinuation(&cont); - // ... - // IfBuilder if_finally(this, &cont); - // if_finally.Then(); - // // continues after then code of if_whatever or if_something. - // ... - // if_finally.Else(); - // // continues after else code of if_whatever or if_something. - // ... 
- // if_finally.End(); - void JoinContinuation(HIfContinuation* continuation); - - void Then(); - void Else(); - void End(); - void EndUnreachable(); - - void Deopt(DeoptimizeReason reason); - void ThenDeopt(DeoptimizeReason reason) { - Then(); - Deopt(reason); - } - void ElseDeopt(DeoptimizeReason reason) { - Else(); - Deopt(reason); - } - - void Return(HValue* value); - - private: - void InitializeDontCreateBlocks(HGraphBuilder* builder); - - HControlInstruction* AddCompare(HControlInstruction* compare); - - HGraphBuilder* builder() const { - DCHECK(builder_ != NULL); // Have you called "Initialize"? - return builder_; - } - - void AddMergeAtJoinBlock(bool deopt); - - void Finish(); - void Finish(HBasicBlock** then_continuation, - HBasicBlock** else_continuation); - - class MergeAtJoinBlock : public ZoneObject { - public: - MergeAtJoinBlock(HBasicBlock* block, - bool deopt, - MergeAtJoinBlock* next) - : block_(block), - deopt_(deopt), - next_(next) {} - HBasicBlock* block_; - bool deopt_; - MergeAtJoinBlock* next_; - }; - - HGraphBuilder* builder_; - bool finished_ : 1; - bool did_then_ : 1; - bool did_else_ : 1; - bool did_else_if_ : 1; - bool did_and_ : 1; - bool did_or_ : 1; - bool captured_ : 1; - bool needs_compare_ : 1; - bool pending_merge_block_ : 1; - HBasicBlock* first_true_block_; - HBasicBlock* first_false_block_; - HBasicBlock* split_edge_merge_block_; - MergeAtJoinBlock* merge_at_join_blocks_; - int normal_merge_at_join_block_count_; - int deopt_merge_at_join_block_count_; - }; - - class LoopBuilder final { - public: - enum Direction { - kPreIncrement, - kPostIncrement, - kPreDecrement, - kPostDecrement, - kWhileTrue - }; - - explicit LoopBuilder(HGraphBuilder* builder); // while (true) {...} - LoopBuilder(HGraphBuilder* builder, - HValue* context, - Direction direction); - LoopBuilder(HGraphBuilder* builder, - HValue* context, - Direction direction, - HValue* increment_amount); - - ~LoopBuilder() { - DCHECK(finished_); - } - - HValue* BeginBody( - HValue* initial, - HValue* terminating, - Token::Value token); - - void BeginBody(int drop_count); - - void Break(); - - void EndBody(); - - private: - void Initialize(HGraphBuilder* builder, HValue* context, - Direction direction, HValue* increment_amount); - Zone* zone() { return builder_->zone(); } - - HGraphBuilder* builder_; - HValue* context_; - HValue* increment_amount_; - HInstruction* increment_; - HPhi* phi_; - HBasicBlock* header_block_; - HBasicBlock* body_block_; - HBasicBlock* exit_block_; - HBasicBlock* exit_trampoline_block_; - Direction direction_; - bool finished_; - }; - - HValue* BuildNewElementsCapacity(HValue* old_capacity); - - HValue* BuildCalculateElementsSize(ElementsKind kind, - HValue* capacity); - HAllocate* AllocateJSArrayObject(AllocationSiteMode mode); - HConstant* EstablishElementsAllocationSize(ElementsKind kind, int capacity); - - HAllocate* BuildAllocateElements(ElementsKind kind, HValue* size_in_bytes); - - void BuildInitializeElementsHeader(HValue* elements, - ElementsKind kind, - HValue* capacity); - - // Build allocation and header initialization code for respective successor - // of FixedArrayBase. - HValue* BuildAllocateAndInitializeArray(ElementsKind kind, HValue* capacity); - - // |array| must have been allocated with enough room for - // 1) the JSArray and 2) an AllocationMemento if mode requires it. - // If the |elements| value provided is NULL then the array elements storage - // is initialized with empty array. 
- void BuildJSArrayHeader(HValue* array, - HValue* array_map, - HValue* elements, - AllocationSiteMode mode, - ElementsKind elements_kind, - HValue* allocation_site_payload, - HValue* length_field); - - HValue* BuildGrowElementsCapacity(HValue* object, - HValue* elements, - ElementsKind kind, - ElementsKind new_kind, - HValue* length, - HValue* new_capacity); - - void BuildFillElementsWithValue(HValue* elements, - ElementsKind elements_kind, - HValue* from, - HValue* to, - HValue* value); - - void BuildFillElementsWithHole(HValue* elements, - ElementsKind elements_kind, - HValue* from, - HValue* to); - - void BuildCopyProperties(HValue* from_properties, HValue* to_properties, - HValue* length, HValue* capacity); - - void BuildCopyElements(HValue* from_elements, - ElementsKind from_elements_kind, - HValue* to_elements, - ElementsKind to_elements_kind, - HValue* length, - HValue* capacity); - - void BuildCreateAllocationMemento(HValue* previous_object, - HValue* previous_object_size, - HValue* payload); - - HInstruction* BuildConstantMapCheck(Handle constant, - bool ensure_no_elements = false); - HInstruction* BuildCheckPrototypeMaps(Handle prototype, - Handle holder, - bool ensure_no_elements = false); - - HInstruction* BuildGetNativeContext(); - - HValue* BuildArrayBufferViewFieldAccessor(HValue* object, - HValue* checked_object, - FieldIndex index); - - - protected: - void SetSourcePosition(int position) { - if (position != kNoSourcePosition) { - position_.SetScriptOffset(position); - } - // Otherwise position remains unknown. - } - - void EnterInlinedSource(int inlining_id) { - if (is_tracking_positions()) { - position_.SetInliningId(inlining_id); - } - } - - // Convert the given absolute offset from the start of the script to - // the SourcePosition assuming that this position corresponds to the - // same function as position_. 
- SourcePosition ScriptPositionToSourcePosition(int position) { - if (position == kNoSourcePosition) { - return SourcePosition::Unknown(); - } - return SourcePosition(position, position_.InliningId()); - } - - SourcePosition source_position() { return position_; } - void set_source_position(SourcePosition position) { position_ = position; } - - bool is_tracking_positions() { return track_positions_; } - - HValue* BuildAllocateEmptyArrayBuffer(HValue* byte_length); - template - void BuildArrayBufferViewInitialization(HValue* obj, - HValue* buffer, - HValue* byte_offset, - HValue* byte_length); - - private: - HGraphBuilder(); - - template - I* AddInstructionTyped(I* instr) { - return I::cast(AddInstruction(instr)); - } - - CompilationInfo* info_; - CallInterfaceDescriptor descriptor_; - HGraph* graph_; - HBasicBlock* current_block_; - Scope* scope_; - SourcePosition position_; - bool track_positions_; -}; - -template <> -inline HDeoptimize* HGraphBuilder::Add( - DeoptimizeReason reason, Deoptimizer::BailoutType type) { - if (type == Deoptimizer::SOFT) { - isolate()->counters()->soft_deopts_requested()->Increment(); - if (FLAG_always_opt) return NULL; - } - if (current_block()->IsDeoptimizing()) return NULL; - HBasicBlock* after_deopt_block = CreateBasicBlock( - current_block()->last_environment()); - HDeoptimize* instr = New(reason, type, after_deopt_block); - if (type == Deoptimizer::SOFT) { - isolate()->counters()->soft_deopts_inserted()->Increment(); - } - FinishCurrentBlock(instr); - set_current_block(after_deopt_block); - return instr; -} - -template <> -inline HInstruction* HGraphBuilder::AddUncasted( - DeoptimizeReason reason, Deoptimizer::BailoutType type) { - return Add(reason, type); -} - - -template<> -inline HSimulate* HGraphBuilder::Add( - BailoutId id, - RemovableSimulate removable) { - HSimulate* instr = current_block()->CreateSimulate(id, removable); - AddInstruction(instr); - return instr; -} - - -template<> -inline HSimulate* HGraphBuilder::Add( - BailoutId id) { - return Add(id, FIXED_SIMULATE); -} - - -template<> -inline HInstruction* HGraphBuilder::AddUncasted(BailoutId id) { - return Add(id, FIXED_SIMULATE); -} - - -template<> -inline HReturn* HGraphBuilder::Add(HValue* value) { - int num_parameters = graph()->info()->num_parameters(); - HValue* params = AddUncasted(num_parameters); - HReturn* return_instruction = New(value, params); - FinishExitCurrentBlock(return_instruction); - return return_instruction; -} - - -template<> -inline HReturn* HGraphBuilder::Add(HConstant* value) { - return Add(static_cast(value)); -} - -template<> -inline HInstruction* HGraphBuilder::AddUncasted(HValue* value) { - return Add(value); -} - - -template<> -inline HInstruction* HGraphBuilder::AddUncasted(HConstant* value) { - return Add(value); -} - - -template<> -inline HCallRuntime* HGraphBuilder::Add( - const Runtime::Function* c_function, - int argument_count) { - HCallRuntime* instr = New(c_function, argument_count); - if (graph()->info()->IsStub()) { - // When compiling code stubs, we don't want to save all double registers - // upon entry to the stub, but instead have the call runtime instruction - // save the double registers only on-demand (in the fallback case). 
- instr->set_save_doubles(kSaveFPRegs); - } - AddInstruction(instr); - return instr; -} - - -template<> -inline HInstruction* HGraphBuilder::AddUncasted( - Handle name, - const Runtime::Function* c_function, - int argument_count) { - return Add(c_function, argument_count); -} - - -template <> -inline HParameter* HGraphBuilder::New(unsigned index) { - return HParameter::New(isolate(), zone(), nullptr, index); -} - - -template <> -inline HParameter* HGraphBuilder::New( - unsigned index, HParameter::ParameterKind kind) { - return HParameter::New(isolate(), zone(), nullptr, index, kind); -} - - -template <> -inline HParameter* HGraphBuilder::New( - unsigned index, HParameter::ParameterKind kind, Representation r) { - return HParameter::New(isolate(), zone(), nullptr, index, kind, r); -} - - -template <> -inline HPrologue* HGraphBuilder::New() { - return HPrologue::New(zone()); -} - - -template <> -inline HContext* HGraphBuilder::New() { - return HContext::New(zone()); -} - - -class HStatistics final : public Malloced { - public: - HStatistics() - : times_(5), - names_(5), - sizes_(5), - total_size_(0), - source_size_(0) { } - - void Initialize(CompilationInfo* info); - void Print(); - void SaveTiming(const char* name, base::TimeDelta time, size_t size); - - void IncrementFullCodeGen(base::TimeDelta full_code_gen) { - full_code_gen_ += full_code_gen; - } - - void IncrementCreateGraph(base::TimeDelta delta) { create_graph_ += delta; } - - void IncrementOptimizeGraph(base::TimeDelta delta) { - optimize_graph_ += delta; - } - - void IncrementGenerateCode(base::TimeDelta delta) { generate_code_ += delta; } - - void IncrementSubtotals(base::TimeDelta create_graph, - base::TimeDelta optimize_graph, - base::TimeDelta generate_code) { - IncrementCreateGraph(create_graph); - IncrementOptimizeGraph(optimize_graph); - IncrementGenerateCode(generate_code); - } - - private: - List times_; - List names_; - List sizes_; - base::TimeDelta create_graph_; - base::TimeDelta optimize_graph_; - base::TimeDelta generate_code_; - size_t total_size_; - base::TimeDelta full_code_gen_; - double source_size_; -}; - - -class HPhase : public CompilationPhase { - public: - HPhase(const char* name, HGraph* graph) - : CompilationPhase(name, graph->info()), - graph_(graph) { } - ~HPhase(); - - protected: - HGraph* graph() const { return graph_; } - - private: - HGraph* graph_; - - DISALLOW_COPY_AND_ASSIGN(HPhase); -}; - - -class HTracer final : public Malloced { - public: - explicit HTracer(int isolate_id) - : trace_(&string_allocator_), indent_(0) { - if (FLAG_trace_hydrogen_file == NULL) { - SNPrintF(filename_, - "hydrogen-%d-%d.cfg", - base::OS::GetCurrentProcessId(), - isolate_id); - } else { - StrNCpy(filename_, FLAG_trace_hydrogen_file, filename_.length()); - } - WriteChars(filename_.start(), "", 0, false); - } - - void TraceCompilation(CompilationInfo* info); - void TraceHydrogen(const char* name, HGraph* graph); - void TraceLithium(const char* name, LChunk* chunk); - void TraceLiveRanges(const char* name, LAllocator* allocator); - - private: - class Tag final BASE_EMBEDDED { - public: - Tag(HTracer* tracer, const char* name) { - name_ = name; - tracer_ = tracer; - tracer->PrintIndent(); - tracer->trace_.Add("begin_%s\n", name); - tracer->indent_++; - } - - ~Tag() { - tracer_->indent_--; - tracer_->PrintIndent(); - tracer_->trace_.Add("end_%s\n", name_); - DCHECK(tracer_->indent_ >= 0); - tracer_->FlushToFile(); - } - - private: - HTracer* tracer_; - const char* name_; - }; - - void TraceLiveRange(LiveRange* range, 
                     const char* type, Zone* zone);
-  void Trace(const char* name, HGraph* graph, LChunk* chunk);
-  void FlushToFile();
-
-  void PrintEmptyProperty(const char* name) {
-    PrintIndent();
-    trace_.Add("%s\n", name);
-  }
-
-  void PrintStringProperty(const char* name, const char* value) {
-    PrintIndent();
-    trace_.Add("%s \"%s\"\n", name, value);
-  }
-
-  void PrintLongProperty(const char* name, int64_t value) {
-    PrintIndent();
-    trace_.Add("%s %d000\n", name, static_cast(value / 1000));
-  }
-
-  void PrintBlockProperty(const char* name, int block_id) {
-    PrintIndent();
-    trace_.Add("%s \"B%d\"\n", name, block_id);
-  }
-
-  void PrintIntProperty(const char* name, int value) {
-    PrintIndent();
-    trace_.Add("%s %d\n", name, value);
-  }
-
-  void PrintIndent() {
-    for (int i = 0; i < indent_; i++) {
-      trace_.Add(" ");
-    }
-  }
-
-  EmbeddedVector filename_;
-  HeapStringAllocator string_allocator_;
-  StringStream trace_;
-  int indent_;
-};
-
-
-class NoObservableSideEffectsScope final {
- public:
-  explicit NoObservableSideEffectsScope(HGraphBuilder* builder) :
-      builder_(builder) {
-    builder_->graph()->IncrementInNoSideEffectsScope();
-  }
-  ~NoObservableSideEffectsScope() {
-    builder_->graph()->DecrementInNoSideEffectsScope();
-  }
-
- private:
-  HGraphBuilder* builder_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_H_
diff --git a/src/crankshaft/ia32/lithium-codegen-ia32.cc b/src/crankshaft/ia32/lithium-codegen-ia32.cc
deleted file mode 100644
index 64d27ce7f2..0000000000
--- a/src/crankshaft/ia32/lithium-codegen-ia32.cc
+++ /dev/null
@@ -1,5098 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/crankshaft/ia32/lithium-codegen-ia32.h"
-
-#include "src/base/bits.h"
-#include "src/builtins/builtins-constructor.h"
-#include "src/code-factory.h"
-#include "src/code-stubs.h"
-#include "src/codegen.h"
-#include "src/deoptimizer.h"
-#include "src/ia32/frames-ia32.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-// When invoking builtins, we need to record the safepoint in the middle of
-// the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator final : public CallWrapper {
- public:
-  SafepointGenerator(LCodeGen* codegen,
-                     LPointerMap* pointers,
-                     Safepoint::DeoptMode mode)
-      : codegen_(codegen),
-        pointers_(pointers),
-        deopt_mode_(mode) {}
-  virtual ~SafepointGenerator() {}
-
-  void BeforeCall(int call_size) const override {}
-
-  void AfterCall() const override {
-    codegen_->RecordSafepoint(pointers_, deopt_mode_);
-  }
-
- private:
-  LCodeGen* codegen_;
-  LPointerMap* pointers_;
-  Safepoint::DeoptMode deopt_mode_;
-};
-
-
-#define __ masm()->
-
-bool LCodeGen::GenerateCode() {
-  LPhase phase("Z_Code generation", chunk());
-  DCHECK(is_unused());
-  status_ = GENERATING;
-
-  // Open a frame scope to indicate that there is a frame on the stack. The
-  // MANUAL indicates that the scope shouldn't actually generate code to set up
-  // the frame (that is done in GeneratePrologue).
- FrameScope frame_scope(masm_, StackFrame::MANUAL); - - return GeneratePrologue() && - GenerateBody() && - GenerateDeferredCode() && - GenerateJumpTable() && - GenerateSafepointTable(); -} - - -void LCodeGen::FinishCode(Handle code) { - DCHECK(is_done()); - code->set_stack_slots(GetTotalFrameSlotCount()); - code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); - PopulateDeoptimizationData(code); - if (info()->ShouldEnsureSpaceForLazyDeopt()) { - Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code); - } -} - - -#ifdef _MSC_VER -void LCodeGen::MakeSureStackPagesMapped(int offset) { - const int kPageSize = 4 * KB; - for (offset -= kPageSize; offset > 0; offset -= kPageSize) { - __ mov(Operand(esp, offset), eax); - } -} -#endif - - -void LCodeGen::SaveCallerDoubles() { - DCHECK(info()->saves_caller_doubles()); - DCHECK(NeedsEagerFrame()); - Comment(";;; Save clobbered callee double registers"); - int count = 0; - BitVector* doubles = chunk()->allocated_double_registers(); - BitVector::Iterator save_iterator(doubles); - while (!save_iterator.Done()) { - __ movsd(MemOperand(esp, count * kDoubleSize), - XMMRegister::from_code(save_iterator.Current())); - save_iterator.Advance(); - count++; - } -} - - -void LCodeGen::RestoreCallerDoubles() { - DCHECK(info()->saves_caller_doubles()); - DCHECK(NeedsEagerFrame()); - Comment(";;; Restore clobbered callee double registers"); - BitVector* doubles = chunk()->allocated_double_registers(); - BitVector::Iterator save_iterator(doubles); - int count = 0; - while (!save_iterator.Done()) { - __ movsd(XMMRegister::from_code(save_iterator.Current()), - MemOperand(esp, count * kDoubleSize)); - save_iterator.Advance(); - count++; - } -} - - -bool LCodeGen::GeneratePrologue() { - DCHECK(is_generating()); - - if (info()->IsOptimizing()) { - ProfileEntryHookStub::MaybeCallEntryHook(masm_); - } - - info()->set_prologue_offset(masm_->pc_offset()); - if (NeedsEagerFrame()) { - DCHECK(!frame_is_built_); - frame_is_built_ = true; - if (info()->IsStub()) { - __ StubPrologue(StackFrame::STUB); - } else { - __ Prologue(info()->GeneratePreagedPrologue()); - } - } - - // Reserve space for the stack slots needed by the code. - int slots = GetStackSlotCount(); - DCHECK(slots != 0 || !info()->IsOptimizing()); - if (slots > 0) { - __ sub(Operand(esp), Immediate(slots * kPointerSize)); -#ifdef _MSC_VER - MakeSureStackPagesMapped(slots * kPointerSize); -#endif - if (FLAG_debug_code) { - __ push(eax); - __ mov(Operand(eax), Immediate(slots)); - Label loop; - __ bind(&loop); - __ mov(MemOperand(esp, eax, times_4, 0), Immediate(kSlotsZapValue)); - __ dec(eax); - __ j(not_zero, &loop); - __ pop(eax); - } - - if (info()->saves_caller_doubles()) SaveCallerDoubles(); - } - return !is_aborted(); -} - - -void LCodeGen::DoPrologue(LPrologue* instr) { - Comment(";;; Prologue begin"); - - // Possibly allocate a local context. - if (info_->scope()->NeedsContext()) { - Comment(";;; Allocate local context"); - bool need_write_barrier = true; - // Argument to NewContext is the function, which is still in edi. 
- int slots = info_->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; - Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt; - if (info()->scope()->is_script_scope()) { - __ push(edi); - __ Push(info()->scope()->scope_info()); - __ CallRuntime(Runtime::kNewScriptContext); - deopt_mode = Safepoint::kLazyDeopt; - } else { - if (slots <= ConstructorBuiltins::MaximumFunctionContextSlots()) { - Callable callable = CodeFactory::FastNewFunctionContext( - isolate(), info()->scope()->scope_type()); - __ mov(FastNewFunctionContextDescriptor::SlotsRegister(), - Immediate(slots)); - __ Call(callable.code(), RelocInfo::CODE_TARGET); - // Result of the FastNewFunctionContext builtin is always in new space. - need_write_barrier = false; - } else { - __ Push(edi); - __ Push(Smi::FromInt(info()->scope()->scope_type())); - __ CallRuntime(Runtime::kNewFunctionContext); - } - } - RecordSafepoint(deopt_mode); - - // Context is returned in eax. It replaces the context passed to us. - // It's saved in the stack and kept live in esi. - __ mov(esi, eax); - __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax); - - // Copy parameters into context if necessary. - int num_parameters = info()->scope()->num_parameters(); - int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0; - for (int i = first_parameter; i < num_parameters; i++) { - Variable* var = (i == -1) ? info()->scope()->receiver() - : info()->scope()->parameter(i); - if (var->IsContextSlot()) { - int parameter_offset = StandardFrameConstants::kCallerSPOffset + - (num_parameters - 1 - i) * kPointerSize; - // Load parameter from stack. - __ mov(eax, Operand(ebp, parameter_offset)); - // Store it in the context. - int context_offset = Context::SlotOffset(var->index()); - __ mov(Operand(esi, context_offset), eax); - // Update the write barrier. This clobbers eax and ebx. - if (need_write_barrier) { - __ RecordWriteContextSlot(esi, - context_offset, - eax, - ebx, - kDontSaveFPRegs); - } else if (FLAG_debug_code) { - Label done; - __ JumpIfInNewSpace(esi, eax, &done, Label::kNear); - __ Abort(kExpectedNewSpaceObject); - __ bind(&done); - } - } - } - Comment(";;; End allocate local context"); - } - - Comment(";;; Prologue end"); -} - -void LCodeGen::GenerateOsrPrologue() { UNREACHABLE(); } - -void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { - if (instr->IsCall()) { - EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); - } - if (!instr->IsLazyBailout() && !instr->IsGap()) { - safepoints_.BumpLastLazySafepointIndex(); - } -} - - -void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { } - - -bool LCodeGen::GenerateJumpTable() { - if (!jump_table_.length()) return !is_aborted(); - - Label needs_frame; - Comment(";;; -------------------- Jump table --------------------"); - - for (int i = 0; i < jump_table_.length(); i++) { - Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i]; - __ bind(&table_entry->label); - Address entry = table_entry->address; - DeoptComment(table_entry->deopt_info); - if (table_entry->needs_frame) { - DCHECK(!info()->saves_caller_doubles()); - __ push(Immediate(ExternalReference::ForDeoptEntry(entry))); - __ call(&needs_frame); - } else { - if (info()->saves_caller_doubles()) RestoreCallerDoubles(); - __ call(entry, RelocInfo::RUNTIME_ENTRY); - } - } - if (needs_frame.is_linked()) { - __ bind(&needs_frame); - /* stack layout - 3: entry address - 2: return address <-- esp - 1: garbage - 0: garbage - */ - __ push(MemOperand(esp, 0)); // Copy return address. 
- __ push(MemOperand(esp, 2 * kPointerSize)); // Copy entry address. - - /* stack layout - 4: entry address - 3: return address - 1: return address - 0: entry address <-- esp - */ - __ mov(MemOperand(esp, 3 * kPointerSize), ebp); // Save ebp. - // Fill ebp with the right stack frame address. - __ lea(ebp, MemOperand(esp, 3 * kPointerSize)); - - // This variant of deopt can only be used with stubs. Since we don't - // have a function pointer to install in the stack frame that we're - // building, install a special marker there instead. - DCHECK(info()->IsStub()); - __ mov(MemOperand(esp, 2 * kPointerSize), - Immediate(StackFrame::TypeToMarker(StackFrame::STUB))); - - /* stack layout - 3: old ebp - 2: stub marker - 1: return address - 0: entry address <-- esp - */ - __ ret(0); // Call the continuation without clobbering registers. - } - return !is_aborted(); -} - - -bool LCodeGen::GenerateDeferredCode() { - DCHECK(is_generating()); - if (deferred_.length() > 0) { - for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { - LDeferredCode* code = deferred_[i]; - - HValue* value = - instructions_->at(code->instruction_index())->hydrogen_value(); - RecordAndWritePosition(value->position()); - - Comment(";;; <@%d,#%d> " - "-------------------- Deferred %s --------------------", - code->instruction_index(), - code->instr()->hydrogen_value()->id(), - code->instr()->Mnemonic()); - __ bind(code->entry()); - if (NeedsDeferredFrame()) { - Comment(";;; Build frame"); - DCHECK(!frame_is_built_); - DCHECK(info()->IsStub()); - frame_is_built_ = true; - // Build the frame in such a way that esi isn't trashed. - __ push(ebp); // Caller's frame pointer. - __ push(Immediate(StackFrame::TypeToMarker(StackFrame::STUB))); - __ lea(ebp, Operand(esp, TypedFrameConstants::kFixedFrameSizeFromFp)); - Comment(";;; Deferred code"); - } - code->Generate(); - if (NeedsDeferredFrame()) { - __ bind(code->done()); - Comment(";;; Destroy frame"); - DCHECK(frame_is_built_); - frame_is_built_ = false; - __ mov(esp, ebp); - __ pop(ebp); - } - __ jmp(code->exit()); - } - } - - // Deferred code is the last part of the instruction sequence. Mark - // the generated code as done unless we bailed out. - if (!is_aborted()) status_ = DONE; - return !is_aborted(); -} - - -bool LCodeGen::GenerateSafepointTable() { - DCHECK(is_done()); - if (info()->ShouldEnsureSpaceForLazyDeopt()) { - // For lazy deoptimization we need space to patch a call after every call. - // Ensure there is always space for such patching, even if the code ends - // in a call. 
- int target_offset = masm()->pc_offset() + Deoptimizer::patch_size(); - while (masm()->pc_offset() < target_offset) { - masm()->nop(); - } - } - safepoints_.Emit(masm(), GetTotalFrameSlotCount()); - return !is_aborted(); -} - - -Register LCodeGen::ToRegister(int code) const { - return Register::from_code(code); -} - - -XMMRegister LCodeGen::ToDoubleRegister(int code) const { - return XMMRegister::from_code(code); -} - - -Register LCodeGen::ToRegister(LOperand* op) const { - DCHECK(op->IsRegister()); - return ToRegister(op->index()); -} - - -XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const { - DCHECK(op->IsDoubleRegister()); - return ToDoubleRegister(op->index()); -} - - -int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { - return ToRepresentation(op, Representation::Integer32()); -} - - -int32_t LCodeGen::ToRepresentation(LConstantOperand* op, - const Representation& r) const { - HConstant* constant = chunk_->LookupConstant(op); - if (r.IsExternal()) { - return reinterpret_cast( - constant->ExternalReferenceValue().address()); - } - int32_t value = constant->Integer32Value(); - if (r.IsInteger32()) return value; - DCHECK(r.IsSmiOrTagged()); - return reinterpret_cast(Smi::FromInt(value)); -} - - -Handle LCodeGen::ToHandle(LConstantOperand* op) const { - HConstant* constant = chunk_->LookupConstant(op); - DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); - return constant->handle(isolate()); -} - - -double LCodeGen::ToDouble(LConstantOperand* op) const { - HConstant* constant = chunk_->LookupConstant(op); - DCHECK(constant->HasDoubleValue()); - return constant->DoubleValue(); -} - - -ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const { - HConstant* constant = chunk_->LookupConstant(op); - DCHECK(constant->HasExternalReferenceValue()); - return constant->ExternalReferenceValue(); -} - - -bool LCodeGen::IsInteger32(LConstantOperand* op) const { - return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32(); -} - - -bool LCodeGen::IsSmi(LConstantOperand* op) const { - return chunk_->LookupLiteralRepresentation(op).IsSmi(); -} - - -static int ArgumentsOffsetWithoutFrame(int index) { - DCHECK(index < 0); - return -(index + 1) * kPointerSize + kPCOnStackSize; -} - - -Operand LCodeGen::ToOperand(LOperand* op) const { - if (op->IsRegister()) return Operand(ToRegister(op)); - if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op)); - DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); - if (NeedsEagerFrame()) { - return Operand(ebp, FrameSlotToFPOffset(op->index())); - } else { - // Retrieve parameter without eager stack-frame relative to the - // stack-pointer. - return Operand(esp, ArgumentsOffsetWithoutFrame(op->index())); - } -} - - -Operand LCodeGen::HighOperand(LOperand* op) { - DCHECK(op->IsDoubleStackSlot()); - if (NeedsEagerFrame()) { - return Operand(ebp, FrameSlotToFPOffset(op->index()) + kPointerSize); - } else { - // Retrieve parameter without eager stack-frame relative to the - // stack-pointer. - return Operand( - esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize); - } -} - - -void LCodeGen::WriteTranslation(LEnvironment* environment, - Translation* translation) { - if (environment == NULL) return; - - // The translation includes one command per value in the environment. 
- int translation_size = environment->translation_size(); - - WriteTranslation(environment->outer(), translation); - WriteTranslationFrame(environment, translation); - - int object_index = 0; - int dematerialized_index = 0; - for (int i = 0; i < translation_size; ++i) { - LOperand* value = environment->values()->at(i); - AddToTranslation( - environment, translation, value, environment->HasTaggedValueAt(i), - environment->HasUint32ValueAt(i), &object_index, &dematerialized_index); - } -} - - -void LCodeGen::AddToTranslation(LEnvironment* environment, - Translation* translation, - LOperand* op, - bool is_tagged, - bool is_uint32, - int* object_index_pointer, - int* dematerialized_index_pointer) { - if (op == LEnvironment::materialization_marker()) { - int object_index = (*object_index_pointer)++; - if (environment->ObjectIsDuplicateAt(object_index)) { - int dupe_of = environment->ObjectDuplicateOfAt(object_index); - translation->DuplicateObject(dupe_of); - return; - } - int object_length = environment->ObjectLengthAt(object_index); - if (environment->ObjectIsArgumentsAt(object_index)) { - translation->BeginArgumentsObject(object_length); - } else { - translation->BeginCapturedObject(object_length); - } - int dematerialized_index = *dematerialized_index_pointer; - int env_offset = environment->translation_size() + dematerialized_index; - *dematerialized_index_pointer += object_length; - for (int i = 0; i < object_length; ++i) { - LOperand* value = environment->values()->at(env_offset + i); - AddToTranslation(environment, - translation, - value, - environment->HasTaggedValueAt(env_offset + i), - environment->HasUint32ValueAt(env_offset + i), - object_index_pointer, - dematerialized_index_pointer); - } - return; - } - - if (op->IsStackSlot()) { - int index = op->index(); - if (is_tagged) { - translation->StoreStackSlot(index); - } else if (is_uint32) { - translation->StoreUint32StackSlot(index); - } else { - translation->StoreInt32StackSlot(index); - } - } else if (op->IsDoubleStackSlot()) { - int index = op->index(); - translation->StoreDoubleStackSlot(index); - } else if (op->IsRegister()) { - Register reg = ToRegister(op); - if (is_tagged) { - translation->StoreRegister(reg); - } else if (is_uint32) { - translation->StoreUint32Register(reg); - } else { - translation->StoreInt32Register(reg); - } - } else if (op->IsDoubleRegister()) { - XMMRegister reg = ToDoubleRegister(op); - translation->StoreDoubleRegister(reg); - } else if (op->IsConstantOperand()) { - HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op)); - int src_index = DefineDeoptimizationLiteral(constant->handle(isolate())); - translation->StoreLiteral(src_index); - } else { - UNREACHABLE(); - } -} - - -void LCodeGen::CallCodeGeneric(Handle code, - RelocInfo::Mode mode, - LInstruction* instr, - SafepointMode safepoint_mode) { - DCHECK(instr != NULL); - __ call(code, mode); - RecordSafepointWithLazyDeopt(instr, safepoint_mode); - - // Signal that we don't inline smi code before these stubs in the - // optimizing code generator. 
- if (code->kind() == Code::COMPARE_IC) { - __ nop(); - } -} - - -void LCodeGen::CallCode(Handle code, - RelocInfo::Mode mode, - LInstruction* instr) { - CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT); -} - - -void LCodeGen::CallRuntime(const Runtime::Function* fun, - int argc, - LInstruction* instr, - SaveFPRegsMode save_doubles) { - DCHECK(instr != NULL); - DCHECK(instr->HasPointerMap()); - - __ CallRuntime(fun, argc, save_doubles); - - RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); - - DCHECK(info()->is_calling()); -} - - -void LCodeGen::LoadContextFromDeferred(LOperand* context) { - if (context->IsRegister()) { - if (!ToRegister(context).is(esi)) { - __ mov(esi, ToRegister(context)); - } - } else if (context->IsStackSlot()) { - __ mov(esi, ToOperand(context)); - } else if (context->IsConstantOperand()) { - HConstant* constant = - chunk_->LookupConstant(LConstantOperand::cast(context)); - __ LoadObject(esi, Handle::cast(constant->handle(isolate()))); - } else { - UNREACHABLE(); - } -} - -void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, - int argc, - LInstruction* instr, - LOperand* context) { - LoadContextFromDeferred(context); - - __ CallRuntimeSaveDoubles(id); - RecordSafepointWithRegisters( - instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); - - DCHECK(info()->is_calling()); -} - - -void LCodeGen::RegisterEnvironmentForDeoptimization( - LEnvironment* environment, Safepoint::DeoptMode mode) { - environment->set_has_been_used(); - if (!environment->HasBeenRegistered()) { - // Physical stack frame layout: - // -x ............. -4 0 ..................................... y - // [incoming arguments] [spill slots] [pushed outgoing arguments] - - // Layout of the environment: - // 0 ..................................................... size-1 - // [parameters] [locals] [expression stack including arguments] - - // Layout of the translation: - // 0 ........................................................ size - 1 + 4 - // [expression stack including arguments] [locals] [4 words] [parameters] - // |>------------ translation_size ------------<| - - int frame_count = 0; - int jsframe_count = 0; - for (LEnvironment* e = environment; e != NULL; e = e->outer()) { - ++frame_count; - if (e->frame_type() == JS_FUNCTION) { - ++jsframe_count; - } - } - Translation translation(&translations_, frame_count, jsframe_count, zone()); - WriteTranslation(environment, &translation); - int deoptimization_index = deoptimizations_.length(); - int pc_offset = masm()->pc_offset(); - environment->Register(deoptimization_index, - translation.index(), - (mode == Safepoint::kLazyDeopt) ? 
pc_offset : -1); - deoptimizations_.Add(environment, zone()); - } -} - -void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, - DeoptimizeReason deopt_reason, - Deoptimizer::BailoutType bailout_type) { - LEnvironment* environment = instr->environment(); - RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); - DCHECK(environment->HasBeenRegistered()); - int id = environment->deoptimization_index(); - Address entry = - Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); - if (entry == NULL) { - Abort(kBailoutWasNotPrepared); - return; - } - - if (DeoptEveryNTimes()) { - ExternalReference count = ExternalReference::stress_deopt_count(isolate()); - Label no_deopt; - __ pushfd(); - __ push(eax); - __ mov(eax, Operand::StaticVariable(count)); - __ sub(eax, Immediate(1)); - __ j(not_zero, &no_deopt, Label::kNear); - if (FLAG_trap_on_deopt) __ int3(); - __ mov(eax, Immediate(FLAG_deopt_every_n_times)); - __ mov(Operand::StaticVariable(count), eax); - __ pop(eax); - __ popfd(); - DCHECK(frame_is_built_); - __ call(entry, RelocInfo::RUNTIME_ENTRY); - __ bind(&no_deopt); - __ mov(Operand::StaticVariable(count), eax); - __ pop(eax); - __ popfd(); - } - - if (info()->ShouldTrapOnDeopt()) { - Label done; - if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); - __ int3(); - __ bind(&done); - } - - Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id); - - DCHECK(info()->IsStub() || frame_is_built_); - if (cc == no_condition && frame_is_built_) { - DeoptComment(deopt_info); - __ call(entry, RelocInfo::RUNTIME_ENTRY); - } else { - Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type, - !frame_is_built_); - // We often have several deopts to the same entry, reuse the last - // jump entry if this is the case. - if (FLAG_trace_deopt || isolate()->is_profiling() || - jump_table_.is_empty() || - !table_entry.IsEquivalentTo(jump_table_.last())) { - jump_table_.Add(table_entry, zone()); - } - if (cc == no_condition) { - __ jmp(&jump_table_.last().label); - } else { - __ j(cc, &jump_table_.last().label); - } - } -} - -void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, - DeoptimizeReason deopt_reason) { - Deoptimizer::BailoutType bailout_type = info()->IsStub() - ? 
Deoptimizer::LAZY - : Deoptimizer::EAGER; - DeoptimizeIf(cc, instr, deopt_reason, bailout_type); -} - - -void LCodeGen::RecordSafepointWithLazyDeopt( - LInstruction* instr, SafepointMode safepoint_mode) { - if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { - RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); - } else { - DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - RecordSafepointWithRegisters( - instr->pointer_map(), 0, Safepoint::kLazyDeopt); - } -} - - -void LCodeGen::RecordSafepoint( - LPointerMap* pointers, - Safepoint::Kind kind, - int arguments, - Safepoint::DeoptMode deopt_mode) { - DCHECK(kind == expected_safepoint_kind_); - const ZoneList* operands = pointers->GetNormalizedOperands(); - Safepoint safepoint = - safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode); - for (int i = 0; i < operands->length(); i++) { - LOperand* pointer = operands->at(i); - if (pointer->IsStackSlot()) { - safepoint.DefinePointerSlot(pointer->index(), zone()); - } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { - safepoint.DefinePointerRegister(ToRegister(pointer), zone()); - } - } -} - - -void LCodeGen::RecordSafepoint(LPointerMap* pointers, - Safepoint::DeoptMode mode) { - RecordSafepoint(pointers, Safepoint::kSimple, 0, mode); -} - - -void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) { - LPointerMap empty_pointers(zone()); - RecordSafepoint(&empty_pointers, mode); -} - - -void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, - int arguments, - Safepoint::DeoptMode mode) { - RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode); -} - - -static const char* LabelType(LLabel* label) { - if (label->is_loop_header()) return " (loop header)"; - if (label->is_osr_entry()) return " (OSR entry)"; - return ""; -} - - -void LCodeGen::DoLabel(LLabel* label) { - Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------", - current_instruction_, - label->hydrogen_value()->id(), - label->block_id(), - LabelType(label)); - __ bind(label->label()); - current_block_ = label->block_id(); - DoGap(label); -} - - -void LCodeGen::DoParallelMove(LParallelMove* move) { - resolver_.Resolve(move); -} - - -void LCodeGen::DoGap(LGap* gap) { - for (int i = LGap::FIRST_INNER_POSITION; - i <= LGap::LAST_INNER_POSITION; - i++) { - LGap::InnerPosition inner_pos = static_cast(i); - LParallelMove* move = gap->GetParallelMove(inner_pos); - if (move != NULL) DoParallelMove(move); - } -} - - -void LCodeGen::DoInstructionGap(LInstructionGap* instr) { - DoGap(instr); -} - - -void LCodeGen::DoParameter(LParameter* instr) { - // Nothing to do. -} - - -void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { - GenerateOsrPrologue(); -} - - -void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - DCHECK(dividend.is(ToRegister(instr->result()))); - - // Theoretically, a variation of the branch-free code for integer division by - // a power of 2 (calculating the remainder via an additional multiplication - // (which gets simplified to an 'and') and subtraction) should be faster, and - // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to - // indicate that positive dividends are heavily favored, so the branching - // version performs better. - HMod* hmod = instr->hydrogen(); - int32_t mask = divisor < 0 ? 
-(divisor + 1) : (divisor - 1);
- Label dividend_is_not_negative, done;
- if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
- __ test(dividend, dividend);
- __ j(not_sign, &dividend_is_not_negative, Label::kNear);
- // Note that this is correct even for kMinInt operands.
- __ neg(dividend);
- __ and_(dividend, mask);
- __ neg(dividend);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
- }
- __ jmp(&done, Label::kNear);
- }
-
- __ bind(&dividend_is_not_negative);
- __ and_(dividend, mask);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoModByConstI(LModByConstI* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- DCHECK(ToRegister(instr->result()).is(eax));
-
- if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
- return;
- }
-
- __ TruncatingDiv(dividend, Abs(divisor));
- __ imul(edx, edx, Abs(divisor));
- __ mov(eax, dividend);
- __ sub(eax, edx);
-
- // Check for negative zero.
- HMod* hmod = instr->hydrogen();
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label remainder_not_zero;
- __ j(not_zero, &remainder_not_zero, Label::kNear);
- __ cmp(dividend, Immediate(0));
- DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
- __ bind(&remainder_not_zero);
- }
-}
-
-
-void LCodeGen::DoModI(LModI* instr) {
- HMod* hmod = instr->hydrogen();
-
- Register left_reg = ToRegister(instr->left());
- DCHECK(left_reg.is(eax));
- Register right_reg = ToRegister(instr->right());
- DCHECK(!right_reg.is(eax));
- DCHECK(!right_reg.is(edx));
- Register result_reg = ToRegister(instr->result());
- DCHECK(result_reg.is(edx));
-
- Label done;
- // Check for x % 0, idiv would signal a divide error. We have to
- // deopt in this case because we can't return a NaN.
- if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
- __ test(right_reg, Operand(right_reg));
- DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
- }
-
- // Check for kMinInt % -1, idiv would signal a divide error. We
- // have to deopt if we care about -0, because we can't return that.
- if (hmod->CheckFlag(HValue::kCanOverflow)) {
- Label no_overflow_possible;
- __ cmp(left_reg, kMinInt);
- __ j(not_equal, &no_overflow_possible, Label::kNear);
- __ cmp(right_reg, -1);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr, DeoptimizeReason::kMinusZero);
- } else {
- __ j(not_equal, &no_overflow_possible, Label::kNear);
- __ Move(result_reg, Immediate(0));
- __ jmp(&done, Label::kNear);
- }
- __ bind(&no_overflow_possible);
- }
-
- // Sign extend dividend in eax into edx:eax.
- __ cdq();
-
- // If we care about -0, test if the dividend is <0 and the result is 0.
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label positive_left;
- __ test(left_reg, Operand(left_reg));
- __ j(not_sign, &positive_left, Label::kNear);
- __ idiv(right_reg);
- __ test(result_reg, Operand(result_reg));
- DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
- __ jmp(&done, Label::kNear);
- __ bind(&positive_left);
- }
- __ idiv(right_reg);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister(instr->result());
- DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
- DCHECK(!result.is(dividend));
-
- // Check for (0 / -x) that will produce negative zero.
- HDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- __ test(dividend, dividend);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
- }
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
- __ cmp(dividend, kMinInt);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
- }
- // Deoptimize if remainder will not be 0.
- if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
- divisor != 1 && divisor != -1) {
- int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
- __ test(dividend, Immediate(mask));
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
- }
- __ Move(result, dividend);
- int32_t shift = WhichPowerOf2Abs(divisor);
- if (shift > 0) {
- // The arithmetic shift is always OK, the 'if' is an optimization only.
- if (shift > 1) __ sar(result, 31);
- __ shr(result, 32 - shift);
- __ add(result, dividend);
- __ sar(result, shift);
- }
- if (divisor < 0) __ neg(result);
-}
-
-
-void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- DCHECK(ToRegister(instr->result()).is(edx));
-
- if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
- return;
- }
-
- // Check for (0 / -x) that will produce negative zero.
- HDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- __ test(dividend, dividend);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
- }
-
- __ TruncatingDiv(dividend, Abs(divisor));
- if (divisor < 0) __ neg(edx);
-
- if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
- __ mov(eax, edx);
- __ imul(eax, eax, divisor);
- __ sub(eax, dividend);
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision);
- }
-}
-
-
-// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
-void LCodeGen::DoDivI(LDivI* instr) {
- HBinaryOperation* hdiv = instr->hydrogen();
- Register dividend = ToRegister(instr->dividend());
- Register divisor = ToRegister(instr->divisor());
- Register remainder = ToRegister(instr->temp());
- DCHECK(dividend.is(eax));
- DCHECK(remainder.is(edx));
- DCHECK(ToRegister(instr->result()).is(eax));
- DCHECK(!divisor.is(eax));
- DCHECK(!divisor.is(edx));
-
- // Check for x / 0.
- if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- __ test(divisor, divisor);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label dividend_not_zero;
- __ test(dividend, dividend);
- __ j(not_zero, &dividend_not_zero, Label::kNear);
- __ test(divisor, divisor);
- DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
- __ bind(&dividend_not_zero);
- }
-
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow)) {
- Label dividend_not_min_int;
- __ cmp(dividend, kMinInt);
- __ j(not_zero, &dividend_not_min_int, Label::kNear);
- __ cmp(divisor, -1);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
- __ bind(&dividend_not_min_int);
- }
-
- // Sign extend to edx (= remainder).
- __ cdq();
- __ idiv(divisor);
-
- if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- // Deoptimize if remainder is not 0.
- __ test(remainder, remainder);
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
- }
-}
-
-
-void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- DCHECK(dividend.is(ToRegister(instr->result())));
-
- // If the divisor is positive, things are easy: There can be no deopts and we
- // can simply do an arithmetic right shift.
- if (divisor == 1) return;
- int32_t shift = WhichPowerOf2Abs(divisor);
- if (divisor > 1) {
- __ sar(dividend, shift);
- return;
- }
-
- // If the divisor is negative, we have to negate and handle edge cases.
- __ neg(dividend);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
- }
-
- // Dividing by -1 is basically negation, unless we overflow.
- if (divisor == -1) {
- if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- }
- return;
- }
-
- // If the negation could not overflow, simply shifting is OK.
- if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- __ sar(dividend, shift);
- return;
- }
-
- Label not_kmin_int, done;
- __ j(no_overflow, &not_kmin_int, Label::kNear);
- __ mov(dividend, Immediate(kMinInt / divisor));
- __ jmp(&done, Label::kNear);
- __ bind(&not_kmin_int);
- __ sar(dividend, shift);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- DCHECK(ToRegister(instr->result()).is(edx));
-
- if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
- return;
- }
-
- // Check for (0 / -x) that will produce negative zero.
- HMathFloorOfDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- __ test(dividend, dividend);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
- }
-
- // Easy case: We need no dynamic check for the dividend and the flooring
- // division is the same as the truncating division.
- if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
- (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
- __ TruncatingDiv(dividend, Abs(divisor));
- if (divisor < 0) __ neg(edx);
- return;
- }
-
- // In the general case we may need to adjust before and after the truncating
- // division to get a flooring division.
- Register temp = ToRegister(instr->temp3());
- DCHECK(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
- Label needs_adjustment, done;
- __ cmp(dividend, Immediate(0));
- __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
- __ TruncatingDiv(dividend, Abs(divisor));
- if (divisor < 0) __ neg(edx);
- __ jmp(&done, Label::kNear);
- __ bind(&needs_adjustment);
- __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1));
- __ TruncatingDiv(temp, Abs(divisor));
- if (divisor < 0) __ neg(edx);
- __ dec(edx);
- __ bind(&done);
-}
-
-
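// [Editor's note: illustrative sketch only -- the block below is not part of the
// deleted V8 source in this diff.] DoFlooringDivI, which follows, derives a
// flooring division (round toward negative infinity) from the truncating idiv:
// after the division it decrements the quotient whenever the remainder is nonzero
// and its sign differs from the divisor's, which is what the xor/sar/add tail of
// the function encodes. A minimal standalone C++ equivalent of that adjustment
// (function name FlooringDiv is hypothetical, chosen for this sketch):

#include <cstdint>

int32_t FlooringDiv(int32_t dividend, int32_t divisor) {
  int32_t quotient = dividend / divisor;   // truncating division, like idiv
  int32_t remainder = dividend % divisor;
  // A nonzero remainder whose sign differs from the divisor's means the
  // truncated quotient is one too large for flooring semantics.
  if (remainder != 0 && ((remainder ^ divisor) < 0)) quotient -= 1;
  return quotient;
}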
-// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
-void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
- HBinaryOperation* hdiv = instr->hydrogen();
- Register dividend = ToRegister(instr->dividend());
- Register divisor = ToRegister(instr->divisor());
- Register remainder = ToRegister(instr->temp());
- Register result = ToRegister(instr->result());
- DCHECK(dividend.is(eax));
- DCHECK(remainder.is(edx));
- DCHECK(result.is(eax));
- DCHECK(!divisor.is(eax));
- DCHECK(!divisor.is(edx));
-
- // Check for x / 0.
- if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- __ test(divisor, divisor);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label dividend_not_zero;
- __ test(dividend, dividend);
- __ j(not_zero, &dividend_not_zero, Label::kNear);
- __ test(divisor, divisor);
- DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
- __ bind(&dividend_not_zero);
- }
-
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow)) {
- Label dividend_not_min_int;
- __ cmp(dividend, kMinInt);
- __ j(not_zero, &dividend_not_min_int, Label::kNear);
- __ cmp(divisor, -1);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
- __ bind(&dividend_not_min_int);
- }
-
- // Sign extend to edx (= remainder).
- __ cdq();
- __ idiv(divisor);
-
- Label done;
- __ test(remainder, remainder);
- __ j(zero, &done, Label::kNear);
- __ xor_(remainder, divisor);
- __ sar(remainder, 31);
- __ add(result, remainder);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoMulI(LMulI* instr) {
- Register left = ToRegister(instr->left());
- LOperand* right = instr->right();
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ mov(ToRegister(instr->temp()), left);
- }
-
- if (right->IsConstantOperand()) {
- // Try strength reductions on the multiplication.
- // All replacement instructions are at most as long as the imul
- // and have better latency.
- int constant = ToInteger32(LConstantOperand::cast(right));
- if (constant == -1) {
- __ neg(left);
- } else if (constant == 0) {
- __ xor_(left, Operand(left));
- } else if (constant == 2) {
- __ add(left, Operand(left));
- } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- // If we know that the multiplication can't overflow, it's safe to
- // use instructions that don't set the overflow flag for the
- // multiplication.
- switch (constant) {
- case 1:
- // Do nothing.
- break;
- case 3:
- __ lea(left, Operand(left, left, times_2, 0));
- break;
- case 4:
- __ shl(left, 2);
- break;
- case 5:
- __ lea(left, Operand(left, left, times_4, 0));
- break;
- case 8:
- __ shl(left, 3);
- break;
- case 9:
- __ lea(left, Operand(left, left, times_8, 0));
- break;
- case 16:
- __ shl(left, 4);
- break;
- default:
- __ imul(left, left, constant);
- break;
- }
- } else {
- __ imul(left, left, constant);
- }
- } else {
- if (instr->hydrogen()->representation().IsSmi()) {
- __ SmiUntag(left);
- }
- __ imul(left, ToOperand(right));
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Bail out if the result is supposed to be negative zero.
- Label done; - __ test(left, Operand(left)); - __ j(not_zero, &done, Label::kNear); - if (right->IsConstantOperand()) { - if (ToInteger32(LConstantOperand::cast(right)) < 0) { - DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero); - } else if (ToInteger32(LConstantOperand::cast(right)) == 0) { - __ cmp(ToRegister(instr->temp()), Immediate(0)); - DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero); - } - } else { - // Test the non-zero operand for negative sign. - __ or_(ToRegister(instr->temp()), ToOperand(right)); - DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero); - } - __ bind(&done); - } -} - - -void LCodeGen::DoBitI(LBitI* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - DCHECK(left->Equals(instr->result())); - DCHECK(left->IsRegister()); - - if (right->IsConstantOperand()) { - int32_t right_operand = - ToRepresentation(LConstantOperand::cast(right), - instr->hydrogen()->representation()); - switch (instr->op()) { - case Token::BIT_AND: - __ and_(ToRegister(left), right_operand); - break; - case Token::BIT_OR: - __ or_(ToRegister(left), right_operand); - break; - case Token::BIT_XOR: - if (right_operand == int32_t(~0)) { - __ not_(ToRegister(left)); - } else { - __ xor_(ToRegister(left), right_operand); - } - break; - default: - UNREACHABLE(); - break; - } - } else { - switch (instr->op()) { - case Token::BIT_AND: - __ and_(ToRegister(left), ToOperand(right)); - break; - case Token::BIT_OR: - __ or_(ToRegister(left), ToOperand(right)); - break; - case Token::BIT_XOR: - __ xor_(ToRegister(left), ToOperand(right)); - break; - default: - UNREACHABLE(); - break; - } - } -} - - -void LCodeGen::DoShiftI(LShiftI* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - DCHECK(left->Equals(instr->result())); - DCHECK(left->IsRegister()); - if (right->IsRegister()) { - DCHECK(ToRegister(right).is(ecx)); - - switch (instr->op()) { - case Token::ROR: - __ ror_cl(ToRegister(left)); - break; - case Token::SAR: - __ sar_cl(ToRegister(left)); - break; - case Token::SHR: - __ shr_cl(ToRegister(left)); - if (instr->can_deopt()) { - __ test(ToRegister(left), ToRegister(left)); - DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue); - } - break; - case Token::SHL: - __ shl_cl(ToRegister(left)); - break; - default: - UNREACHABLE(); - break; - } - } else { - int value = ToInteger32(LConstantOperand::cast(right)); - uint8_t shift_count = static_cast(value & 0x1F); - switch (instr->op()) { - case Token::ROR: - if (shift_count == 0 && instr->can_deopt()) { - __ test(ToRegister(left), ToRegister(left)); - DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue); - } else { - __ ror(ToRegister(left), shift_count); - } - break; - case Token::SAR: - if (shift_count != 0) { - __ sar(ToRegister(left), shift_count); - } - break; - case Token::SHR: - if (shift_count != 0) { - __ shr(ToRegister(left), shift_count); - } else if (instr->can_deopt()) { - __ test(ToRegister(left), ToRegister(left)); - DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue); - } - break; - case Token::SHL: - if (shift_count != 0) { - if (instr->hydrogen_value()->representation().IsSmi() && - instr->can_deopt()) { - if (shift_count != 1) { - __ shl(ToRegister(left), shift_count - 1); - } - __ SmiTag(ToRegister(left)); - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); - } else { - __ shl(ToRegister(left), shift_count); - } - } - break; - default: - UNREACHABLE(); - break; - } - } -} - - -void LCodeGen::DoSubI(LSubI* instr) { - LOperand* 
left = instr->left(); - LOperand* right = instr->right(); - DCHECK(left->Equals(instr->result())); - - if (right->IsConstantOperand()) { - __ sub(ToOperand(left), - ToImmediate(right, instr->hydrogen()->representation())); - } else { - __ sub(ToRegister(left), ToOperand(right)); - } - if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); - } -} - - -void LCodeGen::DoConstantI(LConstantI* instr) { - __ Move(ToRegister(instr->result()), Immediate(instr->value())); -} - - -void LCodeGen::DoConstantS(LConstantS* instr) { - __ Move(ToRegister(instr->result()), Immediate(instr->value())); -} - - -void LCodeGen::DoConstantD(LConstantD* instr) { - uint64_t const bits = instr->bits(); - uint32_t const lower = static_cast(bits); - uint32_t const upper = static_cast(bits >> 32); - DCHECK(instr->result()->IsDoubleRegister()); - - XMMRegister result = ToDoubleRegister(instr->result()); - if (bits == 0u) { - __ xorps(result, result); - } else { - Register temp = ToRegister(instr->temp()); - if (CpuFeatures::IsSupported(SSE4_1)) { - CpuFeatureScope scope2(masm(), SSE4_1); - if (lower != 0) { - __ Move(temp, Immediate(lower)); - __ movd(result, Operand(temp)); - __ Move(temp, Immediate(upper)); - __ pinsrd(result, Operand(temp), 1); - } else { - __ xorps(result, result); - __ Move(temp, Immediate(upper)); - __ pinsrd(result, Operand(temp), 1); - } - } else { - __ Move(temp, Immediate(upper)); - __ movd(result, Operand(temp)); - __ psllq(result, 32); - if (lower != 0u) { - XMMRegister xmm_scratch = double_scratch0(); - __ Move(temp, Immediate(lower)); - __ movd(xmm_scratch, Operand(temp)); - __ orps(result, xmm_scratch); - } - } - } -} - - -void LCodeGen::DoConstantE(LConstantE* instr) { - __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value())); -} - - -void LCodeGen::DoConstantT(LConstantT* instr) { - Register reg = ToRegister(instr->result()); - Handle object = instr->value(isolate()); - AllowDeferredHandleDereference smi_check; - __ LoadObject(reg, object); -} - - -Operand LCodeGen::BuildSeqStringOperand(Register string, - LOperand* index, - String::Encoding encoding) { - if (index->IsConstantOperand()) { - int offset = ToRepresentation(LConstantOperand::cast(index), - Representation::Integer32()); - if (encoding == String::TWO_BYTE_ENCODING) { - offset *= kUC16Size; - } - STATIC_ASSERT(kCharSize == 1); - return FieldOperand(string, SeqString::kHeaderSize + offset); - } - return FieldOperand( - string, ToRegister(index), - encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2, - SeqString::kHeaderSize); -} - - -void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { - String::Encoding encoding = instr->hydrogen()->encoding(); - Register result = ToRegister(instr->result()); - Register string = ToRegister(instr->string()); - - if (FLAG_debug_code) { - __ push(string); - __ mov(string, FieldOperand(string, HeapObject::kMapOffset)); - __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset)); - - __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask)); - static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; - static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; - __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING - ? 
one_byte_seq_type : two_byte_seq_type)); - __ Check(equal, kUnexpectedStringType); - __ pop(string); - } - - Operand operand = BuildSeqStringOperand(string, instr->index(), encoding); - if (encoding == String::ONE_BYTE_ENCODING) { - __ movzx_b(result, operand); - } else { - __ movzx_w(result, operand); - } -} - - -void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { - String::Encoding encoding = instr->hydrogen()->encoding(); - Register string = ToRegister(instr->string()); - - if (FLAG_debug_code) { - Register value = ToRegister(instr->value()); - Register index = ToRegister(instr->index()); - static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; - static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; - int encoding_mask = - instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING - ? one_byte_seq_type : two_byte_seq_type; - __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask); - } - - Operand operand = BuildSeqStringOperand(string, instr->index(), encoding); - if (instr->value()->IsConstantOperand()) { - int value = ToRepresentation(LConstantOperand::cast(instr->value()), - Representation::Integer32()); - DCHECK_LE(0, value); - if (encoding == String::ONE_BYTE_ENCODING) { - DCHECK_LE(value, String::kMaxOneByteCharCode); - __ mov_b(operand, static_cast(value)); - } else { - DCHECK_LE(value, String::kMaxUtf16CodeUnit); - __ mov_w(operand, static_cast(value)); - } - } else { - Register value = ToRegister(instr->value()); - if (encoding == String::ONE_BYTE_ENCODING) { - __ mov_b(operand, value); - } else { - __ mov_w(operand, value); - } - } -} - - -void LCodeGen::DoAddI(LAddI* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - - if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) { - if (right->IsConstantOperand()) { - int32_t offset = ToRepresentation(LConstantOperand::cast(right), - instr->hydrogen()->representation()); - __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset)); - } else { - Operand address(ToRegister(left), ToRegister(right), times_1, 0); - __ lea(ToRegister(instr->result()), address); - } - } else { - if (right->IsConstantOperand()) { - __ add(ToOperand(left), - ToImmediate(right, instr->hydrogen()->representation())); - } else { - __ add(ToRegister(left), ToOperand(right)); - } - if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); - } - } -} - - -void LCodeGen::DoMathMinMax(LMathMinMax* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - DCHECK(left->Equals(instr->result())); - HMathMinMax::Operation operation = instr->hydrogen()->operation(); - if (instr->hydrogen()->representation().IsSmiOrInteger32()) { - Label return_left; - Condition condition = (operation == HMathMinMax::kMathMin) - ? 
less_equal - : greater_equal; - if (right->IsConstantOperand()) { - Operand left_op = ToOperand(left); - Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()), - instr->hydrogen()->representation()); - __ cmp(left_op, immediate); - __ j(condition, &return_left, Label::kNear); - __ mov(left_op, immediate); - } else { - Register left_reg = ToRegister(left); - Operand right_op = ToOperand(right); - __ cmp(left_reg, right_op); - __ j(condition, &return_left, Label::kNear); - __ mov(left_reg, right_op); - } - __ bind(&return_left); - } else { - DCHECK(instr->hydrogen()->representation().IsDouble()); - Label check_nan_left, check_zero, return_left, return_right; - Condition condition = (operation == HMathMinMax::kMathMin) ? below : above; - XMMRegister left_reg = ToDoubleRegister(left); - XMMRegister right_reg = ToDoubleRegister(right); - __ ucomisd(left_reg, right_reg); - __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN. - __ j(equal, &check_zero, Label::kNear); // left == right. - __ j(condition, &return_left, Label::kNear); - __ jmp(&return_right, Label::kNear); - - __ bind(&check_zero); - XMMRegister xmm_scratch = double_scratch0(); - __ xorps(xmm_scratch, xmm_scratch); - __ ucomisd(left_reg, xmm_scratch); - __ j(not_equal, &return_left, Label::kNear); // left == right != 0. - // At this point, both left and right are either 0 or -0. - if (operation == HMathMinMax::kMathMin) { - __ orpd(left_reg, right_reg); - } else { - // Since we operate on +0 and/or -0, addsd and andsd have the same effect. - __ addsd(left_reg, right_reg); - } - __ jmp(&return_left, Label::kNear); - - __ bind(&check_nan_left); - __ ucomisd(left_reg, left_reg); // NaN check. - __ j(parity_even, &return_left, Label::kNear); // left == NaN. - __ bind(&return_right); - __ movaps(left_reg, right_reg); - - __ bind(&return_left); - } -} - - -void LCodeGen::DoArithmeticD(LArithmeticD* instr) { - XMMRegister left = ToDoubleRegister(instr->left()); - XMMRegister right = ToDoubleRegister(instr->right()); - XMMRegister result = ToDoubleRegister(instr->result()); - switch (instr->op()) { - case Token::ADD: - if (CpuFeatures::IsSupported(AVX)) { - CpuFeatureScope scope(masm(), AVX); - __ vaddsd(result, left, right); - } else { - DCHECK(result.is(left)); - __ addsd(left, right); - } - break; - case Token::SUB: - if (CpuFeatures::IsSupported(AVX)) { - CpuFeatureScope scope(masm(), AVX); - __ vsubsd(result, left, right); - } else { - DCHECK(result.is(left)); - __ subsd(left, right); - } - break; - case Token::MUL: - if (CpuFeatures::IsSupported(AVX)) { - CpuFeatureScope scope(masm(), AVX); - __ vmulsd(result, left, right); - } else { - DCHECK(result.is(left)); - __ mulsd(left, right); - } - break; - case Token::DIV: - if (CpuFeatures::IsSupported(AVX)) { - CpuFeatureScope scope(masm(), AVX); - __ vdivsd(result, left, right); - } else { - DCHECK(result.is(left)); - __ divsd(left, right); - } - // Don't delete this mov. It may improve performance on some CPUs, - // when there is a (v)mulsd depending on the result - __ movaps(result, result); - break; - case Token::MOD: { - // Pass two doubles as arguments on the stack. - __ PrepareCallCFunction(4, eax); - __ movsd(Operand(esp, 0 * kDoubleSize), left); - __ movsd(Operand(esp, 1 * kDoubleSize), right); - __ CallCFunction( - ExternalReference::mod_two_doubles_operation(isolate()), - 4); - - // Return value is in st(0) on ia32. - // Store it into the result register. 
- __ sub(Operand(esp), Immediate(kDoubleSize)); - __ fstp_d(Operand(esp, 0)); - __ movsd(result, Operand(esp, 0)); - __ add(Operand(esp), Immediate(kDoubleSize)); - break; - } - default: - UNREACHABLE(); - break; - } -} - - -void LCodeGen::DoArithmeticT(LArithmeticT* instr) { - DCHECK(ToRegister(instr->context()).is(esi)); - DCHECK(ToRegister(instr->left()).is(edx)); - DCHECK(ToRegister(instr->right()).is(eax)); - DCHECK(ToRegister(instr->result()).is(eax)); - - UNREACHABLE(); -} - - -template -void LCodeGen::EmitBranch(InstrType instr, Condition cc) { - int left_block = instr->TrueDestination(chunk_); - int right_block = instr->FalseDestination(chunk_); - - int next_block = GetNextEmittedBlock(); - - if (right_block == left_block || cc == no_condition) { - EmitGoto(left_block); - } else if (left_block == next_block) { - __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block)); - } else if (right_block == next_block) { - __ j(cc, chunk_->GetAssemblyLabel(left_block)); - } else { - __ j(cc, chunk_->GetAssemblyLabel(left_block)); - __ jmp(chunk_->GetAssemblyLabel(right_block)); - } -} - - -template -void LCodeGen::EmitTrueBranch(InstrType instr, Condition cc) { - int true_block = instr->TrueDestination(chunk_); - if (cc == no_condition) { - __ jmp(chunk_->GetAssemblyLabel(true_block)); - } else { - __ j(cc, chunk_->GetAssemblyLabel(true_block)); - } -} - - -template -void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) { - int false_block = instr->FalseDestination(chunk_); - if (cc == no_condition) { - __ jmp(chunk_->GetAssemblyLabel(false_block)); - } else { - __ j(cc, chunk_->GetAssemblyLabel(false_block)); - } -} - - -void LCodeGen::DoBranch(LBranch* instr) { - Representation r = instr->hydrogen()->value()->representation(); - if (r.IsSmiOrInteger32()) { - Register reg = ToRegister(instr->value()); - __ test(reg, Operand(reg)); - EmitBranch(instr, not_zero); - } else if (r.IsDouble()) { - DCHECK(!info()->IsStub()); - XMMRegister reg = ToDoubleRegister(instr->value()); - XMMRegister xmm_scratch = double_scratch0(); - __ xorps(xmm_scratch, xmm_scratch); - __ ucomisd(reg, xmm_scratch); - EmitBranch(instr, not_equal); - } else { - DCHECK(r.IsTagged()); - Register reg = ToRegister(instr->value()); - HType type = instr->hydrogen()->value()->type(); - if (type.IsBoolean()) { - DCHECK(!info()->IsStub()); - __ cmp(reg, factory()->true_value()); - EmitBranch(instr, equal); - } else if (type.IsSmi()) { - DCHECK(!info()->IsStub()); - __ test(reg, Operand(reg)); - EmitBranch(instr, not_equal); - } else if (type.IsJSArray()) { - DCHECK(!info()->IsStub()); - EmitBranch(instr, no_condition); - } else if (type.IsHeapNumber()) { - DCHECK(!info()->IsStub()); - XMMRegister xmm_scratch = double_scratch0(); - __ xorps(xmm_scratch, xmm_scratch); - __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); - EmitBranch(instr, not_equal); - } else if (type.IsString()) { - DCHECK(!info()->IsStub()); - __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0)); - EmitBranch(instr, not_equal); - } else { - ToBooleanHints expected = instr->hydrogen()->expected_input_types(); - if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny; - - if (expected & ToBooleanHint::kUndefined) { - // undefined -> false. - __ cmp(reg, factory()->undefined_value()); - __ j(equal, instr->FalseLabel(chunk_)); - } - if (expected & ToBooleanHint::kBoolean) { - // true -> true. - __ cmp(reg, factory()->true_value()); - __ j(equal, instr->TrueLabel(chunk_)); - // false -> false. 
- __ cmp(reg, factory()->false_value());
- __ j(equal, instr->FalseLabel(chunk_));
- }
- if (expected & ToBooleanHint::kNull) {
- // 'null' -> false.
- __ cmp(reg, factory()->null_value());
- __ j(equal, instr->FalseLabel(chunk_));
- }
-
- if (expected & ToBooleanHint::kSmallInteger) {
- // Smis: 0 -> false, all other -> true.
- __ test(reg, Operand(reg));
- __ j(equal, instr->FalseLabel(chunk_));
- __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- } else if (expected & ToBooleanHint::kNeedsMap) {
- // If we need a map later and have a Smi -> deopt.
- __ test(reg, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
- }
-
- Register map = no_reg; // Keep the compiler happy.
- if (expected & ToBooleanHint::kNeedsMap) {
- map = ToRegister(instr->temp());
- DCHECK(!map.is(reg));
- __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
-
- if (expected & ToBooleanHint::kCanBeUndetectable) {
- // Undetectable -> false.
- __ test_b(FieldOperand(map, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, instr->FalseLabel(chunk_));
- }
- }
-
- if (expected & ToBooleanHint::kReceiver) {
- // spec object -> true.
- __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
- __ j(above_equal, instr->TrueLabel(chunk_));
- }
-
- if (expected & ToBooleanHint::kString) {
- // String value -> false iff empty.
- Label not_string;
- __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &not_string, Label::kNear);
- __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
- __ j(not_zero, instr->TrueLabel(chunk_));
- __ jmp(instr->FalseLabel(chunk_));
- __ bind(&not_string);
- }
-
- if (expected & ToBooleanHint::kSymbol) {
- // Symbol value -> true.
- __ CmpInstanceType(map, SYMBOL_TYPE);
- __ j(equal, instr->TrueLabel(chunk_));
- }
-
- if (expected & ToBooleanHint::kHeapNumber) {
- // heap number -> false iff +0, -0, or NaN.
- Label not_heap_number;
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- __ j(not_equal, &not_heap_number, Label::kNear);
- XMMRegister xmm_scratch = double_scratch0();
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
- __ j(zero, instr->FalseLabel(chunk_));
- __ jmp(instr->TrueLabel(chunk_));
- __ bind(&not_heap_number);
- }
-
- if (expected != ToBooleanHint::kAny) {
- // We've seen something for the first time -> deopt.
- // This can only happen if we are not generic already.
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kUnexpectedObject);
- }
- }
- }
-}
-
-
-void LCodeGen::EmitGoto(int block) {
- if (!IsNextEmittedBlock(block)) {
- __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
- }
-}
-
-
-void LCodeGen::DoGoto(LGoto* instr) {
- EmitGoto(instr->block_id());
-}
-
-
-Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
- Condition cond = no_condition;
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT:
- cond = equal;
- break;
- case Token::NE:
- case Token::NE_STRICT:
- cond = not_equal;
- break;
- case Token::LT:
- cond = is_unsigned ? below : less;
- break;
- case Token::GT:
- cond = is_unsigned ? above : greater;
- break;
- case Token::LTE:
- cond = is_unsigned ? below_equal : less_equal;
- break;
- case Token::GTE:
- cond = is_unsigned ?
above_equal : greater_equal; - break; - case Token::IN: - case Token::INSTANCEOF: - default: - UNREACHABLE(); - } - return cond; -} - - -void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - bool is_unsigned = - instr->is_double() || - instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) || - instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32); - Condition cc = TokenToCondition(instr->op(), is_unsigned); - - if (left->IsConstantOperand() && right->IsConstantOperand()) { - // We can statically evaluate the comparison. - double left_val = ToDouble(LConstantOperand::cast(left)); - double right_val = ToDouble(LConstantOperand::cast(right)); - int next_block = Token::EvalComparison(instr->op(), left_val, right_val) - ? instr->TrueDestination(chunk_) - : instr->FalseDestination(chunk_); - EmitGoto(next_block); - } else { - if (instr->is_double()) { - __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right)); - // Don't base result on EFLAGS when a NaN is involved. Instead - // jump to the false block. - __ j(parity_even, instr->FalseLabel(chunk_)); - } else { - if (right->IsConstantOperand()) { - __ cmp(ToOperand(left), - ToImmediate(right, instr->hydrogen()->representation())); - } else if (left->IsConstantOperand()) { - __ cmp(ToOperand(right), - ToImmediate(left, instr->hydrogen()->representation())); - // We commuted the operands, so commute the condition. - cc = CommuteCondition(cc); - } else { - __ cmp(ToRegister(left), ToOperand(right)); - } - } - EmitBranch(instr, cc); - } -} - - -void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { - Register left = ToRegister(instr->left()); - - if (instr->right()->IsConstantOperand()) { - Handle right = ToHandle(LConstantOperand::cast(instr->right())); - __ CmpObject(left, right); - } else { - Operand right = ToOperand(instr->right()); - __ cmp(left, right); - } - EmitBranch(instr, equal); -} - - -void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { - if (instr->hydrogen()->representation().IsTagged()) { - Register input_reg = ToRegister(instr->object()); - __ cmp(input_reg, factory()->the_hole_value()); - EmitBranch(instr, equal); - return; - } - - XMMRegister input_reg = ToDoubleRegister(instr->object()); - __ ucomisd(input_reg, input_reg); - EmitFalseBranch(instr, parity_odd); - - __ sub(esp, Immediate(kDoubleSize)); - __ movsd(MemOperand(esp, 0), input_reg); - - __ add(esp, Immediate(kDoubleSize)); - int offset = sizeof(kHoleNanUpper32); - __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32)); - EmitBranch(instr, equal); -} - - -Condition LCodeGen::EmitIsString(Register input, - Register temp1, - Label* is_not_string, - SmiCheck check_needed = INLINE_SMI_CHECK) { - if (check_needed == INLINE_SMI_CHECK) { - __ JumpIfSmi(input, is_not_string); - } - - Condition cond = masm_->IsObjectStringType(input, temp1, temp1); - - return cond; -} - - -void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { - Register reg = ToRegister(instr->value()); - Register temp = ToRegister(instr->temp()); - - SmiCheck check_needed = - instr->hydrogen()->value()->type().IsHeapObject() - ? 
OMIT_SMI_CHECK : INLINE_SMI_CHECK; - - Condition true_cond = EmitIsString( - reg, temp, instr->FalseLabel(chunk_), check_needed); - - EmitBranch(instr, true_cond); -} - - -void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { - Operand input = ToOperand(instr->value()); - - __ test(input, Immediate(kSmiTagMask)); - EmitBranch(instr, zero); -} - - -void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { - Register input = ToRegister(instr->value()); - Register temp = ToRegister(instr->temp()); - - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - STATIC_ASSERT(kSmiTag == 0); - __ JumpIfSmi(input, instr->FalseLabel(chunk_)); - } - __ mov(temp, FieldOperand(input, HeapObject::kMapOffset)); - __ test_b(FieldOperand(temp, Map::kBitFieldOffset), - Immediate(1 << Map::kIsUndetectable)); - EmitBranch(instr, not_zero); -} - - -static Condition ComputeCompareCondition(Token::Value op) { - switch (op) { - case Token::EQ_STRICT: - case Token::EQ: - return equal; - case Token::LT: - return less; - case Token::GT: - return greater; - case Token::LTE: - return less_equal; - case Token::GTE: - return greater_equal; - default: - UNREACHABLE(); - } -} - - -void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { - DCHECK(ToRegister(instr->context()).is(esi)); - DCHECK(ToRegister(instr->left()).is(edx)); - DCHECK(ToRegister(instr->right()).is(eax)); - - Handle code = CodeFactory::StringCompare(isolate(), instr->op()).code(); - CallCode(code, RelocInfo::CODE_TARGET, instr); - __ CompareRoot(eax, Heap::kTrueValueRootIndex); - EmitBranch(instr, equal); -} - - -static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { - InstanceType from = instr->from(); - InstanceType to = instr->to(); - if (from == FIRST_TYPE) return to; - DCHECK(from == to || to == LAST_TYPE); - return from; -} - - -static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { - InstanceType from = instr->from(); - InstanceType to = instr->to(); - if (from == to) return equal; - if (to == LAST_TYPE) return above_equal; - if (from == FIRST_TYPE) return below_equal; - UNREACHABLE(); -} - - -void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { - Register input = ToRegister(instr->value()); - Register temp = ToRegister(instr->temp()); - - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - __ JumpIfSmi(input, instr->FalseLabel(chunk_)); - } - - __ CmpObjectType(input, TestType(instr->hydrogen()), temp); - EmitBranch(instr, BranchCondition(instr->hydrogen())); -} - -// Branches to a label or falls through with the answer in the z flag. Trashes -// the temp registers, but not the input. -void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false, - Handle class_name, Register input, - Register temp, Register temp2) { - DCHECK(!input.is(temp)); - DCHECK(!input.is(temp2)); - DCHECK(!temp.is(temp2)); - __ JumpIfSmi(input, is_false); - - __ CmpObjectType(input, FIRST_FUNCTION_TYPE, temp); - STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE); - if (String::Equals(isolate()->factory()->Function_string(), class_name)) { - __ j(above_equal, is_true); - } else { - __ j(above_equal, is_false); - } - - // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. - // Check if the constructor in the map is a function. - __ GetMapConstructor(temp, temp, temp2); - // Objects with a non-function constructor have class 'Object'. 
- __ CmpInstanceType(temp2, JS_FUNCTION_TYPE); - if (String::Equals(class_name, isolate()->factory()->Object_string())) { - __ j(not_equal, is_true); - } else { - __ j(not_equal, is_false); - } - - // temp now contains the constructor function. Grab the - // instance class name from there. - __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset)); - __ mov(temp, - FieldOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset)); - // The class name we are testing against is internalized since it's a literal. - // The name in the constructor is internalized because of the way the context - // is booted. This routine isn't expected to work for random API-created - // classes and it doesn't have to because you can't access it with natives - // syntax. Since both sides are internalized it is sufficient to use an - // identity comparison. - __ cmp(temp, class_name); - // End with the answer in the z flag. -} - -void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { - Register input = ToRegister(instr->value()); - Register temp = ToRegister(instr->temp()); - Register temp2 = ToRegister(instr->temp2()); - - Handle class_name = instr->hydrogen()->class_name(); - - EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), - class_name, input, temp, temp2); - - EmitBranch(instr, equal); -} - -void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { - Register reg = ToRegister(instr->value()); - __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map()); - EmitBranch(instr, equal); -} - - -void LCodeGen::DoHasInPrototypeChainAndBranch( - LHasInPrototypeChainAndBranch* instr) { - Register const object = ToRegister(instr->object()); - Register const object_map = ToRegister(instr->scratch()); - Register const object_prototype = object_map; - Register const prototype = ToRegister(instr->prototype()); - - // The {object} must be a spec object. It's sufficient to know that {object} - // is not a smi, since all other non-spec objects have {null} prototypes and - // will be ruled out below. - if (instr->hydrogen()->ObjectNeedsSmiCheck()) { - __ test(object, Immediate(kSmiTagMask)); - EmitFalseBranch(instr, zero); - } - - // Loop through the {object}s prototype chain looking for the {prototype}. - __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset)); - Label loop; - __ bind(&loop); - - // Deoptimize if the object needs to be access checked. - __ test_b(FieldOperand(object_map, Map::kBitFieldOffset), - Immediate(1 << Map::kIsAccessCheckNeeded)); - DeoptimizeIf(not_zero, instr, DeoptimizeReason::kAccessCheck); - // Deoptimize for proxies. 
- __ CmpInstanceType(object_map, JS_PROXY_TYPE); - DeoptimizeIf(equal, instr, DeoptimizeReason::kProxy); - - __ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset)); - __ cmp(object_prototype, factory()->null_value()); - EmitFalseBranch(instr, equal); - __ cmp(object_prototype, prototype); - EmitTrueBranch(instr, equal); - __ mov(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset)); - __ jmp(&loop); -} - - -void LCodeGen::DoCmpT(LCmpT* instr) { - Token::Value op = instr->op(); - - Handle ic = CodeFactory::CompareIC(isolate(), op).code(); - CallCode(ic, RelocInfo::CODE_TARGET, instr); - - Condition condition = ComputeCompareCondition(op); - Label true_value, done; - __ test(eax, Operand(eax)); - __ j(condition, &true_value, Label::kNear); - __ mov(ToRegister(instr->result()), factory()->false_value()); - __ jmp(&done, Label::kNear); - __ bind(&true_value); - __ mov(ToRegister(instr->result()), factory()->true_value()); - __ bind(&done); -} - -void LCodeGen::EmitReturn(LReturn* instr) { - int extra_value_count = 1; - - if (instr->has_constant_parameter_count()) { - int parameter_count = ToInteger32(instr->constant_parameter_count()); - __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx); - } else { - DCHECK(info()->IsStub()); // Functions would need to drop one more value. - Register reg = ToRegister(instr->parameter_count()); - // The argument count parameter is a smi - __ SmiUntag(reg); - Register return_addr_reg = reg.is(ecx) ? ebx : ecx; - - // emit code to restore stack based on instr->parameter_count() - __ pop(return_addr_reg); // save return address - __ shl(reg, kPointerSizeLog2); - __ add(esp, reg); - __ jmp(return_addr_reg); - } -} - - -void LCodeGen::DoReturn(LReturn* instr) { - if (FLAG_trace && info()->IsOptimizing()) { - // Preserve the return value on the stack and rely on the runtime call - // to return the value in the same register. We're leaving the code - // managed by the register allocator and tearing down the frame, it's - // safe to write to the context register. 
- __ push(eax); - __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); - __ CallRuntime(Runtime::kTraceExit); - } - if (info()->saves_caller_doubles()) RestoreCallerDoubles(); - if (NeedsEagerFrame()) { - __ mov(esp, ebp); - __ pop(ebp); - } - - EmitReturn(instr); -} - - -void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { - Register context = ToRegister(instr->context()); - Register result = ToRegister(instr->result()); - __ mov(result, ContextOperand(context, instr->slot_index())); - - if (instr->hydrogen()->RequiresHoleCheck()) { - __ cmp(result, factory()->the_hole_value()); - if (instr->hydrogen()->DeoptimizesOnHole()) { - DeoptimizeIf(equal, instr, DeoptimizeReason::kHole); - } else { - Label is_not_hole; - __ j(not_equal, &is_not_hole, Label::kNear); - __ mov(result, factory()->undefined_value()); - __ bind(&is_not_hole); - } - } -} - - -void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { - Register context = ToRegister(instr->context()); - Register value = ToRegister(instr->value()); - - Label skip_assignment; - - Operand target = ContextOperand(context, instr->slot_index()); - if (instr->hydrogen()->RequiresHoleCheck()) { - __ cmp(target, factory()->the_hole_value()); - if (instr->hydrogen()->DeoptimizesOnHole()) { - DeoptimizeIf(equal, instr, DeoptimizeReason::kHole); - } else { - __ j(not_equal, &skip_assignment, Label::kNear); - } - } - - __ mov(target, value); - if (instr->hydrogen()->NeedsWriteBarrier()) { - SmiCheck check_needed = - instr->hydrogen()->value()->type().IsHeapObject() - ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; - Register temp = ToRegister(instr->temp()); - int offset = Context::SlotOffset(instr->slot_index()); - __ RecordWriteContextSlot(context, - offset, - value, - temp, - kSaveFPRegs, - EMIT_REMEMBERED_SET, - check_needed); - } - - __ bind(&skip_assignment); -} - - -void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { - HObjectAccess access = instr->hydrogen()->access(); - int offset = access.offset(); - - if (access.IsExternalMemory()) { - Register result = ToRegister(instr->result()); - MemOperand operand = instr->object()->IsConstantOperand() - ? 
MemOperand::StaticVariable(ToExternalReference( - LConstantOperand::cast(instr->object()))) - : MemOperand(ToRegister(instr->object()), offset); - __ Load(result, operand, access.representation()); - return; - } - - Register object = ToRegister(instr->object()); - if (instr->hydrogen()->representation().IsDouble()) { - XMMRegister result = ToDoubleRegister(instr->result()); - __ movsd(result, FieldOperand(object, offset)); - return; - } - - Register result = ToRegister(instr->result()); - if (!access.IsInobject()) { - __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset)); - object = result; - } - __ Load(result, FieldOperand(object, offset), access.representation()); -} - - -void LCodeGen::EmitPushTaggedOperand(LOperand* operand) { - DCHECK(!operand->IsDoubleRegister()); - if (operand->IsConstantOperand()) { - Handle object = ToHandle(LConstantOperand::cast(operand)); - AllowDeferredHandleDereference smi_check; - if (object->IsSmi()) { - __ Push(Handle::cast(object)); - } else { - __ PushHeapObject(Handle::cast(object)); - } - } else if (operand->IsRegister()) { - __ push(ToRegister(operand)); - } else { - __ push(ToOperand(operand)); - } -} - - -void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { - Register function = ToRegister(instr->function()); - Register temp = ToRegister(instr->temp()); - Register result = ToRegister(instr->result()); - - // Get the prototype or initial map from the function. - __ mov(result, - FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); - - // Check that the function has a prototype or an initial map. - __ cmp(Operand(result), Immediate(factory()->the_hole_value())); - DeoptimizeIf(equal, instr, DeoptimizeReason::kHole); - - // If the function does not have an initial map, we're done. - Label done; - __ CmpObjectType(result, MAP_TYPE, temp); - __ j(not_equal, &done, Label::kNear); - - // Get the prototype from the initial map. - __ mov(result, FieldOperand(result, Map::kPrototypeOffset)); - - // All done. - __ bind(&done); -} - - -void LCodeGen::DoLoadRoot(LLoadRoot* instr) { - Register result = ToRegister(instr->result()); - __ LoadRoot(result, instr->index()); -} - - -void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { - Register arguments = ToRegister(instr->arguments()); - Register result = ToRegister(instr->result()); - if (instr->length()->IsConstantOperand() && - instr->index()->IsConstantOperand()) { - int const_index = ToInteger32(LConstantOperand::cast(instr->index())); - int const_length = ToInteger32(LConstantOperand::cast(instr->length())); - int index = (const_length - const_index) + 1; - __ mov(result, Operand(arguments, index * kPointerSize)); - } else { - Register length = ToRegister(instr->length()); - Operand index = ToOperand(instr->index()); - // There are two words between the frame pointer and the last argument. - // Subtracting from length accounts for one of them add one more. 
- __ sub(length, index); - __ mov(result, Operand(arguments, length, times_4, kPointerSize)); - } -} - - -void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { - ElementsKind elements_kind = instr->elements_kind(); - LOperand* key = instr->key(); - if (!key->IsConstantOperand() && - ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(), - elements_kind)) { - __ SmiUntag(ToRegister(key)); - } - Operand operand(BuildFastArrayOperand( - instr->elements(), - key, - instr->hydrogen()->key()->representation(), - elements_kind, - instr->base_offset())); - if (elements_kind == FLOAT32_ELEMENTS) { - XMMRegister result(ToDoubleRegister(instr->result())); - __ movss(result, operand); - __ cvtss2sd(result, result); - } else if (elements_kind == FLOAT64_ELEMENTS) { - __ movsd(ToDoubleRegister(instr->result()), operand); - } else { - Register result(ToRegister(instr->result())); - switch (elements_kind) { - case INT8_ELEMENTS: - __ movsx_b(result, operand); - break; - case UINT8_ELEMENTS: - case UINT8_CLAMPED_ELEMENTS: - __ movzx_b(result, operand); - break; - case INT16_ELEMENTS: - __ movsx_w(result, operand); - break; - case UINT16_ELEMENTS: - __ movzx_w(result, operand); - break; - case INT32_ELEMENTS: - __ mov(result, operand); - break; - case UINT32_ELEMENTS: - __ mov(result, operand); - if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { - __ test(result, Operand(result)); - DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue); - } - break; - case FLOAT32_ELEMENTS: - case FLOAT64_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case DICTIONARY_ELEMENTS: - case FAST_SLOPPY_ARGUMENTS_ELEMENTS: - case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: - case FAST_STRING_WRAPPER_ELEMENTS: - case SLOW_STRING_WRAPPER_ELEMENTS: - case NO_ELEMENTS: - UNREACHABLE(); - break; - } - } -} - - -void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { - if (instr->hydrogen()->RequiresHoleCheck()) { - Operand hole_check_operand = BuildFastArrayOperand( - instr->elements(), instr->key(), - instr->hydrogen()->key()->representation(), - FAST_DOUBLE_ELEMENTS, - instr->base_offset() + sizeof(kHoleNanLower32)); - __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); - DeoptimizeIf(equal, instr, DeoptimizeReason::kHole); - } - - Operand double_load_operand = BuildFastArrayOperand( - instr->elements(), - instr->key(), - instr->hydrogen()->key()->representation(), - FAST_DOUBLE_ELEMENTS, - instr->base_offset()); - XMMRegister result = ToDoubleRegister(instr->result()); - __ movsd(result, double_load_operand); -} - - -void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { - Register result = ToRegister(instr->result()); - - // Load the result. - __ mov(result, - BuildFastArrayOperand(instr->elements(), instr->key(), - instr->hydrogen()->key()->representation(), - FAST_ELEMENTS, instr->base_offset())); - - // Check for the hole value. 
- if (instr->hydrogen()->RequiresHoleCheck()) { - if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { - __ test(result, Immediate(kSmiTagMask)); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotASmi); - } else { - __ cmp(result, factory()->the_hole_value()); - DeoptimizeIf(equal, instr, DeoptimizeReason::kHole); - } - } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) { - DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS); - Label done; - __ cmp(result, factory()->the_hole_value()); - __ j(not_equal, &done); - if (info()->IsStub()) { - // A stub can safely convert the hole to undefined only if the array - // protector cell contains (Smi) Isolate::kProtectorValid. - // Otherwise it needs to bail out. - __ LoadRoot(result, Heap::kArrayProtectorRootIndex); - __ cmp(FieldOperand(result, PropertyCell::kValueOffset), - Immediate(Smi::FromInt(Isolate::kProtectorValid))); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kHole); - } - __ mov(result, isolate()->factory()->undefined_value()); - __ bind(&done); - } -} - - -void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { - if (instr->is_fixed_typed_array()) { - DoLoadKeyedExternalArray(instr); - } else if (instr->hydrogen()->representation().IsDouble()) { - DoLoadKeyedFixedDoubleArray(instr); - } else { - DoLoadKeyedFixedArray(instr); - } -} - - -Operand LCodeGen::BuildFastArrayOperand( - LOperand* elements_pointer, - LOperand* key, - Representation key_representation, - ElementsKind elements_kind, - uint32_t base_offset) { - Register elements_pointer_reg = ToRegister(elements_pointer); - int element_shift_size = ElementsKindToShiftSize(elements_kind); - int shift_size = element_shift_size; - if (key->IsConstantOperand()) { - int constant_value = ToInteger32(LConstantOperand::cast(key)); - if (constant_value & 0xF0000000) { - Abort(kArrayIndexConstantValueTooBig); - } - return Operand(elements_pointer_reg, - ((constant_value) << shift_size) - + base_offset); - } else { - // Take the tag bit into account while computing the shift size. - if (key_representation.IsSmi() && (shift_size >= 1)) { - shift_size -= kSmiTagSize; - } - ScaleFactor scale_factor = static_cast(shift_size); - return Operand(elements_pointer_reg, - ToRegister(key), - scale_factor, - base_offset); - } -} - - -void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { - Register result = ToRegister(instr->result()); - - if (instr->hydrogen()->from_inlined()) { - __ lea(result, Operand(esp, -2 * kPointerSize)); - } else if (instr->hydrogen()->arguments_adaptor()) { - // Check for arguments adapter frame. - Label done, adapted; - __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); - __ mov(result, - Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset)); - __ cmp(Operand(result), - Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); - __ j(equal, &adapted, Label::kNear); - - // No arguments adaptor frame. - __ mov(result, Operand(ebp)); - __ jmp(&done, Label::kNear); - - // Arguments adaptor frame present. - __ bind(&adapted); - __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); - - // Result is the frame pointer for the frame if not adapted and for the real - // frame below the adaptor frame if adapted. 
- __ bind(&done); - } else { - __ mov(result, Operand(ebp)); - } -} - - -void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { - Operand elem = ToOperand(instr->elements()); - Register result = ToRegister(instr->result()); - - Label done; - - // If no arguments adaptor frame the number of arguments is fixed. - __ cmp(ebp, elem); - __ mov(result, Immediate(scope()->num_parameters())); - __ j(equal, &done, Label::kNear); - - // Arguments adaptor frame present. Get argument length from there. - __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); - __ mov(result, Operand(result, - ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ SmiUntag(result); - - // Argument length is in result register. - __ bind(&done); -} - - -void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { - Register receiver = ToRegister(instr->receiver()); - Register function = ToRegister(instr->function()); - - // If the receiver is null or undefined, we have to pass the global - // object as a receiver to normal functions. Values have to be - // passed unchanged to builtins and strict-mode functions. - Label receiver_ok, global_object; - Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; - Register scratch = ToRegister(instr->temp()); - - if (!instr->hydrogen()->known_function()) { - // Do not transform the receiver to object for strict mode functions or - // builtins - __ mov(scratch, - FieldOperand(function, JSFunction::kSharedFunctionInfoOffset)); - __ test(FieldOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset), - Immediate(SharedFunctionInfo::IsStrictBit::kMask | - SharedFunctionInfo::IsNativeBit::kMask)); - __ j(not_equal, &receiver_ok, dist); - } - - // Normal function. Replace undefined or null with global receiver. - __ cmp(receiver, factory()->null_value()); - __ j(equal, &global_object, dist); - __ cmp(receiver, factory()->undefined_value()); - __ j(equal, &global_object, dist); - - // The receiver should be a JS object. - __ test(receiver, Immediate(kSmiTagMask)); - DeoptimizeIf(equal, instr, DeoptimizeReason::kSmi); - __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, scratch); - DeoptimizeIf(below, instr, DeoptimizeReason::kNotAJavaScriptObject); - - __ jmp(&receiver_ok, dist); - __ bind(&global_object); - __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset)); - __ mov(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX)); - __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_PROXY_INDEX)); - __ bind(&receiver_ok); -} - - -void LCodeGen::DoApplyArguments(LApplyArguments* instr) { - Register receiver = ToRegister(instr->receiver()); - Register function = ToRegister(instr->function()); - Register length = ToRegister(instr->length()); - Register elements = ToRegister(instr->elements()); - DCHECK(receiver.is(eax)); // Used for parameter count. - DCHECK(function.is(edi)); // Required by InvokeFunction. - DCHECK(ToRegister(instr->result()).is(eax)); - - // Copy the arguments to this function possibly from the - // adaptor frame below it. - const uint32_t kArgumentsLimit = 1 * KB; - __ cmp(length, kArgumentsLimit); - DeoptimizeIf(above, instr, DeoptimizeReason::kTooManyArguments); - - __ push(receiver); - __ mov(receiver, length); - - // Loop through the arguments pushing them onto the execution - // stack. - Label invoke, loop; - // length is a small non-negative integer, due to the test above. 
- __ test(length, Operand(length)); - __ j(zero, &invoke, Label::kNear); - __ bind(&loop); - __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize)); - __ dec(length); - __ j(not_zero, &loop); - - // Invoke the function. - __ bind(&invoke); - - InvokeFlag flag = CALL_FUNCTION; - if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) { - DCHECK(!info()->saves_caller_doubles()); - // TODO(ishell): drop current frame before pushing arguments to the stack. - flag = JUMP_FUNCTION; - ParameterCount actual(eax); - // It is safe to use ebx, ecx and edx as scratch registers here given that - // 1) we are not going to return to caller function anyway, - // 2) ebx (expected arguments count) and edx (new.target) will be - // initialized below. - PrepareForTailCall(actual, ebx, ecx, edx); - } - - DCHECK(instr->HasPointerMap()); - LPointerMap* pointers = instr->pointer_map(); - SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt); - ParameterCount actual(eax); - __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator); -} - - -void LCodeGen::DoDebugBreak(LDebugBreak* instr) { - __ int3(); -} - - -void LCodeGen::DoPushArgument(LPushArgument* instr) { - LOperand* argument = instr->value(); - EmitPushTaggedOperand(argument); -} - - -void LCodeGen::DoDrop(LDrop* instr) { - __ Drop(instr->count()); -} - - -void LCodeGen::DoThisFunction(LThisFunction* instr) { - Register result = ToRegister(instr->result()); - __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); -} - - -void LCodeGen::DoContext(LContext* instr) { - Register result = ToRegister(instr->result()); - if (info()->IsOptimizing()) { - __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset)); - } else { - // If there is no frame, the context must be in esi. - DCHECK(result.is(esi)); - } -} - - -void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { - DCHECK(ToRegister(instr->context()).is(esi)); - __ push(Immediate(instr->hydrogen()->declarations())); - __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags()))); - __ push(Immediate(instr->hydrogen()->feedback_vector())); - CallRuntime(Runtime::kDeclareGlobals, instr); -} - -void LCodeGen::CallKnownFunction(Handle function, - int formal_parameter_count, int arity, - bool is_tail_call, LInstruction* instr) { - bool dont_adapt_arguments = - formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; - bool can_invoke_directly = - dont_adapt_arguments || formal_parameter_count == arity; - - Register function_reg = edi; - - if (can_invoke_directly) { - // Change context. - __ mov(esi, FieldOperand(function_reg, JSFunction::kContextOffset)); - - // Always initialize new target and number of actual arguments. - __ mov(edx, factory()->undefined_value()); - __ mov(eax, arity); - - bool is_self_call = function.is_identical_to(info()->closure()); - - // Invoke function directly. - if (is_self_call) { - Handle self(reinterpret_cast(__ CodeObject().location())); - if (is_tail_call) { - __ Jump(self, RelocInfo::CODE_TARGET); - } else { - __ Call(self, RelocInfo::CODE_TARGET); - } - } else { - Operand target = FieldOperand(function_reg, JSFunction::kCodeEntryOffset); - if (is_tail_call) { - __ jmp(target); - } else { - __ call(target); - } - } - - if (!is_tail_call) { - // Set up deoptimization. - RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); - } - } else { - // We need to adapt arguments. 
-    LPointerMap* pointers = instr->pointer_map();
-    SafepointGenerator generator(
-        this, pointers, Safepoint::kLazyDeopt);
-    ParameterCount actual(arity);
-    ParameterCount expected(formal_parameter_count);
-    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
-    __ InvokeFunction(function_reg, expected, actual, flag, generator);
-  }
-}
-
-
-void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
-  DCHECK(ToRegister(instr->result()).is(eax));
-
-  if (instr->hydrogen()->IsTailCall()) {
-    if (NeedsEagerFrame()) __ leave();
-
-    if (instr->target()->IsConstantOperand()) {
-      LConstantOperand* target = LConstantOperand::cast(instr->target());
-      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
-      __ jmp(code, RelocInfo::CODE_TARGET);
-    } else {
-      DCHECK(instr->target()->IsRegister());
-      Register target = ToRegister(instr->target());
-      __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
-      __ jmp(target);
-    }
-  } else {
-    LPointerMap* pointers = instr->pointer_map();
-    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-
-    if (instr->target()->IsConstantOperand()) {
-      LConstantOperand* target = LConstantOperand::cast(instr->target());
-      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
-      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
-      __ call(code, RelocInfo::CODE_TARGET);
-    } else {
-      DCHECK(instr->target()->IsRegister());
-      Register target = ToRegister(instr->target());
-      generator.BeforeCall(__ CallSize(Operand(target)));
-      __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
-      __ call(target);
-    }
-    generator.AfterCall();
-  }
-}
-
-
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
-  Register input_reg = ToRegister(instr->value());
-  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
-         factory()->heap_number_map());
-  DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
-
-  Label slow, allocated, done;
-  uint32_t available_regs = eax.bit() | ecx.bit() | edx.bit() | ebx.bit();
-  available_regs &= ~input_reg.bit();
-  if (instr->context()->IsRegister()) {
-    // Make sure that the context isn't overwritten in the AllocateHeapNumber
-    // macro below.
-    available_regs &= ~ToRegister(instr->context()).bit();
-  }
-
-  Register tmp =
-      Register::from_code(base::bits::CountTrailingZeros32(available_regs));
-  available_regs &= ~tmp.bit();
-  Register tmp2 =
-      Register::from_code(base::bits::CountTrailingZeros32(available_regs));
-
-  // Preserve the value of all registers.
-  PushSafepointRegistersScope scope(this);
-
-  __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
-  // Check the sign of the argument. If the argument is positive, just
-  // return it. We do not need to patch the stack since |input| and
-  // |result| are the same register and |input| will be restored
-  // unchanged by popping safepoint registers.
-  __ test(tmp, Immediate(HeapNumber::kSignMask));
-  __ j(zero, &done, Label::kNear);
-
-  __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
-  __ jmp(&allocated, Label::kNear);
-
-  // Slow case: Call the runtime system to do the number allocation.
-  __ bind(&slow);
-  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
-                          instr, instr->context());
-  // Set the pointer to the new heap number in tmp.
-  if (!tmp.is(eax)) __ mov(tmp, eax);
-  // Restore input_reg after call to runtime.
- __ LoadFromSafepointRegisterSlot(input_reg, input_reg); - - __ bind(&allocated); - __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset)); - __ and_(tmp2, ~HeapNumber::kSignMask); - __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2); - __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); - __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2); - __ StoreToSafepointRegisterSlot(input_reg, tmp); - - __ bind(&done); -} - - -void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { - Register input_reg = ToRegister(instr->value()); - __ test(input_reg, Operand(input_reg)); - Label is_positive; - __ j(not_sign, &is_positive, Label::kNear); - __ neg(input_reg); // Sets flags. - DeoptimizeIf(negative, instr, DeoptimizeReason::kOverflow); - __ bind(&is_positive); -} - - -void LCodeGen::DoMathAbs(LMathAbs* instr) { - // Class for deferred case. - class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode { - public: - DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, - LMathAbs* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { - codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); - } - LInstruction* instr() override { return instr_; } - - private: - LMathAbs* instr_; - }; - - DCHECK(instr->value()->Equals(instr->result())); - Representation r = instr->hydrogen()->value()->representation(); - - if (r.IsDouble()) { - XMMRegister scratch = double_scratch0(); - XMMRegister input_reg = ToDoubleRegister(instr->value()); - __ xorps(scratch, scratch); - __ subsd(scratch, input_reg); - __ andps(input_reg, scratch); - } else if (r.IsSmiOrInteger32()) { - EmitIntegerMathAbs(instr); - } else { // Tagged case. - DeferredMathAbsTaggedHeapNumber* deferred = - new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); - Register input_reg = ToRegister(instr->value()); - // Smi check. - __ JumpIfNotSmi(input_reg, deferred->entry()); - EmitIntegerMathAbs(instr); - __ bind(deferred->exit()); - } -} - -void LCodeGen::DoMathFloorD(LMathFloorD* instr) { - XMMRegister output_reg = ToDoubleRegister(instr->result()); - XMMRegister input_reg = ToDoubleRegister(instr->value()); - CpuFeatureScope scope(masm(), SSE4_1); - __ roundsd(output_reg, input_reg, kRoundDown); -} - -void LCodeGen::DoMathFloorI(LMathFloorI* instr) { - XMMRegister xmm_scratch = double_scratch0(); - Register output_reg = ToRegister(instr->result()); - XMMRegister input_reg = ToDoubleRegister(instr->value()); - - if (CpuFeatures::IsSupported(SSE4_1)) { - CpuFeatureScope scope(masm(), SSE4_1); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // Deoptimize on negative zero. - Label non_zero; - __ xorps(xmm_scratch, xmm_scratch); // Zero the register. - __ ucomisd(input_reg, xmm_scratch); - __ j(not_equal, &non_zero, Label::kNear); - __ movmskpd(output_reg, input_reg); - __ test(output_reg, Immediate(1)); - DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero); - __ bind(&non_zero); - } - __ roundsd(xmm_scratch, input_reg, kRoundDown); - __ cvttsd2si(output_reg, Operand(xmm_scratch)); - // Overflow is signalled with minint. - __ cmp(output_reg, 0x1); - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); - } else { - Label negative_sign, done; - // Deoptimize on unordered. - __ xorps(xmm_scratch, xmm_scratch); // Zero the register. 
- __ ucomisd(input_reg, xmm_scratch); - DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN); - __ j(below, &negative_sign, Label::kNear); - - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // Check for negative zero. - Label positive_sign; - __ j(above, &positive_sign, Label::kNear); - __ movmskpd(output_reg, input_reg); - __ test(output_reg, Immediate(1)); - DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero); - __ Move(output_reg, Immediate(0)); - __ jmp(&done, Label::kNear); - __ bind(&positive_sign); - } - - // Use truncating instruction (OK because input is positive). - __ cvttsd2si(output_reg, Operand(input_reg)); - // Overflow is signalled with minint. - __ cmp(output_reg, 0x1); - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); - __ jmp(&done, Label::kNear); - - // Non-zero negative reaches here. - __ bind(&negative_sign); - // Truncate, then compare and compensate. - __ cvttsd2si(output_reg, Operand(input_reg)); - __ Cvtsi2sd(xmm_scratch, output_reg); - __ ucomisd(input_reg, xmm_scratch); - __ j(equal, &done, Label::kNear); - __ sub(output_reg, Immediate(1)); - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); - - __ bind(&done); - } -} - -void LCodeGen::DoMathRoundD(LMathRoundD* instr) { - XMMRegister xmm_scratch = double_scratch0(); - XMMRegister output_reg = ToDoubleRegister(instr->result()); - XMMRegister input_reg = ToDoubleRegister(instr->value()); - CpuFeatureScope scope(masm(), SSE4_1); - Label done; - __ roundsd(output_reg, input_reg, kRoundUp); - __ Move(xmm_scratch, -0.5); - __ addsd(xmm_scratch, output_reg); - __ ucomisd(xmm_scratch, input_reg); - __ j(below_equal, &done, Label::kNear); - __ Move(xmm_scratch, 1.0); - __ subsd(output_reg, xmm_scratch); - __ bind(&done); -} - -void LCodeGen::DoMathRoundI(LMathRoundI* instr) { - Register output_reg = ToRegister(instr->result()); - XMMRegister input_reg = ToDoubleRegister(instr->value()); - XMMRegister xmm_scratch = double_scratch0(); - XMMRegister input_temp = ToDoubleRegister(instr->temp()); - ExternalReference one_half = ExternalReference::address_of_one_half(); - ExternalReference minus_one_half = - ExternalReference::address_of_minus_one_half(); - - Label done, round_to_zero, below_one_half, do_not_compensate; - Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; - - __ movsd(xmm_scratch, Operand::StaticVariable(one_half)); - __ ucomisd(xmm_scratch, input_reg); - __ j(above, &below_one_half, Label::kNear); - - // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x). - __ addsd(xmm_scratch, input_reg); - __ cvttsd2si(output_reg, Operand(xmm_scratch)); - // Overflow is signalled with minint. - __ cmp(output_reg, 0x1); - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); - __ jmp(&done, dist); - - __ bind(&below_one_half); - __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half)); - __ ucomisd(xmm_scratch, input_reg); - __ j(below_equal, &round_to_zero, Label::kNear); - - // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then - // compare and compensate. - __ movaps(input_temp, input_reg); // Do not alter input_reg. - __ subsd(input_temp, xmm_scratch); - __ cvttsd2si(output_reg, Operand(input_temp)); - // Catch minint due to overflow, and to prevent overflow when compensating. 
- __ cmp(output_reg, 0x1); - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); - - __ Cvtsi2sd(xmm_scratch, output_reg); - __ ucomisd(xmm_scratch, input_temp); - __ j(equal, &done, dist); - __ sub(output_reg, Immediate(1)); - // No overflow because we already ruled out minint. - __ jmp(&done, dist); - - __ bind(&round_to_zero); - // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if - // we can ignore the difference between a result of -0 and +0. - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // If the sign is positive, we return +0. - __ movmskpd(output_reg, input_reg); - __ test(output_reg, Immediate(1)); - DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero); - } - __ Move(output_reg, Immediate(0)); - __ bind(&done); -} - - -void LCodeGen::DoMathFround(LMathFround* instr) { - XMMRegister input_reg = ToDoubleRegister(instr->value()); - XMMRegister output_reg = ToDoubleRegister(instr->result()); - __ cvtsd2ss(output_reg, input_reg); - __ cvtss2sd(output_reg, output_reg); -} - - -void LCodeGen::DoMathSqrt(LMathSqrt* instr) { - Operand input = ToOperand(instr->value()); - XMMRegister output = ToDoubleRegister(instr->result()); - __ sqrtsd(output, input); -} - - -void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { - XMMRegister xmm_scratch = double_scratch0(); - XMMRegister input_reg = ToDoubleRegister(instr->value()); - Register scratch = ToRegister(instr->temp()); - DCHECK(ToDoubleRegister(instr->result()).is(input_reg)); - - // Note that according to ECMA-262 15.8.2.13: - // Math.pow(-Infinity, 0.5) == Infinity - // Math.sqrt(-Infinity) == NaN - Label done, sqrt; - // Check base for -Infinity. According to IEEE-754, single-precision - // -Infinity has the highest 9 bits set and the lowest 23 bits cleared. - __ mov(scratch, 0xFF800000); - __ movd(xmm_scratch, scratch); - __ cvtss2sd(xmm_scratch, xmm_scratch); - __ ucomisd(input_reg, xmm_scratch); - // Comparing -Infinity with NaN results in "unordered", which sets the - // zero flag as if both were equal. However, it also sets the carry flag. - __ j(not_equal, &sqrt, Label::kNear); - __ j(carry, &sqrt, Label::kNear); - // If input is -Infinity, return Infinity. - __ xorps(input_reg, input_reg); - __ subsd(input_reg, xmm_scratch); - __ jmp(&done, Label::kNear); - - // Square root. - __ bind(&sqrt); - __ xorps(xmm_scratch, xmm_scratch); - __ addsd(input_reg, xmm_scratch); // Convert -0 to +0. - __ sqrtsd(input_reg, input_reg); - __ bind(&done); -} - - -void LCodeGen::DoPower(LPower* instr) { - Representation exponent_type = instr->hydrogen()->right()->representation(); - // Having marked this as a call, we can use any registers. - // Just make sure that the input/output registers are the expected ones. 
- Register tagged_exponent = MathPowTaggedDescriptor::exponent(); - DCHECK(!instr->right()->IsDoubleRegister() || - ToDoubleRegister(instr->right()).is(xmm1)); - DCHECK(!instr->right()->IsRegister() || - ToRegister(instr->right()).is(tagged_exponent)); - DCHECK(ToDoubleRegister(instr->left()).is(xmm2)); - DCHECK(ToDoubleRegister(instr->result()).is(xmm3)); - - if (exponent_type.IsSmi()) { - MathPowStub stub(isolate(), MathPowStub::TAGGED); - __ CallStub(&stub); - } else if (exponent_type.IsTagged()) { - Label no_deopt; - __ JumpIfSmi(tagged_exponent, &no_deopt); - DCHECK(!ecx.is(tagged_exponent)); - __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, ecx); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber); - __ bind(&no_deopt); - MathPowStub stub(isolate(), MathPowStub::TAGGED); - __ CallStub(&stub); - } else if (exponent_type.IsInteger32()) { - MathPowStub stub(isolate(), MathPowStub::INTEGER); - __ CallStub(&stub); - } else { - DCHECK(exponent_type.IsDouble()); - MathPowStub stub(isolate(), MathPowStub::DOUBLE); - __ CallStub(&stub); - } -} - - -void LCodeGen::DoMathLog(LMathLog* instr) { - XMMRegister input = ToDoubleRegister(instr->value()); - XMMRegister result = ToDoubleRegister(instr->result()); - // Pass one double as argument on the stack. - __ PrepareCallCFunction(2, eax); - __ movsd(Operand(esp, 0 * kDoubleSize), input); - __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 2); - // Return value is in st(0) on ia32. - // Store it into the result register. - __ sub(esp, Immediate(kDoubleSize)); - __ fstp_d(Operand(esp, 0)); - __ movsd(result, Operand(esp, 0)); - __ add(esp, Immediate(kDoubleSize)); -} - - -void LCodeGen::DoMathClz32(LMathClz32* instr) { - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - - __ Lzcnt(result, input); -} - -void LCodeGen::DoMathCos(LMathCos* instr) { - XMMRegister input = ToDoubleRegister(instr->value()); - XMMRegister result = ToDoubleRegister(instr->result()); - // Pass one double as argument on the stack. - __ PrepareCallCFunction(2, eax); - __ movsd(Operand(esp, 0 * kDoubleSize), input); - __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 2); - // Return value is in st(0) on ia32. - // Store it into the result register. - __ sub(esp, Immediate(kDoubleSize)); - __ fstp_d(Operand(esp, 0)); - __ movsd(result, Operand(esp, 0)); - __ add(esp, Immediate(kDoubleSize)); -} - -void LCodeGen::DoMathSin(LMathSin* instr) { - XMMRegister input = ToDoubleRegister(instr->value()); - XMMRegister result = ToDoubleRegister(instr->result()); - // Pass one double as argument on the stack. - __ PrepareCallCFunction(2, eax); - __ movsd(Operand(esp, 0 * kDoubleSize), input); - __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 2); - // Return value is in st(0) on ia32. - // Store it into the result register. - __ sub(esp, Immediate(kDoubleSize)); - __ fstp_d(Operand(esp, 0)); - __ movsd(result, Operand(esp, 0)); - __ add(esp, Immediate(kDoubleSize)); -} - -void LCodeGen::DoMathExp(LMathExp* instr) { - XMMRegister input = ToDoubleRegister(instr->value()); - XMMRegister result = ToDoubleRegister(instr->result()); - // Pass one double as argument on the stack. - __ PrepareCallCFunction(2, eax); - __ movsd(Operand(esp, 0 * kDoubleSize), input); - __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 2); - // Return value is in st(0) on ia32. - // Store it into the result register. 
- __ sub(esp, Immediate(kDoubleSize)); - __ fstp_d(Operand(esp, 0)); - __ movsd(result, Operand(esp, 0)); - __ add(esp, Immediate(kDoubleSize)); -} - -void LCodeGen::PrepareForTailCall(const ParameterCount& actual, - Register scratch1, Register scratch2, - Register scratch3) { -#if DEBUG - if (actual.is_reg()) { - DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3)); - } else { - DCHECK(!AreAliased(scratch1, scratch2, scratch3)); - } -#endif - if (FLAG_code_comments) { - if (actual.is_reg()) { - Comment(";;; PrepareForTailCall, actual: %s {", - RegisterConfiguration::Crankshaft()->GetGeneralRegisterName( - actual.reg().code())); - } else { - Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate()); - } - } - - // Check if next frame is an arguments adaptor frame. - Register caller_args_count_reg = scratch1; - Label no_arguments_adaptor, formal_parameter_count_loaded; - __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); - __ cmp(Operand(scratch2, StandardFrameConstants::kContextOffset), - Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); - __ j(not_equal, &no_arguments_adaptor, Label::kNear); - - // Drop current frame and load arguments count from arguments adaptor frame. - __ mov(ebp, scratch2); - __ mov(caller_args_count_reg, - Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ SmiUntag(caller_args_count_reg); - __ jmp(&formal_parameter_count_loaded, Label::kNear); - - __ bind(&no_arguments_adaptor); - // Load caller's formal parameter count. - __ mov(caller_args_count_reg, - Immediate(info()->literal()->parameter_count())); - - __ bind(&formal_parameter_count_loaded); - __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3, - ReturnAddressState::kNotOnStack, 0); - Comment(";;; }"); -} - -void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { - HInvokeFunction* hinstr = instr->hydrogen(); - DCHECK(ToRegister(instr->context()).is(esi)); - DCHECK(ToRegister(instr->function()).is(edi)); - DCHECK(instr->HasPointerMap()); - - bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow; - - if (is_tail_call) { - DCHECK(!info()->saves_caller_doubles()); - ParameterCount actual(instr->arity()); - // It is safe to use ebx, ecx and edx as scratch registers here given that - // 1) we are not going to return to caller function anyway, - // 2) ebx (expected arguments count) and edx (new.target) will be - // initialized below. - PrepareForTailCall(actual, ebx, ecx, edx); - } - - Handle known_function = hinstr->known_function(); - if (known_function.is_null()) { - LPointerMap* pointers = instr->pointer_map(); - SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); - ParameterCount actual(instr->arity()); - InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION; - __ InvokeFunction(edi, no_reg, actual, flag, generator); - } else { - CallKnownFunction(known_function, hinstr->formal_parameter_count(), - instr->arity(), is_tail_call, instr); - } -} - - -void LCodeGen::DoCallNewArray(LCallNewArray* instr) { - DCHECK(ToRegister(instr->context()).is(esi)); - DCHECK(ToRegister(instr->constructor()).is(edi)); - DCHECK(ToRegister(instr->result()).is(eax)); - - __ Move(eax, Immediate(instr->arity())); - __ mov(ebx, instr->hydrogen()->site()); - - ElementsKind kind = instr->hydrogen()->elements_kind(); - AllocationSiteOverrideMode override_mode = AllocationSite::ShouldTrack(kind) - ? 
DISABLE_ALLOCATION_SITES - : DONT_OVERRIDE; - - if (instr->arity() == 0) { - ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - } else if (instr->arity() == 1) { - Label done; - if (IsFastPackedElementsKind(kind)) { - Label packed_case; - // We might need a change here - // look at the first argument - __ mov(ecx, Operand(esp, 0)); - __ test(ecx, ecx); - __ j(zero, &packed_case, Label::kNear); - - ElementsKind holey_kind = GetHoleyElementsKind(kind); - ArraySingleArgumentConstructorStub stub(isolate(), - holey_kind, - override_mode); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - __ jmp(&done, Label::kNear); - __ bind(&packed_case); - } - - ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - __ bind(&done); - } else { - ArrayNArgumentsConstructorStub stub(isolate()); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - } -} - - -void LCodeGen::DoCallRuntime(LCallRuntime* instr) { - DCHECK(ToRegister(instr->context()).is(esi)); - CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles()); -} - - -void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { - Register function = ToRegister(instr->function()); - Register code_object = ToRegister(instr->code_object()); - __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize)); - __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object); -} - - -void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { - Register result = ToRegister(instr->result()); - Register base = ToRegister(instr->base_object()); - if (instr->offset()->IsConstantOperand()) { - LConstantOperand* offset = LConstantOperand::cast(instr->offset()); - __ lea(result, Operand(base, ToInteger32(offset))); - } else { - Register offset = ToRegister(instr->offset()); - __ lea(result, Operand(base, offset, times_1, 0)); - } -} - - -void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { - Representation representation = instr->hydrogen()->field_representation(); - - HObjectAccess access = instr->hydrogen()->access(); - int offset = access.offset(); - - if (access.IsExternalMemory()) { - DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); - MemOperand operand = instr->object()->IsConstantOperand() - ? 
MemOperand::StaticVariable(
-           ToExternalReference(LConstantOperand::cast(instr->object())))
-       : MemOperand(ToRegister(instr->object()), offset);
-    if (instr->value()->IsConstantOperand()) {
-      LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
-      __ mov(operand, Immediate(ToInteger32(operand_value)));
-    } else {
-      Register value = ToRegister(instr->value());
-      __ Store(value, operand, representation);
-    }
-    return;
-  }
-
-  Register object = ToRegister(instr->object());
-  __ AssertNotSmi(object);
-
-  DCHECK(!representation.IsSmi() ||
-         !instr->value()->IsConstantOperand() ||
-         IsSmi(LConstantOperand::cast(instr->value())));
-  if (representation.IsDouble()) {
-    DCHECK(access.IsInobject());
-    DCHECK(!instr->hydrogen()->has_transition());
-    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
-    XMMRegister value = ToDoubleRegister(instr->value());
-    __ movsd(FieldOperand(object, offset), value);
-    return;
-  }
-
-  if (instr->hydrogen()->has_transition()) {
-    Handle<Map> transition = instr->hydrogen()->transition_map();
-    AddDeprecationDependency(transition);
-    __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
-    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
-      Register temp = ToRegister(instr->temp());
-      Register temp_map = ToRegister(instr->temp_map());
-      // Update the write barrier for the map field.
-      __ RecordWriteForMap(object, transition, temp_map, temp, kSaveFPRegs);
-    }
-  }
-
-  // Do the store.
-  Register write_register = object;
-  if (!access.IsInobject()) {
-    write_register = ToRegister(instr->temp());
-    __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
-  }
-
-  MemOperand operand = FieldOperand(write_register, offset);
-  if (instr->value()->IsConstantOperand()) {
-    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
-    if (operand_value->IsRegister()) {
-      Register value = ToRegister(operand_value);
-      __ Store(value, operand, representation);
-    } else if (representation.IsInteger32() || representation.IsExternal()) {
-      Immediate immediate = ToImmediate(operand_value, representation);
-      DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
-      __ mov(operand, immediate);
-    } else {
-      Handle<Object> handle_value = ToHandle(operand_value);
-      DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
-      __ mov(operand, handle_value);
-    }
-  } else {
-    Register value = ToRegister(instr->value());
-    __ Store(value, operand, representation);
-  }
-
-  if (instr->hydrogen()->NeedsWriteBarrier()) {
-    Register value = ToRegister(instr->value());
-    Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
-    // Update the write barrier for the object for in-object properties.
-    __ RecordWriteField(write_register,
-                        offset,
-                        value,
-                        temp,
-                        kSaveFPRegs,
-                        EMIT_REMEMBERED_SET,
-                        instr->hydrogen()->SmiCheckForWriteBarrier(),
-                        instr->hydrogen()->PointersToHereCheckForValue());
-  }
-}
-
-
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
-  Condition cc = instr->hydrogen()->allow_equality() ?
above : above_equal; - if (instr->index()->IsConstantOperand()) { - __ cmp(ToOperand(instr->length()), - ToImmediate(LConstantOperand::cast(instr->index()), - instr->hydrogen()->length()->representation())); - cc = CommuteCondition(cc); - } else if (instr->length()->IsConstantOperand()) { - __ cmp(ToOperand(instr->index()), - ToImmediate(LConstantOperand::cast(instr->length()), - instr->hydrogen()->index()->representation())); - } else { - __ cmp(ToRegister(instr->index()), ToOperand(instr->length())); - } - if (FLAG_debug_code && instr->hydrogen()->skip_check()) { - Label done; - __ j(NegateCondition(cc), &done, Label::kNear); - __ int3(); - __ bind(&done); - } else { - DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds); - } -} - - -void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { - ElementsKind elements_kind = instr->elements_kind(); - LOperand* key = instr->key(); - if (!key->IsConstantOperand() && - ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(), - elements_kind)) { - __ SmiUntag(ToRegister(key)); - } - Operand operand(BuildFastArrayOperand( - instr->elements(), - key, - instr->hydrogen()->key()->representation(), - elements_kind, - instr->base_offset())); - if (elements_kind == FLOAT32_ELEMENTS) { - XMMRegister xmm_scratch = double_scratch0(); - __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value())); - __ movss(operand, xmm_scratch); - } else if (elements_kind == FLOAT64_ELEMENTS) { - __ movsd(operand, ToDoubleRegister(instr->value())); - } else { - Register value = ToRegister(instr->value()); - switch (elements_kind) { - case UINT8_ELEMENTS: - case INT8_ELEMENTS: - case UINT8_CLAMPED_ELEMENTS: - __ mov_b(operand, value); - break; - case UINT16_ELEMENTS: - case INT16_ELEMENTS: - __ mov_w(operand, value); - break; - case UINT32_ELEMENTS: - case INT32_ELEMENTS: - __ mov(operand, value); - break; - case FLOAT32_ELEMENTS: - case FLOAT64_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case DICTIONARY_ELEMENTS: - case FAST_SLOPPY_ARGUMENTS_ELEMENTS: - case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: - case FAST_STRING_WRAPPER_ELEMENTS: - case SLOW_STRING_WRAPPER_ELEMENTS: - case NO_ELEMENTS: - UNREACHABLE(); - break; - } - } -} - - -void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { - Operand double_store_operand = BuildFastArrayOperand( - instr->elements(), - instr->key(), - instr->hydrogen()->key()->representation(), - FAST_DOUBLE_ELEMENTS, - instr->base_offset()); - - XMMRegister value = ToDoubleRegister(instr->value()); - - if (instr->NeedsCanonicalization()) { - XMMRegister xmm_scratch = double_scratch0(); - // Turn potential sNaN value into qNaN. - __ xorps(xmm_scratch, xmm_scratch); - __ subsd(value, xmm_scratch); - } - - __ movsd(double_store_operand, value); -} - - -void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { - Register elements = ToRegister(instr->elements()); - Register key = instr->key()->IsRegister() ? 
ToRegister(instr->key()) : no_reg; - - Operand operand = BuildFastArrayOperand( - instr->elements(), - instr->key(), - instr->hydrogen()->key()->representation(), - FAST_ELEMENTS, - instr->base_offset()); - if (instr->value()->IsRegister()) { - __ mov(operand, ToRegister(instr->value())); - } else { - LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); - if (IsSmi(operand_value)) { - Immediate immediate = ToImmediate(operand_value, Representation::Smi()); - __ mov(operand, immediate); - } else { - DCHECK(!IsInteger32(operand_value)); - Handle handle_value = ToHandle(operand_value); - __ mov(operand, handle_value); - } - } - - if (instr->hydrogen()->NeedsWriteBarrier()) { - DCHECK(instr->value()->IsRegister()); - Register value = ToRegister(instr->value()); - DCHECK(!instr->key()->IsConstantOperand()); - SmiCheck check_needed = - instr->hydrogen()->value()->type().IsHeapObject() - ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; - // Compute address of modified element and store it into key register. - __ lea(key, operand); - __ RecordWrite(elements, - key, - value, - kSaveFPRegs, - EMIT_REMEMBERED_SET, - check_needed, - instr->hydrogen()->PointersToHereCheckForValue()); - } -} - - -void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { - // By cases...external, fast-double, fast - if (instr->is_fixed_typed_array()) { - DoStoreKeyedExternalArray(instr); - } else if (instr->hydrogen()->value()->representation().IsDouble()) { - DoStoreKeyedFixedDoubleArray(instr); - } else { - DoStoreKeyedFixedArray(instr); - } -} - - -void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { - Register object = ToRegister(instr->object()); - Register temp = ToRegister(instr->temp()); - Label no_memento_found; - __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); - DeoptimizeIf(equal, instr, DeoptimizeReason::kMementoFound); - __ bind(&no_memento_found); -} - - -void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) { - class DeferredMaybeGrowElements final : public LDeferredCode { - public: - DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr) - : LDeferredCode(codegen), instr_(instr) {} - void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LMaybeGrowElements* instr_; - }; - - Register result = eax; - DeferredMaybeGrowElements* deferred = - new (zone()) DeferredMaybeGrowElements(this, instr); - LOperand* key = instr->key(); - LOperand* current_capacity = instr->current_capacity(); - - DCHECK(instr->hydrogen()->key()->representation().IsInteger32()); - DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32()); - DCHECK(key->IsConstantOperand() || key->IsRegister()); - DCHECK(current_capacity->IsConstantOperand() || - current_capacity->IsRegister()); - - if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) { - int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); - int32_t constant_capacity = - ToInteger32(LConstantOperand::cast(current_capacity)); - if (constant_key >= constant_capacity) { - // Deferred case. 
- __ jmp(deferred->entry()); - } - } else if (key->IsConstantOperand()) { - int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); - __ cmp(ToOperand(current_capacity), Immediate(constant_key)); - __ j(less_equal, deferred->entry()); - } else if (current_capacity->IsConstantOperand()) { - int32_t constant_capacity = - ToInteger32(LConstantOperand::cast(current_capacity)); - __ cmp(ToRegister(key), Immediate(constant_capacity)); - __ j(greater_equal, deferred->entry()); - } else { - __ cmp(ToRegister(key), ToRegister(current_capacity)); - __ j(greater_equal, deferred->entry()); - } - - __ mov(result, ToOperand(instr->elements())); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) { - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - Register result = eax; - __ Move(result, Immediate(0)); - - // We have to call a stub. - { - PushSafepointRegistersScope scope(this); - if (instr->object()->IsRegister()) { - __ Move(result, ToRegister(instr->object())); - } else { - __ mov(result, ToOperand(instr->object())); - } - - LOperand* key = instr->key(); - if (key->IsConstantOperand()) { - LConstantOperand* constant_key = LConstantOperand::cast(key); - int32_t int_key = ToInteger32(constant_key); - if (Smi::IsValid(int_key)) { - __ mov(ebx, Immediate(Smi::FromInt(int_key))); - } else { - Abort(kArrayIndexConstantValueTooBig); - } - } else { - Label is_smi; - __ Move(ebx, ToRegister(key)); - __ SmiTag(ebx); - // Deopt if the key is outside Smi range. The stub expects Smi and would - // bump the elements into dictionary mode (and trigger a deopt) anyways. - __ j(no_overflow, &is_smi); - __ PopSafepointRegisters(); - DeoptimizeIf(no_condition, instr, DeoptimizeReason::kOverflow); - __ bind(&is_smi); - } - - GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind()); - __ CallStub(&stub); - RecordSafepointWithLazyDeopt( - instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - __ StoreToSafepointRegisterSlot(result, result); - } - - // Deopt on smi, which means the elements array changed to dictionary mode. - __ test(result, Immediate(kSmiTagMask)); - DeoptimizeIf(equal, instr, DeoptimizeReason::kSmi); -} - - -void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { - UNREACHABLE(); -} - - -void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { - class DeferredStringCharCodeAt final : public LDeferredCode { - public: - DeferredStringCharCodeAt(LCodeGen* codegen, - LStringCharCodeAt* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LStringCharCodeAt* instr_; - }; - - DeferredStringCharCodeAt* deferred = - new(zone()) DeferredStringCharCodeAt(this, instr); - - StringCharLoadGenerator::Generate(masm(), - factory(), - ToRegister(instr->string()), - ToRegister(instr->index()), - ToRegister(instr->result()), - deferred->entry()); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { - Register string = ToRegister(instr->string()); - Register result = ToRegister(instr->result()); - - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. 
- __ Move(result, Immediate(0)); - - PushSafepointRegistersScope scope(this); - __ push(string); - // Push the index as a smi. This is safe because of the checks in - // DoStringCharCodeAt above. - STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue); - if (instr->index()->IsConstantOperand()) { - Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()), - Representation::Smi()); - __ push(immediate); - } else { - Register index = ToRegister(instr->index()); - __ SmiTag(index); - __ push(index); - } - CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, - instr, instr->context()); - __ AssertSmi(eax); - __ SmiUntag(eax); - __ StoreToSafepointRegisterSlot(result, eax); -} - - -void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { - class DeferredStringCharFromCode final : public LDeferredCode { - public: - DeferredStringCharFromCode(LCodeGen* codegen, - LStringCharFromCode* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { - codegen()->DoDeferredStringCharFromCode(instr_); - } - LInstruction* instr() override { return instr_; } - - private: - LStringCharFromCode* instr_; - }; - - DeferredStringCharFromCode* deferred = - new(zone()) DeferredStringCharFromCode(this, instr); - - DCHECK(instr->hydrogen()->value()->representation().IsInteger32()); - Register char_code = ToRegister(instr->char_code()); - Register result = ToRegister(instr->result()); - DCHECK(!char_code.is(result)); - - __ cmp(char_code, String::kMaxOneByteCharCode); - __ j(above, deferred->entry()); - __ Move(result, Immediate(factory()->single_character_string_cache())); - __ mov(result, FieldOperand(result, - char_code, times_pointer_size, - FixedArray::kHeaderSize)); - __ cmp(result, factory()->undefined_value()); - __ j(equal, deferred->entry()); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { - Register char_code = ToRegister(instr->char_code()); - Register result = ToRegister(instr->result()); - - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. 
- __ Move(result, Immediate(0)); - - PushSafepointRegistersScope scope(this); - __ SmiTag(char_code); - __ push(char_code); - CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr, - instr->context()); - __ StoreToSafepointRegisterSlot(result, eax); -} - - -void LCodeGen::DoStringAdd(LStringAdd* instr) { - DCHECK(ToRegister(instr->context()).is(esi)); - DCHECK(ToRegister(instr->left()).is(edx)); - DCHECK(ToRegister(instr->right()).is(eax)); - StringAddStub stub(isolate(), - instr->hydrogen()->flags(), - instr->hydrogen()->pretenure_flag()); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); -} - - -void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { - LOperand* input = instr->value(); - LOperand* output = instr->result(); - DCHECK(input->IsRegister() || input->IsStackSlot()); - DCHECK(output->IsDoubleRegister()); - __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input)); -} - - -void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { - LOperand* input = instr->value(); - LOperand* output = instr->result(); - __ LoadUint32(ToDoubleRegister(output), ToRegister(input)); -} - - -void LCodeGen::DoNumberTagI(LNumberTagI* instr) { - class DeferredNumberTagI final : public LDeferredCode { - public: - DeferredNumberTagI(LCodeGen* codegen, - LNumberTagI* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { - codegen()->DoDeferredNumberTagIU( - instr_, instr_->value(), instr_->temp(), SIGNED_INT32); - } - LInstruction* instr() override { return instr_; } - - private: - LNumberTagI* instr_; - }; - - LOperand* input = instr->value(); - DCHECK(input->IsRegister() && input->Equals(instr->result())); - Register reg = ToRegister(input); - - DeferredNumberTagI* deferred = - new(zone()) DeferredNumberTagI(this, instr); - __ SmiTag(reg); - __ j(overflow, deferred->entry()); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoNumberTagU(LNumberTagU* instr) { - class DeferredNumberTagU final : public LDeferredCode { - public: - DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { - codegen()->DoDeferredNumberTagIU( - instr_, instr_->value(), instr_->temp(), UNSIGNED_INT32); - } - LInstruction* instr() override { return instr_; } - - private: - LNumberTagU* instr_; - }; - - LOperand* input = instr->value(); - DCHECK(input->IsRegister() && input->Equals(instr->result())); - Register reg = ToRegister(input); - - DeferredNumberTagU* deferred = - new(zone()) DeferredNumberTagU(this, instr); - __ cmp(reg, Immediate(Smi::kMaxValue)); - __ j(above, deferred->entry()); - __ SmiTag(reg); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, - LOperand* value, - LOperand* temp, - IntegerSignedness signedness) { - Label done, slow; - Register reg = ToRegister(value); - Register tmp = ToRegister(temp); - XMMRegister xmm_scratch = double_scratch0(); - - if (signedness == SIGNED_INT32) { - // There was overflow, so bits 30 and 31 of the original integer - // disagree. Try to allocate a heap number in new space and store - // the value in there. If that fails, call the runtime system. - __ SmiUntag(reg); - __ xor_(reg, 0x80000000); - __ Cvtsi2sd(xmm_scratch, Operand(reg)); - } else { - __ LoadUint32(xmm_scratch, reg); - } - - if (FLAG_inline_new) { - __ AllocateHeapNumber(reg, tmp, no_reg, &slow); - __ jmp(&done, Label::kNear); - } - - // Slow case: Call the runtime system to do the number allocation. 
- __ bind(&slow); - { - // TODO(3095996): Put a valid pointer value in the stack slot where the - // result register is stored, as this register is in the pointer map, but - // contains an integer value. - __ Move(reg, Immediate(0)); - - // Preserve the value of all registers. - PushSafepointRegistersScope scope(this); - // Reset the context register. - if (!reg.is(esi)) { - __ Move(esi, Immediate(0)); - } - __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); - RecordSafepointWithRegisters( - instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(reg, eax); - } - - // Done. Put the value in xmm_scratch into the value of the allocated heap - // number. - __ bind(&done); - __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch); -} - - -void LCodeGen::DoNumberTagD(LNumberTagD* instr) { - class DeferredNumberTagD final : public LDeferredCode { - public: - DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { codegen()->DoDeferredNumberTagD(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LNumberTagD* instr_; - }; - - Register reg = ToRegister(instr->result()); - - DeferredNumberTagD* deferred = - new(zone()) DeferredNumberTagD(this, instr); - if (FLAG_inline_new) { - Register tmp = ToRegister(instr->temp()); - __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry()); - } else { - __ jmp(deferred->entry()); - } - __ bind(deferred->exit()); - XMMRegister input_reg = ToDoubleRegister(instr->value()); - __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); -} - - -void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - Register reg = ToRegister(instr->result()); - __ Move(reg, Immediate(0)); - - PushSafepointRegistersScope scope(this); - // Reset the context register. 
- if (!reg.is(esi)) { - __ Move(esi, Immediate(0)); - } - __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); - RecordSafepointWithRegisters( - instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(reg, eax); -} - - -void LCodeGen::DoSmiTag(LSmiTag* instr) { - HChange* hchange = instr->hydrogen(); - Register input = ToRegister(instr->value()); - if (hchange->CheckFlag(HValue::kCanOverflow) && - hchange->value()->CheckFlag(HValue::kUint32)) { - __ test(input, Immediate(0xc0000000)); - DeoptimizeIf(not_zero, instr, DeoptimizeReason::kOverflow); - } - __ SmiTag(input); - if (hchange->CheckFlag(HValue::kCanOverflow) && - !hchange->value()->CheckFlag(HValue::kUint32)) { - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); - } -} - - -void LCodeGen::DoSmiUntag(LSmiUntag* instr) { - LOperand* input = instr->value(); - Register result = ToRegister(input); - DCHECK(input->IsRegister() && input->Equals(instr->result())); - if (instr->needs_check()) { - __ test(result, Immediate(kSmiTagMask)); - DeoptimizeIf(not_zero, instr, DeoptimizeReason::kNotASmi); - } else { - __ AssertSmi(result); - } - __ SmiUntag(result); -} - - -void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, - Register temp_reg, XMMRegister result_reg, - NumberUntagDMode mode) { - bool can_convert_undefined_to_nan = instr->truncating(); - bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); - - Label convert, load_smi, done; - - if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { - // Smi check. - __ JumpIfSmi(input_reg, &load_smi, Label::kNear); - - // Heap number map check. - __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), - factory()->heap_number_map()); - if (can_convert_undefined_to_nan) { - __ j(not_equal, &convert, Label::kNear); - } else { - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber); - } - - // Heap number to XMM conversion. - __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); - - if (deoptimize_on_minus_zero) { - XMMRegister xmm_scratch = double_scratch0(); - __ xorps(xmm_scratch, xmm_scratch); - __ ucomisd(result_reg, xmm_scratch); - __ j(not_zero, &done, Label::kNear); - __ movmskpd(temp_reg, result_reg); - __ test_b(temp_reg, Immediate(1)); - DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero); - } - __ jmp(&done, Label::kNear); - - if (can_convert_undefined_to_nan) { - __ bind(&convert); - - // Convert undefined to NaN. - __ cmp(input_reg, factory()->undefined_value()); - DeoptimizeIf(not_equal, instr, - DeoptimizeReason::kNotAHeapNumberUndefined); - - __ xorpd(result_reg, result_reg); - __ divsd(result_reg, result_reg); - __ jmp(&done, Label::kNear); - } - } else { - DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); - } - - __ bind(&load_smi); - // Smi to XMM conversion. Clobbering a temp is faster than re-tagging the - // input register since we avoid dependencies. - __ mov(temp_reg, input_reg); - __ SmiUntag(temp_reg); // Untag smi before converting to float. - __ Cvtsi2sd(result_reg, Operand(temp_reg)); - __ bind(&done); -} - - -void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) { - Register input_reg = ToRegister(instr->value()); - - // The input was optimistically untagged; revert it. - STATIC_ASSERT(kSmiTagSize == 1); - __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag)); - - if (instr->truncating()) { - Label truncate; - Label::Distance truncate_distance = - DeoptEveryNTimes() ? 
Label::kFar : Label::kNear; - __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), - factory()->heap_number_map()); - __ j(equal, &truncate, truncate_distance); - __ push(input_reg); - __ CmpObjectType(input_reg, ODDBALL_TYPE, input_reg); - __ pop(input_reg); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotANumberOrOddball); - __ bind(&truncate); - __ TruncateHeapNumberToI(input_reg, input_reg); - } else { - XMMRegister scratch = ToDoubleRegister(instr->temp()); - DCHECK(!scratch.is(xmm0)); - __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), - isolate()->factory()->heap_number_map()); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber); - __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); - __ cvttsd2si(input_reg, Operand(xmm0)); - __ Cvtsi2sd(scratch, Operand(input_reg)); - __ ucomisd(xmm0, scratch); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision); - DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN); - if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) { - __ test(input_reg, Operand(input_reg)); - __ j(not_zero, done); - __ movmskpd(input_reg, xmm0); - __ and_(input_reg, 1); - DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero); - } - } -} - - -void LCodeGen::DoTaggedToI(LTaggedToI* instr) { - class DeferredTaggedToI final : public LDeferredCode { - public: - DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { codegen()->DoDeferredTaggedToI(instr_, done()); } - LInstruction* instr() override { return instr_; } - - private: - LTaggedToI* instr_; - }; - - LOperand* input = instr->value(); - DCHECK(input->IsRegister()); - Register input_reg = ToRegister(input); - DCHECK(input_reg.is(ToRegister(instr->result()))); - - if (instr->hydrogen()->value()->representation().IsSmi()) { - __ SmiUntag(input_reg); - } else { - DeferredTaggedToI* deferred = - new(zone()) DeferredTaggedToI(this, instr); - // Optimistically untag the input. - // If the input is a HeapObject, SmiUntag will set the carry flag. - STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); - __ SmiUntag(input_reg); - // Branch to deferred code if the input was tagged. - // The deferred code will take care of restoring the tag. - __ j(carry, deferred->entry()); - __ bind(deferred->exit()); - } -} - - -void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { - LOperand* input = instr->value(); - DCHECK(input->IsRegister()); - LOperand* temp = instr->temp(); - DCHECK(temp->IsRegister()); - LOperand* result = instr->result(); - DCHECK(result->IsDoubleRegister()); - - Register input_reg = ToRegister(input); - Register temp_reg = ToRegister(temp); - - HValue* value = instr->hydrogen()->value(); - NumberUntagDMode mode = value->representation().IsSmi() - ? 
NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; - - XMMRegister result_reg = ToDoubleRegister(result); - EmitNumberUntagD(instr, input_reg, temp_reg, result_reg, mode); -} - - -void LCodeGen::DoDoubleToI(LDoubleToI* instr) { - LOperand* input = instr->value(); - DCHECK(input->IsDoubleRegister()); - LOperand* result = instr->result(); - DCHECK(result->IsRegister()); - Register result_reg = ToRegister(result); - - if (instr->truncating()) { - XMMRegister input_reg = ToDoubleRegister(input); - __ TruncateDoubleToI(result_reg, input_reg); - } else { - Label lost_precision, is_nan, minus_zero, done; - XMMRegister input_reg = ToDoubleRegister(input); - XMMRegister xmm_scratch = double_scratch0(); - Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; - __ DoubleToI(result_reg, input_reg, xmm_scratch, - instr->hydrogen()->GetMinusZeroMode(), &lost_precision, - &is_nan, &minus_zero, dist); - __ jmp(&done, dist); - __ bind(&lost_precision); - DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision); - __ bind(&is_nan); - DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN); - __ bind(&minus_zero); - DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero); - __ bind(&done); - } -} - - -void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { - LOperand* input = instr->value(); - DCHECK(input->IsDoubleRegister()); - LOperand* result = instr->result(); - DCHECK(result->IsRegister()); - Register result_reg = ToRegister(result); - - Label lost_precision, is_nan, minus_zero, done; - XMMRegister input_reg = ToDoubleRegister(input); - XMMRegister xmm_scratch = double_scratch0(); - Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; - __ DoubleToI(result_reg, input_reg, xmm_scratch, - instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan, - &minus_zero, dist); - __ jmp(&done, dist); - __ bind(&lost_precision); - DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision); - __ bind(&is_nan); - DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN); - __ bind(&minus_zero); - DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero); - __ bind(&done); - __ SmiTag(result_reg); - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); -} - - -void LCodeGen::DoCheckSmi(LCheckSmi* instr) { - LOperand* input = instr->value(); - __ test(ToOperand(input), Immediate(kSmiTagMask)); - DeoptimizeIf(not_zero, instr, DeoptimizeReason::kNotASmi); -} - - -void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - LOperand* input = instr->value(); - __ test(ToOperand(input), Immediate(kSmiTagMask)); - DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi); - } -} - - -void LCodeGen::DoCheckArrayBufferNotNeutered( - LCheckArrayBufferNotNeutered* instr) { - Register view = ToRegister(instr->view()); - Register scratch = ToRegister(instr->scratch()); - - __ mov(scratch, FieldOperand(view, JSArrayBufferView::kBufferOffset)); - __ test_b(FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset), - Immediate(1 << JSArrayBuffer::WasNeutered::kShift)); - DeoptimizeIf(not_zero, instr, DeoptimizeReason::kOutOfBounds); -} - - -void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { - Register input = ToRegister(instr->value()); - Register temp = ToRegister(instr->temp()); - - __ mov(temp, FieldOperand(input, HeapObject::kMapOffset)); - - if (instr->hydrogen()->is_interval_check()) { - InstanceType first; - InstanceType last; - instr->hydrogen()->GetCheckInterval(&first, &last); - - __ 
cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(first)); - - // If there is only one type in the interval check for equality. - if (first == last) { - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType); - } else { - DeoptimizeIf(below, instr, DeoptimizeReason::kWrongInstanceType); - // Omit check for the last type. - if (last != LAST_TYPE) { - __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(last)); - DeoptimizeIf(above, instr, DeoptimizeReason::kWrongInstanceType); - } - } - } else { - uint8_t mask; - uint8_t tag; - instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); - - if (base::bits::IsPowerOfTwo32(mask)) { - DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); - __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(mask)); - DeoptimizeIf(tag == 0 ? not_zero : zero, instr, - DeoptimizeReason::kWrongInstanceType); - } else { - __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset)); - __ and_(temp, mask); - __ cmp(temp, tag); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType); - } - } -} - - -void LCodeGen::DoCheckValue(LCheckValue* instr) { - Handle object = instr->hydrogen()->object().handle(); - if (instr->hydrogen()->object_in_new_space()) { - Register reg = ToRegister(instr->value()); - Handle cell = isolate()->factory()->NewCell(object); - __ cmp(reg, Operand::ForCell(cell)); - } else { - Operand operand = ToOperand(instr->value()); - __ cmp(operand, object); - } - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kValueMismatch); -} - - -void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { - Label deopt, done; - // If the map is not deprecated the migration attempt does not make sense. - __ push(object); - __ mov(object, FieldOperand(object, HeapObject::kMapOffset)); - __ test(FieldOperand(object, Map::kBitField3Offset), - Immediate(Map::Deprecated::kMask)); - __ pop(object); - __ j(zero, &deopt); - - { - PushSafepointRegistersScope scope(this); - __ push(object); - __ xor_(esi, esi); - __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); - RecordSafepointWithRegisters( - instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); - - __ test(eax, Immediate(kSmiTagMask)); - } - __ j(not_zero, &done); - - __ bind(&deopt); - DeoptimizeIf(no_condition, instr, DeoptimizeReason::kInstanceMigrationFailed); - - __ bind(&done); -} - - -void LCodeGen::DoCheckMaps(LCheckMaps* instr) { - class DeferredCheckMaps final : public LDeferredCode { - public: - DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) - : LDeferredCode(codegen), instr_(instr), object_(object) { - SetExit(check_maps()); - } - void Generate() override { - codegen()->DoDeferredInstanceMigration(instr_, object_); - } - Label* check_maps() { return &check_maps_; } - LInstruction* instr() override { return instr_; } - - private: - LCheckMaps* instr_; - Label check_maps_; - Register object_; - }; - - if (instr->hydrogen()->IsStabilityCheck()) { - const UniqueSet* maps = instr->hydrogen()->maps(); - for (int i = 0; i < maps->size(); ++i) { - AddStabilityDependency(maps->at(i).handle()); - } - return; - } - - LOperand* input = instr->value(); - DCHECK(input->IsRegister()); - Register reg = ToRegister(input); - - DeferredCheckMaps* deferred = NULL; - if (instr->hydrogen()->HasMigrationTarget()) { - deferred = new(zone()) DeferredCheckMaps(this, instr, reg); - __ bind(deferred->check_maps()); - } - - const UniqueSet* maps = instr->hydrogen()->maps(); - Label success; - for (int i = 0; i < maps->size() - 1; 
i++) { - Handle map = maps->at(i).handle(); - __ CompareMap(reg, map); - __ j(equal, &success, Label::kNear); - } - - Handle map = maps->at(maps->size() - 1).handle(); - __ CompareMap(reg, map); - if (instr->hydrogen()->HasMigrationTarget()) { - __ j(not_equal, deferred->entry()); - } else { - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap); - } - - __ bind(&success); -} - - -void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { - XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); - XMMRegister xmm_scratch = double_scratch0(); - Register result_reg = ToRegister(instr->result()); - __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg); -} - - -void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { - DCHECK(instr->unclamped()->Equals(instr->result())); - Register value_reg = ToRegister(instr->result()); - __ ClampUint8(value_reg); -} - - -void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { - DCHECK(instr->unclamped()->Equals(instr->result())); - Register input_reg = ToRegister(instr->unclamped()); - XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm()); - XMMRegister xmm_scratch = double_scratch0(); - Label is_smi, done, heap_number; - - __ JumpIfSmi(input_reg, &is_smi); - - // Check for heap number - __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), - factory()->heap_number_map()); - __ j(equal, &heap_number, Label::kNear); - - // Check for undefined. Undefined is converted to zero for clamping - // conversions. - __ cmp(input_reg, factory()->undefined_value()); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumberUndefined); - __ mov(input_reg, 0); - __ jmp(&done, Label::kNear); - - // Heap number - __ bind(&heap_number); - __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset)); - __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg); - __ jmp(&done, Label::kNear); - - // smi - __ bind(&is_smi); - __ SmiUntag(input_reg); - __ ClampUint8(input_reg); - __ bind(&done); -} - - -void LCodeGen::DoAllocate(LAllocate* instr) { - class DeferredAllocate final : public LDeferredCode { - public: - DeferredAllocate(LCodeGen* codegen, LAllocate* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { codegen()->DoDeferredAllocate(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LAllocate* instr_; - }; - - DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr); - - Register result = ToRegister(instr->result()); - Register temp = ToRegister(instr->temp()); - - // Allocate memory for the object. 
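The three clamp instructions a few lines above all reduce to a saturating double-to-uint8 conversion (the Uint8ClampedArray store semantics). A rough standalone equivalent, assuming the usual round-half-to-even behaviour of ClampDoubleToUint8 rather than anything spelled out in this diff:

#include <cmath>
#include <cstdint>

// NaN and negative inputs clamp to 0, values of 255 or more clamp to 255,
// everything else rounds to the nearest integer (ties to even under the
// default rounding mode).
uint8_t ClampDoubleToUint8(double value) {
  if (!(value > 0.0)) return 0;    // also catches NaN
  if (value >= 255.0) return 255;
  return static_cast<uint8_t>(std::nearbyint(value));
}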
- AllocationFlags flags = NO_ALLOCATION_FLAGS; - if (instr->hydrogen()->MustAllocateDoubleAligned()) { - flags = static_cast(flags | DOUBLE_ALIGNMENT); - } - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - flags = static_cast(flags | PRETENURE); - } - if (instr->hydrogen()->IsAllocationFoldingDominator()) { - flags = static_cast(flags | ALLOCATION_FOLDING_DOMINATOR); - } - DCHECK(!instr->hydrogen()->IsAllocationFolded()); - - if (instr->size()->IsConstantOperand()) { - int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - CHECK(size <= kMaxRegularHeapObjectSize); - __ Allocate(size, result, temp, no_reg, deferred->entry(), flags); - } else { - Register size = ToRegister(instr->size()); - __ Allocate(size, result, temp, no_reg, deferred->entry(), flags); - } - - __ bind(deferred->exit()); - - if (instr->hydrogen()->MustPrefillWithFiller()) { - if (instr->size()->IsConstantOperand()) { - int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - __ mov(temp, (size / kPointerSize) - 1); - } else { - temp = ToRegister(instr->size()); - __ shr(temp, kPointerSizeLog2); - __ dec(temp); - } - Label loop; - __ bind(&loop); - __ mov(FieldOperand(result, temp, times_pointer_size, 0), - isolate()->factory()->one_pointer_filler_map()); - __ dec(temp); - __ j(not_zero, &loop); - } -} - -void LCodeGen::DoFastAllocate(LFastAllocate* instr) { - DCHECK(instr->hydrogen()->IsAllocationFolded()); - DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator()); - Register result = ToRegister(instr->result()); - Register temp = ToRegister(instr->temp()); - - AllocationFlags flags = ALLOCATION_FOLDED; - if (instr->hydrogen()->MustAllocateDoubleAligned()) { - flags = static_cast(flags | DOUBLE_ALIGNMENT); - } - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - flags = static_cast(flags | PRETENURE); - } - if (instr->size()->IsConstantOperand()) { - int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - CHECK(size <= kMaxRegularHeapObjectSize); - __ FastAllocate(size, result, temp, flags); - } else { - Register size = ToRegister(instr->size()); - __ FastAllocate(size, result, temp, flags); - } -} - -void LCodeGen::DoDeferredAllocate(LAllocate* instr) { - Register result = ToRegister(instr->result()); - - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. 
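DoAllocate above emits an inline bump allocation and only reaches the runtime through the deferred path that follows. A toy sketch of that fast-path/slow-path split; the buffer, the fallback, and the names are all invented for illustration and stand in for new-space top/limit and Runtime::kAllocateInTargetSpace:

#include <cstddef>
#include <cstdint>
#include <cstdlib>

static uint8_t g_buffer[1 << 16];  // hypothetical linear allocation buffer
static size_t g_top = 0;

void* SlowPathAllocate(size_t size) { return std::malloc(size); }  // runtime stand-in

void* Allocate(size_t size) {
  size = (size + 7) & ~static_cast<size_t>(7);  // keep 8-byte alignment, as the
                                                // DOUBLE_ALIGNMENT flag above does
  if (g_top + size <= sizeof g_buffer) {        // fast path: bump the top pointer
    void* result = g_buffer + g_top;
    g_top += size;
    return result;
  }
  return SlowPathAllocate(size);                // slow path once the space is full
}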
- __ Move(result, Immediate(Smi::kZero)); - - PushSafepointRegistersScope scope(this); - if (instr->size()->IsRegister()) { - Register size = ToRegister(instr->size()); - DCHECK(!size.is(result)); - __ SmiTag(ToRegister(instr->size())); - __ push(size); - } else { - int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - if (size >= 0 && size <= Smi::kMaxValue) { - __ push(Immediate(Smi::FromInt(size))); - } else { - // We should never get here at runtime => abort - __ int3(); - return; - } - } - - int flags = AllocateDoubleAlignFlag::encode( - instr->hydrogen()->MustAllocateDoubleAligned()); - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - flags = AllocateTargetSpace::update(flags, OLD_SPACE); - } else { - flags = AllocateTargetSpace::update(flags, NEW_SPACE); - } - __ push(Immediate(Smi::FromInt(flags))); - - CallRuntimeFromDeferred( - Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); - __ StoreToSafepointRegisterSlot(result, eax); - - if (instr->hydrogen()->IsAllocationFoldingDominator()) { - AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS; - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - allocation_flags = static_cast(flags | PRETENURE); - } - // If the allocation folding dominator allocate triggered a GC, allocation - // happend in the runtime. We have to reset the top pointer to virtually - // undo the allocation. - ExternalReference allocation_top = - AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags); - __ sub(eax, Immediate(kHeapObjectTag)); - __ mov(Operand::StaticVariable(allocation_top), eax); - __ add(eax, Immediate(kHeapObjectTag)); - } -} - - -void LCodeGen::DoTypeof(LTypeof* instr) { - DCHECK(ToRegister(instr->context()).is(esi)); - DCHECK(ToRegister(instr->value()).is(ebx)); - Label end, do_call; - Register value_register = ToRegister(instr->value()); - __ JumpIfNotSmi(value_register, &do_call); - __ mov(eax, Immediate(isolate()->factory()->number_string())); - __ jmp(&end); - __ bind(&do_call); - Callable callable = Builtins::CallableFor(isolate(), Builtins::kTypeof); - CallCode(callable.code(), RelocInfo::CODE_TARGET, instr); - __ bind(&end); -} - - -void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { - Register input = ToRegister(instr->value()); - Condition final_branch_condition = EmitTypeofIs(instr, input); - if (final_branch_condition != no_condition) { - EmitBranch(instr, final_branch_condition); - } -} - - -Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) { - Label* true_label = instr->TrueLabel(chunk_); - Label* false_label = instr->FalseLabel(chunk_); - Handle type_name = instr->type_literal(); - int left_block = instr->TrueDestination(chunk_); - int right_block = instr->FalseDestination(chunk_); - int next_block = GetNextEmittedBlock(); - - Label::Distance true_distance = left_block == next_block ? Label::kNear - : Label::kFar; - Label::Distance false_distance = right_block == next_block ? 
Label::kNear - : Label::kFar; - Condition final_branch_condition = no_condition; - if (String::Equals(type_name, factory()->number_string())) { - __ JumpIfSmi(input, true_label, true_distance); - __ cmp(FieldOperand(input, HeapObject::kMapOffset), - factory()->heap_number_map()); - final_branch_condition = equal; - - } else if (String::Equals(type_name, factory()->string_string())) { - __ JumpIfSmi(input, false_label, false_distance); - __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input); - final_branch_condition = below; - - } else if (String::Equals(type_name, factory()->symbol_string())) { - __ JumpIfSmi(input, false_label, false_distance); - __ CmpObjectType(input, SYMBOL_TYPE, input); - final_branch_condition = equal; - - } else if (String::Equals(type_name, factory()->boolean_string())) { - __ cmp(input, factory()->true_value()); - __ j(equal, true_label, true_distance); - __ cmp(input, factory()->false_value()); - final_branch_condition = equal; - - } else if (String::Equals(type_name, factory()->undefined_string())) { - __ cmp(input, factory()->null_value()); - __ j(equal, false_label, false_distance); - __ JumpIfSmi(input, false_label, false_distance); - // Check for undetectable objects => true. - __ mov(input, FieldOperand(input, HeapObject::kMapOffset)); - __ test_b(FieldOperand(input, Map::kBitFieldOffset), - Immediate(1 << Map::kIsUndetectable)); - final_branch_condition = not_zero; - - } else if (String::Equals(type_name, factory()->function_string())) { - __ JumpIfSmi(input, false_label, false_distance); - // Check for callable and not undetectable objects => true. - __ mov(input, FieldOperand(input, HeapObject::kMapOffset)); - __ movzx_b(input, FieldOperand(input, Map::kBitFieldOffset)); - __ and_(input, (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)); - __ cmp(input, 1 << Map::kIsCallable); - final_branch_condition = equal; - - } else if (String::Equals(type_name, factory()->object_string())) { - __ JumpIfSmi(input, false_label, false_distance); - __ cmp(input, factory()->null_value()); - __ j(equal, true_label, true_distance); - STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); - __ CmpObjectType(input, FIRST_JS_RECEIVER_TYPE, input); - __ j(below, false_label, false_distance); - // Check for callable or undetectable objects => false. - __ test_b(FieldOperand(input, Map::kBitFieldOffset), - Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable))); - final_branch_condition = zero; - } else { - __ jmp(false_label, false_distance); - } - return final_branch_condition; -} - - -void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { - if (info()->ShouldEnsureSpaceForLazyDeopt()) { - // Ensure that we have enough space after the previous lazy-bailout - // instruction for patching the code here. 
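The EmitTypeofIs body above walks a fixed ladder of checks: smi or heap-number map means "number", then string, symbol, boolean, undefined, callable, and finally plain receivers and null fall out as "object". The same decision order, sketched over a stand-in value model rather than tagged heap objects (none of these types are V8's, and the symbol case is omitted):

#include <cstdint>
#include <string>
#include <variant>

struct Undefined {};
struct Null {};
struct Callable {};  // stands in for "callable and not undetectable"
using Value = std::variant<int32_t, double, std::string, bool, Undefined, Null, Callable>;

const char* TypeofString(const Value& v) {
  if (std::holds_alternative<int32_t>(v) ||
      std::holds_alternative<double>(v))      return "number";
  if (std::holds_alternative<std::string>(v)) return "string";
  if (std::holds_alternative<bool>(v))        return "boolean";
  if (std::holds_alternative<Undefined>(v))   return "undefined";
  if (std::holds_alternative<Callable>(v))    return "function";
  return "object";  // null and plain receivers end up here
}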
- int current_pc = masm()->pc_offset(); - if (current_pc < last_lazy_deopt_pc_ + space_needed) { - int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; - __ Nop(padding_size); - } - } - last_lazy_deopt_pc_ = masm()->pc_offset(); -} - - -void LCodeGen::DoLazyBailout(LLazyBailout* instr) { - last_lazy_deopt_pc_ = masm()->pc_offset(); - DCHECK(instr->HasEnvironment()); - LEnvironment* env = instr->environment(); - RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); - safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); -} - - -void LCodeGen::DoDeoptimize(LDeoptimize* instr) { - Deoptimizer::BailoutType type = instr->hydrogen()->type(); - // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the - // needed return address), even though the implementation of LAZY and EAGER is - // now identical. When LAZY is eventually completely folded into EAGER, remove - // the special case below. - if (info()->IsStub() && type == Deoptimizer::EAGER) { - type = Deoptimizer::LAZY; - } - DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type); -} - - -void LCodeGen::DoDummy(LDummy* instr) { - // Nothing to see here, move on! -} - - -void LCodeGen::DoDummyUse(LDummyUse* instr) { - // Nothing to see here, move on! -} - - -void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { - PushSafepointRegistersScope scope(this); - __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); - __ CallRuntimeSaveDoubles(Runtime::kStackGuard); - RecordSafepointWithLazyDeopt( - instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - DCHECK(instr->HasEnvironment()); - LEnvironment* env = instr->environment(); - safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); -} - - -void LCodeGen::DoStackCheck(LStackCheck* instr) { - class DeferredStackCheck final : public LDeferredCode { - public: - DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { codegen()->DoDeferredStackCheck(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LStackCheck* instr_; - }; - - DCHECK(instr->HasEnvironment()); - LEnvironment* env = instr->environment(); - // There is no LLazyBailout instruction for stack-checks. We have to - // prepare for lazy deoptimization explicitly here. - if (instr->hydrogen()->is_function_entry()) { - // Perform stack overflow check. - Label done; - ExternalReference stack_limit = - ExternalReference::address_of_stack_limit(isolate()); - __ cmp(esp, Operand::StaticVariable(stack_limit)); - __ j(above_equal, &done, Label::kNear); - - DCHECK(instr->context()->IsRegister()); - DCHECK(ToRegister(instr->context()).is(esi)); - CallCode(isolate()->builtins()->StackCheck(), - RelocInfo::CODE_TARGET, - instr); - __ bind(&done); - } else { - DCHECK(instr->hydrogen()->is_backwards_branch()); - // Perform stack overflow check if this goto needs it before jumping. - DeferredStackCheck* deferred_stack_check = - new(zone()) DeferredStackCheck(this, instr); - ExternalReference stack_limit = - ExternalReference::address_of_stack_limit(isolate()); - __ cmp(esp, Operand::StaticVariable(stack_limit)); - __ j(below, deferred_stack_check->entry()); - EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); - __ bind(instr->done_label()); - deferred_stack_check->SetExit(instr->done_label()); - RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); - // Don't record a deoptimization index for the safepoint here. 
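Both stack-check variants above boil down to comparing esp against an isolate-wide limit and only calling a guard when that limit is crossed. In portable terms the shape is roughly the following; the guard function and parameter passing are stand-ins, since the deleted code reads the limit through an ExternalReference and calls Builtins::StackCheck / Runtime::kStackGuard:

#include <cstdint>
#include <cstdio>

void RuntimeStackGuard() { std::puts("stack limit reached"); }  // slow-path stand-in

inline void StackCheck(uintptr_t stack_pointer, uintptr_t stack_limit) {
  if (stack_pointer < stack_limit) {  // the j(below, ...) / j(above_equal, ...) branch above
    RuntimeStackGuard();
  }
}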
- // This will be done explicitly when emitting call and the safepoint in - // the deferred code. - } -} - - -void LCodeGen::DoOsrEntry(LOsrEntry* instr) { - // This is a pseudo-instruction that ensures that the environment here is - // properly registered for deoptimization and records the assembler's PC - // offset. - LEnvironment* environment = instr->environment(); - - // If the environment were already registered, we would have no way of - // backpatching it with the spill slot operands. - DCHECK(!environment->HasBeenRegistered()); - RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); - - GenerateOsrPrologue(); -} - - -void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { - DCHECK(ToRegister(instr->context()).is(esi)); - - Label use_cache, call_runtime; - __ CheckEnumCache(&call_runtime); - - __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset)); - __ jmp(&use_cache, Label::kNear); - - // Get the set of properties to enumerate. - __ bind(&call_runtime); - __ push(eax); - CallRuntime(Runtime::kForInEnumerate, instr); - __ bind(&use_cache); -} - - -void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { - Register map = ToRegister(instr->map()); - Register result = ToRegister(instr->result()); - Label load_cache, done; - __ EnumLength(result, map); - __ cmp(result, Immediate(Smi::kZero)); - __ j(not_equal, &load_cache, Label::kNear); - __ mov(result, isolate()->factory()->empty_fixed_array()); - __ jmp(&done, Label::kNear); - - __ bind(&load_cache); - __ LoadInstanceDescriptors(map, result); - __ mov(result, FieldOperand(result, DescriptorArray::kEnumCacheBridgeOffset)); - __ mov(result, - FieldOperand(result, FixedArray::SizeFor(instr->idx()))); - __ bind(&done); - __ test(result, result); - DeoptimizeIf(equal, instr, DeoptimizeReason::kNoCache); -} - - -void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { - Register object = ToRegister(instr->value()); - __ cmp(ToRegister(instr->map()), - FieldOperand(object, HeapObject::kMapOffset)); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap); -} - - -void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, - Register object, - Register index) { - PushSafepointRegistersScope scope(this); - __ push(object); - __ push(index); - __ xor_(esi, esi); - __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); - RecordSafepointWithRegisters( - instr->pointer_map(), 2, Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(object, eax); -} - - -void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { - class DeferredLoadMutableDouble final : public LDeferredCode { - public: - DeferredLoadMutableDouble(LCodeGen* codegen, - LLoadFieldByIndex* instr, - Register object, - Register index) - : LDeferredCode(codegen), - instr_(instr), - object_(object), - index_(index) { - } - void Generate() override { - codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_); - } - LInstruction* instr() override { return instr_; } - - private: - LLoadFieldByIndex* instr_; - Register object_; - Register index_; - }; - - Register object = ToRegister(instr->object()); - Register index = ToRegister(instr->index()); - - DeferredLoadMutableDouble* deferred; - deferred = new(zone()) DeferredLoadMutableDouble( - this, instr, object, index); - - Label out_of_object, done; - __ test(index, Immediate(Smi::FromInt(1))); - __ j(not_zero, deferred->entry()); - - __ sar(index, 1); - - __ cmp(index, Immediate(0)); - __ j(less, &out_of_object, Label::kNear); - __ mov(object, FieldOperand(object, - index, - 
times_half_pointer_size, - JSObject::kHeaderSize)); - __ jmp(&done, Label::kNear); - - __ bind(&out_of_object); - __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset)); - __ neg(index); - // Index is now equal to out of object property index plus 1. - __ mov(object, FieldOperand(object, - index, - times_half_pointer_size, - FixedArray::kHeaderSize - kPointerSize)); - __ bind(deferred->exit()); - __ bind(&done); -} - -#undef __ - -} // namespace internal -} // namespace v8 - -#endif // V8_TARGET_ARCH_IA32 diff --git a/src/crankshaft/ia32/lithium-codegen-ia32.h b/src/crankshaft/ia32/lithium-codegen-ia32.h deleted file mode 100644 index 133b8b99a0..0000000000 --- a/src/crankshaft/ia32/lithium-codegen-ia32.h +++ /dev/null @@ -1,387 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_IA32_LITHIUM_CODEGEN_IA32_H_ -#define V8_CRANKSHAFT_IA32_LITHIUM_CODEGEN_IA32_H_ - -#include "src/ast/scopes.h" -#include "src/base/logging.h" -#include "src/crankshaft/ia32/lithium-gap-resolver-ia32.h" -#include "src/crankshaft/ia32/lithium-ia32.h" -#include "src/crankshaft/lithium-codegen.h" -#include "src/deoptimizer.h" -#include "src/safepoint-table.h" -#include "src/utils.h" - -namespace v8 { -namespace internal { - -// Forward declarations. -class LDeferredCode; -class LGapNode; -class SafepointGenerator; - -class LCodeGen: public LCodeGenBase { - public: - LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) - : LCodeGenBase(chunk, assembler, info), - jump_table_(4, info->zone()), - scope_(info->scope()), - deferred_(8, info->zone()), - frame_is_built_(false), - safepoints_(info->zone()), - resolver_(this), - expected_safepoint_kind_(Safepoint::kSimple) { - PopulateDeoptimizationLiteralsWithInlinedFunctions(); - } - - int LookupDestination(int block_id) const { - return chunk()->LookupDestination(block_id); - } - - bool IsNextEmittedBlock(int block_id) const { - return LookupDestination(block_id) == GetNextEmittedBlock(); - } - - bool NeedsEagerFrame() const { - return HasAllocatedStackSlots() || info()->is_non_deferred_calling() || - !info()->IsStub() || info()->requires_frame(); - } - bool NeedsDeferredFrame() const { - return !NeedsEagerFrame() && info()->is_deferred_calling(); - } - - // Support for converting LOperands to assembler types. - Operand ToOperand(LOperand* op) const; - Register ToRegister(LOperand* op) const; - XMMRegister ToDoubleRegister(LOperand* op) const; - - bool IsInteger32(LConstantOperand* op) const; - bool IsSmi(LConstantOperand* op) const; - Immediate ToImmediate(LOperand* op, const Representation& r) const { - return Immediate(ToRepresentation(LConstantOperand::cast(op), r)); - } - double ToDouble(LConstantOperand* op) const; - - Handle ToHandle(LConstantOperand* op) const; - - // The operand denoting the second word (the one with a higher address) of - // a double stack slot. - Operand HighOperand(LOperand* op); - - // Try to generate code for the entire chunk, but it may fail if the - // chunk contains constructs we cannot handle. Returns true if the - // code generation attempt succeeded. - bool GenerateCode(); - - // Finish the code by setting stack height, safepoint, and bailout - // information on it. - void FinishCode(Handle code); - - // Deferred code support. 
- void DoDeferredNumberTagD(LNumberTagD* instr); - - enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 }; - void DoDeferredNumberTagIU(LInstruction* instr, - LOperand* value, - LOperand* temp, - IntegerSignedness signedness); - - void DoDeferredTaggedToI(LTaggedToI* instr, Label* done); - void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr); - void DoDeferredStackCheck(LStackCheck* instr); - void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr); - void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr); - void DoDeferredStringCharFromCode(LStringCharFromCode* instr); - void DoDeferredAllocate(LAllocate* instr); - void DoDeferredInstanceMigration(LCheckMaps* instr, Register object); - void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, - Register object, - Register index); - - // Parallel move support. - void DoParallelMove(LParallelMove* move); - void DoGap(LGap* instr); - - // Emit frame translation commands for an environment. - void WriteTranslation(LEnvironment* environment, Translation* translation); - - void EnsureRelocSpaceForDeoptimization(); - - // Declare methods that deal with the individual node types. -#define DECLARE_DO(type) void Do##type(L##type* node); - LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) -#undef DECLARE_DO - - private: - Scope* scope() const { return scope_; } - - XMMRegister double_scratch0() const { return xmm0; } - - void EmitClassOfTest(Label* if_true, Label* if_false, - Handle class_name, Register input, - Register temporary, Register temporary2); - - bool HasAllocatedStackSlots() const { - return chunk()->HasAllocatedStackSlots(); - } - int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); } - int GetTotalFrameSlotCount() const { - return chunk()->GetTotalFrameSlotCount(); - } - - void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); } - - void SaveCallerDoubles(); - void RestoreCallerDoubles(); - - // Code generation passes. Returns true if code generation should - // continue. - void GenerateBodyInstructionPre(LInstruction* instr) override; - void GenerateBodyInstructionPost(LInstruction* instr) override; - bool GeneratePrologue(); - bool GenerateDeferredCode(); - bool GenerateJumpTable(); - bool GenerateSafepointTable(); - - // Generates the custom OSR entrypoint and sets the osr_pc_offset. - void GenerateOsrPrologue(); - - enum SafepointMode { - RECORD_SIMPLE_SAFEPOINT, - RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS - }; - - void CallCode(Handle code, - RelocInfo::Mode mode, - LInstruction* instr); - - void CallCodeGeneric(Handle code, - RelocInfo::Mode mode, - LInstruction* instr, - SafepointMode safepoint_mode); - - void CallRuntime(const Runtime::Function* fun, - int argc, - LInstruction* instr, - SaveFPRegsMode save_doubles = kDontSaveFPRegs); - - void CallRuntime(Runtime::FunctionId id, - int argc, - LInstruction* instr) { - const Runtime::Function* function = Runtime::FunctionForId(id); - CallRuntime(function, argc, instr); - } - - void CallRuntime(Runtime::FunctionId id, LInstruction* instr) { - const Runtime::Function* function = Runtime::FunctionForId(id); - CallRuntime(function, function->nargs, instr); - } - - void CallRuntimeFromDeferred(Runtime::FunctionId id, - int argc, - LInstruction* instr, - LOperand* context); - - void LoadContextFromDeferred(LOperand* context); - - void PrepareForTailCall(const ParameterCount& actual, Register scratch1, - Register scratch2, Register scratch3); - - // Generate a direct call to a known function. Expects the function - // to be in edi. 
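The DoDeferred* hooks declared above all follow one pattern: the main pass registers an out-of-line piece of work (with entry and exit labels) via AddDeferredCode, and GenerateDeferredCode flushes the list after the main body. A simplified, non-assembly version of that bookkeeping, with the deferred item reduced to a callback:

#include <cstdio>
#include <functional>
#include <utility>
#include <vector>

class DeferredList {
 public:
  void Add(std::function<void()> work) { items_.push_back(std::move(work)); }
  void Flush() {                       // like GenerateDeferredCode: emitted last
    for (auto& work : items_) work();
    items_.clear();
  }

 private:
  std::vector<std::function<void()>> items_;
};

int main() {
  DeferredList deferred;
  std::puts("inline fast path");
  deferred.Add([] { std::puts("out-of-line slow path"); });
  std::puts("more inline code");
  deferred.Flush();                    // deferred code comes after the main body
}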
- void CallKnownFunction(Handle function, - int formal_parameter_count, int arity, - bool is_tail_call, LInstruction* instr); - - void RecordSafepointWithLazyDeopt(LInstruction* instr, - SafepointMode safepoint_mode); - - void RegisterEnvironmentForDeoptimization(LEnvironment* environment, - Safepoint::DeoptMode mode); - void DeoptimizeIf(Condition cc, LInstruction* instr, - DeoptimizeReason deopt_reason, - Deoptimizer::BailoutType bailout_type); - void DeoptimizeIf(Condition cc, LInstruction* instr, - DeoptimizeReason deopt_reason); - - bool DeoptEveryNTimes() { - return FLAG_deopt_every_n_times != 0 && !info()->IsStub(); - } - - void AddToTranslation(LEnvironment* environment, - Translation* translation, - LOperand* op, - bool is_tagged, - bool is_uint32, - int* object_index_pointer, - int* dematerialized_index_pointer); - - Register ToRegister(int index) const; - XMMRegister ToDoubleRegister(int index) const; - int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const; - int32_t ToInteger32(LConstantOperand* op) const; - ExternalReference ToExternalReference(LConstantOperand* op) const; - - Operand BuildFastArrayOperand(LOperand* elements_pointer, - LOperand* key, - Representation key_representation, - ElementsKind elements_kind, - uint32_t base_offset); - - Operand BuildSeqStringOperand(Register string, - LOperand* index, - String::Encoding encoding); - - void EmitIntegerMathAbs(LMathAbs* instr); - - // Support for recording safepoint information. - void RecordSafepoint(LPointerMap* pointers, - Safepoint::Kind kind, - int arguments, - Safepoint::DeoptMode mode); - void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode); - void RecordSafepoint(Safepoint::DeoptMode mode); - void RecordSafepointWithRegisters(LPointerMap* pointers, - int arguments, - Safepoint::DeoptMode mode); - - static Condition TokenToCondition(Token::Value op, bool is_unsigned); - void EmitGoto(int block); - - // EmitBranch expects to be the last instruction of a block. - template - void EmitBranch(InstrType instr, Condition cc); - template - void EmitTrueBranch(InstrType instr, Condition cc); - template - void EmitFalseBranch(InstrType instr, Condition cc); - void EmitNumberUntagD(LNumberUntagD* instr, Register input, Register temp, - XMMRegister result, NumberUntagDMode mode); - - // Emits optimized code for typeof x == "y". Modifies input register. - // Returns the condition on which a final split to - // true and false label should be made, to optimize fallthrough. - Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input); - - // Emits optimized code for %_IsString(x). Preserves input register. - // Returns the condition on which a final split to - // true and false label should be made, to optimize fallthrough. - Condition EmitIsString(Register input, - Register temp1, - Label* is_not_string, - SmiCheck check_needed); - - // Emits optimized code to deep-copy the contents of statically known - // object graphs (e.g. object literal boilerplate). 
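EmitBranch and EmitGoto, declared above, encode a small layout optimisation that also shows up in the EmitTypeofIs body earlier: a branch whose target is the next emitted block needs no jump at all, and the near/far label distance is chosen separately. A stripped-down illustration of the fallthrough elision only (the distance choice is omitted):

#include <cstdio>

void EmitGoto(int target_block, int next_emitted_block) {
  if (target_block == next_emitted_block) return;  // fall through: no instruction
  std::printf("jmp B%d\n", target_block);
}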
- void EmitDeepCopy(Handle object, - Register result, - Register source, - int* offset, - AllocationSiteMode mode); - - void EnsureSpaceForLazyDeopt(int space_needed) override; - void DoLoadKeyedExternalArray(LLoadKeyed* instr); - void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr); - void DoLoadKeyedFixedArray(LLoadKeyed* instr); - void DoStoreKeyedExternalArray(LStoreKeyed* instr); - void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr); - void DoStoreKeyedFixedArray(LStoreKeyed* instr); - - template - void EmitVectorLoadICRegisters(T* instr); - - void EmitReturn(LReturn* instr); - - // Emits code for pushing either a tagged constant, a (non-double) - // register, or a stack slot operand. - void EmitPushTaggedOperand(LOperand* operand); - - friend class LGapResolver; - -#ifdef _MSC_VER - // On windows, you may not access the stack more than one page below - // the most recently mapped page. To make the allocated area randomly - // accessible, we write an arbitrary value to each page in range - // esp + offset - page_size .. esp in turn. - void MakeSureStackPagesMapped(int offset); -#endif - - ZoneList jump_table_; - Scope* const scope_; - ZoneList deferred_; - bool frame_is_built_; - - // Builder that keeps track of safepoints in the code. The table - // itself is emitted at the end of the generated code. - SafepointTableBuilder safepoints_; - - // Compiler from a set of parallel moves to a sequential list of moves. - LGapResolver resolver_; - - Safepoint::Kind expected_safepoint_kind_; - - class PushSafepointRegistersScope final BASE_EMBEDDED { - public: - explicit PushSafepointRegistersScope(LCodeGen* codegen) - : codegen_(codegen) { - DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); - codegen_->masm_->PushSafepointRegisters(); - codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters; - DCHECK(codegen_->info()->is_calling()); - } - - ~PushSafepointRegistersScope() { - DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters); - codegen_->masm_->PopSafepointRegisters(); - codegen_->expected_safepoint_kind_ = Safepoint::kSimple; - } - - private: - LCodeGen* codegen_; - }; - - friend class LDeferredCode; - friend class LEnvironment; - friend class SafepointGenerator; - DISALLOW_COPY_AND_ASSIGN(LCodeGen); -}; - - -class LDeferredCode : public ZoneObject { - public: - explicit LDeferredCode(LCodeGen* codegen) - : codegen_(codegen), - external_exit_(NULL), - instruction_index_(codegen->current_instruction_) { - codegen->AddDeferredCode(this); - } - - virtual ~LDeferredCode() {} - virtual void Generate() = 0; - virtual LInstruction* instr() = 0; - - void SetExit(Label* exit) { external_exit_ = exit; } - Label* entry() { return &entry_; } - Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; } - Label* done() { return codegen_->NeedsDeferredFrame() ? 
&done_ : exit(); } - int instruction_index() const { return instruction_index_; } - - protected: - LCodeGen* codegen() const { return codegen_; } - MacroAssembler* masm() const { return codegen_->masm(); } - - private: - LCodeGen* codegen_; - Label entry_; - Label exit_; - Label* external_exit_; - Label done_; - int instruction_index_; -}; - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_IA32_LITHIUM_CODEGEN_IA32_H_ diff --git a/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc b/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc deleted file mode 100644 index be8251cffb..0000000000 --- a/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc +++ /dev/null @@ -1,490 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#if V8_TARGET_ARCH_IA32 - -#include "src/crankshaft/ia32/lithium-codegen-ia32.h" -#include "src/crankshaft/ia32/lithium-gap-resolver-ia32.h" -#include "src/register-configuration.h" - -namespace v8 { -namespace internal { - -LGapResolver::LGapResolver(LCodeGen* owner) - : cgen_(owner), - moves_(32, owner->zone()), - source_uses_(), - destination_uses_(), - spilled_register_(-1) {} - - -void LGapResolver::Resolve(LParallelMove* parallel_move) { - DCHECK(HasBeenReset()); - // Build up a worklist of moves. - BuildInitialMoveList(parallel_move); - - for (int i = 0; i < moves_.length(); ++i) { - LMoveOperands move = moves_[i]; - // Skip constants to perform them last. They don't block other moves - // and skipping such moves with register destinations keeps those - // registers free for the whole algorithm. - if (!move.IsEliminated() && !move.source()->IsConstantOperand()) { - PerformMove(i); - } - } - - // Perform the moves with constant sources. - for (int i = 0; i < moves_.length(); ++i) { - if (!moves_[i].IsEliminated()) { - DCHECK(moves_[i].source()->IsConstantOperand()); - EmitMove(i); - } - } - - Finish(); - DCHECK(HasBeenReset()); -} - - -void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) { - // Perform a linear sweep of the moves to add them to the initial list of - // moves to perform, ignoring any move that is redundant (the source is - // the same as the destination, the destination is ignored and - // unallocated, or the move was already eliminated). - const ZoneList* moves = parallel_move->move_operands(); - for (int i = 0; i < moves->length(); ++i) { - LMoveOperands move = moves->at(i); - if (!move.IsRedundant()) AddMove(move); - } - Verify(); -} - - -void LGapResolver::PerformMove(int index) { - // Each call to this function performs a move and deletes it from the move - // graph. We first recursively perform any move blocking this one. We - // mark a move as "pending" on entry to PerformMove in order to detect - // cycles in the move graph. We use operand swaps to resolve cycles, - // which means that a call to PerformMove could change any source operand - // in the move graph. - - DCHECK(!moves_[index].IsPending()); - DCHECK(!moves_[index].IsRedundant()); - - // Clear this move's destination to indicate a pending move. The actual - // destination is saved on the side. - DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated. - LOperand* destination = moves_[index].destination(); - moves_[index].set_destination(NULL); - - // Perform a depth-first traversal of the move graph to resolve - // dependencies. 
Any unperformed, unpending move with a source the same - // as this one's destination blocks this one so recursively perform all - // such moves. - for (int i = 0; i < moves_.length(); ++i) { - LMoveOperands other_move = moves_[i]; - if (other_move.Blocks(destination) && !other_move.IsPending()) { - // Though PerformMove can change any source operand in the move graph, - // this call cannot create a blocking move via a swap (this loop does - // not miss any). Assume there is a non-blocking move with source A - // and this move is blocked on source B and there is a swap of A and - // B. Then A and B must be involved in the same cycle (or they would - // not be swapped). Since this move's destination is B and there is - // only a single incoming edge to an operand, this move must also be - // involved in the same cycle. In that case, the blocking move will - // be created but will be "pending" when we return from PerformMove. - PerformMove(i); - } - } - - // We are about to resolve this move and don't need it marked as - // pending, so restore its destination. - moves_[index].set_destination(destination); - - // This move's source may have changed due to swaps to resolve cycles and - // so it may now be the last move in the cycle. If so remove it. - if (moves_[index].source()->Equals(destination)) { - RemoveMove(index); - return; - } - - // The move may be blocked on a (at most one) pending move, in which case - // we have a cycle. Search for such a blocking move and perform a swap to - // resolve it. - for (int i = 0; i < moves_.length(); ++i) { - LMoveOperands other_move = moves_[i]; - if (other_move.Blocks(destination)) { - DCHECK(other_move.IsPending()); - EmitSwap(index); - return; - } - } - - // This move is not blocked. - EmitMove(index); -} - - -void LGapResolver::AddMove(LMoveOperands move) { - LOperand* source = move.source(); - if (source->IsRegister()) ++source_uses_[source->index()]; - - LOperand* destination = move.destination(); - if (destination->IsRegister()) ++destination_uses_[destination->index()]; - - moves_.Add(move, cgen_->zone()); -} - - -void LGapResolver::RemoveMove(int index) { - LOperand* source = moves_[index].source(); - if (source->IsRegister()) { - --source_uses_[source->index()]; - DCHECK(source_uses_[source->index()] >= 0); - } - - LOperand* destination = moves_[index].destination(); - if (destination->IsRegister()) { - --destination_uses_[destination->index()]; - DCHECK(destination_uses_[destination->index()] >= 0); - } - - moves_[index].Eliminate(); -} - - -int LGapResolver::CountSourceUses(LOperand* operand) { - int count = 0; - for (int i = 0; i < moves_.length(); ++i) { - if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) { - ++count; - } - } - return count; -} - - -Register LGapResolver::GetFreeRegisterNot(Register reg) { - int skip_index = reg.is(no_reg) ? 
-1 : reg.code(); - const RegisterConfiguration* config = RegisterConfiguration::Crankshaft(); - for (int i = 0; i < config->num_allocatable_general_registers(); ++i) { - int code = config->GetAllocatableGeneralCode(i); - if (source_uses_[code] == 0 && destination_uses_[code] > 0 && - code != skip_index) { - return Register::from_code(code); - } - } - return no_reg; -} - - -bool LGapResolver::HasBeenReset() { - if (!moves_.is_empty()) return false; - if (spilled_register_ >= 0) return false; - const RegisterConfiguration* config = RegisterConfiguration::Crankshaft(); - for (int i = 0; i < config->num_allocatable_general_registers(); ++i) { - int code = config->GetAllocatableGeneralCode(i); - if (source_uses_[code] != 0) return false; - if (destination_uses_[code] != 0) return false; - } - return true; -} - - -void LGapResolver::Verify() { -#ifdef ENABLE_SLOW_DCHECKS - // No operand should be the destination for more than one move. - for (int i = 0; i < moves_.length(); ++i) { - LOperand* destination = moves_[i].destination(); - for (int j = i + 1; j < moves_.length(); ++j) { - SLOW_DCHECK(!destination->Equals(moves_[j].destination())); - } - } -#endif -} - - -#define __ ACCESS_MASM(cgen_->masm()) - -void LGapResolver::Finish() { - if (spilled_register_ >= 0) { - __ pop(Register::from_code(spilled_register_)); - spilled_register_ = -1; - } - moves_.Rewind(0); -} - - -void LGapResolver::EnsureRestored(LOperand* operand) { - if (operand->IsRegister() && operand->index() == spilled_register_) { - __ pop(Register::from_code(spilled_register_)); - spilled_register_ = -1; - } -} - - -Register LGapResolver::EnsureTempRegister() { - // 1. We may have already spilled to create a temp register. - if (spilled_register_ >= 0) { - return Register::from_code(spilled_register_); - } - - // 2. We may have a free register that we can use without spilling. - Register free = GetFreeRegisterNot(no_reg); - if (!free.is(no_reg)) return free; - - // 3. Prefer to spill a register that is not used in any remaining move - // because it will not need to be restored until the end. - const RegisterConfiguration* config = RegisterConfiguration::Crankshaft(); - for (int i = 0; i < config->num_allocatable_general_registers(); ++i) { - int code = config->GetAllocatableGeneralCode(i); - if (source_uses_[code] == 0 && destination_uses_[code] == 0) { - Register scratch = Register::from_code(code); - __ push(scratch); - spilled_register_ = code; - return scratch; - } - } - - // 4. Use an arbitrary register. Register 0 is as arbitrary as any other. - spilled_register_ = config->GetAllocatableGeneralCode(0); - Register scratch = Register::from_code(spilled_register_); - __ push(scratch); - return scratch; -} - - -void LGapResolver::EmitMove(int index) { - LOperand* source = moves_[index].source(); - LOperand* destination = moves_[index].destination(); - EnsureRestored(source); - EnsureRestored(destination); - - // Dispatch on the source and destination operand kinds. Not all - // combinations are possible. 
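EnsureTempRegister above prefers, in order: a register it has already spilled, a register that the tracked use counts show to be free, a register untouched by any remaining move (pushed now and popped in Finish), and finally an arbitrary register. A compact sketch of that priority order over plain integer register codes; the push/pop of the spilled register is only noted in comments:

#include <array>
#include <optional>

constexpr int kNumRegs = 8;  // stand-in for the allocatable general registers

struct TempPicker {
  std::array<int, kNumRegs> source_uses{};       // uses as a remaining move source
  std::array<int, kNumRegs> destination_uses{};  // uses as a remaining move destination
  std::optional<int> spilled;                    // register spilled on demand

  int EnsureTemp() {
    if (spilled) return *spilled;        // 1. reuse the register already spilled
    for (int r = 0; r < kNumRegs; ++r)   // 2. a register only used as a destination
      if (source_uses[r] == 0 && destination_uses[r] > 0) return r;
    for (int r = 0; r < kNumRegs; ++r)   // 3. a register untouched by remaining moves:
      if (source_uses[r] == 0 && destination_uses[r] == 0) {
        spilled = r;                     //    push it here, pop it again in Finish()
        return r;
      }
    spilled = 0;                         // 4. last resort: spill an arbitrary register
    return *spilled;
  }
};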
- if (source->IsRegister()) { - DCHECK(destination->IsRegister() || destination->IsStackSlot()); - Register src = cgen_->ToRegister(source); - Operand dst = cgen_->ToOperand(destination); - __ mov(dst, src); - - } else if (source->IsStackSlot()) { - DCHECK(destination->IsRegister() || destination->IsStackSlot()); - Operand src = cgen_->ToOperand(source); - if (destination->IsRegister()) { - Register dst = cgen_->ToRegister(destination); - __ mov(dst, src); - } else { - // Spill on demand to use a temporary register for memory-to-memory - // moves. - Register tmp = EnsureTempRegister(); - Operand dst = cgen_->ToOperand(destination); - __ mov(tmp, src); - __ mov(dst, tmp); - } - - } else if (source->IsConstantOperand()) { - LConstantOperand* constant_source = LConstantOperand::cast(source); - if (destination->IsRegister()) { - Register dst = cgen_->ToRegister(destination); - Representation r = cgen_->IsSmi(constant_source) - ? Representation::Smi() : Representation::Integer32(); - if (cgen_->IsInteger32(constant_source)) { - __ Move(dst, cgen_->ToImmediate(constant_source, r)); - } else { - __ LoadObject(dst, cgen_->ToHandle(constant_source)); - } - } else if (destination->IsDoubleRegister()) { - double v = cgen_->ToDouble(constant_source); - uint64_t int_val = bit_cast(v); - int32_t lower = static_cast(int_val); - int32_t upper = static_cast(int_val >> kBitsPerInt); - XMMRegister dst = cgen_->ToDoubleRegister(destination); - if (int_val == 0) { - __ xorps(dst, dst); - } else { - __ push(Immediate(upper)); - __ push(Immediate(lower)); - __ movsd(dst, Operand(esp, 0)); - __ add(esp, Immediate(kDoubleSize)); - } - } else { - DCHECK(destination->IsStackSlot()); - Operand dst = cgen_->ToOperand(destination); - Representation r = cgen_->IsSmi(constant_source) - ? Representation::Smi() : Representation::Integer32(); - if (cgen_->IsInteger32(constant_source)) { - __ Move(dst, cgen_->ToImmediate(constant_source, r)); - } else { - Register tmp = EnsureTempRegister(); - __ LoadObject(tmp, cgen_->ToHandle(constant_source)); - __ mov(dst, tmp); - } - } - - } else if (source->IsDoubleRegister()) { - XMMRegister src = cgen_->ToDoubleRegister(source); - if (destination->IsDoubleRegister()) { - XMMRegister dst = cgen_->ToDoubleRegister(destination); - __ movaps(dst, src); - } else { - DCHECK(destination->IsDoubleStackSlot()); - Operand dst = cgen_->ToOperand(destination); - __ movsd(dst, src); - } - } else if (source->IsDoubleStackSlot()) { - DCHECK(destination->IsDoubleRegister() || - destination->IsDoubleStackSlot()); - Operand src = cgen_->ToOperand(source); - if (destination->IsDoubleRegister()) { - XMMRegister dst = cgen_->ToDoubleRegister(destination); - __ movsd(dst, src); - } else { - // We rely on having xmm0 available as a fixed scratch register. - Operand dst = cgen_->ToOperand(destination); - __ movsd(xmm0, src); - __ movsd(dst, xmm0); - } - } else { - UNREACHABLE(); - } - - RemoveMove(index); -} - - -void LGapResolver::EmitSwap(int index) { - LOperand* source = moves_[index].source(); - LOperand* destination = moves_[index].destination(); - EnsureRestored(source); - EnsureRestored(destination); - - // Dispatch on the source and destination operand kinds. Not all - // combinations are possible. - if (source->IsRegister() && destination->IsRegister()) { - // Register-register. 
- Register src = cgen_->ToRegister(source); - Register dst = cgen_->ToRegister(destination); - __ push(src); - __ mov(src, dst); - __ pop(dst); - - } else if ((source->IsRegister() && destination->IsStackSlot()) || - (source->IsStackSlot() && destination->IsRegister())) { - // Register-memory. Use a free register as a temp if possible. Do not - // spill on demand because the simple spill implementation cannot avoid - // spilling src at this point. - Register tmp = GetFreeRegisterNot(no_reg); - Register reg = - cgen_->ToRegister(source->IsRegister() ? source : destination); - Operand mem = - cgen_->ToOperand(source->IsRegister() ? destination : source); - if (tmp.is(no_reg)) { - __ xor_(reg, mem); - __ xor_(mem, reg); - __ xor_(reg, mem); - } else { - __ mov(tmp, mem); - __ mov(mem, reg); - __ mov(reg, tmp); - } - - } else if (source->IsStackSlot() && destination->IsStackSlot()) { - // Memory-memory. Spill on demand to use a temporary. If there is a - // free register after that, use it as a second temporary. - Register tmp0 = EnsureTempRegister(); - Register tmp1 = GetFreeRegisterNot(tmp0); - Operand src = cgen_->ToOperand(source); - Operand dst = cgen_->ToOperand(destination); - if (tmp1.is(no_reg)) { - // Only one temp register available to us. - __ mov(tmp0, dst); - __ xor_(tmp0, src); - __ xor_(src, tmp0); - __ xor_(tmp0, src); - __ mov(dst, tmp0); - } else { - __ mov(tmp0, dst); - __ mov(tmp1, src); - __ mov(dst, tmp1); - __ mov(src, tmp0); - } - } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) { - // XMM register-register swap. We rely on having xmm0 - // available as a fixed scratch register. - XMMRegister src = cgen_->ToDoubleRegister(source); - XMMRegister dst = cgen_->ToDoubleRegister(destination); - __ movaps(xmm0, src); - __ movaps(src, dst); - __ movaps(dst, xmm0); - } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) { - // XMM register-memory swap. We rely on having xmm0 - // available as a fixed scratch register. - DCHECK(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot()); - XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister() - ? source - : destination); - Operand other = - cgen_->ToOperand(source->IsDoubleRegister() ? destination : source); - __ movsd(xmm0, other); - __ movsd(other, reg); - __ movaps(reg, xmm0); - } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) { - // Double-width memory-to-memory. Spill on demand to use a general - // purpose temporary register and also rely on having xmm0 available as - // a fixed scratch register. - Register tmp = EnsureTempRegister(); - Operand src0 = cgen_->ToOperand(source); - Operand src1 = cgen_->HighOperand(source); - Operand dst0 = cgen_->ToOperand(destination); - Operand dst1 = cgen_->HighOperand(destination); - __ movsd(xmm0, dst0); // Save destination in xmm0. - __ mov(tmp, src0); // Then use tmp to copy source to destination. - __ mov(dst0, tmp); - __ mov(tmp, src1); - __ mov(dst1, tmp); - __ movsd(src0, xmm0); - - } else { - // No other combinations are possible. - UNREACHABLE(); - } - - // The swap of source and destination has executed a move from source to - // destination. - RemoveMove(index); - - // Any unperformed (including pending) move with a source of either - // this move's source or destination needs to have their source - // changed to reflect the state of affairs after the swap. 
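When no scratch register is free, EmitSwap above exchanges a register and a memory operand with three XORs instead of spilling. The trick in plain C++; it is only a space-saving fallback, not faster than using a temporary, and it must never be applied to two references to the same location:

#include <cstdint>

// If a and b aliased the same object this would zero it, so the gap resolver
// only uses it for distinct operands.
inline void XorSwap(uint32_t& a, uint32_t& b) {
  a ^= b;
  b ^= a;
  a ^= b;
}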
- for (int i = 0; i < moves_.length(); ++i) { - LMoveOperands other_move = moves_[i]; - if (other_move.Blocks(source)) { - moves_[i].set_source(destination); - } else if (other_move.Blocks(destination)) { - moves_[i].set_source(source); - } - } - - // In addition to swapping the actual uses as sources, we need to update - // the use counts. - if (source->IsRegister() && destination->IsRegister()) { - int temp = source_uses_[source->index()]; - source_uses_[source->index()] = source_uses_[destination->index()]; - source_uses_[destination->index()] = temp; - } else if (source->IsRegister()) { - // We don't have use counts for non-register operands like destination. - // Compute those counts now. - source_uses_[source->index()] = CountSourceUses(source); - } else if (destination->IsRegister()) { - source_uses_[destination->index()] = CountSourceUses(destination); - } -} - -#undef __ - -} // namespace internal -} // namespace v8 - -#endif // V8_TARGET_ARCH_IA32 diff --git a/src/crankshaft/ia32/lithium-gap-resolver-ia32.h b/src/crankshaft/ia32/lithium-gap-resolver-ia32.h deleted file mode 100644 index 687087feb3..0000000000 --- a/src/crankshaft/ia32/lithium-gap-resolver-ia32.h +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_IA32_LITHIUM_GAP_RESOLVER_IA32_H_ -#define V8_CRANKSHAFT_IA32_LITHIUM_GAP_RESOLVER_IA32_H_ - -#include "src/crankshaft/lithium.h" - -namespace v8 { -namespace internal { - -class LCodeGen; -class LGapResolver; - -class LGapResolver final BASE_EMBEDDED { - public: - explicit LGapResolver(LCodeGen* owner); - - // Resolve a set of parallel moves, emitting assembler instructions. - void Resolve(LParallelMove* parallel_move); - - private: - // Build the initial list of moves. - void BuildInitialMoveList(LParallelMove* parallel_move); - - // Perform the move at the moves_ index in question (possibly requiring - // other moves to satisfy dependencies). - void PerformMove(int index); - - // Emit any code necessary at the end of a gap move. - void Finish(); - - // Add or delete a move from the move graph without emitting any code. - // Used to build up the graph and remove trivial moves. - void AddMove(LMoveOperands move); - void RemoveMove(int index); - - // Report the count of uses of operand as a source in a not-yet-performed - // move. Used to rebuild use counts. - int CountSourceUses(LOperand* operand); - - // Emit a move and remove it from the move graph. - void EmitMove(int index); - - // Execute a move by emitting a swap of two operands. The move from - // source to destination is removed from the move graph. - void EmitSwap(int index); - - // Ensure that the given operand is not spilled. - void EnsureRestored(LOperand* operand); - - // Return a register that can be used as a temp register, spilling - // something if necessary. - Register EnsureTempRegister(); - - // Return a known free register different from the given one (which could - // be no_reg---returning any free register), or no_reg if there is no such - // register. - Register GetFreeRegisterNot(Register reg); - - // Verify that the state is the initial one, ready to resolve a single - // parallel move. - bool HasBeenReset(); - - // Verify the move list before performing moves. - void Verify(); - - LCodeGen* cgen_; - - // List of moves not yet resolved. 
- ZoneList moves_; - - // Source and destination use counts for the general purpose registers. - int source_uses_[Register::kNumRegisters]; - int destination_uses_[DoubleRegister::kMaxNumRegisters]; - - // If we had to spill on demand, the currently spilled register's - // allocation index. - int spilled_register_; -}; - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_IA32_LITHIUM_GAP_RESOLVER_IA32_H_ diff --git a/src/crankshaft/ia32/lithium-ia32.cc b/src/crankshaft/ia32/lithium-ia32.cc deleted file mode 100644 index 068fe0f787..0000000000 --- a/src/crankshaft/ia32/lithium-ia32.cc +++ /dev/null @@ -1,2451 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/ia32/lithium-ia32.h" - -#include - -#if V8_TARGET_ARCH_IA32 - -#include "src/crankshaft/ia32/lithium-codegen-ia32.h" -#include "src/crankshaft/lithium-inl.h" - -namespace v8 { -namespace internal { - -#define DEFINE_COMPILE(type) \ - void L##type::CompileToNative(LCodeGen* generator) { \ - generator->Do##type(this); \ - } -LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE) -#undef DEFINE_COMPILE - - -#ifdef DEBUG -void LInstruction::VerifyCall() { - // Call instructions can use only fixed registers as temporaries and - // outputs because all registers are blocked by the calling convention. - // Inputs operands must use a fixed register or use-at-start policy or - // a non-register policy. - DCHECK(Output() == NULL || - LUnallocated::cast(Output())->HasFixedPolicy() || - !LUnallocated::cast(Output())->HasRegisterPolicy()); - for (UseIterator it(this); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - DCHECK(operand->HasFixedPolicy() || - operand->IsUsedAtStart()); - } - for (TempIterator it(this); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy()); - } -} -#endif - - -bool LInstruction::HasDoubleRegisterResult() { - return HasResult() && result()->IsDoubleRegister(); -} - - -bool LInstruction::HasDoubleRegisterInput() { - for (int i = 0; i < InputCount(); i++) { - LOperand* op = InputAt(i); - if (op != NULL && op->IsDoubleRegister()) { - return true; - } - } - return false; -} - - -void LInstruction::PrintTo(StringStream* stream) { - stream->Add("%s ", this->Mnemonic()); - - PrintOutputOperandTo(stream); - - PrintDataTo(stream); - - if (HasEnvironment()) { - stream->Add(" "); - environment()->PrintTo(stream); - } - - if (HasPointerMap()) { - stream->Add(" "); - pointer_map()->PrintTo(stream); - } -} - - -void LInstruction::PrintDataTo(StringStream* stream) { - stream->Add("= "); - for (int i = 0; i < InputCount(); i++) { - if (i > 0) stream->Add(" "); - if (InputAt(i) == NULL) { - stream->Add("NULL"); - } else { - InputAt(i)->PrintTo(stream); - } - } -} - - -void LInstruction::PrintOutputOperandTo(StringStream* stream) { - if (HasResult()) result()->PrintTo(stream); -} - - -void LLabel::PrintDataTo(StringStream* stream) { - LGap::PrintDataTo(stream); - LLabel* rep = replacement(); - if (rep != NULL) { - stream->Add(" Dead block replaced with B%d", rep->block_id()); - } -} - - -bool LGap::IsRedundant() const { - for (int i = 0; i < 4; i++) { - if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) { - return false; - } - } - - return true; -} - - -void LGap::PrintDataTo(StringStream* stream) { - for (int i = 
0; i < 4; i++) { - stream->Add("("); - if (parallel_moves_[i] != NULL) { - parallel_moves_[i]->PrintDataTo(stream); - } - stream->Add(") "); - } -} - - -const char* LArithmeticD::Mnemonic() const { - switch (op()) { - case Token::ADD: return "add-d"; - case Token::SUB: return "sub-d"; - case Token::MUL: return "mul-d"; - case Token::DIV: return "div-d"; - case Token::MOD: return "mod-d"; - default: - UNREACHABLE(); - } -} - - -const char* LArithmeticT::Mnemonic() const { - switch (op()) { - case Token::ADD: return "add-t"; - case Token::SUB: return "sub-t"; - case Token::MUL: return "mul-t"; - case Token::MOD: return "mod-t"; - case Token::DIV: return "div-t"; - case Token::BIT_AND: return "bit-and-t"; - case Token::BIT_OR: return "bit-or-t"; - case Token::BIT_XOR: return "bit-xor-t"; - case Token::ROR: return "ror-t"; - case Token::SHL: return "sal-t"; - case Token::SAR: return "sar-t"; - case Token::SHR: return "shr-t"; - default: - UNREACHABLE(); - } -} - - -bool LGoto::HasInterestingComment(LCodeGen* gen) const { - return !gen->IsNextEmittedBlock(block_id()); -} - - -void LGoto::PrintDataTo(StringStream* stream) { - stream->Add("B%d", block_id()); -} - - -void LBranch::PrintDataTo(StringStream* stream) { - stream->Add("B%d | B%d on ", true_block_id(), false_block_id()); - value()->PrintTo(stream); -} - - -void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if "); - left()->PrintTo(stream); - stream->Add(" %s ", Token::String(op())); - right()->PrintTo(stream); - stream->Add(" then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LIsStringAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_string("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LIsSmiAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_smi("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_undetectable("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LStringCompareAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if string_compare("); - left()->PrintTo(stream); - right()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if has_instance_type("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - -void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if class_of_test("); - value()->PrintTo(stream); - stream->Add(", \"%o\") then B%d else B%d", *hydrogen()->class_name(), - true_block_id(), false_block_id()); -} - -void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if typeof "); - value()->PrintTo(stream); - stream->Add(" == \"%s\" then B%d else B%d", - hydrogen()->type_literal()->ToCString().get(), - true_block_id(), false_block_id()); -} - - -void LStoreCodeEntry::PrintDataTo(StringStream* stream) { - stream->Add(" = "); - function()->PrintTo(stream); - stream->Add(".code_entry = "); - code_object()->PrintTo(stream); -} - - -void LInnerAllocatedObject::PrintDataTo(StringStream* stream) { - stream->Add(" = "); - base_object()->PrintTo(stream); - stream->Add(" + "); - offset()->PrintTo(stream); -} - - -void 
LCallWithDescriptor::PrintDataTo(StringStream* stream) { - for (int i = 0; i < InputCount(); i++) { - InputAt(i)->PrintTo(stream); - stream->Add(" "); - } - stream->Add("#%d / ", arity()); -} - - -void LLoadContextSlot::PrintDataTo(StringStream* stream) { - context()->PrintTo(stream); - stream->Add("[%d]", slot_index()); -} - - -void LStoreContextSlot::PrintDataTo(StringStream* stream) { - context()->PrintTo(stream); - stream->Add("[%d] <- ", slot_index()); - value()->PrintTo(stream); -} - - -void LInvokeFunction::PrintDataTo(StringStream* stream) { - stream->Add("= "); - context()->PrintTo(stream); - stream->Add(" "); - function()->PrintTo(stream); - stream->Add(" #%d / ", arity()); -} - - -void LCallNewArray::PrintDataTo(StringStream* stream) { - stream->Add("= "); - context()->PrintTo(stream); - stream->Add(" "); - constructor()->PrintTo(stream); - stream->Add(" #%d / ", arity()); - ElementsKind kind = hydrogen()->elements_kind(); - stream->Add(" (%s) ", ElementsKindToString(kind)); -} - - -void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { - arguments()->PrintTo(stream); - - stream->Add(" length "); - length()->PrintTo(stream); - - stream->Add(" index "); - index()->PrintTo(stream); -} - - -int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) { - // Skip a slot if for a double-width slot. - if (kind == DOUBLE_REGISTERS) { - current_frame_slots_++; - current_frame_slots_ |= 1; - num_double_slots_++; - } - return current_frame_slots_++; -} - - -LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) { - int index = GetNextSpillIndex(kind); - if (kind == DOUBLE_REGISTERS) { - return LDoubleStackSlot::Create(index, zone()); - } else { - DCHECK(kind == GENERAL_REGISTERS); - return LStackSlot::Create(index, zone()); - } -} - - -void LStoreNamedField::PrintDataTo(StringStream* stream) { - object()->PrintTo(stream); - std::ostringstream os; - os << hydrogen()->access() << " <- "; - stream->Add(os.str().c_str()); - value()->PrintTo(stream); -} - - -void LLoadKeyed::PrintDataTo(StringStream* stream) { - elements()->PrintTo(stream); - stream->Add("["); - key()->PrintTo(stream); - if (hydrogen()->IsDehoisted()) { - stream->Add(" + %d]", base_offset()); - } else { - stream->Add("]"); - } -} - - -void LStoreKeyed::PrintDataTo(StringStream* stream) { - elements()->PrintTo(stream); - stream->Add("["); - key()->PrintTo(stream); - if (hydrogen()->IsDehoisted()) { - stream->Add(" + %d] <-", base_offset()); - } else { - stream->Add("] <- "); - } - - if (value() == NULL) { - DCHECK(hydrogen()->IsConstantHoleStore() && - hydrogen()->value()->representation().IsDouble()); - stream->Add(""); - } else { - value()->PrintTo(stream); - } -} - - -void LTransitionElementsKind::PrintDataTo(StringStream* stream) { - object()->PrintTo(stream); - stream->Add(" %p -> %p", *original_map(), *transitioned_map()); -} - - -LPlatformChunk* LChunkBuilder::Build() { - DCHECK(is_unused()); - chunk_ = new(zone()) LPlatformChunk(info(), graph()); - LPhase phase("L_Building chunk", chunk_); - status_ = BUILDING; - - const ZoneList* blocks = graph()->blocks(); - for (int i = 0; i < blocks->length(); i++) { - HBasicBlock* next = NULL; - if (i < blocks->length() - 1) next = blocks->at(i + 1); - DoBasicBlock(blocks->at(i), next); - if (is_aborted()) return NULL; - } - status_ = DONE; - return chunk_; -} - - -LUnallocated* LChunkBuilder::ToUnallocated(Register reg) { - return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code()); -} - - -LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) { - 
return new (zone()) - LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code()); -} - - -LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) { - return Use(value, ToUnallocated(fixed_register)); -} - - -LOperand* LChunkBuilder::UseFixedDouble(HValue* value, XMMRegister reg) { - return Use(value, ToUnallocated(reg)); -} - - -LOperand* LChunkBuilder::UseRegister(HValue* value) { - return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER)); -} - - -LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) { - return Use(value, - new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER, - LUnallocated::USED_AT_START)); -} - - -LOperand* LChunkBuilder::UseTempRegister(HValue* value) { - return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER)); -} - - -LOperand* LChunkBuilder::Use(HValue* value) { - return Use(value, new(zone()) LUnallocated(LUnallocated::NONE)); -} - - -LOperand* LChunkBuilder::UseAtStart(HValue* value) { - return Use(value, new(zone()) LUnallocated(LUnallocated::NONE, - LUnallocated::USED_AT_START)); -} - - -static inline bool CanBeImmediateConstant(HValue* value) { - return value->IsConstant() && HConstant::cast(value)->NotInNewSpace(); -} - - -LOperand* LChunkBuilder::UseOrConstant(HValue* value) { - return CanBeImmediateConstant(value) - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : Use(value); -} - - -LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) { - return CanBeImmediateConstant(value) - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseAtStart(value); -} - - -LOperand* LChunkBuilder::UseFixedOrConstant(HValue* value, - Register fixed_register) { - return CanBeImmediateConstant(value) - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseFixed(value, fixed_register); -} - - -LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) { - return CanBeImmediateConstant(value) - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseRegister(value); -} - - -LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) { - return CanBeImmediateConstant(value) - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseRegisterAtStart(value); -} - - -LOperand* LChunkBuilder::UseConstant(HValue* value) { - return chunk_->DefineConstantOperand(HConstant::cast(value)); -} - - -LOperand* LChunkBuilder::UseAny(HValue* value) { - return value->IsConstant() - ? 
chunk_->DefineConstantOperand(HConstant::cast(value)) - : Use(value, new(zone()) LUnallocated(LUnallocated::ANY)); -} - - -LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) { - if (value->EmitAtUses()) { - HInstruction* instr = HInstruction::cast(value); - VisitInstruction(instr); - } - operand->set_virtual_register(value->id()); - return operand; -} - - -LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr, - LUnallocated* result) { - result->set_virtual_register(current_instruction_->id()); - instr->set_result(result); - return instr; -} - - -LInstruction* LChunkBuilder::DefineAsRegister( - LTemplateResultInstruction<1>* instr) { - return Define(instr, - new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER)); -} - - -LInstruction* LChunkBuilder::DefineAsSpilled( - LTemplateResultInstruction<1>* instr, - int index) { - return Define(instr, - new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index)); -} - - -LInstruction* LChunkBuilder::DefineSameAsFirst( - LTemplateResultInstruction<1>* instr) { - return Define(instr, - new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT)); -} - - -LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr, - Register reg) { - return Define(instr, ToUnallocated(reg)); -} - - -LInstruction* LChunkBuilder::DefineFixedDouble( - LTemplateResultInstruction<1>* instr, - XMMRegister reg) { - return Define(instr, ToUnallocated(reg)); -} - - -LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { - HEnvironment* hydrogen_env = current_block_->last_environment(); - return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env); -} - - -LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, - HInstruction* hinstr, - CanDeoptimize can_deoptimize) { - info()->MarkAsNonDeferredCalling(); - -#ifdef DEBUG - instr->VerifyCall(); -#endif - instr->MarkAsCall(); - instr = AssignPointerMap(instr); - - // If instruction does not have side-effects lazy deoptimization - // after the call will try to deoptimize to the point before the call. - // Thus we still need to attach environment to this call even if - // call sequence can not deoptimize eagerly. - bool needs_environment = - (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || - !hinstr->HasObservableSideEffects(); - if (needs_environment && !instr->HasEnvironment()) { - instr = AssignEnvironment(instr); - // We can't really figure out if the environment is needed or not. 
- instr->environment()->set_has_been_used(); - } - - return instr; -} - - -LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { - DCHECK(!instr->HasPointerMap()); - instr->set_pointer_map(new(zone()) LPointerMap(zone())); - return instr; -} - - -LUnallocated* LChunkBuilder::TempRegister() { - LUnallocated* operand = - new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER); - int vreg = allocator_->GetVirtualRegister(); - if (!allocator_->AllocationOk()) { - Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister); - vreg = 0; - } - operand->set_virtual_register(vreg); - return operand; -} - - -LOperand* LChunkBuilder::FixedTemp(Register reg) { - LUnallocated* operand = ToUnallocated(reg); - DCHECK(operand->HasFixedPolicy()); - return operand; -} - - -LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) { - LUnallocated* operand = ToUnallocated(reg); - DCHECK(operand->HasFixedPolicy()); - return operand; -} - - -LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) { - return new(zone()) LLabel(instr->block()); -} - - -LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) { - return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value()))); -} - - -LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) { - UNREACHABLE(); -} - - -LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) { - return AssignEnvironment(new(zone()) LDeoptimize); -} - - -LInstruction* LChunkBuilder::DoShift(Token::Value op, - HBitwiseBinaryOperation* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* left = UseRegisterAtStart(instr->left()); - - HValue* right_value = instr->right(); - LOperand* right = NULL; - int constant_value = 0; - bool does_deopt = false; - if (right_value->IsConstant()) { - HConstant* constant = HConstant::cast(right_value); - right = chunk_->DefineConstantOperand(constant); - constant_value = constant->Integer32Value() & 0x1f; - // Left shifts can deoptimize if we shift by > 0 and the result cannot be - // truncated to smi. - if (instr->representation().IsSmi() && constant_value > 0) { - does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi); - } - } else { - right = UseFixed(right_value, ecx); - } - - // Shift operations can only deoptimize if we do a logical shift by 0 and - // the result cannot be truncated to int32. - if (op == Token::SHR && constant_value == 0) { - does_deopt = !instr->CheckFlag(HInstruction::kUint32); - } - - LInstruction* result = - DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt)); - return does_deopt ? 
AssignEnvironment(result) : result; - } else { - return DoArithmeticT(op, instr); - } -} - - -LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op, - HArithmeticBinaryOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - DCHECK(instr->right()->representation().IsDouble()); - if (op == Token::MOD) { - LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); - LOperand* right = UseRegisterAtStart(instr->BetterRightOperand()); - LArithmeticD* result = new(zone()) LArithmeticD(op, left, right); - return MarkAsCall(DefineSameAsFirst(result), instr); - } else { - LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); - LOperand* right = UseRegisterAtStart(instr->BetterRightOperand()); - LArithmeticD* result = new(zone()) LArithmeticD(op, left, right); - return CpuFeatures::IsSupported(AVX) ? DefineAsRegister(result) - : DefineSameAsFirst(result); - } -} - - -LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op, - HBinaryOperation* instr) { - HValue* left = instr->left(); - HValue* right = instr->right(); - DCHECK(left->representation().IsTagged()); - DCHECK(right->representation().IsTagged()); - LOperand* context = UseFixed(instr->context(), esi); - LOperand* left_operand = UseFixed(left, edx); - LOperand* right_operand = UseFixed(right, eax); - LArithmeticT* result = - new(zone()) LArithmeticT(op, context, left_operand, right_operand); - return MarkAsCall(DefineFixed(result, eax), instr); -} - - -void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { - DCHECK(is_building()); - current_block_ = block; - next_block_ = next_block; - if (block->IsStartBlock()) { - block->UpdateEnvironment(graph_->start_environment()); - argument_count_ = 0; - } else if (block->predecessors()->length() == 1) { - // We have a single predecessor => copy environment and outgoing - // argument count from the predecessor. - DCHECK(block->phis()->length() == 0); - HBasicBlock* pred = block->predecessors()->at(0); - HEnvironment* last_environment = pred->last_environment(); - DCHECK(last_environment != NULL); - // Only copy the environment, if it is later used again. - if (pred->end()->SecondSuccessor() == NULL) { - DCHECK(pred->end()->FirstSuccessor() == block); - } else { - if (pred->end()->FirstSuccessor()->block_id() > block->block_id() || - pred->end()->SecondSuccessor()->block_id() > block->block_id()) { - last_environment = last_environment->Copy(); - } - } - block->UpdateEnvironment(last_environment); - DCHECK(pred->argument_count() >= 0); - argument_count_ = pred->argument_count(); - } else { - // We are at a state join => process phis. - HBasicBlock* pred = block->predecessors()->at(0); - // No need to copy the environment, it cannot be used later. - HEnvironment* last_environment = pred->last_environment(); - for (int i = 0; i < block->phis()->length(); ++i) { - HPhi* phi = block->phis()->at(i); - if (phi->HasMergedIndex()) { - last_environment->SetValueAt(phi->merged_index(), phi); - } - } - for (int i = 0; i < block->deleted_phis()->length(); ++i) { - if (block->deleted_phis()->at(i) < last_environment->length()) { - last_environment->SetValueAt(block->deleted_phis()->at(i), - graph_->GetConstantUndefined()); - } - } - block->UpdateEnvironment(last_environment); - // Pick up the outgoing argument count of one of the predecessors. 
- argument_count_ = pred->argument_count(); - } - HInstruction* current = block->first(); - int start = chunk_->instructions()->length(); - while (current != NULL && !is_aborted()) { - // Code for constants in registers is generated lazily. - if (!current->EmitAtUses()) { - VisitInstruction(current); - } - current = current->next(); - } - int end = chunk_->instructions()->length() - 1; - if (end >= start) { - block->set_first_instruction_index(start); - block->set_last_instruction_index(end); - } - block->set_argument_count(argument_count_); - next_block_ = NULL; - current_block_ = NULL; -} - - -void LChunkBuilder::VisitInstruction(HInstruction* current) { - HInstruction* old_current = current_instruction_; - current_instruction_ = current; - - LInstruction* instr = NULL; - if (current->CanReplaceWithDummyUses()) { - if (current->OperandCount() == 0) { - instr = DefineAsRegister(new(zone()) LDummy()); - } else { - DCHECK(!current->OperandAt(0)->IsControlInstruction()); - instr = DefineAsRegister(new(zone()) - LDummyUse(UseAny(current->OperandAt(0)))); - } - for (int i = 1; i < current->OperandCount(); ++i) { - if (current->OperandAt(i)->IsControlInstruction()) continue; - LInstruction* dummy = - new(zone()) LDummyUse(UseAny(current->OperandAt(i))); - dummy->set_hydrogen_value(current); - chunk_->AddInstruction(dummy, current_block_); - } - } else { - HBasicBlock* successor; - if (current->IsControlInstruction() && - HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) && - successor != NULL) { - instr = new(zone()) LGoto(successor); - } else { - instr = current->CompileToLithium(this); - } - } - - argument_count_ += current->argument_delta(); - DCHECK(argument_count_ >= 0); - - if (instr != NULL) { - AddInstruction(instr, current); - } - - current_instruction_ = old_current; -} - - -void LChunkBuilder::AddInstruction(LInstruction* instr, - HInstruction* hydrogen_val) { - // Associate the hydrogen instruction first, since we may need it for - // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below. - instr->set_hydrogen_value(hydrogen_val); - -#if DEBUG - // Make sure that the lithium instruction has either no fixed register - // constraints in temps or the result OR no uses that are only used at - // start. If this invariant doesn't hold, the register allocator can decide - // to insert a split of a range immediately before the instruction due to an - // already allocated register needing to be used for the instruction's fixed - // register constraint. In this case, The register allocator won't see an - // interference between the split child and the use-at-start (it would if - // the it was just a plain use), so it is free to move the split child into - // the same register that is used for the use-at-start. 
- // See https://code.google.com/p/chromium/issues/detail?id=201590 - if (!(instr->ClobbersRegisters() && - instr->ClobbersDoubleRegisters(isolate()))) { - int fixed = 0; - int used_at_start = 0; - for (UseIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->IsUsedAtStart()) ++used_at_start; - } - if (instr->Output() != NULL) { - if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed; - } - for (TempIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->HasFixedPolicy()) ++fixed; - } - DCHECK(fixed == 0 || used_at_start == 0); - } -#endif - - if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) { - instr = AssignPointerMap(instr); - } - if (FLAG_stress_environments && !instr->HasEnvironment()) { - instr = AssignEnvironment(instr); - } - chunk_->AddInstruction(instr, current_block_); - - CreateLazyBailoutForCall(current_block_, instr, hydrogen_val); -} - - -LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) { - LInstruction* result = new (zone()) LPrologue(); - if (info_->scope()->NeedsContext()) { - result = MarkAsCall(result, instr); - } - return result; -} - - -LInstruction* LChunkBuilder::DoGoto(HGoto* instr) { - return new(zone()) LGoto(instr->FirstSuccessor()); -} - - -LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { - HValue* value = instr->value(); - Representation r = value->representation(); - HType type = value->type(); - ToBooleanHints expected = instr->expected_input_types(); - if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny; - - bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() || - type.IsJSArray() || type.IsHeapNumber() || type.IsString(); - LOperand* temp = !easy_case && (expected & ToBooleanHint::kNeedsMap) - ? 
TempRegister() - : NULL; - LInstruction* branch = new(zone()) LBranch(UseRegister(value), temp); - if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) && - (expected & ToBooleanHint::kNeedsMap)) || - expected != ToBooleanHint::kAny)) { - branch = AssignEnvironment(branch); - } - return branch; -} - - -LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) { - return new(zone()) LDebugBreak(); -} - - -LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - return new(zone()) LCmpMapAndBranch(value); -} - - -LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) { - info()->MarkAsRequiresFrame(); - return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value()))); -} - - -LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) { - info()->MarkAsRequiresFrame(); - return DefineAsRegister(new(zone()) LArgumentsElements); -} - - -LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch( - HHasInPrototypeChainAndBranch* instr) { - LOperand* object = UseRegister(instr->object()); - LOperand* prototype = UseRegister(instr->prototype()); - LOperand* temp = TempRegister(); - LHasInPrototypeChainAndBranch* result = - new (zone()) LHasInPrototypeChainAndBranch(object, prototype, temp); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) { - LOperand* receiver = UseRegister(instr->receiver()); - LOperand* function = UseRegister(instr->function()); - LOperand* temp = TempRegister(); - LWrapReceiver* result = - new(zone()) LWrapReceiver(receiver, function, temp); - return AssignEnvironment(DefineSameAsFirst(result)); -} - - -LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) { - LOperand* function = UseFixed(instr->function(), edi); - LOperand* receiver = UseFixed(instr->receiver(), eax); - LOperand* length = UseFixed(instr->length(), ebx); - LOperand* elements = UseFixed(instr->elements(), ecx); - LApplyArguments* result = new(zone()) LApplyArguments(function, - receiver, - length, - elements); - return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) { - int argc = instr->OperandCount(); - for (int i = 0; i < argc; ++i) { - LOperand* argument = UseAny(instr->argument(i)); - AddInstruction(new(zone()) LPushArgument(argument), instr); - } - return NULL; -} - - -LInstruction* LChunkBuilder::DoStoreCodeEntry( - HStoreCodeEntry* store_code_entry) { - LOperand* function = UseRegister(store_code_entry->function()); - LOperand* code_object = UseTempRegister(store_code_entry->code_object()); - return new(zone()) LStoreCodeEntry(function, code_object); -} - - -LInstruction* LChunkBuilder::DoInnerAllocatedObject( - HInnerAllocatedObject* instr) { - LOperand* base_object = UseRegisterAtStart(instr->base_object()); - LOperand* offset = UseRegisterOrConstantAtStart(instr->offset()); - return DefineAsRegister( - new(zone()) LInnerAllocatedObject(base_object, offset)); -} - - -LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) { - return instr->HasNoUses() - ? 
NULL - : DefineAsRegister(new(zone()) LThisFunction); -} - - -LInstruction* LChunkBuilder::DoContext(HContext* instr) { - if (instr->HasNoUses()) return NULL; - - if (info()->IsStub()) { - return DefineFixed(new(zone()) LContext, esi); - } - - return DefineAsRegister(new(zone()) LContext); -} - - -LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) { - LOperand* context = UseFixed(instr->context(), esi); - return MarkAsCall(new(zone()) LDeclareGlobals(context), instr); -} - - -LInstruction* LChunkBuilder::DoCallWithDescriptor( - HCallWithDescriptor* instr) { - CallInterfaceDescriptor descriptor = instr->descriptor(); - DCHECK_EQ(descriptor.GetParameterCount() + - LCallWithDescriptor::kImplicitRegisterParameterCount, - instr->OperandCount()); - - LOperand* target = UseRegisterOrConstantAtStart(instr->target()); - ZoneList ops(instr->OperandCount(), zone()); - // Target - ops.Add(target, zone()); - // Context - LOperand* op = UseFixed(instr->OperandAt(1), esi); - ops.Add(op, zone()); - // Load register parameters. - int i = 0; - for (; i < descriptor.GetRegisterParameterCount(); i++) { - op = UseFixed(instr->OperandAt( - i + LCallWithDescriptor::kImplicitRegisterParameterCount), - descriptor.GetRegisterParameter(i)); - ops.Add(op, zone()); - } - // Push stack parameters. - for (; i < descriptor.GetParameterCount(); i++) { - op = UseAny(instr->OperandAt( - i + LCallWithDescriptor::kImplicitRegisterParameterCount)); - AddInstruction(new (zone()) LPushArgument(op), instr); - } - - LCallWithDescriptor* result = new(zone()) LCallWithDescriptor( - descriptor, ops, zone()); - if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) { - result->MarkAsSyntacticTailCall(); - } - return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) { - LOperand* context = UseFixed(instr->context(), esi); - LOperand* function = UseFixed(instr->function(), edi); - LInvokeFunction* result = new(zone()) LInvokeFunction(context, function); - if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) { - result->MarkAsSyntacticTailCall(); - } - return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { - switch (instr->op()) { - case kMathCos: - return DoMathCos(instr); - case kMathFloor: - return DoMathFloor(instr); - case kMathRound: - return DoMathRound(instr); - case kMathFround: - return DoMathFround(instr); - case kMathAbs: - return DoMathAbs(instr); - case kMathLog: - return DoMathLog(instr); - case kMathExp: - return DoMathExp(instr); - case kMathSqrt: - return DoMathSqrt(instr); - case kMathPowHalf: - return DoMathPowHalf(instr); - case kMathClz32: - return DoMathClz32(instr); - case kMathSin: - return DoMathSin(instr); - default: - UNREACHABLE(); - } -} - -LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) { - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseRegisterAtStart(instr->value()); - if (instr->representation().IsInteger32()) { - LMathFloorI* result = new (zone()) LMathFloorI(input); - return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); - } else { - DCHECK(instr->representation().IsDouble()); - LMathFloorD* result = new (zone()) LMathFloorD(input); - return DefineAsRegister(result); - } -} - -LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) { - 
DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseRegister(instr->value()); - if (instr->representation().IsInteger32()) { - LOperand* temp = FixedTemp(xmm4); - LMathRoundI* result = new (zone()) LMathRoundI(input, temp); - return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); - } else { - DCHECK(instr->representation().IsDouble()); - LMathRoundD* result = new (zone()) LMathRoundD(input); - return DefineAsRegister(result); - } -} - -LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) { - LOperand* input = UseRegister(instr->value()); - LMathFround* result = new (zone()) LMathFround(input); - return DefineAsRegister(result); -} - - -LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) { - LOperand* context = UseAny(instr->context()); // Deferred use. - LOperand* input = UseRegisterAtStart(instr->value()); - LInstruction* result = - DefineSameAsFirst(new(zone()) LMathAbs(context, input)); - Representation r = instr->value()->representation(); - if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result); - if (!r.IsDouble()) result = AssignEnvironment(result); - return result; -} - - -LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseRegisterAtStart(instr->value()); - return MarkAsCall(DefineSameAsFirst(new(zone()) LMathLog(input)), instr); -} - - -LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) { - LOperand* input = UseRegisterAtStart(instr->value()); - LMathClz32* result = new(zone()) LMathClz32(input); - return DefineAsRegister(result); -} - -LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseRegisterAtStart(instr->value()); - return MarkAsCall(DefineSameAsFirst(new (zone()) LMathCos(input)), instr); -} - -LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseRegisterAtStart(instr->value()); - return MarkAsCall(DefineSameAsFirst(new (zone()) LMathSin(input)), instr); -} - -LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseRegisterAtStart(instr->value()); - return MarkAsCall(DefineSameAsFirst(new (zone()) LMathExp(input)), instr); -} - - -LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) { - LOperand* input = UseAtStart(instr->value()); - return DefineAsRegister(new(zone()) LMathSqrt(input)); -} - - -LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) { - LOperand* input = UseRegisterAtStart(instr->value()); - LOperand* temp = TempRegister(); - LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp); - return DefineSameAsFirst(result); -} - - -LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) { - LOperand* context = UseFixed(instr->context(), esi); - LOperand* constructor = UseFixed(instr->constructor(), edi); - LCallNewArray* result = new(zone()) LCallNewArray(context, constructor); - return MarkAsCall(DefineFixed(result, eax), instr); -} - - -LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) { - LOperand* context = UseFixed(instr->context(), esi); - 
return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr); -} - - -LInstruction* LChunkBuilder::DoRor(HRor* instr) { - return DoShift(Token::ROR, instr); -} - - -LInstruction* LChunkBuilder::DoShr(HShr* instr) { - return DoShift(Token::SHR, instr); -} - - -LInstruction* LChunkBuilder::DoSar(HSar* instr) { - return DoShift(Token::SAR, instr); -} - - -LInstruction* LChunkBuilder::DoShl(HShl* instr) { - return DoShift(Token::SHL, instr); -} - - -LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32)); - - LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); - LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand()); - return DefineSameAsFirst(new(zone()) LBitI(left, right)); - } else { - return DoArithmeticT(instr->op(), instr); - } -} - - -LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I( - dividend, divisor)); - if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || - (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) || - (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && - divisor != 1 && divisor != -1)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) { - DCHECK(instr->representation().IsInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LOperand* temp1 = FixedTemp(eax); - LOperand* temp2 = FixedTemp(edx); - LInstruction* result = DefineFixed(new(zone()) LDivByConstI( - dividend, divisor, temp1, temp2), edx); - if (divisor == 0 || - (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || - !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoDivI(HDiv* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseFixed(instr->left(), eax); - LOperand* divisor = UseRegister(instr->right()); - LOperand* temp = FixedTemp(edx); - LInstruction* result = DefineFixed(new(zone()) LDivI( - dividend, divisor, temp), eax); - if (instr->CheckFlag(HValue::kCanBeDivByZero) || - instr->CheckFlag(HValue::kBailoutOnMinusZero) || - instr->CheckFlag(HValue::kCanOverflow) || - !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoDiv(HDiv* instr) { - if (instr->representation().IsSmiOrInteger32()) { - if (instr->RightIsPowerOf2()) { - return DoDivByPowerOf2I(instr); - } else if 
(instr->right()->IsConstant()) { - return DoDivByConstI(instr); - } else { - return DoDivI(instr); - } - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::DIV, instr); - } else { - return DoArithmeticT(Token::DIV, instr); - } -} - - -LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) { - LOperand* dividend = UseRegisterAtStart(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = DefineSameAsFirst(new(zone()) LFlooringDivByPowerOf2I( - dividend, divisor)); - if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || - (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) { - DCHECK(instr->representation().IsInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LOperand* temp1 = FixedTemp(eax); - LOperand* temp2 = FixedTemp(edx); - LOperand* temp3 = - ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) || - (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ? - NULL : TempRegister(); - LInstruction* result = - DefineFixed(new(zone()) LFlooringDivByConstI(dividend, - divisor, - temp1, - temp2, - temp3), - edx); - if (divisor == 0 || - (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseFixed(instr->left(), eax); - LOperand* divisor = UseRegister(instr->right()); - LOperand* temp = FixedTemp(edx); - LInstruction* result = DefineFixed(new(zone()) LFlooringDivI( - dividend, divisor, temp), eax); - if (instr->CheckFlag(HValue::kCanBeDivByZero) || - instr->CheckFlag(HValue::kBailoutOnMinusZero) || - instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) { - if (instr->RightIsPowerOf2()) { - return DoFlooringDivByPowerOf2I(instr); - } else if (instr->right()->IsConstant()) { - return DoFlooringDivByConstI(instr); - } else { - return DoFlooringDivI(instr); - } -} - - -LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegisterAtStart(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I( - dividend, divisor)); - if (instr->CheckFlag(HValue::kLeftCanBeNegative) && - instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - 
DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LOperand* temp1 = FixedTemp(eax); - LOperand* temp2 = FixedTemp(edx); - LInstruction* result = DefineFixed(new(zone()) LModByConstI( - dividend, divisor, temp1, temp2), eax); - if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoModI(HMod* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseFixed(instr->left(), eax); - LOperand* divisor = UseRegister(instr->right()); - LOperand* temp = FixedTemp(edx); - LInstruction* result = DefineFixed(new(zone()) LModI( - dividend, divisor, temp), edx); - if (instr->CheckFlag(HValue::kCanBeDivByZero) || - instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoMod(HMod* instr) { - if (instr->representation().IsSmiOrInteger32()) { - if (instr->RightIsPowerOf2()) { - return DoModByPowerOf2I(instr); - } else if (instr->right()->IsConstant()) { - return DoModByConstI(instr); - } else { - return DoModI(instr); - } - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::MOD, instr); - } else { - return DoArithmeticT(Token::MOD, instr); - } -} - - -LInstruction* LChunkBuilder::DoMul(HMul* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); - HValue* h_right = instr->BetterRightOperand(); - LOperand* right = UseOrConstant(h_right); - LOperand* temp = NULL; - if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - temp = TempRegister(); - } - LMulI* mul = new(zone()) LMulI(left, right, temp); - int constant_value = - h_right->IsConstant() ? HConstant::cast(h_right)->Integer32Value() : 0; - // |needs_environment| must mirror the cases where LCodeGen::DoMulI calls - // |DeoptimizeIf|. 
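Concretely, the deopt cases that |needs_environment| has to cover correspond to two runtime outcomes of an int32 multiply: the product overflows 32 bits, or the product is zero while JavaScript number semantics call for -0.0 (zero times a negative factor). The sketch below only illustrates those two conditions, with invented names (CheckedMul, MulOutcome); it is not the deleted LCodeGen::DoMulI code.

// Illustrative sketch: the two conditions under which an int32 multiply
// cannot stay in the int32 domain and a compiler like Crankshaft would have
// to deoptimize:
//   1. the mathematical product overflows int32 (kCanOverflow), and
//   2. the product is zero but number semantics require -0.0
//      (kBailoutOnMinusZero): zero times a negative number is -0.
#include <cstdint>
#include <iostream>

enum class MulOutcome { kFitsInt32, kOverflow, kMinusZero };

MulOutcome CheckedMul(int32_t a, int32_t b, int32_t* out) {
  int64_t wide = static_cast<int64_t>(a) * static_cast<int64_t>(b);
  if (wide < INT32_MIN || wide > INT32_MAX) return MulOutcome::kOverflow;
  if (wide == 0 && (a < 0 || b < 0)) return MulOutcome::kMinusZero;
  *out = static_cast<int32_t>(wide);
  return MulOutcome::kFitsInt32;
}

int main() {
  int32_t r = 0;
  std::cout << (CheckedMul(0, -7, &r) == MulOutcome::kMinusZero) << "\n";   // 1
  std::cout << (CheckedMul(1 << 20, 1 << 20, &r) == MulOutcome::kOverflow)
            << "\n";                                                        // 1
  std::cout << (CheckedMul(6, 7, &r) == MulOutcome::kFitsInt32) << "\n";    // 1
  return 0;
}

This also explains why constant_value feeds into needs_environment in the deleted builder code: with a constant positive right factor the product can never be -0, so the minus-zero case needs no environment there.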
- bool needs_environment = - instr->CheckFlag(HValue::kCanOverflow) || - (instr->CheckFlag(HValue::kBailoutOnMinusZero) && - (!right->IsConstantOperand() || constant_value <= 0)); - if (needs_environment) { - AssignEnvironment(mul); - } - return DefineSameAsFirst(mul); - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::MUL, instr); - } else { - return DoArithmeticT(Token::MUL, instr); - } -} - - -LInstruction* LChunkBuilder::DoSub(HSub* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseOrConstantAtStart(instr->right()); - LSubI* sub = new(zone()) LSubI(left, right); - LInstruction* result = DefineSameAsFirst(sub); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::SUB, instr); - } else { - return DoArithmeticT(Token::SUB, instr); - } -} - - -LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - // Check to see if it would be advantageous to use an lea instruction rather - // than an add. This is the case when no overflow check is needed and there - // are multiple uses of the add's inputs, so using a 3-register add will - // preserve all input values for later uses. - bool use_lea = LAddI::UseLea(instr); - LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); - HValue* right_candidate = instr->BetterRightOperand(); - LOperand* right = use_lea - ? UseRegisterOrConstantAtStart(right_candidate) - : UseOrConstantAtStart(right_candidate); - LAddI* add = new(zone()) LAddI(left, right); - bool can_overflow = instr->CheckFlag(HValue::kCanOverflow); - LInstruction* result = use_lea - ? DefineAsRegister(add) - : DefineSameAsFirst(add); - if (can_overflow) { - result = AssignEnvironment(result); - } - return result; - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::ADD, instr); - } else if (instr->representation().IsExternal()) { - DCHECK(instr->IsConsistentExternalRepresentation()); - DCHECK(!instr->CheckFlag(HValue::kCanOverflow)); - bool use_lea = LAddI::UseLea(instr); - LOperand* left = UseRegisterAtStart(instr->left()); - HValue* right_candidate = instr->right(); - LOperand* right = use_lea - ? UseRegisterOrConstantAtStart(right_candidate) - : UseOrConstantAtStart(right_candidate); - LAddI* add = new(zone()) LAddI(left, right); - LInstruction* result = use_lea - ? 
DefineAsRegister(add) - : DefineSameAsFirst(add); - return result; - } else { - return DoArithmeticT(Token::ADD, instr); - } -} - - -LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) { - LOperand* left = NULL; - LOperand* right = NULL; - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - left = UseRegisterAtStart(instr->BetterLeftOperand()); - right = UseOrConstantAtStart(instr->BetterRightOperand()); - } else { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - DCHECK(instr->right()->representation().IsDouble()); - left = UseRegisterAtStart(instr->left()); - right = UseRegisterAtStart(instr->right()); - } - LMathMinMax* minmax = new(zone()) LMathMinMax(left, right); - return DefineSameAsFirst(minmax); -} - - -LInstruction* LChunkBuilder::DoPower(HPower* instr) { - DCHECK(instr->representation().IsDouble()); - // We call a C function for double power. It can't trigger a GC. - // We need to use fixed result register for the call. - Representation exponent_type = instr->right()->representation(); - DCHECK(instr->left()->representation().IsDouble()); - LOperand* left = UseFixedDouble(instr->left(), xmm2); - LOperand* right = - exponent_type.IsDouble() - ? UseFixedDouble(instr->right(), xmm1) - : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent()); - LPower* result = new(zone()) LPower(left, right); - return MarkAsCall(DefineFixedDouble(result, xmm3), instr, - CAN_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { - DCHECK(instr->left()->representation().IsSmiOrTagged()); - DCHECK(instr->right()->representation().IsSmiOrTagged()); - LOperand* context = UseFixed(instr->context(), esi); - LOperand* left = UseFixed(instr->left(), edx); - LOperand* right = UseFixed(instr->right(), eax); - LCmpT* result = new(zone()) LCmpT(context, left, right); - return MarkAsCall(DefineFixed(result, eax), instr); -} - - -LInstruction* LChunkBuilder::DoCompareNumericAndBranch( - HCompareNumericAndBranch* instr) { - Representation r = instr->representation(); - if (r.IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(r)); - DCHECK(instr->right()->representation().Equals(r)); - LOperand* left = UseRegisterOrConstantAtStart(instr->left()); - LOperand* right = UseOrConstantAtStart(instr->right()); - return new(zone()) LCompareNumericAndBranch(left, right); - } else { - DCHECK(r.IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - DCHECK(instr->right()->representation().IsDouble()); - LOperand* left; - LOperand* right; - if (CanBeImmediateConstant(instr->left()) && - CanBeImmediateConstant(instr->right())) { - // The code generator requires either both inputs to be constant - // operands, or neither. 
- left = UseConstant(instr->left()); - right = UseConstant(instr->right()); - } else { - left = UseRegisterAtStart(instr->left()); - right = UseRegisterAtStart(instr->right()); - } - return new(zone()) LCompareNumericAndBranch(left, right); - } -} - - -LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch( - HCompareObjectEqAndBranch* instr) { - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseOrConstantAtStart(instr->right()); - return new(zone()) LCmpObjectEqAndBranch(left, right); -} - - -LInstruction* LChunkBuilder::DoCompareHoleAndBranch( - HCompareHoleAndBranch* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - return new(zone()) LCmpHoleAndBranch(value); -} - - -LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* temp = TempRegister(); - return new(zone()) LIsStringAndBranch(UseRegister(instr->value()), temp); -} - - -LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - return new(zone()) LIsSmiAndBranch(Use(instr->value())); -} - - -LInstruction* LChunkBuilder::DoIsUndetectableAndBranch( - HIsUndetectableAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - return new(zone()) LIsUndetectableAndBranch( - UseRegisterAtStart(instr->value()), TempRegister()); -} - - -LInstruction* LChunkBuilder::DoStringCompareAndBranch( - HStringCompareAndBranch* instr) { - DCHECK(instr->left()->representation().IsTagged()); - DCHECK(instr->right()->representation().IsTagged()); - LOperand* context = UseFixed(instr->context(), esi); - LOperand* left = UseFixed(instr->left(), edx); - LOperand* right = UseFixed(instr->right(), eax); - - LStringCompareAndBranch* result = new(zone()) - LStringCompareAndBranch(context, left, right); - - return MarkAsCall(result, instr); -} - - -LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch( - HHasInstanceTypeAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - return new(zone()) LHasInstanceTypeAndBranch( - UseRegisterAtStart(instr->value()), - TempRegister()); -} - -LInstruction* LChunkBuilder::DoClassOfTestAndBranch( - HClassOfTestAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - return new (zone()) LClassOfTestAndBranch(UseRegister(instr->value()), - TempRegister(), TempRegister()); -} - -LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) { - LOperand* string = UseRegisterAtStart(instr->string()); - LOperand* index = UseRegisterOrConstantAtStart(instr->index()); - return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index)); -} - - -LOperand* LChunkBuilder::GetSeqStringSetCharOperand(HSeqStringSetChar* instr) { - if (instr->encoding() == String::ONE_BYTE_ENCODING) { - if (FLAG_debug_code) { - return UseFixed(instr->value(), eax); - } else { - return UseFixedOrConstant(instr->value(), eax); - } - } else { - if (FLAG_debug_code) { - return UseRegisterAtStart(instr->value()); - } else { - return UseRegisterOrConstantAtStart(instr->value()); - } - } -} - - -LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) { - LOperand* string = UseRegisterAtStart(instr->string()); - LOperand* index = FLAG_debug_code - ? UseRegisterAtStart(instr->index()) - : UseRegisterOrConstantAtStart(instr->index()); - LOperand* value = GetSeqStringSetCharOperand(instr); - LOperand* context = FLAG_debug_code ? 
UseFixed(instr->context(), esi) : NULL; - LInstruction* result = new(zone()) LSeqStringSetChar(context, string, - index, value); - if (FLAG_debug_code) { - result = MarkAsCall(result, instr); - } - return result; -} - - -LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { - if (!FLAG_debug_code && instr->skip_check()) return NULL; - LOperand* index = UseRegisterOrConstantAtStart(instr->index()); - LOperand* length = !index->IsConstantOperand() - ? UseOrConstantAtStart(instr->length()) - : UseAtStart(instr->length()); - LInstruction* result = new(zone()) LBoundsCheck(index, length); - if (!FLAG_debug_code || !instr->skip_check()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) { - // The control instruction marking the end of a block that completed - // abruptly (e.g., threw an exception). There is nothing specific to do. - return NULL; -} - - -LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) { - return NULL; -} - - -LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) { - // All HForceRepresentation instructions should be eliminated in the - // representation change phase of Hydrogen. - UNREACHABLE(); -} - - -LInstruction* LChunkBuilder::DoChange(HChange* instr) { - Representation from = instr->from(); - Representation to = instr->to(); - HValue* val = instr->value(); - if (from.IsSmi()) { - if (to.IsTagged()) { - LOperand* value = UseRegister(val); - return DefineSameAsFirst(new(zone()) LDummyUse(value)); - } - from = Representation::Tagged(); - } - if (from.IsTagged()) { - if (to.IsDouble()) { - LOperand* value = UseRegister(val); - LOperand* temp = TempRegister(); - LInstruction* result = - DefineAsRegister(new(zone()) LNumberUntagD(value, temp)); - if (!val->representation().IsSmi()) result = AssignEnvironment(result); - return result; - } else if (to.IsSmi()) { - LOperand* value = UseRegister(val); - if (val->type().IsSmi()) { - return DefineSameAsFirst(new(zone()) LDummyUse(value)); - } - return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value))); - } else { - DCHECK(to.IsInteger32()); - if (val->type().IsSmi() || val->representation().IsSmi()) { - LOperand* value = UseRegister(val); - return DefineSameAsFirst(new(zone()) LSmiUntag(value, false)); - } else { - LOperand* value = UseRegister(val); - bool truncating = instr->CanTruncateToInt32(); - LOperand* xmm_temp = !truncating ? FixedTemp(xmm1) : NULL; - LInstruction* result = - DefineSameAsFirst(new(zone()) LTaggedToI(value, xmm_temp)); - if (!val->representation().IsSmi()) result = AssignEnvironment(result); - return result; - } - } - } else if (from.IsDouble()) { - if (to.IsTagged()) { - info()->MarkAsDeferredCalling(); - LOperand* value = UseRegisterAtStart(val); - LOperand* temp = FLAG_inline_new ? TempRegister() : NULL; - LUnallocated* result_temp = TempRegister(); - LNumberTagD* result = new(zone()) LNumberTagD(value, temp); - return AssignPointerMap(Define(result, result_temp)); - } else if (to.IsSmi()) { - LOperand* value = UseRegister(val); - return AssignEnvironment( - DefineAsRegister(new(zone()) LDoubleToSmi(value))); - } else { - DCHECK(to.IsInteger32()); - bool truncating = instr->CanTruncateToInt32(); - bool needs_temp = !truncating; - LOperand* value = needs_temp ? UseTempRegister(val) : UseRegister(val); - LOperand* temp = needs_temp ? 
TempRegister() : NULL; - LInstruction* result = - DefineAsRegister(new(zone()) LDoubleToI(value, temp)); - if (!truncating) result = AssignEnvironment(result); - return result; - } - } else if (from.IsInteger32()) { - info()->MarkAsDeferredCalling(); - if (to.IsTagged()) { - LOperand* value = UseRegister(val); - if (!instr->CheckFlag(HValue::kCanOverflow)) { - return DefineSameAsFirst(new(zone()) LSmiTag(value)); - } else if (val->CheckFlag(HInstruction::kUint32)) { - LOperand* temp = TempRegister(); - LNumberTagU* result = new(zone()) LNumberTagU(value, temp); - return AssignPointerMap(DefineSameAsFirst(result)); - } else { - LOperand* temp = TempRegister(); - LNumberTagI* result = new(zone()) LNumberTagI(value, temp); - return AssignPointerMap(DefineSameAsFirst(result)); - } - } else if (to.IsSmi()) { - LOperand* value = UseRegister(val); - LInstruction* result = DefineSameAsFirst(new(zone()) LSmiTag(value)); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; - } else { - DCHECK(to.IsDouble()); - if (val->CheckFlag(HInstruction::kUint32)) { - return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val))); - } else { - return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val))); - } - } - } - UNREACHABLE(); -} - - -LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) { - LOperand* value = UseAtStart(instr->value()); - LInstruction* result = new(zone()) LCheckNonSmi(value); - if (!instr->value()->type().IsHeapObject()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - return AssignEnvironment(new(zone()) LCheckSmi(value)); -} - - -LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered( - HCheckArrayBufferNotNeutered* instr) { - LOperand* view = UseRegisterAtStart(instr->value()); - LOperand* scratch = TempRegister(); - LCheckArrayBufferNotNeutered* result = - new (zone()) LCheckArrayBufferNotNeutered(view, scratch); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* temp = TempRegister(); - LCheckInstanceType* result = new(zone()) LCheckInstanceType(value, temp); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) { - // If the object is in new space, we'll emit a global cell compare and so - // want the value in a register. If the object gets promoted before we - // emit code, we will still get the register but will do an immediate - // compare instead of the cell compare. This is safe. - LOperand* value = instr->object_in_new_space() - ? 
UseRegisterAtStart(instr->value()) : UseAtStart(instr->value()); - return AssignEnvironment(new(zone()) LCheckValue(value)); -} - - -LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) { - if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps; - LOperand* value = UseRegisterAtStart(instr->value()); - LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value)); - if (instr->HasMigrationTarget()) { - info()->MarkAsDeferredCalling(); - result = AssignPointerMap(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) { - HValue* value = instr->value(); - Representation input_rep = value->representation(); - if (input_rep.IsDouble()) { - LOperand* reg = UseRegister(value); - return DefineFixed(new(zone()) LClampDToUint8(reg), eax); - } else if (input_rep.IsInteger32()) { - LOperand* reg = UseFixed(value, eax); - return DefineFixed(new(zone()) LClampIToUint8(reg), eax); - } else { - DCHECK(input_rep.IsSmiOrTagged()); - LOperand* reg = UseFixed(value, eax); - // Register allocator doesn't (yet) support allocation of double - // temps. Reserve xmm1 explicitly. - LOperand* temp = FixedTemp(xmm1); - LClampTToUint8* result = new(zone()) LClampTToUint8(reg, temp); - return AssignEnvironment(DefineFixed(result, eax)); - } -} - - -LInstruction* LChunkBuilder::DoReturn(HReturn* instr) { - LOperand* context = info()->IsStub() ? UseFixed(instr->context(), esi) : NULL; - LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count()); - return new(zone()) LReturn( - UseFixed(instr->value(), eax), context, parameter_count); -} - - -LInstruction* LChunkBuilder::DoConstant(HConstant* instr) { - Representation r = instr->representation(); - if (r.IsSmi()) { - return DefineAsRegister(new(zone()) LConstantS); - } else if (r.IsInteger32()) { - return DefineAsRegister(new(zone()) LConstantI); - } else if (r.IsDouble()) { - uint64_t const bits = instr->DoubleValueAsBits(); - LOperand* temp = bits ? TempRegister() : nullptr; - return DefineAsRegister(new(zone()) LConstantD(temp)); - } else if (r.IsExternal()) { - return DefineAsRegister(new(zone()) LConstantE); - } else if (r.IsTagged()) { - return DefineAsRegister(new(zone()) LConstantT); - } else { - UNREACHABLE(); - } -} - - -LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) { - LOperand* context = UseRegisterAtStart(instr->value()); - LInstruction* result = - DefineAsRegister(new(zone()) LLoadContextSlot(context)); - if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) { - LOperand* value; - LOperand* temp; - LOperand* context = UseRegister(instr->context()); - if (instr->NeedsWriteBarrier()) { - value = UseTempRegister(instr->value()); - temp = TempRegister(); - } else { - value = UseRegister(instr->value()); - temp = NULL; - } - LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp); - if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) { - LOperand* obj = (instr->access().IsExternalMemory() && - instr->access().offset() == 0) - ? 
UseRegisterOrConstantAtStart(instr->object()) - : UseRegisterAtStart(instr->object()); - return DefineAsRegister(new(zone()) LLoadNamedField(obj)); -} - - -LInstruction* LChunkBuilder::DoLoadFunctionPrototype( - HLoadFunctionPrototype* instr) { - return AssignEnvironment(DefineAsRegister( - new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()), - TempRegister()))); -} - - -LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) { - return DefineAsRegister(new(zone()) LLoadRoot); -} - - -LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { - DCHECK(instr->key()->representation().IsSmiOrInteger32()); - ElementsKind elements_kind = instr->elements_kind(); - bool clobbers_key = ExternalArrayOpRequiresTemp( - instr->key()->representation(), elements_kind); - LOperand* key = clobbers_key - ? UseTempRegister(instr->key()) - : UseRegisterOrConstantAtStart(instr->key()); - LInstruction* result = NULL; - - if (!instr->is_fixed_typed_array()) { - LOperand* obj = UseRegisterAtStart(instr->elements()); - result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr)); - } else { - DCHECK( - (instr->representation().IsInteger32() && - !(IsDoubleOrFloatElementsKind(instr->elements_kind()))) || - (instr->representation().IsDouble() && - (IsDoubleOrFloatElementsKind(instr->elements_kind())))); - LOperand* backing_store = UseRegister(instr->elements()); - LOperand* backing_store_owner = UseAny(instr->backing_store_owner()); - result = DefineAsRegister( - new (zone()) LLoadKeyed(backing_store, key, backing_store_owner)); - } - - bool needs_environment; - if (instr->is_fixed_typed_array()) { - // see LCodeGen::DoLoadKeyedExternalArray - needs_environment = elements_kind == UINT32_ELEMENTS && - !instr->CheckFlag(HInstruction::kUint32); - } else { - // see LCodeGen::DoLoadKeyedFixedDoubleArray and - // LCodeGen::DoLoadKeyedFixedArray - needs_environment = - instr->RequiresHoleCheck() || - (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && info()->IsStub()); - } - - if (needs_environment) { - result = AssignEnvironment(result); - } - return result; -} - - -LOperand* LChunkBuilder::GetStoreKeyedValueOperand(HStoreKeyed* instr) { - ElementsKind elements_kind = instr->elements_kind(); - - // Determine if we need a byte register in this case for the value. 
- bool val_is_fixed_register = - elements_kind == UINT8_ELEMENTS || - elements_kind == INT8_ELEMENTS || - elements_kind == UINT8_CLAMPED_ELEMENTS; - if (val_is_fixed_register) { - return UseFixed(instr->value(), eax); - } - - return UseRegister(instr->value()); -} - - -LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { - if (!instr->is_fixed_typed_array()) { - DCHECK(instr->elements()->representation().IsTagged()); - DCHECK(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsSmi()); - - if (instr->value()->representation().IsDouble()) { - LOperand* object = UseRegisterAtStart(instr->elements()); - LOperand* val = NULL; - val = UseRegisterAtStart(instr->value()); - LOperand* key = UseRegisterOrConstantAtStart(instr->key()); - return new (zone()) LStoreKeyed(object, key, val, nullptr); - } else { - DCHECK(instr->value()->representation().IsSmiOrTagged()); - bool needs_write_barrier = instr->NeedsWriteBarrier(); - - LOperand* obj = UseRegister(instr->elements()); - LOperand* val; - LOperand* key; - if (needs_write_barrier) { - val = UseTempRegister(instr->value()); - key = UseTempRegister(instr->key()); - } else { - val = UseRegisterOrConstantAtStart(instr->value()); - key = UseRegisterOrConstantAtStart(instr->key()); - } - return new (zone()) LStoreKeyed(obj, key, val, nullptr); - } - } - - ElementsKind elements_kind = instr->elements_kind(); - DCHECK( - (instr->value()->representation().IsInteger32() && - !IsDoubleOrFloatElementsKind(elements_kind)) || - (instr->value()->representation().IsDouble() && - IsDoubleOrFloatElementsKind(elements_kind))); - DCHECK(instr->elements()->representation().IsExternal()); - - LOperand* backing_store = UseRegister(instr->elements()); - LOperand* backing_store_owner = UseAny(instr->backing_store_owner()); - LOperand* val = GetStoreKeyedValueOperand(instr); - bool clobbers_key = ExternalArrayOpRequiresTemp( - instr->key()->representation(), elements_kind); - LOperand* key = clobbers_key - ? 
UseTempRegister(instr->key()) - : UseRegisterOrConstantAtStart(instr->key()); - return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner); -} - - -LInstruction* LChunkBuilder::DoTransitionElementsKind( - HTransitionElementsKind* instr) { - if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) { - LOperand* object = UseRegister(instr->object()); - LOperand* new_map_reg = TempRegister(); - LOperand* temp_reg = TempRegister(); - LTransitionElementsKind* result = - new(zone()) LTransitionElementsKind(object, NULL, - new_map_reg, temp_reg); - return result; - } else { - LOperand* object = UseFixed(instr->object(), eax); - LOperand* context = UseFixed(instr->context(), esi); - LTransitionElementsKind* result = - new(zone()) LTransitionElementsKind(object, context, NULL, NULL); - return MarkAsCall(result, instr); - } -} - - -LInstruction* LChunkBuilder::DoTrapAllocationMemento( - HTrapAllocationMemento* instr) { - LOperand* object = UseRegister(instr->object()); - LOperand* temp = TempRegister(); - LTrapAllocationMemento* result = - new(zone()) LTrapAllocationMemento(object, temp); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) { - info()->MarkAsDeferredCalling(); - LOperand* context = UseFixed(instr->context(), esi); - LOperand* object = Use(instr->object()); - LOperand* elements = Use(instr->elements()); - LOperand* key = UseRegisterOrConstant(instr->key()); - LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity()); - - LMaybeGrowElements* result = new (zone()) - LMaybeGrowElements(context, object, elements, key, current_capacity); - DefineFixed(result, eax); - return AssignPointerMap(AssignEnvironment(result)); -} - - -LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { - bool is_in_object = instr->access().IsInobject(); - bool is_external_location = instr->access().IsExternalMemory() && - instr->access().offset() == 0; - bool needs_write_barrier = instr->NeedsWriteBarrier(); - bool needs_write_barrier_for_map = instr->has_transition() && - instr->NeedsWriteBarrierForMap(); - - LOperand* obj; - if (needs_write_barrier) { - obj = is_in_object - ? UseRegister(instr->object()) - : UseTempRegister(instr->object()); - } else if (is_external_location) { - DCHECK(!is_in_object); - DCHECK(!needs_write_barrier); - DCHECK(!needs_write_barrier_for_map); - obj = UseRegisterOrConstant(instr->object()); - } else { - obj = needs_write_barrier_for_map - ? UseRegister(instr->object()) - : UseRegisterAtStart(instr->object()); - } - - bool can_be_constant = instr->value()->IsConstant() && - HConstant::cast(instr->value())->NotInNewSpace() && - !instr->field_representation().IsDouble(); - - LOperand* val; - if (instr->field_representation().IsInteger8() || - instr->field_representation().IsUInteger8()) { - // mov_b requires a byte register (i.e. any of eax, ebx, ecx, edx). - // Just force the value to be in eax and we're safe here. - val = UseFixed(instr->value(), eax); - } else if (needs_write_barrier) { - val = UseTempRegister(instr->value()); - } else if (can_be_constant) { - val = UseRegisterOrConstant(instr->value()); - } else if (instr->field_representation().IsDouble()) { - val = UseRegisterAtStart(instr->value()); - } else { - val = UseRegister(instr->value()); - } - - // We only need a scratch register if we have a write barrier or we - // have a store into the properties array (not in-object-property). 
- LOperand* temp = (!is_in_object || needs_write_barrier || - needs_write_barrier_for_map) ? TempRegister() : NULL; - - // We need a temporary register for write barrier of the map field. - LOperand* temp_map = needs_write_barrier_for_map ? TempRegister() : NULL; - - return new(zone()) LStoreNamedField(obj, val, temp, temp_map); -} - - -LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) { - LOperand* context = UseFixed(instr->context(), esi); - LOperand* left = UseFixed(instr->left(), edx); - LOperand* right = UseFixed(instr->right(), eax); - LStringAdd* string_add = new(zone()) LStringAdd(context, left, right); - return MarkAsCall(DefineFixed(string_add, eax), instr); -} - - -LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) { - LOperand* string = UseTempRegister(instr->string()); - LOperand* index = UseTempRegister(instr->index()); - LOperand* context = UseAny(instr->context()); - LStringCharCodeAt* result = - new(zone()) LStringCharCodeAt(context, string, index); - return AssignPointerMap(DefineAsRegister(result)); -} - - -LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) { - LOperand* char_code = UseRegister(instr->value()); - LOperand* context = UseAny(instr->context()); - LStringCharFromCode* result = - new(zone()) LStringCharFromCode(context, char_code); - return AssignPointerMap(DefineAsRegister(result)); -} - - -LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) { - LOperand* size = instr->size()->IsConstant() ? UseConstant(instr->size()) - : UseRegister(instr->size()); - if (instr->IsAllocationFolded()) { - LOperand* temp = TempRegister(); - LFastAllocate* result = new (zone()) LFastAllocate(size, temp); - return DefineAsRegister(result); - } else { - info()->MarkAsDeferredCalling(); - LOperand* context = UseAny(instr->context()); - LOperand* temp = TempRegister(); - LAllocate* result = new (zone()) LAllocate(context, size, temp); - return AssignPointerMap(DefineAsRegister(result)); - } -} - - -LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) { - DCHECK(argument_count_ == 0); - allocator_->MarkAsOsrEntry(); - current_block_->last_environment()->set_ast_id(instr->ast_id()); - return AssignEnvironment(new(zone()) LOsrEntry); -} - - -LInstruction* LChunkBuilder::DoParameter(HParameter* instr) { - LParameter* result = new(zone()) LParameter; - if (instr->kind() == HParameter::STACK_PARAMETER) { - int spill_index = chunk()->GetParameterStackSlot(instr->index()); - return DefineAsSpilled(result, spill_index); - } else { - DCHECK(info()->IsStub()); - CallInterfaceDescriptor descriptor = graph()->descriptor(); - int index = static_cast<int>(instr->index()); - Register reg = descriptor.GetRegisterParameter(index); - return DefineFixed(result, reg); - } -} - - -LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) { - // Use an index that corresponds to the location in the unoptimized frame, - // which the optimized frame will subsume.
- int env_index = instr->index(); - int spill_index = 0; - if (instr->environment()->is_parameter_index(env_index)) { - spill_index = chunk()->GetParameterStackSlot(env_index); - } else { - spill_index = env_index - instr->environment()->first_local_index(); - if (spill_index > LUnallocated::kMaxFixedSlotIndex) { - Retry(kNotEnoughSpillSlotsForOsr); - spill_index = 0; - } - spill_index += StandardFrameConstants::kFixedSlotCount; - } - return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index); -} - - -LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) { - // There are no real uses of the arguments object. - // arguments.length and element access are supported directly on - // stack arguments, and any real arguments object use causes a bailout. - // So this value is never used. - return NULL; -} - - -LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) { - instr->ReplayEnvironment(current_block_->last_environment()); - - // There are no real uses of a captured object. - return NULL; -} - - -LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) { - info()->MarkAsRequiresFrame(); - LOperand* args = UseRegister(instr->arguments()); - LOperand* length; - LOperand* index; - if (instr->length()->IsConstant() && instr->index()->IsConstant()) { - length = UseRegisterOrConstant(instr->length()); - index = UseOrConstant(instr->index()); - } else { - length = UseTempRegister(instr->length()); - index = Use(instr->index()); - } - return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index)); -} - - -LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) { - LOperand* context = UseFixed(instr->context(), esi); - LOperand* value = UseFixed(instr->value(), ebx); - LTypeof* result = new(zone()) LTypeof(context, value); - return MarkAsCall(DefineFixed(result, eax), instr); -} - - -LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) { - return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value())); -} - - -LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) { - instr->ReplayEnvironment(current_block_->last_environment()); - return NULL; -} - - -LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) { - info()->MarkAsDeferredCalling(); - if (instr->is_function_entry()) { - LOperand* context = UseFixed(instr->context(), esi); - return MarkAsCall(new(zone()) LStackCheck(context), instr); - } else { - DCHECK(instr->is_backwards_branch()); - LOperand* context = UseAny(instr->context()); - return AssignEnvironment( - AssignPointerMap(new(zone()) LStackCheck(context))); - } -} - - -LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { - HEnvironment* outer = current_block_->last_environment(); - outer->set_ast_id(instr->ReturnId()); - HConstant* undefined = graph()->GetConstantUndefined(); - HEnvironment* inner = outer->CopyForInlining( - instr->closure(), instr->arguments_count(), instr->function(), undefined, - instr->inlining_kind(), instr->syntactic_tail_call_mode()); - // Only replay binding of arguments object if it wasn't removed from graph. 
- if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) { - inner->Bind(instr->arguments_var(), instr->arguments_object()); - } - inner->BindContext(instr->closure_context()); - inner->set_entry(instr); - current_block_->UpdateEnvironment(inner); - return NULL; -} - - -LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) { - LInstruction* pop = NULL; - - HEnvironment* env = current_block_->last_environment(); - - if (env->entry()->arguments_pushed()) { - int argument_count = env->arguments_environment()->parameter_count(); - pop = new(zone()) LDrop(argument_count); - DCHECK(instr->argument_delta() == -argument_count); - } - - HEnvironment* outer = current_block_->last_environment()-> - DiscardInlined(false); - current_block_->UpdateEnvironment(outer); - return pop; -} - - -LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) { - LOperand* context = UseFixed(instr->context(), esi); - LOperand* object = UseFixed(instr->enumerable(), eax); - LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object); - return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) { - LOperand* map = UseRegister(instr->map()); - return AssignEnvironment(DefineAsRegister( - new(zone()) LForInCacheArray(map))); -} - - -LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* map = UseRegisterAtStart(instr->map()); - return AssignEnvironment(new(zone()) LCheckMapValue(value, map)); -} - - -LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) { - LOperand* object = UseRegister(instr->object()); - LOperand* index = UseTempRegister(instr->index()); - LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index); - LInstruction* result = DefineSameAsFirst(load); - return AssignPointerMap(result); -} - -} // namespace internal -} // namespace v8 - -#endif // V8_TARGET_ARCH_IA32 diff --git a/src/crankshaft/ia32/lithium-ia32.h b/src/crankshaft/ia32/lithium-ia32.h deleted file mode 100644 index ce30e1d0cc..0000000000 --- a/src/crankshaft/ia32/lithium-ia32.h +++ /dev/null @@ -1,2514 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_IA32_LITHIUM_IA32_H_ -#define V8_CRANKSHAFT_IA32_LITHIUM_IA32_H_ - -#include "src/crankshaft/hydrogen.h" -#include "src/crankshaft/lithium.h" -#include "src/crankshaft/lithium-allocator.h" -#include "src/safepoint-table.h" -#include "src/utils.h" - -namespace v8 { -namespace internal { - -namespace compiler { -class RCodeVisualizer; -} - -// Forward declarations. 
-class LCodeGen; - -#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \ - V(AccessArgumentsAt) \ - V(AddI) \ - V(Allocate) \ - V(ApplyArguments) \ - V(ArgumentsElements) \ - V(ArgumentsLength) \ - V(ArithmeticD) \ - V(ArithmeticT) \ - V(BitI) \ - V(BoundsCheck) \ - V(Branch) \ - V(CallWithDescriptor) \ - V(CallNewArray) \ - V(CallRuntime) \ - V(CheckArrayBufferNotNeutered) \ - V(CheckInstanceType) \ - V(CheckMaps) \ - V(CheckMapValue) \ - V(CheckNonSmi) \ - V(CheckSmi) \ - V(CheckValue) \ - V(ClampDToUint8) \ - V(ClampIToUint8) \ - V(ClampTToUint8) \ - V(ClassOfTestAndBranch) \ - V(CompareNumericAndBranch) \ - V(CmpObjectEqAndBranch) \ - V(CmpHoleAndBranch) \ - V(CmpMapAndBranch) \ - V(CmpT) \ - V(ConstantD) \ - V(ConstantE) \ - V(ConstantI) \ - V(ConstantS) \ - V(ConstantT) \ - V(Context) \ - V(DebugBreak) \ - V(DeclareGlobals) \ - V(Deoptimize) \ - V(DivByConstI) \ - V(DivByPowerOf2I) \ - V(DivI) \ - V(DoubleToI) \ - V(DoubleToSmi) \ - V(Drop) \ - V(Dummy) \ - V(DummyUse) \ - V(FastAllocate) \ - V(FlooringDivByConstI) \ - V(FlooringDivByPowerOf2I) \ - V(FlooringDivI) \ - V(ForInCacheArray) \ - V(ForInPrepareMap) \ - V(Goto) \ - V(HasInPrototypeChainAndBranch) \ - V(HasInstanceTypeAndBranch) \ - V(InnerAllocatedObject) \ - V(InstructionGap) \ - V(Integer32ToDouble) \ - V(InvokeFunction) \ - V(IsStringAndBranch) \ - V(IsSmiAndBranch) \ - V(IsUndetectableAndBranch) \ - V(Label) \ - V(LazyBailout) \ - V(LoadContextSlot) \ - V(LoadFieldByIndex) \ - V(LoadFunctionPrototype) \ - V(LoadKeyed) \ - V(LoadNamedField) \ - V(LoadRoot) \ - V(MathAbs) \ - V(MathClz32) \ - V(MathCos) \ - V(MathExp) \ - V(MathFloorD) \ - V(MathFloorI) \ - V(MathFround) \ - V(MathLog) \ - V(MathMinMax) \ - V(MathPowHalf) \ - V(MathRoundD) \ - V(MathRoundI) \ - V(MathSin) \ - V(MathSqrt) \ - V(MaybeGrowElements) \ - V(ModByConstI) \ - V(ModByPowerOf2I) \ - V(ModI) \ - V(MulI) \ - V(NumberTagD) \ - V(NumberTagI) \ - V(NumberTagU) \ - V(NumberUntagD) \ - V(OsrEntry) \ - V(Parameter) \ - V(Power) \ - V(Prologue) \ - V(PushArgument) \ - V(Return) \ - V(SeqStringGetChar) \ - V(SeqStringSetChar) \ - V(ShiftI) \ - V(SmiTag) \ - V(SmiUntag) \ - V(StackCheck) \ - V(StoreCodeEntry) \ - V(StoreContextSlot) \ - V(StoreKeyed) \ - V(StoreNamedField) \ - V(StringAdd) \ - V(StringCharCodeAt) \ - V(StringCharFromCode) \ - V(StringCompareAndBranch) \ - V(SubI) \ - V(TaggedToI) \ - V(ThisFunction) \ - V(TransitionElementsKind) \ - V(TrapAllocationMemento) \ - V(Typeof) \ - V(TypeofIsAndBranch) \ - V(Uint32ToDouble) \ - V(UnknownOSRValue) \ - V(WrapReceiver) - -#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \ - Opcode opcode() const final { return LInstruction::k##type; } \ - void CompileToNative(LCodeGen* generator) final; \ - const char* Mnemonic() const final { return mnemonic; } \ - static L##type* cast(LInstruction* instr) { \ - DCHECK(instr->Is##type()); \ - return reinterpret_cast<L##type*>(instr); \ - } - - -#define DECLARE_HYDROGEN_ACCESSOR(type) \ - H##type* hydrogen() const { \ - return H##type::cast(hydrogen_value()); \ - } - - -class LInstruction : public ZoneObject { - public: - LInstruction() - : environment_(NULL), - hydrogen_value_(NULL), - bit_field_(IsCallBits::encode(false)) { - } - - virtual ~LInstruction() {} - - virtual void CompileToNative(LCodeGen* generator) = 0; - virtual const char* Mnemonic() const = 0; - virtual void PrintTo(StringStream* stream); - virtual void PrintDataTo(StringStream* stream); - virtual void PrintOutputOperandTo(StringStream* stream); - - enum Opcode { - // Declare a unique enum value for each
instruction. -#define DECLARE_OPCODE(type) k##type, - LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) kAdapter, - kNumberOfInstructions -#undef DECLARE_OPCODE - }; - - virtual Opcode opcode() const = 0; - - // Declare non-virtual type testers for all leaf IR classes. -#define DECLARE_PREDICATE(type) \ - bool Is##type() const { return opcode() == k##type; } - LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE) -#undef DECLARE_PREDICATE - - // Declare virtual predicates for instructions that don't have - // an opcode. - virtual bool IsGap() const { return false; } - - virtual bool IsControl() const { return false; } - - // Try deleting this instruction if possible. - virtual bool TryDelete() { return false; } - - void set_environment(LEnvironment* env) { environment_ = env; } - LEnvironment* environment() const { return environment_; } - bool HasEnvironment() const { return environment_ != NULL; } - - void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); } - LPointerMap* pointer_map() const { return pointer_map_.get(); } - bool HasPointerMap() const { return pointer_map_.is_set(); } - - void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; } - HValue* hydrogen_value() const { return hydrogen_value_; } - - void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); } - bool IsCall() const { return IsCallBits::decode(bit_field_); } - - void MarkAsSyntacticTailCall() { - bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true); - } - bool IsSyntacticTailCall() const { - return IsSyntacticTailCallBits::decode(bit_field_); - } - - // Interface to the register allocator and iterators. - bool ClobbersTemps() const { return IsCall(); } - bool ClobbersRegisters() const { return IsCall(); } - virtual bool ClobbersDoubleRegisters(Isolate* isolate) const { - return IsCall(); - } - - virtual bool HasResult() const = 0; - virtual LOperand* result() const = 0; - - bool HasDoubleRegisterResult(); - bool HasDoubleRegisterInput(); - - LOperand* FirstInput() { return InputAt(0); } - LOperand* Output() { return HasResult() ? result() : NULL; } - - virtual bool HasInterestingComment(LCodeGen* gen) const { return true; } - -#ifdef DEBUG - void VerifyCall(); -#endif - - virtual int InputCount() = 0; - virtual LOperand* InputAt(int i) = 0; - - private: - // Iterator support. - friend class InputIterator; - - friend class TempIterator; - virtual int TempCount() = 0; - virtual LOperand* TempAt(int i) = 0; - - class IsCallBits: public BitField<bool, 0, 1> {}; - class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> { - }; - - LEnvironment* environment_; - SetOncePointer<LPointerMap> pointer_map_; - HValue* hydrogen_value_; - int bit_field_; -}; - - -// R = number of result operands (0 or 1). -template <int R> -class LTemplateResultInstruction : public LInstruction { - public: - // Allow 0 or 1 output operands. - STATIC_ASSERT(R == 0 || R == 1); - bool HasResult() const final { return R != 0 && result() != NULL; } - void set_result(LOperand* operand) { results_[0] = operand; } - LOperand* result() const override { return results_[0]; } - - protected: - EmbeddedContainer<LOperand*, R> results_; -}; - - -// R = number of result operands (0 or 1). -// I = number of input operands. -// T = number of temporary operands. -template <int R, int I, int T> -class LTemplateInstruction : public LTemplateResultInstruction<R> { - protected: - EmbeddedContainer<LOperand*, I> inputs_; - EmbeddedContainer<LOperand*, T> temps_; - - private: - // Iterator support.
- int InputCount() final { return I; } - LOperand* InputAt(int i) final { return inputs_[i]; } - - int TempCount() final { return T; } - LOperand* TempAt(int i) final { return temps_[i]; } -}; - - -class LGap : public LTemplateInstruction<0, 0, 0> { - public: - explicit LGap(HBasicBlock* block) : block_(block) { - parallel_moves_[BEFORE] = NULL; - parallel_moves_[START] = NULL; - parallel_moves_[END] = NULL; - parallel_moves_[AFTER] = NULL; - } - - // Can't use the DECLARE-macro here because of sub-classes. - bool IsGap() const final { return true; } - void PrintDataTo(StringStream* stream) override; - static LGap* cast(LInstruction* instr) { - DCHECK(instr->IsGap()); - return reinterpret_cast<LGap*>(instr); - } - - bool IsRedundant() const; - - HBasicBlock* block() const { return block_; } - - enum InnerPosition { - BEFORE, - START, - END, - AFTER, - FIRST_INNER_POSITION = BEFORE, - LAST_INNER_POSITION = AFTER - }; - - LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) { - if (parallel_moves_[pos] == NULL) { - parallel_moves_[pos] = new(zone) LParallelMove(zone); - } - return parallel_moves_[pos]; - } - - LParallelMove* GetParallelMove(InnerPosition pos) { - return parallel_moves_[pos]; - } - - private: - LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1]; - HBasicBlock* block_; -}; - - -class LInstructionGap final : public LGap { - public: - explicit LInstructionGap(HBasicBlock* block) : LGap(block) { } - - bool HasInterestingComment(LCodeGen* gen) const override { - return !IsRedundant(); - } - - DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap") -}; - - -class LGoto final : public LTemplateInstruction<0, 0, 0> { - public: - explicit LGoto(HBasicBlock* block) : block_(block) { } - - bool HasInterestingComment(LCodeGen* gen) const override; - DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") - void PrintDataTo(StringStream* stream) override; - bool IsControl() const override { return true; } - - int block_id() const { return block_->block_id(); } - bool ClobbersDoubleRegisters(Isolate* isolate) const override { - return false; - } - - bool jumps_to_join() const { return block_->predecessors()->length() > 1; } - - private: - HBasicBlock* block_; -}; - - -class LPrologue final : public LTemplateInstruction<0, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue") -}; - - -class LLazyBailout final : public LTemplateInstruction<0, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout") -}; - - -class LDummy final : public LTemplateInstruction<1, 0, 0> { - public: - LDummy() {} - DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy") -}; - - -class LDummyUse final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LDummyUse(LOperand* value) { - inputs_[0] = value; - } - DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use") -}; - - -class LDeoptimize final : public LTemplateInstruction<0, 0, 0> { - public: - bool IsControl() const override { return true; } - DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize") - DECLARE_HYDROGEN_ACCESSOR(Deoptimize) -}; - - -class LLabel final : public LGap { - public: - explicit LLabel(HBasicBlock* block) - : LGap(block), replacement_(NULL) { } - - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(Label, "label") - - void PrintDataTo(StringStream* stream) override; - - int block_id() const { return block()->block_id(); } - bool is_loop_header() const { return block()->IsLoopHeader(); } - bool is_osr_entry() const { return block()->is_osr_entry(); } -
Label* label() { return &label_; } - LLabel* replacement() const { return replacement_; } - void set_replacement(LLabel* label) { replacement_ = label; } - bool HasReplacement() const { return replacement_ != NULL; } - - private: - Label label_; - LLabel* replacement_; -}; - - -class LParameter final : public LTemplateInstruction<1, 0, 0> { - public: - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter") -}; - - -class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> { - public: - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value") -}; - - -template <int I, int T> -class LControlInstruction: public LTemplateInstruction<0, I, T> { - public: - LControlInstruction() : false_label_(NULL), true_label_(NULL) { } - - bool IsControl() const final { return true; } - - int SuccessorCount() { return hydrogen()->SuccessorCount(); } - HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); } - - int TrueDestination(LChunk* chunk) { - return chunk->LookupDestination(true_block_id()); - } - int FalseDestination(LChunk* chunk) { - return chunk->LookupDestination(false_block_id()); - } - - Label* TrueLabel(LChunk* chunk) { - if (true_label_ == NULL) { - true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk)); - } - return true_label_; - } - Label* FalseLabel(LChunk* chunk) { - if (false_label_ == NULL) { - false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk)); - } - return false_label_; - } - - protected: - int true_block_id() { return SuccessorAt(0)->block_id(); } - int false_block_id() { return SuccessorAt(1)->block_id(); } - - private: - HControlInstruction* hydrogen() { - return HControlInstruction::cast(this->hydrogen_value()); - } - - Label* false_label_; - Label* true_label_; -}; - - -class LWrapReceiver final : public LTemplateInstruction<1, 2, 1> { - public: - LWrapReceiver(LOperand* receiver, - LOperand* function, - LOperand* temp) { - inputs_[0] = receiver; - inputs_[1] = function; - temps_[0] = temp; - } - - LOperand* receiver() { return inputs_[0]; } - LOperand* function() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver") - DECLARE_HYDROGEN_ACCESSOR(WrapReceiver) -}; - - -class LApplyArguments final : public LTemplateInstruction<1, 4, 0> { - public: - LApplyArguments(LOperand* function, - LOperand* receiver, - LOperand* length, - LOperand* elements) { - inputs_[0] = function; - inputs_[1] = receiver; - inputs_[2] = length; - inputs_[3] = elements; - } - - LOperand* function() { return inputs_[0]; } - LOperand* receiver() { return inputs_[1]; } - LOperand* length() { return inputs_[2]; } - LOperand* elements() { return inputs_[3]; } - - DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments") - DECLARE_HYDROGEN_ACCESSOR(ApplyArguments) -}; - - -class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> { - public: - LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) { - inputs_[0] = arguments; - inputs_[1] = length; - inputs_[2] = index; - } - - LOperand* arguments() { return inputs_[0]; } - LOperand* length() { return inputs_[1]; } - LOperand* index() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at") - - void PrintDataTo(StringStream* stream) override; -}; - - -class LArgumentsLength final : public LTemplateInstruction<1, 1, 0> { -
public: - explicit LArgumentsLength(LOperand* elements) { - inputs_[0] = elements; - } - - LOperand* elements() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length") -}; - - -class LArgumentsElements final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements") - DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements) -}; - - -class LDebugBreak final : public LTemplateInstruction<0, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break") -}; - - -class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> { - public: - LModByPowerOf2I(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) - - private: - int32_t divisor_; -}; - - -class LModByConstI final : public LTemplateInstruction<1, 1, 2> { - public: - LModByConstI(LOperand* dividend, - int32_t divisor, - LOperand* temp1, - LOperand* temp2) { - inputs_[0] = dividend; - divisor_ = divisor; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) - - private: - int32_t divisor_; -}; - - -class LModI final : public LTemplateInstruction<1, 2, 1> { - public: - LModI(LOperand* left, LOperand* right, LOperand* temp) { - inputs_[0] = left; - inputs_[1] = right; - temps_[0] = temp; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) -}; - - -class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> { - public: - LDivByPowerOf2I(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i") - DECLARE_HYDROGEN_ACCESSOR(Div) - - private: - int32_t divisor_; -}; - - -class LDivByConstI final : public LTemplateInstruction<1, 1, 2> { - public: - LDivByConstI(LOperand* dividend, - int32_t divisor, - LOperand* temp1, - LOperand* temp2) { - inputs_[0] = dividend; - divisor_ = divisor; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i") - DECLARE_HYDROGEN_ACCESSOR(Div) - - private: - int32_t divisor_; -}; - - -class LDivI final : public LTemplateInstruction<1, 2, 1> { - public: - LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) { - inputs_[0] = dividend; - inputs_[1] = divisor; - temps_[0] = temp; - } - - LOperand* dividend() { return inputs_[0]; } - LOperand* divisor() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i") - DECLARE_HYDROGEN_ACCESSOR(BinaryOperation) -}; - - -class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> { - public: - 
LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I, - "flooring-div-by-power-of-2-i") - DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) - - private: - int32_t divisor_; -}; - - -class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 3> { - public: - LFlooringDivByConstI(LOperand* dividend, - int32_t divisor, - LOperand* temp1, - LOperand* temp2, - LOperand* temp3) { - inputs_[0] = dividend; - divisor_ = divisor; - temps_[0] = temp1; - temps_[1] = temp2; - temps_[2] = temp3; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - LOperand* temp3() { return temps_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i") - DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) - - private: - int32_t divisor_; -}; - - -class LFlooringDivI final : public LTemplateInstruction<1, 2, 1> { - public: - LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) { - inputs_[0] = dividend; - inputs_[1] = divisor; - temps_[0] = temp; - } - - LOperand* dividend() { return inputs_[0]; } - LOperand* divisor() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i") - DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) -}; - - -class LMulI final : public LTemplateInstruction<1, 2, 1> { - public: - LMulI(LOperand* left, LOperand* right, LOperand* temp) { - inputs_[0] = left; - inputs_[1] = right; - temps_[0] = temp; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i") - DECLARE_HYDROGEN_ACCESSOR(Mul) -}; - - -class LCompareNumericAndBranch final : public LControlInstruction<2, 0> { - public: - LCompareNumericAndBranch(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch, - "compare-numeric-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch) - - Token::Value op() const { return hydrogen()->token(); } - bool is_double() const { - return hydrogen()->representation().IsDouble(); - } - - void PrintDataTo(StringStream* stream) override; -}; - -// Math.floor with a double result. -class LMathFloorD final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathFloorD(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathFloorD, "math-floor-d") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - -// Math.floor with an integer result. -class LMathFloorI final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathFloorI(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathFloorI, "math-floor-i") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - -// Math.round with a double result. 
-class LMathRoundD final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathRoundD(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathRoundD, "math-round-d") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - -// Math.round with an integer result. -class LMathRoundI final : public LTemplateInstruction<1, 1, 1> { - public: - LMathRoundI(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* temp() { return temps_[0]; } - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathRoundI, "math-round-i") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - - -class LMathFround final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathFround(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround") -}; - - -class LMathAbs final : public LTemplateInstruction<1, 2, 0> { - public: - LMathAbs(LOperand* context, LOperand* value) { - inputs_[1] = context; - inputs_[0] = value; - } - - LOperand* context() { return inputs_[1]; } - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - - -class LMathLog final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathLog(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log") -}; - - -class LMathClz32 final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathClz32(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32") -}; - -class LMathCos final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathCos(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos") -}; - -class LMathSin final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathSin(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin") -}; - -class LMathExp final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathExp(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp") -}; - - -class LMathSqrt final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathSqrt(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt") -}; - - -class LMathPowHalf final : public LTemplateInstruction<1, 1, 1> { - public: - LMathPowHalf(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half") -}; - - -class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> { - public: - LCmpObjectEqAndBranch(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch") -}; - - -class LCmpHoleAndBranch final : public 
LControlInstruction<1, 0> { - public: - explicit LCmpHoleAndBranch(LOperand* object) { - inputs_[0] = object; - } - - LOperand* object() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch) -}; - - -class LIsStringAndBranch final : public LControlInstruction<1, 1> { - public: - LIsStringAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LIsSmiAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LIsSmiAndBranch(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> { - public: - LIsUndetectableAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch, - "is-undetectable-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LStringCompareAndBranch final : public LControlInstruction<3, 0> { - public: - LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch, - "string-compare-and-branch") - DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch) - - void PrintDataTo(StringStream* stream) override; - - Token::Value op() const { return hydrogen()->token(); } -}; - - -class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 1> { - public: - LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch, - "has-instance-type-and-branch") - DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - -class LClassOfTestAndBranch final : public LControlInstruction<1, 2> { - public: - LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, "class-of-test-and-branch") - DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - -class LCmpT final : public LTemplateInstruction<1, 3, 0> { - public: - LCmpT(LOperand* context, LOperand* left, LOperand* right) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t") - DECLARE_HYDROGEN_ACCESSOR(CompareGeneric) - - 
LOperand* context() { return inputs_[0]; } - Token::Value op() const { return hydrogen()->token(); } -}; - - -class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 1> { - public: - LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype, - LOperand* scratch) { - inputs_[0] = object; - inputs_[1] = prototype; - temps_[0] = scratch; - } - - LOperand* object() const { return inputs_[0]; } - LOperand* prototype() const { return inputs_[1]; } - LOperand* scratch() const { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch, - "has-in-prototype-chain-and-branch") - DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch) -}; - - -class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> { - public: - LBoundsCheck(LOperand* index, LOperand* length) { - inputs_[0] = index; - inputs_[1] = length; - } - - LOperand* index() { return inputs_[0]; } - LOperand* length() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check") - DECLARE_HYDROGEN_ACCESSOR(BoundsCheck) -}; - - -class LBitI final : public LTemplateInstruction<1, 2, 0> { - public: - LBitI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i") - DECLARE_HYDROGEN_ACCESSOR(Bitwise) - - Token::Value op() const { return hydrogen()->op(); } -}; - - -class LShiftI final : public LTemplateInstruction<1, 2, 0> { - public: - LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt) - : op_(op), can_deopt_(can_deopt) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i") - - Token::Value op() const { return op_; } - bool can_deopt() const { return can_deopt_; } - - private: - Token::Value op_; - bool can_deopt_; -}; - - -class LSubI final : public LTemplateInstruction<1, 2, 0> { - public: - LSubI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i") - DECLARE_HYDROGEN_ACCESSOR(Sub) -}; - - -class LConstantI final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - int32_t value() const { return hydrogen()->Integer32Value(); } -}; - - -class LConstantS final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); } -}; - - -class LConstantD final : public LTemplateInstruction<1, 0, 1> { - public: - explicit LConstantD(LOperand* temp) { - temps_[0] = temp; - } - - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); } -}; - - -class LConstantE final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - ExternalReference value() const { - return hydrogen()->ExternalReferenceValue(); - } -}; - - -class LConstantT final : public LTemplateInstruction<1, 0, 0> { - public: - 
-  DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
-  DECLARE_HYDROGEN_ACCESSOR(Constant)
-
-  Handle<Object> value(Isolate* isolate) const {
-    return hydrogen()->handle(isolate);
-  }
-};
-
-
-class LBranch final : public LControlInstruction<1, 1> {
- public:
-  LBranch(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
-  DECLARE_HYDROGEN_ACCESSOR(Branch)
-
-  void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LCmpMapAndBranch final : public LControlInstruction<1, 0> {
- public:
-  explicit LCmpMapAndBranch(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(CompareMap)
-
-  Handle<Map> map() const { return hydrogen()->map().handle(); }
-};
-
-
-class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
- public:
-  LSeqStringGetChar(LOperand* string, LOperand* index) {
-    inputs_[0] = string;
-    inputs_[1] = index;
-  }
-
-  LOperand* string() const { return inputs_[0]; }
-  LOperand* index() const { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
-  DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
-};
-
-
-class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 0> {
- public:
-  LSeqStringSetChar(LOperand* context,
-                    LOperand* string,
-                    LOperand* index,
-                    LOperand* value) {
-    inputs_[0] = context;
-    inputs_[1] = string;
-    inputs_[2] = index;
-    inputs_[3] = value;
-  }
-
-  LOperand* string() { return inputs_[1]; }
-  LOperand* index() { return inputs_[2]; }
-  LOperand* value() { return inputs_[3]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
-  DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-};
-
-
-class LAddI final : public LTemplateInstruction<1, 2, 0> {
- public:
-  LAddI(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  LOperand* left() { return inputs_[0]; }
-  LOperand* right() { return inputs_[1]; }
-
-  static bool UseLea(HAdd* add) {
-    return !add->CheckFlag(HValue::kCanOverflow) &&
-           add->BetterLeftOperand()->UseCount() > 1;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
-  DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-
-class LMathMinMax final : public LTemplateInstruction<1, 2, 0> {
- public:
-  LMathMinMax(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  LOperand* left() { return inputs_[0]; }
-  LOperand* right() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
-  DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
-};
-
-
-class LPower final : public LTemplateInstruction<1, 2, 0> {
- public:
-  LPower(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  LOperand* left() { return inputs_[0]; }
-  LOperand* right() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(Power, "power")
-  DECLARE_HYDROGEN_ACCESSOR(Power)
-};
-
-
-class LArithmeticD final : public LTemplateInstruction<1, 2, 0> {
- public:
-  LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
-      : op_(op) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  LOperand* left() { return inputs_[0]; }
-  LOperand* right() { return inputs_[1]; }
-
-  Token::Value op() const { return op_; }
-
-  Opcode opcode() const override { return LInstruction::kArithmeticD; }
-  void CompileToNative(LCodeGen* generator) override;
const char* Mnemonic() const override; - - private: - Token::Value op_; -}; - - -class LArithmeticT final : public LTemplateInstruction<1, 3, 0> { - public: - LArithmeticT(Token::Value op, - LOperand* context, - LOperand* left, - LOperand* right) - : op_(op) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - Token::Value op() const { return op_; } - - Opcode opcode() const override { return LInstruction::kArithmeticT; } - void CompileToNative(LCodeGen* generator) override; - const char* Mnemonic() const override; - - DECLARE_HYDROGEN_ACCESSOR(BinaryOperation) - - private: - Token::Value op_; -}; - - -class LReturn final : public LTemplateInstruction<0, 3, 0> { - public: - explicit LReturn(LOperand* value, - LOperand* context, - LOperand* parameter_count) { - inputs_[0] = value; - inputs_[1] = context; - inputs_[2] = parameter_count; - } - - bool has_constant_parameter_count() { - return parameter_count()->IsConstantOperand(); - } - LConstantOperand* constant_parameter_count() { - DCHECK(has_constant_parameter_count()); - return LConstantOperand::cast(parameter_count()); - } - LOperand* parameter_count() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(Return, "return") - DECLARE_HYDROGEN_ACCESSOR(Return) -}; - - -class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LLoadNamedField(LOperand* object) { - inputs_[0] = object; - } - - LOperand* object() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field") - DECLARE_HYDROGEN_ACCESSOR(LoadNamedField) -}; - - -class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 1> { - public: - LLoadFunctionPrototype(LOperand* function, LOperand* temp) { - inputs_[0] = function; - temps_[0] = temp; - } - - LOperand* function() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype") - DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype) -}; - - -class LLoadRoot final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root") - DECLARE_HYDROGEN_ACCESSOR(LoadRoot) - - Heap::RootListIndex index() const { return hydrogen()->index(); } -}; - - -class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> { - public: - LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) { - inputs_[0] = elements; - inputs_[1] = key; - inputs_[2] = backing_store_owner; - } - LOperand* elements() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - LOperand* backing_store_owner() { return inputs_[2]; } - ElementsKind elements_kind() const { - return hydrogen()->elements_kind(); - } - bool is_fixed_typed_array() const { - return hydrogen()->is_fixed_typed_array(); - } - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed") - DECLARE_HYDROGEN_ACCESSOR(LoadKeyed) - - void PrintDataTo(StringStream* stream) override; - uint32_t base_offset() const { return hydrogen()->base_offset(); } - bool key_is_smi() { - return hydrogen()->key()->representation().IsTagged(); - } -}; - - -inline static bool ExternalArrayOpRequiresTemp( - Representation key_representation, - ElementsKind elements_kind) { - // Operations that require the key to be divided by two to be converted into - // an index cannot fold the scale operation into a load and need an extra - // temp register to do the 
work. - return key_representation.IsSmi() && - (elements_kind == UINT8_ELEMENTS || elements_kind == INT8_ELEMENTS || - elements_kind == UINT8_CLAMPED_ELEMENTS); -} - - -class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LLoadContextSlot(LOperand* context) { - inputs_[0] = context; - } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot") - DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot) - - int slot_index() { return hydrogen()->slot_index(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LStoreContextSlot final : public LTemplateInstruction<0, 2, 1> { - public: - LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) { - inputs_[0] = context; - inputs_[1] = value; - temps_[0] = temp; - } - - LOperand* context() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot") - DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot) - - int slot_index() { return hydrogen()->slot_index(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LPushArgument final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LPushArgument(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument") -}; - - -class LDrop final : public LTemplateInstruction<0, 0, 0> { - public: - explicit LDrop(int count) : count_(count) { } - - int count() const { return count_; } - - DECLARE_CONCRETE_INSTRUCTION(Drop, "drop") - - private: - int count_; -}; - - -class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 0> { - public: - LStoreCodeEntry(LOperand* function, LOperand* code_object) { - inputs_[0] = function; - inputs_[1] = code_object; - } - - LOperand* function() { return inputs_[0]; } - LOperand* code_object() { return inputs_[1]; } - - void PrintDataTo(StringStream* stream) override; - - DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry") - DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry) -}; - - -class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> { - public: - LInnerAllocatedObject(LOperand* base_object, LOperand* offset) { - inputs_[0] = base_object; - inputs_[1] = offset; - } - - LOperand* base_object() const { return inputs_[0]; } - LOperand* offset() const { return inputs_[1]; } - - void PrintDataTo(StringStream* stream) override; - - DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object") -}; - - -class LThisFunction final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function") - DECLARE_HYDROGEN_ACCESSOR(ThisFunction) -}; - - -class LContext final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(Context, "context") - DECLARE_HYDROGEN_ACCESSOR(Context) -}; - - -class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LDeclareGlobals(LOperand* context) { - inputs_[0] = context; - } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals") - DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals) -}; - - -class LCallWithDescriptor final : public LTemplateResultInstruction<1> { - public: - LCallWithDescriptor(CallInterfaceDescriptor descriptor, - const ZoneList& operands, Zone* zone) - : inputs_(descriptor.GetRegisterParameterCount() + 
- kImplicitRegisterParameterCount, - zone) { - DCHECK(descriptor.GetRegisterParameterCount() + - kImplicitRegisterParameterCount == - operands.length()); - inputs_.AddAll(operands, zone); - } - - LOperand* target() const { return inputs_[0]; } - - DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor) - - // The target and context are passed as implicit parameters that are not - // explicitly listed in the descriptor. - static const int kImplicitRegisterParameterCount = 2; - - private: - DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor") - - void PrintDataTo(StringStream* stream) override; - - int arity() const { return hydrogen()->argument_count() - 1; } - - ZoneList inputs_; - - // Iterator support. - int InputCount() final { return inputs_.length(); } - LOperand* InputAt(int i) final { return inputs_[i]; } - - int TempCount() final { return 0; } - LOperand* TempAt(int i) final { return NULL; } -}; - - -class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> { - public: - LInvokeFunction(LOperand* context, LOperand* function) { - inputs_[0] = context; - inputs_[1] = function; - } - - LOperand* context() { return inputs_[0]; } - LOperand* function() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function") - DECLARE_HYDROGEN_ACCESSOR(InvokeFunction) - - void PrintDataTo(StringStream* stream) override; - - int arity() const { return hydrogen()->argument_count() - 1; } -}; - - -class LCallNewArray final : public LTemplateInstruction<1, 2, 0> { - public: - LCallNewArray(LOperand* context, LOperand* constructor) { - inputs_[0] = context; - inputs_[1] = constructor; - } - - LOperand* context() { return inputs_[0]; } - LOperand* constructor() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array") - DECLARE_HYDROGEN_ACCESSOR(CallNewArray) - - void PrintDataTo(StringStream* stream) override; - - int arity() const { return hydrogen()->argument_count() - 1; } -}; - - -class LCallRuntime final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LCallRuntime(LOperand* context) { - inputs_[0] = context; - } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime") - DECLARE_HYDROGEN_ACCESSOR(CallRuntime) - - bool ClobbersDoubleRegisters(Isolate* isolate) const override { - return save_doubles() == kDontSaveFPRegs; - } - - const Runtime::Function* function() const { return hydrogen()->function(); } - int arity() const { return hydrogen()->argument_count(); } - SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); } -}; - - -class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LInteger32ToDouble(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double") -}; - - -class LUint32ToDouble final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LUint32ToDouble(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double") -}; - - -class LNumberTagI final : public LTemplateInstruction<1, 1, 1> { - public: - LNumberTagI(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i") -}; - - -class LNumberTagU final : public 
LTemplateInstruction<1, 1, 1> { - public: - LNumberTagU(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u") -}; - - -class LNumberTagD final : public LTemplateInstruction<1, 1, 1> { - public: - LNumberTagD(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d") - DECLARE_HYDROGEN_ACCESSOR(Change) -}; - - -// Sometimes truncating conversion from a tagged value to an int32. -class LDoubleToI final : public LTemplateInstruction<1, 1, 1> { - public: - LDoubleToI(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i") - DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) - - bool truncating() { return hydrogen()->CanTruncateToInt32(); } -}; - - -class LDoubleToSmi final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LDoubleToSmi(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi") - DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) -}; - - -// Truncating conversion from a tagged value to an int32. -class LTaggedToI final : public LTemplateInstruction<1, 1, 1> { - public: - LTaggedToI(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i") - DECLARE_HYDROGEN_ACCESSOR(Change) - - bool truncating() { return hydrogen()->CanTruncateToInt32(); } -}; - - -class LSmiTag final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LSmiTag(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag") - DECLARE_HYDROGEN_ACCESSOR(Change) -}; - - -class LNumberUntagD final : public LTemplateInstruction<1, 1, 1> { - public: - explicit LNumberUntagD(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag") - DECLARE_HYDROGEN_ACCESSOR(Change); - - bool truncating() { return hydrogen()->CanTruncateToNumber(); } -}; - - -class LSmiUntag final : public LTemplateInstruction<1, 1, 0> { - public: - LSmiUntag(LOperand* value, bool needs_check) - : needs_check_(needs_check) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag") - - bool needs_check() const { return needs_check_; } - - private: - bool needs_check_; -}; - - -class LStoreNamedField final : public LTemplateInstruction<0, 2, 2> { - public: - LStoreNamedField(LOperand* obj, - LOperand* val, - LOperand* temp, - LOperand* temp_map) { - inputs_[0] = obj; - inputs_[1] = val; - temps_[0] = temp; - temps_[1] = temp_map; - } - - LOperand* object() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - LOperand* temp_map() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") - 
-  DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
-
-  void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
- public:
-  LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val,
-              LOperand* backing_store_owner) {
-    inputs_[0] = obj;
-    inputs_[1] = key;
-    inputs_[2] = val;
-    inputs_[3] = backing_store_owner;
-  }
-
-  bool is_fixed_typed_array() const {
-    return hydrogen()->is_fixed_typed_array();
-  }
-  LOperand* elements() { return inputs_[0]; }
-  LOperand* key() { return inputs_[1]; }
-  LOperand* value() { return inputs_[2]; }
-  LOperand* backing_store_owner() { return inputs_[3]; }
-  ElementsKind elements_kind() const {
-    return hydrogen()->elements_kind();
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
-  DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
-
-  void PrintDataTo(StringStream* stream) override;
-  uint32_t base_offset() const { return hydrogen()->base_offset(); }
-  bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
-};
-
-
-class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 2> {
- public:
-  LTransitionElementsKind(LOperand* object,
-                          LOperand* context,
-                          LOperand* new_map_temp,
-                          LOperand* temp) {
-    inputs_[0] = object;
-    inputs_[1] = context;
-    temps_[0] = new_map_temp;
-    temps_[1] = temp;
-  }
-
-  LOperand* context() { return inputs_[1]; }
-  LOperand* object() { return inputs_[0]; }
-  LOperand* new_map_temp() { return temps_[0]; }
-  LOperand* temp() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
-                               "transition-elements-kind")
-  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
-
-  void PrintDataTo(StringStream* stream) override;
-
-  Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
-  Handle<Map> transitioned_map() {
-    return hydrogen()->transitioned_map().handle();
-  }
-  ElementsKind from_kind() { return hydrogen()->from_kind(); }
-  ElementsKind to_kind() { return hydrogen()->to_kind(); }
-};
-
-
-class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> {
- public:
-  LTrapAllocationMemento(LOperand* object,
-                         LOperand* temp) {
-    inputs_[0] = object;
-    temps_[0] = temp;
-  }
-
-  LOperand* object() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
-                               "trap-allocation-memento")
-};
-
-
-class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
- public:
-  LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
-                     LOperand* key, LOperand* current_capacity) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    inputs_[2] = elements;
-    inputs_[3] = key;
-    inputs_[4] = current_capacity;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* elements() { return inputs_[2]; }
-  LOperand* key() { return inputs_[3]; }
-  LOperand* current_capacity() { return inputs_[4]; }
-
-  bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; }
-
-  DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
-  DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
-};
-
-
-class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
- public:
-  LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
-    inputs_[0] = context;
-    inputs_[1] = left;
-    inputs_[2] = right;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* left() { return inputs_[1]; }
-  LOperand* right() { return inputs_[2]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StringAdd,
"string-add") - DECLARE_HYDROGEN_ACCESSOR(StringAdd) -}; - - -class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> { - public: - LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) { - inputs_[0] = context; - inputs_[1] = string; - inputs_[2] = index; - } - - LOperand* context() { return inputs_[0]; } - LOperand* string() { return inputs_[1]; } - LOperand* index() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at") - DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt) -}; - - -class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> { - public: - LStringCharFromCode(LOperand* context, LOperand* char_code) { - inputs_[0] = context; - inputs_[1] = char_code; - } - - LOperand* context() { return inputs_[0]; } - LOperand* char_code() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code") - DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode) -}; - - -class LCheckValue final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckValue(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value") - DECLARE_HYDROGEN_ACCESSOR(CheckValue) -}; - - -class LCheckArrayBufferNotNeutered final - : public LTemplateInstruction<0, 1, 1> { - public: - explicit LCheckArrayBufferNotNeutered(LOperand* view, LOperand* scratch) { - inputs_[0] = view; - temps_[0] = scratch; - } - - LOperand* view() { return inputs_[0]; } - LOperand* scratch() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered, - "check-array-buffer-not-neutered") - DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered) -}; - - -class LCheckInstanceType final : public LTemplateInstruction<0, 1, 1> { - public: - LCheckInstanceType(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type") - DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType) -}; - - -class LCheckMaps final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckMaps(LOperand* value = NULL) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps") - DECLARE_HYDROGEN_ACCESSOR(CheckMaps) -}; - - -class LCheckSmi final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LCheckSmi(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi") -}; - - -class LClampDToUint8 final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LClampDToUint8(LOperand* value) { - inputs_[0] = value; - } - - LOperand* unclamped() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8") -}; - - -class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LClampIToUint8(LOperand* value) { - inputs_[0] = value; - } - - LOperand* unclamped() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8") -}; - - -class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> { - public: - LClampTToUint8(LOperand* value, LOperand* temp_xmm) { - inputs_[0] = value; - temps_[0] = temp_xmm; - } - - LOperand* unclamped() { return inputs_[0]; } - LOperand* temp_xmm() { return temps_[0]; } - - 
-  DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
-};
-
-
-class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LCheckNonSmi(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
-  DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
-};
-
-
-class LAllocate final : public LTemplateInstruction<1, 2, 1> {
- public:
-  LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
-    inputs_[0] = context;
-    inputs_[1] = size;
-    temps_[0] = temp;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* size() { return inputs_[1]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
-  DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-class LFastAllocate final : public LTemplateInstruction<1, 1, 1> {
- public:
-  LFastAllocate(LOperand* size, LOperand* temp) {
-    inputs_[0] = size;
-    temps_[0] = temp;
-  }
-
-  LOperand* size() const { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
-  DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-class LTypeof final : public LTemplateInstruction<1, 2, 0> {
- public:
-  LTypeof(LOperand* context, LOperand* value) {
-    inputs_[0] = context;
-    inputs_[1] = value;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* value() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
-};
-
-
-class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
- public:
-  explicit LTypeofIsAndBranch(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
-
-  Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
-  void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
- public:
-  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
-  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-};
-
-
-class LStackCheck final : public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LStackCheck(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
-  DECLARE_HYDROGEN_ACCESSOR(StackCheck)
-
-  Label* done_label() { return &done_label_; }
-
- private:
-  Label done_label_;
-};
-
-
-class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> {
- public:
-  LForInPrepareMap(LOperand* context, LOperand* object) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
-};
-
-
-class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LForInCacheArray(LOperand* map) {
-    inputs_[0] = map;
-  }
-
-  LOperand* map() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
-
-  int idx() {
-    return HForInCacheArray::cast(this->hydrogen_value())->idx();
-  }
-};
-
-
-class LCheckMapValue final : public LTemplateInstruction<0, 2, 0> {
- public:
-  LCheckMapValue(LOperand* value, LOperand* map) {
-    inputs_[0] = value;
-    inputs_[1] = map;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* map() { return inputs_[1]; }
-
DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value") -}; - - -class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> { - public: - LLoadFieldByIndex(LOperand* object, LOperand* index) { - inputs_[0] = object; - inputs_[1] = index; - } - - LOperand* object() { return inputs_[0]; } - LOperand* index() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index") -}; - - -class LChunkBuilder; -class LPlatformChunk final : public LChunk { - public: - LPlatformChunk(CompilationInfo* info, HGraph* graph) - : LChunk(info, graph), - num_double_slots_(0) { } - - int GetNextSpillIndex(RegisterKind kind); - LOperand* GetNextSpillSlot(RegisterKind kind); - - int num_double_slots() const { return num_double_slots_; } - - private: - int num_double_slots_; -}; - - -class LChunkBuilder final : public LChunkBuilderBase { - public: - LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator) - : LChunkBuilderBase(info, graph), - current_instruction_(NULL), - current_block_(NULL), - next_block_(NULL), - allocator_(allocator) {} - - // Build the sequence for the graph. - LPlatformChunk* Build(); - - // Declare methods that deal with the individual node types. -#define DECLARE_DO(type) LInstruction* Do##type(H##type* node); - HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) -#undef DECLARE_DO - - LInstruction* DoMathFloor(HUnaryMathOperation* instr); - LInstruction* DoMathRound(HUnaryMathOperation* instr); - LInstruction* DoMathFround(HUnaryMathOperation* instr); - LInstruction* DoMathAbs(HUnaryMathOperation* instr); - LInstruction* DoMathLog(HUnaryMathOperation* instr); - LInstruction* DoMathCos(HUnaryMathOperation* instr); - LInstruction* DoMathSin(HUnaryMathOperation* instr); - LInstruction* DoMathExp(HUnaryMathOperation* instr); - LInstruction* DoMathSqrt(HUnaryMathOperation* instr); - LInstruction* DoMathPowHalf(HUnaryMathOperation* instr); - LInstruction* DoMathClz32(HUnaryMathOperation* instr); - LInstruction* DoDivByPowerOf2I(HDiv* instr); - LInstruction* DoDivByConstI(HDiv* instr); - LInstruction* DoDivI(HDiv* instr); - LInstruction* DoModByPowerOf2I(HMod* instr); - LInstruction* DoModByConstI(HMod* instr); - LInstruction* DoModI(HMod* instr); - LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr); - LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr); - LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr); - - private: - // Methods for getting operands for Use / Define / Temp. - LUnallocated* ToUnallocated(Register reg); - LUnallocated* ToUnallocated(XMMRegister reg); - - // Methods for setting up define-use relationships. - MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand); - MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register); - MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value, - XMMRegister fixed_register); - - // A value that is guaranteed to be allocated to a register. - // Operand created by UseRegister is guaranteed to be live until the end of - // instruction. This means that register allocator will not reuse it's - // register for any other operand inside instruction. - // Operand created by UseRegisterAtStart is guaranteed to be live only at - // instruction start. Register allocator is free to assign the same register - // to some other operand used inside instruction (i.e. temporary or - // output). 
- MUST_USE_RESULT LOperand* UseRegister(HValue* value); - MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value); - - // An input operand in a register that may be trashed. - MUST_USE_RESULT LOperand* UseTempRegister(HValue* value); - - // An input operand in a register or stack slot. - MUST_USE_RESULT LOperand* Use(HValue* value); - MUST_USE_RESULT LOperand* UseAtStart(HValue* value); - - // An input operand in a register, stack slot or a constant operand. - MUST_USE_RESULT LOperand* UseOrConstant(HValue* value); - MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value); - - // An input operand in a fixed register or a constant operand. - MUST_USE_RESULT LOperand* UseFixedOrConstant(HValue* value, - Register fixed_register); - - // An input operand in a register or a constant operand. - MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value); - MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value); - - // An input operand in a constant operand. - MUST_USE_RESULT LOperand* UseConstant(HValue* value); - - // An input operand in register, stack slot or a constant operand. - // Will not be moved to a register even if one is freely available. - MUST_USE_RESULT LOperand* UseAny(HValue* value) override; - - // Temporary operand that must be in a register. - MUST_USE_RESULT LUnallocated* TempRegister(); - MUST_USE_RESULT LOperand* FixedTemp(Register reg); - MUST_USE_RESULT LOperand* FixedTemp(XMMRegister reg); - - // Methods for setting up define-use relationships. - // Return the same instruction that they are passed. - LInstruction* Define(LTemplateResultInstruction<1>* instr, - LUnallocated* result); - LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr); - LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr, - int index); - LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr); - LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr, - Register reg); - LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr, - XMMRegister reg); - // Assigns an environment to an instruction. An instruction which can - // deoptimize must have an environment. - LInstruction* AssignEnvironment(LInstruction* instr); - // Assigns a pointer map to an instruction. An instruction which can - // trigger a GC or a lazy deoptimization must have a pointer map. - LInstruction* AssignPointerMap(LInstruction* instr); - - enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY }; - - LOperand* GetSeqStringSetCharOperand(HSeqStringSetChar* instr); - - // Marks a call for the register allocator. Assigns a pointer map to - // support GC and lazy deoptimization. Assigns an environment to support - // eager deoptimization if CAN_DEOPTIMIZE_EAGERLY. 
- LInstruction* MarkAsCall( - LInstruction* instr, - HInstruction* hinstr, - CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY); - - void VisitInstruction(HInstruction* current); - void AddInstruction(LInstruction* instr, HInstruction* current); - - void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block); - LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr); - LInstruction* DoArithmeticD(Token::Value op, - HArithmeticBinaryOperation* instr); - LInstruction* DoArithmeticT(Token::Value op, - HBinaryOperation* instr); - - LOperand* GetStoreKeyedValueOperand(HStoreKeyed* instr); - - HInstruction* current_instruction_; - HBasicBlock* current_block_; - HBasicBlock* next_block_; - LAllocator* allocator_; - - DISALLOW_COPY_AND_ASSIGN(LChunkBuilder); -}; - -#undef DECLARE_HYDROGEN_ACCESSOR -#undef DECLARE_CONCRETE_INSTRUCTION - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_IA32_LITHIUM_IA32_H_ diff --git a/src/crankshaft/lithium-allocator-inl.h b/src/crankshaft/lithium-allocator-inl.h deleted file mode 100644 index 631af6024b..0000000000 --- a/src/crankshaft/lithium-allocator-inl.h +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_LITHIUM_ALLOCATOR_INL_H_ -#define V8_CRANKSHAFT_LITHIUM_ALLOCATOR_INL_H_ - -#include "src/crankshaft/lithium-allocator.h" - -#if V8_TARGET_ARCH_IA32 -#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT -#elif V8_TARGET_ARCH_X64 -#include "src/crankshaft/x64/lithium-x64.h" // NOLINT -#elif V8_TARGET_ARCH_ARM64 -#include "src/crankshaft/arm64/lithium-arm64.h" // NOLINT -#elif V8_TARGET_ARCH_ARM -#include "src/crankshaft/arm/lithium-arm.h" // NOLINT -#elif V8_TARGET_ARCH_PPC -#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT -#elif V8_TARGET_ARCH_MIPS -#include "src/crankshaft/mips/lithium-mips.h" // NOLINT -#elif V8_TARGET_ARCH_MIPS64 -#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT -#elif V8_TARGET_ARCH_S390 -#include "src/crankshaft/s390/lithium-s390.h" // NOLINT -#elif V8_TARGET_ARCH_X87 -#include "src/crankshaft/x87/lithium-x87.h" // NOLINT -#else -#error "Unknown architecture." -#endif - -namespace v8 { -namespace internal { - -bool LAllocator::IsGapAt(int index) { return chunk_->IsGapAt(index); } - - -LInstruction* LAllocator::InstructionAt(int index) { - return chunk_->instructions()->at(index); -} - - -LGap* LAllocator::GapAt(int index) { - return chunk_->GetGapAt(index); -} - - -void LAllocator::SetLiveRangeAssignedRegister(LiveRange* range, int reg) { - if (range->Kind() == DOUBLE_REGISTERS) { - assigned_double_registers_->Add(reg); - } else { - DCHECK(range->Kind() == GENERAL_REGISTERS); - assigned_registers_->Add(reg); - } - range->set_assigned_register(reg, chunk()->zone()); -} - - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_LITHIUM_ALLOCATOR_INL_H_ diff --git a/src/crankshaft/lithium-allocator.cc b/src/crankshaft/lithium-allocator.cc deleted file mode 100644 index 201c6062a8..0000000000 --- a/src/crankshaft/lithium-allocator.cc +++ /dev/null @@ -1,2192 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#include "src/crankshaft/lithium-allocator.h" - -#include "src/crankshaft/hydrogen.h" -#include "src/crankshaft/lithium-allocator-inl.h" -#include "src/crankshaft/lithium-inl.h" -#include "src/objects-inl.h" -#include "src/register-configuration.h" -#include "src/string-stream.h" - -namespace v8 { -namespace internal { - -const auto GetRegConfig = RegisterConfiguration::Crankshaft; - -static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) { - return a.Value() < b.Value() ? a : b; -} - - -static inline LifetimePosition Max(LifetimePosition a, LifetimePosition b) { - return a.Value() > b.Value() ? a : b; -} - - -UsePosition::UsePosition(LifetimePosition pos, - LOperand* operand, - LOperand* hint) - : operand_(operand), - hint_(hint), - pos_(pos), - next_(NULL), - requires_reg_(false), - register_beneficial_(true) { - if (operand_ != NULL && operand_->IsUnallocated()) { - LUnallocated* unalloc = LUnallocated::cast(operand_); - requires_reg_ = unalloc->HasRegisterPolicy() || - unalloc->HasDoubleRegisterPolicy(); - register_beneficial_ = !unalloc->HasAnyPolicy(); - } - DCHECK(pos_.IsValid()); -} - - -bool UsePosition::HasHint() const { - return hint_ != NULL && !hint_->IsUnallocated(); -} - - -bool UsePosition::RequiresRegister() const { - return requires_reg_; -} - - -bool UsePosition::RegisterIsBeneficial() const { - return register_beneficial_; -} - - -void UseInterval::SplitAt(LifetimePosition pos, Zone* zone) { - DCHECK(Contains(pos) && pos.Value() != start().Value()); - UseInterval* after = new(zone) UseInterval(pos, end_); - after->next_ = next_; - next_ = after; - end_ = pos; -} - - -#ifdef DEBUG - - -void LiveRange::Verify() const { - UsePosition* cur = first_pos_; - while (cur != NULL) { - DCHECK(Start().Value() <= cur->pos().Value() && - cur->pos().Value() <= End().Value()); - cur = cur->next(); - } -} - - -bool LiveRange::HasOverlap(UseInterval* target) const { - UseInterval* current_interval = first_interval_; - while (current_interval != NULL) { - // Intervals overlap if the start of one is contained in the other. 
- if (current_interval->Contains(target->start()) || - target->Contains(current_interval->start())) { - return true; - } - current_interval = current_interval->next(); - } - return false; -} - - -#endif - - -LiveRange::LiveRange(int id, Zone* zone) - : id_(id), - spilled_(false), - kind_(UNALLOCATED_REGISTERS), - assigned_register_(kInvalidAssignment), - last_interval_(NULL), - first_interval_(NULL), - first_pos_(NULL), - parent_(NULL), - next_(NULL), - current_interval_(NULL), - last_processed_use_(NULL), - current_hint_operand_(NULL), - spill_operand_(new (zone) LOperand()), - spill_start_index_(kMaxInt) {} - - -void LiveRange::set_assigned_register(int reg, Zone* zone) { - DCHECK(!HasRegisterAssigned() && !IsSpilled()); - assigned_register_ = reg; - ConvertOperands(zone); -} - - -void LiveRange::MakeSpilled(Zone* zone) { - DCHECK(!IsSpilled()); - DCHECK(TopLevel()->HasAllocatedSpillOperand()); - spilled_ = true; - assigned_register_ = kInvalidAssignment; - ConvertOperands(zone); -} - - -bool LiveRange::HasAllocatedSpillOperand() const { - DCHECK(spill_operand_ != NULL); - return !spill_operand_->IsIgnored(); -} - - -void LiveRange::SetSpillOperand(LOperand* operand) { - DCHECK(!operand->IsUnallocated()); - DCHECK(spill_operand_ != NULL); - DCHECK(spill_operand_->IsIgnored()); - spill_operand_->ConvertTo(operand->kind(), operand->index()); -} - - -UsePosition* LiveRange::NextUsePosition(LifetimePosition start) { - UsePosition* use_pos = last_processed_use_; - if (use_pos == NULL) use_pos = first_pos(); - while (use_pos != NULL && use_pos->pos().Value() < start.Value()) { - use_pos = use_pos->next(); - } - last_processed_use_ = use_pos; - return use_pos; -} - - -UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial( - LifetimePosition start) { - UsePosition* pos = NextUsePosition(start); - while (pos != NULL && !pos->RegisterIsBeneficial()) { - pos = pos->next(); - } - return pos; -} - - -UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial( - LifetimePosition start) { - UsePosition* pos = first_pos(); - UsePosition* prev = NULL; - while (pos != NULL && pos->pos().Value() < start.Value()) { - if (pos->RegisterIsBeneficial()) prev = pos; - pos = pos->next(); - } - return prev; -} - - -UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) { - UsePosition* pos = NextUsePosition(start); - while (pos != NULL && !pos->RequiresRegister()) { - pos = pos->next(); - } - return pos; -} - - -bool LiveRange::CanBeSpilled(LifetimePosition pos) { - // We cannot spill a live range that has a use requiring a register - // at the current or the immediate next position. 
- UsePosition* use_pos = NextRegisterPosition(pos); - if (use_pos == NULL) return true; - return - use_pos->pos().Value() > pos.NextInstruction().InstructionEnd().Value(); -} - - -LOperand* LiveRange::CreateAssignedOperand(Zone* zone) { - LOperand* op = NULL; - if (HasRegisterAssigned()) { - DCHECK(!IsSpilled()); - switch (Kind()) { - case GENERAL_REGISTERS: - op = LRegister::Create(assigned_register(), zone); - break; - case DOUBLE_REGISTERS: - op = LDoubleRegister::Create(assigned_register(), zone); - break; - default: - UNREACHABLE(); - } - } else if (IsSpilled()) { - DCHECK(!HasRegisterAssigned()); - op = TopLevel()->GetSpillOperand(); - DCHECK(!op->IsUnallocated()); - } else { - LUnallocated* unalloc = new(zone) LUnallocated(LUnallocated::NONE); - unalloc->set_virtual_register(id_); - op = unalloc; - } - return op; -} - - -UseInterval* LiveRange::FirstSearchIntervalForPosition( - LifetimePosition position) const { - if (current_interval_ == NULL) return first_interval_; - if (current_interval_->start().Value() > position.Value()) { - current_interval_ = NULL; - return first_interval_; - } - return current_interval_; -} - - -void LiveRange::AdvanceLastProcessedMarker( - UseInterval* to_start_of, LifetimePosition but_not_past) const { - if (to_start_of == NULL) return; - if (to_start_of->start().Value() > but_not_past.Value()) return; - LifetimePosition start = - current_interval_ == NULL ? LifetimePosition::Invalid() - : current_interval_->start(); - if (to_start_of->start().Value() > start.Value()) { - current_interval_ = to_start_of; - } -} - - -void LiveRange::SplitAt(LifetimePosition position, - LiveRange* result, - Zone* zone) { - DCHECK(Start().Value() < position.Value()); - DCHECK(result->IsEmpty()); - // Find the last interval that ends before the position. If the - // position is contained in one of the intervals in the chain, we - // split that interval and use the first part. - UseInterval* current = FirstSearchIntervalForPosition(position); - - // If the split position coincides with the beginning of a use interval - // we need to split use positons in a special way. - bool split_at_start = false; - - if (current->start().Value() == position.Value()) { - // When splitting at start we need to locate the previous use interval. - current = first_interval_; - } - - while (current != NULL) { - if (current->Contains(position)) { - current->SplitAt(position, zone); - break; - } - UseInterval* next = current->next(); - if (next->start().Value() >= position.Value()) { - split_at_start = (next->start().Value() == position.Value()); - break; - } - current = next; - } - - // Partition original use intervals to the two live ranges. - UseInterval* before = current; - UseInterval* after = before->next(); - result->last_interval_ = (last_interval_ == before) - ? after // Only interval in the range after split. - : last_interval_; // Last interval of the original range. - result->first_interval_ = after; - last_interval_ = before; - - // Find the last use position before the split and the first use - // position after it. - UsePosition* use_after = first_pos_; - UsePosition* use_before = NULL; - if (split_at_start) { - // The split position coincides with the beginning of a use interval (the - // end of a lifetime hole). Use at this position should be attributed to - // the split child because split child owns use interval covering it. 
- while (use_after != NULL && use_after->pos().Value() < position.Value()) { - use_before = use_after; - use_after = use_after->next(); - } - } else { - while (use_after != NULL && use_after->pos().Value() <= position.Value()) { - use_before = use_after; - use_after = use_after->next(); - } - } - - // Partition original use positions to the two live ranges. - if (use_before != NULL) { - use_before->next_ = NULL; - } else { - first_pos_ = NULL; - } - result->first_pos_ = use_after; - - // Discard cached iteration state. It might be pointing - // to the use that no longer belongs to this live range. - last_processed_use_ = NULL; - current_interval_ = NULL; - - // Link the new live range in the chain before any of the other - // ranges linked from the range before the split. - result->parent_ = (parent_ == NULL) ? this : parent_; - result->kind_ = result->parent_->kind_; - result->next_ = next_; - next_ = result; - -#ifdef DEBUG - Verify(); - result->Verify(); -#endif -} - - -// This implements an ordering on live ranges so that they are ordered by their -// start positions. This is needed for the correctness of the register -// allocation algorithm. If two live ranges start at the same offset then there -// is a tie breaker based on where the value is first used. This part of the -// ordering is merely a heuristic. -bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const { - LifetimePosition start = Start(); - LifetimePosition other_start = other->Start(); - if (start.Value() == other_start.Value()) { - UsePosition* pos = first_pos(); - if (pos == NULL) return false; - UsePosition* other_pos = other->first_pos(); - if (other_pos == NULL) return true; - return pos->pos().Value() < other_pos->pos().Value(); - } - return start.Value() < other_start.Value(); -} - - -void LiveRange::ShortenTo(LifetimePosition start) { - LAllocator::TraceAlloc("Shorten live range %d to [%d\n", id_, start.Value()); - DCHECK(first_interval_ != NULL); - DCHECK(first_interval_->start().Value() <= start.Value()); - DCHECK(start.Value() < first_interval_->end().Value()); - first_interval_->set_start(start); -} - - -void LiveRange::EnsureInterval(LifetimePosition start, - LifetimePosition end, - Zone* zone) { - LAllocator::TraceAlloc("Ensure live range %d in interval [%d %d[\n", - id_, - start.Value(), - end.Value()); - LifetimePosition new_end = end; - while (first_interval_ != NULL && - first_interval_->start().Value() <= end.Value()) { - if (first_interval_->end().Value() > end.Value()) { - new_end = first_interval_->end(); - } - first_interval_ = first_interval_->next(); - } - - UseInterval* new_interval = new(zone) UseInterval(start, new_end); - new_interval->next_ = first_interval_; - first_interval_ = new_interval; - if (new_interval->next() == NULL) { - last_interval_ = new_interval; - } -} - - -void LiveRange::AddUseInterval(LifetimePosition start, - LifetimePosition end, - Zone* zone) { - LAllocator::TraceAlloc("Add to live range %d interval [%d %d[\n", - id_, - start.Value(), - end.Value()); - if (first_interval_ == NULL) { - UseInterval* interval = new(zone) UseInterval(start, end); - first_interval_ = interval; - last_interval_ = interval; - } else { - if (end.Value() == first_interval_->start().Value()) { - first_interval_->set_start(start); - } else if (end.Value() < first_interval_->start().Value()) { - UseInterval* interval = new(zone) UseInterval(start, end); - interval->set_next(first_interval_); - first_interval_ = interval; - } else { - // Order of instruction's processing (see 
ProcessInstructions) guarantees - // that each new use interval either precedes or intersects with - // last added interval. - DCHECK(start.Value() < first_interval_->end().Value()); - first_interval_->start_ = Min(start, first_interval_->start_); - first_interval_->end_ = Max(end, first_interval_->end_); - } - } -} - - -void LiveRange::AddUsePosition(LifetimePosition pos, - LOperand* operand, - LOperand* hint, - Zone* zone) { - LAllocator::TraceAlloc("Add to live range %d use position %d\n", - id_, - pos.Value()); - UsePosition* use_pos = new(zone) UsePosition(pos, operand, hint); - UsePosition* prev_hint = NULL; - UsePosition* prev = NULL; - UsePosition* current = first_pos_; - while (current != NULL && current->pos().Value() < pos.Value()) { - prev_hint = current->HasHint() ? current : prev_hint; - prev = current; - current = current->next(); - } - - if (prev == NULL) { - use_pos->set_next(first_pos_); - first_pos_ = use_pos; - } else { - use_pos->next_ = prev->next_; - prev->next_ = use_pos; - } - - if (prev_hint == NULL && use_pos->HasHint()) { - current_hint_operand_ = hint; - } -} - - -void LiveRange::ConvertOperands(Zone* zone) { - LOperand* op = CreateAssignedOperand(zone); - UsePosition* use_pos = first_pos(); - while (use_pos != NULL) { - DCHECK(Start().Value() <= use_pos->pos().Value() && - use_pos->pos().Value() <= End().Value()); - - if (use_pos->HasOperand()) { - DCHECK(op->IsRegister() || op->IsDoubleRegister() || - !use_pos->RequiresRegister()); - use_pos->operand()->ConvertTo(op->kind(), op->index()); - } - use_pos = use_pos->next(); - } -} - - -bool LiveRange::CanCover(LifetimePosition position) const { - if (IsEmpty()) return false; - return Start().Value() <= position.Value() && - position.Value() < End().Value(); -} - - -bool LiveRange::Covers(LifetimePosition position) { - if (!CanCover(position)) return false; - UseInterval* start_search = FirstSearchIntervalForPosition(position); - for (UseInterval* interval = start_search; - interval != NULL; - interval = interval->next()) { - DCHECK(interval->next() == NULL || - interval->next()->start().Value() >= interval->start().Value()); - AdvanceLastProcessedMarker(interval, position); - if (interval->Contains(position)) return true; - if (interval->start().Value() > position.Value()) return false; - } - return false; -} - - -LifetimePosition LiveRange::FirstIntersection(LiveRange* other) { - UseInterval* b = other->first_interval(); - if (b == NULL) return LifetimePosition::Invalid(); - LifetimePosition advance_last_processed_up_to = b->start(); - UseInterval* a = FirstSearchIntervalForPosition(b->start()); - while (a != NULL && b != NULL) { - if (a->start().Value() > other->End().Value()) break; - if (b->start().Value() > End().Value()) break; - LifetimePosition cur_intersection = a->Intersect(b); - if (cur_intersection.IsValid()) { - return cur_intersection; - } - if (a->start().Value() < b->start().Value()) { - a = a->next(); - if (a == NULL || a->start().Value() > other->End().Value()) break; - AdvanceLastProcessedMarker(a, advance_last_processed_up_to); - } else { - b = b->next(); - } - } - return LifetimePosition::Invalid(); -} - -LAllocator::LAllocator(int num_values, HGraph* graph) - : zone_(graph->isolate()->allocator(), ZONE_NAME), - chunk_(NULL), - live_in_sets_(graph->blocks()->length(), zone()), - live_ranges_(num_values * 2, zone()), - fixed_live_ranges_(NULL), - fixed_double_live_ranges_(NULL), - unhandled_live_ranges_(num_values * 2, zone()), - active_live_ranges_(8, zone()), - inactive_live_ranges_(8, 
zone()),
-      reusable_slots_(8, zone()),
-      next_virtual_register_(num_values),
-      first_artificial_register_(num_values),
-      mode_(UNALLOCATED_REGISTERS),
-      num_registers_(-1),
-      graph_(graph),
-      has_osr_entry_(false),
-      allocation_ok_(true) {}
-
-void LAllocator::InitializeLivenessAnalysis() {
-  // Initialize the live_in sets for each block to NULL.
-  int block_count = graph_->blocks()->length();
-  live_in_sets_.Initialize(block_count, zone());
-  live_in_sets_.AddBlock(NULL, block_count, zone());
-}
-
-
-BitVector* LAllocator::ComputeLiveOut(HBasicBlock* block) {
-  // Compute live out for the given block, except not including backward
-  // successor edges.
-  BitVector* live_out = new(zone()) BitVector(next_virtual_register_, zone());
-
-  // Process all successor blocks.
-  for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
-    // Add values live on entry to the successor. Note the successor's
-    // live_in will not be computed yet for backwards edges.
-    HBasicBlock* successor = it.Current();
-    BitVector* live_in = live_in_sets_[successor->block_id()];
-    if (live_in != NULL) live_out->Union(*live_in);
-
-    // All phi input operands corresponding to this successor edge are live
-    // out from this block.
-    int index = successor->PredecessorIndexOf(block);
-    const ZoneList<HPhi*>* phis = successor->phis();
-    for (int i = 0; i < phis->length(); ++i) {
-      HPhi* phi = phis->at(i);
-      if (!phi->OperandAt(index)->IsConstant()) {
-        live_out->Add(phi->OperandAt(index)->id());
-      }
-    }
-  }
-
-  return live_out;
-}
-
-
-void LAllocator::AddInitialIntervals(HBasicBlock* block,
-                                     BitVector* live_out) {
-  // Add an interval that includes the entire block to the live range for
-  // each live_out value.
-  LifetimePosition start = LifetimePosition::FromInstructionIndex(
-      block->first_instruction_index());
-  LifetimePosition end = LifetimePosition::FromInstructionIndex(
-      block->last_instruction_index()).NextInstruction();
-  BitVector::Iterator iterator(live_out);
-  while (!iterator.Done()) {
-    int operand_index = iterator.Current();
-    LiveRange* range = LiveRangeFor(operand_index);
-    range->AddUseInterval(start, end, zone());
-    iterator.Advance();
-  }
-}
-
-
-int LAllocator::FixedDoubleLiveRangeID(int index) {
-  return -index - 1 - Register::kNumRegisters;
-}
-
-
-LOperand* LAllocator::AllocateFixed(LUnallocated* operand,
-                                    int pos,
-                                    bool is_tagged) {
-  TraceAlloc("Allocating fixed reg for op %d\n", operand->virtual_register());
-  DCHECK(operand->HasFixedPolicy());
-  if (operand->HasFixedSlotPolicy()) {
-    operand->ConvertTo(LOperand::STACK_SLOT, operand->fixed_slot_index());
-  } else if (operand->HasFixedRegisterPolicy()) {
-    int reg_index = operand->fixed_register_index();
-    operand->ConvertTo(LOperand::REGISTER, reg_index);
-  } else if (operand->HasFixedDoubleRegisterPolicy()) {
-    int reg_index = operand->fixed_register_index();
-    operand->ConvertTo(LOperand::DOUBLE_REGISTER, reg_index);
-  } else {
-    UNREACHABLE();
-  }
-  if (is_tagged) {
-    TraceAlloc("Fixed reg is tagged at %d\n", pos);
-    LInstruction* instr = InstructionAt(pos);
-    if (instr->HasPointerMap()) {
-      instr->pointer_map()->RecordPointer(operand, chunk()->zone());
-    }
-  }
-  return operand;
-}
-
-
-LiveRange* LAllocator::FixedLiveRangeFor(int index) {
-  DCHECK(index < Register::kNumRegisters);
-  LiveRange* result = fixed_live_ranges_[index];
-  if (result == NULL) {
-    result = new(zone()) LiveRange(FixedLiveRangeID(index), chunk()->zone());
-    DCHECK(result->IsFixed());
-    result->kind_ = GENERAL_REGISTERS;
-    SetLiveRangeAssignedRegister(result,
index); - fixed_live_ranges_[index] = result; - } - return result; -} - - -LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) { - DCHECK(index < DoubleRegister::kMaxNumRegisters); - LiveRange* result = fixed_double_live_ranges_[index]; - if (result == NULL) { - result = new(zone()) LiveRange(FixedDoubleLiveRangeID(index), - chunk()->zone()); - DCHECK(result->IsFixed()); - result->kind_ = DOUBLE_REGISTERS; - SetLiveRangeAssignedRegister(result, index); - fixed_double_live_ranges_[index] = result; - } - return result; -} - - -LiveRange* LAllocator::LiveRangeFor(int index) { - if (index >= live_ranges_.length()) { - live_ranges_.AddBlock(NULL, index - live_ranges_.length() + 1, zone()); - } - LiveRange* result = live_ranges_[index]; - if (result == NULL) { - result = new(zone()) LiveRange(index, chunk()->zone()); - live_ranges_[index] = result; - } - return result; -} - - -LGap* LAllocator::GetLastGap(HBasicBlock* block) { - int last_instruction = block->last_instruction_index(); - int index = chunk_->NearestGapPos(last_instruction); - return GapAt(index); -} - - -HPhi* LAllocator::LookupPhi(LOperand* operand) const { - if (!operand->IsUnallocated()) return NULL; - int index = LUnallocated::cast(operand)->virtual_register(); - HValue* instr = graph_->LookupValue(index); - if (instr != NULL && instr->IsPhi()) { - return HPhi::cast(instr); - } - return NULL; -} - - -LiveRange* LAllocator::LiveRangeFor(LOperand* operand) { - if (operand->IsUnallocated()) { - return LiveRangeFor(LUnallocated::cast(operand)->virtual_register()); - } else if (operand->IsRegister()) { - return FixedLiveRangeFor(operand->index()); - } else if (operand->IsDoubleRegister()) { - return FixedDoubleLiveRangeFor(operand->index()); - } else { - return NULL; - } -} - - -void LAllocator::Define(LifetimePosition position, - LOperand* operand, - LOperand* hint) { - LiveRange* range = LiveRangeFor(operand); - if (range == NULL) return; - - if (range->IsEmpty() || range->Start().Value() > position.Value()) { - // Can happen if there is a definition without use. 
- range->AddUseInterval(position, position.NextInstruction(), zone()); - range->AddUsePosition(position.NextInstruction(), NULL, NULL, zone()); - } else { - range->ShortenTo(position); - } - - if (operand->IsUnallocated()) { - LUnallocated* unalloc_operand = LUnallocated::cast(operand); - range->AddUsePosition(position, unalloc_operand, hint, zone()); - } -} - - -void LAllocator::Use(LifetimePosition block_start, - LifetimePosition position, - LOperand* operand, - LOperand* hint) { - LiveRange* range = LiveRangeFor(operand); - if (range == NULL) return; - if (operand->IsUnallocated()) { - LUnallocated* unalloc_operand = LUnallocated::cast(operand); - range->AddUsePosition(position, unalloc_operand, hint, zone()); - } - range->AddUseInterval(block_start, position, zone()); -} - - -void LAllocator::AddConstraintsGapMove(int index, - LOperand* from, - LOperand* to) { - LGap* gap = GapAt(index); - LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START, - chunk()->zone()); - if (from->IsUnallocated()) { - const ZoneList* move_operands = move->move_operands(); - for (int i = 0; i < move_operands->length(); ++i) { - LMoveOperands cur = move_operands->at(i); - LOperand* cur_to = cur.destination(); - if (cur_to->IsUnallocated()) { - if (LUnallocated::cast(cur_to)->virtual_register() == - LUnallocated::cast(from)->virtual_register()) { - move->AddMove(cur.source(), to, chunk()->zone()); - return; - } - } - } - } - move->AddMove(from, to, chunk()->zone()); -} - - -void LAllocator::MeetRegisterConstraints(HBasicBlock* block) { - int start = block->first_instruction_index(); - int end = block->last_instruction_index(); - if (start == -1) return; - for (int i = start; i <= end; ++i) { - if (IsGapAt(i)) { - LInstruction* instr = NULL; - LInstruction* prev_instr = NULL; - if (i < end) instr = InstructionAt(i + 1); - if (i > start) prev_instr = InstructionAt(i - 1); - MeetConstraintsBetween(prev_instr, instr, i); - if (!AllocationOk()) return; - } - } -} - - -void LAllocator::MeetConstraintsBetween(LInstruction* first, - LInstruction* second, - int gap_index) { - // Handle fixed temporaries. - if (first != NULL) { - for (TempIterator it(first); !it.Done(); it.Advance()) { - LUnallocated* temp = LUnallocated::cast(it.Current()); - if (temp->HasFixedPolicy()) { - AllocateFixed(temp, gap_index - 1, false); - } - } - } - - // Handle fixed output operand. - if (first != NULL && first->Output() != NULL) { - LUnallocated* first_output = LUnallocated::cast(first->Output()); - LiveRange* range = LiveRangeFor(first_output->virtual_register()); - bool assigned = false; - if (first_output->HasFixedPolicy()) { - LUnallocated* output_copy = first_output->CopyUnconstrained( - chunk()->zone()); - bool is_tagged = HasTaggedValue(first_output->virtual_register()); - AllocateFixed(first_output, gap_index, is_tagged); - - // This value is produced on the stack, we never need to spill it. - if (first_output->IsStackSlot()) { - range->SetSpillOperand(first_output); - range->SetSpillStartIndex(gap_index - 1); - assigned = true; - } - chunk_->AddGapMove(gap_index, first_output, output_copy); - } - - if (!assigned) { - range->SetSpillStartIndex(gap_index); - - // This move to spill operand is not a real use. Liveness analysis - // and splitting of live ranges do not account for it. - // Thus it should be inserted to a lifetime position corresponding to - // the instruction end. 
- LGap* gap = GapAt(gap_index); - LParallelMove* move = gap->GetOrCreateParallelMove(LGap::BEFORE, - chunk()->zone()); - move->AddMove(first_output, range->GetSpillOperand(), - chunk()->zone()); - } - } - - // Handle fixed input operands of second instruction. - if (second != NULL) { - for (UseIterator it(second); !it.Done(); it.Advance()) { - LUnallocated* cur_input = LUnallocated::cast(it.Current()); - if (cur_input->HasFixedPolicy()) { - LUnallocated* input_copy = cur_input->CopyUnconstrained( - chunk()->zone()); - bool is_tagged = HasTaggedValue(cur_input->virtual_register()); - AllocateFixed(cur_input, gap_index + 1, is_tagged); - AddConstraintsGapMove(gap_index, input_copy, cur_input); - } else if (cur_input->HasWritableRegisterPolicy()) { - // The live range of writable input registers always goes until the end - // of the instruction. - DCHECK(!cur_input->IsUsedAtStart()); - - LUnallocated* input_copy = cur_input->CopyUnconstrained( - chunk()->zone()); - int vreg = GetVirtualRegister(); - if (!AllocationOk()) return; - cur_input->set_virtual_register(vreg); - - if (RequiredRegisterKind(input_copy->virtual_register()) == - DOUBLE_REGISTERS) { - double_artificial_registers_.Add( - cur_input->virtual_register() - first_artificial_register_, - zone()); - } - - AddConstraintsGapMove(gap_index, input_copy, cur_input); - } - } - } - - // Handle "output same as input" for second instruction. - if (second != NULL && second->Output() != NULL) { - LUnallocated* second_output = LUnallocated::cast(second->Output()); - if (second_output->HasSameAsInputPolicy()) { - LUnallocated* cur_input = LUnallocated::cast(second->FirstInput()); - int output_vreg = second_output->virtual_register(); - int input_vreg = cur_input->virtual_register(); - - LUnallocated* input_copy = cur_input->CopyUnconstrained( - chunk()->zone()); - cur_input->set_virtual_register(second_output->virtual_register()); - AddConstraintsGapMove(gap_index, input_copy, cur_input); - - if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) { - int index = gap_index + 1; - LInstruction* instr = InstructionAt(index); - if (instr->HasPointerMap()) { - instr->pointer_map()->RecordPointer(input_copy, chunk()->zone()); - } - } else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) { - // The input is assumed to immediately have a tagged representation, - // before the pointer map can be used. I.e. the pointer map at the - // instruction will include the output operand (whose value at the - // beginning of the instruction is equal to the input operand). If - // this is not desired, then the pointer map at this instruction needs - // to be adjusted manually. - } - } - } -} - - -void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) { - int block_start = block->first_instruction_index(); - int index = block->last_instruction_index(); - - LifetimePosition block_start_position = - LifetimePosition::FromInstructionIndex(block_start); - - while (index >= block_start) { - LifetimePosition curr_position = - LifetimePosition::FromInstructionIndex(index); - - if (IsGapAt(index)) { - // We have a gap at this position. 
- LGap* gap = GapAt(index); - LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START, - chunk()->zone()); - const ZoneList* move_operands = move->move_operands(); - for (int i = 0; i < move_operands->length(); ++i) { - LMoveOperands* cur = &move_operands->at(i); - if (cur->IsIgnored()) continue; - LOperand* from = cur->source(); - LOperand* to = cur->destination(); - HPhi* phi = LookupPhi(to); - LOperand* hint = to; - if (phi != NULL) { - // This is a phi resolving move. - if (!phi->block()->IsLoopHeader()) { - hint = LiveRangeFor(phi->id())->current_hint_operand(); - } - } else { - if (to->IsUnallocated()) { - if (live->Contains(LUnallocated::cast(to)->virtual_register())) { - Define(curr_position, to, from); - live->Remove(LUnallocated::cast(to)->virtual_register()); - } else { - cur->Eliminate(); - continue; - } - } else { - Define(curr_position, to, from); - } - } - Use(block_start_position, curr_position, from, hint); - if (from->IsUnallocated()) { - live->Add(LUnallocated::cast(from)->virtual_register()); - } - } - } else { - DCHECK(!IsGapAt(index)); - LInstruction* instr = InstructionAt(index); - - if (instr != NULL) { - LOperand* output = instr->Output(); - if (output != NULL) { - if (output->IsUnallocated()) { - live->Remove(LUnallocated::cast(output)->virtual_register()); - } - Define(curr_position, output, NULL); - } - - if (instr->ClobbersRegisters()) { - for (int i = 0; i < Register::kNumRegisters; ++i) { - if (GetRegConfig()->IsAllocatableGeneralCode(i)) { - if (output == NULL || !output->IsRegister() || - output->index() != i) { - LiveRange* range = FixedLiveRangeFor(i); - range->AddUseInterval(curr_position, - curr_position.InstructionEnd(), zone()); - } - } - } - } - - if (instr->ClobbersDoubleRegisters(isolate())) { - for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) { - if (GetRegConfig()->IsAllocatableDoubleCode(i)) { - if (output == NULL || !output->IsDoubleRegister() || - output->index() != i) { - LiveRange* range = FixedDoubleLiveRangeFor(i); - range->AddUseInterval(curr_position, - curr_position.InstructionEnd(), zone()); - } - } - } - } - - for (UseIterator it(instr); !it.Done(); it.Advance()) { - LOperand* input = it.Current(); - - LifetimePosition use_pos; - if (input->IsUnallocated() && - LUnallocated::cast(input)->IsUsedAtStart()) { - use_pos = curr_position; - } else { - use_pos = curr_position.InstructionEnd(); - } - - Use(block_start_position, use_pos, input, NULL); - if (input->IsUnallocated()) { - live->Add(LUnallocated::cast(input)->virtual_register()); - } - } - - for (TempIterator it(instr); !it.Done(); it.Advance()) { - LOperand* temp = it.Current(); - if (instr->ClobbersTemps()) { - if (temp->IsRegister()) continue; - if (temp->IsUnallocated()) { - LUnallocated* temp_unalloc = LUnallocated::cast(temp); - if (temp_unalloc->HasFixedPolicy()) { - continue; - } - } - } - Use(block_start_position, curr_position.InstructionEnd(), temp, NULL); - Define(curr_position, temp, NULL); - - if (temp->IsUnallocated()) { - LUnallocated* temp_unalloc = LUnallocated::cast(temp); - if (temp_unalloc->HasDoubleRegisterPolicy()) { - double_artificial_registers_.Add( - temp_unalloc->virtual_register() - first_artificial_register_, - zone()); - } - } - } - } - } - - index = index - 1; - } -} - - -void LAllocator::ResolvePhis(HBasicBlock* block) { - const ZoneList* phis = block->phis(); - for (int i = 0; i < phis->length(); ++i) { - HPhi* phi = phis->at(i); - LUnallocated* phi_operand = - new (chunk()->zone()) LUnallocated(LUnallocated::NONE); - 
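// A simplified sketch (hypothetical types, not the deleted code) of the
// backward per-block walk that ProcessInstructions performs above: values live
// out of the block start with an interval covering the whole block, a
// definition shortens the interval to the definition point, and a use keeps
// the value live from the block start up to the use.
#include <map>

namespace liveness_sketch {
struct Interval { int start; int end; };  // half-open [start, end)

// A definition at 'pos': the value cannot be live before this point.
inline void Define(std::map<int, Interval>& live, int vreg, int pos) {
  auto it = live.find(vreg);
  if (it != live.end()) it->second.start = pos;
}

// A use at 'pos': conservatively live from the block start until the use
// (later shortened by Define if the defining instruction is in this block).
inline void Use(std::map<int, Interval>& live, int vreg, int block_start, int pos) {
  auto res = live.emplace(vreg, Interval{block_start, pos});
  if (!res.second && pos > res.first->second.end) res.first->second.end = pos;
}
}  // namespace liveness_sketch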
phi_operand->set_virtual_register(phi->id()); - for (int j = 0; j < phi->OperandCount(); ++j) { - HValue* op = phi->OperandAt(j); - LOperand* operand = NULL; - if (op->IsConstant() && op->EmitAtUses()) { - HConstant* constant = HConstant::cast(op); - operand = chunk_->DefineConstantOperand(constant); - } else { - DCHECK(!op->EmitAtUses()); - LUnallocated* unalloc = - new(chunk()->zone()) LUnallocated(LUnallocated::ANY); - unalloc->set_virtual_register(op->id()); - operand = unalloc; - } - HBasicBlock* cur_block = block->predecessors()->at(j); - // The gap move must be added without any special processing as in - // the AddConstraintsGapMove. - chunk_->AddGapMove(cur_block->last_instruction_index() - 1, - operand, - phi_operand); - - // We are going to insert a move before the branch instruction. - // Some branch instructions (e.g. loops' back edges) - // can potentially cause a GC so they have a pointer map. - // By inserting a move we essentially create a copy of a - // value which is invisible to PopulatePointerMaps(), because we store - // it into a location different from the operand of a live range - // covering a branch instruction. - // Thus we need to manually record a pointer. - LInstruction* branch = - InstructionAt(cur_block->last_instruction_index()); - if (branch->HasPointerMap()) { - if (phi->representation().IsTagged() && !phi->type().IsSmi()) { - branch->pointer_map()->RecordPointer(phi_operand, chunk()->zone()); - } else if (!phi->representation().IsDouble()) { - branch->pointer_map()->RecordUntagged(phi_operand, chunk()->zone()); - } - } - } - - LiveRange* live_range = LiveRangeFor(phi->id()); - LLabel* label = chunk_->GetLabel(phi->block()->block_id()); - label->GetOrCreateParallelMove(LGap::START, chunk()->zone())-> - AddMove(phi_operand, live_range->GetSpillOperand(), chunk()->zone()); - live_range->SetSpillStartIndex(phi->block()->first_instruction_index()); - } -} - - -bool LAllocator::Allocate(LChunk* chunk) { - DCHECK(chunk_ == NULL); - chunk_ = static_cast(chunk); - assigned_registers_ = - new (chunk->zone()) BitVector(Register::kNumRegisters, chunk->zone()); - assigned_double_registers_ = new (chunk->zone()) - BitVector(DoubleRegister::kMaxNumRegisters, chunk->zone()); - MeetRegisterConstraints(); - if (!AllocationOk()) return false; - ResolvePhis(); - BuildLiveRanges(); - AllocateGeneralRegisters(); - if (!AllocationOk()) return false; - AllocateDoubleRegisters(); - if (!AllocationOk()) return false; - PopulatePointerMaps(); - ConnectRanges(); - ResolveControlFlow(); - return true; -} - - -void LAllocator::MeetRegisterConstraints() { - LAllocatorPhase phase("L_Register constraints", this); - const ZoneList* blocks = graph_->blocks(); - for (int i = 0; i < blocks->length(); ++i) { - HBasicBlock* block = blocks->at(i); - MeetRegisterConstraints(block); - if (!AllocationOk()) return; - } -} - - -void LAllocator::ResolvePhis() { - LAllocatorPhase phase("L_Resolve phis", this); - - // Process the blocks in reverse order. 
- const ZoneList* blocks = graph_->blocks(); - for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) { - HBasicBlock* block = blocks->at(block_id); - ResolvePhis(block); - } -} - - -void LAllocator::ResolveControlFlow(LiveRange* range, - HBasicBlock* block, - HBasicBlock* pred) { - LifetimePosition pred_end = - LifetimePosition::FromInstructionIndex(pred->last_instruction_index()); - LifetimePosition cur_start = - LifetimePosition::FromInstructionIndex(block->first_instruction_index()); - LiveRange* pred_cover = NULL; - LiveRange* cur_cover = NULL; - LiveRange* cur_range = range; - while (cur_range != NULL && (cur_cover == NULL || pred_cover == NULL)) { - if (cur_range->CanCover(cur_start)) { - DCHECK(cur_cover == NULL); - cur_cover = cur_range; - } - if (cur_range->CanCover(pred_end)) { - DCHECK(pred_cover == NULL); - pred_cover = cur_range; - } - cur_range = cur_range->next(); - } - - if (cur_cover->IsSpilled()) return; - DCHECK(pred_cover != NULL && cur_cover != NULL); - if (pred_cover != cur_cover) { - LOperand* pred_op = pred_cover->CreateAssignedOperand(chunk()->zone()); - LOperand* cur_op = cur_cover->CreateAssignedOperand(chunk()->zone()); - if (!pred_op->Equals(cur_op)) { - LGap* gap = NULL; - if (block->predecessors()->length() == 1) { - gap = GapAt(block->first_instruction_index()); - } else { - DCHECK(pred->end()->SecondSuccessor() == NULL); - gap = GetLastGap(pred); - - // We are going to insert a move before the branch instruction. - // Some branch instructions (e.g. loops' back edges) - // can potentially cause a GC so they have a pointer map. - // By inserting a move we essentially create a copy of a - // value which is invisible to PopulatePointerMaps(), because we store - // it into a location different from the operand of a live range - // covering a branch instruction. - // Thus we need to manually record a pointer. - LInstruction* branch = InstructionAt(pred->last_instruction_index()); - if (branch->HasPointerMap()) { - if (HasTaggedValue(range->id())) { - branch->pointer_map()->RecordPointer(cur_op, chunk()->zone()); - } else if (!cur_op->IsDoubleStackSlot() && - !cur_op->IsDoubleRegister()) { - branch->pointer_map()->RemovePointer(cur_op); - } - } - } - gap->GetOrCreateParallelMove( - LGap::START, chunk()->zone())->AddMove(pred_op, cur_op, - chunk()->zone()); - } - } -} - - -LParallelMove* LAllocator::GetConnectingParallelMove(LifetimePosition pos) { - int index = pos.InstructionIndex(); - if (IsGapAt(index)) { - LGap* gap = GapAt(index); - return gap->GetOrCreateParallelMove( - pos.IsInstructionStart() ? LGap::START : LGap::END, chunk()->zone()); - } - int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1); - return GapAt(gap_pos)->GetOrCreateParallelMove( - (gap_pos < index) ? LGap::AFTER : LGap::BEFORE, chunk()->zone()); -} - - -HBasicBlock* LAllocator::GetBlock(LifetimePosition pos) { - LGap* gap = GapAt(chunk_->NearestGapPos(pos.InstructionIndex())); - return gap->block(); -} - - -void LAllocator::ConnectRanges() { - LAllocatorPhase phase("L_Connect ranges", this); - for (int i = 0; i < live_ranges()->length(); ++i) { - LiveRange* first_range = live_ranges()->at(i); - if (first_range == NULL || first_range->parent() != NULL) continue; - - LiveRange* second_range = first_range->next(); - while (second_range != NULL) { - LifetimePosition pos = second_range->Start(); - - if (!second_range->IsSpilled()) { - // Add gap move if the two live ranges touch and there is no block - // boundary. 
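// The control-flow resolution above and the range-connecting loop here boil
// down to one rule, sketched with hypothetical types: when the same value ends
// up in two different locations on either side of a split point or a
// control-flow edge, a gap move from the old location to the new one is
// recorded; identical locations need no move.
#include <optional>
#include <string>

namespace resolve_sketch {
struct Location { std::string kind; int index; };  // e.g. {"reg", 3} or {"slot", 1}

inline bool operator==(const Location& a, const Location& b) {
  return a.kind == b.kind && a.index == b.index;
}

struct Move { Location from; Location to; };

// Location of the value at the end of the predecessor vs. at the block start.
inline std::optional<Move> Connect(const Location& pred_loc, const Location& block_loc) {
  if (pred_loc == block_loc) return std::nullopt;
  return Move{pred_loc, block_loc};
}
}  // namespace resolve_sketch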
- if (first_range->End().Value() == pos.Value()) { - bool should_insert = true; - if (IsBlockBoundary(pos)) { - should_insert = CanEagerlyResolveControlFlow(GetBlock(pos)); - } - if (should_insert) { - LParallelMove* move = GetConnectingParallelMove(pos); - LOperand* prev_operand = first_range->CreateAssignedOperand( - chunk()->zone()); - LOperand* cur_operand = second_range->CreateAssignedOperand( - chunk()->zone()); - move->AddMove(prev_operand, cur_operand, - chunk()->zone()); - } - } - } - - first_range = second_range; - second_range = second_range->next(); - } - } -} - - -bool LAllocator::CanEagerlyResolveControlFlow(HBasicBlock* block) const { - if (block->predecessors()->length() != 1) return false; - return block->predecessors()->first()->block_id() == block->block_id() - 1; -} - - -void LAllocator::ResolveControlFlow() { - LAllocatorPhase phase("L_Resolve control flow", this); - const ZoneList* blocks = graph_->blocks(); - for (int block_id = 1; block_id < blocks->length(); ++block_id) { - HBasicBlock* block = blocks->at(block_id); - if (CanEagerlyResolveControlFlow(block)) continue; - BitVector* live = live_in_sets_[block->block_id()]; - BitVector::Iterator iterator(live); - while (!iterator.Done()) { - int operand_index = iterator.Current(); - for (int i = 0; i < block->predecessors()->length(); ++i) { - HBasicBlock* cur = block->predecessors()->at(i); - LiveRange* cur_range = LiveRangeFor(operand_index); - ResolveControlFlow(cur_range, block, cur); - } - iterator.Advance(); - } - } -} - - -void LAllocator::BuildLiveRanges() { - LAllocatorPhase phase("L_Build live ranges", this); - InitializeLivenessAnalysis(); - // Process the blocks in reverse order. - const ZoneList* blocks = graph_->blocks(); - for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) { - HBasicBlock* block = blocks->at(block_id); - BitVector* live = ComputeLiveOut(block); - // Initially consider all live_out values live for the entire block. We - // will shorten these intervals if necessary. - AddInitialIntervals(block, live); - - // Process the instructions in reverse order, generating and killing - // live values. - ProcessInstructions(block, live); - // All phi output operands are killed by this block. - const ZoneList* phis = block->phis(); - for (int i = 0; i < phis->length(); ++i) { - // The live range interval already ends at the first instruction of the - // block. - HPhi* phi = phis->at(i); - live->Remove(phi->id()); - - LOperand* hint = NULL; - LOperand* phi_operand = NULL; - LGap* gap = GetLastGap(phi->block()->predecessors()->at(0)); - LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START, - chunk()->zone()); - for (int j = 0; j < move->move_operands()->length(); ++j) { - LOperand* to = move->move_operands()->at(j).destination(); - if (to->IsUnallocated() && - LUnallocated::cast(to)->virtual_register() == phi->id()) { - hint = move->move_operands()->at(j).source(); - phi_operand = to; - break; - } - } - DCHECK(hint != NULL); - - LifetimePosition block_start = LifetimePosition::FromInstructionIndex( - block->first_instruction_index()); - Define(block_start, phi_operand, hint); - } - - // Now live is live_in for this block except not including values live - // out on backward successor edges. - live_in_sets_[block_id] = live; - - // If this block is a loop header go back and patch up the necessary - // predecessor blocks. - if (block->IsLoopHeader()) { - // TODO(kmillikin): Need to be able to get the last block of the loop - // in the loop information. 
Add a live range stretching from the first - // loop instruction to the last for each value live on entry to the - // header. - HBasicBlock* back_edge = block->loop_information()->GetLastBackEdge(); - BitVector::Iterator iterator(live); - LifetimePosition start = LifetimePosition::FromInstructionIndex( - block->first_instruction_index()); - LifetimePosition end = LifetimePosition::FromInstructionIndex( - back_edge->last_instruction_index()).NextInstruction(); - while (!iterator.Done()) { - int operand_index = iterator.Current(); - LiveRange* range = LiveRangeFor(operand_index); - range->EnsureInterval(start, end, zone()); - iterator.Advance(); - } - - for (int i = block->block_id() + 1; i <= back_edge->block_id(); ++i) { - live_in_sets_[i]->Union(*live); - } - } - -#ifdef DEBUG - if (block_id == 0) { - BitVector::Iterator iterator(live); - bool found = false; - while (!iterator.Done()) { - found = true; - int operand_index = iterator.Current(); - { - AllowHandleDereference allow_deref; - PrintF("Function: %s\n", chunk_->info()->GetDebugName().get()); - } - PrintF("Value %d used before first definition!\n", operand_index); - LiveRange* range = LiveRangeFor(operand_index); - PrintF("First use is at %d\n", range->first_pos()->pos().Value()); - iterator.Advance(); - } - DCHECK(!found); - } -#endif - } - - for (int i = 0; i < live_ranges_.length(); ++i) { - if (live_ranges_[i] != NULL) { - live_ranges_[i]->kind_ = RequiredRegisterKind(live_ranges_[i]->id()); - } - } -} - - -bool LAllocator::SafePointsAreInOrder() const { - const ZoneList* pointer_maps = chunk_->pointer_maps(); - int safe_point = 0; - for (int i = 0; i < pointer_maps->length(); ++i) { - LPointerMap* map = pointer_maps->at(i); - if (safe_point > map->lithium_position()) return false; - safe_point = map->lithium_position(); - } - return true; -} - - -void LAllocator::PopulatePointerMaps() { - LAllocatorPhase phase("L_Populate pointer maps", this); - const ZoneList* pointer_maps = chunk_->pointer_maps(); - - DCHECK(SafePointsAreInOrder()); - - // Iterate over all safe point positions and record a pointer - // for all spilled live ranges at this point. - int first_safe_point_index = 0; - int last_range_start = 0; - for (int range_idx = 0; range_idx < live_ranges()->length(); ++range_idx) { - LiveRange* range = live_ranges()->at(range_idx); - if (range == NULL) continue; - // Iterate over the first parts of multi-part live ranges. - if (range->parent() != NULL) continue; - // Skip non-pointer values. - if (!HasTaggedValue(range->id())) continue; - // Skip empty live ranges. - if (range->IsEmpty()) continue; - - // Find the extent of the range and its children. - int start = range->Start().InstructionIndex(); - int end = 0; - for (LiveRange* cur = range; cur != NULL; cur = cur->next()) { - LifetimePosition this_end = cur->End(); - if (this_end.InstructionIndex() > end) end = this_end.InstructionIndex(); - DCHECK(cur->Start().InstructionIndex() >= start); - } - - // Most of the ranges are in order, but not all. Keep an eye on when - // they step backwards and reset the first_safe_point_index so we don't - // miss any safe points. - if (start < last_range_start) { - first_safe_point_index = 0; - } - last_range_start = start; - - // Step across all the safe points that are before the start of this range, - // recording how far we step in order to save doing this for the next range. 
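// Sketch of the loop patch-up in BuildLiveRanges above (hypothetical types):
// every value live on entry to a loop header is kept live across the whole
// loop by widening its interval to span from the header's first instruction to
// the end of the last back-edge block.
#include <vector>

namespace loop_sketch {
struct Interval { int start; int end; };  // half-open [start, end)

inline void EnsureInterval(Interval& range, int loop_start, int back_edge_end) {
  if (range.start > loop_start) range.start = loop_start;
  if (range.end < back_edge_end) range.end = back_edge_end;
}

inline void PatchLoop(std::vector<Interval>& live_at_header, int loop_start, int back_edge_end) {
  for (Interval& range : live_at_header) EnsureInterval(range, loop_start, back_edge_end);
}
}  // namespace loop_sketch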
- while (first_safe_point_index < pointer_maps->length()) { - LPointerMap* map = pointer_maps->at(first_safe_point_index); - int safe_point = map->lithium_position(); - if (safe_point >= start) break; - first_safe_point_index++; - } - - // Step through the safe points to see whether they are in the range. - for (int safe_point_index = first_safe_point_index; - safe_point_index < pointer_maps->length(); - ++safe_point_index) { - LPointerMap* map = pointer_maps->at(safe_point_index); - int safe_point = map->lithium_position(); - - // The safe points are sorted so we can stop searching here. - if (safe_point - 1 > end) break; - - // Advance to the next active range that covers the current - // safe point position. - LifetimePosition safe_point_pos = - LifetimePosition::FromInstructionIndex(safe_point); - LiveRange* cur = range; - while (cur != NULL && !cur->Covers(safe_point_pos)) { - cur = cur->next(); - } - if (cur == NULL) continue; - - // Check if the live range is spilled and the safe point is after - // the spill position. - if (range->HasAllocatedSpillOperand() && - safe_point >= range->spill_start_index()) { - TraceAlloc("Pointer for range %d (spilled at %d) at safe point %d\n", - range->id(), range->spill_start_index(), safe_point); - map->RecordPointer(range->GetSpillOperand(), chunk()->zone()); - } - - if (!cur->IsSpilled()) { - TraceAlloc("Pointer in register for range %d (start at %d) " - "at safe point %d\n", - cur->id(), cur->Start().Value(), safe_point); - LOperand* operand = cur->CreateAssignedOperand(chunk()->zone()); - DCHECK(!operand->IsStackSlot()); - map->RecordPointer(operand, chunk()->zone()); - } - } - } -} - - -void LAllocator::AllocateGeneralRegisters() { - LAllocatorPhase phase("L_Allocate general registers", this); - num_registers_ = GetRegConfig()->num_allocatable_general_registers(); - allocatable_register_codes_ = GetRegConfig()->allocatable_general_codes(); - mode_ = GENERAL_REGISTERS; - AllocateRegisters(); -} - - -void LAllocator::AllocateDoubleRegisters() { - LAllocatorPhase phase("L_Allocate double registers", this); - num_registers_ = GetRegConfig()->num_allocatable_double_registers(); - allocatable_register_codes_ = GetRegConfig()->allocatable_double_codes(); - mode_ = DOUBLE_REGISTERS; - AllocateRegisters(); -} - - -void LAllocator::AllocateRegisters() { - DCHECK(unhandled_live_ranges_.is_empty()); - - for (int i = 0; i < live_ranges_.length(); ++i) { - if (live_ranges_[i] != NULL) { - if (live_ranges_[i]->Kind() == mode_) { - AddToUnhandledUnsorted(live_ranges_[i]); - } - } - } - SortUnhandled(); - DCHECK(UnhandledIsSorted()); - - DCHECK(reusable_slots_.is_empty()); - DCHECK(active_live_ranges_.is_empty()); - DCHECK(inactive_live_ranges_.is_empty()); - - if (mode_ == DOUBLE_REGISTERS) { - for (int i = 0; i < fixed_double_live_ranges_.length(); ++i) { - LiveRange* current = fixed_double_live_ranges_.at(i); - if (current != NULL) { - AddToInactive(current); - } - } - } else { - DCHECK(mode_ == GENERAL_REGISTERS); - for (int i = 0; i < fixed_live_ranges_.length(); ++i) { - LiveRange* current = fixed_live_ranges_.at(i); - if (current != NULL) { - AddToInactive(current); - } - } - } - - while (!unhandled_live_ranges_.is_empty()) { - DCHECK(UnhandledIsSorted()); - LiveRange* current = unhandled_live_ranges_.RemoveLast(); - DCHECK(UnhandledIsSorted()); - LifetimePosition position = current->Start(); -#ifdef DEBUG - allocation_finger_ = position; -#endif - TraceAlloc("Processing interval %d start=%d\n", - current->id(), - position.Value()); - - if 
(current->HasAllocatedSpillOperand()) { - TraceAlloc("Live range %d already has a spill operand\n", current->id()); - LifetimePosition next_pos = position; - if (IsGapAt(next_pos.InstructionIndex())) { - next_pos = next_pos.NextInstruction(); - } - UsePosition* pos = current->NextUsePositionRegisterIsBeneficial(next_pos); - // If the range already has a spill operand and it doesn't need a - // register immediately, split it and spill the first part of the range. - if (pos == NULL) { - Spill(current); - continue; - } else if (pos->pos().Value() > - current->Start().NextInstruction().Value()) { - // Do not spill live range eagerly if use position that can benefit from - // the register is too close to the start of live range. - SpillBetween(current, current->Start(), pos->pos()); - if (!AllocationOk()) return; - DCHECK(UnhandledIsSorted()); - continue; - } - } - - for (int i = 0; i < active_live_ranges_.length(); ++i) { - LiveRange* cur_active = active_live_ranges_.at(i); - if (cur_active->End().Value() <= position.Value()) { - ActiveToHandled(cur_active); - --i; // The live range was removed from the list of active live ranges. - } else if (!cur_active->Covers(position)) { - ActiveToInactive(cur_active); - --i; // The live range was removed from the list of active live ranges. - } - } - - for (int i = 0; i < inactive_live_ranges_.length(); ++i) { - LiveRange* cur_inactive = inactive_live_ranges_.at(i); - if (cur_inactive->End().Value() <= position.Value()) { - InactiveToHandled(cur_inactive); - --i; // Live range was removed from the list of inactive live ranges. - } else if (cur_inactive->Covers(position)) { - InactiveToActive(cur_inactive); - --i; // Live range was removed from the list of inactive live ranges. - } - } - - DCHECK(!current->HasRegisterAssigned() && !current->IsSpilled()); - - bool result = TryAllocateFreeReg(current); - if (!AllocationOk()) return; - - if (!result) AllocateBlockedReg(current); - if (!AllocationOk()) return; - - if (current->HasRegisterAssigned()) { - AddToActive(current); - } - } - - reusable_slots_.Rewind(0); - active_live_ranges_.Rewind(0); - inactive_live_ranges_.Rewind(0); -} - - -const char* LAllocator::RegisterName(int allocation_index) { - if (mode_ == GENERAL_REGISTERS) { - return GetRegConfig()->GetGeneralRegisterName(allocation_index); - } else { - return GetRegConfig()->GetDoubleRegisterName(allocation_index); - } -} - - -void LAllocator::TraceAlloc(const char* msg, ...) 
{ - if (FLAG_trace_alloc) { - va_list arguments; - va_start(arguments, msg); - base::OS::VPrint(msg, arguments); - va_end(arguments); - } -} - - -bool LAllocator::HasTaggedValue(int virtual_register) const { - HValue* value = graph_->LookupValue(virtual_register); - if (value == NULL) return false; - return value->representation().IsTagged() && !value->type().IsSmi(); -} - - -RegisterKind LAllocator::RequiredRegisterKind(int virtual_register) const { - if (virtual_register < first_artificial_register_) { - HValue* value = graph_->LookupValue(virtual_register); - if (value != NULL && value->representation().IsDouble()) { - return DOUBLE_REGISTERS; - } - } else if (double_artificial_registers_.Contains( - virtual_register - first_artificial_register_)) { - return DOUBLE_REGISTERS; - } - - return GENERAL_REGISTERS; -} - - -void LAllocator::AddToActive(LiveRange* range) { - TraceAlloc("Add live range %d to active\n", range->id()); - active_live_ranges_.Add(range, zone()); -} - - -void LAllocator::AddToInactive(LiveRange* range) { - TraceAlloc("Add live range %d to inactive\n", range->id()); - inactive_live_ranges_.Add(range, zone()); -} - - -void LAllocator::AddToUnhandledSorted(LiveRange* range) { - if (range == NULL || range->IsEmpty()) return; - DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled()); - DCHECK(allocation_finger_.Value() <= range->Start().Value()); - for (int i = unhandled_live_ranges_.length() - 1; i >= 0; --i) { - LiveRange* cur_range = unhandled_live_ranges_.at(i); - if (range->ShouldBeAllocatedBefore(cur_range)) { - TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1); - unhandled_live_ranges_.InsertAt(i + 1, range, zone()); - DCHECK(UnhandledIsSorted()); - return; - } - } - TraceAlloc("Add live range %d to unhandled at start\n", range->id()); - unhandled_live_ranges_.InsertAt(0, range, zone()); - DCHECK(UnhandledIsSorted()); -} - - -void LAllocator::AddToUnhandledUnsorted(LiveRange* range) { - if (range == NULL || range->IsEmpty()) return; - DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled()); - TraceAlloc("Add live range %d to unhandled unsorted at end\n", range->id()); - unhandled_live_ranges_.Add(range, zone()); -} - - -static int UnhandledSortHelper(LiveRange* const* a, LiveRange* const* b) { - DCHECK(!(*a)->ShouldBeAllocatedBefore(*b) || - !(*b)->ShouldBeAllocatedBefore(*a)); - if ((*a)->ShouldBeAllocatedBefore(*b)) return 1; - if ((*b)->ShouldBeAllocatedBefore(*a)) return -1; - return (*a)->id() - (*b)->id(); -} - - -// Sort the unhandled live ranges so that the ranges to be processed first are -// at the end of the array list. This is convenient for the register allocation -// algorithm because it is efficient to remove elements from the end. -void LAllocator::SortUnhandled() { - TraceAlloc("Sort unhandled\n"); - unhandled_live_ranges_.Sort(&UnhandledSortHelper); -} - - -bool LAllocator::UnhandledIsSorted() { - int len = unhandled_live_ranges_.length(); - for (int i = 1; i < len; i++) { - LiveRange* a = unhandled_live_ranges_.at(i - 1); - LiveRange* b = unhandled_live_ranges_.at(i); - if (a->Start().Value() < b->Start().Value()) return false; - } - return true; -} - - -void LAllocator::FreeSpillSlot(LiveRange* range) { - // Check that we are the last range. 
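// Sketch of the unhandled-list discipline above (hypothetical types): the list
// is kept sorted so the range with the smallest start position sits at the end
// of the vector, letting the main allocation loop pop the next range in O(1)
// rather than erasing from the front.
#include <algorithm>
#include <vector>

namespace unhandled_sketch {
struct Range { int start; int id; };

inline void SortUnhandled(std::vector<Range>& unhandled) {
  // Descending by start position: the smallest start ends up last.
  std::sort(unhandled.begin(), unhandled.end(),
            [](const Range& a, const Range& b) { return a.start > b.start; });
}

inline Range TakeNext(std::vector<Range>& unhandled) {
  Range next = unhandled.back();  // the range that starts earliest
  unhandled.pop_back();
  return next;
}
}  // namespace unhandled_sketch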
- if (range->next() != NULL) return; - - if (!range->TopLevel()->HasAllocatedSpillOperand()) return; - - int index = range->TopLevel()->GetSpillOperand()->index(); - if (index >= 0) { - reusable_slots_.Add(range, zone()); - } -} - - -LOperand* LAllocator::TryReuseSpillSlot(LiveRange* range) { - if (reusable_slots_.is_empty()) return NULL; - if (reusable_slots_.first()->End().Value() > - range->TopLevel()->Start().Value()) { - return NULL; - } - LOperand* result = reusable_slots_.first()->TopLevel()->GetSpillOperand(); - reusable_slots_.Remove(0); - return result; -} - - -void LAllocator::ActiveToHandled(LiveRange* range) { - DCHECK(active_live_ranges_.Contains(range)); - active_live_ranges_.RemoveElement(range); - TraceAlloc("Moving live range %d from active to handled\n", range->id()); - FreeSpillSlot(range); -} - - -void LAllocator::ActiveToInactive(LiveRange* range) { - DCHECK(active_live_ranges_.Contains(range)); - active_live_ranges_.RemoveElement(range); - inactive_live_ranges_.Add(range, zone()); - TraceAlloc("Moving live range %d from active to inactive\n", range->id()); -} - - -void LAllocator::InactiveToHandled(LiveRange* range) { - DCHECK(inactive_live_ranges_.Contains(range)); - inactive_live_ranges_.RemoveElement(range); - TraceAlloc("Moving live range %d from inactive to handled\n", range->id()); - FreeSpillSlot(range); -} - - -void LAllocator::InactiveToActive(LiveRange* range) { - DCHECK(inactive_live_ranges_.Contains(range)); - inactive_live_ranges_.RemoveElement(range); - active_live_ranges_.Add(range, zone()); - TraceAlloc("Moving live range %d from inactive to active\n", range->id()); -} - - -bool LAllocator::TryAllocateFreeReg(LiveRange* current) { - DCHECK(DoubleRegister::kMaxNumRegisters >= Register::kNumRegisters); - - LifetimePosition free_until_pos[DoubleRegister::kMaxNumRegisters]; - - for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) { - free_until_pos[i] = LifetimePosition::MaxPosition(); - } - - for (int i = 0; i < active_live_ranges_.length(); ++i) { - LiveRange* cur_active = active_live_ranges_.at(i); - free_until_pos[cur_active->assigned_register()] = - LifetimePosition::FromInstructionIndex(0); - } - - for (int i = 0; i < inactive_live_ranges_.length(); ++i) { - LiveRange* cur_inactive = inactive_live_ranges_.at(i); - DCHECK(cur_inactive->End().Value() > current->Start().Value()); - LifetimePosition next_intersection = - cur_inactive->FirstIntersection(current); - if (!next_intersection.IsValid()) continue; - int cur_reg = cur_inactive->assigned_register(); - free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection); - } - - LOperand* hint = current->FirstHint(); - if (hint != NULL && (hint->IsRegister() || hint->IsDoubleRegister())) { - int register_index = hint->index(); - TraceAlloc( - "Found reg hint %s (free until [%d) for live range %d (end %d[).\n", - RegisterName(register_index), - free_until_pos[register_index].Value(), - current->id(), - current->End().Value()); - - // The desired register is free until the end of the current live range. - if (free_until_pos[register_index].Value() >= current->End().Value()) { - TraceAlloc("Assigning preferred reg %s to live range %d\n", - RegisterName(register_index), - current->id()); - SetLiveRangeAssignedRegister(current, register_index); - return true; - } - } - - // Find the register which stays free for the longest time. 
- int reg = allocatable_register_codes_[0]; - for (int i = 1; i < RegisterCount(); ++i) { - int code = allocatable_register_codes_[i]; - if (free_until_pos[code].Value() > free_until_pos[reg].Value()) { - reg = code; - } - } - - LifetimePosition pos = free_until_pos[reg]; - - if (pos.Value() <= current->Start().Value()) { - // All registers are blocked. - return false; - } - - if (pos.Value() < current->End().Value()) { - // Register reg is available at the range start but becomes blocked before - // the range end. Split current at position where it becomes blocked. - LiveRange* tail = SplitRangeAt(current, pos); - if (!AllocationOk()) return false; - AddToUnhandledSorted(tail); - } - - - // Register reg is available at the range start and is free until - // the range end. - DCHECK(pos.Value() >= current->End().Value()); - TraceAlloc("Assigning free reg %s to live range %d\n", - RegisterName(reg), - current->id()); - SetLiveRangeAssignedRegister(current, reg); - - return true; -} - - -void LAllocator::AllocateBlockedReg(LiveRange* current) { - UsePosition* register_use = current->NextRegisterPosition(current->Start()); - if (register_use == NULL) { - // There is no use in the current live range that requires a register. - // We can just spill it. - Spill(current); - return; - } - - - LifetimePosition use_pos[DoubleRegister::kMaxNumRegisters]; - LifetimePosition block_pos[DoubleRegister::kMaxNumRegisters]; - - for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) { - use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition(); - } - - for (int i = 0; i < active_live_ranges_.length(); ++i) { - LiveRange* range = active_live_ranges_[i]; - int cur_reg = range->assigned_register(); - if (range->IsFixed() || !range->CanBeSpilled(current->Start())) { - block_pos[cur_reg] = use_pos[cur_reg] = - LifetimePosition::FromInstructionIndex(0); - } else { - UsePosition* next_use = range->NextUsePositionRegisterIsBeneficial( - current->Start()); - if (next_use == NULL) { - use_pos[cur_reg] = range->End(); - } else { - use_pos[cur_reg] = next_use->pos(); - } - } - } - - for (int i = 0; i < inactive_live_ranges_.length(); ++i) { - LiveRange* range = inactive_live_ranges_.at(i); - DCHECK(range->End().Value() > current->Start().Value()); - LifetimePosition next_intersection = range->FirstIntersection(current); - if (!next_intersection.IsValid()) continue; - int cur_reg = range->assigned_register(); - if (range->IsFixed()) { - block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection); - use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]); - } else { - use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection); - } - } - - int reg = allocatable_register_codes_[0]; - for (int i = 1; i < RegisterCount(); ++i) { - int code = allocatable_register_codes_[i]; - if (use_pos[code].Value() > use_pos[reg].Value()) { - reg = code; - } - } - - LifetimePosition pos = use_pos[reg]; - - if (pos.Value() < register_use->pos().Value()) { - // All registers are blocked before the first use that requires a register. - // Spill starting part of live range up to that use. - SpillBetween(current, current->Start(), register_use->pos()); - return; - } - - if (block_pos[reg].Value() < current->End().Value()) { - // Register becomes blocked before the current range end. Split before that - // position. 
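// Sketch of the "free until" heuristic used by TryAllocateFreeReg above
// (hypothetical types): each register is scored with the position up to which
// it stays free for the current range, and the register that stays free the
// longest wins; a register already blocked at the range's start is unusable.
#include <vector>

namespace freereg_sketch {
// Returns the register code with the largest free_until position, or -1 if
// every register is blocked at 'current_start'.
inline int PickFreeRegister(const std::vector<int>& free_until, int current_start) {
  if (free_until.empty()) return -1;
  int best = 0;
  for (int code = 1; code < static_cast<int>(free_until.size()); ++code) {
    if (free_until[code] > free_until[best]) best = code;
  }
  return free_until[best] > current_start ? best : -1;
}
}  // namespace freereg_sketch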
- LiveRange* tail = SplitBetween(current, - current->Start(), - block_pos[reg].InstructionStart()); - if (!AllocationOk()) return; - AddToUnhandledSorted(tail); - } - - // Register reg is not blocked for the whole range. - DCHECK(block_pos[reg].Value() >= current->End().Value()); - TraceAlloc("Assigning blocked reg %s to live range %d\n", - RegisterName(reg), - current->id()); - SetLiveRangeAssignedRegister(current, reg); - - // This register was not free. Thus we need to find and spill - // parts of active and inactive live regions that use the same register - // at the same lifetime positions as current. - SplitAndSpillIntersecting(current); -} - - -LifetimePosition LAllocator::FindOptimalSpillingPos(LiveRange* range, - LifetimePosition pos) { - HBasicBlock* block = GetBlock(pos.InstructionStart()); - HBasicBlock* loop_header = - block->IsLoopHeader() ? block : block->parent_loop_header(); - - if (loop_header == NULL) return pos; - - UsePosition* prev_use = - range->PreviousUsePositionRegisterIsBeneficial(pos); - - while (loop_header != NULL) { - // We are going to spill live range inside the loop. - // If possible try to move spilling position backwards to loop header. - // This will reduce number of memory moves on the back edge. - LifetimePosition loop_start = LifetimePosition::FromInstructionIndex( - loop_header->first_instruction_index()); - - if (range->Covers(loop_start)) { - if (prev_use == NULL || prev_use->pos().Value() < loop_start.Value()) { - // No register beneficial use inside the loop before the pos. - pos = loop_start; - } - } - - // Try hoisting out to an outer loop. - loop_header = loop_header->parent_loop_header(); - } - - return pos; -} - - -void LAllocator::SplitAndSpillIntersecting(LiveRange* current) { - DCHECK(current->HasRegisterAssigned()); - int reg = current->assigned_register(); - LifetimePosition split_pos = current->Start(); - for (int i = 0; i < active_live_ranges_.length(); ++i) { - LiveRange* range = active_live_ranges_[i]; - if (range->assigned_register() == reg) { - UsePosition* next_pos = range->NextRegisterPosition(current->Start()); - LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos); - if (next_pos == NULL) { - SpillAfter(range, spill_pos); - } else { - // When spilling between spill_pos and next_pos ensure that the range - // remains spilled at least until the start of the current live range. - // This guarantees that we will not introduce new unhandled ranges that - // start before the current range as this violates allocation invariant - // and will lead to an inconsistent state of active and inactive - // live-ranges: ranges are allocated in order of their start positions, - // ranges are retired from active/inactive when the start of the - // current live-range is larger than their end. 
- SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos()); - } - if (!AllocationOk()) return; - ActiveToHandled(range); - --i; - } - } - - for (int i = 0; i < inactive_live_ranges_.length(); ++i) { - LiveRange* range = inactive_live_ranges_[i]; - DCHECK(range->End().Value() > current->Start().Value()); - if (range->assigned_register() == reg && !range->IsFixed()) { - LifetimePosition next_intersection = range->FirstIntersection(current); - if (next_intersection.IsValid()) { - UsePosition* next_pos = range->NextRegisterPosition(current->Start()); - if (next_pos == NULL) { - SpillAfter(range, split_pos); - } else { - next_intersection = Min(next_intersection, next_pos->pos()); - SpillBetween(range, split_pos, next_intersection); - } - if (!AllocationOk()) return; - InactiveToHandled(range); - --i; - } - } - } -} - - -bool LAllocator::IsBlockBoundary(LifetimePosition pos) { - return pos.IsInstructionStart() && - InstructionAt(pos.InstructionIndex())->IsLabel(); -} - - -LiveRange* LAllocator::SplitRangeAt(LiveRange* range, LifetimePosition pos) { - DCHECK(!range->IsFixed()); - TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value()); - - if (pos.Value() <= range->Start().Value()) return range; - - // We can't properly connect liveranges if split occured at the end - // of control instruction. - DCHECK(pos.IsInstructionStart() || - !chunk_->instructions()->at(pos.InstructionIndex())->IsControl()); - - int vreg = GetVirtualRegister(); - if (!AllocationOk()) return NULL; - LiveRange* result = LiveRangeFor(vreg); - range->SplitAt(pos, result, zone()); - return result; -} - - -LiveRange* LAllocator::SplitBetween(LiveRange* range, - LifetimePosition start, - LifetimePosition end) { - DCHECK(!range->IsFixed()); - TraceAlloc("Splitting live range %d in position between [%d, %d]\n", - range->id(), - start.Value(), - end.Value()); - - LifetimePosition split_pos = FindOptimalSplitPos(start, end); - DCHECK(split_pos.Value() >= start.Value()); - return SplitRangeAt(range, split_pos); -} - - -LifetimePosition LAllocator::FindOptimalSplitPos(LifetimePosition start, - LifetimePosition end) { - int start_instr = start.InstructionIndex(); - int end_instr = end.InstructionIndex(); - DCHECK(start_instr <= end_instr); - - // We have no choice - if (start_instr == end_instr) return end; - - HBasicBlock* start_block = GetBlock(start); - HBasicBlock* end_block = GetBlock(end); - - if (end_block == start_block) { - // The interval is split in the same basic block. Split at the latest - // possible position. - return end; - } - - HBasicBlock* block = end_block; - // Find header of outermost loop. - while (block->parent_loop_header() != NULL && - block->parent_loop_header()->block_id() > start_block->block_id()) { - block = block->parent_loop_header(); - } - - // We did not find any suitable outer loop. Split at the latest possible - // position unless end_block is a loop header itself. 
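// A standalone sketch (hypothetical types) of the split-position choice being
// made here: prefer splitting at the header of the outermost loop that lies
// after the start block, so the resulting spill code stays out of loop bodies;
// if there is no such loop, split as late as possible.
namespace splitpos_sketch {
struct Block {
  int id;
  int first_instruction;
  bool is_loop_header;
  const Block* parent_loop_header;  // nullptr when not nested in a loop
};

inline int OptimalSplitPos(const Block* start_block, const Block* end_block, int end_pos) {
  if (start_block == end_block) return end_pos;  // same block: split as late as possible
  const Block* block = end_block;
  while (block->parent_loop_header != nullptr &&
         block->parent_loop_header->id > start_block->id) {
    block = block->parent_loop_header;  // hoist the split out to an outer loop
  }
  if (block == end_block && !end_block->is_loop_header) return end_pos;
  return block->first_instruction;  // split at the loop header instead
}
}  // namespace splitpos_sketch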
- if (block == end_block && !end_block->IsLoopHeader()) return end; - - return LifetimePosition::FromInstructionIndex( - block->first_instruction_index()); -} - - -void LAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) { - LiveRange* second_part = SplitRangeAt(range, pos); - if (!AllocationOk()) return; - Spill(second_part); -} - - -void LAllocator::SpillBetween(LiveRange* range, - LifetimePosition start, - LifetimePosition end) { - SpillBetweenUntil(range, start, start, end); -} - - -void LAllocator::SpillBetweenUntil(LiveRange* range, - LifetimePosition start, - LifetimePosition until, - LifetimePosition end) { - CHECK(start.Value() < end.Value()); - LiveRange* second_part = SplitRangeAt(range, start); - if (!AllocationOk()) return; - - if (second_part->Start().Value() < end.Value()) { - // The split result intersects with [start, end[. - // Split it at position between ]start+1, end[, spill the middle part - // and put the rest to unhandled. - LiveRange* third_part = SplitBetween( - second_part, - Max(second_part->Start().InstructionEnd(), until), - end.PrevInstruction().InstructionEnd()); - if (!AllocationOk()) return; - - DCHECK(third_part != second_part); - - Spill(second_part); - AddToUnhandledSorted(third_part); - } else { - // The split result does not intersect with [start, end[. - // Nothing to spill. Just put it to unhandled as whole. - AddToUnhandledSorted(second_part); - } -} - - -void LAllocator::Spill(LiveRange* range) { - DCHECK(!range->IsSpilled()); - TraceAlloc("Spilling live range %d\n", range->id()); - LiveRange* first = range->TopLevel(); - - if (!first->HasAllocatedSpillOperand()) { - LOperand* op = TryReuseSpillSlot(range); - if (op == NULL) op = chunk_->GetNextSpillSlot(range->Kind()); - first->SetSpillOperand(op); - } - range->MakeSpilled(chunk()->zone()); -} - - -int LAllocator::RegisterCount() const { - return num_registers_; -} - - -#ifdef DEBUG - - -void LAllocator::Verify() const { - for (int i = 0; i < live_ranges()->length(); ++i) { - LiveRange* current = live_ranges()->at(i); - if (current != NULL) current->Verify(); - } -} - - -#endif - - -LAllocatorPhase::LAllocatorPhase(const char* name, LAllocator* allocator) - : CompilationPhase(name, allocator->graph()->info()), - allocator_(allocator) { - if (FLAG_hydrogen_stats) { - allocator_zone_start_allocation_size_ = - allocator->zone()->allocation_size(); - } -} - - -LAllocatorPhase::~LAllocatorPhase() { - if (FLAG_hydrogen_stats) { - size_t size = allocator_->zone()->allocation_size() - - allocator_zone_start_allocation_size_; - isolate()->GetHStatistics()->SaveTiming(name(), base::TimeDelta(), size); - } - - if (ShouldProduceTraceOutput()) { - isolate()->GetHTracer()->TraceLithium(name(), allocator_->chunk()); - isolate()->GetHTracer()->TraceLiveRanges(name(), allocator_); - } - -#ifdef DEBUG - if (allocator_ != NULL) allocator_->Verify(); -#endif -} - - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/lithium-allocator.h b/src/crankshaft/lithium-allocator.h deleted file mode 100644 index d28ad7f9e7..0000000000 --- a/src/crankshaft/lithium-allocator.h +++ /dev/null @@ -1,576 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#ifndef V8_CRANKSHAFT_LITHIUM_ALLOCATOR_H_ -#define V8_CRANKSHAFT_LITHIUM_ALLOCATOR_H_ - -#include "src/allocation.h" -#include "src/base/compiler-specific.h" -#include "src/crankshaft/compilation-phase.h" -#include "src/crankshaft/lithium.h" -#include "src/zone/zone.h" - -namespace v8 { -namespace internal { - -// Forward declarations. -class HBasicBlock; -class HGraph; -class HPhi; -class HTracer; -class HValue; -class BitVector; -class StringStream; - -class LPlatformChunk; -class LOperand; -class LUnallocated; -class LGap; -class LParallelMove; -class LPointerMap; - - -// This class represents a single point of a LOperand's lifetime. -// For each lithium instruction there are exactly two lifetime positions: -// the beginning and the end of the instruction. Lifetime positions for -// different lithium instructions are disjoint. -class LifetimePosition { - public: - // Return the lifetime position that corresponds to the beginning of - // the instruction with the given index. - static LifetimePosition FromInstructionIndex(int index) { - return LifetimePosition(index * kStep); - } - - // Returns a numeric representation of this lifetime position. - int Value() const { - return value_; - } - - // Returns the index of the instruction to which this lifetime position - // corresponds. - int InstructionIndex() const { - DCHECK(IsValid()); - return value_ / kStep; - } - - // Returns true if this lifetime position corresponds to the instruction - // start. - bool IsInstructionStart() const { - return (value_ & (kStep - 1)) == 0; - } - - // Returns the lifetime position for the start of the instruction which - // corresponds to this lifetime position. - LifetimePosition InstructionStart() const { - DCHECK(IsValid()); - return LifetimePosition(value_ & ~(kStep - 1)); - } - - // Returns the lifetime position for the end of the instruction which - // corresponds to this lifetime position. - LifetimePosition InstructionEnd() const { - DCHECK(IsValid()); - return LifetimePosition(InstructionStart().Value() + kStep/2); - } - - // Returns the lifetime position for the beginning of the next instruction. - LifetimePosition NextInstruction() const { - DCHECK(IsValid()); - return LifetimePosition(InstructionStart().Value() + kStep); - } - - // Returns the lifetime position for the beginning of the previous - // instruction. - LifetimePosition PrevInstruction() const { - DCHECK(IsValid()); - DCHECK(value_ > 1); - return LifetimePosition(InstructionStart().Value() - kStep); - } - - // Constructs the lifetime position which does not correspond to any - // instruction. - LifetimePosition() : value_(-1) {} - - // Returns true if this lifetime positions corrensponds to some - // instruction. - bool IsValid() const { return value_ != -1; } - - static inline LifetimePosition Invalid() { return LifetimePosition(); } - - static inline LifetimePosition MaxPosition() { - // We have to use this kind of getter instead of static member due to - // crash bug in GDB. - return LifetimePosition(kMaxInt); - } - - private: - static const int kStep = 2; - - // Code relies on kStep being a power of two. - STATIC_ASSERT(IS_POWER_OF_TWO(kStep)); - - explicit LifetimePosition(int value) : value_(value) { } - - int value_; -}; - - -// Representation of the non-empty interval [start,end[. 
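// Standalone sketch (hypothetical names) of the LifetimePosition encoding
// defined above: each instruction owns two consecutive positions, 2*i for its
// start and 2*i + 1 for its end, so the low bit distinguishes start from end
// and the instruction index is recovered by dividing out the step.
namespace position_sketch {
constexpr int kStep = 2;

constexpr int FromInstructionIndex(int index) { return index * kStep; }
constexpr int InstructionIndex(int pos) { return pos / kStep; }
constexpr bool IsInstructionStart(int pos) { return (pos & (kStep - 1)) == 0; }
constexpr int InstructionStart(int pos) { return pos & ~(kStep - 1); }
constexpr int InstructionEnd(int pos) { return InstructionStart(pos) + kStep / 2; }
constexpr int NextInstruction(int pos) { return InstructionStart(pos) + kStep; }

static_assert(InstructionEnd(FromInstructionIndex(3)) == 7, "end of instruction 3");
static_assert(NextInstruction(7) == 8, "start of instruction 4");
static_assert(!IsInstructionStart(7) && IsInstructionStart(8), "low bit marks the end");
}  // namespace position_sketch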
-class UseInterval: public ZoneObject { - public: - UseInterval(LifetimePosition start, LifetimePosition end) - : start_(start), end_(end), next_(NULL) { - DCHECK(start.Value() < end.Value()); - } - - LifetimePosition start() const { return start_; } - LifetimePosition end() const { return end_; } - UseInterval* next() const { return next_; } - - // Split this interval at the given position without effecting the - // live range that owns it. The interval must contain the position. - void SplitAt(LifetimePosition pos, Zone* zone); - - // If this interval intersects with other return smallest position - // that belongs to both of them. - LifetimePosition Intersect(const UseInterval* other) const { - if (other->start().Value() < start_.Value()) return other->Intersect(this); - if (other->start().Value() < end_.Value()) return other->start(); - return LifetimePosition::Invalid(); - } - - bool Contains(LifetimePosition point) const { - return start_.Value() <= point.Value() && point.Value() < end_.Value(); - } - - private: - void set_start(LifetimePosition start) { start_ = start; } - void set_next(UseInterval* next) { next_ = next; } - - LifetimePosition start_; - LifetimePosition end_; - UseInterval* next_; - - friend class LiveRange; // Assigns to start_. -}; - -// Representation of a use position. -class UsePosition: public ZoneObject { - public: - UsePosition(LifetimePosition pos, LOperand* operand, LOperand* hint); - - LOperand* operand() const { return operand_; } - bool HasOperand() const { return operand_ != NULL; } - - LOperand* hint() const { return hint_; } - bool HasHint() const; - bool RequiresRegister() const; - bool RegisterIsBeneficial() const; - - LifetimePosition pos() const { return pos_; } - UsePosition* next() const { return next_; } - - private: - void set_next(UsePosition* next) { next_ = next; } - - LOperand* const operand_; - LOperand* const hint_; - LifetimePosition const pos_; - UsePosition* next_; - bool requires_reg_; - bool register_beneficial_; - - friend class LiveRange; -}; - -// Representation of SSA values' live ranges as a collection of (continuous) -// intervals over the instruction ordering. -class LiveRange: public ZoneObject { - public: - static const int kInvalidAssignment = 0x7fffffff; - - LiveRange(int id, Zone* zone); - - UseInterval* first_interval() const { return first_interval_; } - UsePosition* first_pos() const { return first_pos_; } - LiveRange* parent() const { return parent_; } - LiveRange* TopLevel() { return (parent_ == NULL) ? this : parent_; } - LiveRange* next() const { return next_; } - bool IsChild() const { return parent() != NULL; } - int id() const { return id_; } - bool IsFixed() const { return id_ < 0; } - bool IsEmpty() const { return first_interval() == NULL; } - LOperand* CreateAssignedOperand(Zone* zone); - int assigned_register() const { return assigned_register_; } - int spill_start_index() const { return spill_start_index_; } - void set_assigned_register(int reg, Zone* zone); - void MakeSpilled(Zone* zone); - - // Returns use position in this live range that follows both start - // and last processed use position. - // Modifies internal state of live range! - UsePosition* NextUsePosition(LifetimePosition start); - - // Returns use position for which register is required in this live - // range and which follows both start and last processed use position - // Modifies internal state of live range! 
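// Plain-integer sketch of UseInterval::Intersect above: two half-open
// intervals [a_start, a_end) and [b_start, b_end) overlap iff the later of the
// two start positions falls before the end of the interval that starts
// earlier, and that later start is the first position they share.
namespace interval_sketch {
constexpr int kInvalid = -1;

constexpr int Intersect(int a_start, int a_end, int b_start, int b_end) {
  return b_start < a_start ? Intersect(b_start, b_end, a_start, a_end)
                           : (b_start < a_end ? b_start : kInvalid);
}

static_assert(Intersect(0, 4, 2, 6) == 2, "overlap begins at 2");
static_assert(Intersect(0, 4, 4, 6) == kInvalid, "merely touching intervals do not overlap");
}  // namespace interval_sketch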
- UsePosition* NextRegisterPosition(LifetimePosition start); - - // Returns use position for which register is beneficial in this live - // range and which follows both start and last processed use position - // Modifies internal state of live range! - UsePosition* NextUsePositionRegisterIsBeneficial(LifetimePosition start); - - // Returns use position for which register is beneficial in this live - // range and which precedes start. - UsePosition* PreviousUsePositionRegisterIsBeneficial(LifetimePosition start); - - // Can this live range be spilled at this position. - bool CanBeSpilled(LifetimePosition pos); - - // Split this live range at the given position which must follow the start of - // the range. - // All uses following the given position will be moved from this - // live range to the result live range. - void SplitAt(LifetimePosition position, LiveRange* result, Zone* zone); - - RegisterKind Kind() const { return kind_; } - bool HasRegisterAssigned() const { - return assigned_register_ != kInvalidAssignment; - } - bool IsSpilled() const { return spilled_; } - - LOperand* current_hint_operand() const { - DCHECK(current_hint_operand_ == FirstHint()); - return current_hint_operand_; - } - LOperand* FirstHint() const { - UsePosition* pos = first_pos_; - while (pos != NULL && !pos->HasHint()) pos = pos->next(); - if (pos != NULL) return pos->hint(); - return NULL; - } - - LifetimePosition Start() const { - DCHECK(!IsEmpty()); - return first_interval()->start(); - } - - LifetimePosition End() const { - DCHECK(!IsEmpty()); - return last_interval_->end(); - } - - bool HasAllocatedSpillOperand() const; - LOperand* GetSpillOperand() const { return spill_operand_; } - void SetSpillOperand(LOperand* operand); - - void SetSpillStartIndex(int start) { - spill_start_index_ = Min(start, spill_start_index_); - } - - bool ShouldBeAllocatedBefore(const LiveRange* other) const; - bool CanCover(LifetimePosition position) const; - bool Covers(LifetimePosition position); - LifetimePosition FirstIntersection(LiveRange* other); - - // Add a new interval or a new use position to this live range. - void EnsureInterval(LifetimePosition start, - LifetimePosition end, - Zone* zone); - void AddUseInterval(LifetimePosition start, - LifetimePosition end, - Zone* zone); - void AddUsePosition(LifetimePosition pos, - LOperand* operand, - LOperand* hint, - Zone* zone); - - // Shorten the most recently added interval by setting a new start. - void ShortenTo(LifetimePosition start); - -#ifdef DEBUG - // True if target overlaps an existing interval. - bool HasOverlap(UseInterval* target) const; - void Verify() const; -#endif - - private: - void ConvertOperands(Zone* zone); - UseInterval* FirstSearchIntervalForPosition(LifetimePosition position) const; - void AdvanceLastProcessedMarker(UseInterval* to_start_of, - LifetimePosition but_not_past) const; - - int id_; - bool spilled_; - RegisterKind kind_; - int assigned_register_; - UseInterval* last_interval_; - UseInterval* first_interval_; - UsePosition* first_pos_; - LiveRange* parent_; - LiveRange* next_; - // This is used as a cache, it doesn't affect correctness. - mutable UseInterval* current_interval_; - UsePosition* last_processed_use_; - // This is used as a cache, it's invalid outside of BuildLiveRanges. - LOperand* current_hint_operand_; - LOperand* spill_operand_; - int spill_start_index_; - - friend class LAllocator; // Assigns to kind_. 
-}; - - -class LAllocator BASE_EMBEDDED { - public: - LAllocator(int first_virtual_register, HGraph* graph); - - static PRINTF_FORMAT(1, 2) void TraceAlloc(const char* msg, ...); - - // Checks whether the value of a given virtual register is tagged. - bool HasTaggedValue(int virtual_register) const; - - // Returns the register kind required by the given virtual register. - RegisterKind RequiredRegisterKind(int virtual_register) const; - - bool Allocate(LChunk* chunk); - - const ZoneList* live_ranges() const { return &live_ranges_; } - const Vector* fixed_live_ranges() const { - return &fixed_live_ranges_; - } - const Vector* fixed_double_live_ranges() const { - return &fixed_double_live_ranges_; - } - - LPlatformChunk* chunk() const { return chunk_; } - HGraph* graph() const { return graph_; } - Isolate* isolate() const { return graph_->isolate(); } - Zone* zone() { return &zone_; } - - int GetVirtualRegister() { - if (next_virtual_register_ >= LUnallocated::kMaxVirtualRegisters) { - allocation_ok_ = false; - // Maintain the invariant that we return something below the maximum. - return 0; - } - return next_virtual_register_++; - } - - bool AllocationOk() { return allocation_ok_; } - - void MarkAsOsrEntry() { - // There can be only one. - DCHECK(!has_osr_entry_); - // Simply set a flag to find and process instruction later. - has_osr_entry_ = true; - } - -#ifdef DEBUG - void Verify() const; -#endif - - BitVector* assigned_registers() { - return assigned_registers_; - } - BitVector* assigned_double_registers() { - return assigned_double_registers_; - } - - private: - void MeetRegisterConstraints(); - void ResolvePhis(); - void BuildLiveRanges(); - void AllocateGeneralRegisters(); - void AllocateDoubleRegisters(); - void ConnectRanges(); - void ResolveControlFlow(); - void PopulatePointerMaps(); - void AllocateRegisters(); - bool CanEagerlyResolveControlFlow(HBasicBlock* block) const; - inline bool SafePointsAreInOrder() const; - - // Liveness analysis support. - void InitializeLivenessAnalysis(); - BitVector* ComputeLiveOut(HBasicBlock* block); - void AddInitialIntervals(HBasicBlock* block, BitVector* live_out); - void ProcessInstructions(HBasicBlock* block, BitVector* live); - void MeetRegisterConstraints(HBasicBlock* block); - void MeetConstraintsBetween(LInstruction* first, - LInstruction* second, - int gap_index); - void ResolvePhis(HBasicBlock* block); - - // Helper methods for building intervals. - LOperand* AllocateFixed(LUnallocated* operand, int pos, bool is_tagged); - LiveRange* LiveRangeFor(LOperand* operand); - void Define(LifetimePosition position, LOperand* operand, LOperand* hint); - void Use(LifetimePosition block_start, - LifetimePosition position, - LOperand* operand, - LOperand* hint); - void AddConstraintsGapMove(int index, LOperand* from, LOperand* to); - - // Helper methods for updating the life range lists. - void AddToActive(LiveRange* range); - void AddToInactive(LiveRange* range); - void AddToUnhandledSorted(LiveRange* range); - void AddToUnhandledUnsorted(LiveRange* range); - void SortUnhandled(); - bool UnhandledIsSorted(); - void ActiveToHandled(LiveRange* range); - void ActiveToInactive(LiveRange* range); - void InactiveToHandled(LiveRange* range); - void InactiveToActive(LiveRange* range); - void FreeSpillSlot(LiveRange* range); - LOperand* TryReuseSpillSlot(LiveRange* range); - - // Helper methods for allocating registers. - bool TryAllocateFreeReg(LiveRange* range); - void AllocateBlockedReg(LiveRange* range); - - // Live range splitting helpers. 
- - // Split the given range at the given position. - // If range starts at or after the given position then the - // original range is returned. - // Otherwise returns the live range that starts at pos and contains - // all uses from the original range that follow pos. Uses at pos will - // still be owned by the original range after splitting. - LiveRange* SplitRangeAt(LiveRange* range, LifetimePosition pos); - - // Split the given range in a position from the interval [start, end]. - LiveRange* SplitBetween(LiveRange* range, - LifetimePosition start, - LifetimePosition end); - - // Find a lifetime position in the interval [start, end] which - // is optimal for splitting: it is either header of the outermost - // loop covered by this interval or the latest possible position. - LifetimePosition FindOptimalSplitPos(LifetimePosition start, - LifetimePosition end); - - // Spill the given life range after position pos. - void SpillAfter(LiveRange* range, LifetimePosition pos); - - // Spill the given life range after position [start] and up to position [end]. - void SpillBetween(LiveRange* range, - LifetimePosition start, - LifetimePosition end); - - // Spill the given life range after position [start] and up to position [end]. - // Range is guaranteed to be spilled at least until position [until]. - void SpillBetweenUntil(LiveRange* range, - LifetimePosition start, - LifetimePosition until, - LifetimePosition end); - - void SplitAndSpillIntersecting(LiveRange* range); - - // If we are trying to spill a range inside the loop try to - // hoist spill position out to the point just before the loop. - LifetimePosition FindOptimalSpillingPos(LiveRange* range, - LifetimePosition pos); - - void Spill(LiveRange* range); - bool IsBlockBoundary(LifetimePosition pos); - - // Helper methods for resolving control flow. - void ResolveControlFlow(LiveRange* range, - HBasicBlock* block, - HBasicBlock* pred); - - inline void SetLiveRangeAssignedRegister(LiveRange* range, int reg); - - // Return parallel move that should be used to connect ranges split at the - // given position. - LParallelMove* GetConnectingParallelMove(LifetimePosition pos); - - // Return the block which contains give lifetime position. - HBasicBlock* GetBlock(LifetimePosition pos); - - // Helper methods for the fixed registers. - int RegisterCount() const; - static int FixedLiveRangeID(int index) { return -index - 1; } - static int FixedDoubleLiveRangeID(int index); - LiveRange* FixedLiveRangeFor(int index); - LiveRange* FixedDoubleLiveRangeFor(int index); - LiveRange* LiveRangeFor(int index); - HPhi* LookupPhi(LOperand* operand) const; - LGap* GetLastGap(HBasicBlock* block); - - const char* RegisterName(int allocation_index); - - inline bool IsGapAt(int index); - - inline LInstruction* InstructionAt(int index); - - inline LGap* GapAt(int index); - - Zone zone_; - - LPlatformChunk* chunk_; - - // During liveness analysis keep a mapping from block id to live_in sets - // for blocks already analyzed. - ZoneList live_in_sets_; - - // Liveness analysis results. - ZoneList live_ranges_; - - // Lists of live ranges - EmbeddedVector fixed_live_ranges_; - EmbeddedVector - fixed_double_live_ranges_; - ZoneList unhandled_live_ranges_; - ZoneList active_live_ranges_; - ZoneList inactive_live_ranges_; - ZoneList reusable_slots_; - - // Next virtual register number to be assigned to temporaries. 
- int next_virtual_register_; - int first_artificial_register_; - GrowableBitVector double_artificial_registers_; - - RegisterKind mode_; - int num_registers_; - const int* allocatable_register_codes_; - - BitVector* assigned_registers_; - BitVector* assigned_double_registers_; - - HGraph* graph_; - - bool has_osr_entry_; - - // Indicates success or failure during register allocation. - bool allocation_ok_; - -#ifdef DEBUG - LifetimePosition allocation_finger_; -#endif - - DISALLOW_COPY_AND_ASSIGN(LAllocator); -}; - - -class LAllocatorPhase : public CompilationPhase { - public: - LAllocatorPhase(const char* name, LAllocator* allocator); - ~LAllocatorPhase(); - - private: - LAllocator* allocator_; - size_t allocator_zone_start_allocation_size_; - - DISALLOW_COPY_AND_ASSIGN(LAllocatorPhase); -}; - - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_LITHIUM_ALLOCATOR_H_ diff --git a/src/crankshaft/lithium-codegen.cc b/src/crankshaft/lithium-codegen.cc deleted file mode 100644 index ad39866feb..0000000000 --- a/src/crankshaft/lithium-codegen.cc +++ /dev/null @@ -1,400 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/lithium-codegen.h" - -#include - -#include "src/objects-inl.h" - -#if V8_TARGET_ARCH_IA32 -#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT -#include "src/crankshaft/ia32/lithium-codegen-ia32.h" // NOLINT -#elif V8_TARGET_ARCH_X64 -#include "src/crankshaft/x64/lithium-x64.h" // NOLINT -#include "src/crankshaft/x64/lithium-codegen-x64.h" // NOLINT -#elif V8_TARGET_ARCH_ARM -#include "src/crankshaft/arm/lithium-arm.h" // NOLINT -#include "src/crankshaft/arm/lithium-codegen-arm.h" // NOLINT -#elif V8_TARGET_ARCH_ARM64 -#include "src/crankshaft/arm64/lithium-arm64.h" // NOLINT -#include "src/crankshaft/arm64/lithium-codegen-arm64.h" // NOLINT -#elif V8_TARGET_ARCH_MIPS -#include "src/crankshaft/mips/lithium-mips.h" // NOLINT -#include "src/crankshaft/mips/lithium-codegen-mips.h" // NOLINT -#elif V8_TARGET_ARCH_MIPS64 -#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT -#include "src/crankshaft/mips64/lithium-codegen-mips64.h" // NOLINT -#elif V8_TARGET_ARCH_X87 -#include "src/crankshaft/x87/lithium-x87.h" // NOLINT -#include "src/crankshaft/x87/lithium-codegen-x87.h" // NOLINT -#elif V8_TARGET_ARCH_PPC -#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT -#include "src/crankshaft/ppc/lithium-codegen-ppc.h" // NOLINT -#elif V8_TARGET_ARCH_S390 -#include "src/crankshaft/s390/lithium-s390.h" // NOLINT -#include "src/crankshaft/s390/lithium-codegen-s390.h" // NOLINT -#else -#error Unsupported target architecture. 
-#endif - -#include "src/globals.h" - -namespace v8 { -namespace internal { - - -HGraph* LCodeGenBase::graph() const { - return chunk()->graph(); -} - -LCodeGenBase::LCodeGenBase(LChunk* chunk, MacroAssembler* assembler, - CompilationInfo* info) - : chunk_(static_cast(chunk)), - masm_(assembler), - info_(info), - zone_(info->zone()), - status_(UNUSED), - current_block_(-1), - current_instruction_(-1), - instructions_(chunk->instructions()), - deoptimizations_(4, info->zone()), - deoptimization_literals_(8, info->zone()), - translations_(info->zone()), - inlined_function_count_(0), - last_lazy_deopt_pc_(0), - osr_pc_offset_(-1), - source_position_table_builder_(info->zone(), - info->SourcePositionRecordingMode()) {} - -Isolate* LCodeGenBase::isolate() const { return info_->isolate(); } - -bool LCodeGenBase::GenerateBody() { - DCHECK(is_generating()); - bool emit_instructions = true; - LCodeGen* codegen = static_cast(this); - for (current_instruction_ = 0; - !is_aborted() && current_instruction_ < instructions_->length(); - current_instruction_++) { - LInstruction* instr = instructions_->at(current_instruction_); - - // Don't emit code for basic blocks with a replacement. - if (instr->IsLabel()) { - emit_instructions = !LLabel::cast(instr)->HasReplacement() && - (!FLAG_unreachable_code_elimination || - instr->hydrogen_value()->block()->IsReachable()); - if (FLAG_code_comments && !emit_instructions) { - Comment( - ";;; <@%d,#%d> -------------------- B%d (unreachable/replaced) " - "--------------------", - current_instruction_, - instr->hydrogen_value()->id(), - instr->hydrogen_value()->block()->block_id()); - } - } - if (!emit_instructions) continue; - - if (FLAG_code_comments && instr->HasInterestingComment(codegen)) { - Comment(";;; <@%d,#%d> %s", - current_instruction_, - instr->hydrogen_value()->id(), - instr->Mnemonic()); - } - - GenerateBodyInstructionPre(instr); - - HValue* value = instr->hydrogen_value(); - if (value->position().IsKnown()) { - RecordAndWritePosition(value->position()); - } - - instr->CompileToNative(codegen); - - GenerateBodyInstructionPost(instr); - } - EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); - last_lazy_deopt_pc_ = masm()->pc_offset(); - return !is_aborted(); -} - - -void LCodeGenBase::CheckEnvironmentUsage() { -#ifdef DEBUG - bool dead_block = false; - for (int i = 0; i < instructions_->length(); i++) { - LInstruction* instr = instructions_->at(i); - HValue* hval = instr->hydrogen_value(); - if (instr->IsLabel()) dead_block = LLabel::cast(instr)->HasReplacement(); - if (dead_block || !hval->block()->IsReachable()) continue; - - HInstruction* hinstr = HInstruction::cast(hval); - if (!hinstr->CanDeoptimize() && instr->HasEnvironment()) { - V8_Fatal(__FILE__, __LINE__, "CanDeoptimize is wrong for %s (%s)", - hinstr->Mnemonic(), instr->Mnemonic()); - } - - if (instr->HasEnvironment() && !instr->environment()->has_been_used()) { - V8_Fatal(__FILE__, __LINE__, "unused environment for %s (%s)", - hinstr->Mnemonic(), instr->Mnemonic()); - } - } -#endif -} - -void LCodeGenBase::RecordAndWritePosition(SourcePosition pos) { - if (!pos.IsKnown()) return; - source_position_table_builder_.AddPosition(masm_->pc_offset(), pos, false); -} - -void LCodeGenBase::Comment(const char* format, ...) 
{ - if (!FLAG_code_comments) return; - char buffer[4 * KB]; - StringBuilder builder(buffer, arraysize(buffer)); - va_list arguments; - va_start(arguments, format); - builder.AddFormattedList(format, arguments); - va_end(arguments); - - // Copy the string before recording it in the assembler to avoid - // issues when the stack allocated buffer goes out of scope. - size_t length = builder.position(); - Vector copy = Vector::New(static_cast(length) + 1); - MemCopy(copy.start(), builder.Finalize(), copy.length()); - masm()->RecordComment(copy.start()); -} - - -void LCodeGenBase::DeoptComment(const Deoptimizer::DeoptInfo& deopt_info) { - SourcePosition position = deopt_info.position; - int deopt_id = deopt_info.deopt_id; - if (masm()->isolate()->NeedsSourcePositionsForProfiling()) { - masm()->RecordDeoptReason(deopt_info.deopt_reason, position, deopt_id); - } -} - - -int LCodeGenBase::GetNextEmittedBlock() const { - for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) { - if (!graph()->blocks()->at(i)->IsReachable()) continue; - if (!chunk_->GetLabel(i)->HasReplacement()) return i; - } - return -1; -} - - -void LCodeGenBase::Abort(BailoutReason reason) { - info()->AbortOptimization(reason); - status_ = ABORTED; -} - - -void LCodeGenBase::Retry(BailoutReason reason) { - info()->RetryOptimization(reason); - status_ = ABORTED; -} - - -void LCodeGenBase::AddDeprecationDependency(Handle map) { - if (map->is_deprecated()) return Retry(kMapBecameDeprecated); - chunk_->AddDeprecationDependency(map); -} - - -void LCodeGenBase::AddStabilityDependency(Handle map) { - if (!map->is_stable()) return Retry(kMapBecameUnstable); - chunk_->AddStabilityDependency(map); -} - - -int LCodeGenBase::DefineDeoptimizationLiteral(Handle literal) { - int result = deoptimization_literals_.length(); - for (int i = 0; i < deoptimization_literals_.length(); ++i) { - if (deoptimization_literals_[i].is_identical_to(literal)) return i; - } - deoptimization_literals_.Add(literal, zone()); - return result; -} - - -void LCodeGenBase::WriteTranslationFrame(LEnvironment* environment, - Translation* translation) { - int translation_size = environment->translation_size(); -#ifdef DEBUG - // The output frame height does not include the parameters. - int height = translation_size - environment->parameter_count(); -#endif // DEBUG - - switch (environment->frame_type()) { - case JS_FUNCTION: { - UNREACHABLE(); - break; - } - case JS_CONSTRUCT: { - int shared_id = DefineDeoptimizationLiteral( - environment->entry() ? environment->entry()->shared() - : info()->shared_info()); - translation->BeginConstructStubFrame(BailoutId::ConstructStubInvoke(), - shared_id, translation_size); - if (info()->closure().is_identical_to(environment->closure())) { - translation->StoreJSFrameFunction(); - } else { - int closure_id = DefineDeoptimizationLiteral(environment->closure()); - translation->StoreLiteral(closure_id); - } - break; - } - case JS_GETTER: { - DCHECK_EQ(1, translation_size); - DCHECK_EQ(0, height); - int shared_id = DefineDeoptimizationLiteral( - environment->entry() ? 
environment->entry()->shared() - : info()->shared_info()); - translation->BeginGetterStubFrame(shared_id); - if (info()->closure().is_identical_to(environment->closure())) { - translation->StoreJSFrameFunction(); - } else { - int closure_id = DefineDeoptimizationLiteral(environment->closure()); - translation->StoreLiteral(closure_id); - } - break; - } - case JS_SETTER: { - DCHECK_EQ(2, translation_size); - DCHECK_EQ(0, height); - int shared_id = DefineDeoptimizationLiteral( - environment->entry() ? environment->entry()->shared() - : info()->shared_info()); - translation->BeginSetterStubFrame(shared_id); - if (info()->closure().is_identical_to(environment->closure())) { - translation->StoreJSFrameFunction(); - } else { - int closure_id = DefineDeoptimizationLiteral(environment->closure()); - translation->StoreLiteral(closure_id); - } - break; - } - case TAIL_CALLER_FUNCTION: { - DCHECK_EQ(0, translation_size); - int shared_id = DefineDeoptimizationLiteral( - environment->entry() ? environment->entry()->shared() - : info()->shared_info()); - translation->BeginTailCallerFrame(shared_id); - if (info()->closure().is_identical_to(environment->closure())) { - translation->StoreJSFrameFunction(); - } else { - int closure_id = DefineDeoptimizationLiteral(environment->closure()); - translation->StoreLiteral(closure_id); - } - break; - } - case ARGUMENTS_ADAPTOR: { - int shared_id = DefineDeoptimizationLiteral( - environment->entry() ? environment->entry()->shared() - : info()->shared_info()); - translation->BeginArgumentsAdaptorFrame(shared_id, translation_size); - if (info()->closure().is_identical_to(environment->closure())) { - translation->StoreJSFrameFunction(); - } else { - int closure_id = DefineDeoptimizationLiteral(environment->closure()); - translation->StoreLiteral(closure_id); - } - break; - } - case STUB: - translation->BeginCompiledStubFrame(translation_size); - break; - } -} - -namespace { - -Handle> CreateInliningPositions( - CompilationInfo* info) { - const CompilationInfo::InlinedFunctionList& inlined_functions = - info->inlined_functions(); - if (inlined_functions.size() == 0) { - return Handle>::cast( - info->isolate()->factory()->empty_byte_array()); - } - Handle> inl_positions = - PodArray::New( - info->isolate(), static_cast(inlined_functions.size()), TENURED); - for (size_t i = 0; i < inlined_functions.size(); ++i) { - inl_positions->set(static_cast(i), inlined_functions[i].position); - } - return inl_positions; -} - -} // namespace - -void LCodeGenBase::PopulateDeoptimizationData(Handle code) { - int length = deoptimizations_.length(); - if (length == 0) return; - Handle data = - DeoptimizationInputData::New(isolate(), length, TENURED); - - Handle translations = - translations_.CreateByteArray(isolate()->factory()); - data->SetTranslationByteArray(*translations); - data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_)); - data->SetOptimizationId(Smi::FromInt(info_->optimization_id())); - if (info_->IsOptimizing()) { - // Reference to shared function info does not change between phases. 
- AllowDeferredHandleDereference allow_handle_dereference; - data->SetSharedFunctionInfo(*info_->shared_info()); - } else { - data->SetSharedFunctionInfo(Smi::kZero); - } - data->SetWeakCellCache(Smi::kZero); - - Handle literals = - factory()->NewFixedArray(deoptimization_literals_.length(), TENURED); - { - AllowDeferredHandleDereference copy_handles; - for (int i = 0; i < deoptimization_literals_.length(); i++) { - literals->set(i, *deoptimization_literals_[i]); - } - data->SetLiteralArray(*literals); - } - - Handle> inl_pos = CreateInliningPositions(info_); - data->SetInliningPositions(*inl_pos); - - data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt())); - data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_)); - - // Populate the deoptimization entries. - for (int i = 0; i < length; i++) { - LEnvironment* env = deoptimizations_[i]; - data->SetAstId(i, env->ast_id()); - data->SetTranslationIndex(i, Smi::FromInt(env->translation_index())); - data->SetArgumentsStackHeight(i, - Smi::FromInt(env->arguments_stack_height())); - data->SetPc(i, Smi::FromInt(env->pc_offset())); - } - code->set_deoptimization_data(*data); -} - - -void LCodeGenBase::PopulateDeoptimizationLiteralsWithInlinedFunctions() { - DCHECK_EQ(0, deoptimization_literals_.length()); - for (CompilationInfo::InlinedFunctionHolder& inlined : - info()->inlined_functions()) { - if (!inlined.shared_info.is_identical_to(info()->shared_info())) { - int index = DefineDeoptimizationLiteral(inlined.shared_info); - inlined.RegisterInlinedFunctionId(index); - } - } - inlined_function_count_ = deoptimization_literals_.length(); -} - -Deoptimizer::DeoptInfo LCodeGenBase::MakeDeoptInfo( - LInstruction* instr, DeoptimizeReason deopt_reason, int deopt_id) { - Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position(), - deopt_reason, deopt_id); - return deopt_info; -} - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/lithium-codegen.h b/src/crankshaft/lithium-codegen.h deleted file mode 100644 index 03ece53bf4..0000000000 --- a/src/crankshaft/lithium-codegen.h +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_LITHIUM_CODEGEN_H_ -#define V8_CRANKSHAFT_LITHIUM_CODEGEN_H_ - -#include "src/bailout-reason.h" -#include "src/deoptimizer.h" -#include "src/source-position-table.h" - -namespace v8 { -namespace internal { - -class CompilationInfo; -class HGraph; -class LChunk; -class LEnvironment; -class LInstruction; -class LPlatformChunk; - -class LCodeGenBase BASE_EMBEDDED { - public: - LCodeGenBase(LChunk* chunk, - MacroAssembler* assembler, - CompilationInfo* info); - virtual ~LCodeGenBase() {} - - // Simple accessors. 
- MacroAssembler* masm() const { return masm_; } - CompilationInfo* info() const { return info_; } - Isolate* isolate() const; - Factory* factory() const { return isolate()->factory(); } - Heap* heap() const { return isolate()->heap(); } - Zone* zone() const { return zone_; } - LPlatformChunk* chunk() const { return chunk_; } - HGraph* graph() const; - SourcePositionTableBuilder* source_position_table_builder() { - return &source_position_table_builder_; - } - - void PRINTF_FORMAT(2, 3) Comment(const char* format, ...); - void DeoptComment(const Deoptimizer::DeoptInfo& deopt_info); - static Deoptimizer::DeoptInfo MakeDeoptInfo(LInstruction* instr, - DeoptimizeReason deopt_reason, - int deopt_id); - - bool GenerateBody(); - virtual void GenerateBodyInstructionPre(LInstruction* instr) {} - virtual void GenerateBodyInstructionPost(LInstruction* instr) {} - - virtual void EnsureSpaceForLazyDeopt(int space_needed) = 0; - void RecordAndWritePosition(SourcePosition position); - - int GetNextEmittedBlock() const; - - void WriteTranslationFrame(LEnvironment* environment, - Translation* translation); - int DefineDeoptimizationLiteral(Handle literal); - - void PopulateDeoptimizationData(Handle code); - void PopulateDeoptimizationLiteralsWithInlinedFunctions(); - - // Check that an environment assigned via AssignEnvironment is actually being - // used. Redundant assignments keep things alive longer than necessary, and - // consequently lead to worse code, so it's important to minimize this. - void CheckEnvironmentUsage(); - - protected: - enum Status { - UNUSED, - GENERATING, - DONE, - ABORTED - }; - - LPlatformChunk* const chunk_; - MacroAssembler* const masm_; - CompilationInfo* const info_; - Zone* zone_; - Status status_; - int current_block_; - int current_instruction_; - const ZoneList* instructions_; - ZoneList deoptimizations_; - ZoneList > deoptimization_literals_; - TranslationBuffer translations_; - int inlined_function_count_; - int last_lazy_deopt_pc_; - int osr_pc_offset_; - SourcePositionTableBuilder source_position_table_builder_; - - bool is_unused() const { return status_ == UNUSED; } - bool is_generating() const { return status_ == GENERATING; } - bool is_done() const { return status_ == DONE; } - bool is_aborted() const { return status_ == ABORTED; } - - void Abort(BailoutReason reason); - void Retry(BailoutReason reason); - - // Methods for code dependencies. - void AddDeprecationDependency(Handle map); - void AddStabilityDependency(Handle map); -}; - - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_LITHIUM_CODEGEN_H_ diff --git a/src/crankshaft/lithium-inl.h b/src/crankshaft/lithium-inl.h deleted file mode 100644 index 938588e396..0000000000 --- a/src/crankshaft/lithium-inl.h +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
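(Aside on the code being removed: LCodeGenBase::DefineDeoptimizationLiteral, in the lithium-codegen.cc hunk above, keeps deoptimization literals in an append-only list and deduplicates with a linear scan, so PopulateDeoptimizationData can serialize each literal once and refer to it by index. A minimal standalone sketch of that dedup-then-index pattern, using std::vector and std::string as stand-ins for V8's ZoneList and Handle types; the names here are illustrative, not V8 API.)

#include <cstdio>
#include <string>
#include <vector>

// Append-only literal pool: identical literals share one index, so the
// serialized literal array stays small and translation entries can refer
// to a literal by its position in the pool.
class LiteralPool {
 public:
  int Define(const std::string& literal) {
    for (size_t i = 0; i < literals_.size(); ++i) {
      if (literals_[i] == literal) return static_cast<int>(i);  // reuse existing entry
    }
    literals_.push_back(literal);  // first occurrence gets a fresh index
    return static_cast<int>(literals_.size()) - 1;
  }
  const std::vector<std::string>& literals() const { return literals_; }

 private:
  std::vector<std::string> literals_;
};

int main() {
  LiteralPool pool;
  std::printf("%d\n", pool.Define("closure_a"));    // 0
  std::printf("%d\n", pool.Define("shared_info"));  // 1
  std::printf("%d\n", pool.Define("closure_a"));    // 0 again: deduplicated
  return 0;
}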
- -#ifndef V8_CRANKSHAFT_LITHIUM_INL_H_ -#define V8_CRANKSHAFT_LITHIUM_INL_H_ - -#include "src/crankshaft/lithium.h" - -#if V8_TARGET_ARCH_IA32 -#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT -#elif V8_TARGET_ARCH_X64 -#include "src/crankshaft/x64/lithium-x64.h" // NOLINT -#elif V8_TARGET_ARCH_ARM64 -#include "src/crankshaft/arm64/lithium-arm64.h" // NOLINT -#elif V8_TARGET_ARCH_ARM -#include "src/crankshaft/arm/lithium-arm.h" // NOLINT -#elif V8_TARGET_ARCH_MIPS -#include "src/crankshaft/mips/lithium-mips.h" // NOLINT -#elif V8_TARGET_ARCH_MIPS64 -#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT -#elif V8_TARGET_ARCH_PPC -#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT -#elif V8_TARGET_ARCH_S390 -#include "src/crankshaft/s390/lithium-s390.h" // NOLINT -#elif V8_TARGET_ARCH_X87 -#include "src/crankshaft/x87/lithium-x87.h" // NOLINT -#else -#error "Unknown architecture." -#endif - -namespace v8 { -namespace internal { - -TempIterator::TempIterator(LInstruction* instr) - : instr_(instr), limit_(instr->TempCount()), current_(0) { - SkipUninteresting(); -} - - -bool TempIterator::Done() { return current_ >= limit_; } - - -LOperand* TempIterator::Current() { - DCHECK(!Done()); - return instr_->TempAt(current_); -} - - -void TempIterator::SkipUninteresting() { - while (current_ < limit_ && instr_->TempAt(current_) == NULL) ++current_; -} - - -void TempIterator::Advance() { - ++current_; - SkipUninteresting(); -} - - -InputIterator::InputIterator(LInstruction* instr) - : instr_(instr), limit_(instr->InputCount()), current_(0) { - SkipUninteresting(); -} - - -bool InputIterator::Done() { return current_ >= limit_; } - - -LOperand* InputIterator::Current() { - DCHECK(!Done()); - DCHECK(instr_->InputAt(current_) != NULL); - return instr_->InputAt(current_); -} - - -void InputIterator::Advance() { - ++current_; - SkipUninteresting(); -} - - -void InputIterator::SkipUninteresting() { - while (current_ < limit_) { - LOperand* current = instr_->InputAt(current_); - if (current != NULL && !current->IsConstantOperand()) break; - ++current_; - } -} - - -UseIterator::UseIterator(LInstruction* instr) - : input_iterator_(instr), env_iterator_(instr->environment()) {} - - -bool UseIterator::Done() { - return input_iterator_.Done() && env_iterator_.Done(); -} - - -LOperand* UseIterator::Current() { - DCHECK(!Done()); - LOperand* result = input_iterator_.Done() ? env_iterator_.Current() - : input_iterator_.Current(); - DCHECK(result != NULL); - return result; -} - - -void UseIterator::Advance() { - input_iterator_.Done() ? env_iterator_.Advance() : input_iterator_.Advance(); -} -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_LITHIUM_INL_H_ diff --git a/src/crankshaft/lithium.cc b/src/crankshaft/lithium.cc deleted file mode 100644 index 5f0e9e386d..0000000000 --- a/src/crankshaft/lithium.cc +++ /dev/null @@ -1,730 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
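(Aside on the code being removed: the TempIterator, InputIterator, and UseIterator deleted from lithium-inl.h above all follow the same pattern, namely normalize the cursor to the next "interesting" element in the constructor and again after every Advance(), so Current() never observes a skipped slot. A small self-contained C++ sketch of that pattern over a plain array; the element type and the null-skipping predicate are stand-ins, not V8 types.)

#include <cstdio>
#include <vector>

// Generic form of the skip-uninteresting iterator: SkipUninteresting() runs
// both at construction and after each Advance(), mirroring the NULL-temp and
// constant-operand checks in the deleted iterators.
class SkippingIterator {
 public:
  explicit SkippingIterator(const std::vector<const char*>& items)
      : items_(items), current_(0) {
    SkipUninteresting();
  }
  bool Done() const { return current_ >= items_.size(); }
  const char* Current() const { return items_[current_]; }
  void Advance() {
    ++current_;
    SkipUninteresting();
  }

 private:
  void SkipUninteresting() {
    // "Uninteresting" here means a null slot.
    while (current_ < items_.size() && items_[current_] == nullptr) ++current_;
  }

  const std::vector<const char*>& items_;
  size_t current_;
};

int main() {
  std::vector<const char*> ops = {nullptr, "v1", nullptr, "v2"};
  for (SkippingIterator it(ops); !it.Done(); it.Advance()) {
    std::printf("%s\n", it.Current());  // prints v1 then v2
  }
  return 0;
}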
- -#include "src/crankshaft/lithium.h" - -#include "src/ast/scopes.h" -#include "src/codegen.h" -#include "src/objects-inl.h" - -#if V8_TARGET_ARCH_IA32 -#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT -#include "src/crankshaft/ia32/lithium-codegen-ia32.h" // NOLINT -#elif V8_TARGET_ARCH_X64 -#include "src/crankshaft/x64/lithium-x64.h" // NOLINT -#include "src/crankshaft/x64/lithium-codegen-x64.h" // NOLINT -#elif V8_TARGET_ARCH_ARM -#include "src/crankshaft/arm/lithium-arm.h" // NOLINT -#include "src/crankshaft/arm/lithium-codegen-arm.h" // NOLINT -#elif V8_TARGET_ARCH_PPC -#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT -#include "src/crankshaft/ppc/lithium-codegen-ppc.h" // NOLINT -#elif V8_TARGET_ARCH_MIPS -#include "src/crankshaft/mips/lithium-mips.h" // NOLINT -#include "src/crankshaft/mips/lithium-codegen-mips.h" // NOLINT -#elif V8_TARGET_ARCH_ARM64 -#include "src/crankshaft/arm64/lithium-arm64.h" // NOLINT -#include "src/crankshaft/arm64/lithium-codegen-arm64.h" // NOLINT -#elif V8_TARGET_ARCH_MIPS64 -#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT -#include "src/crankshaft/mips64/lithium-codegen-mips64.h" // NOLINT -#elif V8_TARGET_ARCH_X87 -#include "src/crankshaft/x87/lithium-x87.h" // NOLINT -#include "src/crankshaft/x87/lithium-codegen-x87.h" // NOLINT -#elif V8_TARGET_ARCH_S390 -#include "src/crankshaft/s390/lithium-s390.h" // NOLINT -#include "src/crankshaft/s390/lithium-codegen-s390.h" // NOLINT -#else -#error "Unknown architecture." -#endif - -namespace v8 { -namespace internal { - -const auto GetRegConfig = RegisterConfiguration::Crankshaft; - -void LOperand::PrintTo(StringStream* stream) { - LUnallocated* unalloc = NULL; - switch (kind()) { - case INVALID: - stream->Add("(0)"); - break; - case UNALLOCATED: - unalloc = LUnallocated::cast(this); - stream->Add("v%d", unalloc->virtual_register()); - if (unalloc->basic_policy() == LUnallocated::FIXED_SLOT) { - stream->Add("(=%dS)", unalloc->fixed_slot_index()); - break; - } - switch (unalloc->extended_policy()) { - case LUnallocated::NONE: - break; - case LUnallocated::FIXED_REGISTER: { - int reg_index = unalloc->fixed_register_index(); - if (reg_index < 0 || reg_index >= Register::kNumRegisters) { - stream->Add("(=invalid_reg#%d)", reg_index); - } else { - const char* register_name = - GetRegConfig()->GetGeneralRegisterName(reg_index); - stream->Add("(=%s)", register_name); - } - break; - } - case LUnallocated::FIXED_DOUBLE_REGISTER: { - int reg_index = unalloc->fixed_register_index(); - if (reg_index < 0 || reg_index >= DoubleRegister::kMaxNumRegisters) { - stream->Add("(=invalid_double_reg#%d)", reg_index); - } else { - const char* double_register_name = - GetRegConfig()->GetDoubleRegisterName(reg_index); - stream->Add("(=%s)", double_register_name); - } - break; - } - case LUnallocated::MUST_HAVE_REGISTER: - stream->Add("(R)"); - break; - case LUnallocated::MUST_HAVE_DOUBLE_REGISTER: - stream->Add("(D)"); - break; - case LUnallocated::WRITABLE_REGISTER: - stream->Add("(WR)"); - break; - case LUnallocated::SAME_AS_FIRST_INPUT: - stream->Add("(1)"); - break; - case LUnallocated::ANY: - stream->Add("(-)"); - break; - } - break; - case CONSTANT_OPERAND: - stream->Add("[constant:%d]", index()); - break; - case STACK_SLOT: - stream->Add("[stack:%d]", index()); - break; - case DOUBLE_STACK_SLOT: - stream->Add("[double_stack:%d]", index()); - break; - case REGISTER: { - int reg_index = index(); - if (reg_index < 0 || reg_index >= Register::kNumRegisters) { - stream->Add("(=invalid_reg#%d|R)", 
reg_index); - } else { - stream->Add("[%s|R]", - GetRegConfig()->GetGeneralRegisterName(reg_index)); - } - break; - } - case DOUBLE_REGISTER: { - int reg_index = index(); - if (reg_index < 0 || reg_index >= DoubleRegister::kMaxNumRegisters) { - stream->Add("(=invalid_double_reg#%d|R)", reg_index); - } else { - stream->Add("[%s|R]", GetRegConfig()->GetDoubleRegisterName(reg_index)); - } - break; - } - } -} - - -template -LSubKindOperand* -LSubKindOperand::cache = NULL; - - -template -void LSubKindOperand::SetUpCache() { - if (cache) return; - cache = new LSubKindOperand[kNumCachedOperands]; - for (int i = 0; i < kNumCachedOperands; i++) { - cache[i].ConvertTo(kOperandKind, i); - } -} - - -template -void LSubKindOperand::TearDownCache() { - delete[] cache; - cache = NULL; -} - - -void LOperand::SetUpCaches() { -#define LITHIUM_OPERAND_SETUP(name, type, number) L##name::SetUpCache(); - LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_SETUP) -#undef LITHIUM_OPERAND_SETUP -} - - -void LOperand::TearDownCaches() { -#define LITHIUM_OPERAND_TEARDOWN(name, type, number) L##name::TearDownCache(); - LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_TEARDOWN) -#undef LITHIUM_OPERAND_TEARDOWN -} - - -bool LParallelMove::IsRedundant() const { - for (int i = 0; i < move_operands_.length(); ++i) { - if (!move_operands_[i].IsRedundant()) return false; - } - return true; -} - - -void LParallelMove::PrintDataTo(StringStream* stream) const { - bool first = true; - for (int i = 0; i < move_operands_.length(); ++i) { - if (!move_operands_[i].IsEliminated()) { - LOperand* source = move_operands_[i].source(); - LOperand* destination = move_operands_[i].destination(); - if (!first) stream->Add(" "); - first = false; - if (source->Equals(destination)) { - destination->PrintTo(stream); - } else { - destination->PrintTo(stream); - stream->Add(" = "); - source->PrintTo(stream); - } - stream->Add(";"); - } - } -} - - -void LEnvironment::PrintTo(StringStream* stream) { - stream->Add("[id=%d|", ast_id().ToInt()); - if (deoptimization_index() != Safepoint::kNoDeoptimizationIndex) { - stream->Add("deopt_id=%d|", deoptimization_index()); - } - stream->Add("parameters=%d|", parameter_count()); - stream->Add("arguments_stack_height=%d|", arguments_stack_height()); - for (int i = 0; i < values_.length(); ++i) { - if (i != 0) stream->Add(";"); - if (values_[i] == NULL) { - stream->Add("[hole]"); - } else { - values_[i]->PrintTo(stream); - } - } - stream->Add("]"); -} - - -void LPointerMap::RecordPointer(LOperand* op, Zone* zone) { - // Do not record arguments as pointers. - if (op->IsStackSlot() && op->index() < 0) return; - DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot()); - pointer_operands_.Add(op, zone); -} - - -void LPointerMap::RemovePointer(LOperand* op) { - // Do not record arguments as pointers. - if (op->IsStackSlot() && op->index() < 0) return; - DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot()); - for (int i = 0; i < pointer_operands_.length(); ++i) { - if (pointer_operands_[i]->Equals(op)) { - pointer_operands_.Remove(i); - --i; - } - } -} - - -void LPointerMap::RecordUntagged(LOperand* op, Zone* zone) { - // Do not record arguments as pointers. 
- if (op->IsStackSlot() && op->index() < 0) return; - DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot()); - untagged_operands_.Add(op, zone); -} - - -void LPointerMap::PrintTo(StringStream* stream) { - stream->Add("{"); - for (int i = 0; i < pointer_operands_.length(); ++i) { - if (i != 0) stream->Add(";"); - pointer_operands_[i]->PrintTo(stream); - } - stream->Add("}"); -} - -LChunk::LChunk(CompilationInfo* info, HGraph* graph) - : base_frame_slots_(info->IsStub() - ? TypedFrameConstants::kFixedSlotCount - : StandardFrameConstants::kFixedSlotCount), - current_frame_slots_(base_frame_slots_), - info_(info), - graph_(graph), - instructions_(32, info->zone()), - pointer_maps_(8, info->zone()), - deprecation_dependencies_(32, info->zone()), - stability_dependencies_(8, info->zone()) {} - -LLabel* LChunk::GetLabel(int block_id) const { - HBasicBlock* block = graph_->blocks()->at(block_id); - int first_instruction = block->first_instruction_index(); - return LLabel::cast(instructions_[first_instruction]); -} - - -int LChunk::LookupDestination(int block_id) const { - LLabel* cur = GetLabel(block_id); - while (cur->replacement() != NULL) { - cur = cur->replacement(); - } - return cur->block_id(); -} - -Label* LChunk::GetAssemblyLabel(int block_id) const { - LLabel* label = GetLabel(block_id); - DCHECK(!label->HasReplacement()); - return label->label(); -} - - -void LChunk::MarkEmptyBlocks() { - LPhase phase("L_Mark empty blocks", this); - for (int i = 0; i < graph()->blocks()->length(); ++i) { - HBasicBlock* block = graph()->blocks()->at(i); - int first = block->first_instruction_index(); - int last = block->last_instruction_index(); - LInstruction* first_instr = instructions()->at(first); - LInstruction* last_instr = instructions()->at(last); - - LLabel* label = LLabel::cast(first_instr); - if (last_instr->IsGoto()) { - LGoto* goto_instr = LGoto::cast(last_instr); - if (label->IsRedundant() && - !label->is_loop_header()) { - bool can_eliminate = true; - for (int i = first + 1; i < last && can_eliminate; ++i) { - LInstruction* cur = instructions()->at(i); - if (cur->IsGap()) { - LGap* gap = LGap::cast(cur); - if (!gap->IsRedundant()) { - can_eliminate = false; - } - } else { - can_eliminate = false; - } - } - if (can_eliminate) { - label->set_replacement(GetLabel(goto_instr->block_id())); - } - } - } - } -} - - -void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) { - LInstructionGap* gap = new (zone()) LInstructionGap(block); - gap->set_hydrogen_value(instr->hydrogen_value()); - int index = -1; - if (instr->IsControl()) { - instructions_.Add(gap, zone()); - index = instructions_.length(); - instructions_.Add(instr, zone()); - } else { - index = instructions_.length(); - instructions_.Add(instr, zone()); - instructions_.Add(gap, zone()); - } - if (instr->HasPointerMap()) { - pointer_maps_.Add(instr->pointer_map(), zone()); - instr->pointer_map()->set_lithium_position(index); - } -} - -LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) { - return LConstantOperand::Create(constant->id(), zone()); -} - - -int LChunk::GetParameterStackSlot(int index) const { - // The receiver is at index 0, the first parameter at index 1, so we - // shift all parameter indexes down by the number of parameters, and - // make sure they end up negative so they are distinguishable from - // spill slots. - int result = index - info()->num_parameters() - 1; - - DCHECK(result < 0); - return result; -} - - -// A parameter relative to ebp in the arguments stub. 
-int LChunk::ParameterAt(int index) { - DCHECK(-1 <= index); // -1 is the receiver. - return (1 + info()->scope()->num_parameters() - index) * - kPointerSize; -} - - -LGap* LChunk::GetGapAt(int index) const { - return LGap::cast(instructions_[index]); -} - - -bool LChunk::IsGapAt(int index) const { - return instructions_[index]->IsGap(); -} - - -int LChunk::NearestGapPos(int index) const { - while (!IsGapAt(index)) index--; - return index; -} - - -void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) { - GetGapAt(index)->GetOrCreateParallelMove( - LGap::START, zone())->AddMove(from, to, zone()); -} - - -HConstant* LChunk::LookupConstant(LConstantOperand* operand) const { - return HConstant::cast(graph_->LookupValue(operand->index())); -} - - -Representation LChunk::LookupLiteralRepresentation( - LConstantOperand* operand) const { - return graph_->LookupValue(operand->index())->representation(); -} - - -void LChunk::CommitDependencies(Handle code) const { - if (!code->is_optimized_code()) return; - HandleScope scope(isolate()); - - for (Handle map : deprecation_dependencies_) { - DCHECK(!map->is_deprecated()); - DCHECK(map->CanBeDeprecated()); - Map::AddDependentCode(map, DependentCode::kTransitionGroup, code); - } - - for (Handle map : stability_dependencies_) { - DCHECK(map->is_stable()); - DCHECK(map->CanTransition()); - Map::AddDependentCode(map, DependentCode::kPrototypeCheckGroup, code); - } - - info_->dependencies()->Commit(code); -} - - -LChunk* LChunk::NewChunk(HGraph* graph) { - DisallowHandleAllocation no_handles; - DisallowHeapAllocation no_gc; - graph->DisallowAddingNewValues(); - int values = graph->GetMaximumValueID(); - CompilationInfo* info = graph->info(); - if (values > LUnallocated::kMaxVirtualRegisters) { - info->AbortOptimization(kNotEnoughVirtualRegistersForValues); - return NULL; - } - LAllocator allocator(values, graph); - LChunkBuilder builder(info, graph, &allocator); - LChunk* chunk = builder.Build(); - if (chunk == NULL) return NULL; - - if (!allocator.Allocate(chunk)) { - info->AbortOptimization(kNotEnoughVirtualRegistersRegalloc); - return NULL; - } - - chunk->set_allocated_double_registers( - allocator.assigned_double_registers()); - - return chunk; -} - - -Handle LChunk::Codegen() { - MacroAssembler assembler(info()->isolate(), NULL, 0, - CodeObjectRequired::kYes); - // Code serializer only takes unoptimized code. 
- DCHECK(!info()->will_serialize()); - LCodeGen generator(this, &assembler, info()); - - MarkEmptyBlocks(); - - if (generator.GenerateCode()) { - generator.CheckEnvironmentUsage(); - CodeGenerator::MakeCodePrologue(info(), "optimized"); - Handle code = CodeGenerator::MakeCodeEpilogue( - &assembler, nullptr, info(), assembler.CodeObject()); - generator.FinishCode(code); - CommitDependencies(code); - Handle source_positions = - generator.source_position_table_builder()->ToSourcePositionTable( - info()->isolate(), Handle::cast(code)); - code->set_source_position_table(*source_positions); - code->set_is_crankshafted(true); - - CodeGenerator::PrintCode(code, info()); - return code; - } - assembler.AbortedCodeGeneration(); - return Handle::null(); -} - - -void LChunk::set_allocated_double_registers(BitVector* allocated_registers) { - allocated_double_registers_ = allocated_registers; - BitVector* doubles = allocated_double_registers(); - BitVector::Iterator iterator(doubles); - while (!iterator.Done()) { - if (info()->saves_caller_doubles()) { - if (kDoubleSize == kPointerSize * 2) { - current_frame_slots_ += 2; - } else { - current_frame_slots_++; - } - } - iterator.Advance(); - } -} - - -void LChunkBuilderBase::Abort(BailoutReason reason) { - info()->AbortOptimization(reason); - status_ = ABORTED; -} - - -void LChunkBuilderBase::Retry(BailoutReason reason) { - info()->RetryOptimization(reason); - status_ = ABORTED; -} - -void LChunkBuilderBase::CreateLazyBailoutForCall(HBasicBlock* current_block, - LInstruction* instr, - HInstruction* hydrogen_val) { - if (!instr->IsCall()) return; - - HEnvironment* hydrogen_env = current_block->last_environment(); - HValue* hydrogen_value_for_lazy_bailout = hydrogen_val; - DCHECK_NOT_NULL(hydrogen_env); - if (instr->IsSyntacticTailCall()) { - // If it was a syntactic tail call we need to drop the current frame and - // all the frames on top of it that are either an arguments adaptor frame - // or a tail caller frame. - hydrogen_env = hydrogen_env->outer(); - while (hydrogen_env != nullptr && - (hydrogen_env->frame_type() == ARGUMENTS_ADAPTOR || - hydrogen_env->frame_type() == TAIL_CALLER_FUNCTION)) { - hydrogen_env = hydrogen_env->outer(); - } - if (hydrogen_env != nullptr) { - if (hydrogen_env->frame_type() == JS_FUNCTION) { - // In case an outer frame is a function frame we have to replay - // environment manually because - // 1) it does not contain a result of inlined function yet, - // 2) we can't find the proper simulate that corresponds to the point - // after inlined call to do a ReplayEnvironment() on. - // So we push return value on top of outer environment. - // As for JS_GETTER/JS_SETTER/JS_CONSTRUCT nothing has to be done here, - // the deoptimizer ensures that the result of the callee is correctly - // propagated to result register during deoptimization. - hydrogen_env = hydrogen_env->Copy(); - hydrogen_env->Push(hydrogen_val); - } - } else { - // Although we don't need this lazy bailout for normal execution - // (because when we tail call from the outermost function we should pop - // its frame) we still need it when debugger is on. 
- hydrogen_env = current_block->last_environment(); - } - } else { - if (hydrogen_val->HasObservableSideEffects()) { - HSimulate* sim = HSimulate::cast(hydrogen_val->next()); - sim->ReplayEnvironment(hydrogen_env); - hydrogen_value_for_lazy_bailout = sim; - } - } - LInstruction* bailout = LChunkBuilderBase::AssignEnvironment( - new (zone()) LLazyBailout(), hydrogen_env); - bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout); - chunk_->AddInstruction(bailout, current_block); -} - -LInstruction* LChunkBuilderBase::AssignEnvironment(LInstruction* instr, - HEnvironment* hydrogen_env) { - int argument_index_accumulator = 0; - ZoneList objects_to_materialize(0, zone()); - DCHECK_NE(TAIL_CALLER_FUNCTION, hydrogen_env->frame_type()); - instr->set_environment(CreateEnvironment( - hydrogen_env, &argument_index_accumulator, &objects_to_materialize)); - return instr; -} - -LEnvironment* LChunkBuilderBase::CreateEnvironment( - HEnvironment* hydrogen_env, int* argument_index_accumulator, - ZoneList* objects_to_materialize) { - if (hydrogen_env == NULL) return NULL; - - BailoutId ast_id = hydrogen_env->ast_id(); - DCHECK(!ast_id.IsNone() || - (hydrogen_env->frame_type() != JS_FUNCTION && - hydrogen_env->frame_type() != TAIL_CALLER_FUNCTION)); - - if (hydrogen_env->frame_type() == TAIL_CALLER_FUNCTION) { - // Skip potential outer arguments adaptor frame. - HEnvironment* outer_hydrogen_env = hydrogen_env->outer(); - if (outer_hydrogen_env != nullptr && - outer_hydrogen_env->frame_type() == ARGUMENTS_ADAPTOR) { - outer_hydrogen_env = outer_hydrogen_env->outer(); - } - LEnvironment* outer = CreateEnvironment( - outer_hydrogen_env, argument_index_accumulator, objects_to_materialize); - return new (zone()) - LEnvironment(hydrogen_env->closure(), hydrogen_env->frame_type(), - ast_id, 0, 0, 0, outer, hydrogen_env->entry(), zone()); - } - - LEnvironment* outer = - CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator, - objects_to_materialize); - - int omitted_count = (hydrogen_env->frame_type() == JS_FUNCTION) - ? 
0 - : hydrogen_env->specials_count(); - - int value_count = hydrogen_env->length() - omitted_count; - LEnvironment* result = - new(zone()) LEnvironment(hydrogen_env->closure(), - hydrogen_env->frame_type(), - ast_id, - hydrogen_env->parameter_count(), - argument_count_, - value_count, - outer, - hydrogen_env->entry(), - zone()); - int argument_index = *argument_index_accumulator; - - // Store the environment description into the environment - // (with holes for nested objects) - for (int i = 0; i < hydrogen_env->length(); ++i) { - if (hydrogen_env->is_special_index(i) && - hydrogen_env->frame_type() != JS_FUNCTION) { - continue; - } - LOperand* op; - HValue* value = hydrogen_env->values()->at(i); - CHECK(!value->IsPushArguments()); // Do not deopt outgoing arguments - if (value->IsArgumentsObject() || value->IsCapturedObject()) { - op = LEnvironment::materialization_marker(); - } else { - op = UseAny(value); - } - result->AddValue(op, - value->representation(), - value->CheckFlag(HInstruction::kUint32)); - } - - // Recursively store the nested objects into the environment - for (int i = 0; i < hydrogen_env->length(); ++i) { - if (hydrogen_env->is_special_index(i)) continue; - - HValue* value = hydrogen_env->values()->at(i); - if (value->IsArgumentsObject() || value->IsCapturedObject()) { - AddObjectToMaterialize(value, objects_to_materialize, result); - } - } - - if (hydrogen_env->frame_type() == JS_FUNCTION) { - *argument_index_accumulator = argument_index; - } - - return result; -} - - -// Add an object to the supplied environment and object materialization list. -// -// Notes: -// -// We are building three lists here: -// -// 1. In the result->object_mapping_ list (added to by the -// LEnvironment::Add*Object methods), we store the lengths (number -// of fields) of the captured objects in depth-first traversal order, or -// in case of duplicated objects, we store the index to the duplicate object -// (with a tag to differentiate between captured and duplicated objects). -// -// 2. The object fields are stored in the result->values_ list -// (added to by the LEnvironment.AddValue method) sequentially as lists -// of fields with holes for nested objects (the holes will be expanded -// later by LCodegen::AddToTranslation according to the -// LEnvironment.object_mapping_ list). -// -// 3. The auxiliary objects_to_materialize array stores the hydrogen values -// in the same order as result->object_mapping_ list. This is used -// to detect duplicate values and calculate the corresponding object index. -void LChunkBuilderBase::AddObjectToMaterialize(HValue* value, - ZoneList* objects_to_materialize, LEnvironment* result) { - int object_index = objects_to_materialize->length(); - // Store the hydrogen value into the de-duplication array - objects_to_materialize->Add(value, zone()); - // Find out whether we are storing a duplicated value - int previously_materialized_object = -1; - for (int prev = 0; prev < object_index; ++prev) { - if (objects_to_materialize->at(prev) == value) { - previously_materialized_object = prev; - break; - } - } - // Store the captured object length (or duplicated object index) - // into the environment. For duplicated objects, we stop here. - int length = value->OperandCount(); - bool is_arguments = value->IsArgumentsObject(); - if (previously_materialized_object >= 0) { - result->AddDuplicateObject(previously_materialized_object); - return; - } else { - result->AddNewObject(is_arguments ? 
length - 1 : length, is_arguments); - } - // Store the captured object's fields into the environment - for (int i = is_arguments ? 1 : 0; i < length; ++i) { - LOperand* op; - HValue* arg_value = value->OperandAt(i); - if (arg_value->IsArgumentsObject() || arg_value->IsCapturedObject()) { - // Insert a hole for nested objects - op = LEnvironment::materialization_marker(); - } else { - DCHECK(!arg_value->IsPushArguments()); - // For ordinary values, tell the register allocator we need the value - // to be alive here - op = UseAny(arg_value); - } - result->AddValue(op, - arg_value->representation(), - arg_value->CheckFlag(HInstruction::kUint32)); - } - // Recursively store all the nested captured objects into the environment - for (int i = is_arguments ? 1 : 0; i < length; ++i) { - HValue* arg_value = value->OperandAt(i); - if (arg_value->IsArgumentsObject() || arg_value->IsCapturedObject()) { - AddObjectToMaterialize(arg_value, objects_to_materialize, result); - } - } -} - - -LPhase::~LPhase() { - if (ShouldProduceTraceOutput()) { - isolate()->GetHTracer()->TraceLithium(name(), chunk_); - } -} - - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/lithium.h b/src/crankshaft/lithium.h deleted file mode 100644 index d67c4908eb..0000000000 --- a/src/crankshaft/lithium.h +++ /dev/null @@ -1,847 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_LITHIUM_H_ -#define V8_CRANKSHAFT_LITHIUM_H_ - -#include - -#include "src/allocation.h" -#include "src/bailout-reason.h" -#include "src/crankshaft/compilation-phase.h" -#include "src/crankshaft/hydrogen.h" -#include "src/safepoint-table.h" -#include "src/zone/zone-allocator.h" - -namespace v8 { -namespace internal { - -#define LITHIUM_OPERAND_LIST(V) \ - V(ConstantOperand, CONSTANT_OPERAND, 128) \ - V(StackSlot, STACK_SLOT, 128) \ - V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128) \ - V(Register, REGISTER, 16) \ - V(DoubleRegister, DOUBLE_REGISTER, 16) - -class LOperand : public ZoneObject { - public: - enum Kind { - INVALID, - UNALLOCATED, - CONSTANT_OPERAND, - STACK_SLOT, - DOUBLE_STACK_SLOT, - REGISTER, - DOUBLE_REGISTER - }; - - LOperand() : value_(KindField::encode(INVALID)) { } - - Kind kind() const { return KindField::decode(value_); } - int index() const { return static_cast(value_) >> kKindFieldWidth; } -#define LITHIUM_OPERAND_PREDICATE(name, type, number) \ - bool Is##name() const { return kind() == type; } - LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_PREDICATE) - LITHIUM_OPERAND_PREDICATE(Unallocated, UNALLOCATED, 0) - LITHIUM_OPERAND_PREDICATE(Ignored, INVALID, 0) -#undef LITHIUM_OPERAND_PREDICATE - bool Equals(LOperand* other) const { return value_ == other->value_; } - - void PrintTo(StringStream* stream); - void ConvertTo(Kind kind, int index) { - if (kind == REGISTER) DCHECK(index >= 0); - value_ = KindField::encode(kind); - value_ |= index << kKindFieldWidth; - DCHECK(this->index() == index); - } - - // Calls SetUpCache()/TearDownCache() for each subclass. 
- static void SetUpCaches(); - static void TearDownCaches(); - - protected: - static const int kKindFieldWidth = 3; - class KindField : public BitField { }; - - LOperand(Kind kind, int index) { ConvertTo(kind, index); } - - unsigned value_; -}; - - -class LUnallocated : public LOperand { - public: - enum BasicPolicy { - FIXED_SLOT, - EXTENDED_POLICY - }; - - enum ExtendedPolicy { - NONE, - ANY, - FIXED_REGISTER, - FIXED_DOUBLE_REGISTER, - MUST_HAVE_REGISTER, - MUST_HAVE_DOUBLE_REGISTER, - WRITABLE_REGISTER, - SAME_AS_FIRST_INPUT - }; - - // Lifetime of operand inside the instruction. - enum Lifetime { - // USED_AT_START operand is guaranteed to be live only at - // instruction start. Register allocator is free to assign the same register - // to some other operand used inside instruction (i.e. temporary or - // output). - USED_AT_START, - - // USED_AT_END operand is treated as live until the end of - // instruction. This means that register allocator will not reuse it's - // register for any other operand inside instruction. - USED_AT_END - }; - - explicit LUnallocated(ExtendedPolicy policy) : LOperand(UNALLOCATED, 0) { - value_ |= BasicPolicyField::encode(EXTENDED_POLICY); - value_ |= ExtendedPolicyField::encode(policy); - value_ |= LifetimeField::encode(USED_AT_END); - } - - LUnallocated(BasicPolicy policy, int index) : LOperand(UNALLOCATED, 0) { - DCHECK(policy == FIXED_SLOT); - value_ |= BasicPolicyField::encode(policy); - value_ |= index << FixedSlotIndexField::kShift; - DCHECK(this->fixed_slot_index() == index); - } - - LUnallocated(ExtendedPolicy policy, int index) : LOperand(UNALLOCATED, 0) { - DCHECK(policy == FIXED_REGISTER || policy == FIXED_DOUBLE_REGISTER); - value_ |= BasicPolicyField::encode(EXTENDED_POLICY); - value_ |= ExtendedPolicyField::encode(policy); - value_ |= LifetimeField::encode(USED_AT_END); - value_ |= FixedRegisterField::encode(index); - } - - LUnallocated(ExtendedPolicy policy, Lifetime lifetime) - : LOperand(UNALLOCATED, 0) { - value_ |= BasicPolicyField::encode(EXTENDED_POLICY); - value_ |= ExtendedPolicyField::encode(policy); - value_ |= LifetimeField::encode(lifetime); - } - - LUnallocated* CopyUnconstrained(Zone* zone) { - LUnallocated* result = new(zone) LUnallocated(ANY); - result->set_virtual_register(virtual_register()); - return result; - } - - static LUnallocated* cast(LOperand* op) { - DCHECK(op->IsUnallocated()); - return reinterpret_cast(op); - } - - // The encoding used for LUnallocated operands depends on the policy that is - // stored within the operand. The FIXED_SLOT policy uses a compact encoding - // because it accommodates a larger pay-load. - // - // For FIXED_SLOT policy: - // +------------------------------------------+ - // | slot_index | vreg | 0 | 001 | - // +------------------------------------------+ - // - // For all other (extended) policies: - // +------------------------------------------+ - // | reg_index | L | PPP | vreg | 1 | 001 | L ... Lifetime - // +------------------------------------------+ P ... Policy - // - // The slot index is a signed value which requires us to decode it manually - // instead of using the BitField utility class. - - // The superclass has a KindField. - STATIC_ASSERT(kKindFieldWidth == 3); - - // BitFields for all unallocated operands. - class BasicPolicyField : public BitField {}; - class VirtualRegisterField : public BitField {}; - - // BitFields specific to BasicPolicy::FIXED_SLOT. - class FixedSlotIndexField : public BitField {}; - - // BitFields specific to BasicPolicy::EXTENDED_POLICY. 
- class ExtendedPolicyField : public BitField {}; - class LifetimeField : public BitField {}; - class FixedRegisterField : public BitField {}; - - static const int kMaxVirtualRegisters = VirtualRegisterField::kMax + 1; - static const int kFixedSlotIndexWidth = FixedSlotIndexField::kSize; - static const int kMaxFixedSlotIndex = (1 << (kFixedSlotIndexWidth - 1)) - 1; - static const int kMinFixedSlotIndex = -(1 << (kFixedSlotIndexWidth - 1)); - - // Predicates for the operand policy. - bool HasAnyPolicy() const { - return basic_policy() == EXTENDED_POLICY && - extended_policy() == ANY; - } - bool HasFixedPolicy() const { - return basic_policy() == FIXED_SLOT || - extended_policy() == FIXED_REGISTER || - extended_policy() == FIXED_DOUBLE_REGISTER; - } - bool HasRegisterPolicy() const { - return basic_policy() == EXTENDED_POLICY && ( - extended_policy() == WRITABLE_REGISTER || - extended_policy() == MUST_HAVE_REGISTER); - } - bool HasDoubleRegisterPolicy() const { - return basic_policy() == EXTENDED_POLICY && - extended_policy() == MUST_HAVE_DOUBLE_REGISTER; - } - bool HasSameAsInputPolicy() const { - return basic_policy() == EXTENDED_POLICY && - extended_policy() == SAME_AS_FIRST_INPUT; - } - bool HasFixedSlotPolicy() const { - return basic_policy() == FIXED_SLOT; - } - bool HasFixedRegisterPolicy() const { - return basic_policy() == EXTENDED_POLICY && - extended_policy() == FIXED_REGISTER; - } - bool HasFixedDoubleRegisterPolicy() const { - return basic_policy() == EXTENDED_POLICY && - extended_policy() == FIXED_DOUBLE_REGISTER; - } - bool HasWritableRegisterPolicy() const { - return basic_policy() == EXTENDED_POLICY && - extended_policy() == WRITABLE_REGISTER; - } - - // [basic_policy]: Distinguish between FIXED_SLOT and all other policies. - BasicPolicy basic_policy() const { - return BasicPolicyField::decode(value_); - } - - // [extended_policy]: Only for non-FIXED_SLOT. The finer-grained policy. - ExtendedPolicy extended_policy() const { - DCHECK(basic_policy() == EXTENDED_POLICY); - return ExtendedPolicyField::decode(value_); - } - - // [fixed_slot_index]: Only for FIXED_SLOT. - int fixed_slot_index() const { - DCHECK(HasFixedSlotPolicy()); - return static_cast(value_) >> FixedSlotIndexField::kShift; - } - - // [fixed_register_index]: Only for FIXED_REGISTER or FIXED_DOUBLE_REGISTER. - int fixed_register_index() const { - DCHECK(HasFixedRegisterPolicy() || HasFixedDoubleRegisterPolicy()); - return FixedRegisterField::decode(value_); - } - - // [virtual_register]: The virtual register ID for this operand. - int virtual_register() const { - return VirtualRegisterField::decode(value_); - } - void set_virtual_register(unsigned id) { - value_ = VirtualRegisterField::update(value_, id); - } - - // [lifetime]: Only for non-FIXED_SLOT. 
- bool IsUsedAtStart() { - DCHECK(basic_policy() == EXTENDED_POLICY); - return LifetimeField::decode(value_) == USED_AT_START; - } - - static bool TooManyParameters(int num_parameters) { - const int parameter_limit = -LUnallocated::kMinFixedSlotIndex; - return num_parameters + 1 > parameter_limit; - } - - static bool TooManyParametersOrStackSlots(int num_parameters, - int num_stack_slots) { - const int locals_limit = LUnallocated::kMaxFixedSlotIndex; - return num_parameters + 1 + num_stack_slots > locals_limit; - } -}; - - -class LMoveOperands final BASE_EMBEDDED { - public: - LMoveOperands(LOperand* source, LOperand* destination) - : source_(source), destination_(destination) { - } - - LOperand* source() const { return source_; } - void set_source(LOperand* operand) { source_ = operand; } - - LOperand* destination() const { return destination_; } - void set_destination(LOperand* operand) { destination_ = operand; } - - // The gap resolver marks moves as "in-progress" by clearing the - // destination (but not the source). - bool IsPending() const { - return destination_ == NULL && source_ != NULL; - } - - // True if this move a move into the given destination operand. - bool Blocks(LOperand* operand) const { - return !IsEliminated() && source()->Equals(operand); - } - - // A move is redundant if it's been eliminated, if its source and - // destination are the same, or if its destination is unneeded or constant. - bool IsRedundant() const { - return IsEliminated() || source_->Equals(destination_) || IsIgnored() || - (destination_ != NULL && destination_->IsConstantOperand()); - } - - bool IsIgnored() const { - return destination_ != NULL && destination_->IsIgnored(); - } - - // We clear both operands to indicate move that's been eliminated. - void Eliminate() { source_ = destination_ = NULL; } - bool IsEliminated() const { - DCHECK(source_ != NULL || destination_ == NULL); - return source_ == NULL; - } - - private: - LOperand* source_; - LOperand* destination_; -}; - - -template -class LSubKindOperand final : public LOperand { - public: - static LSubKindOperand* Create(int index, Zone* zone) { - DCHECK(index >= 0); - if (index < kNumCachedOperands) return &cache[index]; - return new(zone) LSubKindOperand(index); - } - - static LSubKindOperand* cast(LOperand* op) { - DCHECK(op->kind() == kOperandKind); - return reinterpret_cast(op); - } - - static void SetUpCache(); - static void TearDownCache(); - - private: - static LSubKindOperand* cache; - - LSubKindOperand() : LOperand() { } - explicit LSubKindOperand(int index) : LOperand(kOperandKind, index) { } -}; - - -#define LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS(name, type, number) \ -typedef LSubKindOperand L##name; -LITHIUM_OPERAND_LIST(LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS) -#undef LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS - - -class LParallelMove final : public ZoneObject { - public: - explicit LParallelMove(Zone* zone) : move_operands_(4, zone) { } - - void AddMove(LOperand* from, LOperand* to, Zone* zone) { - move_operands_.Add(LMoveOperands(from, to), zone); - } - - bool IsRedundant() const; - - ZoneList* move_operands() { return &move_operands_; } - - void PrintDataTo(StringStream* stream) const; - - private: - ZoneList move_operands_; -}; - - -class LPointerMap final : public ZoneObject { - public: - explicit LPointerMap(Zone* zone) - : pointer_operands_(8, zone), - untagged_operands_(0, zone), - lithium_position_(-1) { } - - const ZoneList* GetNormalizedOperands() { - for (int i = 0; i < untagged_operands_.length(); ++i) { - 
RemovePointer(untagged_operands_[i]); - } - untagged_operands_.Clear(); - return &pointer_operands_; - } - int lithium_position() const { return lithium_position_; } - - void set_lithium_position(int pos) { - DCHECK(lithium_position_ == -1); - lithium_position_ = pos; - } - - void RecordPointer(LOperand* op, Zone* zone); - void RemovePointer(LOperand* op); - void RecordUntagged(LOperand* op, Zone* zone); - void PrintTo(StringStream* stream); - - private: - ZoneList pointer_operands_; - ZoneList untagged_operands_; - int lithium_position_; -}; - - -class LEnvironment final : public ZoneObject { - public: - LEnvironment(Handle closure, - FrameType frame_type, - BailoutId ast_id, - int parameter_count, - int argument_count, - int value_count, - LEnvironment* outer, - HEnterInlined* entry, - Zone* zone) - : closure_(closure), - frame_type_(frame_type), - arguments_stack_height_(argument_count), - deoptimization_index_(Safepoint::kNoDeoptimizationIndex), - translation_index_(-1), - ast_id_(ast_id), - translation_size_(value_count), - parameter_count_(parameter_count), - pc_offset_(-1), - values_(value_count, zone), - is_tagged_(value_count, zone), - is_uint32_(value_count, zone), - object_mapping_(0, zone), - outer_(outer), - entry_(entry), - zone_(zone), - has_been_used_(false) { } - - Handle closure() const { return closure_; } - FrameType frame_type() const { return frame_type_; } - int arguments_stack_height() const { return arguments_stack_height_; } - int deoptimization_index() const { return deoptimization_index_; } - int translation_index() const { return translation_index_; } - BailoutId ast_id() const { return ast_id_; } - int translation_size() const { return translation_size_; } - int parameter_count() const { return parameter_count_; } - int pc_offset() const { return pc_offset_; } - const ZoneList* values() const { return &values_; } - LEnvironment* outer() const { return outer_; } - HEnterInlined* entry() { return entry_; } - Zone* zone() const { return zone_; } - - bool has_been_used() const { return has_been_used_; } - void set_has_been_used() { has_been_used_ = true; } - - void AddValue(LOperand* operand, - Representation representation, - bool is_uint32) { - values_.Add(operand, zone()); - if (representation.IsSmiOrTagged()) { - DCHECK(!is_uint32); - is_tagged_.Add(values_.length() - 1, zone()); - } - - if (is_uint32) { - is_uint32_.Add(values_.length() - 1, zone()); - } - } - - bool HasTaggedValueAt(int index) const { - return is_tagged_.Contains(index); - } - - bool HasUint32ValueAt(int index) const { - return is_uint32_.Contains(index); - } - - void AddNewObject(int length, bool is_arguments) { - uint32_t encoded = LengthOrDupeField::encode(length) | - IsArgumentsField::encode(is_arguments) | - IsDuplicateField::encode(false); - object_mapping_.Add(encoded, zone()); - } - - void AddDuplicateObject(int dupe_of) { - uint32_t encoded = LengthOrDupeField::encode(dupe_of) | - IsDuplicateField::encode(true); - object_mapping_.Add(encoded, zone()); - } - - int ObjectDuplicateOfAt(int index) { - DCHECK(ObjectIsDuplicateAt(index)); - return LengthOrDupeField::decode(object_mapping_[index]); - } - - int ObjectLengthAt(int index) { - DCHECK(!ObjectIsDuplicateAt(index)); - return LengthOrDupeField::decode(object_mapping_[index]); - } - - bool ObjectIsArgumentsAt(int index) { - DCHECK(!ObjectIsDuplicateAt(index)); - return IsArgumentsField::decode(object_mapping_[index]); - } - - bool ObjectIsDuplicateAt(int index) { - return IsDuplicateField::decode(object_mapping_[index]); - } - - 
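
AddNewObject and AddDuplicateObject above collapse each captured-object record into a single 32-bit entry: a payload that is either the object's length or the index of the record it duplicates, plus is-arguments and is-duplicate flags. A small self-contained sketch of such a record format follows; the bit positions are invented for illustration and do not reproduce the deleted class's actual layout.

#include <cassert>
#include <cstdint>
#include <vector>

// One record per captured object: a payload (object length, or the index of
// the record this one duplicates) plus two flags. Positions are illustrative.
constexpr uint32_t kIsDuplicateBit = 1u << 0;
constexpr uint32_t kIsArgumentsBit = 1u << 1;
constexpr unsigned kPayloadShift = 2;

uint32_t EncodeNewObject(int length, bool is_arguments) {
  return (static_cast<uint32_t>(length) << kPayloadShift) |
         (is_arguments ? kIsArgumentsBit : 0u);
}

uint32_t EncodeDuplicate(int dupe_of) {
  return (static_cast<uint32_t>(dupe_of) << kPayloadShift) | kIsDuplicateBit;
}

bool IsDuplicate(uint32_t entry) { return (entry & kIsDuplicateBit) != 0; }
int Payload(uint32_t entry) { return static_cast<int>(entry >> kPayloadShift); }

int main() {
  std::vector<uint32_t> mapping;
  mapping.push_back(EncodeNewObject(/*length=*/3, /*is_arguments=*/false));
  mapping.push_back(EncodeDuplicate(/*dupe_of=*/0));  // same object as record 0
  assert(!IsDuplicate(mapping[0]) && Payload(mapping[0]) == 3);
  assert(IsDuplicate(mapping[1]) && Payload(mapping[1]) == 0);
  return 0;
}
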
void Register(int deoptimization_index, - int translation_index, - int pc_offset) { - DCHECK(!HasBeenRegistered()); - deoptimization_index_ = deoptimization_index; - translation_index_ = translation_index; - pc_offset_ = pc_offset; - } - bool HasBeenRegistered() const { - return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex; - } - - void PrintTo(StringStream* stream); - - // Marker value indicating a de-materialized object. - static LOperand* materialization_marker() { return NULL; } - - // Encoding used for the object_mapping map below. - class LengthOrDupeField : public BitField { }; - class IsArgumentsField : public BitField { }; - class IsDuplicateField : public BitField { }; - - private: - Handle closure_; - FrameType frame_type_; - int arguments_stack_height_; - int deoptimization_index_; - int translation_index_; - BailoutId ast_id_; - int translation_size_; - int parameter_count_; - int pc_offset_; - - // Value array: [parameters] [locals] [expression stack] [de-materialized]. - // |>--------- translation_size ---------<| - ZoneList values_; - GrowableBitVector is_tagged_; - GrowableBitVector is_uint32_; - - // Map with encoded information about materialization_marker operands. - ZoneList object_mapping_; - - LEnvironment* outer_; - HEnterInlined* entry_; - Zone* zone_; - bool has_been_used_; -}; - - -// Iterates over the non-null, non-constant operands in an environment. -class ShallowIterator final BASE_EMBEDDED { - public: - explicit ShallowIterator(LEnvironment* env) - : env_(env), - limit_(env != NULL ? env->values()->length() : 0), - current_(0) { - SkipUninteresting(); - } - - bool Done() { return current_ >= limit_; } - - LOperand* Current() { - DCHECK(!Done()); - DCHECK(env_->values()->at(current_) != NULL); - return env_->values()->at(current_); - } - - void Advance() { - DCHECK(!Done()); - ++current_; - SkipUninteresting(); - } - - LEnvironment* env() { return env_; } - - private: - bool ShouldSkip(LOperand* op) { - return op == NULL || op->IsConstantOperand(); - } - - // Skip until something interesting, beginning with and including current_. - void SkipUninteresting() { - while (current_ < limit_ && ShouldSkip(env_->values()->at(current_))) { - ++current_; - } - } - - LEnvironment* env_; - int limit_; - int current_; -}; - - -// Iterator for non-null, non-constant operands incl. outer environments. -class DeepIterator final BASE_EMBEDDED { - public: - explicit DeepIterator(LEnvironment* env) - : current_iterator_(env) { - SkipUninteresting(); - } - - bool Done() { return current_iterator_.Done(); } - - LOperand* Current() { - DCHECK(!current_iterator_.Done()); - DCHECK(current_iterator_.Current() != NULL); - return current_iterator_.Current(); - } - - void Advance() { - current_iterator_.Advance(); - SkipUninteresting(); - } - - private: - void SkipUninteresting() { - while (current_iterator_.env() != NULL && current_iterator_.Done()) { - current_iterator_ = ShallowIterator(current_iterator_.env()->outer()); - } - } - - ShallowIterator current_iterator_; -}; - - -class LPlatformChunk; -class LGap; -class LLabel; - -// Superclass providing data and behavior common to all the -// arch-specific LPlatformChunk classes. 
-class LChunk : public ZoneObject { - public: - static LChunk* NewChunk(HGraph* graph); - - void AddInstruction(LInstruction* instruction, HBasicBlock* block); - LConstantOperand* DefineConstantOperand(HConstant* constant); - HConstant* LookupConstant(LConstantOperand* operand) const; - Representation LookupLiteralRepresentation(LConstantOperand* operand) const; - - int ParameterAt(int index); - int GetParameterStackSlot(int index) const; - bool HasAllocatedStackSlots() const { - return current_frame_slots_ != base_frame_slots_; - } - int GetSpillSlotCount() const { - return current_frame_slots_ - base_frame_slots_; - } - int GetTotalFrameSlotCount() const { return current_frame_slots_; } - CompilationInfo* info() const { return info_; } - HGraph* graph() const { return graph_; } - Isolate* isolate() const { return graph_->isolate(); } - const ZoneList* instructions() const { return &instructions_; } - void AddGapMove(int index, LOperand* from, LOperand* to); - LGap* GetGapAt(int index) const; - bool IsGapAt(int index) const; - int NearestGapPos(int index) const; - void MarkEmptyBlocks(); - const ZoneList* pointer_maps() const { return &pointer_maps_; } - LLabel* GetLabel(int block_id) const; - int LookupDestination(int block_id) const; - Label* GetAssemblyLabel(int block_id) const; - - void AddDeprecationDependency(Handle map) { - DCHECK(!map->is_deprecated()); - if (!map->CanBeDeprecated()) return; - DCHECK(!info_->IsStub()); - deprecation_dependencies_.Add(map, zone()); - } - - void AddStabilityDependency(Handle map) { - DCHECK(map->is_stable()); - if (!map->CanTransition()) return; - DCHECK(!info_->IsStub()); - stability_dependencies_.Add(map, zone()); - } - - Zone* zone() const { return info_->zone(); } - - Handle Codegen(); - - void set_allocated_double_registers(BitVector* allocated_registers); - BitVector* allocated_double_registers() { - return allocated_double_registers_; - } - - protected: - LChunk(CompilationInfo* info, HGraph* graph); - - int base_frame_slots_; - int current_frame_slots_; - - private: - void CommitDependencies(Handle code) const; - - CompilationInfo* info_; - HGraph* const graph_; - BitVector* allocated_double_registers_; - ZoneList instructions_; - ZoneList pointer_maps_; - ZoneList> deprecation_dependencies_; - ZoneList> stability_dependencies_; -}; - - -class LChunkBuilderBase BASE_EMBEDDED { - public: - explicit LChunkBuilderBase(CompilationInfo* info, HGraph* graph) - : argument_count_(0), - chunk_(NULL), - info_(info), - graph_(graph), - status_(UNUSED), - zone_(graph->zone()) {} - - virtual ~LChunkBuilderBase() { } - - void Abort(BailoutReason reason); - void Retry(BailoutReason reason); - - protected: - enum Status { UNUSED, BUILDING, DONE, ABORTED }; - - LPlatformChunk* chunk() const { return chunk_; } - CompilationInfo* info() const { return info_; } - HGraph* graph() const { return graph_; } - int argument_count() const { return argument_count_; } - Isolate* isolate() const { return graph_->isolate(); } - Heap* heap() const { return isolate()->heap(); } - - bool is_unused() const { return status_ == UNUSED; } - bool is_building() const { return status_ == BUILDING; } - bool is_done() const { return status_ == DONE; } - bool is_aborted() const { return status_ == ABORTED; } - - // An input operand in register, stack slot or a constant operand. - // Will not be moved to a register even if one is freely available. 
-  virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) = 0;
-
-  // Constructs proper environment for a lazy bailout point after call, creates
-  // LLazyBailout instruction and adds it to current block.
-  void CreateLazyBailoutForCall(HBasicBlock* current_block, LInstruction* instr,
-                                HInstruction* hydrogen_val);
-
-  // Assigns given environment to an instruction. An instruction which can
-  // deoptimize must have an environment.
-  LInstruction* AssignEnvironment(LInstruction* instr,
-                                  HEnvironment* hydrogen_env);
-
-  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
-                                  int* argument_index_accumulator,
-                                  ZoneList* objects_to_materialize);
-  void AddObjectToMaterialize(HValue* value,
-                              ZoneList* objects_to_materialize,
-                              LEnvironment* result);
-
-  Zone* zone() const { return zone_; }
-
-  int argument_count_;
-  LPlatformChunk* chunk_;
-  CompilationInfo* info_;
-  HGraph* const graph_;
-  Status status_;
-
- private:
-  Zone* zone_;
-};
-
-
-enum NumberUntagDMode {
-  NUMBER_CANDIDATE_IS_SMI,
-  NUMBER_CANDIDATE_IS_ANY_TAGGED
-};
-
-
-class LPhase : public CompilationPhase {
- public:
-  LPhase(const char* name, LChunk* chunk)
-      : CompilationPhase(name, chunk->info()),
-        chunk_(chunk) { }
-  ~LPhase();
-
- private:
-  LChunk* chunk_;
-
-  DISALLOW_COPY_AND_ASSIGN(LPhase);
-};
-
-
-// A register-allocator view of a Lithium instruction. It contains the id of
-// the output operand and a list of input operand uses.
-
-enum RegisterKind {
-  UNALLOCATED_REGISTERS,
-  GENERAL_REGISTERS,
-  DOUBLE_REGISTERS
-};
-
-// Iterator for non-null temp operands.
-class TempIterator BASE_EMBEDDED {
- public:
-  inline explicit TempIterator(LInstruction* instr);
-  inline bool Done();
-  inline LOperand* Current();
-  inline void Advance();
-
- private:
-  inline void SkipUninteresting();
-  LInstruction* instr_;
-  int limit_;
-  int current_;
-};
-
-
-// Iterator for non-constant input operands.
-class InputIterator BASE_EMBEDDED {
- public:
-  inline explicit InputIterator(LInstruction* instr);
-  inline bool Done();
-  inline LOperand* Current();
-  inline void Advance();
-
- private:
-  inline void SkipUninteresting();
-  LInstruction* instr_;
-  int limit_;
-  int current_;
-};
-
-
-class UseIterator BASE_EMBEDDED {
- public:
-  inline explicit UseIterator(LInstruction* instr);
-  inline bool Done();
-  inline LOperand* Current();
-  inline void Advance();
-
- private:
-  InputIterator input_iterator_;
-  DeepIterator env_iterator_;
-};
-
-class LInstruction;
-class LCodeGen;
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_LITHIUM_H_
diff --git a/src/crankshaft/mips/OWNERS b/src/crankshaft/mips/OWNERS
deleted file mode 100644
index 3f8fbfc7c8..0000000000
--- a/src/crankshaft/mips/OWNERS
+++ /dev/null
@@ -1,3 +0,0 @@
-ivica.bogosavljevic@imgtec.com
-Miran.Karic@imgtec.com
-dusan.simicic@imgtec.com
diff --git a/src/crankshaft/mips/lithium-codegen-mips.cc b/src/crankshaft/mips/lithium-codegen-mips.cc
deleted file mode 100644
index 75ad941af5..0000000000
--- a/src/crankshaft/mips/lithium-codegen-mips.cc
+++ /dev/null
@@ -1,5357 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "src/crankshaft/mips/lithium-codegen-mips.h" - -#include "src/base/bits.h" -#include "src/builtins/builtins-constructor.h" -#include "src/code-factory.h" -#include "src/code-stubs.h" -#include "src/crankshaft/mips/lithium-gap-resolver-mips.h" -#include "src/ic/ic.h" -#include "src/ic/stub-cache.h" - -namespace v8 { -namespace internal { - - -class SafepointGenerator final : public CallWrapper { - public: - SafepointGenerator(LCodeGen* codegen, - LPointerMap* pointers, - Safepoint::DeoptMode mode) - : codegen_(codegen), - pointers_(pointers), - deopt_mode_(mode) { } - virtual ~SafepointGenerator() {} - - void BeforeCall(int call_size) const override {} - - void AfterCall() const override { - codegen_->RecordSafepoint(pointers_, deopt_mode_); - } - - private: - LCodeGen* codegen_; - LPointerMap* pointers_; - Safepoint::DeoptMode deopt_mode_; -}; - -LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope( - LCodeGen* codegen) - : codegen_(codegen) { - DCHECK(codegen_->info()->is_calling()); - DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); - codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters; - - StoreRegistersStateStub stub(codegen_->isolate()); - codegen_->masm_->push(ra); - codegen_->masm_->CallStub(&stub); -} - -LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() { - DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters); - RestoreRegistersStateStub stub(codegen_->isolate()); - codegen_->masm_->push(ra); - codegen_->masm_->CallStub(&stub); - codegen_->expected_safepoint_kind_ = Safepoint::kSimple; -} - -#define __ masm()-> - -bool LCodeGen::GenerateCode() { - LPhase phase("Z_Code generation", chunk()); - DCHECK(is_unused()); - status_ = GENERATING; - - // Open a frame scope to indicate that there is a frame on the stack. The - // NONE indicates that the scope shouldn't actually generate code to set up - // the frame (that is done in GeneratePrologue). 
- FrameScope frame_scope(masm_, StackFrame::NONE); - - return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() && - GenerateJumpTable() && GenerateSafepointTable(); -} - - -void LCodeGen::FinishCode(Handle code) { - DCHECK(is_done()); - code->set_stack_slots(GetTotalFrameSlotCount()); - code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); - PopulateDeoptimizationData(code); -} - - -void LCodeGen::SaveCallerDoubles() { - DCHECK(info()->saves_caller_doubles()); - DCHECK(NeedsEagerFrame()); - Comment(";;; Save clobbered callee double registers"); - int count = 0; - BitVector* doubles = chunk()->allocated_double_registers(); - BitVector::Iterator save_iterator(doubles); - while (!save_iterator.Done()) { - __ Sdc1(DoubleRegister::from_code(save_iterator.Current()), - MemOperand(sp, count * kDoubleSize)); - save_iterator.Advance(); - count++; - } -} - - -void LCodeGen::RestoreCallerDoubles() { - DCHECK(info()->saves_caller_doubles()); - DCHECK(NeedsEagerFrame()); - Comment(";;; Restore clobbered callee double registers"); - BitVector* doubles = chunk()->allocated_double_registers(); - BitVector::Iterator save_iterator(doubles); - int count = 0; - while (!save_iterator.Done()) { - __ Ldc1(DoubleRegister::from_code(save_iterator.Current()), - MemOperand(sp, count * kDoubleSize)); - save_iterator.Advance(); - count++; - } -} - - -bool LCodeGen::GeneratePrologue() { - DCHECK(is_generating()); - - if (info()->IsOptimizing()) { - ProfileEntryHookStub::MaybeCallEntryHook(masm_); - - // a1: Callee's JS function. - // cp: Callee's context. - // fp: Caller's frame pointer. - // lr: Caller's pc. - } - - info()->set_prologue_offset(masm_->pc_offset()); - if (NeedsEagerFrame()) { - if (info()->IsStub()) { - __ StubPrologue(StackFrame::STUB); - } else { - __ Prologue(info()->GeneratePreagedPrologue()); - } - frame_is_built_ = true; - } - - // Reserve space for the stack slots needed by the code. - int slots = GetStackSlotCount(); - if (slots > 0) { - if (FLAG_debug_code) { - __ Subu(sp, sp, Operand(slots * kPointerSize)); - __ Push(a0, a1); - __ Addu(a0, sp, Operand(slots * kPointerSize)); - __ li(a1, Operand(kSlotsZapValue)); - Label loop; - __ bind(&loop); - __ Subu(a0, a0, Operand(kPointerSize)); - __ sw(a1, MemOperand(a0, 2 * kPointerSize)); - __ Branch(&loop, ne, a0, Operand(sp)); - __ Pop(a0, a1); - } else { - __ Subu(sp, sp, Operand(slots * kPointerSize)); - } - } - - if (info()->saves_caller_doubles()) { - SaveCallerDoubles(); - } - return !is_aborted(); -} - - -void LCodeGen::DoPrologue(LPrologue* instr) { - Comment(";;; Prologue begin"); - - // Possibly allocate a local context. - if (info()->scope()->NeedsContext()) { - Comment(";;; Allocate local context"); - bool need_write_barrier = true; - // Argument to NewContext is the function, which is in a1. - int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; - Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt; - if (info()->scope()->is_script_scope()) { - __ push(a1); - __ Push(info()->scope()->scope_info()); - __ CallRuntime(Runtime::kNewScriptContext); - deopt_mode = Safepoint::kLazyDeopt; - } else { - if (slots <= ConstructorBuiltins::MaximumFunctionContextSlots()) { - Callable callable = CodeFactory::FastNewFunctionContext( - isolate(), info()->scope()->scope_type()); - __ li(FastNewFunctionContextDescriptor::SlotsRegister(), - Operand(slots)); - __ Call(callable.code(), RelocInfo::CODE_TARGET); - // Result of the FastNewFunctionContext builtin is always in new space. 
- need_write_barrier = false; - } else { - __ push(a1); - __ Push(Smi::FromInt(info()->scope()->scope_type())); - __ CallRuntime(Runtime::kNewFunctionContext); - } - } - RecordSafepoint(deopt_mode); - - // Context is returned in both v0. It replaces the context passed to us. - // It's saved in the stack and kept live in cp. - __ mov(cp, v0); - __ sw(v0, MemOperand(fp, StandardFrameConstants::kContextOffset)); - // Copy any necessary parameters into the context. - int num_parameters = info()->scope()->num_parameters(); - int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0; - for (int i = first_parameter; i < num_parameters; i++) { - Variable* var = (i == -1) ? info()->scope()->receiver() - : info()->scope()->parameter(i); - if (var->IsContextSlot()) { - int parameter_offset = StandardFrameConstants::kCallerSPOffset + - (num_parameters - 1 - i) * kPointerSize; - // Load parameter from stack. - __ lw(a0, MemOperand(fp, parameter_offset)); - // Store it in the context. - MemOperand target = ContextMemOperand(cp, var->index()); - __ sw(a0, target); - // Update the write barrier. This clobbers a3 and a0. - if (need_write_barrier) { - __ RecordWriteContextSlot( - cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs); - } else if (FLAG_debug_code) { - Label done; - __ JumpIfInNewSpace(cp, a0, &done); - __ Abort(kExpectedNewSpaceObject); - __ bind(&done); - } - } - } - Comment(";;; End allocate local context"); - } - - Comment(";;; Prologue end"); -} - -void LCodeGen::GenerateOsrPrologue() { UNREACHABLE(); } - -void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { - if (instr->IsCall()) { - EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); - } - if (!instr->IsLazyBailout() && !instr->IsGap()) { - safepoints_.BumpLastLazySafepointIndex(); - } -} - - -bool LCodeGen::GenerateDeferredCode() { - DCHECK(is_generating()); - if (deferred_.length() > 0) { - for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { - LDeferredCode* code = deferred_[i]; - - HValue* value = - instructions_->at(code->instruction_index())->hydrogen_value(); - RecordAndWritePosition(value->position()); - - Comment(";;; <@%d,#%d> " - "-------------------- Deferred %s --------------------", - code->instruction_index(), - code->instr()->hydrogen_value()->id(), - code->instr()->Mnemonic()); - __ bind(code->entry()); - if (NeedsDeferredFrame()) { - Comment(";;; Build frame"); - DCHECK(!frame_is_built_); - DCHECK(info()->IsStub()); - frame_is_built_ = true; - __ li(scratch0(), Operand(StackFrame::TypeToMarker(StackFrame::STUB))); - __ PushCommonFrame(scratch0()); - Comment(";;; Deferred code"); - } - code->Generate(); - if (NeedsDeferredFrame()) { - Comment(";;; Destroy frame"); - DCHECK(frame_is_built_); - __ PopCommonFrame(scratch0()); - frame_is_built_ = false; - } - __ jmp(code->exit()); - } - } - // Deferred code is the last part of the instruction sequence. Mark - // the generated code as done unless we bailed out. 
- if (!is_aborted()) status_ = DONE; - return !is_aborted(); -} - - -bool LCodeGen::GenerateJumpTable() { - if (jump_table_.length() > 0) { - Label needs_frame, call_deopt_entry; - - Comment(";;; -------------------- Jump table --------------------"); - Address base = jump_table_[0].address; - - Register entry_offset = t9; - - int length = jump_table_.length(); - for (int i = 0; i < length; i++) { - Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i]; - __ bind(&table_entry->label); - - DCHECK(table_entry->bailout_type == jump_table_[0].bailout_type); - Address entry = table_entry->address; - DeoptComment(table_entry->deopt_info); - - // Second-level deopt table entries are contiguous and small, so instead - // of loading the full, absolute address of each one, load an immediate - // offset which will be added to the base address later. - __ li(entry_offset, Operand(entry - base)); - - if (table_entry->needs_frame) { - DCHECK(!info()->saves_caller_doubles()); - Comment(";;; call deopt with frame"); - __ PushCommonFrame(); - __ Call(&needs_frame); - } else { - __ Call(&call_deopt_entry); - } - } - - if (needs_frame.is_linked()) { - __ bind(&needs_frame); - // This variant of deopt can only be used with stubs. Since we don't - // have a function pointer to install in the stack frame that we're - // building, install a special marker there instead. - __ li(at, Operand(StackFrame::TypeToMarker(StackFrame::STUB))); - __ push(at); - DCHECK(info()->IsStub()); - } - - Comment(";;; call deopt"); - __ bind(&call_deopt_entry); - - if (info()->saves_caller_doubles()) { - DCHECK(info()->IsStub()); - RestoreCallerDoubles(); - } - - // Add the base address to the offset previously loaded in entry_offset. - __ Jump(entry_offset, Operand(ExternalReference::ForDeoptEntry(base))); - } - __ RecordComment("]"); - - // The deoptimization jump table is the last part of the instruction - // sequence. Mark the generated code as done unless we bailed out. 
- if (!is_aborted()) status_ = DONE; - return !is_aborted(); -} - - -bool LCodeGen::GenerateSafepointTable() { - DCHECK(is_done()); - safepoints_.Emit(masm(), GetTotalFrameSlotCount()); - return !is_aborted(); -} - - -Register LCodeGen::ToRegister(int index) const { - return Register::from_code(index); -} - - -DoubleRegister LCodeGen::ToDoubleRegister(int index) const { - return DoubleRegister::from_code(index); -} - - -Register LCodeGen::ToRegister(LOperand* op) const { - DCHECK(op->IsRegister()); - return ToRegister(op->index()); -} - - -Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { - if (op->IsRegister()) { - return ToRegister(op->index()); - } else if (op->IsConstantOperand()) { - LConstantOperand* const_op = LConstantOperand::cast(op); - HConstant* constant = chunk_->LookupConstant(const_op); - Handle literal = constant->handle(isolate()); - Representation r = chunk_->LookupLiteralRepresentation(const_op); - if (r.IsInteger32()) { - AllowDeferredHandleDereference get_number; - DCHECK(literal->IsNumber()); - __ li(scratch, Operand(static_cast(literal->Number()))); - } else if (r.IsSmi()) { - DCHECK(constant->HasSmiValue()); - __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value()))); - } else if (r.IsDouble()) { - Abort(kEmitLoadRegisterUnsupportedDoubleImmediate); - } else { - DCHECK(r.IsSmiOrTagged()); - __ li(scratch, literal); - } - return scratch; - } else if (op->IsStackSlot()) { - __ lw(scratch, ToMemOperand(op)); - return scratch; - } - UNREACHABLE(); -} - - -DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const { - DCHECK(op->IsDoubleRegister()); - return ToDoubleRegister(op->index()); -} - - -DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op, - FloatRegister flt_scratch, - DoubleRegister dbl_scratch) { - if (op->IsDoubleRegister()) { - return ToDoubleRegister(op->index()); - } else if (op->IsConstantOperand()) { - LConstantOperand* const_op = LConstantOperand::cast(op); - HConstant* constant = chunk_->LookupConstant(const_op); - Handle literal = constant->handle(isolate()); - Representation r = chunk_->LookupLiteralRepresentation(const_op); - if (r.IsInteger32()) { - DCHECK(literal->IsNumber()); - __ li(at, Operand(static_cast(literal->Number()))); - __ mtc1(at, flt_scratch); - __ cvt_d_w(dbl_scratch, flt_scratch); - return dbl_scratch; - } else if (r.IsDouble()) { - Abort(kUnsupportedDoubleImmediate); - } else if (r.IsTagged()) { - Abort(kUnsupportedTaggedImmediate); - } - } else if (op->IsStackSlot()) { - MemOperand mem_op = ToMemOperand(op); - __ Ldc1(dbl_scratch, mem_op); - return dbl_scratch; - } - UNREACHABLE(); -} - - -Handle LCodeGen::ToHandle(LConstantOperand* op) const { - HConstant* constant = chunk_->LookupConstant(op); - DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); - return constant->handle(isolate()); -} - - -bool LCodeGen::IsInteger32(LConstantOperand* op) const { - return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32(); -} - - -bool LCodeGen::IsSmi(LConstantOperand* op) const { - return chunk_->LookupLiteralRepresentation(op).IsSmi(); -} - - -int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { - return ToRepresentation(op, Representation::Integer32()); -} - - -int32_t LCodeGen::ToRepresentation(LConstantOperand* op, - const Representation& r) const { - HConstant* constant = chunk_->LookupConstant(op); - int32_t value = constant->Integer32Value(); - if (r.IsInteger32()) return value; - DCHECK(r.IsSmiOrTagged()); - return reinterpret_cast(Smi::FromInt(value)); -} - - -Smi* 
LCodeGen::ToSmi(LConstantOperand* op) const { - HConstant* constant = chunk_->LookupConstant(op); - return Smi::FromInt(constant->Integer32Value()); -} - - -double LCodeGen::ToDouble(LConstantOperand* op) const { - HConstant* constant = chunk_->LookupConstant(op); - DCHECK(constant->HasDoubleValue()); - return constant->DoubleValue(); -} - - -Operand LCodeGen::ToOperand(LOperand* op) { - if (op->IsConstantOperand()) { - LConstantOperand* const_op = LConstantOperand::cast(op); - HConstant* constant = chunk()->LookupConstant(const_op); - Representation r = chunk_->LookupLiteralRepresentation(const_op); - if (r.IsSmi()) { - DCHECK(constant->HasSmiValue()); - return Operand(Smi::FromInt(constant->Integer32Value())); - } else if (r.IsInteger32()) { - DCHECK(constant->HasInteger32Value()); - return Operand(constant->Integer32Value()); - } else if (r.IsDouble()) { - Abort(kToOperandUnsupportedDoubleImmediate); - } - DCHECK(r.IsTagged()); - return Operand(constant->handle(isolate())); - } else if (op->IsRegister()) { - return Operand(ToRegister(op)); - } else if (op->IsDoubleRegister()) { - Abort(kToOperandIsDoubleRegisterUnimplemented); - return Operand(0); - } - // Stack slots not implemented, use ToMemOperand instead. - UNREACHABLE(); -} - - -static int ArgumentsOffsetWithoutFrame(int index) { - DCHECK(index < 0); - return -(index + 1) * kPointerSize; -} - - -MemOperand LCodeGen::ToMemOperand(LOperand* op) const { - DCHECK(!op->IsRegister()); - DCHECK(!op->IsDoubleRegister()); - DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); - if (NeedsEagerFrame()) { - return MemOperand(fp, FrameSlotToFPOffset(op->index())); - } else { - // Retrieve parameter without eager stack-frame relative to the - // stack-pointer. - return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index())); - } -} - - -MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const { - DCHECK(op->IsDoubleStackSlot()); - if (NeedsEagerFrame()) { - return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize); - } else { - // Retrieve parameter without eager stack-frame relative to the - // stack-pointer. - return MemOperand( - sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize); - } -} - - -void LCodeGen::WriteTranslation(LEnvironment* environment, - Translation* translation) { - if (environment == NULL) return; - - // The translation includes one command per value in the environment. 
- int translation_size = environment->translation_size(); - - WriteTranslation(environment->outer(), translation); - WriteTranslationFrame(environment, translation); - - int object_index = 0; - int dematerialized_index = 0; - for (int i = 0; i < translation_size; ++i) { - LOperand* value = environment->values()->at(i); - AddToTranslation( - environment, translation, value, environment->HasTaggedValueAt(i), - environment->HasUint32ValueAt(i), &object_index, &dematerialized_index); - } -} - - -void LCodeGen::AddToTranslation(LEnvironment* environment, - Translation* translation, - LOperand* op, - bool is_tagged, - bool is_uint32, - int* object_index_pointer, - int* dematerialized_index_pointer) { - if (op == LEnvironment::materialization_marker()) { - int object_index = (*object_index_pointer)++; - if (environment->ObjectIsDuplicateAt(object_index)) { - int dupe_of = environment->ObjectDuplicateOfAt(object_index); - translation->DuplicateObject(dupe_of); - return; - } - int object_length = environment->ObjectLengthAt(object_index); - if (environment->ObjectIsArgumentsAt(object_index)) { - translation->BeginArgumentsObject(object_length); - } else { - translation->BeginCapturedObject(object_length); - } - int dematerialized_index = *dematerialized_index_pointer; - int env_offset = environment->translation_size() + dematerialized_index; - *dematerialized_index_pointer += object_length; - for (int i = 0; i < object_length; ++i) { - LOperand* value = environment->values()->at(env_offset + i); - AddToTranslation(environment, - translation, - value, - environment->HasTaggedValueAt(env_offset + i), - environment->HasUint32ValueAt(env_offset + i), - object_index_pointer, - dematerialized_index_pointer); - } - return; - } - - if (op->IsStackSlot()) { - int index = op->index(); - if (is_tagged) { - translation->StoreStackSlot(index); - } else if (is_uint32) { - translation->StoreUint32StackSlot(index); - } else { - translation->StoreInt32StackSlot(index); - } - } else if (op->IsDoubleStackSlot()) { - int index = op->index(); - translation->StoreDoubleStackSlot(index); - } else if (op->IsRegister()) { - Register reg = ToRegister(op); - if (is_tagged) { - translation->StoreRegister(reg); - } else if (is_uint32) { - translation->StoreUint32Register(reg); - } else { - translation->StoreInt32Register(reg); - } - } else if (op->IsDoubleRegister()) { - DoubleRegister reg = ToDoubleRegister(op); - translation->StoreDoubleRegister(reg); - } else if (op->IsConstantOperand()) { - HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op)); - int src_index = DefineDeoptimizationLiteral(constant->handle(isolate())); - translation->StoreLiteral(src_index); - } else { - UNREACHABLE(); - } -} - - -void LCodeGen::CallCode(Handle code, - RelocInfo::Mode mode, - LInstruction* instr) { - CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT); -} - - -void LCodeGen::CallCodeGeneric(Handle code, - RelocInfo::Mode mode, - LInstruction* instr, - SafepointMode safepoint_mode) { - DCHECK(instr != NULL); - __ Call(code, mode); - RecordSafepointWithLazyDeopt(instr, safepoint_mode); -} - - -void LCodeGen::CallRuntime(const Runtime::Function* function, - int num_arguments, - LInstruction* instr, - SaveFPRegsMode save_doubles) { - DCHECK(instr != NULL); - - __ CallRuntime(function, num_arguments, save_doubles); - - RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); -} - - -void LCodeGen::LoadContextFromDeferred(LOperand* context) { - if (context->IsRegister()) { - __ Move(cp, ToRegister(context)); - } 
else if (context->IsStackSlot()) { - __ lw(cp, ToMemOperand(context)); - } else if (context->IsConstantOperand()) { - HConstant* constant = - chunk_->LookupConstant(LConstantOperand::cast(context)); - __ li(cp, Handle::cast(constant->handle(isolate()))); - } else { - UNREACHABLE(); - } -} - - -void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, - int argc, - LInstruction* instr, - LOperand* context) { - LoadContextFromDeferred(context); - __ CallRuntimeSaveDoubles(id); - RecordSafepointWithRegisters( - instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); -} - - -void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment, - Safepoint::DeoptMode mode) { - environment->set_has_been_used(); - if (!environment->HasBeenRegistered()) { - // Physical stack frame layout: - // -x ............. -4 0 ..................................... y - // [incoming arguments] [spill slots] [pushed outgoing arguments] - - // Layout of the environment: - // 0 ..................................................... size-1 - // [parameters] [locals] [expression stack including arguments] - - // Layout of the translation: - // 0 ........................................................ size - 1 + 4 - // [expression stack including arguments] [locals] [4 words] [parameters] - // |>------------ translation_size ------------<| - - int frame_count = 0; - int jsframe_count = 0; - for (LEnvironment* e = environment; e != NULL; e = e->outer()) { - ++frame_count; - if (e->frame_type() == JS_FUNCTION) { - ++jsframe_count; - } - } - Translation translation(&translations_, frame_count, jsframe_count, zone()); - WriteTranslation(environment, &translation); - int deoptimization_index = deoptimizations_.length(); - int pc_offset = masm()->pc_offset(); - environment->Register(deoptimization_index, - translation.index(), - (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); - deoptimizations_.Add(environment, zone()); - } -} - -void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, - DeoptimizeReason deopt_reason, - Deoptimizer::BailoutType bailout_type, - Register src1, const Operand& src2) { - LEnvironment* environment = instr->environment(); - RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); - DCHECK(environment->HasBeenRegistered()); - int id = environment->deoptimization_index(); - Address entry = - Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); - if (entry == NULL) { - Abort(kBailoutWasNotPrepared); - return; - } - - if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) { - Register scratch = scratch0(); - ExternalReference count = ExternalReference::stress_deopt_count(isolate()); - Label no_deopt; - __ Push(a1, scratch); - __ li(scratch, Operand(count)); - __ lw(a1, MemOperand(scratch)); - __ Subu(a1, a1, Operand(1)); - __ Branch(&no_deopt, ne, a1, Operand(zero_reg)); - __ li(a1, Operand(FLAG_deopt_every_n_times)); - __ sw(a1, MemOperand(scratch)); - __ Pop(a1, scratch); - - __ Call(entry, RelocInfo::RUNTIME_ENTRY); - __ bind(&no_deopt); - __ sw(a1, MemOperand(scratch)); - __ Pop(a1, scratch); - } - - if (info()->ShouldTrapOnDeopt()) { - Label skip; - if (condition != al) { - __ Branch(&skip, NegateCondition(condition), src1, src2); - } - __ stop("trap_on_deopt"); - __ bind(&skip); - } - - Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id); - - DCHECK(info()->IsStub() || frame_is_built_); - // Go through jump table if we need to handle condition, build frame, or - // restore caller doubles. 
- if (condition == al && frame_is_built_ && - !info()->saves_caller_doubles()) { - DeoptComment(deopt_info); - __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2); - } else { - Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type, - !frame_is_built_); - // We often have several deopts to the same entry, reuse the last - // jump entry if this is the case. - if (FLAG_trace_deopt || isolate()->is_profiling() || - jump_table_.is_empty() || - !table_entry.IsEquivalentTo(jump_table_.last())) { - jump_table_.Add(table_entry, zone()); - } - __ Branch(&jump_table_.last().label, condition, src1, src2); - } -} - -void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, - DeoptimizeReason deopt_reason, Register src1, - const Operand& src2) { - Deoptimizer::BailoutType bailout_type = info()->IsStub() - ? Deoptimizer::LAZY - : Deoptimizer::EAGER; - DeoptimizeIf(condition, instr, deopt_reason, bailout_type, src1, src2); -} - - -void LCodeGen::RecordSafepointWithLazyDeopt( - LInstruction* instr, SafepointMode safepoint_mode) { - if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { - RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); - } else { - DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - RecordSafepointWithRegisters( - instr->pointer_map(), 0, Safepoint::kLazyDeopt); - } -} - - -void LCodeGen::RecordSafepoint( - LPointerMap* pointers, - Safepoint::Kind kind, - int arguments, - Safepoint::DeoptMode deopt_mode) { - DCHECK(expected_safepoint_kind_ == kind); - - const ZoneList* operands = pointers->GetNormalizedOperands(); - Safepoint safepoint = safepoints_.DefineSafepoint(masm(), - kind, arguments, deopt_mode); - for (int i = 0; i < operands->length(); i++) { - LOperand* pointer = operands->at(i); - if (pointer->IsStackSlot()) { - safepoint.DefinePointerSlot(pointer->index(), zone()); - } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { - safepoint.DefinePointerRegister(ToRegister(pointer), zone()); - } - } -} - - -void LCodeGen::RecordSafepoint(LPointerMap* pointers, - Safepoint::DeoptMode deopt_mode) { - RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode); -} - - -void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) { - LPointerMap empty_pointers(zone()); - RecordSafepoint(&empty_pointers, deopt_mode); -} - - -void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, - int arguments, - Safepoint::DeoptMode deopt_mode) { - RecordSafepoint( - pointers, Safepoint::kWithRegisters, arguments, deopt_mode); -} - - -static const char* LabelType(LLabel* label) { - if (label->is_loop_header()) return " (loop header)"; - if (label->is_osr_entry()) return " (OSR entry)"; - return ""; -} - - -void LCodeGen::DoLabel(LLabel* label) { - Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------", - current_instruction_, - label->hydrogen_value()->id(), - label->block_id(), - LabelType(label)); - __ bind(label->label()); - current_block_ = label->block_id(); - DoGap(label); -} - - -void LCodeGen::DoParallelMove(LParallelMove* move) { - resolver_.Resolve(move); -} - - -void LCodeGen::DoGap(LGap* gap) { - for (int i = LGap::FIRST_INNER_POSITION; - i <= LGap::LAST_INNER_POSITION; - i++) { - LGap::InnerPosition inner_pos = static_cast(i); - LParallelMove* move = gap->GetParallelMove(inner_pos); - if (move != NULL) DoParallelMove(move); - } -} - - -void LCodeGen::DoInstructionGap(LInstructionGap* instr) { - DoGap(instr); -} - - -void LCodeGen::DoParameter(LParameter* instr) { - 
// Nothing to do.
-}
-
-
-void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
-  GenerateOsrPrologue();
-}
-
-
-void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
-  Register dividend = ToRegister(instr->dividend());
-  int32_t divisor = instr->divisor();
-  DCHECK(dividend.is(ToRegister(instr->result())));
-
-  // Theoretically, a variation of the branch-free code for integer division by
-  // a power of 2 (calculating the remainder via an additional multiplication
-  // (which gets simplified to an 'and') and subtraction) should be faster, and
-  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
-  // indicate that positive dividends are heavily favored, so the branching
-  // version performs better.
-  HMod* hmod = instr->hydrogen();
-  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
-  Label dividend_is_not_negative, done;
-
-  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
-    __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
-    // Note: The code below even works when right contains kMinInt.
-    __ subu(dividend, zero_reg, dividend);
-    __ And(dividend, dividend, Operand(mask));
-    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
-                   Operand(zero_reg));
-    }
-    __ Branch(USE_DELAY_SLOT, &done);
-    __ subu(dividend, zero_reg, dividend);
-  }
-
-  __ bind(&dividend_is_not_negative);
-  __ And(dividend, dividend, Operand(mask));
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoModByConstI(LModByConstI* instr) {
-  Register dividend = ToRegister(instr->dividend());
-  int32_t divisor = instr->divisor();
-  Register result = ToRegister(instr->result());
-  DCHECK(!dividend.is(result));
-
-  if (divisor == 0) {
-    DeoptimizeIf(al, instr);
-    return;
-  }
-
-  __ TruncatingDiv(result, dividend, Abs(divisor));
-  __ Mul(result, result, Operand(Abs(divisor)));
-  __ Subu(result, dividend, Operand(result));
-
-  // Check for negative zero.
-  HMod* hmod = instr->hydrogen();
-  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    Label remainder_not_zero;
-    __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
-    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, dividend,
-                 Operand(zero_reg));
-    __ bind(&remainder_not_zero);
-  }
-}
-
-
-void LCodeGen::DoModI(LModI* instr) {
-  HMod* hmod = instr->hydrogen();
-  const Register left_reg = ToRegister(instr->left());
-  const Register right_reg = ToRegister(instr->right());
-  const Register result_reg = ToRegister(instr->result());
-
-  // div runs in the background while we check for special cases.
-  __ Mod(result_reg, left_reg, right_reg);
-
-  Label done;
-  // Check for x % 0, we have to deopt in this case because we can't return a
-  // NaN.
-  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
-    DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, right_reg,
-                 Operand(zero_reg));
-  }
-
-  // Check for kMinInt % -1, div will return kMinInt, which is not what we
-  // want. We have to deopt if we care about -0, because we can't return that.
- if (hmod->CheckFlag(HValue::kCanOverflow)) { - Label no_overflow_possible; - __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt)); - if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, right_reg, - Operand(-1)); - } else { - __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1)); - __ Branch(USE_DELAY_SLOT, &done); - __ mov(result_reg, zero_reg); - } - __ bind(&no_overflow_possible); - } - - // If we care about -0, test if the dividend is <0 and the result is 0. - __ Branch(&done, ge, left_reg, Operand(zero_reg)); - if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result_reg, - Operand(zero_reg)); - } - __ bind(&done); -} - - -void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - Register result = ToRegister(instr->result()); - DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); - DCHECK(!result.is(dividend)); - - // Check for (0 / -x) that will produce negative zero. - HDiv* hdiv = instr->hydrogen(); - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend, - Operand(zero_reg)); - } - // Check for (kMinInt / -1). - if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, dividend, - Operand(kMinInt)); - } - // Deoptimize if remainder will not be 0. - if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && - divisor != 1 && divisor != -1) { - int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); - __ And(at, dividend, Operand(mask)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, at, - Operand(zero_reg)); - } - - if (divisor == -1) { // Nice shortcut, not needed for correctness. - __ Subu(result, zero_reg, dividend); - return; - } - uint16_t shift = WhichPowerOf2Abs(divisor); - if (shift == 0) { - __ Move(result, dividend); - } else if (shift == 1) { - __ srl(result, dividend, 31); - __ Addu(result, dividend, Operand(result)); - } else { - __ sra(result, dividend, 31); - __ srl(result, result, 32 - shift); - __ Addu(result, dividend, Operand(result)); - } - if (shift > 0) __ sra(result, result, shift); - if (divisor < 0) __ Subu(result, zero_reg, result); -} - - -void LCodeGen::DoDivByConstI(LDivByConstI* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - Register result = ToRegister(instr->result()); - DCHECK(!dividend.is(result)); - - if (divisor == 0) { - DeoptimizeIf(al, instr); - return; - } - - // Check for (0 / -x) that will produce negative zero. - HDiv* hdiv = instr->hydrogen(); - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend, - Operand(zero_reg)); - } - - __ TruncatingDiv(result, dividend, Abs(divisor)); - if (divisor < 0) __ Subu(result, zero_reg, result); - - if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { - __ Mul(scratch0(), result, Operand(divisor)); - __ Subu(scratch0(), scratch0(), dividend); - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, scratch0(), - Operand(zero_reg)); - } -} - - -// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. 
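
DoDivByPowerOf2I above divides by plus or minus 2^k with a shift: for a negative dividend it first adds a bias of 2^k - 1, built from the sign bit, so that the arithmetic shift rounds toward zero, and it negates the result at the end when the divisor is negative. The sketch below renders the same trick at the C++ level; it is only an illustration under those assumptions, not code from the deleted file, and it omits the kMinInt / -1 case that the original rejects by deoptimizing.

#include <cassert>
#include <cstdint>

// Truncating (round-toward-zero) division of a 32-bit signed value by +/-2^k.
// 'shift' is k; 'negative_divisor' says whether the divisor was negative.
// Assumes arithmetic right shift of signed values, as the emitted MIPS code
// does, and that the kMinInt / -1 combination was rejected beforehand.
int32_t DivByPowerOf2(int32_t dividend, unsigned shift, bool negative_divisor) {
  int32_t result;
  if (shift == 0) {
    result = dividend;  // dividing by 1
  } else {
    // 0 for non-negative dividends, 2^shift - 1 for negative ones, derived
    // from the sign bit without a branch.
    uint32_t bias = static_cast<uint32_t>(dividend >> 31) >> (32 - shift);
    result = (dividend + static_cast<int32_t>(bias)) >> shift;
  }
  return negative_divisor ? -result : result;
}

int main() {
  assert(DivByPowerOf2(-7, 1, false) == -3);  // -7 / 2
  assert(DivByPowerOf2(-7, 2, true) == 1);    // -7 / -4
  assert(DivByPowerOf2(7, 3, false) == 0);    //  7 / 8
  return 0;
}
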
-void LCodeGen::DoDivI(LDivI* instr) { - HBinaryOperation* hdiv = instr->hydrogen(); - Register dividend = ToRegister(instr->dividend()); - Register divisor = ToRegister(instr->divisor()); - const Register result = ToRegister(instr->result()); - Register remainder = ToRegister(instr->temp()); - - // On MIPS div is asynchronous - it will run in the background while we - // check for special cases. - __ Div(remainder, result, dividend, divisor); - - // Check for x / 0. - if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, divisor, - Operand(zero_reg)); - } - - // Check for (0 / -x) that will produce negative zero. - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label left_not_zero; - __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); - DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, divisor, - Operand(zero_reg)); - __ bind(&left_not_zero); - } - - // Check for (kMinInt / -1). - if (hdiv->CheckFlag(HValue::kCanOverflow) && - !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { - Label left_not_min_int; - __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt)); - DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, divisor, Operand(-1)); - __ bind(&left_not_min_int); - } - - if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, remainder, - Operand(zero_reg)); - } -} - - -void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { - DoubleRegister addend = ToDoubleRegister(instr->addend()); - DoubleRegister multiplier = ToDoubleRegister(instr->multiplier()); - DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand()); - - // This is computed in-place. - DCHECK(addend.is(ToDoubleRegister(instr->result()))); - - __ madd_d(addend, addend, multiplier, multiplicand); -} - - -void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { - Register dividend = ToRegister(instr->dividend()); - Register result = ToRegister(instr->result()); - int32_t divisor = instr->divisor(); - Register scratch = result.is(dividend) ? scratch0() : dividend; - DCHECK(!result.is(dividend) || !scratch.is(dividend)); - - // If the divisor is 1, return the dividend. - if (divisor == 1) { - __ Move(result, dividend); - return; - } - - // If the divisor is positive, things are easy: There can be no deopts and we - // can simply do an arithmetic right shift. - uint16_t shift = WhichPowerOf2Abs(divisor); - if (divisor > 1) { - __ sra(result, dividend, shift); - return; - } - - // If the divisor is negative, we have to negate and handle edge cases. - - // dividend can be the same register as result so save the value of it - // for checking overflow. - __ Move(scratch, dividend); - - __ Subu(result, zero_reg, dividend); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result, - Operand(zero_reg)); - } - - // Dividing by -1 is basically negation, unless we overflow. - __ Xor(scratch, scratch, result); - if (divisor == -1) { - if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { - DeoptimizeIf(ge, instr, DeoptimizeReason::kOverflow, scratch, - Operand(zero_reg)); - } - return; - } - - // If the negation could not overflow, simply shifting is OK. 
- if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { - __ sra(result, result, shift); - return; - } - - Label no_overflow, done; - __ Branch(&no_overflow, lt, scratch, Operand(zero_reg)); - __ li(result, Operand(kMinInt / divisor)); - __ Branch(&done); - __ bind(&no_overflow); - __ sra(result, result, shift); - __ bind(&done); -} - - -void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - Register result = ToRegister(instr->result()); - DCHECK(!dividend.is(result)); - - if (divisor == 0) { - DeoptimizeIf(al, instr); - return; - } - - // Check for (0 / -x) that will produce negative zero. - HMathFloorOfDiv* hdiv = instr->hydrogen(); - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend, - Operand(zero_reg)); - } - - // Easy case: We need no dynamic check for the dividend and the flooring - // division is the same as the truncating division. - if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || - (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { - __ TruncatingDiv(result, dividend, Abs(divisor)); - if (divisor < 0) __ Subu(result, zero_reg, result); - return; - } - - // In the general case we may need to adjust before and after the truncating - // division to get a flooring division. - Register temp = ToRegister(instr->temp()); - DCHECK(!temp.is(dividend) && !temp.is(result)); - Label needs_adjustment, done; - __ Branch(&needs_adjustment, divisor > 0 ? lt : gt, - dividend, Operand(zero_reg)); - __ TruncatingDiv(result, dividend, Abs(divisor)); - if (divisor < 0) __ Subu(result, zero_reg, result); - __ jmp(&done); - __ bind(&needs_adjustment); - __ Addu(temp, dividend, Operand(divisor > 0 ? 1 : -1)); - __ TruncatingDiv(result, temp, Abs(divisor)); - if (divisor < 0) __ Subu(result, zero_reg, result); - __ Subu(result, result, Operand(1)); - __ bind(&done); -} - - -// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. -void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { - HBinaryOperation* hdiv = instr->hydrogen(); - Register dividend = ToRegister(instr->dividend()); - Register divisor = ToRegister(instr->divisor()); - const Register result = ToRegister(instr->result()); - Register remainder = scratch0(); - // On MIPS div is asynchronous - it will run in the background while we - // check for special cases. - __ Div(remainder, result, dividend, divisor); - - // Check for x / 0. - if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, divisor, - Operand(zero_reg)); - } - - // Check for (0 / -x) that will produce negative zero. - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label left_not_zero; - __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); - DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, divisor, - Operand(zero_reg)); - __ bind(&left_not_zero); - } - - // Check for (kMinInt / -1). - if (hdiv->CheckFlag(HValue::kCanOverflow) && - !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { - Label left_not_min_int; - __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt)); - DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, divisor, Operand(-1)); - __ bind(&left_not_min_int); - } - - // We performed a truncating division. Correct the result if necessary. 
- Label done; - __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT); - __ Xor(remainder, remainder, Operand(divisor)); - __ Branch(&done, ge, remainder, Operand(zero_reg)); - __ Subu(result, result, Operand(1)); - __ bind(&done); -} - - -void LCodeGen::DoMulI(LMulI* instr) { - Register scratch = scratch0(); - Register result = ToRegister(instr->result()); - // Note that result may alias left. - Register left = ToRegister(instr->left()); - LOperand* right_op = instr->right(); - - bool bailout_on_minus_zero = - instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); - bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); - - if (right_op->IsConstantOperand()) { - int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); - - if (bailout_on_minus_zero && (constant < 0)) { - // The case of a null constant will be handled separately. - // If constant is negative and left is null, the result should be -0. - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, left, - Operand(zero_reg)); - } - - switch (constant) { - case -1: - if (overflow) { - Label no_overflow; - __ SubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow); - DeoptimizeIf(al, instr); - __ bind(&no_overflow); - } else { - __ Subu(result, zero_reg, left); - } - break; - case 0: - if (bailout_on_minus_zero) { - // If left is strictly negative and the constant is null, the - // result is -0. Deoptimize if required, otherwise return 0. - DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, left, - Operand(zero_reg)); - } - __ mov(result, zero_reg); - break; - case 1: - // Nothing to do. - __ Move(result, left); - break; - default: - // Multiplying by powers of two and powers of two plus or minus - // one can be done faster with shifted operands. - // For other constants we emit standard code. - int32_t mask = constant >> 31; - uint32_t constant_abs = (constant + mask) ^ mask; - - if (base::bits::IsPowerOfTwo32(constant_abs)) { - int32_t shift = WhichPowerOf2(constant_abs); - __ sll(result, left, shift); - // Correct the sign of the result if the constant is negative. - if (constant < 0) __ Subu(result, zero_reg, result); - } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) { - int32_t shift = WhichPowerOf2(constant_abs - 1); - __ Lsa(result, left, left, shift); - // Correct the sign of the result if the constant is negative. - if (constant < 0) __ Subu(result, zero_reg, result); - } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) { - int32_t shift = WhichPowerOf2(constant_abs + 1); - __ sll(scratch, left, shift); - __ Subu(result, scratch, left); - // Correct the sign of the result if the constant is negative. - if (constant < 0) __ Subu(result, zero_reg, result); - } else { - // Generate standard code. - __ li(at, constant); - __ Mul(result, left, at); - } - } - - } else { - DCHECK(right_op->IsRegister()); - Register right = ToRegister(right_op); - - if (overflow) { - // hi:lo = left * right. 
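The overflow check that follows relies on the full 64-bit product: the multiplication fits in int32 exactly when the high 32 bits equal the sign extension of the low 32 bits, which is what comparing the hi register against result >> 31 establishes. A compact restatement (ours, not V8 code), assuming arithmetic right shift of negative values:

#include <cstdint>

bool mul_overflows_int32(int32_t left, int32_t right, int32_t* result) {
  int64_t product = static_cast<int64_t>(left) * static_cast<int64_t>(right);
  int32_t lo = static_cast<int32_t>(product);        // low 32 bits
  int32_t hi = static_cast<int32_t>(product >> 32);  // high 32 bits
  *result = lo;
  return hi != (lo >> 31);  // lo >> 31 is all zeros or all ones
}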
- if (instr->hydrogen()->representation().IsSmi()) { - __ SmiUntag(result, left); - __ Mul(scratch, result, result, right); - } else { - __ Mul(scratch, result, left, right); - } - __ sra(at, result, 31); - DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, scratch, - Operand(at)); - } else { - if (instr->hydrogen()->representation().IsSmi()) { - __ SmiUntag(result, left); - __ Mul(result, result, right); - } else { - __ Mul(result, left, right); - } - } - - if (bailout_on_minus_zero) { - Label done; - __ Xor(at, left, right); - __ Branch(&done, ge, at, Operand(zero_reg)); - // Bail out if the result is minus zero. - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result, - Operand(zero_reg)); - __ bind(&done); - } - } -} - - -void LCodeGen::DoBitI(LBitI* instr) { - LOperand* left_op = instr->left(); - LOperand* right_op = instr->right(); - DCHECK(left_op->IsRegister()); - Register left = ToRegister(left_op); - Register result = ToRegister(instr->result()); - Operand right(no_reg); - - if (right_op->IsStackSlot()) { - right = Operand(EmitLoadRegister(right_op, at)); - } else { - DCHECK(right_op->IsRegister() || right_op->IsConstantOperand()); - right = ToOperand(right_op); - } - - switch (instr->op()) { - case Token::BIT_AND: - __ And(result, left, right); - break; - case Token::BIT_OR: - __ Or(result, left, right); - break; - case Token::BIT_XOR: - if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) { - __ Nor(result, zero_reg, left); - } else { - __ Xor(result, left, right); - } - break; - default: - UNREACHABLE(); - break; - } -} - - -void LCodeGen::DoShiftI(LShiftI* instr) { - // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so - // result may alias either of them. - LOperand* right_op = instr->right(); - Register left = ToRegister(instr->left()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - - if (right_op->IsRegister()) { - // No need to mask the right operand on MIPS, it is built into the variable - // shift instructions. - switch (instr->op()) { - case Token::ROR: - __ Ror(result, left, Operand(ToRegister(right_op))); - break; - case Token::SAR: - __ srav(result, left, ToRegister(right_op)); - break; - case Token::SHR: - __ srlv(result, left, ToRegister(right_op)); - if (instr->can_deopt()) { - DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, result, - Operand(zero_reg)); - } - break; - case Token::SHL: - __ sllv(result, left, ToRegister(right_op)); - break; - default: - UNREACHABLE(); - break; - } - } else { - // Mask the right_op operand. 
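In the constant-shift path below, the shift amount is masked to its low five bits (matching both the JavaScript semantics and the MIPS shift instructions), and a logical right shift by zero is the one case that can leave the top bit set, which does not fit the signed int32 representation and therefore deoptimizes. A sketch of that rule (shr_as_int32 is our name; nullopt stands in for the deopt):

#include <cstdint>
#include <optional>

std::optional<int32_t> shr_as_int32(int32_t value, int32_t amount) {
  uint8_t shift_count = static_cast<uint8_t>(amount & 0x1F);  // mask to 5 bits
  uint32_t shifted = static_cast<uint32_t>(value) >> shift_count;
  if (shifted > 0x7FFFFFFFu) {
    return std::nullopt;  // only reachable when shift_count == 0
  }
  return static_cast<int32_t>(shifted);
}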
- int value = ToInteger32(LConstantOperand::cast(right_op)); - uint8_t shift_count = static_cast(value & 0x1F); - switch (instr->op()) { - case Token::ROR: - if (shift_count != 0) { - __ Ror(result, left, Operand(shift_count)); - } else { - __ Move(result, left); - } - break; - case Token::SAR: - if (shift_count != 0) { - __ sra(result, left, shift_count); - } else { - __ Move(result, left); - } - break; - case Token::SHR: - if (shift_count != 0) { - __ srl(result, left, shift_count); - } else { - if (instr->can_deopt()) { - __ And(at, left, Operand(0x80000000)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNegativeValue, at, - Operand(zero_reg)); - } - __ Move(result, left); - } - break; - case Token::SHL: - if (shift_count != 0) { - if (instr->hydrogen_value()->representation().IsSmi() && - instr->can_deopt()) { - if (shift_count != 1) { - __ sll(result, left, shift_count - 1); - __ SmiTagCheckOverflow(result, result, scratch); - } else { - __ SmiTagCheckOverflow(result, left, scratch); - } - DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, scratch, - Operand(zero_reg)); - } else { - __ sll(result, left, shift_count); - } - } else { - __ Move(result, left); - } - break; - default: - UNREACHABLE(); - break; - } - } -} - - -void LCodeGen::DoSubI(LSubI* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - LOperand* result = instr->result(); - bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); - - if (!can_overflow) { - if (right->IsStackSlot()) { - Register right_reg = EmitLoadRegister(right, at); - __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg)); - } else { - DCHECK(right->IsRegister() || right->IsConstantOperand()); - __ Subu(ToRegister(result), ToRegister(left), ToOperand(right)); - } - } else { // can_overflow. 
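The overflow branch of DoSubI below leans on SubBranchNoOvf, i.e. a branch taken only when left - right does not overflow, with an unconditional deopt emitted on the fall-through. One standard way to express that signed-overflow test in portable code (our sketch, not the macro-assembler's exact sequence):

#include <cstdint>

bool sub_overflows_int32(int32_t left, int32_t right, int32_t* result) {
  // Compute the wrapped result in unsigned arithmetic, then check the signs:
  // overflow happened iff the operands had different signs and the result's
  // sign differs from the left operand's.
  uint32_t wrapped = static_cast<uint32_t>(left) - static_cast<uint32_t>(right);
  *result = static_cast<int32_t>(wrapped);
  return ((left ^ right) & (left ^ *result)) < 0;
}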
- Register scratch = scratch0(); - Label no_overflow_label; - if (right->IsStackSlot()) { - Register right_reg = EmitLoadRegister(right, scratch); - __ SubBranchNoOvf(ToRegister(result), ToRegister(left), - Operand(right_reg), &no_overflow_label); - } else { - DCHECK(right->IsRegister() || right->IsConstantOperand()); - __ SubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right), - &no_overflow_label, scratch); - } - DeoptimizeIf(al, instr); - __ bind(&no_overflow_label); - } -} - - -void LCodeGen::DoConstantI(LConstantI* instr) { - __ li(ToRegister(instr->result()), Operand(instr->value())); -} - - -void LCodeGen::DoConstantS(LConstantS* instr) { - __ li(ToRegister(instr->result()), Operand(instr->value())); -} - - -void LCodeGen::DoConstantD(LConstantD* instr) { - DCHECK(instr->result()->IsDoubleRegister()); - DoubleRegister result = ToDoubleRegister(instr->result()); - double v = instr->value(); - __ Move(result, v); -} - - -void LCodeGen::DoConstantE(LConstantE* instr) { - __ li(ToRegister(instr->result()), Operand(instr->value())); -} - - -void LCodeGen::DoConstantT(LConstantT* instr) { - Handle object = instr->value(isolate()); - AllowDeferredHandleDereference smi_check; - __ li(ToRegister(instr->result()), object); -} - - -MemOperand LCodeGen::BuildSeqStringOperand(Register string, - LOperand* index, - String::Encoding encoding) { - if (index->IsConstantOperand()) { - int offset = ToInteger32(LConstantOperand::cast(index)); - if (encoding == String::TWO_BYTE_ENCODING) { - offset *= kUC16Size; - } - STATIC_ASSERT(kCharSize == 1); - return FieldMemOperand(string, SeqString::kHeaderSize + offset); - } - Register scratch = scratch0(); - DCHECK(!scratch.is(string)); - DCHECK(!scratch.is(ToRegister(index))); - if (encoding == String::ONE_BYTE_ENCODING) { - __ Addu(scratch, string, ToRegister(index)); - } else { - STATIC_ASSERT(kUC16Size == 2); - __ sll(scratch, ToRegister(index), 1); - __ Addu(scratch, string, scratch); - } - return FieldMemOperand(scratch, SeqString::kHeaderSize); -} - - -void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { - String::Encoding encoding = instr->hydrogen()->encoding(); - Register string = ToRegister(instr->string()); - Register result = ToRegister(instr->result()); - - if (FLAG_debug_code) { - Register scratch = scratch0(); - __ lw(scratch, FieldMemOperand(string, HeapObject::kMapOffset)); - __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); - - __ And(scratch, scratch, - Operand(kStringRepresentationMask | kStringEncodingMask)); - static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; - static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; - __ Subu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING - ? 
one_byte_seq_type : two_byte_seq_type)); - __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg)); - } - - MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); - if (encoding == String::ONE_BYTE_ENCODING) { - __ lbu(result, operand); - } else { - __ lhu(result, operand); - } -} - - -void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { - String::Encoding encoding = instr->hydrogen()->encoding(); - Register string = ToRegister(instr->string()); - Register value = ToRegister(instr->value()); - - if (FLAG_debug_code) { - Register scratch = scratch0(); - Register index = ToRegister(instr->index()); - static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; - static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; - int encoding_mask = - instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING - ? one_byte_seq_type : two_byte_seq_type; - __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask); - } - - MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); - if (encoding == String::ONE_BYTE_ENCODING) { - __ sb(value, operand); - } else { - __ sh(value, operand); - } -} - - -void LCodeGen::DoAddI(LAddI* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - LOperand* result = instr->result(); - bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); - - if (!can_overflow) { - if (right->IsStackSlot()) { - Register right_reg = EmitLoadRegister(right, at); - __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg)); - } else { - DCHECK(right->IsRegister() || right->IsConstantOperand()); - __ Addu(ToRegister(result), ToRegister(left), ToOperand(right)); - } - } else { // can_overflow. - Register scratch = scratch1(); - Label no_overflow_label; - if (right->IsStackSlot()) { - Register right_reg = EmitLoadRegister(right, scratch); - __ AddBranchNoOvf(ToRegister(result), ToRegister(left), - Operand(right_reg), &no_overflow_label); - } else { - DCHECK(right->IsRegister() || right->IsConstantOperand()); - __ AddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right), - &no_overflow_label, scratch); - } - DeoptimizeIf(al, instr); - __ bind(&no_overflow_label); - } -} - - -void LCodeGen::DoMathMinMax(LMathMinMax* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - HMathMinMax::Operation operation = instr->hydrogen()->operation(); - Register scratch = scratch1(); - if (instr->hydrogen()->representation().IsSmiOrInteger32()) { - Condition condition = (operation == HMathMinMax::kMathMin) ? 
le : ge; - Register left_reg = ToRegister(left); - Register right_reg = EmitLoadRegister(right, scratch0()); - Register result_reg = ToRegister(instr->result()); - Label return_right, done; - __ Slt(scratch, left_reg, Operand(right_reg)); - if (condition == ge) { - __ Movz(result_reg, left_reg, scratch); - __ Movn(result_reg, right_reg, scratch); - } else { - DCHECK(condition == le); - __ Movn(result_reg, left_reg, scratch); - __ Movz(result_reg, right_reg, scratch); - } - } else { - DCHECK(instr->hydrogen()->representation().IsDouble()); - FPURegister left_reg = ToDoubleRegister(left); - FPURegister right_reg = ToDoubleRegister(right); - FPURegister result_reg = ToDoubleRegister(instr->result()); - - Label nan, done; - if (operation == HMathMinMax::kMathMax) { - __ Float64Max(result_reg, left_reg, right_reg, &nan); - } else { - DCHECK(operation == HMathMinMax::kMathMin); - __ Float64Min(result_reg, left_reg, right_reg, &nan); - } - __ Branch(&done); - - __ bind(&nan); - __ add_d(result_reg, left_reg, right_reg); - - __ bind(&done); - } -} - - -void LCodeGen::DoArithmeticD(LArithmeticD* instr) { - DoubleRegister left = ToDoubleRegister(instr->left()); - DoubleRegister right = ToDoubleRegister(instr->right()); - DoubleRegister result = ToDoubleRegister(instr->result()); - switch (instr->op()) { - case Token::ADD: - __ add_d(result, left, right); - break; - case Token::SUB: - __ sub_d(result, left, right); - break; - case Token::MUL: - __ mul_d(result, left, right); - break; - case Token::DIV: - __ div_d(result, left, right); - break; - case Token::MOD: { - // Save a0-a3 on the stack. - RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit(); - __ MultiPush(saved_regs); - - __ PrepareCallCFunction(0, 2, scratch0()); - __ MovToFloatParameters(left, right); - __ CallCFunction( - ExternalReference::mod_two_doubles_operation(isolate()), - 0, 2); - // Move the result in the double result register. - __ MovFromFloatResult(result); - - // Restore saved register. 
- __ MultiPop(saved_regs); - break; - } - default: - UNREACHABLE(); - break; - } -} - - -void LCodeGen::DoArithmeticT(LArithmeticT* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - DCHECK(ToRegister(instr->left()).is(a1)); - DCHECK(ToRegister(instr->right()).is(a0)); - DCHECK(ToRegister(instr->result()).is(v0)); - - UNREACHABLE(); -} - - -template -void LCodeGen::EmitBranch(InstrType instr, - Condition condition, - Register src1, - const Operand& src2) { - int left_block = instr->TrueDestination(chunk_); - int right_block = instr->FalseDestination(chunk_); - - int next_block = GetNextEmittedBlock(); - if (right_block == left_block || condition == al) { - EmitGoto(left_block); - } else if (left_block == next_block) { - __ Branch(chunk_->GetAssemblyLabel(right_block), - NegateCondition(condition), src1, src2); - } else if (right_block == next_block) { - __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2); - } else { - __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2); - __ Branch(chunk_->GetAssemblyLabel(right_block)); - } -} - - -template -void LCodeGen::EmitBranchF(InstrType instr, - Condition condition, - FPURegister src1, - FPURegister src2) { - int right_block = instr->FalseDestination(chunk_); - int left_block = instr->TrueDestination(chunk_); - - int next_block = GetNextEmittedBlock(); - if (right_block == left_block) { - EmitGoto(left_block); - } else if (left_block == next_block) { - __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL, - NegateFpuCondition(condition), src1, src2); - } else if (right_block == next_block) { - __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, - condition, src1, src2); - } else { - __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, - condition, src1, src2); - __ Branch(chunk_->GetAssemblyLabel(right_block)); - } -} - - -template -void LCodeGen::EmitTrueBranch(InstrType instr, Condition condition, - Register src1, const Operand& src2) { - int true_block = instr->TrueDestination(chunk_); - __ Branch(chunk_->GetAssemblyLabel(true_block), condition, src1, src2); -} - - -template -void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition, - Register src1, const Operand& src2) { - int false_block = instr->FalseDestination(chunk_); - __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2); -} - - -template -void LCodeGen::EmitFalseBranchF(InstrType instr, - Condition condition, - FPURegister src1, - FPURegister src2) { - int false_block = instr->FalseDestination(chunk_); - __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL, - condition, src1, src2); -} - - -void LCodeGen::DoDebugBreak(LDebugBreak* instr) { - __ stop("LDebugBreak"); -} - - -void LCodeGen::DoBranch(LBranch* instr) { - Representation r = instr->hydrogen()->value()->representation(); - if (r.IsInteger32() || r.IsSmi()) { - DCHECK(!info()->IsStub()); - Register reg = ToRegister(instr->value()); - EmitBranch(instr, ne, reg, Operand(zero_reg)); - } else if (r.IsDouble()) { - DCHECK(!info()->IsStub()); - DoubleRegister reg = ToDoubleRegister(instr->value()); - // Test the double value. Zero and NaN are false. 
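The branch just below uses the ogl ("ordered and not equal to zero") condition, so a double is treated as true unless it is +0, -0 or NaN. In plain C++ terms (a sketch, not V8 code):

#include <cmath>

bool DoubleIsTruthy(double value) {
  return !std::isnan(value) && value != 0.0;  // -0.0 compares equal to 0.0
}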
- EmitBranchF(instr, ogl, reg, kDoubleRegZero); - } else { - DCHECK(r.IsTagged()); - Register reg = ToRegister(instr->value()); - HType type = instr->hydrogen()->value()->type(); - if (type.IsBoolean()) { - DCHECK(!info()->IsStub()); - __ LoadRoot(at, Heap::kTrueValueRootIndex); - EmitBranch(instr, eq, reg, Operand(at)); - } else if (type.IsSmi()) { - DCHECK(!info()->IsStub()); - EmitBranch(instr, ne, reg, Operand(zero_reg)); - } else if (type.IsJSArray()) { - DCHECK(!info()->IsStub()); - EmitBranch(instr, al, zero_reg, Operand(zero_reg)); - } else if (type.IsHeapNumber()) { - DCHECK(!info()->IsStub()); - DoubleRegister dbl_scratch = double_scratch0(); - __ Ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); - // Test the double value. Zero and NaN are false. - EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero); - } else if (type.IsString()) { - DCHECK(!info()->IsStub()); - __ lw(at, FieldMemOperand(reg, String::kLengthOffset)); - EmitBranch(instr, ne, at, Operand(zero_reg)); - } else { - ToBooleanHints expected = instr->hydrogen()->expected_input_types(); - // Avoid deopts in the case where we've never executed this path before. - if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny; - - if (expected & ToBooleanHint::kUndefined) { - // undefined -> false. - __ LoadRoot(at, Heap::kUndefinedValueRootIndex); - __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at)); - } - if (expected & ToBooleanHint::kBoolean) { - // Boolean -> its value. - __ LoadRoot(at, Heap::kTrueValueRootIndex); - __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at)); - __ LoadRoot(at, Heap::kFalseValueRootIndex); - __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at)); - } - if (expected & ToBooleanHint::kNull) { - // 'null' -> false. - __ LoadRoot(at, Heap::kNullValueRootIndex); - __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at)); - } - - if (expected & ToBooleanHint::kSmallInteger) { - // Smis: 0 -> false, all other -> true. - __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg)); - __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); - } else if (expected & ToBooleanHint::kNeedsMap) { - // If we need a map later and have a Smi -> deopt. - __ SmiTst(reg, at); - DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg)); - } - - const Register map = scratch0(); - if (expected & ToBooleanHint::kNeedsMap) { - __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset)); - if (expected & ToBooleanHint::kCanBeUndetectable) { - // Undetectable -> false. - __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset)); - __ And(at, at, Operand(1 << Map::kIsUndetectable)); - __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg)); - } - } - - if (expected & ToBooleanHint::kReceiver) { - // spec object -> true. - __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset)); - __ Branch(instr->TrueLabel(chunk_), - ge, at, Operand(FIRST_JS_RECEIVER_TYPE)); - } - - if (expected & ToBooleanHint::kString) { - // String value -> false iff empty. - Label not_string; - __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset)); - __ Branch(¬_string, ge , at, Operand(FIRST_NONSTRING_TYPE)); - __ lw(at, FieldMemOperand(reg, String::kLengthOffset)); - __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg)); - __ Branch(instr->FalseLabel(chunk_)); - __ bind(¬_string); - } - - if (expected & ToBooleanHint::kSymbol) { - // Symbol value -> true. 
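The hint-driven checks above and below add up to the usual JavaScript ToBoolean table, with a deopt for any type the optimized code has not seen yet. A simplified model of the end result, using our own value kinds rather than V8's object layout (undetectable objects, which are also falsy, are omitted for brevity):

#include <cmath>
#include <string>

enum class Kind { Undefined, Null, Boolean, SmallInt, HeapNumber, String, Other };

struct Value {
  Kind kind;
  bool boolean = false;
  double number = 0.0;
  std::string string;
};

bool ToBoolean(const Value& v) {
  switch (v.kind) {
    case Kind::Undefined:
    case Kind::Null:       return false;
    case Kind::Boolean:    return v.boolean;
    case Kind::SmallInt:   return v.number != 0.0;
    case Kind::HeapNumber: return !std::isnan(v.number) && v.number != 0.0;
    case Kind::String:     return !v.string.empty();
    case Kind::Other:      return true;  // receivers, symbols, ...
  }
  return true;
}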
- const Register scratch = scratch1(); - __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); - __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE)); - } - - if (expected & ToBooleanHint::kHeapNumber) { - // heap number -> false iff +0, -0, or NaN. - DoubleRegister dbl_scratch = double_scratch0(); - Label not_heap_number; - __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); - __ Branch(¬_heap_number, ne, map, Operand(at)); - __ Ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); - __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), - ne, dbl_scratch, kDoubleRegZero); - // Falls through if dbl_scratch == 0. - __ Branch(instr->FalseLabel(chunk_)); - __ bind(¬_heap_number); - } - - if (expected != ToBooleanHint::kAny) { - // We've seen something for the first time -> deopt. - // This can only happen if we are not generic already. - DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject, zero_reg, - Operand(zero_reg)); - } - } - } -} - - -void LCodeGen::EmitGoto(int block) { - if (!IsNextEmittedBlock(block)) { - __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); - } -} - - -void LCodeGen::DoGoto(LGoto* instr) { - EmitGoto(instr->block_id()); -} - - -Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { - Condition cond = kNoCondition; - switch (op) { - case Token::EQ: - case Token::EQ_STRICT: - cond = eq; - break; - case Token::NE: - case Token::NE_STRICT: - cond = ne; - break; - case Token::LT: - cond = is_unsigned ? lo : lt; - break; - case Token::GT: - cond = is_unsigned ? hi : gt; - break; - case Token::LTE: - cond = is_unsigned ? ls : le; - break; - case Token::GTE: - cond = is_unsigned ? hs : ge; - break; - case Token::IN: - case Token::INSTANCEOF: - default: - UNREACHABLE(); - } - return cond; -} - - -void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - bool is_unsigned = - instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) || - instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32); - Condition cond = TokenToCondition(instr->op(), is_unsigned); - - if (left->IsConstantOperand() && right->IsConstantOperand()) { - // We can statically evaluate the comparison. - double left_val = ToDouble(LConstantOperand::cast(left)); - double right_val = ToDouble(LConstantOperand::cast(right)); - int next_block = Token::EvalComparison(instr->op(), left_val, right_val) - ? instr->TrueDestination(chunk_) - : instr->FalseDestination(chunk_); - EmitGoto(next_block); - } else { - if (instr->is_double()) { - // Compare left and right as doubles and load the - // resulting flags into the normal status register. - FPURegister left_reg = ToDoubleRegister(left); - FPURegister right_reg = ToDoubleRegister(right); - - // If a NaN is involved, i.e. the result is unordered, - // jump to false block label. 
- __ BranchF(NULL, instr->FalseLabel(chunk_), eq, - left_reg, right_reg); - - EmitBranchF(instr, cond, left_reg, right_reg); - } else { - Register cmp_left; - Operand cmp_right = Operand(0); - - if (right->IsConstantOperand()) { - int32_t value = ToInteger32(LConstantOperand::cast(right)); - if (instr->hydrogen_value()->representation().IsSmi()) { - cmp_left = ToRegister(left); - cmp_right = Operand(Smi::FromInt(value)); - } else { - cmp_left = ToRegister(left); - cmp_right = Operand(value); - } - } else if (left->IsConstantOperand()) { - int32_t value = ToInteger32(LConstantOperand::cast(left)); - if (instr->hydrogen_value()->representation().IsSmi()) { - cmp_left = ToRegister(right); - cmp_right = Operand(Smi::FromInt(value)); - } else { - cmp_left = ToRegister(right); - cmp_right = Operand(value); - } - // We commuted the operands, so commute the condition. - cond = CommuteCondition(cond); - } else { - cmp_left = ToRegister(left); - cmp_right = Operand(ToRegister(right)); - } - - EmitBranch(instr, cond, cmp_left, cmp_right); - } - } -} - - -void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { - Register left = ToRegister(instr->left()); - Register right = ToRegister(instr->right()); - - EmitBranch(instr, eq, left, Operand(right)); -} - - -void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { - if (instr->hydrogen()->representation().IsTagged()) { - Register input_reg = ToRegister(instr->object()); - __ li(at, Operand(factory()->the_hole_value())); - EmitBranch(instr, eq, input_reg, Operand(at)); - return; - } - - DoubleRegister input_reg = ToDoubleRegister(instr->object()); - EmitFalseBranchF(instr, eq, input_reg, input_reg); - - Register scratch = scratch0(); - __ FmoveHigh(scratch, input_reg); - EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32)); -} - - -Condition LCodeGen::EmitIsString(Register input, - Register temp1, - Label* is_not_string, - SmiCheck check_needed = INLINE_SMI_CHECK) { - if (check_needed == INLINE_SMI_CHECK) { - __ JumpIfSmi(input, is_not_string); - } - __ GetObjectType(input, temp1, temp1); - - return lt; -} - - -void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { - Register reg = ToRegister(instr->value()); - Register temp1 = ToRegister(instr->temp()); - - SmiCheck check_needed = - instr->hydrogen()->value()->type().IsHeapObject() - ? 
OMIT_SMI_CHECK : INLINE_SMI_CHECK; - Condition true_cond = - EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed); - - EmitBranch(instr, true_cond, temp1, - Operand(FIRST_NONSTRING_TYPE)); -} - - -void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { - Register input_reg = EmitLoadRegister(instr->value(), at); - __ And(at, input_reg, kSmiTagMask); - EmitBranch(instr, eq, at, Operand(zero_reg)); -} - - -void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { - Register input = ToRegister(instr->value()); - Register temp = ToRegister(instr->temp()); - - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - __ JumpIfSmi(input, instr->FalseLabel(chunk_)); - } - __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset)); - __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); - __ And(at, temp, Operand(1 << Map::kIsUndetectable)); - EmitBranch(instr, ne, at, Operand(zero_reg)); -} - - -static Condition ComputeCompareCondition(Token::Value op) { - switch (op) { - case Token::EQ_STRICT: - case Token::EQ: - return eq; - case Token::LT: - return lt; - case Token::GT: - return gt; - case Token::LTE: - return le; - case Token::GTE: - return ge; - default: - UNREACHABLE(); - } -} - - -void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - DCHECK(ToRegister(instr->left()).is(a1)); - DCHECK(ToRegister(instr->right()).is(a0)); - - Handle code = CodeFactory::StringCompare(isolate(), instr->op()).code(); - CallCode(code, RelocInfo::CODE_TARGET, instr); - __ LoadRoot(at, Heap::kTrueValueRootIndex); - EmitBranch(instr, eq, v0, Operand(at)); -} - - -static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { - InstanceType from = instr->from(); - InstanceType to = instr->to(); - if (from == FIRST_TYPE) return to; - DCHECK(from == to || to == LAST_TYPE); - return from; -} - - -static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { - InstanceType from = instr->from(); - InstanceType to = instr->to(); - if (from == to) return eq; - if (to == LAST_TYPE) return hs; - if (from == FIRST_TYPE) return ls; - UNREACHABLE(); -} - - -void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { - Register scratch = scratch0(); - Register input = ToRegister(instr->value()); - - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - __ JumpIfSmi(input, instr->FalseLabel(chunk_)); - } - - __ GetObjectType(input, scratch, scratch); - EmitBranch(instr, - BranchCondition(instr->hydrogen()), - scratch, - Operand(TestType(instr->hydrogen()))); -} - -// Branches to a label or falls through with the answer in flags. Trashes -// the temp registers, but not the input. -void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false, - Handle class_name, Register input, - Register temp, Register temp2) { - DCHECK(!input.is(temp)); - DCHECK(!input.is(temp2)); - DCHECK(!temp.is(temp2)); - - __ JumpIfSmi(input, is_false); - __ GetObjectType(input, temp, temp2); - STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE); - if (String::Equals(isolate()->factory()->Function_string(), class_name)) { - __ Branch(is_true, hs, temp2, Operand(FIRST_FUNCTION_TYPE)); - } else { - __ Branch(is_false, hs, temp2, Operand(FIRST_FUNCTION_TYPE)); - } - - // Check if the constructor in the map is a function. - Register instance_type = scratch1(); - DCHECK(!instance_type.is(temp)); - __ GetMapConstructor(temp, temp, temp2, instance_type); - - // Objects with a non-function constructor have class 'Object'. 
- if (String::Equals(class_name, isolate()->factory()->Object_string())) { - __ Branch(is_true, ne, instance_type, Operand(JS_FUNCTION_TYPE)); - } else { - __ Branch(is_false, ne, instance_type, Operand(JS_FUNCTION_TYPE)); - } - - // temp now contains the constructor function. Grab the - // instance class name from there. - __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset)); - __ lw(temp, - FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset)); - // The class name we are testing against is internalized since it's a literal. - // The name in the constructor is internalized because of the way the context - // is booted. This routine isn't expected to work for random API-created - // classes and it doesn't have to because you can't access it with natives - // syntax. Since both sides are internalized it is sufficient to use an - // identity comparison. - - // End with the address of this class_name instance in temp register. - // On MIPS, the caller must do the comparison with Handleclass_name. -} - -void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { - Register input = ToRegister(instr->value()); - Register temp = scratch0(); - Register temp2 = ToRegister(instr->temp()); - Handle class_name = instr->hydrogen()->class_name(); - - EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), - class_name, input, temp, temp2); - - EmitBranch(instr, eq, temp, Operand(class_name)); -} - -void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { - Register reg = ToRegister(instr->value()); - Register temp = ToRegister(instr->temp()); - - __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset)); - EmitBranch(instr, eq, temp, Operand(instr->map())); -} - - -void LCodeGen::DoHasInPrototypeChainAndBranch( - LHasInPrototypeChainAndBranch* instr) { - Register const object = ToRegister(instr->object()); - Register const object_map = scratch0(); - Register const object_instance_type = scratch1(); - Register const object_prototype = object_map; - Register const prototype = ToRegister(instr->prototype()); - - // The {object} must be a spec object. It's sufficient to know that {object} - // is not a smi, since all other non-spec objects have {null} prototypes and - // will be ruled out below. - if (instr->hydrogen()->ObjectNeedsSmiCheck()) { - __ SmiTst(object, at); - EmitFalseBranch(instr, eq, at, Operand(zero_reg)); - } - - // Loop through the {object}s prototype chain looking for the {prototype}. - __ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); - Label loop; - __ bind(&loop); - - // Deoptimize if the object needs to be access checked. - __ lbu(object_instance_type, - FieldMemOperand(object_map, Map::kBitFieldOffset)); - __ And(object_instance_type, object_instance_type, - Operand(1 << Map::kIsAccessCheckNeeded)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, object_instance_type, - Operand(zero_reg)); - // Deoptimize for proxies. 
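The surrounding loop walks the prototype chain until it hits null (false), the sought prototype (true), or an object that needs the runtime's full lookup machinery (deopt for access-checked objects and proxies). The control flow, restated over a toy object model (ToyObject and its flags are our placeholders, not V8 types):

struct ToyObject {
  const ToyObject* prototype = nullptr;  // null terminates the chain
  bool needs_access_check = false;
  bool is_proxy = false;
};

enum class ChainResult { kTrue, kFalse, kDeopt };

ChainResult HasInPrototypeChain(const ToyObject* object, const ToyObject* target) {
  for (const ToyObject* current = object; ; ) {
    if (current->needs_access_check || current->is_proxy) {
      return ChainResult::kDeopt;  // defer to the runtime
    }
    const ToyObject* proto = current->prototype;
    if (proto == nullptr) return ChainResult::kFalse;
    if (proto == target) return ChainResult::kTrue;
    current = proto;
  }
}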
- __ lbu(object_instance_type, - FieldMemOperand(object_map, Map::kInstanceTypeOffset)); - DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy, object_instance_type, - Operand(JS_PROXY_TYPE)); - - __ lw(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset)); - __ LoadRoot(at, Heap::kNullValueRootIndex); - EmitFalseBranch(instr, eq, object_prototype, Operand(at)); - EmitTrueBranch(instr, eq, object_prototype, Operand(prototype)); - __ Branch(USE_DELAY_SLOT, &loop); - __ lw(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset)); -} - - -void LCodeGen::DoCmpT(LCmpT* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - Token::Value op = instr->op(); - - Handle ic = CodeFactory::CompareIC(isolate(), op).code(); - CallCode(ic, RelocInfo::CODE_TARGET, instr); - // On MIPS there is no need for a "no inlined smi code" marker (nop). - - Condition condition = ComputeCompareCondition(op); - // A minor optimization that relies on LoadRoot always emitting one - // instruction. - Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm()); - Label done, check; - __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg)); - __ bind(&check); - __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex); - DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check)); - __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); - __ bind(&done); -} - - -void LCodeGen::DoReturn(LReturn* instr) { - if (FLAG_trace && info()->IsOptimizing()) { - // Push the return value on the stack as the parameter. - // Runtime::TraceExit returns its parameter in v0. We're leaving the code - // managed by the register allocator and tearing down the frame, it's - // safe to write to the context register. - __ push(v0); - __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - __ CallRuntime(Runtime::kTraceExit); - } - if (info()->saves_caller_doubles()) { - RestoreCallerDoubles(); - } - if (NeedsEagerFrame()) { - __ mov(sp, fp); - __ Pop(ra, fp); - } - if (instr->has_constant_parameter_count()) { - int parameter_count = ToInteger32(instr->constant_parameter_count()); - int32_t sp_delta = (parameter_count + 1) * kPointerSize; - if (sp_delta != 0) { - __ Addu(sp, sp, Operand(sp_delta)); - } - } else { - DCHECK(info()->IsStub()); // Functions would need to drop one more value. 
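For the constant-parameter-count case handled just above, the stack adjustment is (parameter_count + 1) * kPointerSize, the extra slot being the receiver. A tiny reference calculation for the 32-bit port (ReturnSpDelta is our helper, not V8's):

#include <cstdint>

constexpr int32_t kPointerSize = 4;  // 32-bit MIPS

constexpr int32_t ReturnSpDelta(int32_t parameter_count) {
  return (parameter_count + 1) * kPointerSize;  // parameters plus the receiver
}

static_assert(ReturnSpDelta(2) == 12, "two arguments plus receiver = 3 slots");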
- Register reg = ToRegister(instr->parameter_count()); - // The argument count parameter is a smi - __ SmiUntag(reg); - __ Lsa(sp, sp, reg, kPointerSizeLog2); - } - - __ Jump(ra); -} - - -void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { - Register context = ToRegister(instr->context()); - Register result = ToRegister(instr->result()); - - __ lw(result, ContextMemOperand(context, instr->slot_index())); - if (instr->hydrogen()->RequiresHoleCheck()) { - __ LoadRoot(at, Heap::kTheHoleValueRootIndex); - - if (instr->hydrogen()->DeoptimizesOnHole()) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, Operand(at)); - } else { - Label is_not_hole; - __ Branch(&is_not_hole, ne, result, Operand(at)); - __ LoadRoot(result, Heap::kUndefinedValueRootIndex); - __ bind(&is_not_hole); - } - } -} - - -void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { - Register context = ToRegister(instr->context()); - Register value = ToRegister(instr->value()); - Register scratch = scratch0(); - MemOperand target = ContextMemOperand(context, instr->slot_index()); - - Label skip_assignment; - - if (instr->hydrogen()->RequiresHoleCheck()) { - __ lw(scratch, target); - __ LoadRoot(at, Heap::kTheHoleValueRootIndex); - - if (instr->hydrogen()->DeoptimizesOnHole()) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, scratch, Operand(at)); - } else { - __ Branch(&skip_assignment, ne, scratch, Operand(at)); - } - } - - __ sw(value, target); - if (instr->hydrogen()->NeedsWriteBarrier()) { - SmiCheck check_needed = - instr->hydrogen()->value()->type().IsHeapObject() - ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; - __ RecordWriteContextSlot(context, - target.offset(), - value, - scratch0(), - GetRAState(), - kSaveFPRegs, - EMIT_REMEMBERED_SET, - check_needed); - } - - __ bind(&skip_assignment); -} - - -void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { - HObjectAccess access = instr->hydrogen()->access(); - int offset = access.offset(); - Register object = ToRegister(instr->object()); - - if (access.IsExternalMemory()) { - Register result = ToRegister(instr->result()); - MemOperand operand = MemOperand(object, offset); - __ Load(result, operand, access.representation()); - return; - } - - if (instr->hydrogen()->representation().IsDouble()) { - DoubleRegister result = ToDoubleRegister(instr->result()); - __ Ldc1(result, FieldMemOperand(object, offset)); - return; - } - - Register result = ToRegister(instr->result()); - if (!access.IsInobject()) { - __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); - object = result; - } - MemOperand operand = FieldMemOperand(object, offset); - __ Load(result, operand, access.representation()); -} - - -void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { - Register scratch = scratch0(); - Register function = ToRegister(instr->function()); - Register result = ToRegister(instr->result()); - - // Get the prototype or initial map from the function. - __ lw(result, - FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); - - // Check that the function has a prototype or an initial map. - __ LoadRoot(at, Heap::kTheHoleValueRootIndex); - DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, Operand(at)); - - // If the function does not have an initial map, we're done. - Label done; - __ GetObjectType(result, scratch, scratch); - __ Branch(&done, ne, scratch, Operand(MAP_TYPE)); - - // Get the prototype from the initial map. - __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset)); - - // All done. 
- __ bind(&done); -} - - -void LCodeGen::DoLoadRoot(LLoadRoot* instr) { - Register result = ToRegister(instr->result()); - __ LoadRoot(result, instr->index()); -} - - -void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { - Register arguments = ToRegister(instr->arguments()); - Register result = ToRegister(instr->result()); - // There are two words between the frame pointer and the last argument. - // Subtracting from length accounts for one of them add one more. - if (instr->length()->IsConstantOperand()) { - int const_length = ToInteger32(LConstantOperand::cast(instr->length())); - if (instr->index()->IsConstantOperand()) { - int const_index = ToInteger32(LConstantOperand::cast(instr->index())); - int index = (const_length - const_index) + 1; - __ lw(result, MemOperand(arguments, index * kPointerSize)); - } else { - Register index = ToRegister(instr->index()); - __ li(at, Operand(const_length + 1)); - __ Subu(result, at, index); - __ Lsa(at, arguments, result, kPointerSizeLog2); - __ lw(result, MemOperand(at)); - } - } else if (instr->index()->IsConstantOperand()) { - Register length = ToRegister(instr->length()); - int const_index = ToInteger32(LConstantOperand::cast(instr->index())); - int loc = const_index - 1; - if (loc != 0) { - __ Subu(result, length, Operand(loc)); - __ Lsa(at, arguments, result, kPointerSizeLog2); - __ lw(result, MemOperand(at)); - } else { - __ Lsa(at, arguments, length, kPointerSizeLog2); - __ lw(result, MemOperand(at)); - } - } else { - Register length = ToRegister(instr->length()); - Register index = ToRegister(instr->index()); - __ Subu(result, length, index); - __ Addu(result, result, 1); - __ Lsa(at, arguments, result, kPointerSizeLog2); - __ lw(result, MemOperand(at)); - } -} - - -void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { - Register external_pointer = ToRegister(instr->elements()); - Register key = no_reg; - ElementsKind elements_kind = instr->elements_kind(); - bool key_is_constant = instr->key()->IsConstantOperand(); - int constant_key = 0; - if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort(kArrayIndexConstantValueTooBig); - } - } else { - key = ToRegister(instr->key()); - } - int element_size_shift = ElementsKindToShiftSize(elements_kind); - int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) - ? (element_size_shift - kSmiTagSize) : element_size_shift; - int base_offset = instr->base_offset(); - - if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) { - FPURegister result = ToDoubleRegister(instr->result()); - if (key_is_constant) { - __ Addu(scratch0(), external_pointer, constant_key << element_size_shift); - } else { - __ sll(scratch0(), key, shift_size); - __ Addu(scratch0(), scratch0(), external_pointer); - } - if (elements_kind == FLOAT32_ELEMENTS) { - __ lwc1(result, MemOperand(scratch0(), base_offset)); - __ cvt_d_s(result, result); - } else { // i.e. 
elements_kind == EXTERNAL_DOUBLE_ELEMENTS - __ Ldc1(result, MemOperand(scratch0(), base_offset)); - } - } else { - Register result = ToRegister(instr->result()); - MemOperand mem_operand = PrepareKeyedOperand( - key, external_pointer, key_is_constant, constant_key, - element_size_shift, shift_size, base_offset); - switch (elements_kind) { - case INT8_ELEMENTS: - __ lb(result, mem_operand); - break; - case UINT8_ELEMENTS: - case UINT8_CLAMPED_ELEMENTS: - __ lbu(result, mem_operand); - break; - case INT16_ELEMENTS: - __ lh(result, mem_operand); - break; - case UINT16_ELEMENTS: - __ lhu(result, mem_operand); - break; - case INT32_ELEMENTS: - __ lw(result, mem_operand); - break; - case UINT32_ELEMENTS: - __ lw(result, mem_operand); - if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { - DeoptimizeIf(Ugreater_equal, instr, DeoptimizeReason::kNegativeValue, - result, Operand(0x80000000)); - } - break; - case FLOAT32_ELEMENTS: - case FLOAT64_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case DICTIONARY_ELEMENTS: - case FAST_SLOPPY_ARGUMENTS_ELEMENTS: - case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: - case FAST_STRING_WRAPPER_ELEMENTS: - case SLOW_STRING_WRAPPER_ELEMENTS: - case NO_ELEMENTS: - UNREACHABLE(); - break; - } - } -} - - -void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { - Register elements = ToRegister(instr->elements()); - bool key_is_constant = instr->key()->IsConstantOperand(); - Register key = no_reg; - DoubleRegister result = ToDoubleRegister(instr->result()); - Register scratch = scratch0(); - - int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); - - int base_offset = instr->base_offset(); - if (key_is_constant) { - int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort(kArrayIndexConstantValueTooBig); - } - base_offset += constant_key * kDoubleSize; - } - __ Addu(scratch, elements, Operand(base_offset)); - - if (!key_is_constant) { - key = ToRegister(instr->key()); - int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) - ? (element_size_shift - kSmiTagSize) : element_size_shift; - __ Lsa(scratch, scratch, key, shift_size); - } - - __ Ldc1(result, MemOperand(scratch)); - - if (instr->hydrogen()->RequiresHoleCheck()) { - __ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset)); - DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, scratch, - Operand(kHoleNanUpper32)); - } -} - - -void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { - Register elements = ToRegister(instr->elements()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - Register store_base = scratch; - int offset = instr->base_offset(); - - if (instr->key()->IsConstantOperand()) { - LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); - offset += ToInteger32(const_operand) * kPointerSize; - store_base = elements; - } else { - Register key = ToRegister(instr->key()); - // Even though the HLoadKeyed instruction forces the input - // representation for the key to be an integer, the input gets replaced - // during bound check elimination with the index argument to the bounds - // check, which can be tagged, so that case must be handled here, too. 
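The key scaling that follows exploits the Smi encoding on 32-bit targets: a Smi key is the index already shifted left by one, so one shift can be folded out of the element-size scaling (and for one-byte elements the Smi must be shifted back down, the shift_size == -1 case in PrepareKeyedOperand). A sketch of the arithmetic, with our own helper name and a plain integer model of the key:

#include <cstdint>

constexpr int kSmiTagSize = 1;  // 32-bit Smi: value << 1, tag bit 0

int32_t element_offset(int32_t key_raw, bool key_is_smi,
                       int element_size_shift, int base_offset) {
  int shift = key_is_smi ? element_size_shift - kSmiTagSize : element_size_shift;
  int32_t scaled = shift >= 0 ? (key_raw << shift)
                              : (key_raw >> 1);  // one-byte elements, Smi key
  return scaled + base_offset;
}
// With FAST elements (element_size_shift == 2) and a Smi key, the effective
// shift is 1, because the Smi already carries one factor of two.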
- if (instr->hydrogen()->key()->representation().IsSmi()) { - __ Lsa(scratch, elements, key, kPointerSizeLog2 - kSmiTagSize); - } else { - __ Lsa(scratch, elements, key, kPointerSizeLog2); - } - } - __ lw(result, MemOperand(store_base, offset)); - - // Check for the hole value. - if (instr->hydrogen()->RequiresHoleCheck()) { - if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { - __ SmiTst(result, scratch); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, scratch, - Operand(zero_reg)); - } else { - __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); - DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, - Operand(scratch)); - } - } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) { - DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS); - Label done; - __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); - __ Branch(&done, ne, result, Operand(scratch)); - if (info()->IsStub()) { - // A stub can safely convert the hole to undefined only if the array - // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise - // it needs to bail out. - __ LoadRoot(result, Heap::kArrayProtectorRootIndex); - __ lw(result, FieldMemOperand(result, PropertyCell::kValueOffset)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kHole, result, - Operand(Smi::FromInt(Isolate::kProtectorValid))); - } - __ LoadRoot(result, Heap::kUndefinedValueRootIndex); - __ bind(&done); - } -} - - -void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { - if (instr->is_fixed_typed_array()) { - DoLoadKeyedExternalArray(instr); - } else if (instr->hydrogen()->representation().IsDouble()) { - DoLoadKeyedFixedDoubleArray(instr); - } else { - DoLoadKeyedFixedArray(instr); - } -} - - -MemOperand LCodeGen::PrepareKeyedOperand(Register key, - Register base, - bool key_is_constant, - int constant_key, - int element_size, - int shift_size, - int base_offset) { - if (key_is_constant) { - return MemOperand(base, (constant_key << element_size) + base_offset); - } - - if (base_offset == 0) { - if (shift_size >= 0) { - __ sll(scratch0(), key, shift_size); - __ Addu(scratch0(), base, scratch0()); - return MemOperand(scratch0()); - } else { - DCHECK_EQ(-1, shift_size); - __ srl(scratch0(), key, 1); - __ Addu(scratch0(), base, scratch0()); - return MemOperand(scratch0()); - } - } - - if (shift_size >= 0) { - __ sll(scratch0(), key, shift_size); - __ Addu(scratch0(), base, scratch0()); - return MemOperand(scratch0(), base_offset); - } else { - DCHECK_EQ(-1, shift_size); - __ sra(scratch0(), key, 1); - __ Addu(scratch0(), base, scratch0()); - return MemOperand(scratch0(), base_offset); - } -} - - -void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { - Register scratch = scratch0(); - Register temp = scratch1(); - Register result = ToRegister(instr->result()); - - if (instr->hydrogen()->from_inlined()) { - __ Subu(result, sp, 2 * kPointerSize); - } else if (instr->hydrogen()->arguments_adaptor()) { - // Check if the calling frame is an arguments adaptor frame. - Label done, adapted; - __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ lw(result, - MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset)); - __ Xor(temp, result, - Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); - - // Result is the frame pointer for the frame if not adapted and for the real - // frame below the adaptor frame if adapted. - __ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne). 
- __ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq). - } else { - __ mov(result, fp); - } -} - - -void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { - Register elem = ToRegister(instr->elements()); - Register result = ToRegister(instr->result()); - - Label done; - - // If no arguments adaptor frame the number of arguments is fixed. - __ Addu(result, zero_reg, Operand(scope()->num_parameters())); - __ Branch(&done, eq, fp, Operand(elem)); - - // Arguments adaptor frame present. Get argument length from there. - __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ lw(result, - MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ SmiUntag(result); - - // Argument length is in result register. - __ bind(&done); -} - - -void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { - Register receiver = ToRegister(instr->receiver()); - Register function = ToRegister(instr->function()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - - // If the receiver is null or undefined, we have to pass the global - // object as a receiver to normal functions. Values have to be - // passed unchanged to builtins and strict-mode functions. - Label global_object, result_in_receiver; - - if (!instr->hydrogen()->known_function()) { - // Do not transform the receiver to object for strict mode functions or - // builtins. - __ lw(scratch, - FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); - __ lw(scratch, - FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); - __ And(scratch, scratch, - Operand(SharedFunctionInfo::IsStrictBit::kMask | - SharedFunctionInfo::IsNativeBit::kMask)); - __ Branch(&result_in_receiver, ne, scratch, Operand(zero_reg)); - } - - // Normal function. Replace undefined or null with global receiver. - __ LoadRoot(scratch, Heap::kNullValueRootIndex); - __ Branch(&global_object, eq, receiver, Operand(scratch)); - __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); - __ Branch(&global_object, eq, receiver, Operand(scratch)); - - // Deoptimize if the receiver is not a JS object. - __ SmiTst(receiver, scratch); - DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, scratch, Operand(zero_reg)); - - __ GetObjectType(receiver, scratch, scratch); - DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject, scratch, - Operand(FIRST_JS_RECEIVER_TYPE)); - - __ Branch(&result_in_receiver); - __ bind(&global_object); - __ lw(result, FieldMemOperand(function, JSFunction::kContextOffset)); - __ lw(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX)); - __ lw(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX)); - - if (result.is(receiver)) { - __ bind(&result_in_receiver); - } else { - Label result_ok; - __ Branch(&result_ok); - __ bind(&result_in_receiver); - __ mov(result, receiver); - __ bind(&result_ok); - } -} - - -void LCodeGen::DoApplyArguments(LApplyArguments* instr) { - Register receiver = ToRegister(instr->receiver()); - Register function = ToRegister(instr->function()); - Register length = ToRegister(instr->length()); - Register elements = ToRegister(instr->elements()); - Register scratch = scratch0(); - DCHECK(receiver.is(a0)); // Used for parameter count. - DCHECK(function.is(a1)); // Required by InvokeFunction. - DCHECK(ToRegister(instr->result()).is(v0)); - - // Copy the arguments to this function possibly from the - // adaptor frame below it. 
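DoWrapReceiver above implements the sloppy-mode receiver rules: strict and native functions get the receiver unchanged, null and undefined are replaced with the global proxy, and any other primitive makes the optimized code bail out rather than box it inline. A compact restatement (the enums are ours, not V8 types):

enum class ReceiverKind { kUndefined, kNull, kSmi, kJSReceiver, kOtherPrimitive };

enum class WrapResult { kUseGlobalProxy, kUseAsIs, kDeopt };

WrapResult WrapReceiver(ReceiverKind receiver, bool strict_or_native) {
  if (strict_or_native) return WrapResult::kUseAsIs;  // passed through unchanged
  switch (receiver) {
    case ReceiverKind::kUndefined:
    case ReceiverKind::kNull:
      return WrapResult::kUseGlobalProxy;
    case ReceiverKind::kJSReceiver:
      return WrapResult::kUseAsIs;
    case ReceiverKind::kSmi:
    case ReceiverKind::kOtherPrimitive:
      return WrapResult::kDeopt;  // the optimized code deoptimizes here
  }
  return WrapResult::kDeopt;
}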
- const uint32_t kArgumentsLimit = 1 * KB; - DeoptimizeIf(hi, instr, DeoptimizeReason::kTooManyArguments, length, - Operand(kArgumentsLimit)); - - // Push the receiver and use the register to keep the original - // number of arguments. - __ push(receiver); - __ Move(receiver, length); - // The arguments are at a one pointer size offset from elements. - __ Addu(elements, elements, Operand(1 * kPointerSize)); - - // Loop through the arguments pushing them onto the execution - // stack. - Label invoke, loop; - // length is a small non-negative integer, due to the test above. - __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg)); - __ sll(scratch, length, 2); - __ bind(&loop); - __ Addu(scratch, elements, scratch); - __ lw(scratch, MemOperand(scratch)); - __ push(scratch); - __ Subu(length, length, Operand(1)); - __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg)); - __ sll(scratch, length, 2); - - __ bind(&invoke); - - InvokeFlag flag = CALL_FUNCTION; - if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) { - DCHECK(!info()->saves_caller_doubles()); - // TODO(ishell): drop current frame before pushing arguments to the stack. - flag = JUMP_FUNCTION; - ParameterCount actual(a0); - // It is safe to use t0, t1 and t2 as scratch registers here given that - // we are not going to return to caller function anyway. - PrepareForTailCall(actual, t0, t1, t2); - } - - DCHECK(instr->HasPointerMap()); - LPointerMap* pointers = instr->pointer_map(); - SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt); - // The number of arguments is stored in receiver which is a0, as expected - // by InvokeFunction. - ParameterCount actual(receiver); - __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator); -} - - -void LCodeGen::DoPushArgument(LPushArgument* instr) { - LOperand* argument = instr->value(); - if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) { - Abort(kDoPushArgumentNotImplementedForDoubleType); - } else { - Register argument_reg = EmitLoadRegister(argument, at); - __ push(argument_reg); - } -} - - -void LCodeGen::DoDrop(LDrop* instr) { - __ Drop(instr->count()); -} - - -void LCodeGen::DoThisFunction(LThisFunction* instr) { - Register result = ToRegister(instr->result()); - __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); -} - - -void LCodeGen::DoContext(LContext* instr) { - // If there is a non-return use, the context must be moved to a register. - Register result = ToRegister(instr->result()); - if (info()->IsOptimizing()) { - __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); - } else { - // If there is no frame, the context must be in cp. 
- DCHECK(result.is(cp)); - } -} - - -void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - __ li(scratch0(), instr->hydrogen()->declarations()); - __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags()))); - __ Push(scratch0(), scratch1()); - __ li(scratch0(), instr->hydrogen()->feedback_vector()); - __ Push(scratch0()); - CallRuntime(Runtime::kDeclareGlobals, instr); -} - -void LCodeGen::CallKnownFunction(Handle function, - int formal_parameter_count, int arity, - bool is_tail_call, LInstruction* instr) { - bool dont_adapt_arguments = - formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; - bool can_invoke_directly = - dont_adapt_arguments || formal_parameter_count == arity; - - Register function_reg = a1; - LPointerMap* pointers = instr->pointer_map(); - - if (can_invoke_directly) { - // Change context. - __ lw(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset)); - - // Always initialize new target and number of actual arguments. - __ LoadRoot(a3, Heap::kUndefinedValueRootIndex); - __ li(a0, Operand(arity)); - - bool is_self_call = function.is_identical_to(info()->closure()); - - // Invoke function. - if (is_self_call) { - Handle self(reinterpret_cast(__ CodeObject().location())); - if (is_tail_call) { - __ Jump(self, RelocInfo::CODE_TARGET); - } else { - __ Call(self, RelocInfo::CODE_TARGET); - } - } else { - __ lw(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset)); - if (is_tail_call) { - __ Jump(at); - } else { - __ Call(at); - } - } - - if (!is_tail_call) { - // Set up deoptimization. - RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); - } - } else { - SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); - ParameterCount actual(arity); - ParameterCount expected(formal_parameter_count); - InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION; - __ InvokeFunction(function_reg, expected, actual, flag, generator); - } -} - - -void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { - DCHECK(instr->context() != NULL); - DCHECK(ToRegister(instr->context()).is(cp)); - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - - // Deoptimize if not a heap number. - __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); - __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch, - Operand(at)); - - Label done; - Register exponent = scratch0(); - scratch = no_reg; - __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); - // Check the sign of the argument. If the argument is positive, just - // return it. - __ Move(result, input); - __ And(at, exponent, Operand(HeapNumber::kSignMask)); - __ Branch(&done, eq, at, Operand(zero_reg)); - - // Input is negative. Reverse its sign. - // Preserve the value of all registers. - { - PushSafepointRegistersScope scope(this); - - // Registers were saved at the safepoint, so we can use - // many scratch registers. - Register tmp1 = input.is(a1) ? a0 : a1; - Register tmp2 = input.is(a2) ? a0 : a2; - Register tmp3 = input.is(a3) ? a0 : a3; - Register tmp4 = input.is(t0) ? a0 : t0; - - // exponent: floating point exponent value. 
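The deferred Math.abs path that continues below only has to clear the sign bit of the heap number's upper word; the mantissa word is copied unchanged. The same bit manipulation on a raw double (a sketch, assuming IEEE-754 layout):

#include <cstdint>
#include <cstring>

double abs_via_sign_bit(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  bits &= ~(uint64_t{1} << 63);  // the counterpart of HeapNumber::kSignMask
  double result;
  std::memcpy(&result, &bits, sizeof(result));
  return result;
}
// abs_via_sign_bit(-0.0) is +0.0, and clearing the sign bit of a NaN keeps it
// a NaN, matching the word-wise copy above.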
- - Label allocated, slow; - __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow); - __ Branch(&allocated); - - // Slow case: Call the runtime system to do the number allocation. - __ bind(&slow); - - CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr, - instr->context()); - // Set the pointer to the new heap number in tmp. - if (!tmp1.is(v0)) - __ mov(tmp1, v0); - // Restore input_reg after call to runtime. - __ LoadFromSafepointRegisterSlot(input, input); - __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); - - __ bind(&allocated); - // exponent: floating point exponent value. - // tmp1: allocated heap number. - __ And(exponent, exponent, Operand(~HeapNumber::kSignMask)); - __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset)); - __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset)); - __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset)); - - __ StoreToSafepointRegisterSlot(tmp1, result); - } - - __ bind(&done); -} - - -void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); - Label done; - __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg)); - __ mov(result, input); - __ subu(result, zero_reg, input); - // Overflow if result is still negative, i.e. 0x80000000. - DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, result, - Operand(zero_reg)); - __ bind(&done); -} - - -void LCodeGen::DoMathAbs(LMathAbs* instr) { - // Class for deferred case. - class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode { - public: - DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { - codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); - } - LInstruction* instr() override { return instr_; } - - private: - LMathAbs* instr_; - }; - - Representation r = instr->hydrogen()->value()->representation(); - if (r.IsDouble()) { - FPURegister input = ToDoubleRegister(instr->value()); - FPURegister result = ToDoubleRegister(instr->result()); - __ abs_d(result, input); - } else if (r.IsSmiOrInteger32()) { - EmitIntegerMathAbs(instr); - } else { - // Representation is tagged. - DeferredMathAbsTaggedHeapNumber* deferred = - new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); - Register input = ToRegister(instr->value()); - // Smi check. - __ JumpIfNotSmi(input, deferred->entry()); - // If smi, handle it directly. - EmitIntegerMathAbs(instr); - __ bind(deferred->exit()); - } -} - - -void LCodeGen::DoMathFloor(LMathFloor* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - Register result = ToRegister(instr->result()); - Register scratch1 = scratch0(); - Register except_flag = ToRegister(instr->temp()); - - __ EmitFPUTruncate(kRoundToMinusInf, - result, - input, - scratch1, - double_scratch0(), - except_flag); - - // Deopt if the operation did not succeed. - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag, - Operand(zero_reg)); - - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // Test for -0. 
- Label done; - __ Branch(&done, ne, result, Operand(zero_reg)); - __ Mfhc1(scratch1, input); - __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1, - Operand(zero_reg)); - __ bind(&done); - } -} - - -void LCodeGen::DoMathRound(LMathRound* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - Register result = ToRegister(instr->result()); - DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp()); - Register scratch = scratch0(); - Label done, check_sign_on_zero; - - // Extract exponent bits. - __ Mfhc1(result, input); - __ Ext(scratch, - result, - HeapNumber::kExponentShift, - HeapNumber::kExponentBits); - - // If the number is in ]-0.5, +0.5[, the result is +/- 0. - Label skip1; - __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2)); - __ mov(result, zero_reg); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - __ Branch(&check_sign_on_zero); - } else { - __ Branch(&done); - } - __ bind(&skip1); - - // The following conversion will not work with numbers - // outside of ]-2^32, 2^32[. - DeoptimizeIf(ge, instr, DeoptimizeReason::kOverflow, scratch, - Operand(HeapNumber::kExponentBias + 32)); - - // Save the original sign for later comparison. - __ And(scratch, result, Operand(HeapNumber::kSignMask)); - - __ Move(double_scratch0(), 0.5); - __ add_d(double_scratch0(), input, double_scratch0()); - - // Check sign of the result: if the sign changed, the input - // value was in ]0.5, 0[ and the result should be -0. - __ Mfhc1(result, double_scratch0()); - __ Xor(result, result, Operand(scratch)); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // ARM uses 'mi' here, which is 'lt' - DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, result, - Operand(zero_reg)); - } else { - Label skip2; - // ARM uses 'mi' here, which is 'lt' - // Negating it results in 'ge' - __ Branch(&skip2, ge, result, Operand(zero_reg)); - __ mov(result, zero_reg); - __ Branch(&done); - __ bind(&skip2); - } - - Register except_flag = scratch; - __ EmitFPUTruncate(kRoundToMinusInf, - result, - double_scratch0(), - at, - double_scratch1, - except_flag); - - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag, - Operand(zero_reg)); - - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // Test for -0. 
- __ Branch(&done, ne, result, Operand(zero_reg)); - __ bind(&check_sign_on_zero); - __ Mfhc1(scratch, input); - __ And(scratch, scratch, Operand(HeapNumber::kSignMask)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch, - Operand(zero_reg)); - } - __ bind(&done); -} - - -void LCodeGen::DoMathFround(LMathFround* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - DoubleRegister result = ToDoubleRegister(instr->result()); - __ cvt_s_d(result.low(), input); - __ cvt_d_s(result, result.low()); -} - - -void LCodeGen::DoMathSqrt(LMathSqrt* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - DoubleRegister result = ToDoubleRegister(instr->result()); - __ sqrt_d(result, input); -} - - -void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - DoubleRegister result = ToDoubleRegister(instr->result()); - DoubleRegister temp = ToDoubleRegister(instr->temp()); - - DCHECK(!input.is(result)); - - // Note that according to ECMA-262 15.8.2.13: - // Math.pow(-Infinity, 0.5) == Infinity - // Math.sqrt(-Infinity) == NaN - Label done; - __ Move(temp, static_cast(-V8_INFINITY)); - // Set up Infinity. - __ Neg_d(result, temp); - // result is overwritten if the branch is not taken. - __ BranchF(&done, NULL, eq, temp, input); - - // Add +0 to convert -0 to +0. - __ add_d(result, input, kDoubleRegZero); - __ sqrt_d(result, result); - __ bind(&done); -} - - -void LCodeGen::DoPower(LPower* instr) { - Representation exponent_type = instr->hydrogen()->right()->representation(); - // Having marked this as a call, we can use any registers. - // Just make sure that the input/output registers are the expected ones. - Register tagged_exponent = MathPowTaggedDescriptor::exponent(); - DCHECK(!instr->right()->IsDoubleRegister() || - ToDoubleRegister(instr->right()).is(f4)); - DCHECK(!instr->right()->IsRegister() || - ToRegister(instr->right()).is(tagged_exponent)); - DCHECK(ToDoubleRegister(instr->left()).is(f2)); - DCHECK(ToDoubleRegister(instr->result()).is(f0)); - - if (exponent_type.IsSmi()) { - MathPowStub stub(isolate(), MathPowStub::TAGGED); - __ CallStub(&stub); - } else if (exponent_type.IsTagged()) { - Label no_deopt; - __ JumpIfSmi(tagged_exponent, &no_deopt); - DCHECK(!t3.is(tagged_exponent)); - __ lw(t3, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); - __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, t3, Operand(at)); - __ bind(&no_deopt); - MathPowStub stub(isolate(), MathPowStub::TAGGED); - __ CallStub(&stub); - } else if (exponent_type.IsInteger32()) { - MathPowStub stub(isolate(), MathPowStub::INTEGER); - __ CallStub(&stub); - } else { - DCHECK(exponent_type.IsDouble()); - MathPowStub stub(isolate(), MathPowStub::DOUBLE); - __ CallStub(&stub); - } -} - -void LCodeGen::DoMathCos(LMathCos* instr) { - __ PrepareCallCFunction(0, 1, scratch0()); - __ MovToFloatParameter(ToDoubleRegister(instr->value())); - __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1); - __ MovFromFloatResult(ToDoubleRegister(instr->result())); -} - -void LCodeGen::DoMathSin(LMathSin* instr) { - __ PrepareCallCFunction(0, 1, scratch0()); - __ MovToFloatParameter(ToDoubleRegister(instr->value())); - __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1); - __ MovFromFloatResult(ToDoubleRegister(instr->result())); -} - -void LCodeGen::DoMathExp(LMathExp* instr) { - __ PrepareCallCFunction(0, 1, scratch0()); - __ 
MovToFloatParameter(ToDoubleRegister(instr->value())); - __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1); - __ MovFromFloatResult(ToDoubleRegister(instr->result())); -} - - -void LCodeGen::DoMathLog(LMathLog* instr) { - __ PrepareCallCFunction(0, 1, scratch0()); - __ MovToFloatParameter(ToDoubleRegister(instr->value())); - __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1); - __ MovFromFloatResult(ToDoubleRegister(instr->result())); -} - - -void LCodeGen::DoMathClz32(LMathClz32* instr) { - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - __ Clz(result, input); -} - -void LCodeGen::PrepareForTailCall(const ParameterCount& actual, - Register scratch1, Register scratch2, - Register scratch3) { -#if DEBUG - if (actual.is_reg()) { - DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3)); - } else { - DCHECK(!AreAliased(scratch1, scratch2, scratch3)); - } -#endif - if (FLAG_code_comments) { - if (actual.is_reg()) { - Comment(";;; PrepareForTailCall, actual: %s {", - RegisterConfiguration::Crankshaft()->GetGeneralRegisterName( - actual.reg().code())); - } else { - Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate()); - } - } - - // Check if next frame is an arguments adaptor frame. - Register caller_args_count_reg = scratch1; - Label no_arguments_adaptor, formal_parameter_count_loaded; - __ lw(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ lw(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset)); - __ Branch(&no_arguments_adaptor, ne, scratch3, - Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); - - // Drop current frame and load arguments count from arguments adaptor frame. - __ mov(fp, scratch2); - __ lw(caller_args_count_reg, - MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ SmiUntag(caller_args_count_reg); - __ Branch(&formal_parameter_count_loaded); - - __ bind(&no_arguments_adaptor); - // Load caller's formal parameter count - __ lw(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); - __ lw(scratch1, - FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset)); - __ li(caller_args_count_reg, Operand(info()->literal()->parameter_count())); - - __ bind(&formal_parameter_count_loaded); - __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3); - - Comment(";;; }"); -} - -void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { - HInvokeFunction* hinstr = instr->hydrogen(); - DCHECK(ToRegister(instr->context()).is(cp)); - DCHECK(ToRegister(instr->function()).is(a1)); - DCHECK(instr->HasPointerMap()); - - bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow; - - if (is_tail_call) { - DCHECK(!info()->saves_caller_doubles()); - ParameterCount actual(instr->arity()); - // It is safe to use t0, t1 and t2 as scratch registers here given that - // we are not going to return to caller function anyway. - PrepareForTailCall(actual, t0, t1, t2); - } - - Handle known_function = hinstr->known_function(); - if (known_function.is_null()) { - LPointerMap* pointers = instr->pointer_map(); - SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); - ParameterCount actual(instr->arity()); - InvokeFlag flag = is_tail_call ? 
JUMP_FUNCTION : CALL_FUNCTION;
-    __ InvokeFunction(a1, no_reg, actual, flag, generator);
-  } else {
-    CallKnownFunction(known_function, hinstr->formal_parameter_count(),
-                      instr->arity(), is_tail_call, instr);
-  }
-}
-
-
-void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
-  DCHECK(ToRegister(instr->result()).is(v0));
-
-  if (instr->hydrogen()->IsTailCall()) {
-    if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
-
-    if (instr->target()->IsConstantOperand()) {
-      LConstantOperand* target = LConstantOperand::cast(instr->target());
-      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
-      __ Jump(code, RelocInfo::CODE_TARGET);
-    } else {
-      DCHECK(instr->target()->IsRegister());
-      Register target = ToRegister(instr->target());
-      __ Jump(target, Code::kHeaderSize - kHeapObjectTag);
-    }
-  } else {
-    LPointerMap* pointers = instr->pointer_map();
-    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-
-    if (instr->target()->IsConstantOperand()) {
-      LConstantOperand* target = LConstantOperand::cast(instr->target());
-      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
-      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
-      __ Call(code, RelocInfo::CODE_TARGET);
-    } else {
-      DCHECK(instr->target()->IsRegister());
-      Register target = ToRegister(instr->target());
-      generator.BeforeCall(__ CallSize(target));
-      __ Call(target, Code::kHeaderSize - kHeapObjectTag);
-    }
-    generator.AfterCall();
-  }
-}
-
-
-void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->constructor()).is(a1));
-  DCHECK(ToRegister(instr->result()).is(v0));
-
-  __ li(a0, Operand(instr->arity()));
-  __ li(a2, instr->hydrogen()->site());
-
-  ElementsKind kind = instr->hydrogen()->elements_kind();
-  AllocationSiteOverrideMode override_mode = AllocationSite::ShouldTrack(kind)
-      ? DISABLE_ALLOCATION_SITES
-      : DONT_OVERRIDE;
-
-  if (instr->arity() == 0) {
-    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
-    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-  } else if (instr->arity() == 1) {
-    Label done;
-    if (IsFastPackedElementsKind(kind)) {
-      Label packed_case;
-      // We might need a change here,
-      // look at the first argument.
- __ lw(t1, MemOperand(sp, 0)); - __ Branch(&packed_case, eq, t1, Operand(zero_reg)); - - ElementsKind holey_kind = GetHoleyElementsKind(kind); - ArraySingleArgumentConstructorStub stub(isolate(), - holey_kind, - override_mode); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - __ jmp(&done); - __ bind(&packed_case); - } - - ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - __ bind(&done); - } else { - ArrayNArgumentsConstructorStub stub(isolate()); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - } -} - - -void LCodeGen::DoCallRuntime(LCallRuntime* instr) { - CallRuntime(instr->function(), instr->arity(), instr); -} - - -void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { - Register function = ToRegister(instr->function()); - Register code_object = ToRegister(instr->code_object()); - __ Addu(code_object, code_object, - Operand(Code::kHeaderSize - kHeapObjectTag)); - __ sw(code_object, - FieldMemOperand(function, JSFunction::kCodeEntryOffset)); -} - - -void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { - Register result = ToRegister(instr->result()); - Register base = ToRegister(instr->base_object()); - if (instr->offset()->IsConstantOperand()) { - LConstantOperand* offset = LConstantOperand::cast(instr->offset()); - __ Addu(result, base, Operand(ToInteger32(offset))); - } else { - Register offset = ToRegister(instr->offset()); - __ Addu(result, base, offset); - } -} - - -void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { - Representation representation = instr->representation(); - - Register object = ToRegister(instr->object()); - Register scratch = scratch0(); - HObjectAccess access = instr->hydrogen()->access(); - int offset = access.offset(); - - if (access.IsExternalMemory()) { - Register value = ToRegister(instr->value()); - MemOperand operand = MemOperand(object, offset); - __ Store(value, operand, representation); - return; - } - - __ AssertNotSmi(object); - - DCHECK(!representation.IsSmi() || - !instr->value()->IsConstantOperand() || - IsSmi(LConstantOperand::cast(instr->value()))); - if (representation.IsDouble()) { - DCHECK(access.IsInobject()); - DCHECK(!instr->hydrogen()->has_transition()); - DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); - DoubleRegister value = ToDoubleRegister(instr->value()); - __ Sdc1(value, FieldMemOperand(object, offset)); - return; - } - - if (instr->hydrogen()->has_transition()) { - Handle transition = instr->hydrogen()->transition_map(); - AddDeprecationDependency(transition); - __ li(scratch, Operand(transition)); - __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); - if (instr->hydrogen()->NeedsWriteBarrierForMap()) { - Register temp = ToRegister(instr->temp()); - // Update the write barrier for the map field. - __ RecordWriteForMap(object, - scratch, - temp, - GetRAState(), - kSaveFPRegs); - } - } - - // Do the store. - Register value = ToRegister(instr->value()); - if (access.IsInobject()) { - MemOperand operand = FieldMemOperand(object, offset); - __ Store(value, operand, representation); - if (instr->hydrogen()->NeedsWriteBarrier()) { - // Update the write barrier for the object for in-object properties. 
- __ RecordWriteField(object, - offset, - value, - scratch, - GetRAState(), - kSaveFPRegs, - EMIT_REMEMBERED_SET, - instr->hydrogen()->SmiCheckForWriteBarrier(), - instr->hydrogen()->PointersToHereCheckForValue()); - } - } else { - __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset)); - MemOperand operand = FieldMemOperand(scratch, offset); - __ Store(value, operand, representation); - if (instr->hydrogen()->NeedsWriteBarrier()) { - // Update the write barrier for the properties array. - // object is used as a scratch register. - __ RecordWriteField(scratch, - offset, - value, - object, - GetRAState(), - kSaveFPRegs, - EMIT_REMEMBERED_SET, - instr->hydrogen()->SmiCheckForWriteBarrier(), - instr->hydrogen()->PointersToHereCheckForValue()); - } - } -} - - -void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { - Condition cc = instr->hydrogen()->allow_equality() ? hi : hs; - Operand operand(0); - Register reg; - if (instr->index()->IsConstantOperand()) { - operand = ToOperand(instr->index()); - reg = ToRegister(instr->length()); - cc = CommuteCondition(cc); - } else { - reg = ToRegister(instr->index()); - operand = ToOperand(instr->length()); - } - if (FLAG_debug_code && instr->hydrogen()->skip_check()) { - Label done; - __ Branch(&done, NegateCondition(cc), reg, operand); - __ stop("eliminated bounds check failed"); - __ bind(&done); - } else { - DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds, reg, operand); - } -} - - -void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { - Register external_pointer = ToRegister(instr->elements()); - Register key = no_reg; - ElementsKind elements_kind = instr->elements_kind(); - bool key_is_constant = instr->key()->IsConstantOperand(); - int constant_key = 0; - if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort(kArrayIndexConstantValueTooBig); - } - } else { - key = ToRegister(instr->key()); - } - int element_size_shift = ElementsKindToShiftSize(elements_kind); - int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) - ? (element_size_shift - kSmiTagSize) : element_size_shift; - int base_offset = instr->base_offset(); - - if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) { - Register address = scratch0(); - FPURegister value(ToDoubleRegister(instr->value())); - if (key_is_constant) { - if (constant_key != 0) { - __ Addu(address, external_pointer, - Operand(constant_key << element_size_shift)); - } else { - address = external_pointer; - } - } else { - __ Lsa(address, external_pointer, key, shift_size); - } - - if (elements_kind == FLOAT32_ELEMENTS) { - __ cvt_s_d(double_scratch0(), value); - __ swc1(double_scratch0(), MemOperand(address, base_offset)); - } else { // Storing doubles, not floats. 
- __ Sdc1(value, MemOperand(address, base_offset)); - } - } else { - Register value(ToRegister(instr->value())); - MemOperand mem_operand = PrepareKeyedOperand( - key, external_pointer, key_is_constant, constant_key, - element_size_shift, shift_size, - base_offset); - switch (elements_kind) { - case UINT8_ELEMENTS: - case UINT8_CLAMPED_ELEMENTS: - case INT8_ELEMENTS: - __ sb(value, mem_operand); - break; - case INT16_ELEMENTS: - case UINT16_ELEMENTS: - __ sh(value, mem_operand); - break; - case INT32_ELEMENTS: - case UINT32_ELEMENTS: - __ sw(value, mem_operand); - break; - case FLOAT32_ELEMENTS: - case FLOAT64_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case DICTIONARY_ELEMENTS: - case FAST_SLOPPY_ARGUMENTS_ELEMENTS: - case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: - case FAST_STRING_WRAPPER_ELEMENTS: - case SLOW_STRING_WRAPPER_ELEMENTS: - case NO_ELEMENTS: - UNREACHABLE(); - break; - } - } -} - - -void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { - DoubleRegister value = ToDoubleRegister(instr->value()); - Register elements = ToRegister(instr->elements()); - Register scratch = scratch0(); - Register scratch_1 = scratch1(); - DoubleRegister double_scratch = double_scratch0(); - bool key_is_constant = instr->key()->IsConstantOperand(); - int base_offset = instr->base_offset(); - Label not_nan, done; - - // Calculate the effective address of the slot in the array to store the - // double value. - int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); - if (key_is_constant) { - int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort(kArrayIndexConstantValueTooBig); - } - __ Addu(scratch, elements, - Operand((constant_key << element_size_shift) + base_offset)); - } else { - int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) - ? (element_size_shift - kSmiTagSize) : element_size_shift; - __ Addu(scratch, elements, Operand(base_offset)); - __ sll(at, ToRegister(instr->key()), shift_size); - __ Addu(scratch, scratch, at); - } - - if (instr->NeedsCanonicalization()) { - Label is_nan; - // Check for NaN. All NaNs must be canonicalized. - __ BranchF(NULL, &is_nan, eq, value, value); - __ Branch(¬_nan); - - // Only load canonical NaN if the comparison above set the overflow. - __ bind(&is_nan); - __ LoadRoot(scratch_1, Heap::kNanValueRootIndex); - __ Ldc1(double_scratch, - FieldMemOperand(scratch_1, HeapNumber::kValueOffset)); - __ Sdc1(double_scratch, MemOperand(scratch, 0)); - __ Branch(&done); - } - - __ bind(¬_nan); - __ Sdc1(value, MemOperand(scratch, 0)); - __ bind(&done); -} - - -void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { - Register value = ToRegister(instr->value()); - Register elements = ToRegister(instr->elements()); - Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) - : no_reg; - Register scratch = scratch0(); - Register store_base = scratch; - int offset = instr->base_offset(); - - // Do the store. 
- if (instr->key()->IsConstantOperand()) { - DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); - LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); - offset += ToInteger32(const_operand) * kPointerSize; - store_base = elements; - } else { - // Even though the HLoadKeyed instruction forces the input - // representation for the key to be an integer, the input gets replaced - // during bound check elimination with the index argument to the bounds - // check, which can be tagged, so that case must be handled here, too. - if (instr->hydrogen()->key()->representation().IsSmi()) { - __ Lsa(scratch, elements, key, kPointerSizeLog2 - kSmiTagSize); - } else { - __ Lsa(scratch, elements, key, kPointerSizeLog2); - } - } - __ sw(value, MemOperand(store_base, offset)); - - if (instr->hydrogen()->NeedsWriteBarrier()) { - SmiCheck check_needed = - instr->hydrogen()->value()->type().IsHeapObject() - ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; - // Compute address of modified element and store it into key register. - __ Addu(key, store_base, Operand(offset)); - __ RecordWrite(elements, - key, - value, - GetRAState(), - kSaveFPRegs, - EMIT_REMEMBERED_SET, - check_needed, - instr->hydrogen()->PointersToHereCheckForValue()); - } -} - - -void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { - // By cases: external, fast double - if (instr->is_fixed_typed_array()) { - DoStoreKeyedExternalArray(instr); - } else if (instr->hydrogen()->value()->representation().IsDouble()) { - DoStoreKeyedFixedDoubleArray(instr); - } else { - DoStoreKeyedFixedArray(instr); - } -} - - -void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) { - class DeferredMaybeGrowElements final : public LDeferredCode { - public: - DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr) - : LDeferredCode(codegen), instr_(instr) {} - void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LMaybeGrowElements* instr_; - }; - - Register result = v0; - DeferredMaybeGrowElements* deferred = - new (zone()) DeferredMaybeGrowElements(this, instr); - LOperand* key = instr->key(); - LOperand* current_capacity = instr->current_capacity(); - - DCHECK(instr->hydrogen()->key()->representation().IsInteger32()); - DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32()); - DCHECK(key->IsConstantOperand() || key->IsRegister()); - DCHECK(current_capacity->IsConstantOperand() || - current_capacity->IsRegister()); - - if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) { - int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); - int32_t constant_capacity = - ToInteger32(LConstantOperand::cast(current_capacity)); - if (constant_key >= constant_capacity) { - // Deferred case. 
- __ jmp(deferred->entry()); - } - } else if (key->IsConstantOperand()) { - int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); - __ Branch(deferred->entry(), le, ToRegister(current_capacity), - Operand(constant_key)); - } else if (current_capacity->IsConstantOperand()) { - int32_t constant_capacity = - ToInteger32(LConstantOperand::cast(current_capacity)); - __ Branch(deferred->entry(), ge, ToRegister(key), - Operand(constant_capacity)); - } else { - __ Branch(deferred->entry(), ge, ToRegister(key), - Operand(ToRegister(current_capacity))); - } - - if (instr->elements()->IsRegister()) { - __ mov(result, ToRegister(instr->elements())); - } else { - __ lw(result, ToMemOperand(instr->elements())); - } - - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) { - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - Register result = v0; - __ mov(result, zero_reg); - - // We have to call a stub. - { - PushSafepointRegistersScope scope(this); - if (instr->object()->IsRegister()) { - __ mov(result, ToRegister(instr->object())); - } else { - __ lw(result, ToMemOperand(instr->object())); - } - - LOperand* key = instr->key(); - if (key->IsConstantOperand()) { - LConstantOperand* constant_key = LConstantOperand::cast(key); - int32_t int_key = ToInteger32(constant_key); - if (Smi::IsValid(int_key)) { - __ li(a3, Operand(Smi::FromInt(int_key))); - } else { - Abort(kArrayIndexConstantValueTooBig); - } - } else { - Label is_smi; - __ SmiTagCheckOverflow(a3, ToRegister(key), at); - // Deopt if the key is outside Smi range. The stub expects Smi and would - // bump the elements into dictionary mode (and trigger a deopt) anyways. - __ BranchOnNoOverflow(&is_smi, at); - RestoreRegistersStateStub stub(isolate()); - __ push(ra); - __ CallStub(&stub); - DeoptimizeIf(al, instr, DeoptimizeReason::kOverflow); - __ bind(&is_smi); - } - - GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind()); - __ mov(a0, result); - __ CallStub(&stub); - RecordSafepointWithLazyDeopt( - instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - __ StoreToSafepointRegisterSlot(result, result); - } - - // Deopt on smi, which means the elements array changed to dictionary mode. 
- __ SmiTst(result, at); - DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg)); -} - - -void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { - UNREACHABLE(); -} - - -void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { - Register object = ToRegister(instr->object()); - Register temp = ToRegister(instr->temp()); - Label no_memento_found; - __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); - DeoptimizeIf(al, instr); - __ bind(&no_memento_found); -} - - -void LCodeGen::DoStringAdd(LStringAdd* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - DCHECK(ToRegister(instr->left()).is(a1)); - DCHECK(ToRegister(instr->right()).is(a0)); - StringAddStub stub(isolate(), - instr->hydrogen()->flags(), - instr->hydrogen()->pretenure_flag()); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); -} - - -void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { - class DeferredStringCharCodeAt final : public LDeferredCode { - public: - DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LStringCharCodeAt* instr_; - }; - - DeferredStringCharCodeAt* deferred = - new(zone()) DeferredStringCharCodeAt(this, instr); - StringCharLoadGenerator::Generate(masm(), - ToRegister(instr->string()), - ToRegister(instr->index()), - ToRegister(instr->result()), - deferred->entry()); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { - Register string = ToRegister(instr->string()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - __ mov(result, zero_reg); - - PushSafepointRegistersScope scope(this); - __ push(string); - // Push the index as a smi. This is safe because of the checks in - // DoStringCharCodeAt above. 
- if (instr->index()->IsConstantOperand()) { - int const_index = ToInteger32(LConstantOperand::cast(instr->index())); - __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index))); - __ push(scratch); - } else { - Register index = ToRegister(instr->index()); - __ SmiTag(index); - __ push(index); - } - CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr, - instr->context()); - __ AssertSmi(v0); - __ SmiUntag(v0); - __ StoreToSafepointRegisterSlot(v0, result); -} - - -void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { - class DeferredStringCharFromCode final : public LDeferredCode { - public: - DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { - codegen()->DoDeferredStringCharFromCode(instr_); - } - LInstruction* instr() override { return instr_; } - - private: - LStringCharFromCode* instr_; - }; - - DeferredStringCharFromCode* deferred = - new(zone()) DeferredStringCharFromCode(this, instr); - - DCHECK(instr->hydrogen()->value()->representation().IsInteger32()); - Register char_code = ToRegister(instr->char_code()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - DCHECK(!char_code.is(result)); - - __ Branch(deferred->entry(), hi, - char_code, Operand(String::kMaxOneByteCharCode)); - __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); - __ Lsa(result, result, char_code, kPointerSizeLog2); - __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize)); - __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); - __ Branch(deferred->entry(), eq, result, Operand(scratch)); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { - Register char_code = ToRegister(instr->char_code()); - Register result = ToRegister(instr->result()); - - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. 
- __ mov(result, zero_reg); - - PushSafepointRegistersScope scope(this); - __ SmiTag(char_code); - __ push(char_code); - CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr, - instr->context()); - __ StoreToSafepointRegisterSlot(v0, result); -} - - -void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { - LOperand* input = instr->value(); - DCHECK(input->IsRegister() || input->IsStackSlot()); - LOperand* output = instr->result(); - DCHECK(output->IsDoubleRegister()); - FPURegister single_scratch = double_scratch0().low(); - if (input->IsStackSlot()) { - Register scratch = scratch0(); - __ lw(scratch, ToMemOperand(input)); - __ mtc1(scratch, single_scratch); - } else { - __ mtc1(ToRegister(input), single_scratch); - } - __ cvt_d_w(ToDoubleRegister(output), single_scratch); -} - - -void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { - LOperand* input = instr->value(); - LOperand* output = instr->result(); - - __ Cvt_d_uw(ToDoubleRegister(output), ToRegister(input), f22); -} - - -void LCodeGen::DoNumberTagI(LNumberTagI* instr) { - class DeferredNumberTagI final : public LDeferredCode { - public: - DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { - codegen()->DoDeferredNumberTagIU(instr_, - instr_->value(), - instr_->temp1(), - instr_->temp2(), - SIGNED_INT32); - } - LInstruction* instr() override { return instr_; } - - private: - LNumberTagI* instr_; - }; - - Register src = ToRegister(instr->value()); - Register dst = ToRegister(instr->result()); - Register overflow = scratch0(); - - DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr); - __ SmiTagCheckOverflow(dst, src, overflow); - __ BranchOnOverflow(deferred->entry(), overflow); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoNumberTagU(LNumberTagU* instr) { - class DeferredNumberTagU final : public LDeferredCode { - public: - DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { - codegen()->DoDeferredNumberTagIU(instr_, - instr_->value(), - instr_->temp1(), - instr_->temp2(), - UNSIGNED_INT32); - } - LInstruction* instr() override { return instr_; } - - private: - LNumberTagU* instr_; - }; - - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - - DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr); - __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue)); - __ SmiTag(result, input); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, - LOperand* value, - LOperand* temp1, - LOperand* temp2, - IntegerSignedness signedness) { - Label done, slow; - Register src = ToRegister(value); - Register dst = ToRegister(instr->result()); - Register tmp1 = scratch0(); - Register tmp2 = ToRegister(temp1); - Register tmp3 = ToRegister(temp2); - DoubleRegister dbl_scratch = double_scratch0(); - - if (signedness == SIGNED_INT32) { - // There was overflow, so bits 30 and 31 of the original integer - // disagree. Try to allocate a heap number in new space and store - // the value in there. If that fails, call the runtime system. 
- if (dst.is(src)) { - __ SmiUntag(src, dst); - __ Xor(src, src, Operand(0x80000000)); - } - __ mtc1(src, dbl_scratch); - __ cvt_d_w(dbl_scratch, dbl_scratch); - } else { - __ Cvt_d_uw(dbl_scratch, src, f22); - } - - if (FLAG_inline_new) { - __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow); - __ Branch(&done); - } - - // Slow case: Call the runtime system to do the number allocation. - __ bind(&slow); - { - // TODO(3095996): Put a valid pointer value in the stack slot where the - // result register is stored, as this register is in the pointer map, but - // contains an integer value. - __ mov(dst, zero_reg); - - // Preserve the value of all registers. - PushSafepointRegistersScope scope(this); - // Reset the context register. - if (!dst.is(cp)) { - __ mov(cp, zero_reg); - } - __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); - RecordSafepointWithRegisters( - instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(v0, dst); - } - - // Done. Put the value in dbl_scratch into the value of the allocated heap - // number. - __ bind(&done); - __ Sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset)); -} - - -void LCodeGen::DoNumberTagD(LNumberTagD* instr) { - class DeferredNumberTagD final : public LDeferredCode { - public: - DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { codegen()->DoDeferredNumberTagD(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LNumberTagD* instr_; - }; - - DoubleRegister input_reg = ToDoubleRegister(instr->value()); - Register scratch = scratch0(); - Register reg = ToRegister(instr->result()); - Register temp1 = ToRegister(instr->temp()); - Register temp2 = ToRegister(instr->temp2()); - - DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); - if (FLAG_inline_new) { - __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry()); - } else { - __ Branch(deferred->entry()); - } - __ bind(deferred->exit()); - __ Sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset)); - // Now that we have finished with the object's real address tag it -} - - -void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - Register reg = ToRegister(instr->result()); - __ mov(reg, zero_reg); - - PushSafepointRegistersScope scope(this); - // Reset the context register. 
- if (!reg.is(cp)) { - __ mov(cp, zero_reg); - } - __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); - RecordSafepointWithRegisters( - instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(v0, reg); -} - - -void LCodeGen::DoSmiTag(LSmiTag* instr) { - HChange* hchange = instr->hydrogen(); - Register input = ToRegister(instr->value()); - Register output = ToRegister(instr->result()); - if (hchange->CheckFlag(HValue::kCanOverflow) && - hchange->value()->CheckFlag(HValue::kUint32)) { - __ And(at, input, Operand(0xc0000000)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, at, Operand(zero_reg)); - } - if (hchange->CheckFlag(HValue::kCanOverflow) && - !hchange->value()->CheckFlag(HValue::kUint32)) { - __ SmiTagCheckOverflow(output, input, at); - DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, at, Operand(zero_reg)); - } else { - __ SmiTag(output, input); - } -} - - -void LCodeGen::DoSmiUntag(LSmiUntag* instr) { - Register scratch = scratch0(); - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - if (instr->needs_check()) { - STATIC_ASSERT(kHeapObjectTag == 1); - // If the input is a HeapObject, value of scratch won't be zero. - __ And(scratch, input, Operand(kHeapObjectTag)); - __ SmiUntag(result, input); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, scratch, - Operand(zero_reg)); - } else { - __ SmiUntag(result, input); - } -} - - -void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, - DoubleRegister result_reg, - NumberUntagDMode mode) { - bool can_convert_undefined_to_nan = instr->truncating(); - bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); - - Register scratch = scratch0(); - Label convert, load_smi, done; - if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { - // Smi check. - __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); - // Heap number map check. - __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); - __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); - if (can_convert_undefined_to_nan) { - __ Branch(&convert, ne, scratch, Operand(at)); - } else { - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch, - Operand(at)); - } - // Load heap number. - __ Ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); - if (deoptimize_on_minus_zero) { - __ mfc1(at, result_reg.low()); - __ Branch(&done, ne, at, Operand(zero_reg)); - __ Mfhc1(scratch, result_reg); - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, scratch, - Operand(HeapNumber::kSignMask)); - } - __ Branch(&done); - if (can_convert_undefined_to_nan) { - __ bind(&convert); - // Convert undefined (and hole) to NaN. 
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined, - input_reg, Operand(at)); - __ LoadRoot(scratch, Heap::kNanValueRootIndex); - __ Ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset)); - __ Branch(&done); - } - } else { - __ SmiUntag(scratch, input_reg); - DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); - } - // Smi to double register conversion - __ bind(&load_smi); - // scratch: untagged value of input_reg - __ mtc1(scratch, result_reg); - __ cvt_d_w(result_reg, result_reg); - __ bind(&done); -} - - -void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { - Register input_reg = ToRegister(instr->value()); - Register scratch1 = scratch0(); - Register scratch2 = ToRegister(instr->temp()); - DoubleRegister double_scratch = double_scratch0(); - DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2()); - - DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2)); - DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1)); - - Label done; - - // The input is a tagged HeapObject. - // Heap number map check. - __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset)); - __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); - // This 'at' value and scratch1 map value are used for tests in both clauses - // of the if. - - if (instr->truncating()) { - Label truncate; - __ Branch(USE_DELAY_SLOT, &truncate, eq, scratch1, Operand(at)); - __ mov(scratch2, input_reg); // In delay slot. - __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball, scratch1, - Operand(ODDBALL_TYPE)); - __ bind(&truncate); - __ TruncateHeapNumberToI(input_reg, scratch2); - } else { - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch1, - Operand(at)); - - // Load the double value. - __ Ldc1(double_scratch, - FieldMemOperand(input_reg, HeapNumber::kValueOffset)); - - Register except_flag = scratch2; - __ EmitFPUTruncate(kRoundToZero, - input_reg, - double_scratch, - scratch1, - double_scratch2, - except_flag, - kCheckForInexactConversion); - - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag, - Operand(zero_reg)); - - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - __ Branch(&done, ne, input_reg, Operand(zero_reg)); - - __ Mfhc1(scratch1, double_scratch); - __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1, - Operand(zero_reg)); - } - } - __ bind(&done); -} - - -void LCodeGen::DoTaggedToI(LTaggedToI* instr) { - class DeferredTaggedToI final : public LDeferredCode { - public: - DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { codegen()->DoDeferredTaggedToI(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LTaggedToI* instr_; - }; - - LOperand* input = instr->value(); - DCHECK(input->IsRegister()); - DCHECK(input->Equals(instr->result())); - - Register input_reg = ToRegister(input); - - if (instr->hydrogen()->value()->representation().IsSmi()) { - __ SmiUntag(input_reg); - } else { - DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); - - // Let the deferred code handle the HeapObject case. - __ JumpIfNotSmi(input_reg, deferred->entry()); - - // Smi to int32 conversion. 
- __ SmiUntag(input_reg); - __ bind(deferred->exit()); - } -} - - -void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { - LOperand* input = instr->value(); - DCHECK(input->IsRegister()); - LOperand* result = instr->result(); - DCHECK(result->IsDoubleRegister()); - - Register input_reg = ToRegister(input); - DoubleRegister result_reg = ToDoubleRegister(result); - - HValue* value = instr->hydrogen()->value(); - NumberUntagDMode mode = value->representation().IsSmi() - ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; - - EmitNumberUntagD(instr, input_reg, result_reg, mode); -} - - -void LCodeGen::DoDoubleToI(LDoubleToI* instr) { - Register result_reg = ToRegister(instr->result()); - Register scratch1 = scratch0(); - DoubleRegister double_input = ToDoubleRegister(instr->value()); - - if (instr->truncating()) { - __ TruncateDoubleToI(result_reg, double_input); - } else { - Register except_flag = LCodeGen::scratch1(); - - __ EmitFPUTruncate(kRoundToMinusInf, - result_reg, - double_input, - scratch1, - double_scratch0(), - except_flag, - kCheckForInexactConversion); - - // Deopt if the operation did not succeed (except_flag != 0). - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag, - Operand(zero_reg)); - - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label done; - __ Branch(&done, ne, result_reg, Operand(zero_reg)); - __ Mfhc1(scratch1, double_input); - __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1, - Operand(zero_reg)); - __ bind(&done); - } - } -} - - -void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { - Register result_reg = ToRegister(instr->result()); - Register scratch1 = LCodeGen::scratch0(); - DoubleRegister double_input = ToDoubleRegister(instr->value()); - - if (instr->truncating()) { - __ TruncateDoubleToI(result_reg, double_input); - } else { - Register except_flag = LCodeGen::scratch1(); - - __ EmitFPUTruncate(kRoundToMinusInf, - result_reg, - double_input, - scratch1, - double_scratch0(), - except_flag, - kCheckForInexactConversion); - - // Deopt if the operation did not succeed (except_flag != 0). 
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag, - Operand(zero_reg)); - - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label done; - __ Branch(&done, ne, result_reg, Operand(zero_reg)); - __ Mfhc1(scratch1, double_input); - __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1, - Operand(zero_reg)); - __ bind(&done); - } - } - __ SmiTagCheckOverflow(result_reg, result_reg, scratch1); - DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, scratch1, - Operand(zero_reg)); -} - - -void LCodeGen::DoCheckSmi(LCheckSmi* instr) { - LOperand* input = instr->value(); - __ SmiTst(ToRegister(input), at); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, at, Operand(zero_reg)); -} - - -void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - LOperand* input = instr->value(); - __ SmiTst(ToRegister(input), at); - DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg)); - } -} - - -void LCodeGen::DoCheckArrayBufferNotNeutered( - LCheckArrayBufferNotNeutered* instr) { - Register view = ToRegister(instr->view()); - Register scratch = scratch0(); - - __ lw(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset)); - __ lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset)); - __ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift); - DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, at, - Operand(zero_reg)); -} - - -void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { - Register input = ToRegister(instr->value()); - Register scratch = scratch0(); - - __ GetObjectType(input, scratch, scratch); - - if (instr->hydrogen()->is_interval_check()) { - InstanceType first; - InstanceType last; - instr->hydrogen()->GetCheckInterval(&first, &last); - - // If there is only one type in the interval check for equality. - if (first == last) { - DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType, scratch, - Operand(first)); - } else { - DeoptimizeIf(lo, instr, DeoptimizeReason::kWrongInstanceType, scratch, - Operand(first)); - // Omit check for the last type. - if (last != LAST_TYPE) { - DeoptimizeIf(hi, instr, DeoptimizeReason::kWrongInstanceType, scratch, - Operand(last)); - } - } - } else { - uint8_t mask; - uint8_t tag; - instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); - - if (base::bits::IsPowerOfTwo32(mask)) { - DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); - __ And(at, scratch, mask); - DeoptimizeIf(tag == 0 ? 
ne : eq, instr,
-                 DeoptimizeReason::kWrongInstanceType, at, Operand(zero_reg));
-    } else {
-      __ And(scratch, scratch, Operand(mask));
-      DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType, scratch,
-                   Operand(tag));
-    }
-  }
-}
-
-
-void LCodeGen::DoCheckValue(LCheckValue* instr) {
-  Register reg = ToRegister(instr->value());
-  Handle<HeapObject> object = instr->hydrogen()->object().handle();
-  AllowDeferredHandleDereference smi_check;
-  if (isolate()->heap()->InNewSpace(*object)) {
-    Register reg = ToRegister(instr->value());
-    Handle<Cell> cell = isolate()->factory()->NewCell(object);
-    __ li(at, Operand(cell));
-    __ lw(at, FieldMemOperand(at, Cell::kValueOffset));
-    DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg, Operand(at));
-  } else {
-    DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg,
-                 Operand(object));
-  }
-}
-
-
-void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
-  Label deopt, done;
-  // If the map is not deprecated the migration attempt does not make sense.
-  __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
-  __ lw(scratch0(), FieldMemOperand(scratch0(), Map::kBitField3Offset));
-  __ And(at, scratch0(), Operand(Map::Deprecated::kMask));
-  __ Branch(&deopt, eq, at, Operand(zero_reg));
-
-  {
-    PushSafepointRegistersScope scope(this);
-    __ push(object);
-    __ mov(cp, zero_reg);
-    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
-    RecordSafepointWithRegisters(
-        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
-    __ StoreToSafepointRegisterSlot(v0, scratch0());
-  }
-  __ SmiTst(scratch0(), at);
-  __ Branch(&done, ne, at, Operand(zero_reg));
-
-  __ bind(&deopt);
-  // In case of "al" condition the operands are not used so just pass zero_reg
-  // there.
-  DeoptimizeIf(al, instr, DeoptimizeReason::kInstanceMigrationFailed, zero_reg,
-               Operand(zero_reg));
-
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
-  class DeferredCheckMaps final : public LDeferredCode {
-   public:
-    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
-        : LDeferredCode(codegen), instr_(instr), object_(object) {
-      SetExit(check_maps());
-    }
-    void Generate() override {
-      codegen()->DoDeferredInstanceMigration(instr_, object_);
-    }
-    Label* check_maps() { return &check_maps_; }
-    LInstruction* instr() override { return instr_; }
-
-   private:
-    LCheckMaps* instr_;
-    Label check_maps_;
-    Register object_;
-  };
-
-  if (instr->hydrogen()->IsStabilityCheck()) {
-    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
-    for (int i = 0; i < maps->size(); ++i) {
-      AddStabilityDependency(maps->at(i).handle());
-    }
-    return;
-  }
-
-  Register map_reg = scratch0();
-  LOperand* input = instr->value();
-  DCHECK(input->IsRegister());
-  Register reg = ToRegister(input);
-  __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
-
-  DeferredCheckMaps* deferred = NULL;
-  if (instr->hydrogen()->HasMigrationTarget()) {
-    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
-    __ bind(deferred->check_maps());
-  }
-
-  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
-  Label success;
-  for (int i = 0; i < maps->size() - 1; i++) {
-    Handle<Map> map = maps->at(i).handle();
-    __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
-  }
-  Handle<Map> map = maps->at(maps->size() - 1).handle();
-  // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
- if (instr->hydrogen()->HasMigrationTarget()) { - __ Branch(deferred->entry(), ne, map_reg, Operand(map)); - } else { - DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map_reg, Operand(map)); - } - - __ bind(&success); -} - - -void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { - DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); - Register result_reg = ToRegister(instr->result()); - DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); - __ ClampDoubleToUint8(result_reg, value_reg, temp_reg); -} - - -void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { - Register unclamped_reg = ToRegister(instr->unclamped()); - Register result_reg = ToRegister(instr->result()); - __ ClampUint8(result_reg, unclamped_reg); -} - - -void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { - Register scratch = scratch0(); - Register input_reg = ToRegister(instr->unclamped()); - Register result_reg = ToRegister(instr->result()); - DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); - Label is_smi, done, heap_number; - - // Both smi and heap number cases are handled. - __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi); - - // Check for heap number - __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); - __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map())); - - // Check for undefined. Undefined is converted to zero for clamping - // conversions. - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined, input_reg, - Operand(factory()->undefined_value())); - __ mov(result_reg, zero_reg); - __ jmp(&done); - - // Heap number - __ bind(&heap_number); - __ Ldc1(double_scratch0(), - FieldMemOperand(input_reg, HeapNumber::kValueOffset)); - __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg); - __ jmp(&done); - - __ bind(&is_smi); - __ ClampUint8(result_reg, scratch); - - __ bind(&done); -} - - -void LCodeGen::DoAllocate(LAllocate* instr) { - class DeferredAllocate final : public LDeferredCode { - public: - DeferredAllocate(LCodeGen* codegen, LAllocate* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { codegen()->DoDeferredAllocate(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LAllocate* instr_; - }; - - DeferredAllocate* deferred = - new(zone()) DeferredAllocate(this, instr); - - Register result = ToRegister(instr->result()); - Register scratch = ToRegister(instr->temp1()); - Register scratch2 = ToRegister(instr->temp2()); - - // Allocate memory for the object. 
-  AllocationFlags flags = NO_ALLOCATION_FLAGS;
-  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
-    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
-  }
-  if (instr->hydrogen()->IsOldSpaceAllocation()) {
-    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
-    flags = static_cast<AllocationFlags>(flags | PRETENURE);
-  }
-
-  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
-    flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
-  }
-  DCHECK(!instr->hydrogen()->IsAllocationFolded());
-
-  if (instr->size()->IsConstantOperand()) {
-    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-    CHECK(size <= kMaxRegularHeapObjectSize);
-    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
-  } else {
-    Register size = ToRegister(instr->size());
-    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
-  }
-
-  __ bind(deferred->exit());
-
-  if (instr->hydrogen()->MustPrefillWithFiller()) {
-    STATIC_ASSERT(kHeapObjectTag == 1);
-    if (instr->size()->IsConstantOperand()) {
-      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-      __ li(scratch, Operand(size - kHeapObjectTag));
-    } else {
-      __ Subu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
-    }
-    __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
-    Label loop;
-    __ bind(&loop);
-    __ Subu(scratch, scratch, Operand(kPointerSize));
-    __ Addu(at, result, Operand(scratch));
-    __ sw(scratch2, MemOperand(at));
-    __ Branch(&loop, ge, scratch, Operand(zero_reg));
-  }
-}
-
-
-void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
-  Register result = ToRegister(instr->result());
-
-  // TODO(3095996): Get rid of this. For now, we need to make the
-  // result register contain a valid pointer because it is already
-  // contained in the register pointer map.
-  __ mov(result, zero_reg);
-
-  PushSafepointRegistersScope scope(this);
-  if (instr->size()->IsRegister()) {
-    Register size = ToRegister(instr->size());
-    DCHECK(!size.is(result));
-    __ SmiTag(size);
-    __ push(size);
-  } else {
-    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-    if (size >= 0 && size <= Smi::kMaxValue) {
-      __ Push(Smi::FromInt(size));
-    } else {
-      // We should never get here at runtime => abort
-      __ stop("invalid allocation size");
-      return;
-    }
-  }
-
-  int flags = AllocateDoubleAlignFlag::encode(
-      instr->hydrogen()->MustAllocateDoubleAligned());
-  if (instr->hydrogen()->IsOldSpaceAllocation()) {
-    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
-    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
-  } else {
-    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
-  }
-  __ Push(Smi::FromInt(flags));
-
-  CallRuntimeFromDeferred(
-      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
-  __ StoreToSafepointRegisterSlot(v0, result);
-
-  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
-    AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
-    if (instr->hydrogen()->IsOldSpaceAllocation()) {
-      DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
-      allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
-    }
-    // If the allocation folding dominator allocate triggered a GC, allocation
-    // happened in the runtime. We have to reset the top pointer to virtually
-    // undo the allocation.
-    ExternalReference allocation_top =
-        AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
-    Register top_address = scratch0();
-    __ Subu(v0, v0, Operand(kHeapObjectTag));
-    __ li(top_address, Operand(allocation_top));
-    __ sw(v0, MemOperand(top_address));
-    __ Addu(v0, v0, Operand(kHeapObjectTag));
-  }
-}
-
-void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
-  DCHECK(instr->hydrogen()->IsAllocationFolded());
-  DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
-  Register result = ToRegister(instr->result());
-  Register scratch1 = ToRegister(instr->temp1());
-  Register scratch2 = ToRegister(instr->temp2());
-
-  AllocationFlags flags = ALLOCATION_FOLDED;
-  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
-    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
-  }
-  if (instr->hydrogen()->IsOldSpaceAllocation()) {
-    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
-    flags = static_cast<AllocationFlags>(flags | PRETENURE);
-  }
-  if (instr->size()->IsConstantOperand()) {
-    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-    CHECK(size <= kMaxRegularHeapObjectSize);
-    __ FastAllocate(size, result, scratch1, scratch2, flags);
-  } else {
-    Register size = ToRegister(instr->size());
-    __ FastAllocate(size, result, scratch1, scratch2, flags);
-  }
-}
-
-
-void LCodeGen::DoTypeof(LTypeof* instr) {
-  DCHECK(ToRegister(instr->value()).is(a3));
-  DCHECK(ToRegister(instr->result()).is(v0));
-  Label end, do_call;
-  Register value_register = ToRegister(instr->value());
-  __ JumpIfNotSmi(value_register, &do_call);
-  __ li(v0, Operand(isolate()->factory()->number_string()));
-  __ jmp(&end);
-  __ bind(&do_call);
-  Callable callable = Builtins::CallableFor(isolate(), Builtins::kTypeof);
-  CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
-  __ bind(&end);
-}
-
-
-void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
-  Register input = ToRegister(instr->value());
-
-  Register cmp1 = no_reg;
-  Operand cmp2 = Operand(no_reg);
-
-  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
-                                                  instr->FalseLabel(chunk_),
-                                                  input,
-                                                  instr->type_literal(),
-                                                  &cmp1,
-                                                  &cmp2);
-
-  DCHECK(cmp1.is_valid());
-  DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid());
-
-  if (final_branch_condition != kNoCondition) {
-    EmitBranch(instr, final_branch_condition, cmp1, cmp2);
-  }
-}
-
-
-Condition LCodeGen::EmitTypeofIs(Label* true_label,
-                                 Label* false_label,
-                                 Register input,
-                                 Handle<String> type_name,
-                                 Register* cmp1,
-                                 Operand* cmp2) {
-  // This function utilizes the delay slot heavily. This is used to load
-  // values that are always usable without depending on the type of the input
-  // register.
- Condition final_branch_condition = kNoCondition; - Register scratch = scratch0(); - Factory* factory = isolate()->factory(); - if (String::Equals(type_name, factory->number_string())) { - __ JumpIfSmi(input, true_label); - __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset)); - __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); - *cmp1 = input; - *cmp2 = Operand(at); - final_branch_condition = eq; - - } else if (String::Equals(type_name, factory->string_string())) { - __ JumpIfSmi(input, false_label); - __ GetObjectType(input, input, scratch); - *cmp1 = scratch; - *cmp2 = Operand(FIRST_NONSTRING_TYPE); - final_branch_condition = lt; - - } else if (String::Equals(type_name, factory->symbol_string())) { - __ JumpIfSmi(input, false_label); - __ GetObjectType(input, input, scratch); - *cmp1 = scratch; - *cmp2 = Operand(SYMBOL_TYPE); - final_branch_condition = eq; - - } else if (String::Equals(type_name, factory->boolean_string())) { - __ LoadRoot(at, Heap::kTrueValueRootIndex); - __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input)); - __ LoadRoot(at, Heap::kFalseValueRootIndex); - *cmp1 = at; - *cmp2 = Operand(input); - final_branch_condition = eq; - - } else if (String::Equals(type_name, factory->undefined_string())) { - __ LoadRoot(at, Heap::kNullValueRootIndex); - __ Branch(USE_DELAY_SLOT, false_label, eq, at, Operand(input)); - // The first instruction of JumpIfSmi is an And - it is safe in the delay - // slot. - __ JumpIfSmi(input, false_label); - // Check for undetectable objects => true. - __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset)); - __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset)); - __ And(at, at, 1 << Map::kIsUndetectable); - *cmp1 = at; - *cmp2 = Operand(zero_reg); - final_branch_condition = ne; - - } else if (String::Equals(type_name, factory->function_string())) { - __ JumpIfSmi(input, false_label); - __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); - __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); - __ And(scratch, scratch, - Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable))); - *cmp1 = scratch; - *cmp2 = Operand(1 << Map::kIsCallable); - final_branch_condition = eq; - - } else if (String::Equals(type_name, factory->object_string())) { - __ JumpIfSmi(input, false_label); - __ LoadRoot(at, Heap::kNullValueRootIndex); - __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input)); - STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); - __ GetObjectType(input, scratch, scratch1()); - __ Branch(false_label, lt, scratch1(), Operand(FIRST_JS_RECEIVER_TYPE)); - // Check for callable or undetectable objects => false. - __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); - __ And(at, scratch, - Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable))); - *cmp1 = at; - *cmp2 = Operand(zero_reg); - final_branch_condition = eq; - - } else { - *cmp1 = at; - *cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion. - __ Branch(false_label); - } - - return final_branch_condition; -} - - -void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { - if (info()->ShouldEnsureSpaceForLazyDeopt()) { - // Ensure that we have enough space after the previous lazy-bailout - // instruction for patching the code here. 
- int current_pc = masm()->pc_offset(); - if (current_pc < last_lazy_deopt_pc_ + space_needed) { - int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; - DCHECK_EQ(0, padding_size % Assembler::kInstrSize); - while (padding_size > 0) { - __ nop(); - padding_size -= Assembler::kInstrSize; - } - } - } - last_lazy_deopt_pc_ = masm()->pc_offset(); -} - - -void LCodeGen::DoLazyBailout(LLazyBailout* instr) { - last_lazy_deopt_pc_ = masm()->pc_offset(); - DCHECK(instr->HasEnvironment()); - LEnvironment* env = instr->environment(); - RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); - safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); -} - - -void LCodeGen::DoDeoptimize(LDeoptimize* instr) { - Deoptimizer::BailoutType type = instr->hydrogen()->type(); - // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the - // needed return address), even though the implementation of LAZY and EAGER is - // now identical. When LAZY is eventually completely folded into EAGER, remove - // the special case below. - if (info()->IsStub() && type == Deoptimizer::EAGER) { - type = Deoptimizer::LAZY; - } - - DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type, zero_reg, - Operand(zero_reg)); -} - - -void LCodeGen::DoDummy(LDummy* instr) { - // Nothing to see here, move on! -} - - -void LCodeGen::DoDummyUse(LDummyUse* instr) { - // Nothing to see here, move on! -} - - -void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { - PushSafepointRegistersScope scope(this); - LoadContextFromDeferred(instr->context()); - __ CallRuntimeSaveDoubles(Runtime::kStackGuard); - RecordSafepointWithLazyDeopt( - instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - DCHECK(instr->HasEnvironment()); - LEnvironment* env = instr->environment(); - safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); -} - - -void LCodeGen::DoStackCheck(LStackCheck* instr) { - class DeferredStackCheck final : public LDeferredCode { - public: - DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { codegen()->DoDeferredStackCheck(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LStackCheck* instr_; - }; - - DCHECK(instr->HasEnvironment()); - LEnvironment* env = instr->environment(); - // There is no LLazyBailout instruction for stack-checks. We have to - // prepare for lazy deoptimization explicitly here. - if (instr->hydrogen()->is_function_entry()) { - // Perform stack overflow check. - Label done; - __ LoadRoot(at, Heap::kStackLimitRootIndex); - __ Branch(&done, hs, sp, Operand(at)); - DCHECK(instr->context()->IsRegister()); - DCHECK(ToRegister(instr->context()).is(cp)); - CallCode(isolate()->builtins()->StackCheck(), - RelocInfo::CODE_TARGET, - instr); - __ bind(&done); - } else { - DCHECK(instr->hydrogen()->is_backwards_branch()); - // Perform stack overflow check if this goto needs it before jumping. - DeferredStackCheck* deferred_stack_check = - new(zone()) DeferredStackCheck(this, instr); - __ LoadRoot(at, Heap::kStackLimitRootIndex); - __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at)); - EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); - __ bind(instr->done_label()); - deferred_stack_check->SetExit(instr->done_label()); - RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); - // Don't record a deoptimization index for the safepoint here. 
- // This will be done explicitly when emitting call and the safepoint in - // the deferred code. - } -} - - -void LCodeGen::DoOsrEntry(LOsrEntry* instr) { - // This is a pseudo-instruction that ensures that the environment here is - // properly registered for deoptimization and records the assembler's PC - // offset. - LEnvironment* environment = instr->environment(); - - // If the environment were already registered, we would have no way of - // backpatching it with the spill slot operands. - DCHECK(!environment->HasBeenRegistered()); - RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); - - GenerateOsrPrologue(); -} - - -void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { - Register result = ToRegister(instr->result()); - Register object = ToRegister(instr->object()); - - Label use_cache, call_runtime; - DCHECK(object.is(a0)); - __ CheckEnumCache(&call_runtime); - - __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset)); - __ Branch(&use_cache); - - // Get the set of properties to enumerate. - __ bind(&call_runtime); - __ push(object); - CallRuntime(Runtime::kForInEnumerate, instr); - __ bind(&use_cache); -} - - -void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { - Register map = ToRegister(instr->map()); - Register result = ToRegister(instr->result()); - Label load_cache, done; - __ EnumLength(result, map); - __ Branch(&load_cache, ne, result, Operand(Smi::kZero)); - __ li(result, Operand(isolate()->factory()->empty_fixed_array())); - __ jmp(&done); - - __ bind(&load_cache); - __ LoadInstanceDescriptors(map, result); - __ lw(result, - FieldMemOperand(result, DescriptorArray::kEnumCacheBridgeOffset)); - __ lw(result, - FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); - DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache, result, - Operand(zero_reg)); - - __ bind(&done); -} - - -void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { - Register object = ToRegister(instr->value()); - Register map = ToRegister(instr->map()); - __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map, - Operand(scratch0())); -} - - -void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, - Register result, - Register object, - Register index) { - PushSafepointRegistersScope scope(this); - __ Push(object, index); - __ mov(cp, zero_reg); - __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); - RecordSafepointWithRegisters( - instr->pointer_map(), 2, Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(v0, result); -} - - -void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { - class DeferredLoadMutableDouble final : public LDeferredCode { - public: - DeferredLoadMutableDouble(LCodeGen* codegen, - LLoadFieldByIndex* instr, - Register result, - Register object, - Register index) - : LDeferredCode(codegen), - instr_(instr), - result_(result), - object_(object), - index_(index) { - } - void Generate() override { - codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_); - } - LInstruction* instr() override { return instr_; } - - private: - LLoadFieldByIndex* instr_; - Register result_; - Register object_; - Register index_; - }; - - Register object = ToRegister(instr->object()); - Register index = ToRegister(instr->index()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - - DeferredLoadMutableDouble* deferred; - deferred = new(zone()) DeferredLoadMutableDouble( - this, instr, result, object, index); - - 
Label out_of_object, done; - - __ And(scratch, index, Operand(Smi::FromInt(1))); - __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg)); - __ sra(index, index, 1); - - __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg)); - __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize); // In delay slot. - - STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize); - __ Addu(scratch, object, scratch); - __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize)); - - __ Branch(&done); - - __ bind(&out_of_object); - __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); - // Index is equal to negated out of object property index plus 1. - __ Subu(scratch, result, scratch); - __ lw(result, FieldMemOperand(scratch, - FixedArray::kHeaderSize - kPointerSize)); - __ bind(deferred->exit()); - __ bind(&done); -} - -#undef __ - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/mips/lithium-codegen-mips.h b/src/crankshaft/mips/lithium-codegen-mips.h deleted file mode 100644 index 7d471ebbb8..0000000000 --- a/src/crankshaft/mips/lithium-codegen-mips.h +++ /dev/null @@ -1,405 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_MIPS_LITHIUM_CODEGEN_MIPS_H_ -#define V8_CRANKSHAFT_MIPS_LITHIUM_CODEGEN_MIPS_H_ - -#include "src/ast/scopes.h" -#include "src/crankshaft/lithium-codegen.h" -#include "src/crankshaft/mips/lithium-gap-resolver-mips.h" -#include "src/crankshaft/mips/lithium-mips.h" -#include "src/deoptimizer.h" -#include "src/safepoint-table.h" -#include "src/utils.h" - -namespace v8 { -namespace internal { - -// Forward declarations. -class LDeferredCode; -class SafepointGenerator; - -class LCodeGen: public LCodeGenBase { - public: - LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) - : LCodeGenBase(chunk, assembler, info), - jump_table_(4, info->zone()), - scope_(info->scope()), - deferred_(8, info->zone()), - frame_is_built_(false), - safepoints_(info->zone()), - resolver_(this), - expected_safepoint_kind_(Safepoint::kSimple) { - PopulateDeoptimizationLiteralsWithInlinedFunctions(); - } - - - int LookupDestination(int block_id) const { - return chunk()->LookupDestination(block_id); - } - - bool IsNextEmittedBlock(int block_id) const { - return LookupDestination(block_id) == GetNextEmittedBlock(); - } - - bool NeedsEagerFrame() const { - return HasAllocatedStackSlots() || info()->is_non_deferred_calling() || - !info()->IsStub() || info()->requires_frame(); - } - bool NeedsDeferredFrame() const { - return !NeedsEagerFrame() && info()->is_deferred_calling(); - } - - RAStatus GetRAState() const { - return frame_is_built_ ? kRAHasBeenSaved : kRAHasNotBeenSaved; - } - - // Support for converting LOperands to assembler types. - // LOperand must be a register. - Register ToRegister(LOperand* op) const; - - // LOperand is loaded into scratch, unless already a register. - Register EmitLoadRegister(LOperand* op, Register scratch); - - // LOperand must be a double register. - DoubleRegister ToDoubleRegister(LOperand* op) const; - - // LOperand is loaded into dbl_scratch, unless already a double register. 
- DoubleRegister EmitLoadDoubleRegister(LOperand* op, - FloatRegister flt_scratch, - DoubleRegister dbl_scratch); - int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const; - int32_t ToInteger32(LConstantOperand* op) const; - Smi* ToSmi(LConstantOperand* op) const; - double ToDouble(LConstantOperand* op) const; - Operand ToOperand(LOperand* op); - MemOperand ToMemOperand(LOperand* op) const; - // Returns a MemOperand pointing to the high word of a DoubleStackSlot. - MemOperand ToHighMemOperand(LOperand* op) const; - - bool IsInteger32(LConstantOperand* op) const; - bool IsSmi(LConstantOperand* op) const; - Handle ToHandle(LConstantOperand* op) const; - - // Try to generate code for the entire chunk, but it may fail if the - // chunk contains constructs we cannot handle. Returns true if the - // code generation attempt succeeded. - bool GenerateCode(); - - // Finish the code by setting stack height, safepoint, and bailout - // information on it. - void FinishCode(Handle code); - - void DoDeferredNumberTagD(LNumberTagD* instr); - - enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 }; - void DoDeferredNumberTagIU(LInstruction* instr, - LOperand* value, - LOperand* temp1, - LOperand* temp2, - IntegerSignedness signedness); - - void DoDeferredTaggedToI(LTaggedToI* instr); - void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr); - void DoDeferredStackCheck(LStackCheck* instr); - void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr); - void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr); - void DoDeferredStringCharFromCode(LStringCharFromCode* instr); - void DoDeferredAllocate(LAllocate* instr); - void DoDeferredInstanceMigration(LCheckMaps* instr, Register object); - void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, - Register result, - Register object, - Register index); - - // Parallel move support. - void DoParallelMove(LParallelMove* move); - void DoGap(LGap* instr); - - MemOperand PrepareKeyedOperand(Register key, - Register base, - bool key_is_constant, - int constant_key, - int element_size, - int shift_size, - int base_offset); - - // Emit frame translation commands for an environment. - void WriteTranslation(LEnvironment* environment, Translation* translation); - - // Declare methods that deal with the individual node types. -#define DECLARE_DO(type) void Do##type(L##type* node); - LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) -#undef DECLARE_DO - - private: - Scope* scope() const { return scope_; } - - Register scratch0() { return kLithiumScratchReg; } - Register scratch1() { return kLithiumScratchReg2; } - DoubleRegister double_scratch0() { return kLithiumScratchDouble; } - - LInstruction* GetNextInstruction(); - - void EmitClassOfTest(Label* if_true, Label* if_false, - Handle class_name, Register input, - Register temporary, Register temporary2); - - bool HasAllocatedStackSlots() const { - return chunk()->HasAllocatedStackSlots(); - } - int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); } - int GetTotalFrameSlotCount() const { - return chunk()->GetTotalFrameSlotCount(); - } - - void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); } - - void SaveCallerDoubles(); - void RestoreCallerDoubles(); - - // Code generation passes. Returns true if code generation should - // continue. 
- void GenerateBodyInstructionPre(LInstruction* instr) override; - bool GeneratePrologue(); - bool GenerateDeferredCode(); - bool GenerateJumpTable(); - bool GenerateSafepointTable(); - - // Generates the custom OSR entrypoint and sets the osr_pc_offset. - void GenerateOsrPrologue(); - - enum SafepointMode { - RECORD_SIMPLE_SAFEPOINT, - RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS - }; - - void CallCode(Handle code, - RelocInfo::Mode mode, - LInstruction* instr); - - void CallCodeGeneric(Handle code, - RelocInfo::Mode mode, - LInstruction* instr, - SafepointMode safepoint_mode); - - void CallRuntime(const Runtime::Function* function, - int num_arguments, - LInstruction* instr, - SaveFPRegsMode save_doubles = kDontSaveFPRegs); - - void CallRuntime(Runtime::FunctionId id, - int num_arguments, - LInstruction* instr) { - const Runtime::Function* function = Runtime::FunctionForId(id); - CallRuntime(function, num_arguments, instr); - } - - void CallRuntime(Runtime::FunctionId id, LInstruction* instr) { - const Runtime::Function* function = Runtime::FunctionForId(id); - CallRuntime(function, function->nargs, instr); - } - - void LoadContextFromDeferred(LOperand* context); - void CallRuntimeFromDeferred(Runtime::FunctionId id, - int argc, - LInstruction* instr, - LOperand* context); - - void PrepareForTailCall(const ParameterCount& actual, Register scratch1, - Register scratch2, Register scratch3); - - // Generate a direct call to a known function. Expects the function - // to be in a1. - void CallKnownFunction(Handle function, - int formal_parameter_count, int arity, - bool is_tail_call, LInstruction* instr); - - void RecordSafepointWithLazyDeopt(LInstruction* instr, - SafepointMode safepoint_mode); - - void RegisterEnvironmentForDeoptimization(LEnvironment* environment, - Safepoint::DeoptMode mode); - void DeoptimizeIf(Condition condition, LInstruction* instr, - DeoptimizeReason deopt_reason, - Deoptimizer::BailoutType bailout_type, - Register src1 = zero_reg, - const Operand& src2 = Operand(zero_reg)); - void DeoptimizeIf(Condition condition, LInstruction* instr, - DeoptimizeReason deopt_reason = DeoptimizeReason::kNoReason, - Register src1 = zero_reg, - const Operand& src2 = Operand(zero_reg)); - - void AddToTranslation(LEnvironment* environment, - Translation* translation, - LOperand* op, - bool is_tagged, - bool is_uint32, - int* object_index_pointer, - int* dematerialized_index_pointer); - - Register ToRegister(int index) const; - DoubleRegister ToDoubleRegister(int index) const; - - MemOperand BuildSeqStringOperand(Register string, - LOperand* index, - String::Encoding encoding); - - void EmitIntegerMathAbs(LMathAbs* instr); - - // Support for recording safepoint information. - void RecordSafepoint(LPointerMap* pointers, - Safepoint::Kind kind, - int arguments, - Safepoint::DeoptMode mode); - void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode); - void RecordSafepoint(Safepoint::DeoptMode mode); - void RecordSafepointWithRegisters(LPointerMap* pointers, - int arguments, - Safepoint::DeoptMode mode); - - static Condition TokenToCondition(Token::Value op, bool is_unsigned); - void EmitGoto(int block); - - // EmitBranch expects to be the last instruction of a block. 
- template <class InstrType>
- void EmitBranch(InstrType instr,
- Condition condition,
- Register src1,
- const Operand& src2);
- template <class InstrType>
- void EmitBranchF(InstrType instr,
- Condition condition,
- FPURegister src1,
- FPURegister src2);
- template <class InstrType>
- void EmitTrueBranch(InstrType instr, Condition condition, Register src1,
- const Operand& src2);
- template <class InstrType>
- void EmitFalseBranch(InstrType instr, Condition condition, Register src1,
- const Operand& src2);
- template <class InstrType>
- void EmitFalseBranchF(InstrType instr,
- Condition condition,
- FPURegister src1,
- FPURegister src2);
- void EmitCmpI(LOperand* left, LOperand* right);
- void EmitNumberUntagD(LNumberUntagD* instr, Register input,
- DoubleRegister result, NumberUntagDMode mode);
-
- // Emits optimized code for typeof x == "y". Modifies input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- // Returns two registers in cmp1 and cmp2 that can be used in the
- // Branch instruction after EmitTypeofIs.
- Condition EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name,
- Register* cmp1,
- Operand* cmp2);
-
- // Emits optimized code for %_IsString(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsString(Register input,
- Register temp1,
- Label* is_not_string,
- SmiCheck check_needed);
-
- // Emits optimized code to deep-copy the contents of statically known
- // object graphs (e.g. object literal boilerplate).
- void EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset,
- AllocationSiteMode mode);
- // Emit optimized code for integer division.
- // Inputs are signed.
- // All registers are clobbered.
- // If 'remainder' is no_reg, it is not computed.
- void EmitSignedIntegerDivisionByConstant(Register result,
- Register dividend,
- int32_t divisor,
- Register remainder,
- Register scratch,
- LEnvironment* environment);
-
-
- void EnsureSpaceForLazyDeopt(int space_needed) override;
- void DoLoadKeyedExternalArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedArray(LLoadKeyed* instr);
- void DoStoreKeyedExternalArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedArray(LStoreKeyed* instr);
-
- template <class T>
- void EmitVectorLoadICRegisters(T* instr);
-
- ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
- Scope* const scope_;
- ZoneList<LDeferredCode*> deferred_;
- bool frame_is_built_;
-
- // Builder that keeps track of safepoints in the code. The table
- // itself is emitted at the end of the generated code.
- SafepointTableBuilder safepoints_;
-
- // Compiler from a set of parallel moves to a sequential list of moves.
- LGapResolver resolver_; - - Safepoint::Kind expected_safepoint_kind_; - - class PushSafepointRegistersScope final BASE_EMBEDDED { - public: - explicit PushSafepointRegistersScope(LCodeGen* codegen); - - ~PushSafepointRegistersScope(); - - private: - LCodeGen* codegen_; - }; - - friend class LDeferredCode; - friend class LEnvironment; - friend class SafepointGenerator; - DISALLOW_COPY_AND_ASSIGN(LCodeGen); -}; - - -class LDeferredCode : public ZoneObject { - public: - explicit LDeferredCode(LCodeGen* codegen) - : codegen_(codegen), - external_exit_(NULL), - instruction_index_(codegen->current_instruction_) { - codegen->AddDeferredCode(this); - } - - virtual ~LDeferredCode() {} - virtual void Generate() = 0; - virtual LInstruction* instr() = 0; - - void SetExit(Label* exit) { external_exit_ = exit; } - Label* entry() { return &entry_; } - Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; } - int instruction_index() const { return instruction_index_; } - - protected: - LCodeGen* codegen() const { return codegen_; } - MacroAssembler* masm() const { return codegen_->masm(); } - - private: - LCodeGen* codegen_; - Label entry_; - Label exit_; - Label* external_exit_; - int instruction_index_; -}; - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_MIPS_LITHIUM_CODEGEN_MIPS_H_ diff --git a/src/crankshaft/mips/lithium-gap-resolver-mips.cc b/src/crankshaft/mips/lithium-gap-resolver-mips.cc deleted file mode 100644 index 12e1ae77e9..0000000000 --- a/src/crankshaft/mips/lithium-gap-resolver-mips.cc +++ /dev/null @@ -1,298 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/mips/lithium-gap-resolver-mips.h" - -#include "src/crankshaft/mips/lithium-codegen-mips.h" - -namespace v8 { -namespace internal { - -LGapResolver::LGapResolver(LCodeGen* owner) - : cgen_(owner), - moves_(32, owner->zone()), - root_index_(0), - in_cycle_(false), - saved_destination_(NULL) {} - - -void LGapResolver::Resolve(LParallelMove* parallel_move) { - DCHECK(moves_.is_empty()); - // Build up a worklist of moves. - BuildInitialMoveList(parallel_move); - - for (int i = 0; i < moves_.length(); ++i) { - LMoveOperands move = moves_[i]; - // Skip constants to perform them last. They don't block other moves - // and skipping such moves with register destinations keeps those - // registers free for the whole algorithm. - if (!move.IsEliminated() && !move.source()->IsConstantOperand()) { - root_index_ = i; // Any cycle is found when by reaching this move again. - PerformMove(i); - if (in_cycle_) { - RestoreValue(); - } - } - } - - // Perform the moves with constant sources. - for (int i = 0; i < moves_.length(); ++i) { - if (!moves_[i].IsEliminated()) { - DCHECK(moves_[i].source()->IsConstantOperand()); - EmitMove(i); - } - } - - moves_.Rewind(0); -} - - -void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) { - // Perform a linear sweep of the moves to add them to the initial list of - // moves to perform, ignoring any move that is redundant (the source is - // the same as the destination, the destination is ignored and - // unallocated, or the move was already eliminated). 
- const ZoneList* moves = parallel_move->move_operands(); - for (int i = 0; i < moves->length(); ++i) { - LMoveOperands move = moves->at(i); - if (!move.IsRedundant()) moves_.Add(move, cgen_->zone()); - } - Verify(); -} - - -void LGapResolver::PerformMove(int index) { - // Each call to this function performs a move and deletes it from the move - // graph. We first recursively perform any move blocking this one. We - // mark a move as "pending" on entry to PerformMove in order to detect - // cycles in the move graph. - - // We can only find a cycle, when doing a depth-first traversal of moves, - // be encountering the starting move again. So by spilling the source of - // the starting move, we break the cycle. All moves are then unblocked, - // and the starting move is completed by writing the spilled value to - // its destination. All other moves from the spilled source have been - // completed prior to breaking the cycle. - // An additional complication is that moves to MemOperands with large - // offsets (more than 1K or 4K) require us to spill this spilled value to - // the stack, to free up the register. - DCHECK(!moves_[index].IsPending()); - DCHECK(!moves_[index].IsRedundant()); - - // Clear this move's destination to indicate a pending move. The actual - // destination is saved in a stack allocated local. Multiple moves can - // be pending because this function is recursive. - DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated. - LOperand* destination = moves_[index].destination(); - moves_[index].set_destination(NULL); - - // Perform a depth-first traversal of the move graph to resolve - // dependencies. Any unperformed, unpending move with a source the same - // as this one's destination blocks this one so recursively perform all - // such moves. - for (int i = 0; i < moves_.length(); ++i) { - LMoveOperands other_move = moves_[i]; - if (other_move.Blocks(destination) && !other_move.IsPending()) { - PerformMove(i); - // If there is a blocking, pending move it must be moves_[root_index_] - // and all other moves with the same source as moves_[root_index_] are - // sucessfully executed (because they are cycle-free) by this loop. - } - } - - // We are about to resolve this move and don't need it marked as - // pending, so restore its destination. - moves_[index].set_destination(destination); - - // The move may be blocked on a pending move, which must be the starting move. - // In this case, we have a cycle, and we save the source of this move to - // a scratch register to break it. - LMoveOperands other_move = moves_[root_index_]; - if (other_move.Blocks(destination)) { - DCHECK(other_move.IsPending()); - BreakCycle(index); - return; - } - - // This move is no longer blocked. - EmitMove(index); -} - - -void LGapResolver::Verify() { -#ifdef ENABLE_SLOW_DCHECKS - // No operand should be the destination for more than one move. - for (int i = 0; i < moves_.length(); ++i) { - LOperand* destination = moves_[i].destination(); - for (int j = i + 1; j < moves_.length(); ++j) { - SLOW_DCHECK(!destination->Equals(moves_[j].destination())); - } - } -#endif -} - -#define __ ACCESS_MASM(cgen_->masm()) - -void LGapResolver::BreakCycle(int index) { - // We save in a register the value that should end up in the source of - // moves_[root_index]. After performing all moves in the tree rooted - // in that move, we save the value to that source. 
- DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source())); - DCHECK(!in_cycle_); - in_cycle_ = true; - LOperand* source = moves_[index].source(); - saved_destination_ = moves_[index].destination(); - if (source->IsRegister()) { - __ mov(kLithiumScratchReg, cgen_->ToRegister(source)); - } else if (source->IsStackSlot()) { - __ lw(kLithiumScratchReg, cgen_->ToMemOperand(source)); - } else if (source->IsDoubleRegister()) { - __ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source)); - } else if (source->IsDoubleStackSlot()) { - __ Ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source)); - } else { - UNREACHABLE(); - } - // This move will be done by restoring the saved value to the destination. - moves_[index].Eliminate(); -} - - -void LGapResolver::RestoreValue() { - DCHECK(in_cycle_); - DCHECK(saved_destination_ != NULL); - - // Spilled value is in kLithiumScratchReg or kLithiumScratchDouble. - if (saved_destination_->IsRegister()) { - __ mov(cgen_->ToRegister(saved_destination_), kLithiumScratchReg); - } else if (saved_destination_->IsStackSlot()) { - __ sw(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_)); - } else if (saved_destination_->IsDoubleRegister()) { - __ mov_d(cgen_->ToDoubleRegister(saved_destination_), - kLithiumScratchDouble); - } else if (saved_destination_->IsDoubleStackSlot()) { - __ Sdc1(kLithiumScratchDouble, cgen_->ToMemOperand(saved_destination_)); - } else { - UNREACHABLE(); - } - - in_cycle_ = false; - saved_destination_ = NULL; -} - - -void LGapResolver::EmitMove(int index) { - LOperand* source = moves_[index].source(); - LOperand* destination = moves_[index].destination(); - - // Dispatch on the source and destination operand kinds. Not all - // combinations are possible. - - if (source->IsRegister()) { - Register source_register = cgen_->ToRegister(source); - if (destination->IsRegister()) { - __ mov(cgen_->ToRegister(destination), source_register); - } else { - DCHECK(destination->IsStackSlot()); - __ sw(source_register, cgen_->ToMemOperand(destination)); - } - } else if (source->IsStackSlot()) { - MemOperand source_operand = cgen_->ToMemOperand(source); - if (destination->IsRegister()) { - __ lw(cgen_->ToRegister(destination), source_operand); - } else { - DCHECK(destination->IsStackSlot()); - MemOperand destination_operand = cgen_->ToMemOperand(destination); - if (in_cycle_) { - if (!destination_operand.OffsetIsInt16Encodable()) { - // 'at' is overwritten while saving the value to the destination. - // Therefore we can't use 'at'. It is OK if the read from the source - // destroys 'at', since that happens before the value is read. - // This uses only a single reg of the double reg-pair. - __ lwc1(kLithiumScratchDouble, source_operand); - __ swc1(kLithiumScratchDouble, destination_operand); - } else { - __ lw(at, source_operand); - __ sw(at, destination_operand); - } - } else { - __ lw(kLithiumScratchReg, source_operand); - __ sw(kLithiumScratchReg, destination_operand); - } - } - - } else if (source->IsConstantOperand()) { - LConstantOperand* constant_source = LConstantOperand::cast(source); - if (destination->IsRegister()) { - Register dst = cgen_->ToRegister(destination); - Representation r = cgen_->IsSmi(constant_source) - ? 
Representation::Smi() : Representation::Integer32(); - if (cgen_->IsInteger32(constant_source)) { - __ li(dst, Operand(cgen_->ToRepresentation(constant_source, r))); - } else { - __ li(dst, cgen_->ToHandle(constant_source)); - } - } else if (destination->IsDoubleRegister()) { - DoubleRegister result = cgen_->ToDoubleRegister(destination); - double v = cgen_->ToDouble(constant_source); - __ Move(result, v); - } else { - DCHECK(destination->IsStackSlot()); - DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone. - Representation r = cgen_->IsSmi(constant_source) - ? Representation::Smi() : Representation::Integer32(); - if (cgen_->IsInteger32(constant_source)) { - __ li(kLithiumScratchReg, - Operand(cgen_->ToRepresentation(constant_source, r))); - } else { - __ li(kLithiumScratchReg, cgen_->ToHandle(constant_source)); - } - __ sw(kLithiumScratchReg, cgen_->ToMemOperand(destination)); - } - - } else if (source->IsDoubleRegister()) { - DoubleRegister source_register = cgen_->ToDoubleRegister(source); - if (destination->IsDoubleRegister()) { - __ mov_d(cgen_->ToDoubleRegister(destination), source_register); - } else { - DCHECK(destination->IsDoubleStackSlot()); - MemOperand destination_operand = cgen_->ToMemOperand(destination); - __ Sdc1(source_register, destination_operand); - } - - } else if (source->IsDoubleStackSlot()) { - MemOperand source_operand = cgen_->ToMemOperand(source); - if (destination->IsDoubleRegister()) { - __ Ldc1(cgen_->ToDoubleRegister(destination), source_operand); - } else { - DCHECK(destination->IsDoubleStackSlot()); - MemOperand destination_operand = cgen_->ToMemOperand(destination); - if (in_cycle_) { - // kLithiumScratchDouble was used to break the cycle, - // but kLithiumScratchReg is free. - MemOperand source_high_operand = - cgen_->ToHighMemOperand(source); - MemOperand destination_high_operand = - cgen_->ToHighMemOperand(destination); - __ lw(kLithiumScratchReg, source_operand); - __ sw(kLithiumScratchReg, destination_operand); - __ lw(kLithiumScratchReg, source_high_operand); - __ sw(kLithiumScratchReg, destination_high_operand); - } else { - __ Ldc1(kLithiumScratchDouble, source_operand); - __ Sdc1(kLithiumScratchDouble, destination_operand); - } - } - } else { - UNREACHABLE(); - } - - moves_[index].Eliminate(); -} - - -#undef __ - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/mips/lithium-gap-resolver-mips.h b/src/crankshaft/mips/lithium-gap-resolver-mips.h deleted file mode 100644 index 6c5fd037a3..0000000000 --- a/src/crankshaft/mips/lithium-gap-resolver-mips.h +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_ -#define V8_CRANKSHAFT_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_ - -#include "src/crankshaft/lithium.h" - -namespace v8 { -namespace internal { - -class LCodeGen; -class LGapResolver; - -class LGapResolver final BASE_EMBEDDED { - public: - explicit LGapResolver(LCodeGen* owner); - - // Resolve a set of parallel moves, emitting assembler instructions. - void Resolve(LParallelMove* parallel_move); - - private: - // Build the initial list of moves. - void BuildInitialMoveList(LParallelMove* parallel_move); - - // Perform the move at the moves_ index in question (possibly requiring - // other moves to satisfy dependencies). 
- void PerformMove(int index);
-
- // If a cycle is found in the series of moves, save the blocking value to
- // a scratch register. The cycle must be found by hitting the root of the
- // depth-first search.
- void BreakCycle(int index);
-
- // After a cycle has been resolved, restore the value from the scratch
- // register to its proper destination.
- void RestoreValue();
-
- // Emit a move and remove it from the move graph.
- void EmitMove(int index);
-
- // Verify the move list before performing moves.
- void Verify();
-
- LCodeGen* cgen_;
-
- // List of moves not yet resolved.
- ZoneList<LMoveOperands> moves_;
-
- int root_index_;
- bool in_cycle_;
- LOperand* saved_destination_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
diff --git a/src/crankshaft/mips/lithium-mips.cc b/src/crankshaft/mips/lithium-mips.cc
deleted file mode 100644
index b38f4a3fe4..0000000000
--- a/src/crankshaft/mips/lithium-mips.cc
+++ /dev/null
@@ -1,2329 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/mips/lithium-mips.h"
-
-#include <sstream>
-
-#if V8_TARGET_ARCH_MIPS
-
-#include "src/crankshaft/lithium-inl.h"
-#include "src/crankshaft/mips/lithium-codegen-mips.h"
-
-namespace v8 {
-namespace internal {
-
-#define DEFINE_COMPILE(type) \
- void L##type::CompileToNative(LCodeGen* generator) { \
- generator->Do##type(this); \
- }
-LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
-#undef DEFINE_COMPILE
-
-#ifdef DEBUG
-void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as temporaries and
- // outputs because all registers are blocked by the calling convention.
- // Inputs operands must use a fixed register or use-at-start policy or
- // a non-register policy.
- DCHECK(Output() == NULL || - LUnallocated::cast(Output())->HasFixedPolicy() || - !LUnallocated::cast(Output())->HasRegisterPolicy()); - for (UseIterator it(this); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - DCHECK(operand->HasFixedPolicy() || - operand->IsUsedAtStart()); - } - for (TempIterator it(this); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy()); - } -} -#endif - - -void LInstruction::PrintTo(StringStream* stream) { - stream->Add("%s ", this->Mnemonic()); - - PrintOutputOperandTo(stream); - - PrintDataTo(stream); - - if (HasEnvironment()) { - stream->Add(" "); - environment()->PrintTo(stream); - } - - if (HasPointerMap()) { - stream->Add(" "); - pointer_map()->PrintTo(stream); - } -} - - -void LInstruction::PrintDataTo(StringStream* stream) { - stream->Add("= "); - for (int i = 0; i < InputCount(); i++) { - if (i > 0) stream->Add(" "); - if (InputAt(i) == NULL) { - stream->Add("NULL"); - } else { - InputAt(i)->PrintTo(stream); - } - } -} - - -void LInstruction::PrintOutputOperandTo(StringStream* stream) { - if (HasResult()) result()->PrintTo(stream); -} - - -void LLabel::PrintDataTo(StringStream* stream) { - LGap::PrintDataTo(stream); - LLabel* rep = replacement(); - if (rep != NULL) { - stream->Add(" Dead block replaced with B%d", rep->block_id()); - } -} - - -bool LGap::IsRedundant() const { - for (int i = 0; i < 4; i++) { - if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) { - return false; - } - } - - return true; -} - - -void LGap::PrintDataTo(StringStream* stream) { - for (int i = 0; i < 4; i++) { - stream->Add("("); - if (parallel_moves_[i] != NULL) { - parallel_moves_[i]->PrintDataTo(stream); - } - stream->Add(") "); - } -} - - -const char* LArithmeticD::Mnemonic() const { - switch (op()) { - case Token::ADD: return "add-d"; - case Token::SUB: return "sub-d"; - case Token::MUL: return "mul-d"; - case Token::DIV: return "div-d"; - case Token::MOD: return "mod-d"; - default: - UNREACHABLE(); - } -} - - -const char* LArithmeticT::Mnemonic() const { - switch (op()) { - case Token::ADD: return "add-t"; - case Token::SUB: return "sub-t"; - case Token::MUL: return "mul-t"; - case Token::MOD: return "mod-t"; - case Token::DIV: return "div-t"; - case Token::BIT_AND: return "bit-and-t"; - case Token::BIT_OR: return "bit-or-t"; - case Token::BIT_XOR: return "bit-xor-t"; - case Token::ROR: return "ror-t"; - case Token::SHL: return "sll-t"; - case Token::SAR: return "sra-t"; - case Token::SHR: return "srl-t"; - default: - UNREACHABLE(); - } -} - - -bool LGoto::HasInterestingComment(LCodeGen* gen) const { - return !gen->IsNextEmittedBlock(block_id()); -} - - -void LGoto::PrintDataTo(StringStream* stream) { - stream->Add("B%d", block_id()); -} - - -void LBranch::PrintDataTo(StringStream* stream) { - stream->Add("B%d | B%d on ", true_block_id(), false_block_id()); - value()->PrintTo(stream); -} - - -LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) { - return new(zone()) LDebugBreak(); -} - - -void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if "); - left()->PrintTo(stream); - stream->Add(" %s ", Token::String(op())); - right()->PrintTo(stream); - stream->Add(" then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LIsStringAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_string("); - value()->PrintTo(stream); - stream->Add(") then B%d else 
B%d", true_block_id(), false_block_id()); -} - - -void LIsSmiAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_smi("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_undetectable("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LStringCompareAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if string_compare("); - left()->PrintTo(stream); - right()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if has_instance_type("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - -void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if class_of_test("); - value()->PrintTo(stream); - stream->Add(", \"%o\") then B%d else B%d", *hydrogen()->class_name(), - true_block_id(), false_block_id()); -} - -void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if typeof "); - value()->PrintTo(stream); - stream->Add(" == \"%s\" then B%d else B%d", - hydrogen()->type_literal()->ToCString().get(), - true_block_id(), false_block_id()); -} - - -void LStoreCodeEntry::PrintDataTo(StringStream* stream) { - stream->Add(" = "); - function()->PrintTo(stream); - stream->Add(".code_entry = "); - code_object()->PrintTo(stream); -} - - -void LInnerAllocatedObject::PrintDataTo(StringStream* stream) { - stream->Add(" = "); - base_object()->PrintTo(stream); - stream->Add(" + "); - offset()->PrintTo(stream); -} - - -void LCallWithDescriptor::PrintDataTo(StringStream* stream) { - for (int i = 0; i < InputCount(); i++) { - InputAt(i)->PrintTo(stream); - stream->Add(" "); - } - stream->Add("#%d / ", arity()); -} - - -void LLoadContextSlot::PrintDataTo(StringStream* stream) { - context()->PrintTo(stream); - stream->Add("[%d]", slot_index()); -} - - -void LStoreContextSlot::PrintDataTo(StringStream* stream) { - context()->PrintTo(stream); - stream->Add("[%d] <- ", slot_index()); - value()->PrintTo(stream); -} - - -void LInvokeFunction::PrintDataTo(StringStream* stream) { - stream->Add("= "); - function()->PrintTo(stream); - stream->Add(" #%d / ", arity()); -} - - -void LCallNewArray::PrintDataTo(StringStream* stream) { - stream->Add("= "); - constructor()->PrintTo(stream); - stream->Add(" #%d / ", arity()); - ElementsKind kind = hydrogen()->elements_kind(); - stream->Add(" (%s) ", ElementsKindToString(kind)); -} - - -void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { - arguments()->PrintTo(stream); - stream->Add(" length "); - length()->PrintTo(stream); - stream->Add(" index "); - index()->PrintTo(stream); -} - - -void LStoreNamedField::PrintDataTo(StringStream* stream) { - object()->PrintTo(stream); - std::ostringstream os; - os << hydrogen()->access() << " <- "; - stream->Add(os.str().c_str()); - value()->PrintTo(stream); -} - - -void LLoadKeyed::PrintDataTo(StringStream* stream) { - elements()->PrintTo(stream); - stream->Add("["); - key()->PrintTo(stream); - if (hydrogen()->IsDehoisted()) { - stream->Add(" + %d]", base_offset()); - } else { - stream->Add("]"); - } -} - - -void LStoreKeyed::PrintDataTo(StringStream* stream) { - elements()->PrintTo(stream); - stream->Add("["); - key()->PrintTo(stream); - if (hydrogen()->IsDehoisted()) { - 
stream->Add(" + %d] <-", base_offset()); - } else { - stream->Add("] <- "); - } - - if (value() == NULL) { - DCHECK(hydrogen()->IsConstantHoleStore() && - hydrogen()->value()->representation().IsDouble()); - stream->Add(""); - } else { - value()->PrintTo(stream); - } -} - - -void LTransitionElementsKind::PrintDataTo(StringStream* stream) { - object()->PrintTo(stream); - stream->Add(" %p -> %p", *original_map(), *transitioned_map()); -} - - -int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) { - // Skip a slot if for a double-width slot. - if (kind == DOUBLE_REGISTERS) current_frame_slots_++; - return current_frame_slots_++; -} - - -LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) { - int index = GetNextSpillIndex(kind); - if (kind == DOUBLE_REGISTERS) { - return LDoubleStackSlot::Create(index, zone()); - } else { - DCHECK(kind == GENERAL_REGISTERS); - return LStackSlot::Create(index, zone()); - } -} - - -LPlatformChunk* LChunkBuilder::Build() { - DCHECK(is_unused()); - chunk_ = new(zone()) LPlatformChunk(info(), graph()); - LPhase phase("L_Building chunk", chunk_); - status_ = BUILDING; - - const ZoneList* blocks = graph()->blocks(); - for (int i = 0; i < blocks->length(); i++) { - HBasicBlock* next = NULL; - if (i < blocks->length() - 1) next = blocks->at(i + 1); - DoBasicBlock(blocks->at(i), next); - if (is_aborted()) return NULL; - } - status_ = DONE; - return chunk_; -} - - -LUnallocated* LChunkBuilder::ToUnallocated(Register reg) { - return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code()); -} - - -LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) { - return new (zone()) - LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code()); -} - - -LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) { - return Use(value, ToUnallocated(fixed_register)); -} - - -LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) { - return Use(value, ToUnallocated(reg)); -} - - -LOperand* LChunkBuilder::UseRegister(HValue* value) { - return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER)); -} - - -LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) { - return Use(value, - new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER, - LUnallocated::USED_AT_START)); -} - - -LOperand* LChunkBuilder::UseTempRegister(HValue* value) { - return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER)); -} - - -LOperand* LChunkBuilder::Use(HValue* value) { - return Use(value, new(zone()) LUnallocated(LUnallocated::NONE)); -} - - -LOperand* LChunkBuilder::UseAtStart(HValue* value) { - return Use(value, new(zone()) LUnallocated(LUnallocated::NONE, - LUnallocated::USED_AT_START)); -} - - -LOperand* LChunkBuilder::UseOrConstant(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : Use(value); -} - - -LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseAtStart(value); -} - - -LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseRegister(value); -} - - -LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) { - return value->IsConstant() - ? 
chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseRegisterAtStart(value); -} - - -LOperand* LChunkBuilder::UseConstant(HValue* value) { - return chunk_->DefineConstantOperand(HConstant::cast(value)); -} - - -LOperand* LChunkBuilder::UseAny(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : Use(value, new(zone()) LUnallocated(LUnallocated::ANY)); -} - - -LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) { - if (value->EmitAtUses()) { - HInstruction* instr = HInstruction::cast(value); - VisitInstruction(instr); - } - operand->set_virtual_register(value->id()); - return operand; -} - - -LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr, - LUnallocated* result) { - result->set_virtual_register(current_instruction_->id()); - instr->set_result(result); - return instr; -} - - -LInstruction* LChunkBuilder::DefineAsRegister( - LTemplateResultInstruction<1>* instr) { - return Define(instr, - new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER)); -} - - -LInstruction* LChunkBuilder::DefineAsSpilled( - LTemplateResultInstruction<1>* instr, int index) { - return Define(instr, - new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index)); -} - - -LInstruction* LChunkBuilder::DefineSameAsFirst( - LTemplateResultInstruction<1>* instr) { - return Define(instr, - new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT)); -} - - -LInstruction* LChunkBuilder::DefineFixed( - LTemplateResultInstruction<1>* instr, Register reg) { - return Define(instr, ToUnallocated(reg)); -} - - -LInstruction* LChunkBuilder::DefineFixedDouble( - LTemplateResultInstruction<1>* instr, DoubleRegister reg) { - return Define(instr, ToUnallocated(reg)); -} - - -LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { - HEnvironment* hydrogen_env = current_block_->last_environment(); - return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env); -} - - -LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, - HInstruction* hinstr, - CanDeoptimize can_deoptimize) { - info()->MarkAsNonDeferredCalling(); -#ifdef DEBUG - instr->VerifyCall(); -#endif - instr->MarkAsCall(); - instr = AssignPointerMap(instr); - - // If instruction does not have side-effects lazy deoptimization - // after the call will try to deoptimize to the point before the call. - // Thus we still need to attach environment to this call even if - // call sequence can not deoptimize eagerly. - bool needs_environment = - (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || - !hinstr->HasObservableSideEffects(); - if (needs_environment && !instr->HasEnvironment()) { - instr = AssignEnvironment(instr); - // We can't really figure out if the environment is needed or not. 
- instr->environment()->set_has_been_used(); - } - - return instr; -} - - -LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { - DCHECK(!instr->HasPointerMap()); - instr->set_pointer_map(new(zone()) LPointerMap(zone())); - return instr; -} - - -LUnallocated* LChunkBuilder::TempRegister() { - LUnallocated* operand = - new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER); - int vreg = allocator_->GetVirtualRegister(); - if (!allocator_->AllocationOk()) { - Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister); - vreg = 0; - } - operand->set_virtual_register(vreg); - return operand; -} - - -LUnallocated* LChunkBuilder::TempDoubleRegister() { - LUnallocated* operand = - new(zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER); - int vreg = allocator_->GetVirtualRegister(); - if (!allocator_->AllocationOk()) { - Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister); - vreg = 0; - } - operand->set_virtual_register(vreg); - return operand; -} - - -LOperand* LChunkBuilder::FixedTemp(Register reg) { - LUnallocated* operand = ToUnallocated(reg); - DCHECK(operand->HasFixedPolicy()); - return operand; -} - - -LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) { - LUnallocated* operand = ToUnallocated(reg); - DCHECK(operand->HasFixedPolicy()); - return operand; -} - - -LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) { - return new(zone()) LLabel(instr->block()); -} - - -LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) { - return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value()))); -} - - -LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) { - UNREACHABLE(); -} - - -LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) { - return AssignEnvironment(new(zone()) LDeoptimize); -} - - -LInstruction* LChunkBuilder::DoShift(Token::Value op, - HBitwiseBinaryOperation* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* left = UseRegisterAtStart(instr->left()); - - HValue* right_value = instr->right(); - LOperand* right = NULL; - int constant_value = 0; - bool does_deopt = false; - if (right_value->IsConstant()) { - HConstant* constant = HConstant::cast(right_value); - right = chunk_->DefineConstantOperand(constant); - constant_value = constant->Integer32Value() & 0x1f; - // Left shifts can deoptimize if we shift by > 0 and the result cannot be - // truncated to smi. - if (instr->representation().IsSmi() && constant_value > 0) { - does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi); - } - } else { - right = UseRegisterAtStart(right_value); - } - - // Shift operations can only deoptimize if we do a logical shift - // by 0 and the result cannot be truncated to int32. - if (op == Token::SHR && constant_value == 0) { - does_deopt = !instr->CheckFlag(HInstruction::kUint32); - } - - LInstruction* result = - DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt)); - return does_deopt ? 
AssignEnvironment(result) : result; - } else { - return DoArithmeticT(op, instr); - } -} - - -LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op, - HArithmeticBinaryOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - DCHECK(instr->right()->representation().IsDouble()); - if (op == Token::MOD) { - LOperand* left = UseFixedDouble(instr->left(), f2); - LOperand* right = UseFixedDouble(instr->right(), f4); - LArithmeticD* result = new(zone()) LArithmeticD(op, left, right); - // We call a C function for double modulo. It can't trigger a GC. We need - // to use fixed result register for the call. - // TODO(fschneider): Allow any register as input registers. - return MarkAsCall(DefineFixedDouble(result, f2), instr); - } else { - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterAtStart(instr->right()); - LArithmeticD* result = new(zone()) LArithmeticD(op, left, right); - return DefineAsRegister(result); - } -} - - -LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op, - HBinaryOperation* instr) { - HValue* left = instr->left(); - HValue* right = instr->right(); - DCHECK(left->representation().IsTagged()); - DCHECK(right->representation().IsTagged()); - LOperand* context = UseFixed(instr->context(), cp); - LOperand* left_operand = UseFixed(left, a1); - LOperand* right_operand = UseFixed(right, a0); - LArithmeticT* result = - new(zone()) LArithmeticT(op, context, left_operand, right_operand); - return MarkAsCall(DefineFixed(result, v0), instr); -} - - -void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { - DCHECK(is_building()); - current_block_ = block; - next_block_ = next_block; - if (block->IsStartBlock()) { - block->UpdateEnvironment(graph_->start_environment()); - argument_count_ = 0; - } else if (block->predecessors()->length() == 1) { - // We have a single predecessor => copy environment and outgoing - // argument count from the predecessor. - DCHECK(block->phis()->length() == 0); - HBasicBlock* pred = block->predecessors()->at(0); - HEnvironment* last_environment = pred->last_environment(); - DCHECK(last_environment != NULL); - // Only copy the environment, if it is later used again. - if (pred->end()->SecondSuccessor() == NULL) { - DCHECK(pred->end()->FirstSuccessor() == block); - } else { - if (pred->end()->FirstSuccessor()->block_id() > block->block_id() || - pred->end()->SecondSuccessor()->block_id() > block->block_id()) { - last_environment = last_environment->Copy(); - } - } - block->UpdateEnvironment(last_environment); - DCHECK(pred->argument_count() >= 0); - argument_count_ = pred->argument_count(); - } else { - // We are at a state join => process phis. - HBasicBlock* pred = block->predecessors()->at(0); - // No need to copy the environment, it cannot be used later. - HEnvironment* last_environment = pred->last_environment(); - for (int i = 0; i < block->phis()->length(); ++i) { - HPhi* phi = block->phis()->at(i); - if (phi->HasMergedIndex()) { - last_environment->SetValueAt(phi->merged_index(), phi); - } - } - for (int i = 0; i < block->deleted_phis()->length(); ++i) { - if (block->deleted_phis()->at(i) < last_environment->length()) { - last_environment->SetValueAt(block->deleted_phis()->at(i), - graph_->GetConstantUndefined()); - } - } - block->UpdateEnvironment(last_environment); - // Pick up the outgoing argument count of one of the predecessors. 
- argument_count_ = pred->argument_count(); - } - HInstruction* current = block->first(); - int start = chunk_->instructions()->length(); - while (current != NULL && !is_aborted()) { - // Code for constants in registers is generated lazily. - if (!current->EmitAtUses()) { - VisitInstruction(current); - } - current = current->next(); - } - int end = chunk_->instructions()->length() - 1; - if (end >= start) { - block->set_first_instruction_index(start); - block->set_last_instruction_index(end); - } - block->set_argument_count(argument_count_); - next_block_ = NULL; - current_block_ = NULL; -} - - -void LChunkBuilder::VisitInstruction(HInstruction* current) { - HInstruction* old_current = current_instruction_; - current_instruction_ = current; - - LInstruction* instr = NULL; - if (current->CanReplaceWithDummyUses()) { - if (current->OperandCount() == 0) { - instr = DefineAsRegister(new(zone()) LDummy()); - } else { - DCHECK(!current->OperandAt(0)->IsControlInstruction()); - instr = DefineAsRegister(new(zone()) - LDummyUse(UseAny(current->OperandAt(0)))); - } - for (int i = 1; i < current->OperandCount(); ++i) { - if (current->OperandAt(i)->IsControlInstruction()) continue; - LInstruction* dummy = - new(zone()) LDummyUse(UseAny(current->OperandAt(i))); - dummy->set_hydrogen_value(current); - chunk_->AddInstruction(dummy, current_block_); - } - } else { - HBasicBlock* successor; - if (current->IsControlInstruction() && - HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) && - successor != NULL) { - instr = new(zone()) LGoto(successor); - } else { - instr = current->CompileToLithium(this); - } - } - - argument_count_ += current->argument_delta(); - DCHECK(argument_count_ >= 0); - - if (instr != NULL) { - AddInstruction(instr, current); - } - - current_instruction_ = old_current; -} - - -void LChunkBuilder::AddInstruction(LInstruction* instr, - HInstruction* hydrogen_val) { -// Associate the hydrogen instruction first, since we may need it for - // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below. - instr->set_hydrogen_value(hydrogen_val); - -#if DEBUG - // Make sure that the lithium instruction has either no fixed register - // constraints in temps or the result OR no uses that are only used at - // start. If this invariant doesn't hold, the register allocator can decide - // to insert a split of a range immediately before the instruction due to an - // already allocated register needing to be used for the instruction's fixed - // register constraint. In this case, The register allocator won't see an - // interference between the split child and the use-at-start (it would if - // the it was just a plain use), so it is free to move the split child into - // the same register that is used for the use-at-start. 
- // See https://code.google.com/p/chromium/issues/detail?id=201590 - if (!(instr->ClobbersRegisters() && - instr->ClobbersDoubleRegisters(isolate()))) { - int fixed = 0; - int used_at_start = 0; - for (UseIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->IsUsedAtStart()) ++used_at_start; - } - if (instr->Output() != NULL) { - if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed; - } - for (TempIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->HasFixedPolicy()) ++fixed; - } - DCHECK(fixed == 0 || used_at_start == 0); - } -#endif - - if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) { - instr = AssignPointerMap(instr); - } - if (FLAG_stress_environments && !instr->HasEnvironment()) { - instr = AssignEnvironment(instr); - } - chunk_->AddInstruction(instr, current_block_); - - CreateLazyBailoutForCall(current_block_, instr, hydrogen_val); -} - - -LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) { - LInstruction* result = new (zone()) LPrologue(); - if (info_->scope()->NeedsContext()) { - result = MarkAsCall(result, instr); - } - return result; -} - - -LInstruction* LChunkBuilder::DoGoto(HGoto* instr) { - return new(zone()) LGoto(instr->FirstSuccessor()); -} - - -LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { - HValue* value = instr->value(); - Representation r = value->representation(); - HType type = value->type(); - ToBooleanHints expected = instr->expected_input_types(); - if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny; - - bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() || - type.IsJSArray() || type.IsHeapNumber() || type.IsString(); - LInstruction* branch = new(zone()) LBranch(UseRegister(value)); - if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) && - (expected & ToBooleanHint::kNeedsMap)) || - expected != ToBooleanHint::kAny)) { - branch = AssignEnvironment(branch); - } - return branch; -} - - -LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* temp = TempRegister(); - return new(zone()) LCmpMapAndBranch(value, temp); -} - - -LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) { - info()->MarkAsRequiresFrame(); - return DefineAsRegister( - new(zone()) LArgumentsLength(UseRegister(length->value()))); -} - - -LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) { - info()->MarkAsRequiresFrame(); - return DefineAsRegister(new(zone()) LArgumentsElements); -} - - -LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch( - HHasInPrototypeChainAndBranch* instr) { - LOperand* object = UseRegister(instr->object()); - LOperand* prototype = UseRegister(instr->prototype()); - LHasInPrototypeChainAndBranch* result = - new (zone()) LHasInPrototypeChainAndBranch(object, prototype); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) { - LOperand* receiver = UseRegisterAtStart(instr->receiver()); - LOperand* function = UseRegisterAtStart(instr->function()); - LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function); - return AssignEnvironment(DefineAsRegister(result)); -} - - -LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) { - LOperand* function = UseFixed(instr->function(), a1); - LOperand* 
receiver = UseFixed(instr->receiver(), a0); - LOperand* length = UseFixed(instr->length(), a2); - LOperand* elements = UseFixed(instr->elements(), a3); - LApplyArguments* result = new(zone()) LApplyArguments(function, - receiver, - length, - elements); - return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) { - int argc = instr->OperandCount(); - for (int i = 0; i < argc; ++i) { - LOperand* argument = Use(instr->argument(i)); - AddInstruction(new(zone()) LPushArgument(argument), instr); - } - return NULL; -} - - -LInstruction* LChunkBuilder::DoStoreCodeEntry( - HStoreCodeEntry* store_code_entry) { - LOperand* function = UseRegister(store_code_entry->function()); - LOperand* code_object = UseTempRegister(store_code_entry->code_object()); - return new(zone()) LStoreCodeEntry(function, code_object); -} - - -LInstruction* LChunkBuilder::DoInnerAllocatedObject( - HInnerAllocatedObject* instr) { - LOperand* base_object = UseRegisterAtStart(instr->base_object()); - LOperand* offset = UseRegisterOrConstantAtStart(instr->offset()); - return DefineAsRegister( - new(zone()) LInnerAllocatedObject(base_object, offset)); -} - - -LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) { - return instr->HasNoUses() - ? NULL - : DefineAsRegister(new(zone()) LThisFunction); -} - - -LInstruction* LChunkBuilder::DoContext(HContext* instr) { - if (instr->HasNoUses()) return NULL; - - if (info()->IsStub()) { - return DefineFixed(new(zone()) LContext, cp); - } - - return DefineAsRegister(new(zone()) LContext); -} - - -LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) { - LOperand* context = UseFixed(instr->context(), cp); - return MarkAsCall(new(zone()) LDeclareGlobals(context), instr); -} - - -LInstruction* LChunkBuilder::DoCallWithDescriptor( - HCallWithDescriptor* instr) { - CallInterfaceDescriptor descriptor = instr->descriptor(); - DCHECK_EQ(descriptor.GetParameterCount() + - LCallWithDescriptor::kImplicitRegisterParameterCount, - instr->OperandCount()); - - LOperand* target = UseRegisterOrConstantAtStart(instr->target()); - ZoneList<LOperand*> ops(instr->OperandCount(), zone()); - // Target - ops.Add(target, zone()); - // Context - LOperand* op = UseFixed(instr->OperandAt(1), cp); - ops.Add(op, zone()); - // Load register parameters. - int i = 0; - for (; i < descriptor.GetRegisterParameterCount(); i++) { - op = UseFixed(instr->OperandAt( - i + LCallWithDescriptor::kImplicitRegisterParameterCount), - descriptor.GetRegisterParameter(i)); - ops.Add(op, zone()); - } - // Push stack parameters.
- for (; i < descriptor.GetParameterCount(); i++) { - op = UseAny(instr->OperandAt( - i + LCallWithDescriptor::kImplicitRegisterParameterCount)); - AddInstruction(new (zone()) LPushArgument(op), instr); - } - - LCallWithDescriptor* result = new(zone()) LCallWithDescriptor( - descriptor, ops, zone()); - if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) { - result->MarkAsSyntacticTailCall(); - } - return MarkAsCall(DefineFixed(result, v0), instr); -} - - -LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) { - LOperand* context = UseFixed(instr->context(), cp); - LOperand* function = UseFixed(instr->function(), a1); - LInvokeFunction* result = new(zone()) LInvokeFunction(context, function); - if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) { - result->MarkAsSyntacticTailCall(); - } - return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { - switch (instr->op()) { - case kMathFloor: - return DoMathFloor(instr); - case kMathRound: - return DoMathRound(instr); - case kMathFround: - return DoMathFround(instr); - case kMathAbs: - return DoMathAbs(instr); - case kMathLog: - return DoMathLog(instr); - case kMathCos: - return DoMathCos(instr); - case kMathSin: - return DoMathSin(instr); - case kMathExp: - return DoMathExp(instr); - case kMathSqrt: - return DoMathSqrt(instr); - case kMathPowHalf: - return DoMathPowHalf(instr); - case kMathClz32: - return DoMathClz32(instr); - default: - UNREACHABLE(); - } -} - - -LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseFixedDouble(instr->value(), f4); - return MarkAsCall(DefineFixedDouble(new(zone()) LMathLog(input), f4), instr); -} - - -LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) { - LOperand* input = UseRegisterAtStart(instr->value()); - LMathClz32* result = new(zone()) LMathClz32(input); - return DefineAsRegister(result); -} - -LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseFixedDouble(instr->value(), f4); - return MarkAsCall(DefineFixedDouble(new (zone()) LMathCos(input), f4), instr); -} - -LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseFixedDouble(instr->value(), f4); - return MarkAsCall(DefineFixedDouble(new (zone()) LMathSin(input), f4), instr); -} - -LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseFixedDouble(instr->value(), f4); - return MarkAsCall(DefineFixedDouble(new (zone()) LMathExp(input), f4), instr); -} - - -LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) { - // Input cannot be the same as the result, see LCodeGen::DoMathPowHalf. 
- LOperand* input = UseFixedDouble(instr->value(), f8); - LOperand* temp = TempDoubleRegister(); - LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp); - return DefineFixedDouble(result, f4); -} - - -LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) { - LOperand* input = UseRegister(instr->value()); - LMathFround* result = new (zone()) LMathFround(input); - return DefineAsRegister(result); -} - - -LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) { - Representation r = instr->value()->representation(); - LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32()) - ? NULL - : UseFixed(instr->context(), cp); - LOperand* input = UseRegister(instr->value()); - LInstruction* result = - DefineAsRegister(new(zone()) LMathAbs(context, input)); - if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result); - if (!r.IsDouble()) result = AssignEnvironment(result); - return result; -} - - -LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) { - LOperand* input = UseRegister(instr->value()); - LOperand* temp = TempRegister(); - LMathFloor* result = new(zone()) LMathFloor(input, temp); - return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); -} - - -LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) { - LOperand* input = UseRegister(instr->value()); - LMathSqrt* result = new(zone()) LMathSqrt(input); - return DefineAsRegister(result); -} - - -LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) { - LOperand* input = UseRegister(instr->value()); - LOperand* temp = TempDoubleRegister(); - LMathRound* result = new(zone()) LMathRound(input, temp); - return AssignEnvironment(DefineAsRegister(result)); -} - - -LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) { - LOperand* context = UseFixed(instr->context(), cp); - LOperand* constructor = UseFixed(instr->constructor(), a1); - LCallNewArray* result = new(zone()) LCallNewArray(context, constructor); - return MarkAsCall(DefineFixed(result, v0), instr); -} - - -LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) { - LOperand* context = UseFixed(instr->context(), cp); - return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), v0), instr); -} - - -LInstruction* LChunkBuilder::DoRor(HRor* instr) { - return DoShift(Token::ROR, instr); -} - - -LInstruction* LChunkBuilder::DoShr(HShr* instr) { - return DoShift(Token::SHR, instr); -} - - -LInstruction* LChunkBuilder::DoSar(HSar* instr) { - return DoShift(Token::SAR, instr); -} - - -LInstruction* LChunkBuilder::DoShl(HShl* instr) { - return DoShift(Token::SHL, instr); -} - - -LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32)); - - LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); - LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand()); - return DefineAsRegister(new(zone()) LBitI(left, right)); - } else { - return DoArithmeticT(instr->op(), instr); - } -} - - -LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); 
- int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I( - dividend, divisor)); - if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || - (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) || - (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && - divisor != 1 && divisor != -1)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) { - DCHECK(instr->representation().IsInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI( - dividend, divisor)); - if (divisor == 0 || - (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || - !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoDivI(HDiv* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - LOperand* divisor = UseRegister(instr->right()); - LOperand* temp = TempRegister(); - LInstruction* result = - DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp)); - if (instr->CheckFlag(HValue::kCanBeDivByZero) || - instr->CheckFlag(HValue::kBailoutOnMinusZero) || - (instr->CheckFlag(HValue::kCanOverflow) && - !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) || - (!instr->IsMathFloorOfDiv() && - !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoDiv(HDiv* instr) { - if (instr->representation().IsSmiOrInteger32()) { - if (instr->RightIsPowerOf2()) { - return DoDivByPowerOf2I(instr); - } else if (instr->right()->IsConstant()) { - return DoDivByConstI(instr); - } else { - return DoDivI(instr); - } - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::DIV, instr); - } else { - return DoArithmeticT(Token::DIV, instr); - } -} - - -LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) { - LOperand* dividend = UseRegisterAtStart(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I( - dividend, divisor)); - if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || - (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) { - DCHECK(instr->representation().IsInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LOperand* temp = - ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) || - (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ? 
- NULL : TempRegister(); - LInstruction* result = DefineAsRegister( - new(zone()) LFlooringDivByConstI(dividend, divisor, temp)); - if (divisor == 0 || - (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - LOperand* divisor = UseRegister(instr->right()); - LInstruction* result = - DefineAsRegister(new (zone()) LFlooringDivI(dividend, divisor)); - if (instr->CheckFlag(HValue::kCanBeDivByZero) || - instr->CheckFlag(HValue::kBailoutOnMinusZero) || - (instr->CheckFlag(HValue::kCanOverflow))) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) { - if (instr->RightIsPowerOf2()) { - return DoFlooringDivByPowerOf2I(instr); - } else if (instr->right()->IsConstant()) { - return DoFlooringDivByConstI(instr); - } else { - return DoFlooringDivI(instr); - } -} - - -LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegisterAtStart(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I( - dividend, divisor)); - if (instr->CheckFlag(HValue::kLeftCanBeNegative) && - instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = DefineAsRegister(new(zone()) LModByConstI( - dividend, divisor)); - if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoModI(HMod* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - LOperand* divisor = UseRegister(instr->right()); - LInstruction* result = DefineAsRegister(new(zone()) LModI( - dividend, divisor)); - if (instr->CheckFlag(HValue::kCanBeDivByZero) || - instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoMod(HMod* instr) { - if (instr->representation().IsSmiOrInteger32()) { - return instr->RightIsPowerOf2() ? 
DoModByPowerOf2I(instr) : DoModI(instr); - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::MOD, instr); - } else { - return DoArithmeticT(Token::MOD, instr); - } -} - - -LInstruction* LChunkBuilder::DoMul(HMul* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - HValue* left = instr->BetterLeftOperand(); - HValue* right = instr->BetterRightOperand(); - LOperand* left_op; - LOperand* right_op; - bool can_overflow = instr->CheckFlag(HValue::kCanOverflow); - bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero); - - int32_t constant_value = 0; - if (right->IsConstant()) { - HConstant* constant = HConstant::cast(right); - constant_value = constant->Integer32Value(); - // Constants -1, 0 and 1 can be optimized if the result can overflow. - // For other constants, it can be optimized only without overflow. - if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) { - left_op = UseRegisterAtStart(left); - right_op = UseConstant(right); - } else { - if (bailout_on_minus_zero) { - left_op = UseRegister(left); - } else { - left_op = UseRegisterAtStart(left); - } - right_op = UseRegister(right); - } - } else { - if (bailout_on_minus_zero) { - left_op = UseRegister(left); - } else { - left_op = UseRegisterAtStart(left); - } - right_op = UseRegister(right); - } - LMulI* mul = new(zone()) LMulI(left_op, right_op); - if (right_op->IsConstantOperand() - ? ((can_overflow && constant_value == -1) || - (bailout_on_minus_zero && constant_value <= 0)) - : (can_overflow || bailout_on_minus_zero)) { - AssignEnvironment(mul); - } - return DefineAsRegister(mul); - - } else if (instr->representation().IsDouble()) { - if (IsMipsArchVariant(kMips32r2)) { - if (instr->HasOneUse() && instr->uses().value()->IsAdd()) { - HAdd* add = HAdd::cast(instr->uses().value()); - if (instr == add->left()) { - // This mul is the lhs of an add. The add and mul will be folded - // into a multiply-add. - return NULL; - } - if (instr == add->right() && !add->left()->IsMul()) { - // This mul is the rhs of an add, where the lhs is not another mul. - // The add and mul will be folded into a multiply-add. 
- return NULL; - } - } - } - return DoArithmeticD(Token::MUL, instr); - } else { - return DoArithmeticT(Token::MUL, instr); - } -} - - -LInstruction* LChunkBuilder::DoSub(HSub* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseOrConstantAtStart(instr->right()); - LSubI* sub = new(zone()) LSubI(left, right); - LInstruction* result = DefineAsRegister(sub); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::SUB, instr); - } else { - return DoArithmeticT(Token::SUB, instr); - } -} - - -LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) { - LOperand* multiplier_op = UseRegisterAtStart(mul->left()); - LOperand* multiplicand_op = UseRegisterAtStart(mul->right()); - LOperand* addend_op = UseRegisterAtStart(addend); - return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op, - multiplicand_op)); -} - - -LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); - LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand()); - LAddI* add = new(zone()) LAddI(left, right); - LInstruction* result = DefineAsRegister(add); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; - } else if (instr->representation().IsExternal()) { - DCHECK(instr->IsConsistentExternalRepresentation()); - DCHECK(!instr->CheckFlag(HValue::kCanOverflow)); - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseOrConstantAtStart(instr->right()); - LAddI* add = new(zone()) LAddI(left, right); - LInstruction* result = DefineAsRegister(add); - return result; - } else if (instr->representation().IsDouble()) { - if (IsMipsArchVariant(kMips32r2)) { - if (instr->left()->IsMul()) - return DoMultiplyAdd(HMul::cast(instr->left()), instr->right()); - - if (instr->right()->IsMul()) { - DCHECK(!instr->left()->IsMul()); - return DoMultiplyAdd(HMul::cast(instr->right()), instr->left()); - } - } - return DoArithmeticD(Token::ADD, instr); - } else { - return DoArithmeticT(Token::ADD, instr); - } -} - - -LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) { - LOperand* left = NULL; - LOperand* right = NULL; - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - left = UseRegisterAtStart(instr->BetterLeftOperand()); - right = UseOrConstantAtStart(instr->BetterRightOperand()); - } else { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - DCHECK(instr->right()->representation().IsDouble()); - left = UseRegisterAtStart(instr->left()); - right = UseRegisterAtStart(instr->right()); - } - return DefineAsRegister(new(zone()) LMathMinMax(left, right)); -} - - -LInstruction* LChunkBuilder::DoPower(HPower* instr) { - DCHECK(instr->representation().IsDouble()); - // We call a C function for double power. 
It can't trigger a GC. - // We need to use fixed result register for the call. - Representation exponent_type = instr->right()->representation(); - DCHECK(instr->left()->representation().IsDouble()); - LOperand* left = UseFixedDouble(instr->left(), f2); - LOperand* right = - exponent_type.IsDouble() - ? UseFixedDouble(instr->right(), f4) - : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent()); - LPower* result = new(zone()) LPower(left, right); - return MarkAsCall(DefineFixedDouble(result, f0), - instr, - CAN_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { - DCHECK(instr->left()->representation().IsTagged()); - DCHECK(instr->right()->representation().IsTagged()); - LOperand* context = UseFixed(instr->context(), cp); - LOperand* left = UseFixed(instr->left(), a1); - LOperand* right = UseFixed(instr->right(), a0); - LCmpT* result = new(zone()) LCmpT(context, left, right); - return MarkAsCall(DefineFixed(result, v0), instr); -} - - -LInstruction* LChunkBuilder::DoCompareNumericAndBranch( - HCompareNumericAndBranch* instr) { - Representation r = instr->representation(); - if (r.IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(r)); - DCHECK(instr->right()->representation().Equals(r)); - LOperand* left = UseRegisterOrConstantAtStart(instr->left()); - LOperand* right = UseRegisterOrConstantAtStart(instr->right()); - return new(zone()) LCompareNumericAndBranch(left, right); - } else { - DCHECK(r.IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - DCHECK(instr->right()->representation().IsDouble()); - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterAtStart(instr->right()); - return new(zone()) LCompareNumericAndBranch(left, right); - } -} - - -LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch( - HCompareObjectEqAndBranch* instr) { - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterAtStart(instr->right()); - return new(zone()) LCmpObjectEqAndBranch(left, right); -} - - -LInstruction* LChunkBuilder::DoCompareHoleAndBranch( - HCompareHoleAndBranch* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - return new(zone()) LCmpHoleAndBranch(value); -} - - -LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* temp = TempRegister(); - return new(zone()) LIsStringAndBranch(UseRegisterAtStart(instr->value()), - temp); -} - - -LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - return new(zone()) LIsSmiAndBranch(Use(instr->value())); -} - - -LInstruction* LChunkBuilder::DoIsUndetectableAndBranch( - HIsUndetectableAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - return new(zone()) LIsUndetectableAndBranch( - UseRegisterAtStart(instr->value()), TempRegister()); -} - - -LInstruction* LChunkBuilder::DoStringCompareAndBranch( - HStringCompareAndBranch* instr) { - DCHECK(instr->left()->representation().IsTagged()); - DCHECK(instr->right()->representation().IsTagged()); - LOperand* context = UseFixed(instr->context(), cp); - LOperand* left = UseFixed(instr->left(), a1); - LOperand* right = UseFixed(instr->right(), a0); - LStringCompareAndBranch* result = - new(zone()) LStringCompareAndBranch(context, left, right); - return MarkAsCall(result, instr); -} - - -LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch( - 
HHasInstanceTypeAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - return new(zone()) LHasInstanceTypeAndBranch(value); -} - -LInstruction* LChunkBuilder::DoClassOfTestAndBranch( - HClassOfTestAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - return new (zone()) - LClassOfTestAndBranch(UseRegister(instr->value()), TempRegister()); -} - -LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) { - LOperand* string = UseRegisterAtStart(instr->string()); - LOperand* index = UseRegisterOrConstantAtStart(instr->index()); - return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index)); -} - - -LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) { - LOperand* string = UseRegisterAtStart(instr->string()); - LOperand* index = FLAG_debug_code - ? UseRegisterAtStart(instr->index()) - : UseRegisterOrConstantAtStart(instr->index()); - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL; - return new(zone()) LSeqStringSetChar(context, string, index, value); -} - - -LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { - if (!FLAG_debug_code && instr->skip_check()) return NULL; - LOperand* index = UseRegisterOrConstantAtStart(instr->index()); - LOperand* length = !index->IsConstantOperand() - ? UseRegisterOrConstantAtStart(instr->length()) - : UseRegisterAtStart(instr->length()); - LInstruction* result = new(zone()) LBoundsCheck(index, length); - if (!FLAG_debug_code || !instr->skip_check()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) { - // The control instruction marking the end of a block that completed - // abruptly (e.g., threw an exception). There is nothing specific to do. - return NULL; -} - - -LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) { - return NULL; -} - - -LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) { - // All HForceRepresentation instructions should be eliminated in the - // representation change phase of Hydrogen. 
- UNREACHABLE(); -} - - -LInstruction* LChunkBuilder::DoChange(HChange* instr) { - Representation from = instr->from(); - Representation to = instr->to(); - HValue* val = instr->value(); - if (from.IsSmi()) { - if (to.IsTagged()) { - LOperand* value = UseRegister(val); - return DefineSameAsFirst(new(zone()) LDummyUse(value)); - } - from = Representation::Tagged(); - } - if (from.IsTagged()) { - if (to.IsDouble()) { - LOperand* value = UseRegister(val); - LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value)); - if (!val->representation().IsSmi()) result = AssignEnvironment(result); - return result; - } else if (to.IsSmi()) { - LOperand* value = UseRegister(val); - if (val->type().IsSmi()) { - return DefineSameAsFirst(new(zone()) LDummyUse(value)); - } - return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value))); - } else { - DCHECK(to.IsInteger32()); - if (val->type().IsSmi() || val->representation().IsSmi()) { - LOperand* value = UseRegisterAtStart(val); - return DefineAsRegister(new(zone()) LSmiUntag(value, false)); - } else { - LOperand* value = UseRegister(val); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempDoubleRegister(); - LInstruction* result = - DefineSameAsFirst(new(zone()) LTaggedToI(value, temp1, temp2)); - if (!val->representation().IsSmi()) result = AssignEnvironment(result); - return result; - } - } - } else if (from.IsDouble()) { - if (to.IsTagged()) { - info()->MarkAsDeferredCalling(); - LOperand* value = UseRegister(val); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - LUnallocated* result_temp = TempRegister(); - LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2); - return AssignPointerMap(Define(result, result_temp)); - } else if (to.IsSmi()) { - LOperand* value = UseRegister(val); - return AssignEnvironment( - DefineAsRegister(new(zone()) LDoubleToSmi(value))); - } else { - DCHECK(to.IsInteger32()); - LOperand* value = UseRegister(val); - LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value)); - if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result); - return result; - } - } else if (from.IsInteger32()) { - info()->MarkAsDeferredCalling(); - if (to.IsTagged()) { - if (!instr->CheckFlag(HValue::kCanOverflow)) { - LOperand* value = UseRegisterAtStart(val); - return DefineAsRegister(new(zone()) LSmiTag(value)); - } else if (val->CheckFlag(HInstruction::kUint32)) { - LOperand* value = UseRegisterAtStart(val); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2); - return AssignPointerMap(DefineAsRegister(result)); - } else { - LOperand* value = UseRegisterAtStart(val); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - LNumberTagI* result = new(zone()) LNumberTagI(value, temp1, temp2); - return AssignPointerMap(DefineAsRegister(result)); - } - } else if (to.IsSmi()) { - LOperand* value = UseRegister(val); - LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value)); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; - } else { - DCHECK(to.IsDouble()); - if (val->CheckFlag(HInstruction::kUint32)) { - return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val))); - } else { - return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val))); - } - } - } - UNREACHABLE(); -} - - -LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) { - LOperand* value = 
UseRegisterAtStart(instr->value()); - LInstruction* result = new(zone()) LCheckNonSmi(value); - if (!instr->value()->type().IsHeapObject()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - return AssignEnvironment(new(zone()) LCheckSmi(value)); -} - - -LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered( - HCheckArrayBufferNotNeutered* instr) { - LOperand* view = UseRegisterAtStart(instr->value()); - LCheckArrayBufferNotNeutered* result = - new (zone()) LCheckArrayBufferNotNeutered(view); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - LInstruction* result = new(zone()) LCheckInstanceType(value); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - return AssignEnvironment(new(zone()) LCheckValue(value)); -} - - -LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) { - if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps; - LOperand* value = UseRegisterAtStart(instr->value()); - LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value)); - if (instr->HasMigrationTarget()) { - info()->MarkAsDeferredCalling(); - result = AssignPointerMap(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) { - HValue* value = instr->value(); - Representation input_rep = value->representation(); - LOperand* reg = UseRegister(value); - if (input_rep.IsDouble()) { - // Revisit this decision, here and 8 lines below. - return DefineAsRegister(new(zone()) LClampDToUint8(reg, - TempDoubleRegister())); - } else if (input_rep.IsInteger32()) { - return DefineAsRegister(new(zone()) LClampIToUint8(reg)); - } else { - DCHECK(input_rep.IsSmiOrTagged()); - LClampTToUint8* result = - new(zone()) LClampTToUint8(reg, TempDoubleRegister()); - return AssignEnvironment(DefineAsRegister(result)); - } -} - - -LInstruction* LChunkBuilder::DoReturn(HReturn* instr) { - LOperand* context = info()->IsStub() - ? 
UseFixed(instr->context(), cp) - : NULL; - LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count()); - return new(zone()) LReturn(UseFixed(instr->value(), v0), context, - parameter_count); -} - - -LInstruction* LChunkBuilder::DoConstant(HConstant* instr) { - Representation r = instr->representation(); - if (r.IsSmi()) { - return DefineAsRegister(new(zone()) LConstantS); - } else if (r.IsInteger32()) { - return DefineAsRegister(new(zone()) LConstantI); - } else if (r.IsDouble()) { - return DefineAsRegister(new(zone()) LConstantD); - } else if (r.IsExternal()) { - return DefineAsRegister(new(zone()) LConstantE); - } else if (r.IsTagged()) { - return DefineAsRegister(new(zone()) LConstantT); - } else { - UNREACHABLE(); - } -} - - -LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) { - LOperand* context = UseRegisterAtStart(instr->value()); - LInstruction* result = - DefineAsRegister(new(zone()) LLoadContextSlot(context)); - if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) { - LOperand* context; - LOperand* value; - if (instr->NeedsWriteBarrier()) { - context = UseTempRegister(instr->context()); - value = UseTempRegister(instr->value()); - } else { - context = UseRegister(instr->context()); - value = UseRegister(instr->value()); - } - LInstruction* result = new(zone()) LStoreContextSlot(context, value); - if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) { - LOperand* obj = UseRegisterAtStart(instr->object()); - return DefineAsRegister(new(zone()) LLoadNamedField(obj)); -} - - -LInstruction* LChunkBuilder::DoLoadFunctionPrototype( - HLoadFunctionPrototype* instr) { - return AssignEnvironment(DefineAsRegister( - new(zone()) LLoadFunctionPrototype(UseRegister(instr->function())))); -} - - -LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) { - return DefineAsRegister(new(zone()) LLoadRoot); -} - - -LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { - DCHECK(instr->key()->representation().IsSmiOrInteger32()); - ElementsKind elements_kind = instr->elements_kind(); - LOperand* key = UseRegisterOrConstantAtStart(instr->key()); - LInstruction* result = NULL; - - if (!instr->is_fixed_typed_array()) { - LOperand* obj = NULL; - if (instr->representation().IsDouble()) { - obj = UseRegister(instr->elements()); - } else { - DCHECK(instr->representation().IsSmiOrTagged()); - obj = UseRegisterAtStart(instr->elements()); - } - result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr)); - } else { - DCHECK( - (instr->representation().IsInteger32() && - !IsDoubleOrFloatElementsKind(elements_kind)) || - (instr->representation().IsDouble() && - IsDoubleOrFloatElementsKind(elements_kind))); - LOperand* backing_store = UseRegister(instr->elements()); - LOperand* backing_store_owner = UseAny(instr->backing_store_owner()); - result = DefineAsRegister( - new (zone()) LLoadKeyed(backing_store, key, backing_store_owner)); - } - - bool needs_environment; - if (instr->is_fixed_typed_array()) { - // see LCodeGen::DoLoadKeyedExternalArray - needs_environment = elements_kind == UINT32_ELEMENTS && - !instr->CheckFlag(HInstruction::kUint32); - } else { - // see LCodeGen::DoLoadKeyedFixedDoubleArray and - // LCodeGen::DoLoadKeyedFixedArray - needs_environment 
= - instr->RequiresHoleCheck() || - (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && info()->IsStub()); - } - - if (needs_environment) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { - if (!instr->is_fixed_typed_array()) { - DCHECK(instr->elements()->representation().IsTagged()); - bool needs_write_barrier = instr->NeedsWriteBarrier(); - LOperand* object = NULL; - LOperand* val = NULL; - LOperand* key = NULL; - - if (instr->value()->representation().IsDouble()) { - object = UseRegisterAtStart(instr->elements()); - key = UseRegisterOrConstantAtStart(instr->key()); - val = UseRegister(instr->value()); - } else { - DCHECK(instr->value()->representation().IsSmiOrTagged()); - if (needs_write_barrier) { - object = UseTempRegister(instr->elements()); - val = UseTempRegister(instr->value()); - key = UseTempRegister(instr->key()); - } else { - object = UseRegisterAtStart(instr->elements()); - val = UseRegisterAtStart(instr->value()); - key = UseRegisterOrConstantAtStart(instr->key()); - } - } - - return new (zone()) LStoreKeyed(object, key, val, nullptr); - } - - DCHECK( - (instr->value()->representation().IsInteger32() && - !IsDoubleOrFloatElementsKind(instr->elements_kind())) || - (instr->value()->representation().IsDouble() && - IsDoubleOrFloatElementsKind(instr->elements_kind()))); - DCHECK(instr->elements()->representation().IsExternal()); - LOperand* val = UseRegister(instr->value()); - LOperand* key = UseRegisterOrConstantAtStart(instr->key()); - LOperand* backing_store = UseRegister(instr->elements()); - LOperand* backing_store_owner = UseAny(instr->backing_store_owner()); - return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner); -} - - -LInstruction* LChunkBuilder::DoTransitionElementsKind( - HTransitionElementsKind* instr) { - if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) { - LOperand* object = UseRegister(instr->object()); - LOperand* new_map_reg = TempRegister(); - LTransitionElementsKind* result = - new(zone()) LTransitionElementsKind(object, NULL, new_map_reg); - return result; - } else { - LOperand* object = UseFixed(instr->object(), a0); - LOperand* context = UseFixed(instr->context(), cp); - LTransitionElementsKind* result = - new(zone()) LTransitionElementsKind(object, context, NULL); - return MarkAsCall(result, instr); - } -} - - -LInstruction* LChunkBuilder::DoTrapAllocationMemento( - HTrapAllocationMemento* instr) { - LOperand* object = UseRegister(instr->object()); - LOperand* temp = TempRegister(); - LTrapAllocationMemento* result = - new(zone()) LTrapAllocationMemento(object, temp); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) { - info()->MarkAsDeferredCalling(); - LOperand* context = UseFixed(instr->context(), cp); - LOperand* object = Use(instr->object()); - LOperand* elements = Use(instr->elements()); - LOperand* key = UseRegisterOrConstant(instr->key()); - LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity()); - - LMaybeGrowElements* result = new (zone()) - LMaybeGrowElements(context, object, elements, key, current_capacity); - DefineFixed(result, v0); - return AssignPointerMap(AssignEnvironment(result)); -} - - -LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { - bool is_in_object = instr->access().IsInobject(); - bool needs_write_barrier = instr->NeedsWriteBarrier(); - bool needs_write_barrier_for_map = 
instr->has_transition() && - instr->NeedsWriteBarrierForMap(); - - LOperand* obj; - if (needs_write_barrier) { - obj = is_in_object - ? UseRegister(instr->object()) - : UseTempRegister(instr->object()); - } else { - obj = needs_write_barrier_for_map - ? UseRegister(instr->object()) - : UseRegisterAtStart(instr->object()); - } - - LOperand* val; - if (needs_write_barrier) { - val = UseTempRegister(instr->value()); - } else if (instr->field_representation().IsDouble()) { - val = UseRegisterAtStart(instr->value()); - } else { - val = UseRegister(instr->value()); - } - - // We need a temporary register for write barrier of the map field. - LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL; - - return new(zone()) LStoreNamedField(obj, val, temp); -} - - -LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) { - LOperand* context = UseFixed(instr->context(), cp); - LOperand* left = UseFixed(instr->left(), a1); - LOperand* right = UseFixed(instr->right(), a0); - return MarkAsCall( - DefineFixed(new(zone()) LStringAdd(context, left, right), v0), - instr); -} - - -LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) { - LOperand* string = UseTempRegister(instr->string()); - LOperand* index = UseTempRegister(instr->index()); - LOperand* context = UseAny(instr->context()); - LStringCharCodeAt* result = - new(zone()) LStringCharCodeAt(context, string, index); - return AssignPointerMap(DefineAsRegister(result)); -} - - -LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) { - LOperand* char_code = UseRegister(instr->value()); - LOperand* context = UseAny(instr->context()); - LStringCharFromCode* result = - new(zone()) LStringCharFromCode(context, char_code); - return AssignPointerMap(DefineAsRegister(result)); -} - - -LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) { - LOperand* size = UseRegisterOrConstant(instr->size()); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - if (instr->IsAllocationFolded()) { - LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2); - return DefineAsRegister(result); - } else { - info()->MarkAsDeferredCalling(); - LOperand* context = UseAny(instr->context()); - LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2); - return AssignPointerMap(DefineAsRegister(result)); - } -} - - -LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) { - DCHECK(argument_count_ == 0); - allocator_->MarkAsOsrEntry(); - current_block_->last_environment()->set_ast_id(instr->ast_id()); - return AssignEnvironment(new(zone()) LOsrEntry); -} - - -LInstruction* LChunkBuilder::DoParameter(HParameter* instr) { - LParameter* result = new(zone()) LParameter; - if (instr->kind() == HParameter::STACK_PARAMETER) { - int spill_index = chunk()->GetParameterStackSlot(instr->index()); - return DefineAsSpilled(result, spill_index); - } else { - DCHECK(info()->IsStub()); - CallInterfaceDescriptor descriptor = graph()->descriptor(); - int index = static_cast<int>(instr->index()); - Register reg = descriptor.GetRegisterParameter(index); - return DefineFixed(result, reg); - } -} - - -LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) { - // Use an index that corresponds to the location in the unoptimized frame, - // which the optimized frame will subsume.
- int env_index = instr->index(); - int spill_index = 0; - if (instr->environment()->is_parameter_index(env_index)) { - spill_index = chunk()->GetParameterStackSlot(env_index); - } else { - spill_index = env_index - instr->environment()->first_local_index(); - if (spill_index > LUnallocated::kMaxFixedSlotIndex) { - Retry(kTooManySpillSlotsNeededForOSR); - spill_index = 0; - } - spill_index += StandardFrameConstants::kFixedSlotCount; - } - return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index); -} - - -LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) { - // There are no real uses of the arguments object. - // arguments.length and element access are supported directly on - // stack arguments, and any real arguments object use causes a bailout. - // So this value is never used. - return NULL; -} - - -LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) { - instr->ReplayEnvironment(current_block_->last_environment()); - - // There are no real uses of a captured object. - return NULL; -} - - -LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) { - info()->MarkAsRequiresFrame(); - LOperand* args = UseRegister(instr->arguments()); - LOperand* length = UseRegisterOrConstantAtStart(instr->length()); - LOperand* index = UseRegisterOrConstantAtStart(instr->index()); - return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index)); -} - - -LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) { - LOperand* context = UseFixed(instr->context(), cp); - LOperand* value = UseFixed(instr->value(), a3); - LTypeof* result = new (zone()) LTypeof(context, value); - return MarkAsCall(DefineFixed(result, v0), instr); -} - - -LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) { - return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value())); -} - - -LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) { - instr->ReplayEnvironment(current_block_->last_environment()); - return NULL; -} - - -LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) { - if (instr->is_function_entry()) { - LOperand* context = UseFixed(instr->context(), cp); - return MarkAsCall(new(zone()) LStackCheck(context), instr); - } else { - DCHECK(instr->is_backwards_branch()); - LOperand* context = UseAny(instr->context()); - return AssignEnvironment( - AssignPointerMap(new(zone()) LStackCheck(context))); - } -} - - -LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { - HEnvironment* outer = current_block_->last_environment(); - outer->set_ast_id(instr->ReturnId()); - HConstant* undefined = graph()->GetConstantUndefined(); - HEnvironment* inner = outer->CopyForInlining( - instr->closure(), instr->arguments_count(), instr->function(), undefined, - instr->inlining_kind(), instr->syntactic_tail_call_mode()); - // Only replay binding of arguments object if it wasn't removed from graph. 
- if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) { - inner->Bind(instr->arguments_var(), instr->arguments_object()); - } - inner->BindContext(instr->closure_context()); - inner->set_entry(instr); - current_block_->UpdateEnvironment(inner); - return NULL; -} - - -LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) { - LInstruction* pop = NULL; - - HEnvironment* env = current_block_->last_environment(); - - if (env->entry()->arguments_pushed()) { - int argument_count = env->arguments_environment()->parameter_count(); - pop = new(zone()) LDrop(argument_count); - DCHECK(instr->argument_delta() == -argument_count); - } - - HEnvironment* outer = current_block_->last_environment()-> - DiscardInlined(false); - current_block_->UpdateEnvironment(outer); - - return pop; -} - - -LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) { - LOperand* context = UseFixed(instr->context(), cp); - LOperand* object = UseFixed(instr->enumerable(), a0); - LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object); - return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) { - LOperand* map = UseRegister(instr->map()); - return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map))); -} - - -LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* map = UseRegisterAtStart(instr->map()); - return AssignEnvironment(new(zone()) LCheckMapValue(value, map)); -} - - -LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) { - LOperand* object = UseRegister(instr->object()); - LOperand* index = UseTempRegister(instr->index()); - LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index); - LInstruction* result = DefineSameAsFirst(load); - return AssignPointerMap(result); -} - -} // namespace internal -} // namespace v8 - -#endif // V8_TARGET_ARCH_MIPS diff --git a/src/crankshaft/mips/lithium-mips.h b/src/crankshaft/mips/lithium-mips.h deleted file mode 100644 index c7fbfafa2e..0000000000 --- a/src/crankshaft/mips/lithium-mips.h +++ /dev/null @@ -1,2450 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_MIPS_LITHIUM_MIPS_H_ -#define V8_CRANKSHAFT_MIPS_LITHIUM_MIPS_H_ - -#include "src/crankshaft/hydrogen.h" -#include "src/crankshaft/lithium.h" -#include "src/crankshaft/lithium-allocator.h" -#include "src/safepoint-table.h" -#include "src/utils.h" - -namespace v8 { -namespace internal { - -// Forward declarations. 
-class LCodeGen; - -#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \ - V(AccessArgumentsAt) \ - V(AddI) \ - V(Allocate) \ - V(ApplyArguments) \ - V(ArgumentsElements) \ - V(ArgumentsLength) \ - V(ArithmeticD) \ - V(ArithmeticT) \ - V(BitI) \ - V(BoundsCheck) \ - V(Branch) \ - V(CallWithDescriptor) \ - V(CallNewArray) \ - V(CallRuntime) \ - V(CheckArrayBufferNotNeutered) \ - V(CheckInstanceType) \ - V(CheckMaps) \ - V(CheckMapValue) \ - V(CheckNonSmi) \ - V(CheckSmi) \ - V(CheckValue) \ - V(ClampDToUint8) \ - V(ClampIToUint8) \ - V(ClampTToUint8) \ - V(ClassOfTestAndBranch) \ - V(CompareNumericAndBranch) \ - V(CmpObjectEqAndBranch) \ - V(CmpHoleAndBranch) \ - V(CmpMapAndBranch) \ - V(CmpT) \ - V(ConstantD) \ - V(ConstantE) \ - V(ConstantI) \ - V(ConstantS) \ - V(ConstantT) \ - V(Context) \ - V(DebugBreak) \ - V(DeclareGlobals) \ - V(Deoptimize) \ - V(DivByConstI) \ - V(DivByPowerOf2I) \ - V(DivI) \ - V(DoubleToI) \ - V(DoubleToSmi) \ - V(Drop) \ - V(Dummy) \ - V(DummyUse) \ - V(FastAllocate) \ - V(FlooringDivByConstI) \ - V(FlooringDivByPowerOf2I) \ - V(FlooringDivI) \ - V(ForInCacheArray) \ - V(ForInPrepareMap) \ - V(Goto) \ - V(HasInPrototypeChainAndBranch) \ - V(HasInstanceTypeAndBranch) \ - V(InnerAllocatedObject) \ - V(InstructionGap) \ - V(Integer32ToDouble) \ - V(InvokeFunction) \ - V(IsStringAndBranch) \ - V(IsSmiAndBranch) \ - V(IsUndetectableAndBranch) \ - V(Label) \ - V(LazyBailout) \ - V(LoadContextSlot) \ - V(LoadRoot) \ - V(LoadFieldByIndex) \ - V(LoadFunctionPrototype) \ - V(LoadKeyed) \ - V(LoadNamedField) \ - V(MathAbs) \ - V(MathCos) \ - V(MathSin) \ - V(MathExp) \ - V(MathClz32) \ - V(MathFloor) \ - V(MathFround) \ - V(MathLog) \ - V(MathMinMax) \ - V(MathPowHalf) \ - V(MathRound) \ - V(MathSqrt) \ - V(MaybeGrowElements) \ - V(ModByConstI) \ - V(ModByPowerOf2I) \ - V(ModI) \ - V(MulI) \ - V(MultiplyAddD) \ - V(NumberTagD) \ - V(NumberTagI) \ - V(NumberTagU) \ - V(NumberUntagD) \ - V(OsrEntry) \ - V(Parameter) \ - V(Power) \ - V(Prologue) \ - V(PushArgument) \ - V(Return) \ - V(SeqStringGetChar) \ - V(SeqStringSetChar) \ - V(ShiftI) \ - V(SmiTag) \ - V(SmiUntag) \ - V(StackCheck) \ - V(StoreCodeEntry) \ - V(StoreContextSlot) \ - V(StoreKeyed) \ - V(StoreNamedField) \ - V(StringAdd) \ - V(StringCharCodeAt) \ - V(StringCharFromCode) \ - V(StringCompareAndBranch) \ - V(SubI) \ - V(TaggedToI) \ - V(ThisFunction) \ - V(TransitionElementsKind) \ - V(TrapAllocationMemento) \ - V(Typeof) \ - V(TypeofIsAndBranch) \ - V(Uint32ToDouble) \ - V(UnknownOSRValue) \ - V(WrapReceiver) - -#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \ - Opcode opcode() const final { return LInstruction::k##type; } \ - void CompileToNative(LCodeGen* generator) final; \ - const char* Mnemonic() const final { return mnemonic; } \ - static L##type* cast(LInstruction* instr) { \ - DCHECK(instr->Is##type()); \ - return reinterpret_cast(instr); \ - } - - -#define DECLARE_HYDROGEN_ACCESSOR(type) \ - H##type* hydrogen() const { \ - return H##type::cast(hydrogen_value()); \ - } - - -class LInstruction : public ZoneObject { - public: - LInstruction() - : environment_(NULL), - hydrogen_value_(NULL), - bit_field_(IsCallBits::encode(false)) { - } - - virtual ~LInstruction() {} - - virtual void CompileToNative(LCodeGen* generator) = 0; - virtual const char* Mnemonic() const = 0; - virtual void PrintTo(StringStream* stream); - virtual void PrintDataTo(StringStream* stream); - virtual void PrintOutputOperandTo(StringStream* stream); - - enum Opcode { - // Declare a unique enum value for each instruction. 
-#define DECLARE_OPCODE(type) k##type, - LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) - kNumberOfInstructions -#undef DECLARE_OPCODE - }; - - virtual Opcode opcode() const = 0; - - // Declare non-virtual type testers for all leaf IR classes. -#define DECLARE_PREDICATE(type) \ - bool Is##type() const { return opcode() == k##type; } - LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE) -#undef DECLARE_PREDICATE - - // Declare virtual predicates for instructions that don't have - // an opcode. - virtual bool IsGap() const { return false; } - - virtual bool IsControl() const { return false; } - - // Try deleting this instruction if possible. - virtual bool TryDelete() { return false; } - - void set_environment(LEnvironment* env) { environment_ = env; } - LEnvironment* environment() const { return environment_; } - bool HasEnvironment() const { return environment_ != NULL; } - - void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); } - LPointerMap* pointer_map() const { return pointer_map_.get(); } - bool HasPointerMap() const { return pointer_map_.is_set(); } - - void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; } - HValue* hydrogen_value() const { return hydrogen_value_; } - - void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); } - bool IsCall() const { return IsCallBits::decode(bit_field_); } - - void MarkAsSyntacticTailCall() { - bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true); - } - bool IsSyntacticTailCall() const { - return IsSyntacticTailCallBits::decode(bit_field_); - } - - // Interface to the register allocator and iterators. - bool ClobbersTemps() const { return IsCall(); } - bool ClobbersRegisters() const { return IsCall(); } - virtual bool ClobbersDoubleRegisters(Isolate* isolate) const { - return IsCall(); - } - - // Interface to the register allocator and iterators. - bool IsMarkedAsCall() const { return IsCall(); } - - virtual bool HasResult() const = 0; - virtual LOperand* result() const = 0; - - LOperand* FirstInput() { return InputAt(0); } - LOperand* Output() { return HasResult() ? result() : NULL; } - - virtual bool HasInterestingComment(LCodeGen* gen) const { return true; } - -#ifdef DEBUG - void VerifyCall(); -#endif - - virtual int InputCount() = 0; - virtual LOperand* InputAt(int i) = 0; - - private: - // Iterator interface. - friend class InputIterator; - - friend class TempIterator; - virtual int TempCount() = 0; - virtual LOperand* TempAt(int i) = 0; - - class IsCallBits: public BitField<bool, 0, 1> {}; - class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> { - }; - - LEnvironment* environment_; - SetOncePointer<LPointerMap> pointer_map_; - HValue* hydrogen_value_; - int bit_field_; -}; - - -// R = number of result operands (0 or 1). -template <int R> -class LTemplateResultInstruction : public LInstruction { - public: - // Allow 0 or 1 output operands. - STATIC_ASSERT(R == 0 || R == 1); - bool HasResult() const final { return R != 0 && result() != NULL; } - void set_result(LOperand* operand) { results_[0] = operand; } - LOperand* result() const override { return results_[0]; } - - protected: - EmbeddedContainer<LOperand*, R> results_; -}; - - -// R = number of result operands (0 or 1). -// I = number of input operands. -// T = number of temporary operands. -template <int R, int I, int T> -class LTemplateInstruction : public LTemplateResultInstruction<R> { - protected: - EmbeddedContainer<LOperand*, I> inputs_; - EmbeddedContainer<LOperand*, T> temps_; - - private: - // Iterator support. 
- int InputCount() final { return I; } - LOperand* InputAt(int i) final { return inputs_[i]; } - - int TempCount() final { return T; } - LOperand* TempAt(int i) final { return temps_[i]; } -}; - - -class LGap : public LTemplateInstruction<0, 0, 0> { - public: - explicit LGap(HBasicBlock* block) - : block_(block) { - parallel_moves_[BEFORE] = NULL; - parallel_moves_[START] = NULL; - parallel_moves_[END] = NULL; - parallel_moves_[AFTER] = NULL; - } - - // Can't use the DECLARE-macro here because of sub-classes. - bool IsGap() const final { return true; } - void PrintDataTo(StringStream* stream) override; - static LGap* cast(LInstruction* instr) { - DCHECK(instr->IsGap()); - return reinterpret_cast(instr); - } - - bool IsRedundant() const; - - HBasicBlock* block() const { return block_; } - - enum InnerPosition { - BEFORE, - START, - END, - AFTER, - FIRST_INNER_POSITION = BEFORE, - LAST_INNER_POSITION = AFTER - }; - - LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) { - if (parallel_moves_[pos] == NULL) { - parallel_moves_[pos] = new(zone) LParallelMove(zone); - } - return parallel_moves_[pos]; - } - - LParallelMove* GetParallelMove(InnerPosition pos) { - return parallel_moves_[pos]; - } - - private: - LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1]; - HBasicBlock* block_; -}; - - -class LInstructionGap final : public LGap { - public: - explicit LInstructionGap(HBasicBlock* block) : LGap(block) { } - - bool HasInterestingComment(LCodeGen* gen) const override { - return !IsRedundant(); - } - - DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap") -}; - - -class LGoto final : public LTemplateInstruction<0, 0, 0> { - public: - explicit LGoto(HBasicBlock* block) : block_(block) { } - - bool HasInterestingComment(LCodeGen* gen) const override; - DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") - void PrintDataTo(StringStream* stream) override; - bool IsControl() const override { return true; } - - int block_id() const { return block_->block_id(); } - - private: - HBasicBlock* block_; -}; - - -class LPrologue final : public LTemplateInstruction<0, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue") -}; - - -class LLazyBailout final : public LTemplateInstruction<0, 0, 0> { - public: - LLazyBailout() : gap_instructions_size_(0) { } - - DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout") - - void set_gap_instructions_size(int gap_instructions_size) { - gap_instructions_size_ = gap_instructions_size; - } - int gap_instructions_size() { return gap_instructions_size_; } - - private: - int gap_instructions_size_; -}; - - -class LDummy final : public LTemplateInstruction<1, 0, 0> { - public: - LDummy() {} - DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy") -}; - - -class LDummyUse final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LDummyUse(LOperand* value) { - inputs_[0] = value; - } - DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use") -}; - - -class LDeoptimize final : public LTemplateInstruction<0, 0, 0> { - public: - bool IsControl() const override { return true; } - DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize") - DECLARE_HYDROGEN_ACCESSOR(Deoptimize) -}; - - -class LLabel final : public LGap { - public: - explicit LLabel(HBasicBlock* block) - : LGap(block), replacement_(NULL) { } - - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(Label, "label") - - void PrintDataTo(StringStream* stream) override; - - int block_id() const { return block()->block_id(); } - bool is_loop_header() 
const { return block()->IsLoopHeader(); } - bool is_osr_entry() const { return block()->is_osr_entry(); } - Label* label() { return &label_; } - LLabel* replacement() const { return replacement_; } - void set_replacement(LLabel* label) { replacement_ = label; } - bool HasReplacement() const { return replacement_ != NULL; } - - private: - Label label_; - LLabel* replacement_; -}; - - -class LParameter final : public LTemplateInstruction<1, 0, 0> { - public: - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter") -}; - - -class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> { - public: - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value") -}; - - -template -class LControlInstruction : public LTemplateInstruction<0, I, T> { - public: - LControlInstruction() : false_label_(NULL), true_label_(NULL) { } - - bool IsControl() const final { return true; } - - int SuccessorCount() { return hydrogen()->SuccessorCount(); } - HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); } - - int TrueDestination(LChunk* chunk) { - return chunk->LookupDestination(true_block_id()); - } - int FalseDestination(LChunk* chunk) { - return chunk->LookupDestination(false_block_id()); - } - - Label* TrueLabel(LChunk* chunk) { - if (true_label_ == NULL) { - true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk)); - } - return true_label_; - } - Label* FalseLabel(LChunk* chunk) { - if (false_label_ == NULL) { - false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk)); - } - return false_label_; - } - - protected: - int true_block_id() { return SuccessorAt(0)->block_id(); } - int false_block_id() { return SuccessorAt(1)->block_id(); } - - private: - HControlInstruction* hydrogen() { - return HControlInstruction::cast(this->hydrogen_value()); - } - - Label* false_label_; - Label* true_label_; -}; - - -class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> { - public: - LWrapReceiver(LOperand* receiver, LOperand* function) { - inputs_[0] = receiver; - inputs_[1] = function; - } - - DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver") - DECLARE_HYDROGEN_ACCESSOR(WrapReceiver) - - LOperand* receiver() { return inputs_[0]; } - LOperand* function() { return inputs_[1]; } -}; - - -class LApplyArguments final : public LTemplateInstruction<1, 4, 0> { - public: - LApplyArguments(LOperand* function, - LOperand* receiver, - LOperand* length, - LOperand* elements) { - inputs_[0] = function; - inputs_[1] = receiver; - inputs_[2] = length; - inputs_[3] = elements; - } - - DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments") - DECLARE_HYDROGEN_ACCESSOR(ApplyArguments) - - LOperand* function() { return inputs_[0]; } - LOperand* receiver() { return inputs_[1]; } - LOperand* length() { return inputs_[2]; } - LOperand* elements() { return inputs_[3]; } -}; - - -class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> { - public: - LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) { - inputs_[0] = arguments; - inputs_[1] = length; - inputs_[2] = index; - } - - DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at") - - LOperand* arguments() { return inputs_[0]; } - LOperand* length() { return inputs_[1]; } - LOperand* index() { return inputs_[2]; } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LArgumentsLength final : public 
LTemplateInstruction<1, 1, 0> { - public: - explicit LArgumentsLength(LOperand* elements) { - inputs_[0] = elements; - } - - LOperand* elements() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length") -}; - - -class LArgumentsElements final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements") - DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements) -}; - - -class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> { - public: - LModByPowerOf2I(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) - - private: - int32_t divisor_; -}; - - -class LModByConstI final : public LTemplateInstruction<1, 1, 0> { - public: - LModByConstI(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) - - private: - int32_t divisor_; -}; - - -class LModI final : public LTemplateInstruction<1, 2, 3> { - public: - LModI(LOperand* left, - LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) -}; - - -class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> { - public: - LDivByPowerOf2I(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i") - DECLARE_HYDROGEN_ACCESSOR(Div) - - private: - int32_t divisor_; -}; - - -class LDivByConstI final : public LTemplateInstruction<1, 1, 0> { - public: - LDivByConstI(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i") - DECLARE_HYDROGEN_ACCESSOR(Div) - - private: - int32_t divisor_; -}; - - -class LDivI final : public LTemplateInstruction<1, 2, 1> { - public: - LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) { - inputs_[0] = dividend; - inputs_[1] = divisor; - temps_[0] = temp; - } - - LOperand* dividend() { return inputs_[0]; } - LOperand* divisor() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i") - DECLARE_HYDROGEN_ACCESSOR(BinaryOperation) -}; - - -class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> { - public: - LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I, - "flooring-div-by-power-of-2-i") - DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) - - private: - int32_t divisor_; -}; - - -class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 2> { - public: - LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* 
temp) { - inputs_[0] = dividend; - divisor_ = divisor; - temps_[0] = temp; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i") - DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) - - private: - int32_t divisor_; -}; - - -class LFlooringDivI final : public LTemplateInstruction<1, 2, 0> { - public: - LFlooringDivI(LOperand* dividend, LOperand* divisor) { - inputs_[0] = dividend; - inputs_[1] = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - LOperand* divisor() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i") - DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) -}; - - -class LMulI final : public LTemplateInstruction<1, 2, 0> { - public: - LMulI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i") - DECLARE_HYDROGEN_ACCESSOR(Mul) -}; - - -// Instruction for computing multiplier * multiplicand + addend. -class LMultiplyAddD final : public LTemplateInstruction<1, 3, 0> { - public: - LMultiplyAddD(LOperand* addend, LOperand* multiplier, - LOperand* multiplicand) { - inputs_[0] = addend; - inputs_[1] = multiplier; - inputs_[2] = multiplicand; - } - - LOperand* addend() { return inputs_[0]; } - LOperand* multiplier() { return inputs_[1]; } - LOperand* multiplicand() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d") -}; - - -class LDebugBreak final : public LTemplateInstruction<0, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break") -}; - - -class LCompareNumericAndBranch final : public LControlInstruction<2, 0> { - public: - LCompareNumericAndBranch(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch, - "compare-numeric-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch) - - Token::Value op() const { return hydrogen()->token(); } - bool is_double() const { - return hydrogen()->representation().IsDouble(); - } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LMathFloor final : public LTemplateInstruction<1, 1, 1> { - public: - LMathFloor(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - - -class LMathRound final : public LTemplateInstruction<1, 1, 1> { - public: - LMathRound(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - - -class LMathFround final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathFround(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround") -}; - - -class LMathAbs final : public LTemplateInstruction<1, 2, 0> { - public: - LMathAbs(LOperand* context, LOperand* value) { - inputs_[1] = context; - inputs_[0] 
= value; - } - - LOperand* context() { return inputs_[1]; } - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - - -class LMathLog final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathLog(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log") -}; - - -class LMathClz32 final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathClz32(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32") -}; - -class LMathCos final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathCos(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos") -}; - -class LMathSin final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathSin(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin") -}; - -class LMathExp final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathExp(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp") -}; - - -class LMathSqrt final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathSqrt(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt") -}; - - -class LMathPowHalf final : public LTemplateInstruction<1, 1, 1> { - public: - LMathPowHalf(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half") -}; - - -class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> { - public: - LCmpObjectEqAndBranch(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch) -}; - - -class LCmpHoleAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LCmpHoleAndBranch(LOperand* object) { - inputs_[0] = object; - } - - LOperand* object() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch) -}; - - -class LIsStringAndBranch final : public LControlInstruction<1, 1> { - public: - LIsStringAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LIsSmiAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LIsSmiAndBranch(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch) - - void PrintDataTo(StringStream* 
stream) override; -}; - - -class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> { - public: - explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch, - "is-undetectable-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LStringCompareAndBranch final : public LControlInstruction<3, 0> { - public: - LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch, - "string-compare-and-branch") - DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch) - - Token::Value op() const { return hydrogen()->token(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LHasInstanceTypeAndBranch(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch, - "has-instance-type-and-branch") - DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - -class LClassOfTestAndBranch final : public LControlInstruction<1, 1> { - public: - LClassOfTestAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, "class-of-test-and-branch") - DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - -class LCmpT final : public LTemplateInstruction<1, 3, 0> { - public: - LCmpT(LOperand* context, LOperand* left, LOperand* right) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t") - DECLARE_HYDROGEN_ACCESSOR(CompareGeneric) - - Token::Value op() const { return hydrogen()->token(); } -}; - - -class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> { - public: - LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) { - inputs_[0] = object; - inputs_[1] = prototype; - } - - LOperand* object() const { return inputs_[0]; } - LOperand* prototype() const { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch, - "has-in-prototype-chain-and-branch") - DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch) -}; - - -class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> { - public: - LBoundsCheck(LOperand* index, LOperand* length) { - inputs_[0] = index; - inputs_[1] = length; - } - - LOperand* index() { return inputs_[0]; } - LOperand* length() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check") - DECLARE_HYDROGEN_ACCESSOR(BoundsCheck) -}; - - -class LBitI final : public LTemplateInstruction<1, 2, 0> { - public: - LBitI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - 
- LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - Token::Value op() const { return hydrogen()->op(); } - - DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i") - DECLARE_HYDROGEN_ACCESSOR(Bitwise) -}; - - -class LShiftI final : public LTemplateInstruction<1, 2, 0> { - public: - LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt) - : op_(op), can_deopt_(can_deopt) { - inputs_[0] = left; - inputs_[1] = right; - } - - Token::Value op() const { return op_; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - bool can_deopt() const { return can_deopt_; } - - DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i") - - private: - Token::Value op_; - bool can_deopt_; -}; - - -class LSubI final : public LTemplateInstruction<1, 2, 0> { - public: - LSubI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i") - DECLARE_HYDROGEN_ACCESSOR(Sub) -}; - - -class LConstantI final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - int32_t value() const { return hydrogen()->Integer32Value(); } -}; - - -class LConstantS final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); } -}; - - -class LConstantD final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - double value() const { return hydrogen()->DoubleValue(); } - uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); } -}; - - -class LConstantE final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - ExternalReference value() const { - return hydrogen()->ExternalReferenceValue(); - } -}; - - -class LConstantT final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - Handle<Object> value(Isolate* isolate) const { - return hydrogen()->handle(isolate); - } -}; - - -class LBranch final : public LControlInstruction<1, 0> { - public: - explicit LBranch(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(Branch, "branch") - DECLARE_HYDROGEN_ACCESSOR(Branch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LCmpMapAndBranch final : public LControlInstruction<1, 1> { - public: - LCmpMapAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareMap) - - Handle<Map> map() const { return hydrogen()->map().handle(); } -}; - - -class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> { - public: - LSeqStringGetChar(LOperand* string, LOperand* index) { - inputs_[0] = string; - inputs_[1] = index; - } - - LOperand* string() const { return inputs_[0]; } - LOperand* index() const { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, 
"seq-string-get-char") - DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar) -}; - - -class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 0> { - public: - LSeqStringSetChar(LOperand* context, - LOperand* string, - LOperand* index, - LOperand* value) { - inputs_[0] = context; - inputs_[1] = string; - inputs_[2] = index; - inputs_[3] = value; - } - - LOperand* string() { return inputs_[1]; } - LOperand* index() { return inputs_[2]; } - LOperand* value() { return inputs_[3]; } - - DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char") - DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar) -}; - - -class LAddI final : public LTemplateInstruction<1, 2, 0> { - public: - LAddI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i") - DECLARE_HYDROGEN_ACCESSOR(Add) -}; - - -class LMathMinMax final : public LTemplateInstruction<1, 2, 0> { - public: - LMathMinMax(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max") - DECLARE_HYDROGEN_ACCESSOR(MathMinMax) -}; - - -class LPower final : public LTemplateInstruction<1, 2, 0> { - public: - LPower(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(Power, "power") - DECLARE_HYDROGEN_ACCESSOR(Power) -}; - - -class LArithmeticD final : public LTemplateInstruction<1, 2, 0> { - public: - LArithmeticD(Token::Value op, LOperand* left, LOperand* right) - : op_(op) { - inputs_[0] = left; - inputs_[1] = right; - } - - Token::Value op() const { return op_; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - Opcode opcode() const override { return LInstruction::kArithmeticD; } - void CompileToNative(LCodeGen* generator) override; - const char* Mnemonic() const override; - - private: - Token::Value op_; -}; - - -class LArithmeticT final : public LTemplateInstruction<1, 3, 0> { - public: - LArithmeticT(Token::Value op, - LOperand* context, - LOperand* left, - LOperand* right) - : op_(op) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - Token::Value op() const { return op_; } - - Opcode opcode() const final { return LInstruction::kArithmeticT; } - void CompileToNative(LCodeGen* generator) override; - const char* Mnemonic() const override; - - DECLARE_HYDROGEN_ACCESSOR(BinaryOperation) - - private: - Token::Value op_; -}; - - -class LReturn final : public LTemplateInstruction<0, 3, 0> { - public: - LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) { - inputs_[0] = value; - inputs_[1] = context; - inputs_[2] = parameter_count; - } - - LOperand* value() { return inputs_[0]; } - - bool has_constant_parameter_count() { - return parameter_count()->IsConstantOperand(); - } - LConstantOperand* constant_parameter_count() { - DCHECK(has_constant_parameter_count()); - return LConstantOperand::cast(parameter_count()); - } - LOperand* parameter_count() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(Return, "return") -}; - - -class LLoadNamedField final : public 
LTemplateInstruction<1, 1, 0> { - public: - explicit LLoadNamedField(LOperand* object) { - inputs_[0] = object; - } - - LOperand* object() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field") - DECLARE_HYDROGEN_ACCESSOR(LoadNamedField) -}; - - -class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LLoadFunctionPrototype(LOperand* function) { - inputs_[0] = function; - } - - LOperand* function() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype") - DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype) -}; - - -class LLoadRoot final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root") - DECLARE_HYDROGEN_ACCESSOR(LoadRoot) - - Heap::RootListIndex index() const { return hydrogen()->index(); } -}; - - -class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> { - public: - LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) { - inputs_[0] = elements; - inputs_[1] = key; - inputs_[2] = backing_store_owner; - } - - LOperand* elements() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - LOperand* backing_store_owner() { return inputs_[2]; } - ElementsKind elements_kind() const { - return hydrogen()->elements_kind(); - } - bool is_fixed_typed_array() const { - return hydrogen()->is_fixed_typed_array(); - } - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed") - DECLARE_HYDROGEN_ACCESSOR(LoadKeyed) - - void PrintDataTo(StringStream* stream) override; - uint32_t base_offset() const { return hydrogen()->base_offset(); } -}; - - -class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LLoadContextSlot(LOperand* context) { - inputs_[0] = context; - } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot") - DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot) - - int slot_index() { return hydrogen()->slot_index(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LStoreContextSlot final : public LTemplateInstruction<0, 2, 0> { - public: - LStoreContextSlot(LOperand* context, LOperand* value) { - inputs_[0] = context; - inputs_[1] = value; - } - - LOperand* context() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot") - DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot) - - int slot_index() { return hydrogen()->slot_index(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LPushArgument final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LPushArgument(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument") -}; - - -class LDrop final : public LTemplateInstruction<0, 0, 0> { - public: - explicit LDrop(int count) : count_(count) { } - - int count() const { return count_; } - - DECLARE_CONCRETE_INSTRUCTION(Drop, "drop") - - private: - int count_; -}; - - -class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 0> { - public: - LStoreCodeEntry(LOperand* function, LOperand* code_object) { - inputs_[0] = function; - inputs_[1] = code_object; - } - - LOperand* function() { return inputs_[0]; } - LOperand* code_object() { return inputs_[1]; } - - void PrintDataTo(StringStream* stream) override; - - DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry") 
- DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry) -}; - - -class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> { - public: - LInnerAllocatedObject(LOperand* base_object, LOperand* offset) { - inputs_[0] = base_object; - inputs_[1] = offset; - } - - LOperand* base_object() const { return inputs_[0]; } - LOperand* offset() const { return inputs_[1]; } - - void PrintDataTo(StringStream* stream) override; - - DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object") -}; - - -class LThisFunction final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function") - DECLARE_HYDROGEN_ACCESSOR(ThisFunction) -}; - - -class LContext final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(Context, "context") - DECLARE_HYDROGEN_ACCESSOR(Context) -}; - - -class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LDeclareGlobals(LOperand* context) { - inputs_[0] = context; - } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals") - DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals) -}; - - -class LCallWithDescriptor final : public LTemplateResultInstruction<1> { - public: - LCallWithDescriptor(CallInterfaceDescriptor descriptor, - const ZoneList<LOperand*>& operands, Zone* zone) - : descriptor_(descriptor), - inputs_(descriptor.GetRegisterParameterCount() + - kImplicitRegisterParameterCount, - zone) { - DCHECK(descriptor.GetRegisterParameterCount() + - kImplicitRegisterParameterCount == - operands.length()); - inputs_.AddAll(operands, zone); - } - - LOperand* target() const { return inputs_[0]; } - - const CallInterfaceDescriptor descriptor() { return descriptor_; } - - DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor) - - // The target and context are passed as implicit parameters that are not - // explicitly listed in the descriptor. - static const int kImplicitRegisterParameterCount = 2; - - private: - DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor") - - void PrintDataTo(StringStream* stream) override; - - int arity() const { return hydrogen()->argument_count() - 1; } - - CallInterfaceDescriptor descriptor_; - ZoneList<LOperand*> inputs_; - - // Iterator support. 
- int InputCount() final { return inputs_.length(); } - LOperand* InputAt(int i) final { return inputs_[i]; } - - int TempCount() final { return 0; } - LOperand* TempAt(int i) final { return NULL; } -}; - - -class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> { - public: - LInvokeFunction(LOperand* context, LOperand* function) { - inputs_[0] = context; - inputs_[1] = function; - } - - LOperand* context() { return inputs_[0]; } - LOperand* function() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function") - DECLARE_HYDROGEN_ACCESSOR(InvokeFunction) - - void PrintDataTo(StringStream* stream) override; - - int arity() const { return hydrogen()->argument_count() - 1; } -}; - - -class LCallNewArray final : public LTemplateInstruction<1, 2, 0> { - public: - LCallNewArray(LOperand* context, LOperand* constructor) { - inputs_[0] = context; - inputs_[1] = constructor; - } - - LOperand* context() { return inputs_[0]; } - LOperand* constructor() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array") - DECLARE_HYDROGEN_ACCESSOR(CallNewArray) - - void PrintDataTo(StringStream* stream) override; - - int arity() const { return hydrogen()->argument_count() - 1; } -}; - - -class LCallRuntime final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LCallRuntime(LOperand* context) { - inputs_[0] = context; - } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime") - DECLARE_HYDROGEN_ACCESSOR(CallRuntime) - - bool ClobbersDoubleRegisters(Isolate* isolate) const override { - return save_doubles() == kDontSaveFPRegs; - } - - const Runtime::Function* function() const { return hydrogen()->function(); } - int arity() const { return hydrogen()->argument_count(); } - SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); } -}; - - -class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LInteger32ToDouble(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double") -}; - - -class LUint32ToDouble final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LUint32ToDouble(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double") -}; - - -class LNumberTagI final : public LTemplateInstruction<1, 1, 2> { - public: - LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i") -}; - - -class LNumberTagU final : public LTemplateInstruction<1, 1, 2> { - public: - LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u") -}; - - -class LNumberTagD final : public LTemplateInstruction<1, 1, 2> { - public: - LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { 
return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d") - DECLARE_HYDROGEN_ACCESSOR(Change) -}; - - -class LDoubleToSmi final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LDoubleToSmi(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi") - DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) - - bool truncating() { return hydrogen()->CanTruncateToInt32(); } -}; - - -// Sometimes truncating conversion from a tagged value to an int32. -class LDoubleToI final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LDoubleToI(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i") - DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) - - bool truncating() { return hydrogen()->CanTruncateToInt32(); } -}; - - -// Truncating conversion from a tagged value to an int32. -class LTaggedToI final : public LTemplateInstruction<1, 1, 2> { - public: - LTaggedToI(LOperand* value, - LOperand* temp, - LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i") - DECLARE_HYDROGEN_ACCESSOR(Change) - - bool truncating() { return hydrogen()->CanTruncateToInt32(); } -}; - - -class LSmiTag final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LSmiTag(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag") - DECLARE_HYDROGEN_ACCESSOR(Change) -}; - - -class LNumberUntagD final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LNumberUntagD(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag") - DECLARE_HYDROGEN_ACCESSOR(Change) - - bool truncating() { return hydrogen()->CanTruncateToNumber(); } -}; - - -class LSmiUntag final : public LTemplateInstruction<1, 1, 0> { - public: - LSmiUntag(LOperand* value, bool needs_check) - : needs_check_(needs_check) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - bool needs_check() const { return needs_check_; } - - DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag") - - private: - bool needs_check_; -}; - - -class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> { - public: - LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) { - inputs_[0] = object; - inputs_[1] = value; - temps_[0] = temp; - } - - LOperand* object() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") - DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) - - void PrintDataTo(StringStream* stream) override; - - Representation representation() const { - return hydrogen()->field_representation(); - } -}; - - -class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> { - public: - LStoreKeyed(LOperand* object, LOperand* key, LOperand* value, - LOperand* backing_store_owner) { - inputs_[0] = object; - inputs_[1] = key; - inputs_[2] = value; - inputs_[3] = backing_store_owner; - } - - bool is_fixed_typed_array() const { - return hydrogen()->is_fixed_typed_array(); - } - 
LOperand* elements() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - LOperand* value() { return inputs_[2]; } - LOperand* backing_store_owner() { return inputs_[3]; } - ElementsKind elements_kind() const { - return hydrogen()->elements_kind(); - } - - DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyed) - - void PrintDataTo(StringStream* stream) override; - bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); } - uint32_t base_offset() const { return hydrogen()->base_offset(); } -}; - - -class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> { - public: - LTransitionElementsKind(LOperand* object, - LOperand* context, - LOperand* new_map_temp) { - inputs_[0] = object; - inputs_[1] = context; - temps_[0] = new_map_temp; - } - - LOperand* context() { return inputs_[1]; } - LOperand* object() { return inputs_[0]; } - LOperand* new_map_temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind, - "transition-elements-kind") - DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind) - - void PrintDataTo(StringStream* stream) override; - - Handle<Map> original_map() { return hydrogen()->original_map().handle(); } - Handle<Map> transitioned_map() { - return hydrogen()->transitioned_map().handle(); - } - ElementsKind from_kind() { return hydrogen()->from_kind(); } - ElementsKind to_kind() { return hydrogen()->to_kind(); } -}; - - -class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> { - public: - LTrapAllocationMemento(LOperand* object, - LOperand* temp) { - inputs_[0] = object; - temps_[0] = temp; - } - - LOperand* object() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, - "trap-allocation-memento") -}; - - -class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> { - public: - LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements, - LOperand* key, LOperand* current_capacity) { - inputs_[0] = context; - inputs_[1] = object; - inputs_[2] = elements; - inputs_[3] = key; - inputs_[4] = current_capacity; - } - - LOperand* context() { return inputs_[0]; } - LOperand* object() { return inputs_[1]; } - LOperand* elements() { return inputs_[2]; } - LOperand* key() { return inputs_[3]; } - LOperand* current_capacity() { return inputs_[4]; } - - bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; } - - DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements) - DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements") -}; - - -class LStringAdd final : public LTemplateInstruction<1, 3, 0> { - public: - LStringAdd(LOperand* context, LOperand* left, LOperand* right) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add") - DECLARE_HYDROGEN_ACCESSOR(StringAdd) -}; - - -class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> { - public: - LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) { - inputs_[0] = context; - inputs_[1] = string; - inputs_[2] = index; - } - - LOperand* context() { return inputs_[0]; } - LOperand* string() { return inputs_[1]; } - LOperand* index() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at") - DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt) -}; - 
- -class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> { - public: - explicit LStringCharFromCode(LOperand* context, LOperand* char_code) { - inputs_[0] = context; - inputs_[1] = char_code; - } - - LOperand* context() { return inputs_[0]; } - LOperand* char_code() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code") - DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode) -}; - - -class LCheckValue final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckValue(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value") - DECLARE_HYDROGEN_ACCESSOR(CheckValue) -}; - - -class LCheckArrayBufferNotNeutered final - : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckArrayBufferNotNeutered(LOperand* view) { inputs_[0] = view; } - - LOperand* view() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered, - "check-array-buffer-not-neutered") - DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered) -}; - - -class LCheckInstanceType final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckInstanceType(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type") - DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType) -}; - - -class LCheckMaps final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckMaps(LOperand* value = NULL) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps") - DECLARE_HYDROGEN_ACCESSOR(CheckMaps) -}; - - -class LCheckSmi final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LCheckSmi(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi") -}; - - -class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckNonSmi(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi") - DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject) -}; - - -class LClampDToUint8 final : public LTemplateInstruction<1, 1, 1> { - public: - LClampDToUint8(LOperand* unclamped, LOperand* temp) { - inputs_[0] = unclamped; - temps_[0] = temp; - } - - LOperand* unclamped() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8") -}; - - -class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LClampIToUint8(LOperand* unclamped) { - inputs_[0] = unclamped; - } - - LOperand* unclamped() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8") -}; - - -class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> { - public: - LClampTToUint8(LOperand* unclamped, LOperand* temp) { - inputs_[0] = unclamped; - temps_[0] = temp; - } - - LOperand* unclamped() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8") -}; - - -class LAllocate final : public LTemplateInstruction<1, 2, 2> { - public: - LAllocate(LOperand* context, - LOperand* size, - LOperand* temp1, - LOperand* temp2) { - inputs_[0] = context; - inputs_[1] = size; - temps_[0] = temp1; - temps_[1] = temp2; - } - - 
LOperand* context() { return inputs_[0]; } - LOperand* size() { return inputs_[1]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate") - DECLARE_HYDROGEN_ACCESSOR(Allocate) -}; - -class LFastAllocate final : public LTemplateInstruction<1, 1, 2> { - public: - LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) { - inputs_[0] = size; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* size() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate") - DECLARE_HYDROGEN_ACCESSOR(Allocate) -}; - -class LTypeof final : public LTemplateInstruction<1, 2, 0> { - public: - LTypeof(LOperand* context, LOperand* value) { - inputs_[0] = context; - inputs_[1] = value; - } - - LOperand* context() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof") -}; - - -class LTypeofIsAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LTypeofIsAndBranch(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch") - DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch) - - Handle<String> type_literal() { return hydrogen()->type_literal(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LOsrEntry final : public LTemplateInstruction<0, 0, 0> { - public: - LOsrEntry() {} - - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry") -}; - - -class LStackCheck final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LStackCheck(LOperand* context) { - inputs_[0] = context; - } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check") - DECLARE_HYDROGEN_ACCESSOR(StackCheck) - - Label* done_label() { return &done_label_; } - - private: - Label done_label_; -}; - - -class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> { - public: - LForInPrepareMap(LOperand* context, LOperand* object) { - inputs_[0] = context; - inputs_[1] = object; - } - - LOperand* context() { return inputs_[0]; } - LOperand* object() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map") -}; - - -class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LForInCacheArray(LOperand* map) { - inputs_[0] = map; - } - - LOperand* map() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array") - - int idx() { - return HForInCacheArray::cast(this->hydrogen_value())->idx(); - } -}; - - -class LCheckMapValue final : public LTemplateInstruction<0, 2, 0> { - public: - LCheckMapValue(LOperand* value, LOperand* map) { - inputs_[0] = value; - inputs_[1] = map; - } - - LOperand* value() { return inputs_[0]; } - LOperand* map() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value") -}; - - -class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> { - public: - LLoadFieldByIndex(LOperand* object, LOperand* index) { - inputs_[0] = object; - inputs_[1] = index; - } - - LOperand* object() { return inputs_[0]; } - LOperand* index() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index") -}; - - -class LChunkBuilder; -class 
LPlatformChunk final : public LChunk { - public: - LPlatformChunk(CompilationInfo* info, HGraph* graph) - : LChunk(info, graph) { } - - int GetNextSpillIndex(RegisterKind kind); - LOperand* GetNextSpillSlot(RegisterKind kind); -}; - - -class LChunkBuilder final : public LChunkBuilderBase { - public: - LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator) - : LChunkBuilderBase(info, graph), - current_instruction_(NULL), - current_block_(NULL), - next_block_(NULL), - allocator_(allocator) {} - - // Build the sequence for the graph. - LPlatformChunk* Build(); - - // Declare methods that deal with the individual node types. -#define DECLARE_DO(type) LInstruction* Do##type(H##type* node); - HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) -#undef DECLARE_DO - - LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend); - - static bool HasMagicNumberForDivisor(int32_t divisor); - - LInstruction* DoMathFloor(HUnaryMathOperation* instr); - LInstruction* DoMathRound(HUnaryMathOperation* instr); - LInstruction* DoMathFround(HUnaryMathOperation* instr); - LInstruction* DoMathAbs(HUnaryMathOperation* instr); - LInstruction* DoMathLog(HUnaryMathOperation* instr); - LInstruction* DoMathCos(HUnaryMathOperation* instr); - LInstruction* DoMathSin(HUnaryMathOperation* instr); - LInstruction* DoMathExp(HUnaryMathOperation* instr); - LInstruction* DoMathSqrt(HUnaryMathOperation* instr); - LInstruction* DoMathPowHalf(HUnaryMathOperation* instr); - LInstruction* DoMathClz32(HUnaryMathOperation* instr); - LInstruction* DoDivByPowerOf2I(HDiv* instr); - LInstruction* DoDivByConstI(HDiv* instr); - LInstruction* DoDivI(HDiv* instr); - LInstruction* DoModByPowerOf2I(HMod* instr); - LInstruction* DoModByConstI(HMod* instr); - LInstruction* DoModI(HMod* instr); - LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr); - LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr); - LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr); - - private: - // Methods for getting operands for Use / Define / Temp. - LUnallocated* ToUnallocated(Register reg); - LUnallocated* ToUnallocated(DoubleRegister reg); - - // Methods for setting up define-use relationships. - MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand); - MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register); - MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value, - DoubleRegister fixed_register); - - // A value that is guaranteed to be allocated to a register. - // Operand created by UseRegister is guaranteed to be live until the end of - // instruction. This means that register allocator will not reuse it's - // register for any other operand inside instruction. - // Operand created by UseRegisterAtStart is guaranteed to be live only at - // instruction start. Register allocator is free to assign the same register - // to some other operand used inside instruction (i.e. temporary or - // output). - MUST_USE_RESULT LOperand* UseRegister(HValue* value); - MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value); - - // An input operand in a register that may be trashed. - MUST_USE_RESULT LOperand* UseTempRegister(HValue* value); - - // An input operand in a register or stack slot. - MUST_USE_RESULT LOperand* Use(HValue* value); - MUST_USE_RESULT LOperand* UseAtStart(HValue* value); - - // An input operand in a register, stack slot or a constant operand. 
- MUST_USE_RESULT LOperand* UseOrConstant(HValue* value); - MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value); - - // An input operand in a register or a constant operand. - MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value); - MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value); - - // An input operand in a constant operand. - MUST_USE_RESULT LOperand* UseConstant(HValue* value); - - // An input operand in register, stack slot or a constant operand. - // Will not be moved to a register even if one is freely available. - MUST_USE_RESULT LOperand* UseAny(HValue* value) override; - - // Temporary operand that must be in a register. - MUST_USE_RESULT LUnallocated* TempRegister(); - MUST_USE_RESULT LUnallocated* TempDoubleRegister(); - MUST_USE_RESULT LOperand* FixedTemp(Register reg); - MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg); - - // Methods for setting up define-use relationships. - // Return the same instruction that they are passed. - LInstruction* Define(LTemplateResultInstruction<1>* instr, - LUnallocated* result); - LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr); - LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr, - int index); - LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr); - LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr, - Register reg); - LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr, - DoubleRegister reg); - LInstruction* AssignEnvironment(LInstruction* instr); - LInstruction* AssignPointerMap(LInstruction* instr); - - enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY }; - - // By default we assume that instruction sequences generated for calls - // cannot deoptimize eagerly and we do not attach environment to this - // instruction. - LInstruction* MarkAsCall( - LInstruction* instr, - HInstruction* hinstr, - CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY); - - void VisitInstruction(HInstruction* current); - void AddInstruction(LInstruction* instr, HInstruction* current); - - void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block); - LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr); - LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr); - LInstruction* DoArithmeticD(Token::Value op, - HArithmeticBinaryOperation* instr); - LInstruction* DoArithmeticT(Token::Value op, - HBinaryOperation* instr); - - HInstruction* current_instruction_; - HBasicBlock* current_block_; - HBasicBlock* next_block_; - LAllocator* allocator_; - - DISALLOW_COPY_AND_ASSIGN(LChunkBuilder); -}; - -#undef DECLARE_HYDROGEN_ACCESSOR -#undef DECLARE_CONCRETE_INSTRUCTION - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_MIPS_LITHIUM_MIPS_H_ diff --git a/src/crankshaft/mips64/OWNERS b/src/crankshaft/mips64/OWNERS deleted file mode 100644 index 3f8fbfc7c8..0000000000 --- a/src/crankshaft/mips64/OWNERS +++ /dev/null @@ -1,3 +0,0 @@ -ivica.bogosavljevic@imgtec.com -Miran.Karic@imgtec.com -dusan.simicic@imgtec.com diff --git a/src/crankshaft/mips64/lithium-codegen-mips64.cc b/src/crankshaft/mips64/lithium-codegen-mips64.cc deleted file mode 100644 index 2cd3fbfc95..0000000000 --- a/src/crankshaft/mips64/lithium-codegen-mips64.cc +++ /dev/null @@ -1,5546 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#include "src/crankshaft/mips64/lithium-codegen-mips64.h" - -#include "src/builtins/builtins-constructor.h" -#include "src/code-factory.h" -#include "src/code-stubs.h" -#include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h" -#include "src/ic/ic.h" -#include "src/ic/stub-cache.h" - -namespace v8 { -namespace internal { - - -class SafepointGenerator final : public CallWrapper { - public: - SafepointGenerator(LCodeGen* codegen, - LPointerMap* pointers, - Safepoint::DeoptMode mode) - : codegen_(codegen), - pointers_(pointers), - deopt_mode_(mode) { } - virtual ~SafepointGenerator() {} - - void BeforeCall(int call_size) const override {} - - void AfterCall() const override { - codegen_->RecordSafepoint(pointers_, deopt_mode_); - } - - private: - LCodeGen* codegen_; - LPointerMap* pointers_; - Safepoint::DeoptMode deopt_mode_; -}; - -LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope( - LCodeGen* codegen) - : codegen_(codegen) { - DCHECK(codegen_->info()->is_calling()); - DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); - codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters; - - StoreRegistersStateStub stub(codegen_->isolate()); - codegen_->masm_->push(ra); - codegen_->masm_->CallStub(&stub); -} - -LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() { - DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters); - RestoreRegistersStateStub stub(codegen_->isolate()); - codegen_->masm_->push(ra); - codegen_->masm_->CallStub(&stub); - codegen_->expected_safepoint_kind_ = Safepoint::kSimple; -} - -#define __ masm()-> - -bool LCodeGen::GenerateCode() { - LPhase phase("Z_Code generation", chunk()); - DCHECK(is_unused()); - status_ = GENERATING; - - // Open a frame scope to indicate that there is a frame on the stack. The - // NONE indicates that the scope shouldn't actually generate code to set up - // the frame (that is done in GeneratePrologue). - FrameScope frame_scope(masm_, StackFrame::NONE); - - return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() && - GenerateJumpTable() && GenerateSafepointTable(); -} - - -void LCodeGen::FinishCode(Handle code) { - DCHECK(is_done()); - code->set_stack_slots(GetTotalFrameSlotCount()); - code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); - PopulateDeoptimizationData(code); -} - - -void LCodeGen::SaveCallerDoubles() { - DCHECK(info()->saves_caller_doubles()); - DCHECK(NeedsEagerFrame()); - Comment(";;; Save clobbered callee double registers"); - int count = 0; - BitVector* doubles = chunk()->allocated_double_registers(); - BitVector::Iterator save_iterator(doubles); - while (!save_iterator.Done()) { - __ Sdc1(DoubleRegister::from_code(save_iterator.Current()), - MemOperand(sp, count * kDoubleSize)); - save_iterator.Advance(); - count++; - } -} - - -void LCodeGen::RestoreCallerDoubles() { - DCHECK(info()->saves_caller_doubles()); - DCHECK(NeedsEagerFrame()); - Comment(";;; Restore clobbered callee double registers"); - BitVector* doubles = chunk()->allocated_double_registers(); - BitVector::Iterator save_iterator(doubles); - int count = 0; - while (!save_iterator.Done()) { - __ Ldc1(DoubleRegister::from_code(save_iterator.Current()), - MemOperand(sp, count * kDoubleSize)); - save_iterator.Advance(); - count++; - } -} - - -bool LCodeGen::GeneratePrologue() { - DCHECK(is_generating()); - - if (info()->IsOptimizing()) { - ProfileEntryHookStub::MaybeCallEntryHook(masm_); - - // a1: Callee's JS function. - // cp: Callee's context. 
- // fp: Caller's frame pointer. - // lr: Caller's pc. - } - - info()->set_prologue_offset(masm_->pc_offset()); - if (NeedsEagerFrame()) { - if (info()->IsStub()) { - __ StubPrologue(StackFrame::STUB); - } else { - __ Prologue(info()->GeneratePreagedPrologue()); - } - frame_is_built_ = true; - } - - // Reserve space for the stack slots needed by the code. - int slots = GetStackSlotCount(); - if (slots > 0) { - if (FLAG_debug_code) { - __ Dsubu(sp, sp, Operand(slots * kPointerSize)); - __ Push(a0, a1); - __ Daddu(a0, sp, Operand(slots * kPointerSize)); - __ li(a1, Operand(kSlotsZapValue)); - Label loop; - __ bind(&loop); - __ Dsubu(a0, a0, Operand(kPointerSize)); - __ Sd(a1, MemOperand(a0, 2 * kPointerSize)); - __ Branch(&loop, ne, a0, Operand(sp)); - __ Pop(a0, a1); - } else { - __ Dsubu(sp, sp, Operand(slots * kPointerSize)); - } - } - - if (info()->saves_caller_doubles()) { - SaveCallerDoubles(); - } - return !is_aborted(); -} - - -void LCodeGen::DoPrologue(LPrologue* instr) { - Comment(";;; Prologue begin"); - - // Possibly allocate a local context. - if (info()->scope()->NeedsContext()) { - Comment(";;; Allocate local context"); - bool need_write_barrier = true; - // Argument to NewContext is the function, which is in a1. - int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; - Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt; - if (info()->scope()->is_script_scope()) { - __ push(a1); - __ Push(info()->scope()->scope_info()); - __ CallRuntime(Runtime::kNewScriptContext); - deopt_mode = Safepoint::kLazyDeopt; - } else { - if (slots <= ConstructorBuiltins::MaximumFunctionContextSlots()) { - Callable callable = CodeFactory::FastNewFunctionContext( - isolate(), info()->scope()->scope_type()); - __ li(FastNewFunctionContextDescriptor::SlotsRegister(), - Operand(slots)); - __ Call(callable.code(), RelocInfo::CODE_TARGET); - // Result of the FastNewFunctionContext builtin is always in new space. - need_write_barrier = false; - } else { - __ push(a1); - __ Push(Smi::FromInt(info()->scope()->scope_type())); - __ CallRuntime(Runtime::kNewFunctionContext); - } - } - RecordSafepoint(deopt_mode); - - // Context is returned in both v0. It replaces the context passed to us. - // It's saved in the stack and kept live in cp. - __ mov(cp, v0); - __ Sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset)); - // Copy any necessary parameters into the context. - int num_parameters = info()->scope()->num_parameters(); - int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0; - for (int i = first_parameter; i < num_parameters; i++) { - Variable* var = (i == -1) ? info()->scope()->receiver() - : info()->scope()->parameter(i); - if (var->IsContextSlot()) { - int parameter_offset = StandardFrameConstants::kCallerSPOffset + - (num_parameters - 1 - i) * kPointerSize; - // Load parameter from stack. - __ Ld(a0, MemOperand(fp, parameter_offset)); - // Store it in the context. - MemOperand target = ContextMemOperand(cp, var->index()); - __ Sd(a0, target); - // Update the write barrier. This clobbers a3 and a0. 
- if (need_write_barrier) { - __ RecordWriteContextSlot( - cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs); - } else if (FLAG_debug_code) { - Label done; - __ JumpIfInNewSpace(cp, a0, &done); - __ Abort(kExpectedNewSpaceObject); - __ bind(&done); - } - } - } - Comment(";;; End allocate local context"); - } - - Comment(";;; Prologue end"); -} - -void LCodeGen::GenerateOsrPrologue() { UNREACHABLE(); } - -void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { - if (instr->IsCall()) { - EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); - } - if (!instr->IsLazyBailout() && !instr->IsGap()) { - safepoints_.BumpLastLazySafepointIndex(); - } -} - - -bool LCodeGen::GenerateDeferredCode() { - DCHECK(is_generating()); - if (deferred_.length() > 0) { - for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { - LDeferredCode* code = deferred_[i]; - - HValue* value = - instructions_->at(code->instruction_index())->hydrogen_value(); - RecordAndWritePosition(value->position()); - - Comment(";;; <@%d,#%d> " - "-------------------- Deferred %s --------------------", - code->instruction_index(), - code->instr()->hydrogen_value()->id(), - code->instr()->Mnemonic()); - __ bind(code->entry()); - if (NeedsDeferredFrame()) { - Comment(";;; Build frame"); - DCHECK(!frame_is_built_); - DCHECK(info()->IsStub()); - frame_is_built_ = true; - __ li(scratch0(), Operand(StackFrame::TypeToMarker(StackFrame::STUB))); - __ PushCommonFrame(scratch0()); - Comment(";;; Deferred code"); - } - code->Generate(); - if (NeedsDeferredFrame()) { - Comment(";;; Destroy frame"); - DCHECK(frame_is_built_); - __ PopCommonFrame(scratch0()); - frame_is_built_ = false; - } - __ jmp(code->exit()); - } - } - // Deferred code is the last part of the instruction sequence. Mark - // the generated code as done unless we bailed out. - if (!is_aborted()) status_ = DONE; - return !is_aborted(); -} - - -bool LCodeGen::GenerateJumpTable() { - if (jump_table_.length() > 0) { - Comment(";;; -------------------- Jump table --------------------"); - Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); - Label table_start, call_deopt_entry; - - __ bind(&table_start); - Label needs_frame; - Address base = jump_table_[0]->address; - for (int i = 0; i < jump_table_.length(); i++) { - Deoptimizer::JumpTableEntry* table_entry = jump_table_[i]; - __ bind(&table_entry->label); - Address entry = table_entry->address; - DeoptComment(table_entry->deopt_info); - - // Second-level deopt table entries are contiguous and small, so instead - // of loading the full, absolute address of each one, load the base - // address and add an immediate offset. - if (is_int16(entry - base)) { - if (table_entry->needs_frame) { - DCHECK(!info()->saves_caller_doubles()); - Comment(";;; call deopt with frame"); - __ PushCommonFrame(); - __ BranchAndLink(&needs_frame, USE_DELAY_SLOT); - __ li(t9, Operand(entry - base)); - } else { - __ BranchAndLink(&call_deopt_entry, USE_DELAY_SLOT); - __ li(t9, Operand(entry - base)); - } - - } else { - __ li(t9, Operand(entry - base)); - if (table_entry->needs_frame) { - DCHECK(!info()->saves_caller_doubles()); - Comment(";;; call deopt with frame"); - __ PushCommonFrame(); - __ BranchAndLink(&needs_frame); - } else { - __ BranchAndLink(&call_deopt_entry); - } - } - } - if (needs_frame.is_linked()) { - __ bind(&needs_frame); - // This variant of deopt can only be used with stubs. 
Since we don't - // have a function pointer to install in the stack frame that we're - // building, install a special marker there instead. - __ li(at, Operand(StackFrame::TypeToMarker(StackFrame::STUB))); - __ push(at); - DCHECK(info()->IsStub()); - } - - Comment(";;; call deopt"); - __ bind(&call_deopt_entry); - - if (info()->saves_caller_doubles()) { - DCHECK(info()->IsStub()); - RestoreCallerDoubles(); - } - - __ li(at, - Operand(reinterpret_cast(base), RelocInfo::RUNTIME_ENTRY)); - __ Daddu(t9, t9, Operand(at)); - __ Jump(t9); - } - // The deoptimization jump table is the last part of the instruction - // sequence. Mark the generated code as done unless we bailed out. - if (!is_aborted()) status_ = DONE; - return !is_aborted(); -} - - -bool LCodeGen::GenerateSafepointTable() { - DCHECK(is_done()); - safepoints_.Emit(masm(), GetTotalFrameSlotCount()); - return !is_aborted(); -} - - -Register LCodeGen::ToRegister(int index) const { - return Register::from_code(index); -} - - -DoubleRegister LCodeGen::ToDoubleRegister(int index) const { - return DoubleRegister::from_code(index); -} - - -Register LCodeGen::ToRegister(LOperand* op) const { - DCHECK(op->IsRegister()); - return ToRegister(op->index()); -} - - -Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { - if (op->IsRegister()) { - return ToRegister(op->index()); - } else if (op->IsConstantOperand()) { - LConstantOperand* const_op = LConstantOperand::cast(op); - HConstant* constant = chunk_->LookupConstant(const_op); - Handle literal = constant->handle(isolate()); - Representation r = chunk_->LookupLiteralRepresentation(const_op); - if (r.IsInteger32()) { - AllowDeferredHandleDereference get_number; - DCHECK(literal->IsNumber()); - __ li(scratch, Operand(static_cast(literal->Number()))); - } else if (r.IsSmi()) { - DCHECK(constant->HasSmiValue()); - __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value()))); - } else if (r.IsDouble()) { - Abort(kEmitLoadRegisterUnsupportedDoubleImmediate); - } else { - DCHECK(r.IsSmiOrTagged()); - __ li(scratch, literal); - } - return scratch; - } else if (op->IsStackSlot()) { - __ Ld(scratch, ToMemOperand(op)); - return scratch; - } - UNREACHABLE(); -} - - -DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const { - DCHECK(op->IsDoubleRegister()); - return ToDoubleRegister(op->index()); -} - - -DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op, - FloatRegister flt_scratch, - DoubleRegister dbl_scratch) { - if (op->IsDoubleRegister()) { - return ToDoubleRegister(op->index()); - } else if (op->IsConstantOperand()) { - LConstantOperand* const_op = LConstantOperand::cast(op); - HConstant* constant = chunk_->LookupConstant(const_op); - Handle literal = constant->handle(isolate()); - Representation r = chunk_->LookupLiteralRepresentation(const_op); - if (r.IsInteger32()) { - DCHECK(literal->IsNumber()); - __ li(at, Operand(static_cast(literal->Number()))); - __ mtc1(at, flt_scratch); - __ cvt_d_w(dbl_scratch, flt_scratch); - return dbl_scratch; - } else if (r.IsDouble()) { - Abort(kUnsupportedDoubleImmediate); - } else if (r.IsTagged()) { - Abort(kUnsupportedTaggedImmediate); - } - } else if (op->IsStackSlot()) { - MemOperand mem_op = ToMemOperand(op); - __ Ldc1(dbl_scratch, mem_op); - return dbl_scratch; - } - UNREACHABLE(); -} - - -Handle LCodeGen::ToHandle(LConstantOperand* op) const { - HConstant* constant = chunk_->LookupConstant(op); - DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); - return constant->handle(isolate()); -} - - -bool 
LCodeGen::IsInteger32(LConstantOperand* op) const { - return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32(); -} - - -bool LCodeGen::IsSmi(LConstantOperand* op) const { - return chunk_->LookupLiteralRepresentation(op).IsSmi(); -} - - -int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { - // return ToRepresentation(op, Representation::Integer32()); - HConstant* constant = chunk_->LookupConstant(op); - return constant->Integer32Value(); -} - - -int64_t LCodeGen::ToRepresentation_donotuse(LConstantOperand* op, - const Representation& r) const { - HConstant* constant = chunk_->LookupConstant(op); - int32_t value = constant->Integer32Value(); - if (r.IsInteger32()) return value; - DCHECK(r.IsSmiOrTagged()); - return reinterpret_cast(Smi::FromInt(value)); -} - - -Smi* LCodeGen::ToSmi(LConstantOperand* op) const { - HConstant* constant = chunk_->LookupConstant(op); - return Smi::FromInt(constant->Integer32Value()); -} - - -double LCodeGen::ToDouble(LConstantOperand* op) const { - HConstant* constant = chunk_->LookupConstant(op); - DCHECK(constant->HasDoubleValue()); - return constant->DoubleValue(); -} - - -Operand LCodeGen::ToOperand(LOperand* op) { - if (op->IsConstantOperand()) { - LConstantOperand* const_op = LConstantOperand::cast(op); - HConstant* constant = chunk()->LookupConstant(const_op); - Representation r = chunk_->LookupLiteralRepresentation(const_op); - if (r.IsSmi()) { - DCHECK(constant->HasSmiValue()); - return Operand(Smi::FromInt(constant->Integer32Value())); - } else if (r.IsInteger32()) { - DCHECK(constant->HasInteger32Value()); - return Operand(constant->Integer32Value()); - } else if (r.IsDouble()) { - Abort(kToOperandUnsupportedDoubleImmediate); - } - DCHECK(r.IsTagged()); - return Operand(constant->handle(isolate())); - } else if (op->IsRegister()) { - return Operand(ToRegister(op)); - } else if (op->IsDoubleRegister()) { - Abort(kToOperandIsDoubleRegisterUnimplemented); - return Operand((int64_t)0); - } - // Stack slots not implemented, use ToMemOperand instead. - UNREACHABLE(); -} - - -static int ArgumentsOffsetWithoutFrame(int index) { - DCHECK(index < 0); - return -(index + 1) * kPointerSize; -} - - -MemOperand LCodeGen::ToMemOperand(LOperand* op) const { - DCHECK(!op->IsRegister()); - DCHECK(!op->IsDoubleRegister()); - DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); - if (NeedsEagerFrame()) { - return MemOperand(fp, FrameSlotToFPOffset(op->index())); - } else { - // Retrieve parameter without eager stack-frame relative to the - // stack-pointer. - return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index())); - } -} - - -MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const { - DCHECK(op->IsDoubleStackSlot()); - if (NeedsEagerFrame()) { - // return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize); - return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kIntSize); - } else { - // Retrieve parameter without eager stack-frame relative to the - // stack-pointer. - // return MemOperand( - // sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize); - return MemOperand( - sp, ArgumentsOffsetWithoutFrame(op->index()) + kIntSize); - } -} - - -void LCodeGen::WriteTranslation(LEnvironment* environment, - Translation* translation) { - if (environment == NULL) return; - - // The translation includes one command per value in the environment. 
- int translation_size = environment->translation_size(); - - WriteTranslation(environment->outer(), translation); - WriteTranslationFrame(environment, translation); - - int object_index = 0; - int dematerialized_index = 0; - for (int i = 0; i < translation_size; ++i) { - LOperand* value = environment->values()->at(i); - AddToTranslation( - environment, translation, value, environment->HasTaggedValueAt(i), - environment->HasUint32ValueAt(i), &object_index, &dematerialized_index); - } -} - - -void LCodeGen::AddToTranslation(LEnvironment* environment, - Translation* translation, - LOperand* op, - bool is_tagged, - bool is_uint32, - int* object_index_pointer, - int* dematerialized_index_pointer) { - if (op == LEnvironment::materialization_marker()) { - int object_index = (*object_index_pointer)++; - if (environment->ObjectIsDuplicateAt(object_index)) { - int dupe_of = environment->ObjectDuplicateOfAt(object_index); - translation->DuplicateObject(dupe_of); - return; - } - int object_length = environment->ObjectLengthAt(object_index); - if (environment->ObjectIsArgumentsAt(object_index)) { - translation->BeginArgumentsObject(object_length); - } else { - translation->BeginCapturedObject(object_length); - } - int dematerialized_index = *dematerialized_index_pointer; - int env_offset = environment->translation_size() + dematerialized_index; - *dematerialized_index_pointer += object_length; - for (int i = 0; i < object_length; ++i) { - LOperand* value = environment->values()->at(env_offset + i); - AddToTranslation(environment, - translation, - value, - environment->HasTaggedValueAt(env_offset + i), - environment->HasUint32ValueAt(env_offset + i), - object_index_pointer, - dematerialized_index_pointer); - } - return; - } - - if (op->IsStackSlot()) { - int index = op->index(); - if (is_tagged) { - translation->StoreStackSlot(index); - } else if (is_uint32) { - translation->StoreUint32StackSlot(index); - } else { - translation->StoreInt32StackSlot(index); - } - } else if (op->IsDoubleStackSlot()) { - int index = op->index(); - translation->StoreDoubleStackSlot(index); - } else if (op->IsRegister()) { - Register reg = ToRegister(op); - if (is_tagged) { - translation->StoreRegister(reg); - } else if (is_uint32) { - translation->StoreUint32Register(reg); - } else { - translation->StoreInt32Register(reg); - } - } else if (op->IsDoubleRegister()) { - DoubleRegister reg = ToDoubleRegister(op); - translation->StoreDoubleRegister(reg); - } else if (op->IsConstantOperand()) { - HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op)); - int src_index = DefineDeoptimizationLiteral(constant->handle(isolate())); - translation->StoreLiteral(src_index); - } else { - UNREACHABLE(); - } -} - - -void LCodeGen::CallCode(Handle code, - RelocInfo::Mode mode, - LInstruction* instr) { - CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT); -} - - -void LCodeGen::CallCodeGeneric(Handle code, - RelocInfo::Mode mode, - LInstruction* instr, - SafepointMode safepoint_mode) { - DCHECK(instr != NULL); - __ Call(code, mode); - RecordSafepointWithLazyDeopt(instr, safepoint_mode); -} - - -void LCodeGen::CallRuntime(const Runtime::Function* function, - int num_arguments, - LInstruction* instr, - SaveFPRegsMode save_doubles) { - DCHECK(instr != NULL); - - __ CallRuntime(function, num_arguments, save_doubles); - - RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); -} - - -void LCodeGen::LoadContextFromDeferred(LOperand* context) { - if (context->IsRegister()) { - __ Move(cp, ToRegister(context)); - } 
else if (context->IsStackSlot()) { - __ Ld(cp, ToMemOperand(context)); - } else if (context->IsConstantOperand()) { - HConstant* constant = - chunk_->LookupConstant(LConstantOperand::cast(context)); - __ li(cp, Handle::cast(constant->handle(isolate()))); - } else { - UNREACHABLE(); - } -} - - -void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, - int argc, - LInstruction* instr, - LOperand* context) { - LoadContextFromDeferred(context); - __ CallRuntimeSaveDoubles(id); - RecordSafepointWithRegisters( - instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); -} - - -void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment, - Safepoint::DeoptMode mode) { - environment->set_has_been_used(); - if (!environment->HasBeenRegistered()) { - // Physical stack frame layout: - // -x ............. -4 0 ..................................... y - // [incoming arguments] [spill slots] [pushed outgoing arguments] - - // Layout of the environment: - // 0 ..................................................... size-1 - // [parameters] [locals] [expression stack including arguments] - - // Layout of the translation: - // 0 ........................................................ size - 1 + 4 - // [expression stack including arguments] [locals] [4 words] [parameters] - // |>------------ translation_size ------------<| - - int frame_count = 0; - int jsframe_count = 0; - for (LEnvironment* e = environment; e != NULL; e = e->outer()) { - ++frame_count; - if (e->frame_type() == JS_FUNCTION) { - ++jsframe_count; - } - } - Translation translation(&translations_, frame_count, jsframe_count, zone()); - WriteTranslation(environment, &translation); - int deoptimization_index = deoptimizations_.length(); - int pc_offset = masm()->pc_offset(); - environment->Register(deoptimization_index, - translation.index(), - (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); - deoptimizations_.Add(environment, zone()); - } -} - -void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, - DeoptimizeReason deopt_reason, - Deoptimizer::BailoutType bailout_type, - Register src1, const Operand& src2) { - LEnvironment* environment = instr->environment(); - RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); - DCHECK(environment->HasBeenRegistered()); - int id = environment->deoptimization_index(); - Address entry = - Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); - if (entry == NULL) { - Abort(kBailoutWasNotPrepared); - return; - } - - if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) { - Register scratch = scratch0(); - ExternalReference count = ExternalReference::stress_deopt_count(isolate()); - Label no_deopt; - __ Push(a1, scratch); - __ li(scratch, Operand(count)); - __ Lw(a1, MemOperand(scratch)); - __ Subu(a1, a1, Operand(1)); - __ Branch(&no_deopt, ne, a1, Operand(zero_reg)); - __ li(a1, Operand(FLAG_deopt_every_n_times)); - __ Sw(a1, MemOperand(scratch)); - __ Pop(a1, scratch); - - __ Call(entry, RelocInfo::RUNTIME_ENTRY); - __ bind(&no_deopt); - __ Sw(a1, MemOperand(scratch)); - __ Pop(a1, scratch); - } - - if (info()->ShouldTrapOnDeopt()) { - Label skip; - if (condition != al) { - __ Branch(&skip, NegateCondition(condition), src1, src2); - } - __ stop("trap_on_deopt"); - __ bind(&skip); - } - - Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id); - - DCHECK(info()->IsStub() || frame_is_built_); - // Go through jump table if we need to handle condition, build frame, or - // restore caller doubles. 
- if (condition == al && frame_is_built_ && - !info()->saves_caller_doubles()) { - DeoptComment(deopt_info); - __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2); - } else { - Deoptimizer::JumpTableEntry* table_entry = - new (zone()) Deoptimizer::JumpTableEntry( - entry, deopt_info, bailout_type, !frame_is_built_); - // We often have several deopts to the same entry, reuse the last - // jump entry if this is the case. - if (FLAG_trace_deopt || isolate()->is_profiling() || - jump_table_.is_empty() || - !table_entry->IsEquivalentTo(*jump_table_.last())) { - jump_table_.Add(table_entry, zone()); - } - __ Branch(&jump_table_.last()->label, condition, src1, src2); - } -} - -void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, - DeoptimizeReason deopt_reason, Register src1, - const Operand& src2) { - Deoptimizer::BailoutType bailout_type = info()->IsStub() - ? Deoptimizer::LAZY - : Deoptimizer::EAGER; - DeoptimizeIf(condition, instr, deopt_reason, bailout_type, src1, src2); -} - - -void LCodeGen::RecordSafepointWithLazyDeopt( - LInstruction* instr, SafepointMode safepoint_mode) { - if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { - RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); - } else { - DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - RecordSafepointWithRegisters( - instr->pointer_map(), 0, Safepoint::kLazyDeopt); - } -} - - -void LCodeGen::RecordSafepoint( - LPointerMap* pointers, - Safepoint::Kind kind, - int arguments, - Safepoint::DeoptMode deopt_mode) { - DCHECK(expected_safepoint_kind_ == kind); - - const ZoneList* operands = pointers->GetNormalizedOperands(); - Safepoint safepoint = safepoints_.DefineSafepoint(masm(), - kind, arguments, deopt_mode); - for (int i = 0; i < operands->length(); i++) { - LOperand* pointer = operands->at(i); - if (pointer->IsStackSlot()) { - safepoint.DefinePointerSlot(pointer->index(), zone()); - } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { - safepoint.DefinePointerRegister(ToRegister(pointer), zone()); - } - } -} - - -void LCodeGen::RecordSafepoint(LPointerMap* pointers, - Safepoint::DeoptMode deopt_mode) { - RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode); -} - - -void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) { - LPointerMap empty_pointers(zone()); - RecordSafepoint(&empty_pointers, deopt_mode); -} - - -void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, - int arguments, - Safepoint::DeoptMode deopt_mode) { - RecordSafepoint( - pointers, Safepoint::kWithRegisters, arguments, deopt_mode); -} - - -static const char* LabelType(LLabel* label) { - if (label->is_loop_header()) return " (loop header)"; - if (label->is_osr_entry()) return " (OSR entry)"; - return ""; -} - - -void LCodeGen::DoLabel(LLabel* label) { - Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------", - current_instruction_, - label->hydrogen_value()->id(), - label->block_id(), - LabelType(label)); - __ bind(label->label()); - current_block_ = label->block_id(); - DoGap(label); -} - - -void LCodeGen::DoParallelMove(LParallelMove* move) { - resolver_.Resolve(move); -} - - -void LCodeGen::DoGap(LGap* gap) { - for (int i = LGap::FIRST_INNER_POSITION; - i <= LGap::LAST_INNER_POSITION; - i++) { - LGap::InnerPosition inner_pos = static_cast(i); - LParallelMove* move = gap->GetParallelMove(inner_pos); - if (move != NULL) DoParallelMove(move); - } -} - - -void LCodeGen::DoInstructionGap(LInstructionGap* instr) { - DoGap(instr); -} - - 
-void LCodeGen::DoParameter(LParameter* instr) { - // Nothing to do. -} - - -void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { - GenerateOsrPrologue(); -} - - -void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - DCHECK(dividend.is(ToRegister(instr->result()))); - - // Theoretically, a variation of the branch-free code for integer division by - // a power of 2 (calculating the remainder via an additional multiplication - // (which gets simplified to an 'and') and subtraction) should be faster, and - // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to - // indicate that positive dividends are heavily favored, so the branching - // version performs better. - HMod* hmod = instr->hydrogen(); - int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); - Label dividend_is_not_negative, done; - - if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { - __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg)); - // Note: The code below even works when right contains kMinInt. - __ dsubu(dividend, zero_reg, dividend); - __ And(dividend, dividend, Operand(mask)); - if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend, - Operand(zero_reg)); - } - __ Branch(USE_DELAY_SLOT, &done); - __ dsubu(dividend, zero_reg, dividend); - } - - __ bind(&dividend_is_not_negative); - __ And(dividend, dividend, Operand(mask)); - __ bind(&done); -} - - -void LCodeGen::DoModByConstI(LModByConstI* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - Register result = ToRegister(instr->result()); - DCHECK(!dividend.is(result)); - - if (divisor == 0) { - DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero); - return; - } - - __ TruncatingDiv(result, dividend, Abs(divisor)); - __ Dmul(result, result, Operand(Abs(divisor))); - __ Dsubu(result, dividend, Operand(result)); - - // Check for negative zero. - HMod* hmod = instr->hydrogen(); - if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label remainder_not_zero; - __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg)); - DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, dividend, - Operand(zero_reg)); - __ bind(&remainder_not_zero); - } -} - - -void LCodeGen::DoModI(LModI* instr) { - HMod* hmod = instr->hydrogen(); - const Register left_reg = ToRegister(instr->left()); - const Register right_reg = ToRegister(instr->right()); - const Register result_reg = ToRegister(instr->result()); - - // div runs in the background while we check for special cases. - __ Dmod(result_reg, left_reg, right_reg); - - Label done; - // Check for x % 0, we have to deopt in this case because we can't return a - // NaN. - if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, right_reg, - Operand(zero_reg)); - } - - // Check for kMinInt % -1, div will return kMinInt, which is not what we - // want. We have to deopt if we care about -0, because we can't return that. 
- if (hmod->CheckFlag(HValue::kCanOverflow)) { - Label no_overflow_possible; - __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt)); - if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, right_reg, - Operand(-1)); - } else { - __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1)); - __ Branch(USE_DELAY_SLOT, &done); - __ mov(result_reg, zero_reg); - } - __ bind(&no_overflow_possible); - } - - // If we care about -0, test if the dividend is <0 and the result is 0. - __ Branch(&done, ge, left_reg, Operand(zero_reg)); - - if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result_reg, - Operand(zero_reg)); - } - __ bind(&done); -} - - -void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - Register result = ToRegister(instr->result()); - DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); - DCHECK(!result.is(dividend)); - - // Check for (0 / -x) that will produce negative zero. - HDiv* hdiv = instr->hydrogen(); - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend, - Operand(zero_reg)); - } - // Check for (kMinInt / -1). - if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, dividend, - Operand(kMinInt)); - } - // Deoptimize if remainder will not be 0. - if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && - divisor != 1 && divisor != -1) { - int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); - __ And(at, dividend, Operand(mask)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, at, - Operand(zero_reg)); - } - - if (divisor == -1) { // Nice shortcut, not needed for correctness. - __ Dsubu(result, zero_reg, dividend); - return; - } - uint16_t shift = WhichPowerOf2Abs(divisor); - if (shift == 0) { - __ Move(result, dividend); - } else if (shift == 1) { - __ dsrl32(result, dividend, 31); - __ Daddu(result, dividend, Operand(result)); - } else { - __ dsra32(result, dividend, 31); - __ dsrl32(result, result, 32 - shift); - __ Daddu(result, dividend, Operand(result)); - } - if (shift > 0) __ dsra(result, result, shift); - if (divisor < 0) __ Dsubu(result, zero_reg, result); -} - - -void LCodeGen::DoDivByConstI(LDivByConstI* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - Register result = ToRegister(instr->result()); - DCHECK(!dividend.is(result)); - - if (divisor == 0) { - DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero); - return; - } - - // Check for (0 / -x) that will produce negative zero. - HDiv* hdiv = instr->hydrogen(); - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend, - Operand(zero_reg)); - } - - __ TruncatingDiv(result, dividend, Abs(divisor)); - if (divisor < 0) __ Subu(result, zero_reg, result); - - if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { - __ Dmul(scratch0(), result, Operand(divisor)); - __ Dsubu(scratch0(), scratch0(), dividend); - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, scratch0(), - Operand(zero_reg)); - } -} - - -// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. 
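The DoDivByPowerOf2I sequence above (dsra32/dsrl32/Daddu followed by dsra) is the standard shift trick for rounding-toward-zero signed division by a positive power of two: negative dividends are biased by 2^shift - 1 before the arithmetic shift. DivByPowerOf2 below is a hypothetical, stand-alone C++ sketch of the same idea, not V8 code; it assumes arithmetic right shift of negative int32_t values, which MIPS and mainstream compilers provide.

    #include <cstdint>

    // Truncating (round-toward-zero) division by 2^shift without a div:
    // negative dividends get a bias of 2^shift - 1 so the arithmetic shift
    // truncates toward zero rather than toward negative infinity.
    int32_t DivByPowerOf2(int32_t dividend, int shift) {
      if (shift == 0) return dividend;
      uint32_t sign_mask = static_cast<uint32_t>(dividend >> 31);        // 0 or 0xFFFFFFFF
      int32_t bias = static_cast<int32_t>(sign_mask >> (32 - shift));    // 0 or 2^shift - 1
      return (dividend + bias) >> shift;
    }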
-void LCodeGen::DoDivI(LDivI* instr) { - HBinaryOperation* hdiv = instr->hydrogen(); - Register dividend = ToRegister(instr->dividend()); - Register divisor = ToRegister(instr->divisor()); - const Register result = ToRegister(instr->result()); - - // On MIPS div is asynchronous - it will run in the background while we - // check for special cases. - __ Div(result, dividend, divisor); - - // Check for x / 0. - if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, divisor, - Operand(zero_reg)); - } - - // Check for (0 / -x) that will produce negative zero. - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label left_not_zero; - __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); - DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, divisor, - Operand(zero_reg)); - __ bind(&left_not_zero); - } - - // Check for (kMinInt / -1). - if (hdiv->CheckFlag(HValue::kCanOverflow) && - !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { - Label left_not_min_int; - __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt)); - DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, divisor, Operand(-1)); - __ bind(&left_not_min_int); - } - - if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { - // Calculate remainder. - Register remainder = ToRegister(instr->temp()); - if (kArchVariant != kMips64r6) { - __ mfhi(remainder); - } else { - __ dmod(remainder, dividend, divisor); - } - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, remainder, - Operand(zero_reg)); - } -} - - -void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { - DoubleRegister addend = ToDoubleRegister(instr->addend()); - DoubleRegister multiplier = ToDoubleRegister(instr->multiplier()); - DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand()); - - // This is computed in-place. - DCHECK(addend.is(ToDoubleRegister(instr->result()))); - - __ Madd_d(addend, addend, multiplier, multiplicand, double_scratch0()); -} - - -void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { - Register dividend = ToRegister(instr->dividend()); - Register result = ToRegister(instr->result()); - int32_t divisor = instr->divisor(); - Register scratch = result.is(dividend) ? scratch0() : dividend; - DCHECK(!result.is(dividend) || !scratch.is(dividend)); - - // If the divisor is 1, return the dividend. - if (divisor == 0) { - __ Move(result, dividend); - return; - } - - // If the divisor is positive, things are easy: There can be no deopts and we - // can simply do an arithmetic right shift. - uint16_t shift = WhichPowerOf2Abs(divisor); - if (divisor > 1) { - __ dsra(result, dividend, shift); - return; - } - - // If the divisor is negative, we have to negate and handle edge cases. - // Dividend can be the same register as result so save the value of it - // for checking overflow. - __ Move(scratch, dividend); - - __ Dsubu(result, zero_reg, dividend); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result, - Operand(zero_reg)); - } - - __ Xor(scratch, scratch, result); - // Dividing by -1 is basically negation, unless we overflow. - if (divisor == -1) { - if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { - DeoptimizeIf(gt, instr, DeoptimizeReason::kOverflow, result, - Operand(kMaxInt)); - } - return; - } - - // If the negation could not overflow, simply shifting is OK. 
- if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { - __ dsra(result, result, shift); - return; - } - - Label no_overflow, done; - __ Branch(&no_overflow, lt, scratch, Operand(zero_reg)); - __ li(result, Operand(kMinInt / divisor), CONSTANT_SIZE); - __ Branch(&done); - __ bind(&no_overflow); - __ dsra(result, result, shift); - __ bind(&done); -} - - -void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - Register result = ToRegister(instr->result()); - DCHECK(!dividend.is(result)); - - if (divisor == 0) { - DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero); - return; - } - - // Check for (0 / -x) that will produce negative zero. - HMathFloorOfDiv* hdiv = instr->hydrogen(); - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend, - Operand(zero_reg)); - } - - // Easy case: We need no dynamic check for the dividend and the flooring - // division is the same as the truncating division. - if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || - (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { - __ TruncatingDiv(result, dividend, Abs(divisor)); - if (divisor < 0) __ Dsubu(result, zero_reg, result); - return; - } - - // In the general case we may need to adjust before and after the truncating - // division to get a flooring division. - Register temp = ToRegister(instr->temp()); - DCHECK(!temp.is(dividend) && !temp.is(result)); - Label needs_adjustment, done; - __ Branch(&needs_adjustment, divisor > 0 ? lt : gt, - dividend, Operand(zero_reg)); - __ TruncatingDiv(result, dividend, Abs(divisor)); - if (divisor < 0) __ Dsubu(result, zero_reg, result); - __ jmp(&done); - __ bind(&needs_adjustment); - __ Daddu(temp, dividend, Operand(divisor > 0 ? 1 : -1)); - __ TruncatingDiv(result, temp, Abs(divisor)); - if (divisor < 0) __ Dsubu(result, zero_reg, result); - __ Dsubu(result, result, Operand(1)); - __ bind(&done); -} - - -// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. -void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { - HBinaryOperation* hdiv = instr->hydrogen(); - Register dividend = ToRegister(instr->dividend()); - Register divisor = ToRegister(instr->divisor()); - const Register result = ToRegister(instr->result()); - - // On MIPS div is asynchronous - it will run in the background while we - // check for special cases. - __ Ddiv(result, dividend, divisor); - - // Check for x / 0. - if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, divisor, - Operand(zero_reg)); - } - - // Check for (0 / -x) that will produce negative zero. - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label left_not_zero; - __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); - DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, divisor, - Operand(zero_reg)); - __ bind(&left_not_zero); - } - - // Check for (kMinInt / -1). - if (hdiv->CheckFlag(HValue::kCanOverflow) && - !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { - Label left_not_min_int; - __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt)); - DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, divisor, Operand(-1)); - __ bind(&left_not_min_int); - } - - // We performed a truncating division. Correct the result if necessary. 
- Label done; - Register remainder = scratch0(); - if (kArchVariant != kMips64r6) { - __ mfhi(remainder); - } else { - __ dmod(remainder, dividend, divisor); - } - __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT); - __ Xor(remainder, remainder, Operand(divisor)); - __ Branch(&done, ge, remainder, Operand(zero_reg)); - __ Dsubu(result, result, Operand(1)); - __ bind(&done); -} - - -void LCodeGen::DoMulS(LMulS* instr) { - Register scratch = scratch0(); - Register result = ToRegister(instr->result()); - // Note that result may alias left. - Register left = ToRegister(instr->left()); - LOperand* right_op = instr->right(); - - bool bailout_on_minus_zero = - instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); - bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); - - if (right_op->IsConstantOperand()) { - int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); - - if (bailout_on_minus_zero && (constant < 0)) { - // The case of a null constant will be handled separately. - // If constant is negative and left is null, the result should be -0. - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, left, - Operand(zero_reg)); - } - - switch (constant) { - case -1: - if (overflow) { - Label no_overflow; - __ DsubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow); - DeoptimizeIf(al, instr); - __ bind(&no_overflow); - } else { - __ Dsubu(result, zero_reg, left); - } - break; - case 0: - if (bailout_on_minus_zero) { - // If left is strictly negative and the constant is null, the - // result is -0. Deoptimize if required, otherwise return 0. - DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, left, - Operand(zero_reg)); - } - __ mov(result, zero_reg); - break; - case 1: - // Nothing to do. - __ Move(result, left); - break; - default: - // Multiplying by powers of two and powers of two plus or minus - // one can be done faster with shifted operands. - // For other constants we emit standard code. - int32_t mask = constant >> 31; - uint32_t constant_abs = (constant + mask) ^ mask; - - if (base::bits::IsPowerOfTwo32(constant_abs)) { - int32_t shift = WhichPowerOf2(constant_abs); - __ dsll(result, left, shift); - // Correct the sign of the result if the constant is negative. - if (constant < 0) __ Dsubu(result, zero_reg, result); - } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) { - int32_t shift = WhichPowerOf2(constant_abs - 1); - __ Dlsa(result, left, left, shift); - // Correct the sign of the result if the constant is negative. - if (constant < 0) __ Dsubu(result, zero_reg, result); - } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) { - int32_t shift = WhichPowerOf2(constant_abs + 1); - __ dsll(scratch, left, shift); - __ Dsubu(result, scratch, left); - // Correct the sign of the result if the constant is negative. - if (constant < 0) __ Dsubu(result, zero_reg, result); - } else { - // Generate standard code. - __ li(at, constant); - __ Dmul(result, left, at); - } - } - } else { - DCHECK(right_op->IsRegister()); - Register right = ToRegister(right_op); - - if (overflow) { - // hi:lo = left * right. - __ Dmulh(result, left, right); - __ dsra32(scratch, result, 0); - __ sra(at, result, 31); - __ SmiTag(result); - DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, scratch, - Operand(at)); - } else { - __ SmiUntag(result, left); - __ dmul(result, result, right); - } - - if (bailout_on_minus_zero) { - Label done; - __ Xor(at, left, right); - __ Branch(&done, ge, at, Operand(zero_reg)); - // Bail out if the result is minus zero. 
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result, - Operand(zero_reg)); - __ bind(&done); - } - } -} - - -void LCodeGen::DoMulI(LMulI* instr) { - Register scratch = scratch0(); - Register result = ToRegister(instr->result()); - // Note that result may alias left. - Register left = ToRegister(instr->left()); - LOperand* right_op = instr->right(); - - bool bailout_on_minus_zero = - instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); - bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); - - if (right_op->IsConstantOperand()) { - int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); - - if (bailout_on_minus_zero && (constant < 0)) { - // The case of a null constant will be handled separately. - // If constant is negative and left is null, the result should be -0. - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, left, - Operand(zero_reg)); - } - - switch (constant) { - case -1: - if (overflow) { - Label no_overflow; - __ SubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow); - DeoptimizeIf(al, instr); - __ bind(&no_overflow); - } else { - __ Subu(result, zero_reg, left); - } - break; - case 0: - if (bailout_on_minus_zero) { - // If left is strictly negative and the constant is null, the - // result is -0. Deoptimize if required, otherwise return 0. - DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, left, - Operand(zero_reg)); - } - __ mov(result, zero_reg); - break; - case 1: - // Nothing to do. - __ Move(result, left); - break; - default: - // Multiplying by powers of two and powers of two plus or minus - // one can be done faster with shifted operands. - // For other constants we emit standard code. - int32_t mask = constant >> 31; - uint32_t constant_abs = (constant + mask) ^ mask; - - if (base::bits::IsPowerOfTwo32(constant_abs)) { - int32_t shift = WhichPowerOf2(constant_abs); - __ sll(result, left, shift); - // Correct the sign of the result if the constant is negative. - if (constant < 0) __ Subu(result, zero_reg, result); - } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) { - int32_t shift = WhichPowerOf2(constant_abs - 1); - __ Lsa(result, left, left, shift); - // Correct the sign of the result if the constant is negative. - if (constant < 0) __ Subu(result, zero_reg, result); - } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) { - int32_t shift = WhichPowerOf2(constant_abs + 1); - __ sll(scratch, left, shift); - __ Subu(result, scratch, left); - // Correct the sign of the result if the constant is negative. - if (constant < 0) __ Subu(result, zero_reg, result); - } else { - // Generate standard code. - __ li(at, constant); - __ Mul(result, left, at); - } - } - - } else { - DCHECK(right_op->IsRegister()); - Register right = ToRegister(right_op); - - if (overflow) { - // hi:lo = left * right. - __ Dmul(result, left, right); - __ dsra32(scratch, result, 0); - __ sra(at, result, 31); - - DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, scratch, - Operand(at)); - } else { - __ mul(result, left, right); - } - - if (bailout_on_minus_zero) { - Label done; - __ Xor(at, left, right); - __ Branch(&done, ge, at, Operand(zero_reg)); - // Bail out if the result is minus zero. 
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result, - Operand(zero_reg)); - __ bind(&done); - } - } -} - - -void LCodeGen::DoBitI(LBitI* instr) { - LOperand* left_op = instr->left(); - LOperand* right_op = instr->right(); - DCHECK(left_op->IsRegister()); - Register left = ToRegister(left_op); - Register result = ToRegister(instr->result()); - Operand right(no_reg); - - if (right_op->IsStackSlot()) { - right = Operand(EmitLoadRegister(right_op, at)); - } else { - DCHECK(right_op->IsRegister() || right_op->IsConstantOperand()); - right = ToOperand(right_op); - } - - switch (instr->op()) { - case Token::BIT_AND: - __ And(result, left, right); - break; - case Token::BIT_OR: - __ Or(result, left, right); - break; - case Token::BIT_XOR: - if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) { - __ Nor(result, zero_reg, left); - } else { - __ Xor(result, left, right); - } - break; - default: - UNREACHABLE(); - break; - } -} - - -void LCodeGen::DoShiftI(LShiftI* instr) { - // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so - // result may alias either of them. - LOperand* right_op = instr->right(); - Register left = ToRegister(instr->left()); - Register result = ToRegister(instr->result()); - - if (right_op->IsRegister()) { - // No need to mask the right operand on MIPS, it is built into the variable - // shift instructions. - switch (instr->op()) { - case Token::ROR: - __ Ror(result, left, Operand(ToRegister(right_op))); - break; - case Token::SAR: - __ srav(result, left, ToRegister(right_op)); - break; - case Token::SHR: - __ srlv(result, left, ToRegister(right_op)); - if (instr->can_deopt()) { - // TODO(yy): (-1) >>> 0. anything else? - DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, result, - Operand(zero_reg)); - DeoptimizeIf(gt, instr, DeoptimizeReason::kNegativeValue, result, - Operand(kMaxInt)); - } - break; - case Token::SHL: - __ sllv(result, left, ToRegister(right_op)); - break; - default: - UNREACHABLE(); - break; - } - } else { - // Mask the right_op operand. - int value = ToInteger32(LConstantOperand::cast(right_op)); - uint8_t shift_count = static_cast(value & 0x1F); - switch (instr->op()) { - case Token::ROR: - if (shift_count != 0) { - __ Ror(result, left, Operand(shift_count)); - } else { - __ Move(result, left); - } - break; - case Token::SAR: - if (shift_count != 0) { - __ sra(result, left, shift_count); - } else { - __ Move(result, left); - } - break; - case Token::SHR: - if (shift_count != 0) { - __ srl(result, left, shift_count); - } else { - if (instr->can_deopt()) { - __ And(at, left, Operand(0x80000000)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNegativeValue, at, - Operand(zero_reg)); - } - __ Move(result, left); - } - break; - case Token::SHL: - if (shift_count != 0) { - if (instr->hydrogen_value()->representation().IsSmi()) { - __ dsll(result, left, shift_count); - } else { - __ sll(result, left, shift_count); - } - } else { - __ Move(result, left); - } - break; - default: - UNREACHABLE(); - break; - } - } -} - - -void LCodeGen::DoSubS(LSubS* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - LOperand* result = instr->result(); - bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); - - if (!can_overflow) { - DCHECK(right->IsRegister() || right->IsConstantOperand()); - __ Dsubu(ToRegister(result), ToRegister(left), ToOperand(right)); - } else { // can_overflow. 
- Register scratch = scratch0(); - Label no_overflow_label; - DCHECK(right->IsRegister() || right->IsConstantOperand()); - __ DsubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right), - &no_overflow_label, scratch); - DeoptimizeIf(al, instr); - __ bind(&no_overflow_label); - } -} - - -void LCodeGen::DoSubI(LSubI* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - LOperand* result = instr->result(); - bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); - - if (!can_overflow) { - DCHECK(right->IsRegister() || right->IsConstantOperand()); - __ Subu(ToRegister(result), ToRegister(left), ToOperand(right)); - } else { // can_overflow. - Register scratch = scratch0(); - Label no_overflow_label; - DCHECK(right->IsRegister() || right->IsConstantOperand()); - __ SubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right), - &no_overflow_label, scratch); - DeoptimizeIf(al, instr); - __ bind(&no_overflow_label); - } -} - - -void LCodeGen::DoConstantI(LConstantI* instr) { - __ li(ToRegister(instr->result()), Operand(instr->value())); -} - - -void LCodeGen::DoConstantS(LConstantS* instr) { - __ li(ToRegister(instr->result()), Operand(instr->value())); -} - - -void LCodeGen::DoConstantD(LConstantD* instr) { - DCHECK(instr->result()->IsDoubleRegister()); - DoubleRegister result = ToDoubleRegister(instr->result()); - double v = instr->value(); - __ Move(result, v); -} - - -void LCodeGen::DoConstantE(LConstantE* instr) { - __ li(ToRegister(instr->result()), Operand(instr->value())); -} - - -void LCodeGen::DoConstantT(LConstantT* instr) { - Handle object = instr->value(isolate()); - AllowDeferredHandleDereference smi_check; - __ li(ToRegister(instr->result()), object); -} - - -MemOperand LCodeGen::BuildSeqStringOperand(Register string, - LOperand* index, - String::Encoding encoding) { - if (index->IsConstantOperand()) { - int offset = ToInteger32(LConstantOperand::cast(index)); - if (encoding == String::TWO_BYTE_ENCODING) { - offset *= kUC16Size; - } - STATIC_ASSERT(kCharSize == 1); - return FieldMemOperand(string, SeqString::kHeaderSize + offset); - } - Register scratch = scratch0(); - DCHECK(!scratch.is(string)); - DCHECK(!scratch.is(ToRegister(index))); - if (encoding == String::ONE_BYTE_ENCODING) { - __ Daddu(scratch, string, ToRegister(index)); - } else { - STATIC_ASSERT(kUC16Size == 2); - __ dsll(scratch, ToRegister(index), 1); - __ Daddu(scratch, string, scratch); - } - return FieldMemOperand(scratch, SeqString::kHeaderSize); -} - - -void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { - String::Encoding encoding = instr->hydrogen()->encoding(); - Register string = ToRegister(instr->string()); - Register result = ToRegister(instr->result()); - - if (FLAG_debug_code) { - Register scratch = scratch0(); - __ Ld(scratch, FieldMemOperand(string, HeapObject::kMapOffset)); - __ Lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); - - __ And(scratch, scratch, - Operand(kStringRepresentationMask | kStringEncodingMask)); - static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; - static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; - __ Dsubu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING - ? 
one_byte_seq_type : two_byte_seq_type)); - __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg)); - } - - MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); - if (encoding == String::ONE_BYTE_ENCODING) { - __ Lbu(result, operand); - } else { - __ Lhu(result, operand); - } -} - - -void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { - String::Encoding encoding = instr->hydrogen()->encoding(); - Register string = ToRegister(instr->string()); - Register value = ToRegister(instr->value()); - - if (FLAG_debug_code) { - Register scratch = scratch0(); - Register index = ToRegister(instr->index()); - static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; - static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; - int encoding_mask = - instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING - ? one_byte_seq_type : two_byte_seq_type; - __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask); - } - - MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); - if (encoding == String::ONE_BYTE_ENCODING) { - __ Sb(value, operand); - } else { - __ Sh(value, operand); - } -} - - -void LCodeGen::DoAddE(LAddE* instr) { - LOperand* result = instr->result(); - LOperand* left = instr->left(); - LOperand* right = instr->right(); - - DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)); - DCHECK(right->IsRegister() || right->IsConstantOperand()); - __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right)); -} - - -void LCodeGen::DoAddS(LAddS* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - LOperand* result = instr->result(); - bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); - - if (!can_overflow) { - DCHECK(right->IsRegister() || right->IsConstantOperand()); - __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right)); - } else { // can_overflow. - Label no_overflow_label; - Register scratch = scratch1(); - DCHECK(right->IsRegister() || right->IsConstantOperand()); - __ DaddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right), - &no_overflow_label, scratch); - DeoptimizeIf(al, instr); - __ bind(&no_overflow_label); - } -} - - -void LCodeGen::DoAddI(LAddI* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - LOperand* result = instr->result(); - bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); - - if (!can_overflow) { - DCHECK(right->IsRegister() || right->IsConstantOperand()); - __ Addu(ToRegister(result), ToRegister(left), ToOperand(right)); - } else { // can_overflow. - Label no_overflow_label; - Register scratch = scratch1(); - DCHECK(right->IsRegister() || right->IsConstantOperand()); - __ AddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right), - &no_overflow_label, scratch); - DeoptimizeIf(al, instr); - __ bind(&no_overflow_label); - } -} - - -void LCodeGen::DoMathMinMax(LMathMinMax* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - HMathMinMax::Operation operation = instr->hydrogen()->operation(); - Register scratch = scratch1(); - if (instr->hydrogen()->representation().IsSmiOrInteger32()) { - Condition condition = (operation == HMathMinMax::kMathMin) ? 
le : ge; - Register left_reg = ToRegister(left); - Register right_reg = EmitLoadRegister(right, scratch0()); - Register result_reg = ToRegister(instr->result()); - Label return_right, done; - __ Slt(scratch, left_reg, Operand(right_reg)); - if (condition == ge) { - __ Movz(result_reg, left_reg, scratch); - __ Movn(result_reg, right_reg, scratch); - } else { - DCHECK(condition == le); - __ Movn(result_reg, left_reg, scratch); - __ Movz(result_reg, right_reg, scratch); - } - } else { - DCHECK(instr->hydrogen()->representation().IsDouble()); - FPURegister left_reg = ToDoubleRegister(left); - FPURegister right_reg = ToDoubleRegister(right); - FPURegister result_reg = ToDoubleRegister(instr->result()); - Label nan, done; - if (operation == HMathMinMax::kMathMax) { - __ Float64Max(result_reg, left_reg, right_reg, &nan); - } else { - DCHECK(operation == HMathMinMax::kMathMin); - __ Float64Min(result_reg, left_reg, right_reg, &nan); - } - __ Branch(&done); - - __ bind(&nan); - __ add_d(result_reg, left_reg, right_reg); - - __ bind(&done); - } -} - - -void LCodeGen::DoArithmeticD(LArithmeticD* instr) { - DoubleRegister left = ToDoubleRegister(instr->left()); - DoubleRegister right = ToDoubleRegister(instr->right()); - DoubleRegister result = ToDoubleRegister(instr->result()); - switch (instr->op()) { - case Token::ADD: - __ add_d(result, left, right); - break; - case Token::SUB: - __ sub_d(result, left, right); - break; - case Token::MUL: - __ mul_d(result, left, right); - break; - case Token::DIV: - __ div_d(result, left, right); - break; - case Token::MOD: { - // Save a0-a3 on the stack. - RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit(); - __ MultiPush(saved_regs); - - __ PrepareCallCFunction(0, 2, scratch0()); - __ MovToFloatParameters(left, right); - __ CallCFunction( - ExternalReference::mod_two_doubles_operation(isolate()), - 0, 2); - // Move the result in the double result register. - __ MovFromFloatResult(result); - - // Restore saved register. 
- __ MultiPop(saved_regs); - break; - } - default: - UNREACHABLE(); - break; - } -} - - -void LCodeGen::DoArithmeticT(LArithmeticT* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - DCHECK(ToRegister(instr->left()).is(a1)); - DCHECK(ToRegister(instr->right()).is(a0)); - DCHECK(ToRegister(instr->result()).is(v0)); - - UNREACHABLE(); -} - - -template -void LCodeGen::EmitBranch(InstrType instr, - Condition condition, - Register src1, - const Operand& src2) { - int left_block = instr->TrueDestination(chunk_); - int right_block = instr->FalseDestination(chunk_); - - int next_block = GetNextEmittedBlock(); - if (right_block == left_block || condition == al) { - EmitGoto(left_block); - } else if (left_block == next_block) { - __ Branch(chunk_->GetAssemblyLabel(right_block), - NegateCondition(condition), src1, src2); - } else if (right_block == next_block) { - __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2); - } else { - __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2); - __ Branch(chunk_->GetAssemblyLabel(right_block)); - } -} - - -template -void LCodeGen::EmitBranchF(InstrType instr, - Condition condition, - FPURegister src1, - FPURegister src2) { - int right_block = instr->FalseDestination(chunk_); - int left_block = instr->TrueDestination(chunk_); - - int next_block = GetNextEmittedBlock(); - if (right_block == left_block) { - EmitGoto(left_block); - } else if (left_block == next_block) { - __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL, - NegateFpuCondition(condition), src1, src2); - } else if (right_block == next_block) { - __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, - condition, src1, src2); - } else { - __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, - condition, src1, src2); - __ Branch(chunk_->GetAssemblyLabel(right_block)); - } -} - - -template -void LCodeGen::EmitTrueBranch(InstrType instr, Condition condition, - Register src1, const Operand& src2) { - int true_block = instr->TrueDestination(chunk_); - __ Branch(chunk_->GetAssemblyLabel(true_block), condition, src1, src2); -} - - -template -void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition, - Register src1, const Operand& src2) { - int false_block = instr->FalseDestination(chunk_); - __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2); -} - - -template -void LCodeGen::EmitFalseBranchF(InstrType instr, - Condition condition, - FPURegister src1, - FPURegister src2) { - int false_block = instr->FalseDestination(chunk_); - __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL, - condition, src1, src2); -} - - -void LCodeGen::DoDebugBreak(LDebugBreak* instr) { - __ stop("LDebugBreak"); -} - - -void LCodeGen::DoBranch(LBranch* instr) { - Representation r = instr->hydrogen()->value()->representation(); - if (r.IsInteger32() || r.IsSmi()) { - DCHECK(!info()->IsStub()); - Register reg = ToRegister(instr->value()); - EmitBranch(instr, ne, reg, Operand(zero_reg)); - } else if (r.IsDouble()) { - DCHECK(!info()->IsStub()); - DoubleRegister reg = ToDoubleRegister(instr->value()); - // Test the double value. Zero and NaN are false. 
- EmitBranchF(instr, ogl, reg, kDoubleRegZero); - } else { - DCHECK(r.IsTagged()); - Register reg = ToRegister(instr->value()); - HType type = instr->hydrogen()->value()->type(); - if (type.IsBoolean()) { - DCHECK(!info()->IsStub()); - __ LoadRoot(at, Heap::kTrueValueRootIndex); - EmitBranch(instr, eq, reg, Operand(at)); - } else if (type.IsSmi()) { - DCHECK(!info()->IsStub()); - EmitBranch(instr, ne, reg, Operand(zero_reg)); - } else if (type.IsJSArray()) { - DCHECK(!info()->IsStub()); - EmitBranch(instr, al, zero_reg, Operand(zero_reg)); - } else if (type.IsHeapNumber()) { - DCHECK(!info()->IsStub()); - DoubleRegister dbl_scratch = double_scratch0(); - __ Ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); - // Test the double value. Zero and NaN are false. - EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero); - } else if (type.IsString()) { - DCHECK(!info()->IsStub()); - __ Ld(at, FieldMemOperand(reg, String::kLengthOffset)); - EmitBranch(instr, ne, at, Operand(zero_reg)); - } else { - ToBooleanHints expected = instr->hydrogen()->expected_input_types(); - // Avoid deopts in the case where we've never executed this path before. - if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny; - - if (expected & ToBooleanHint::kUndefined) { - // undefined -> false. - __ LoadRoot(at, Heap::kUndefinedValueRootIndex); - __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at)); - } - if (expected & ToBooleanHint::kBoolean) { - // Boolean -> its value. - __ LoadRoot(at, Heap::kTrueValueRootIndex); - __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at)); - __ LoadRoot(at, Heap::kFalseValueRootIndex); - __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at)); - } - if (expected & ToBooleanHint::kNull) { - // 'null' -> false. - __ LoadRoot(at, Heap::kNullValueRootIndex); - __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at)); - } - - if (expected & ToBooleanHint::kSmallInteger) { - // Smis: 0 -> false, all other -> true. - __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg)); - __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); - } else if (expected & ToBooleanHint::kNeedsMap) { - // If we need a map later and have a Smi -> deopt. - __ SmiTst(reg, at); - DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg)); - } - - const Register map = scratch0(); - if (expected & ToBooleanHint::kNeedsMap) { - __ Ld(map, FieldMemOperand(reg, HeapObject::kMapOffset)); - if (expected & ToBooleanHint::kCanBeUndetectable) { - // Undetectable -> false. - __ Lbu(at, FieldMemOperand(map, Map::kBitFieldOffset)); - __ And(at, at, Operand(1 << Map::kIsUndetectable)); - __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg)); - } - } - - if (expected & ToBooleanHint::kReceiver) { - // spec object -> true. - __ Lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset)); - __ Branch(instr->TrueLabel(chunk_), - ge, at, Operand(FIRST_JS_RECEIVER_TYPE)); - } - - if (expected & ToBooleanHint::kString) { - // String value -> false iff empty. - Label not_string; - __ Lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset)); - __ Branch(¬_string, ge , at, Operand(FIRST_NONSTRING_TYPE)); - __ Ld(at, FieldMemOperand(reg, String::kLengthOffset)); - __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg)); - __ Branch(instr->FalseLabel(chunk_)); - __ bind(¬_string); - } - - if (expected & ToBooleanHint::kSymbol) { - // Symbol value -> true. 
- const Register scratch = scratch1(); - __ Lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); - __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE)); - } - - if (expected & ToBooleanHint::kHeapNumber) { - // heap number -> false iff +0, -0, or NaN. - DoubleRegister dbl_scratch = double_scratch0(); - Label not_heap_number; - __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); - __ Branch(¬_heap_number, ne, map, Operand(at)); - __ Ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); - __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), - ne, dbl_scratch, kDoubleRegZero); - // Falls through if dbl_scratch == 0. - __ Branch(instr->FalseLabel(chunk_)); - __ bind(¬_heap_number); - } - - if (expected != ToBooleanHint::kAny) { - // We've seen something for the first time -> deopt. - // This can only happen if we are not generic already. - DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject, zero_reg, - Operand(zero_reg)); - } - } - } -} - - -void LCodeGen::EmitGoto(int block) { - if (!IsNextEmittedBlock(block)) { - __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); - } -} - - -void LCodeGen::DoGoto(LGoto* instr) { - EmitGoto(instr->block_id()); -} - - -Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { - Condition cond = kNoCondition; - switch (op) { - case Token::EQ: - case Token::EQ_STRICT: - cond = eq; - break; - case Token::NE: - case Token::NE_STRICT: - cond = ne; - break; - case Token::LT: - cond = is_unsigned ? lo : lt; - break; - case Token::GT: - cond = is_unsigned ? hi : gt; - break; - case Token::LTE: - cond = is_unsigned ? ls : le; - break; - case Token::GTE: - cond = is_unsigned ? hs : ge; - break; - case Token::IN: - case Token::INSTANCEOF: - default: - UNREACHABLE(); - } - return cond; -} - - -void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - bool is_unsigned = - instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) || - instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32); - Condition cond = TokenToCondition(instr->op(), is_unsigned); - - if (left->IsConstantOperand() && right->IsConstantOperand()) { - // We can statically evaluate the comparison. - double left_val = ToDouble(LConstantOperand::cast(left)); - double right_val = ToDouble(LConstantOperand::cast(right)); - int next_block = Token::EvalComparison(instr->op(), left_val, right_val) - ? instr->TrueDestination(chunk_) - : instr->FalseDestination(chunk_); - EmitGoto(next_block); - } else { - if (instr->is_double()) { - // Compare left and right as doubles and load the - // resulting flags into the normal status register. - FPURegister left_reg = ToDoubleRegister(left); - FPURegister right_reg = ToDoubleRegister(right); - - // If a NaN is involved, i.e. the result is unordered, - // jump to false block label. 
- __ BranchF(NULL, instr->FalseLabel(chunk_), eq, - left_reg, right_reg); - - EmitBranchF(instr, cond, left_reg, right_reg); - } else { - Register cmp_left; - Operand cmp_right = Operand((int64_t)0); - if (right->IsConstantOperand()) { - int32_t value = ToInteger32(LConstantOperand::cast(right)); - if (instr->hydrogen_value()->representation().IsSmi()) { - cmp_left = ToRegister(left); - cmp_right = Operand(Smi::FromInt(value)); - } else { - cmp_left = ToRegister(left); - cmp_right = Operand(value); - } - } else if (left->IsConstantOperand()) { - int32_t value = ToInteger32(LConstantOperand::cast(left)); - if (instr->hydrogen_value()->representation().IsSmi()) { - cmp_left = ToRegister(right); - cmp_right = Operand(Smi::FromInt(value)); - } else { - cmp_left = ToRegister(right); - cmp_right = Operand(value); - } - // We commuted the operands, so commute the condition. - cond = CommuteCondition(cond); - } else { - cmp_left = ToRegister(left); - cmp_right = Operand(ToRegister(right)); - } - - EmitBranch(instr, cond, cmp_left, cmp_right); - } - } -} - - -void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { - Register left = ToRegister(instr->left()); - Register right = ToRegister(instr->right()); - - EmitBranch(instr, eq, left, Operand(right)); -} - - -void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { - if (instr->hydrogen()->representation().IsTagged()) { - Register input_reg = ToRegister(instr->object()); - __ li(at, Operand(factory()->the_hole_value())); - EmitBranch(instr, eq, input_reg, Operand(at)); - return; - } - - DoubleRegister input_reg = ToDoubleRegister(instr->object()); - EmitFalseBranchF(instr, eq, input_reg, input_reg); - - Register scratch = scratch0(); - __ FmoveHigh(scratch, input_reg); - EmitBranch(instr, eq, scratch, - Operand(static_cast(kHoleNanUpper32))); -} - - -Condition LCodeGen::EmitIsString(Register input, - Register temp1, - Label* is_not_string, - SmiCheck check_needed = INLINE_SMI_CHECK) { - if (check_needed == INLINE_SMI_CHECK) { - __ JumpIfSmi(input, is_not_string); - } - __ GetObjectType(input, temp1, temp1); - - return lt; -} - - -void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { - Register reg = ToRegister(instr->value()); - Register temp1 = ToRegister(instr->temp()); - - SmiCheck check_needed = - instr->hydrogen()->value()->type().IsHeapObject() - ? 
OMIT_SMI_CHECK : INLINE_SMI_CHECK; - Condition true_cond = - EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed); - - EmitBranch(instr, true_cond, temp1, - Operand(FIRST_NONSTRING_TYPE)); -} - - -void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { - Register input_reg = EmitLoadRegister(instr->value(), at); - __ And(at, input_reg, kSmiTagMask); - EmitBranch(instr, eq, at, Operand(zero_reg)); -} - - -void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { - Register input = ToRegister(instr->value()); - Register temp = ToRegister(instr->temp()); - - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - __ JumpIfSmi(input, instr->FalseLabel(chunk_)); - } - __ Ld(temp, FieldMemOperand(input, HeapObject::kMapOffset)); - __ Lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); - __ And(at, temp, Operand(1 << Map::kIsUndetectable)); - EmitBranch(instr, ne, at, Operand(zero_reg)); -} - - -static Condition ComputeCompareCondition(Token::Value op) { - switch (op) { - case Token::EQ_STRICT: - case Token::EQ: - return eq; - case Token::LT: - return lt; - case Token::GT: - return gt; - case Token::LTE: - return le; - case Token::GTE: - return ge; - default: - UNREACHABLE(); - } -} - - -void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - DCHECK(ToRegister(instr->left()).is(a1)); - DCHECK(ToRegister(instr->right()).is(a0)); - - Handle code = CodeFactory::StringCompare(isolate(), instr->op()).code(); - CallCode(code, RelocInfo::CODE_TARGET, instr); - __ LoadRoot(at, Heap::kTrueValueRootIndex); - EmitBranch(instr, eq, v0, Operand(at)); -} - - -static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { - InstanceType from = instr->from(); - InstanceType to = instr->to(); - if (from == FIRST_TYPE) return to; - DCHECK(from == to || to == LAST_TYPE); - return from; -} - - -static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { - InstanceType from = instr->from(); - InstanceType to = instr->to(); - if (from == to) return eq; - if (to == LAST_TYPE) return hs; - if (from == FIRST_TYPE) return ls; - UNREACHABLE(); -} - - -void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { - Register scratch = scratch0(); - Register input = ToRegister(instr->value()); - - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - __ JumpIfSmi(input, instr->FalseLabel(chunk_)); - } - - __ GetObjectType(input, scratch, scratch); - EmitBranch(instr, - BranchCondition(instr->hydrogen()), - scratch, - Operand(TestType(instr->hydrogen()))); -} - -// Branches to a label or falls through with the answer in flags. Trashes -// the temp registers, but not the input. -void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false, - Handle class_name, Register input, - Register temp, Register temp2) { - DCHECK(!input.is(temp)); - DCHECK(!input.is(temp2)); - DCHECK(!temp.is(temp2)); - - __ JumpIfSmi(input, is_false); - - __ GetObjectType(input, temp, temp2); - STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE); - if (String::Equals(isolate()->factory()->Function_string(), class_name)) { - __ Branch(is_true, hs, temp2, Operand(FIRST_FUNCTION_TYPE)); - } else { - __ Branch(is_false, hs, temp2, Operand(FIRST_FUNCTION_TYPE)); - } - - // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. - // Check if the constructor in the map is a function. 
- Register instance_type = scratch1(); - DCHECK(!instance_type.is(temp)); - __ GetMapConstructor(temp, temp, temp2, instance_type); - - // Objects with a non-function constructor have class 'Object'. - if (String::Equals(class_name, isolate()->factory()->Object_string())) { - __ Branch(is_true, ne, instance_type, Operand(JS_FUNCTION_TYPE)); - } else { - __ Branch(is_false, ne, instance_type, Operand(JS_FUNCTION_TYPE)); - } - - // temp now contains the constructor function. Grab the - // instance class name from there. - __ Ld(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset)); - __ Ld(temp, - FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset)); - // The class name we are testing against is internalized since it's a literal. - // The name in the constructor is internalized because of the way the context - // is booted. This routine isn't expected to work for random API-created - // classes and it doesn't have to because you can't access it with natives - // syntax. Since both sides are internalized it is sufficient to use an - // identity comparison. - - // End with the address of this class_name instance in temp register. - // On MIPS, the caller must do the comparison with Handleclass_name. -} - -void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { - Register input = ToRegister(instr->value()); - Register temp = scratch0(); - Register temp2 = ToRegister(instr->temp()); - Handle class_name = instr->hydrogen()->class_name(); - - EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), - class_name, input, temp, temp2); - - EmitBranch(instr, eq, temp, Operand(class_name)); -} - -void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { - Register reg = ToRegister(instr->value()); - Register temp = ToRegister(instr->temp()); - - __ Ld(temp, FieldMemOperand(reg, HeapObject::kMapOffset)); - EmitBranch(instr, eq, temp, Operand(instr->map())); -} - - -void LCodeGen::DoHasInPrototypeChainAndBranch( - LHasInPrototypeChainAndBranch* instr) { - Register const object = ToRegister(instr->object()); - Register const object_map = scratch0(); - Register const object_instance_type = scratch1(); - Register const object_prototype = object_map; - Register const prototype = ToRegister(instr->prototype()); - - // The {object} must be a spec object. It's sufficient to know that {object} - // is not a smi, since all other non-spec objects have {null} prototypes and - // will be ruled out below. - if (instr->hydrogen()->ObjectNeedsSmiCheck()) { - __ SmiTst(object, at); - EmitFalseBranch(instr, eq, at, Operand(zero_reg)); - } - - // Loop through the {object}s prototype chain looking for the {prototype}. - __ Ld(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); - Label loop; - __ bind(&loop); - - // Deoptimize if the object needs to be access checked. 
- __ Lbu(object_instance_type, - FieldMemOperand(object_map, Map::kBitFieldOffset)); - __ And(object_instance_type, object_instance_type, - Operand(1 << Map::kIsAccessCheckNeeded)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, object_instance_type, - Operand(zero_reg)); - __ Lbu(object_instance_type, - FieldMemOperand(object_map, Map::kInstanceTypeOffset)); - DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy, object_instance_type, - Operand(JS_PROXY_TYPE)); - - __ Ld(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset)); - __ LoadRoot(at, Heap::kNullValueRootIndex); - EmitFalseBranch(instr, eq, object_prototype, Operand(at)); - EmitTrueBranch(instr, eq, object_prototype, Operand(prototype)); - __ Branch(&loop, USE_DELAY_SLOT); - __ Ld(object_map, FieldMemOperand(object_prototype, - HeapObject::kMapOffset)); // In delay slot. -} - - -void LCodeGen::DoCmpT(LCmpT* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - Token::Value op = instr->op(); - - Handle ic = CodeFactory::CompareIC(isolate(), op).code(); - CallCode(ic, RelocInfo::CODE_TARGET, instr); - // On MIPS there is no need for a "no inlined smi code" marker (nop). - - Condition condition = ComputeCompareCondition(op); - // A minor optimization that relies on LoadRoot always emitting one - // instruction. - Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm()); - Label done, check; - __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg)); - __ bind(&check); - __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex); - DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check)); - __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); - __ bind(&done); -} - - -void LCodeGen::DoReturn(LReturn* instr) { - if (FLAG_trace && info()->IsOptimizing()) { - // Push the return value on the stack as the parameter. - // Runtime::TraceExit returns its parameter in v0. We're leaving the code - // managed by the register allocator and tearing down the frame, it's - // safe to write to the context register. - __ push(v0); - __ Ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - __ CallRuntime(Runtime::kTraceExit); - } - if (info()->saves_caller_doubles()) { - RestoreCallerDoubles(); - } - if (NeedsEagerFrame()) { - __ mov(sp, fp); - __ Pop(ra, fp); - } - if (instr->has_constant_parameter_count()) { - int parameter_count = ToInteger32(instr->constant_parameter_count()); - int32_t sp_delta = (parameter_count + 1) * kPointerSize; - if (sp_delta != 0) { - __ Daddu(sp, sp, Operand(sp_delta)); - } - } else { - DCHECK(info()->IsStub()); // Functions would need to drop one more value. 
- Register reg = ToRegister(instr->parameter_count()); - // The argument count parameter is a smi - __ SmiUntag(reg); - __ Dlsa(sp, sp, reg, kPointerSizeLog2); - } - - __ Jump(ra); -} - - -void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { - Register context = ToRegister(instr->context()); - Register result = ToRegister(instr->result()); - - __ Ld(result, ContextMemOperand(context, instr->slot_index())); - if (instr->hydrogen()->RequiresHoleCheck()) { - __ LoadRoot(at, Heap::kTheHoleValueRootIndex); - - if (instr->hydrogen()->DeoptimizesOnHole()) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, Operand(at)); - } else { - Label is_not_hole; - __ Branch(&is_not_hole, ne, result, Operand(at)); - __ LoadRoot(result, Heap::kUndefinedValueRootIndex); - __ bind(&is_not_hole); - } - } -} - - -void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { - Register context = ToRegister(instr->context()); - Register value = ToRegister(instr->value()); - Register scratch = scratch0(); - MemOperand target = ContextMemOperand(context, instr->slot_index()); - - Label skip_assignment; - - if (instr->hydrogen()->RequiresHoleCheck()) { - __ Ld(scratch, target); - __ LoadRoot(at, Heap::kTheHoleValueRootIndex); - - if (instr->hydrogen()->DeoptimizesOnHole()) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, scratch, Operand(at)); - } else { - __ Branch(&skip_assignment, ne, scratch, Operand(at)); - } - } - - __ Sd(value, target); - if (instr->hydrogen()->NeedsWriteBarrier()) { - SmiCheck check_needed = - instr->hydrogen()->value()->type().IsHeapObject() - ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; - __ RecordWriteContextSlot(context, - target.offset(), - value, - scratch0(), - GetRAState(), - kSaveFPRegs, - EMIT_REMEMBERED_SET, - check_needed); - } - - __ bind(&skip_assignment); -} - - -void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { - HObjectAccess access = instr->hydrogen()->access(); - int offset = access.offset(); - Register object = ToRegister(instr->object()); - if (access.IsExternalMemory()) { - Register result = ToRegister(instr->result()); - MemOperand operand = MemOperand(object, offset); - __ Load(result, operand, access.representation()); - return; - } - - if (instr->hydrogen()->representation().IsDouble()) { - DoubleRegister result = ToDoubleRegister(instr->result()); - __ Ldc1(result, FieldMemOperand(object, offset)); - return; - } - - Register result = ToRegister(instr->result()); - if (!access.IsInobject()) { - __ Ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); - object = result; - } - - Representation representation = access.representation(); - if (representation.IsSmi() && SmiValuesAre32Bits() && - instr->hydrogen()->representation().IsInteger32()) { - if (FLAG_debug_code) { - // Verify this is really an Smi. - Register scratch = scratch0(); - __ Load(scratch, FieldMemOperand(object, offset), representation); - __ AssertSmi(scratch); - } - - // Read int value directly from upper half of the smi. - STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); - offset = SmiWordOffset(offset); - representation = Representation::Integer32(); - } - __ Load(result, FieldMemOperand(object, offset), representation); -} - - -void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { - Register scratch = scratch0(); - Register function = ToRegister(instr->function()); - Register result = ToRegister(instr->result()); - - // Get the prototype or initial map from the function. 
- __ Ld(result, - FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); - - // Check that the function has a prototype or an initial map. - __ LoadRoot(at, Heap::kTheHoleValueRootIndex); - DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, Operand(at)); - - // If the function does not have an initial map, we're done. - Label done; - __ GetObjectType(result, scratch, scratch); - __ Branch(&done, ne, scratch, Operand(MAP_TYPE)); - - // Get the prototype from the initial map. - __ Ld(result, FieldMemOperand(result, Map::kPrototypeOffset)); - - // All done. - __ bind(&done); -} - - -void LCodeGen::DoLoadRoot(LLoadRoot* instr) { - Register result = ToRegister(instr->result()); - __ LoadRoot(result, instr->index()); -} - - -void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { - Register arguments = ToRegister(instr->arguments()); - Register result = ToRegister(instr->result()); - // There are two words between the frame pointer and the last argument. - // Subtracting from length accounts for one of them add one more. - if (instr->length()->IsConstantOperand()) { - int const_length = ToInteger32(LConstantOperand::cast(instr->length())); - if (instr->index()->IsConstantOperand()) { - int const_index = ToInteger32(LConstantOperand::cast(instr->index())); - int index = (const_length - const_index) + 1; - __ Ld(result, MemOperand(arguments, index * kPointerSize)); - } else { - Register index = ToRegister(instr->index()); - __ li(at, Operand(const_length + 1)); - __ Dsubu(result, at, index); - __ Dlsa(at, arguments, result, kPointerSizeLog2); - __ Ld(result, MemOperand(at)); - } - } else if (instr->index()->IsConstantOperand()) { - Register length = ToRegister(instr->length()); - int const_index = ToInteger32(LConstantOperand::cast(instr->index())); - int loc = const_index - 1; - if (loc != 0) { - __ Dsubu(result, length, Operand(loc)); - __ Dlsa(at, arguments, result, kPointerSizeLog2); - __ Ld(result, MemOperand(at)); - } else { - __ Dlsa(at, arguments, length, kPointerSizeLog2); - __ Ld(result, MemOperand(at)); - } - } else { - Register length = ToRegister(instr->length()); - Register index = ToRegister(instr->index()); - __ Dsubu(result, length, index); - __ Daddu(result, result, 1); - __ Dlsa(at, arguments, result, kPointerSizeLog2); - __ Ld(result, MemOperand(at)); - } -} - - -void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { - Register external_pointer = ToRegister(instr->elements()); - Register key = no_reg; - ElementsKind elements_kind = instr->elements_kind(); - bool key_is_constant = instr->key()->IsConstantOperand(); - int constant_key = 0; - if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort(kArrayIndexConstantValueTooBig); - } - } else { - key = ToRegister(instr->key()); - } - int element_size_shift = ElementsKindToShiftSize(elements_kind); - int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) - ? 
(element_size_shift - (kSmiTagSize + kSmiShiftSize)) - : element_size_shift; - int base_offset = instr->base_offset(); - - if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) { - FPURegister result = ToDoubleRegister(instr->result()); - if (key_is_constant) { - __ Daddu(scratch0(), external_pointer, - constant_key << element_size_shift); - } else { - if (shift_size < 0) { - if (shift_size == -32) { - __ dsra32(scratch0(), key, 0); - } else { - __ dsra(scratch0(), key, -shift_size); - } - } else { - __ dsll(scratch0(), key, shift_size); - } - __ Daddu(scratch0(), scratch0(), external_pointer); - } - if (elements_kind == FLOAT32_ELEMENTS) { - __ Lwc1(result, MemOperand(scratch0(), base_offset)); - __ cvt_d_s(result, result); - } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS - __ Ldc1(result, MemOperand(scratch0(), base_offset)); - } - } else { - Register result = ToRegister(instr->result()); - MemOperand mem_operand = PrepareKeyedOperand( - key, external_pointer, key_is_constant, constant_key, - element_size_shift, shift_size, base_offset); - switch (elements_kind) { - case INT8_ELEMENTS: - __ Lb(result, mem_operand); - break; - case UINT8_ELEMENTS: - case UINT8_CLAMPED_ELEMENTS: - __ Lbu(result, mem_operand); - break; - case INT16_ELEMENTS: - __ Lh(result, mem_operand); - break; - case UINT16_ELEMENTS: - __ Lhu(result, mem_operand); - break; - case INT32_ELEMENTS: - __ Lw(result, mem_operand); - break; - case UINT32_ELEMENTS: - __ Lw(result, mem_operand); - if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { - DeoptimizeIf(Ugreater_equal, instr, DeoptimizeReason::kNegativeValue, - result, Operand(0x80000000)); - } - break; - case FLOAT32_ELEMENTS: - case FLOAT64_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case DICTIONARY_ELEMENTS: - case FAST_SLOPPY_ARGUMENTS_ELEMENTS: - case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: - case FAST_STRING_WRAPPER_ELEMENTS: - case SLOW_STRING_WRAPPER_ELEMENTS: - case NO_ELEMENTS: - UNREACHABLE(); - break; - } - } -} - - -void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { - Register elements = ToRegister(instr->elements()); - bool key_is_constant = instr->key()->IsConstantOperand(); - Register key = no_reg; - DoubleRegister result = ToDoubleRegister(instr->result()); - Register scratch = scratch0(); - - int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); - - int base_offset = instr->base_offset(); - if (key_is_constant) { - int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort(kArrayIndexConstantValueTooBig); - } - base_offset += constant_key * kDoubleSize; - } - __ Daddu(scratch, elements, Operand(base_offset)); - - if (!key_is_constant) { - key = ToRegister(instr->key()); - int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) - ? 
(element_size_shift - (kSmiTagSize + kSmiShiftSize)) - : element_size_shift; - if (shift_size > 0) { - __ dsll(at, key, shift_size); - } else if (shift_size == -32) { - __ dsra32(at, key, 0); - } else { - __ dsra(at, key, -shift_size); - } - __ Daddu(scratch, scratch, at); - } - - __ Ldc1(result, MemOperand(scratch)); - - if (instr->hydrogen()->RequiresHoleCheck()) { - __ FmoveHigh(scratch, result); - DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, scratch, - Operand(static_cast(kHoleNanUpper32))); - } -} - - -void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { - HLoadKeyed* hinstr = instr->hydrogen(); - Register elements = ToRegister(instr->elements()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - Register store_base = scratch; - int offset = instr->base_offset(); - - if (instr->key()->IsConstantOperand()) { - LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); - offset += ToInteger32(const_operand) * kPointerSize; - store_base = elements; - } else { - Register key = ToRegister(instr->key()); - // Even though the HLoadKeyed instruction forces the input - // representation for the key to be an integer, the input gets replaced - // during bound check elimination with the index argument to the bounds - // check, which can be tagged, so that case must be handled here, too. - if (instr->hydrogen()->key()->representation().IsSmi()) { - __ SmiScale(scratch, key, kPointerSizeLog2); - __ daddu(scratch, elements, scratch); - } else { - __ Dlsa(scratch, elements, key, kPointerSizeLog2); - } - } - - Representation representation = hinstr->representation(); - if (representation.IsInteger32() && SmiValuesAre32Bits() && - hinstr->elements_kind() == FAST_SMI_ELEMENTS) { - DCHECK(!hinstr->RequiresHoleCheck()); - if (FLAG_debug_code) { - Register temp = scratch1(); - __ Load(temp, MemOperand(store_base, offset), Representation::Smi()); - __ AssertSmi(temp); - } - - // Read int value directly from upper half of the smi. - STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); - offset = SmiWordOffset(offset); - } - - __ Load(result, MemOperand(store_base, offset), representation); - - // Check for the hole value. - if (hinstr->RequiresHoleCheck()) { - if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { - __ SmiTst(result, scratch); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, scratch, - Operand(zero_reg)); - } else { - __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); - DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, - Operand(scratch)); - } - } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) { - DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS); - Label done; - __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); - __ Branch(&done, ne, result, Operand(scratch)); - if (info()->IsStub()) { - // A stub can safely convert the hole to undefined only if the array - // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise - // it needs to bail out. - __ LoadRoot(result, Heap::kArrayProtectorRootIndex); - // The comparison only needs LS bits of value, which is a smi. 
- __ Ld(result, FieldMemOperand(result, PropertyCell::kValueOffset)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kHole, result, - Operand(Smi::FromInt(Isolate::kProtectorValid))); - } - __ LoadRoot(result, Heap::kUndefinedValueRootIndex); - __ bind(&done); - } -} - - -void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { - if (instr->is_fixed_typed_array()) { - DoLoadKeyedExternalArray(instr); - } else if (instr->hydrogen()->representation().IsDouble()) { - DoLoadKeyedFixedDoubleArray(instr); - } else { - DoLoadKeyedFixedArray(instr); - } -} - - -MemOperand LCodeGen::PrepareKeyedOperand(Register key, - Register base, - bool key_is_constant, - int constant_key, - int element_size, - int shift_size, - int base_offset) { - if (key_is_constant) { - return MemOperand(base, (constant_key << element_size) + base_offset); - } - - if (base_offset == 0) { - if (shift_size >= 0) { - __ dsll(scratch0(), key, shift_size); - __ Daddu(scratch0(), base, scratch0()); - return MemOperand(scratch0()); - } else { - if (shift_size == -32) { - __ dsra32(scratch0(), key, 0); - } else { - __ dsra(scratch0(), key, -shift_size); - } - __ Daddu(scratch0(), base, scratch0()); - return MemOperand(scratch0()); - } - } - - if (shift_size >= 0) { - __ dsll(scratch0(), key, shift_size); - __ Daddu(scratch0(), base, scratch0()); - return MemOperand(scratch0(), base_offset); - } else { - if (shift_size == -32) { - __ dsra32(scratch0(), key, 0); - } else { - __ dsra(scratch0(), key, -shift_size); - } - __ Daddu(scratch0(), base, scratch0()); - return MemOperand(scratch0(), base_offset); - } -} - - -void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { - Register scratch = scratch0(); - Register temp = scratch1(); - Register result = ToRegister(instr->result()); - - if (instr->hydrogen()->from_inlined()) { - __ Dsubu(result, sp, 2 * kPointerSize); - } else if (instr->hydrogen()->arguments_adaptor()) { - // Check if the calling frame is an arguments adaptor frame. - Label done, adapted; - __ Ld(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ Ld(result, - MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset)); - __ Xor(temp, result, - Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); - - // Result is the frame pointer for the frame if not adapted and for the real - // frame below the adaptor frame if adapted. - __ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne). - __ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq). - } else { - __ mov(result, fp); - } -} - - -void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { - Register elem = ToRegister(instr->elements()); - Register result = ToRegister(instr->result()); - - Label done; - - // If no arguments adaptor frame the number of arguments is fixed. - __ Daddu(result, zero_reg, Operand(scope()->num_parameters())); - __ Branch(&done, eq, fp, Operand(elem)); - - // Arguments adaptor frame present. Get argument length from there. - __ Ld(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ Ld(result, - MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ SmiUntag(result); - - // Argument length is in result register. 
- __ bind(&done); -} - - -void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { - Register receiver = ToRegister(instr->receiver()); - Register function = ToRegister(instr->function()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - - // If the receiver is null or undefined, we have to pass the global - // object as a receiver to normal functions. Values have to be - // passed unchanged to builtins and strict-mode functions. - Label global_object, result_in_receiver; - - if (!instr->hydrogen()->known_function()) { - // Do not transform the receiver to object for strict mode functions or - // builtins. - __ Ld(scratch, - FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); - __ Lwu(at, - FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); - __ And(at, at, - Operand(SharedFunctionInfo::IsStrictBit::kMask | - SharedFunctionInfo::IsNativeBit::kMask)); - __ Branch(&result_in_receiver, ne, at, Operand(zero_reg)); - } - - // Normal function. Replace undefined or null with global receiver. - __ LoadRoot(scratch, Heap::kNullValueRootIndex); - __ Branch(&global_object, eq, receiver, Operand(scratch)); - __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); - __ Branch(&global_object, eq, receiver, Operand(scratch)); - - // Deoptimize if the receiver is not a JS object. - __ SmiTst(receiver, scratch); - DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, scratch, Operand(zero_reg)); - - __ GetObjectType(receiver, scratch, scratch); - DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject, scratch, - Operand(FIRST_JS_RECEIVER_TYPE)); - __ Branch(&result_in_receiver); - - __ bind(&global_object); - __ Ld(result, FieldMemOperand(function, JSFunction::kContextOffset)); - __ Ld(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX)); - __ Ld(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX)); - - if (result.is(receiver)) { - __ bind(&result_in_receiver); - } else { - Label result_ok; - __ Branch(&result_ok); - __ bind(&result_in_receiver); - __ mov(result, receiver); - __ bind(&result_ok); - } -} - - -void LCodeGen::DoApplyArguments(LApplyArguments* instr) { - Register receiver = ToRegister(instr->receiver()); - Register function = ToRegister(instr->function()); - Register length = ToRegister(instr->length()); - Register elements = ToRegister(instr->elements()); - Register scratch = scratch0(); - DCHECK(receiver.is(a0)); // Used for parameter count. - DCHECK(function.is(a1)); // Required by InvokeFunction. - DCHECK(ToRegister(instr->result()).is(v0)); - - // Copy the arguments to this function possibly from the - // adaptor frame below it. - const uint32_t kArgumentsLimit = 1 * KB; - DeoptimizeIf(hi, instr, DeoptimizeReason::kTooManyArguments, length, - Operand(kArgumentsLimit)); - - // Push the receiver and use the register to keep the original - // number of arguments. - __ push(receiver); - __ Move(receiver, length); - // The arguments are at a one pointer size offset from elements. - __ Daddu(elements, elements, Operand(1 * kPointerSize)); - - // Loop through the arguments pushing them onto the execution - // stack. - Label invoke, loop; - // length is a small non-negative integer, due to the test above. 
- __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg)); - __ dsll(scratch, length, kPointerSizeLog2); - __ bind(&loop); - __ Daddu(scratch, elements, scratch); - __ Ld(scratch, MemOperand(scratch)); - __ push(scratch); - __ Dsubu(length, length, Operand(1)); - __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg)); - __ dsll(scratch, length, kPointerSizeLog2); - - __ bind(&invoke); - - InvokeFlag flag = CALL_FUNCTION; - if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) { - DCHECK(!info()->saves_caller_doubles()); - // TODO(ishell): drop current frame before pushing arguments to the stack. - flag = JUMP_FUNCTION; - ParameterCount actual(a0); - // It is safe to use t0, t1 and t2 as scratch registers here given that - // we are not going to return to caller function anyway. - PrepareForTailCall(actual, t0, t1, t2); - } - - DCHECK(instr->HasPointerMap()); - LPointerMap* pointers = instr->pointer_map(); - SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt); - // The number of arguments is stored in receiver which is a0, as expected - // by InvokeFunction. - ParameterCount actual(receiver); - __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator); -} - - -void LCodeGen::DoPushArgument(LPushArgument* instr) { - LOperand* argument = instr->value(); - if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) { - Abort(kDoPushArgumentNotImplementedForDoubleType); - } else { - Register argument_reg = EmitLoadRegister(argument, at); - __ push(argument_reg); - } -} - - -void LCodeGen::DoDrop(LDrop* instr) { - __ Drop(instr->count()); -} - - -void LCodeGen::DoThisFunction(LThisFunction* instr) { - Register result = ToRegister(instr->result()); - __ Ld(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); -} - - -void LCodeGen::DoContext(LContext* instr) { - // If there is a non-return use, the context must be moved to a register. - Register result = ToRegister(instr->result()); - if (info()->IsOptimizing()) { - __ Ld(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); - } else { - // If there is no frame, the context must be in cp. - DCHECK(result.is(cp)); - } -} - - -void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - __ li(scratch0(), instr->hydrogen()->declarations()); - __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags()))); - __ Push(scratch0(), scratch1()); - __ li(scratch0(), instr->hydrogen()->feedback_vector()); - __ Push(scratch0()); - CallRuntime(Runtime::kDeclareGlobals, instr); -} - -void LCodeGen::CallKnownFunction(Handle function, - int formal_parameter_count, int arity, - bool is_tail_call, LInstruction* instr) { - bool dont_adapt_arguments = - formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; - bool can_invoke_directly = - dont_adapt_arguments || formal_parameter_count == arity; - - Register function_reg = a1; - LPointerMap* pointers = instr->pointer_map(); - - if (can_invoke_directly) { - // Change context. - __ Ld(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset)); - - // Always initialize new target and number of actual arguments. - __ LoadRoot(a3, Heap::kUndefinedValueRootIndex); - __ li(a0, Operand(arity)); - - bool is_self_call = function.is_identical_to(info()->closure()); - - // Invoke function. 
- if (is_self_call) { - Handle self(reinterpret_cast(__ CodeObject().location())); - if (is_tail_call) { - __ Jump(self, RelocInfo::CODE_TARGET); - } else { - __ Call(self, RelocInfo::CODE_TARGET); - } - } else { - __ Ld(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset)); - if (is_tail_call) { - __ Jump(at); - } else { - __ Call(at); - } - } - - if (!is_tail_call) { - // Set up deoptimization. - RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); - } - } else { - SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); - ParameterCount actual(arity); - ParameterCount expected(formal_parameter_count); - InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION; - __ InvokeFunction(function_reg, expected, actual, flag, generator); - } -} - - -void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { - DCHECK(instr->context() != NULL); - DCHECK(ToRegister(instr->context()).is(cp)); - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - - // Deoptimize if not a heap number. - __ Ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); - __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch, - Operand(at)); - - Label done; - Register exponent = scratch0(); - scratch = no_reg; - __ Lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); - // Check the sign of the argument. If the argument is positive, just - // return it. - __ Move(result, input); - __ And(at, exponent, Operand(HeapNumber::kSignMask)); - __ Branch(&done, eq, at, Operand(zero_reg)); - - // Input is negative. Reverse its sign. - // Preserve the value of all registers. - { - PushSafepointRegistersScope scope(this); - - // Registers were saved at the safepoint, so we can use - // many scratch registers. - Register tmp1 = input.is(a1) ? a0 : a1; - Register tmp2 = input.is(a2) ? a0 : a2; - Register tmp3 = input.is(a3) ? a0 : a3; - Register tmp4 = input.is(a4) ? a0 : a4; - - // exponent: floating point exponent value. - - Label allocated, slow; - __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow); - __ Branch(&allocated); - - // Slow case: Call the runtime system to do the number allocation. - __ bind(&slow); - - CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr, - instr->context()); - // Set the pointer to the new heap number in tmp. - if (!tmp1.is(v0)) - __ mov(tmp1, v0); - // Restore input_reg after call to runtime. - __ LoadFromSafepointRegisterSlot(input, input); - __ Lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); - - __ bind(&allocated); - // exponent: floating point exponent value. - // tmp1: allocated heap number. 
- __ And(exponent, exponent, Operand(~HeapNumber::kSignMask)); - __ Sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset)); - __ Lwu(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset)); - __ Sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset)); - - __ StoreToSafepointRegisterSlot(tmp1, result); - } - - __ bind(&done); -} - - -void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); - Label done; - __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg)); - __ mov(result, input); - __ subu(result, zero_reg, input); - // Overflow if result is still negative, i.e. 0x80000000. - DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, result, - Operand(zero_reg)); - __ bind(&done); -} - - -void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) { - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); - Label done; - __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg)); - __ mov(result, input); - __ dsubu(result, zero_reg, input); - // Overflow if result is still negative, i.e. 0x80000000 00000000. - DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, result, - Operand(zero_reg)); - __ bind(&done); -} - - -void LCodeGen::DoMathAbs(LMathAbs* instr) { - // Class for deferred case. - class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode { - public: - DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { - codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); - } - LInstruction* instr() override { return instr_; } - - private: - LMathAbs* instr_; - }; - - Representation r = instr->hydrogen()->value()->representation(); - if (r.IsDouble()) { - FPURegister input = ToDoubleRegister(instr->value()); - FPURegister result = ToDoubleRegister(instr->result()); - __ abs_d(result, input); - } else if (r.IsInteger32()) { - EmitIntegerMathAbs(instr); - } else if (r.IsSmi()) { - EmitSmiMathAbs(instr); - } else { - // Representation is tagged. - DeferredMathAbsTaggedHeapNumber* deferred = - new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); - Register input = ToRegister(instr->value()); - // Smi check. - __ JumpIfNotSmi(input, deferred->entry()); - // If smi, handle it directly. - EmitSmiMathAbs(instr); - __ bind(deferred->exit()); - } -} - - -void LCodeGen::DoMathFloor(LMathFloor* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - Register result = ToRegister(instr->result()); - Register scratch1 = scratch0(); - Register except_flag = ToRegister(instr->temp()); - - __ EmitFPUTruncate(kRoundToMinusInf, - result, - input, - scratch1, - double_scratch0(), - except_flag); - - // Deopt if the operation did not succeed. - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag, - Operand(zero_reg)); - - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // Test for -0. - Label done; - __ Branch(&done, ne, result, Operand(zero_reg)); - __ mfhc1(scratch1, input); // Get exponent/sign bits. 
- __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1, - Operand(zero_reg)); - __ bind(&done); - } -} - - -void LCodeGen::DoMathRound(LMathRound* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - Register result = ToRegister(instr->result()); - DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp()); - Register scratch = scratch0(); - Label done, check_sign_on_zero; - - // Extract exponent bits. - __ mfhc1(result, input); - __ Ext(scratch, - result, - HeapNumber::kExponentShift, - HeapNumber::kExponentBits); - - // If the number is in ]-0.5, +0.5[, the result is +/- 0. - Label skip1; - __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2)); - __ mov(result, zero_reg); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - __ Branch(&check_sign_on_zero); - } else { - __ Branch(&done); - } - __ bind(&skip1); - - // The following conversion will not work with numbers - // outside of ]-2^32, 2^32[. - DeoptimizeIf(ge, instr, DeoptimizeReason::kOverflow, scratch, - Operand(HeapNumber::kExponentBias + 32)); - - // Save the original sign for later comparison. - __ And(scratch, result, Operand(HeapNumber::kSignMask)); - - __ Move(double_scratch0(), 0.5); - __ add_d(double_scratch0(), input, double_scratch0()); - - // Check sign of the result: if the sign changed, the input - // value was in ]0.5, 0[ and the result should be -0. - __ mfhc1(result, double_scratch0()); - // mfhc1 sign-extends, clear the upper bits. - __ dsll32(result, result, 0); - __ dsrl32(result, result, 0); - __ Xor(result, result, Operand(scratch)); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // ARM uses 'mi' here, which is 'lt' - DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, result, - Operand(zero_reg)); - } else { - Label skip2; - // ARM uses 'mi' here, which is 'lt' - // Negating it results in 'ge' - __ Branch(&skip2, ge, result, Operand(zero_reg)); - __ mov(result, zero_reg); - __ Branch(&done); - __ bind(&skip2); - } - - Register except_flag = scratch; - __ EmitFPUTruncate(kRoundToMinusInf, - result, - double_scratch0(), - at, - double_scratch1, - except_flag); - - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag, - Operand(zero_reg)); - - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // Test for -0. - __ Branch(&done, ne, result, Operand(zero_reg)); - __ bind(&check_sign_on_zero); - __ mfhc1(scratch, input); // Get exponent/sign bits. 
- __ And(scratch, scratch, Operand(HeapNumber::kSignMask)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch, - Operand(zero_reg)); - } - __ bind(&done); -} - - -void LCodeGen::DoMathFround(LMathFround* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - DoubleRegister result = ToDoubleRegister(instr->result()); - __ cvt_s_d(result, input); - __ cvt_d_s(result, result); -} - - -void LCodeGen::DoMathSqrt(LMathSqrt* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - DoubleRegister result = ToDoubleRegister(instr->result()); - __ sqrt_d(result, input); -} - - -void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - DoubleRegister result = ToDoubleRegister(instr->result()); - DoubleRegister temp = ToDoubleRegister(instr->temp()); - - DCHECK(!input.is(result)); - - // Note that according to ECMA-262 15.8.2.13: - // Math.pow(-Infinity, 0.5) == Infinity - // Math.sqrt(-Infinity) == NaN - Label done; - __ Move(temp, static_cast(-V8_INFINITY)); - // Set up Infinity. - __ Neg_d(result, temp); - // result is overwritten if the branch is not taken. - __ BranchF(&done, NULL, eq, temp, input); - - // Add +0 to convert -0 to +0. - __ add_d(result, input, kDoubleRegZero); - __ sqrt_d(result, result); - __ bind(&done); -} - - -void LCodeGen::DoPower(LPower* instr) { - Representation exponent_type = instr->hydrogen()->right()->representation(); - // Having marked this as a call, we can use any registers. - // Just make sure that the input/output registers are the expected ones. - Register tagged_exponent = MathPowTaggedDescriptor::exponent(); - DCHECK(!instr->right()->IsDoubleRegister() || - ToDoubleRegister(instr->right()).is(f4)); - DCHECK(!instr->right()->IsRegister() || - ToRegister(instr->right()).is(tagged_exponent)); - DCHECK(ToDoubleRegister(instr->left()).is(f2)); - DCHECK(ToDoubleRegister(instr->result()).is(f0)); - - if (exponent_type.IsSmi()) { - MathPowStub stub(isolate(), MathPowStub::TAGGED); - __ CallStub(&stub); - } else if (exponent_type.IsTagged()) { - Label no_deopt; - __ JumpIfSmi(tagged_exponent, &no_deopt); - DCHECK(!a7.is(tagged_exponent)); - __ Lw(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); - __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, a7, Operand(at)); - __ bind(&no_deopt); - MathPowStub stub(isolate(), MathPowStub::TAGGED); - __ CallStub(&stub); - } else if (exponent_type.IsInteger32()) { - MathPowStub stub(isolate(), MathPowStub::INTEGER); - __ CallStub(&stub); - } else { - DCHECK(exponent_type.IsDouble()); - MathPowStub stub(isolate(), MathPowStub::DOUBLE); - __ CallStub(&stub); - } -} - -void LCodeGen::DoMathCos(LMathCos* instr) { - __ PrepareCallCFunction(0, 1, scratch0()); - __ MovToFloatParameter(ToDoubleRegister(instr->value())); - __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1); - __ MovFromFloatResult(ToDoubleRegister(instr->result())); -} - -void LCodeGen::DoMathSin(LMathSin* instr) { - __ PrepareCallCFunction(0, 1, scratch0()); - __ MovToFloatParameter(ToDoubleRegister(instr->value())); - __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1); - __ MovFromFloatResult(ToDoubleRegister(instr->result())); -} - -void LCodeGen::DoMathExp(LMathExp* instr) { - __ PrepareCallCFunction(0, 1, scratch0()); - __ MovToFloatParameter(ToDoubleRegister(instr->value())); - __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1); - 
__ MovFromFloatResult(ToDoubleRegister(instr->result())); -} - - -void LCodeGen::DoMathLog(LMathLog* instr) { - __ PrepareCallCFunction(0, 1, scratch0()); - __ MovToFloatParameter(ToDoubleRegister(instr->value())); - __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1); - __ MovFromFloatResult(ToDoubleRegister(instr->result())); -} - - -void LCodeGen::DoMathClz32(LMathClz32* instr) { - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - __ Clz(result, input); -} - -void LCodeGen::PrepareForTailCall(const ParameterCount& actual, - Register scratch1, Register scratch2, - Register scratch3) { -#if DEBUG - if (actual.is_reg()) { - DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3)); - } else { - DCHECK(!AreAliased(scratch1, scratch2, scratch3)); - } -#endif - if (FLAG_code_comments) { - if (actual.is_reg()) { - Comment(";;; PrepareForTailCall, actual: %s {", - RegisterConfiguration::Crankshaft()->GetGeneralRegisterName( - actual.reg().code())); - } else { - Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate()); - } - } - - // Check if next frame is an arguments adaptor frame. - Register caller_args_count_reg = scratch1; - Label no_arguments_adaptor, formal_parameter_count_loaded; - __ Ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ Ld(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset)); - __ Branch(&no_arguments_adaptor, ne, scratch3, - Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); - - // Drop current frame and load arguments count from arguments adaptor frame. - __ mov(fp, scratch2); - __ Ld(caller_args_count_reg, - MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ SmiUntag(caller_args_count_reg); - __ Branch(&formal_parameter_count_loaded); - - __ bind(&no_arguments_adaptor); - // Load caller's formal parameter count - __ li(caller_args_count_reg, Operand(info()->literal()->parameter_count())); - - __ bind(&formal_parameter_count_loaded); - __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3); - - Comment(";;; }"); -} - -void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { - HInvokeFunction* hinstr = instr->hydrogen(); - DCHECK(ToRegister(instr->context()).is(cp)); - DCHECK(ToRegister(instr->function()).is(a1)); - DCHECK(instr->HasPointerMap()); - - bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow; - - if (is_tail_call) { - DCHECK(!info()->saves_caller_doubles()); - ParameterCount actual(instr->arity()); - // It is safe to use t0, t1 and t2 as scratch registers here given that - // we are not going to return to caller function anyway. - PrepareForTailCall(actual, t0, t1, t2); - } - - Handle known_function = hinstr->known_function(); - if (known_function.is_null()) { - LPointerMap* pointers = instr->pointer_map(); - SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); - ParameterCount actual(instr->arity()); - InvokeFlag flag = is_tail_call ? 
JUMP_FUNCTION : CALL_FUNCTION; - __ InvokeFunction(a1, no_reg, actual, flag, generator); - } else { - CallKnownFunction(known_function, hinstr->formal_parameter_count(), - instr->arity(), is_tail_call, instr); - } -} - - -void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { - DCHECK(ToRegister(instr->result()).is(v0)); - - if (instr->hydrogen()->IsTailCall()) { - if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL); - - if (instr->target()->IsConstantOperand()) { - LConstantOperand* target = LConstantOperand::cast(instr->target()); - Handle<Code> code = Handle<Code>::cast(ToHandle(target)); - __ Jump(code, RelocInfo::CODE_TARGET); - } else { - DCHECK(instr->target()->IsRegister()); - Register target = ToRegister(instr->target()); - __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag)); - __ Jump(target); - } - } else { - LPointerMap* pointers = instr->pointer_map(); - SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); - - if (instr->target()->IsConstantOperand()) { - LConstantOperand* target = LConstantOperand::cast(instr->target()); - Handle<Code> code = Handle<Code>::cast(ToHandle(target)); - generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET)); - __ Call(code, RelocInfo::CODE_TARGET); - } else { - DCHECK(instr->target()->IsRegister()); - Register target = ToRegister(instr->target()); - generator.BeforeCall(__ CallSize(target)); - __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag)); - __ Call(target); - } - generator.AfterCall(); - } -} - - -void LCodeGen::DoCallNewArray(LCallNewArray* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - DCHECK(ToRegister(instr->constructor()).is(a1)); - DCHECK(ToRegister(instr->result()).is(v0)); - - __ li(a0, Operand(instr->arity())); - __ li(a2, instr->hydrogen()->site()); - - ElementsKind kind = instr->hydrogen()->elements_kind(); - AllocationSiteOverrideMode override_mode = AllocationSite::ShouldTrack(kind) - ? DISABLE_ALLOCATION_SITES - : DONT_OVERRIDE; - - if (instr->arity() == 0) { - ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - } else if (instr->arity() == 1) { - Label done; - if (IsFastPackedElementsKind(kind)) { - Label packed_case; - // We might need a change here, - // look at the first argument.
- __ Ld(a5, MemOperand(sp, 0)); - __ Branch(&packed_case, eq, a5, Operand(zero_reg)); - - ElementsKind holey_kind = GetHoleyElementsKind(kind); - ArraySingleArgumentConstructorStub stub(isolate(), - holey_kind, - override_mode); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - __ jmp(&done); - __ bind(&packed_case); - } - - ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - __ bind(&done); - } else { - ArrayNArgumentsConstructorStub stub(isolate()); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - } -} - - -void LCodeGen::DoCallRuntime(LCallRuntime* instr) { - CallRuntime(instr->function(), instr->arity(), instr); -} - - -void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { - Register function = ToRegister(instr->function()); - Register code_object = ToRegister(instr->code_object()); - __ Daddu(code_object, code_object, - Operand(Code::kHeaderSize - kHeapObjectTag)); - __ Sd(code_object, FieldMemOperand(function, JSFunction::kCodeEntryOffset)); -} - - -void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { - Register result = ToRegister(instr->result()); - Register base = ToRegister(instr->base_object()); - if (instr->offset()->IsConstantOperand()) { - LConstantOperand* offset = LConstantOperand::cast(instr->offset()); - __ Daddu(result, base, Operand(ToInteger32(offset))); - } else { - Register offset = ToRegister(instr->offset()); - __ Daddu(result, base, offset); - } -} - - -void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { - Representation representation = instr->representation(); - - Register object = ToRegister(instr->object()); - Register scratch2 = scratch1(); - Register scratch1 = scratch0(); - - HObjectAccess access = instr->hydrogen()->access(); - int offset = access.offset(); - if (access.IsExternalMemory()) { - Register value = ToRegister(instr->value()); - MemOperand operand = MemOperand(object, offset); - __ Store(value, operand, representation); - return; - } - - __ AssertNotSmi(object); - - DCHECK(!representation.IsSmi() || - !instr->value()->IsConstantOperand() || - IsSmi(LConstantOperand::cast(instr->value()))); - if (!FLAG_unbox_double_fields && representation.IsDouble()) { - DCHECK(access.IsInobject()); - DCHECK(!instr->hydrogen()->has_transition()); - DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); - DoubleRegister value = ToDoubleRegister(instr->value()); - __ Sdc1(value, FieldMemOperand(object, offset)); - return; - } - - if (instr->hydrogen()->has_transition()) { - Handle transition = instr->hydrogen()->transition_map(); - AddDeprecationDependency(transition); - __ li(scratch1, Operand(transition)); - __ Sd(scratch1, FieldMemOperand(object, HeapObject::kMapOffset)); - if (instr->hydrogen()->NeedsWriteBarrierForMap()) { - Register temp = ToRegister(instr->temp()); - // Update the write barrier for the map field. - __ RecordWriteForMap(object, - scratch1, - temp, - GetRAState(), - kSaveFPRegs); - } - } - - // Do the store. 
- Register destination = object; - if (!access.IsInobject()) { - destination = scratch1; - __ Ld(destination, FieldMemOperand(object, JSObject::kPropertiesOffset)); - } - - if (representation.IsSmi() && SmiValuesAre32Bits() && - instr->hydrogen()->value()->representation().IsInteger32()) { - DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY); - if (FLAG_debug_code) { - __ Load(scratch2, FieldMemOperand(destination, offset), representation); - __ AssertSmi(scratch2); - } - // Store int value directly to upper half of the smi. - offset = SmiWordOffset(offset); - representation = Representation::Integer32(); - } - MemOperand operand = FieldMemOperand(destination, offset); - - if (FLAG_unbox_double_fields && representation.IsDouble()) { - DCHECK(access.IsInobject()); - DoubleRegister value = ToDoubleRegister(instr->value()); - __ Sdc1(value, operand); - } else { - DCHECK(instr->value()->IsRegister()); - Register value = ToRegister(instr->value()); - __ Store(value, operand, representation); - } - - if (instr->hydrogen()->NeedsWriteBarrier()) { - // Update the write barrier for the object for in-object properties. - Register value = ToRegister(instr->value()); - __ RecordWriteField(destination, - offset, - value, - scratch2, - GetRAState(), - kSaveFPRegs, - EMIT_REMEMBERED_SET, - instr->hydrogen()->SmiCheckForWriteBarrier(), - instr->hydrogen()->PointersToHereCheckForValue()); - } -} - - -void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { - Condition cc = instr->hydrogen()->allow_equality() ? hi : hs; - Operand operand((int64_t)0); - Register reg; - if (instr->index()->IsConstantOperand()) { - operand = ToOperand(instr->index()); - reg = ToRegister(instr->length()); - cc = CommuteCondition(cc); - } else { - reg = ToRegister(instr->index()); - operand = ToOperand(instr->length()); - } - if (FLAG_debug_code && instr->hydrogen()->skip_check()) { - Label done; - __ Branch(&done, NegateCondition(cc), reg, operand); - __ stop("eliminated bounds check failed"); - __ bind(&done); - } else { - DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds, reg, operand); - } -} - - -void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { - Register external_pointer = ToRegister(instr->elements()); - Register key = no_reg; - ElementsKind elements_kind = instr->elements_kind(); - bool key_is_constant = instr->key()->IsConstantOperand(); - int constant_key = 0; - if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort(kArrayIndexConstantValueTooBig); - } - } else { - key = ToRegister(instr->key()); - } - int element_size_shift = ElementsKindToShiftSize(elements_kind); - int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) - ? 
(element_size_shift - (kSmiTagSize + kSmiShiftSize)) - : element_size_shift; - int base_offset = instr->base_offset(); - - if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) { - Register address = scratch0(); - FPURegister value(ToDoubleRegister(instr->value())); - if (key_is_constant) { - if (constant_key != 0) { - __ Daddu(address, external_pointer, - Operand(constant_key << element_size_shift)); - } else { - address = external_pointer; - } - } else { - if (shift_size < 0) { - if (shift_size == -32) { - __ dsra32(address, key, 0); - } else { - __ dsra(address, key, -shift_size); - } - } else { - __ dsll(address, key, shift_size); - } - __ Daddu(address, external_pointer, address); - } - - if (elements_kind == FLOAT32_ELEMENTS) { - __ cvt_s_d(double_scratch0(), value); - __ Swc1(double_scratch0(), MemOperand(address, base_offset)); - } else { // Storing doubles, not floats. - __ Sdc1(value, MemOperand(address, base_offset)); - } - } else { - Register value(ToRegister(instr->value())); - MemOperand mem_operand = PrepareKeyedOperand( - key, external_pointer, key_is_constant, constant_key, - element_size_shift, shift_size, - base_offset); - switch (elements_kind) { - case UINT8_ELEMENTS: - case UINT8_CLAMPED_ELEMENTS: - case INT8_ELEMENTS: - __ Sb(value, mem_operand); - break; - case INT16_ELEMENTS: - case UINT16_ELEMENTS: - __ Sh(value, mem_operand); - break; - case INT32_ELEMENTS: - case UINT32_ELEMENTS: - __ Sw(value, mem_operand); - break; - case FLOAT32_ELEMENTS: - case FLOAT64_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case DICTIONARY_ELEMENTS: - case FAST_SLOPPY_ARGUMENTS_ELEMENTS: - case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: - case FAST_STRING_WRAPPER_ELEMENTS: - case SLOW_STRING_WRAPPER_ELEMENTS: - case NO_ELEMENTS: - UNREACHABLE(); - break; - } - } -} - - -void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { - DoubleRegister value = ToDoubleRegister(instr->value()); - Register elements = ToRegister(instr->elements()); - Register scratch = scratch0(); - DoubleRegister double_scratch = double_scratch0(); - bool key_is_constant = instr->key()->IsConstantOperand(); - int base_offset = instr->base_offset(); - Label not_nan, done; - - // Calculate the effective address of the slot in the array to store the - // double value. - int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); - if (key_is_constant) { - int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort(kArrayIndexConstantValueTooBig); - } - __ Daddu(scratch, elements, - Operand((constant_key << element_size_shift) + base_offset)); - } else { - int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) - ? 
(element_size_shift - (kSmiTagSize + kSmiShiftSize)) - : element_size_shift; - __ Daddu(scratch, elements, Operand(base_offset)); - DCHECK((shift_size == 3) || (shift_size == -29)); - if (shift_size == 3) { - __ dsll(at, ToRegister(instr->key()), 3); - } else if (shift_size == -29) { - __ dsra(at, ToRegister(instr->key()), 29); - } - __ Daddu(scratch, scratch, at); - } - - if (instr->NeedsCanonicalization()) { - __ FPUCanonicalizeNaN(double_scratch, value); - __ Sdc1(double_scratch, MemOperand(scratch, 0)); - } else { - __ Sdc1(value, MemOperand(scratch, 0)); - } -} - - -void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { - Register value = ToRegister(instr->value()); - Register elements = ToRegister(instr->elements()); - Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) - : no_reg; - Register scratch = scratch0(); - Register store_base = scratch; - int offset = instr->base_offset(); - - // Do the store. - if (instr->key()->IsConstantOperand()) { - DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); - LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); - offset += ToInteger32(const_operand) * kPointerSize; - store_base = elements; - } else { - // Even though the HLoadKeyed instruction forces the input - // representation for the key to be an integer, the input gets replaced - // during bound check elimination with the index argument to the bounds - // check, which can be tagged, so that case must be handled here, too. - if (instr->hydrogen()->key()->representation().IsSmi()) { - __ SmiScale(scratch, key, kPointerSizeLog2); - __ daddu(store_base, elements, scratch); - } else { - __ Dlsa(store_base, elements, key, kPointerSizeLog2); - } - } - - Representation representation = instr->hydrogen()->value()->representation(); - if (representation.IsInteger32() && SmiValuesAre32Bits()) { - DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY); - DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS); - if (FLAG_debug_code) { - Register temp = scratch1(); - __ Load(temp, MemOperand(store_base, offset), Representation::Smi()); - __ AssertSmi(temp); - } - - // Store int value directly to upper half of the smi. - STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); - offset = SmiWordOffset(offset); - representation = Representation::Integer32(); - } - - __ Store(value, MemOperand(store_base, offset), representation); - - if (instr->hydrogen()->NeedsWriteBarrier()) { - SmiCheck check_needed = - instr->hydrogen()->value()->type().IsHeapObject() - ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; - // Compute address of modified element and store it into key register. 
- __ Daddu(key, store_base, Operand(offset)); - __ RecordWrite(elements, - key, - value, - GetRAState(), - kSaveFPRegs, - EMIT_REMEMBERED_SET, - check_needed, - instr->hydrogen()->PointersToHereCheckForValue()); - } -} - - -void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { - // By cases: external, fast double - if (instr->is_fixed_typed_array()) { - DoStoreKeyedExternalArray(instr); - } else if (instr->hydrogen()->value()->representation().IsDouble()) { - DoStoreKeyedFixedDoubleArray(instr); - } else { - DoStoreKeyedFixedArray(instr); - } -} - - -void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) { - class DeferredMaybeGrowElements final : public LDeferredCode { - public: - DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr) - : LDeferredCode(codegen), instr_(instr) {} - void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LMaybeGrowElements* instr_; - }; - - Register result = v0; - DeferredMaybeGrowElements* deferred = - new (zone()) DeferredMaybeGrowElements(this, instr); - LOperand* key = instr->key(); - LOperand* current_capacity = instr->current_capacity(); - - DCHECK(instr->hydrogen()->key()->representation().IsInteger32()); - DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32()); - DCHECK(key->IsConstantOperand() || key->IsRegister()); - DCHECK(current_capacity->IsConstantOperand() || - current_capacity->IsRegister()); - - if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) { - int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); - int32_t constant_capacity = - ToInteger32(LConstantOperand::cast(current_capacity)); - if (constant_key >= constant_capacity) { - // Deferred case. - __ jmp(deferred->entry()); - } - } else if (key->IsConstantOperand()) { - int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); - __ Branch(deferred->entry(), le, ToRegister(current_capacity), - Operand(constant_key)); - } else if (current_capacity->IsConstantOperand()) { - int32_t constant_capacity = - ToInteger32(LConstantOperand::cast(current_capacity)); - __ Branch(deferred->entry(), ge, ToRegister(key), - Operand(constant_capacity)); - } else { - __ Branch(deferred->entry(), ge, ToRegister(key), - Operand(ToRegister(current_capacity))); - } - - if (instr->elements()->IsRegister()) { - __ mov(result, ToRegister(instr->elements())); - } else { - __ Ld(result, ToMemOperand(instr->elements())); - } - - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) { - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - Register result = v0; - __ mov(result, zero_reg); - - // We have to call a stub. 
- { - PushSafepointRegistersScope scope(this); - if (instr->object()->IsRegister()) { - __ mov(result, ToRegister(instr->object())); - } else { - __ Ld(result, ToMemOperand(instr->object())); - } - - LOperand* key = instr->key(); - if (key->IsConstantOperand()) { - __ li(a3, Operand(ToSmi(LConstantOperand::cast(key)))); - } else { - __ mov(a3, ToRegister(key)); - __ SmiTag(a3); - } - - GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind()); - __ mov(a0, result); - __ CallStub(&stub); - RecordSafepointWithLazyDeopt( - instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - __ StoreToSafepointRegisterSlot(result, result); - } - - // Deopt on smi, which means the elements array changed to dictionary mode. - __ SmiTst(result, at); - DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg)); -} - - -void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { - UNREACHABLE(); -} - - -void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { - Register object = ToRegister(instr->object()); - Register temp = ToRegister(instr->temp()); - Label no_memento_found; - __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); - DeoptimizeIf(al, instr, DeoptimizeReason::kMementoFound); - __ bind(&no_memento_found); -} - - -void LCodeGen::DoStringAdd(LStringAdd* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - DCHECK(ToRegister(instr->left()).is(a1)); - DCHECK(ToRegister(instr->right()).is(a0)); - StringAddStub stub(isolate(), - instr->hydrogen()->flags(), - instr->hydrogen()->pretenure_flag()); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); -} - - -void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { - class DeferredStringCharCodeAt final : public LDeferredCode { - public: - DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LStringCharCodeAt* instr_; - }; - - DeferredStringCharCodeAt* deferred = - new(zone()) DeferredStringCharCodeAt(this, instr); - StringCharLoadGenerator::Generate(masm(), - ToRegister(instr->string()), - ToRegister(instr->index()), - ToRegister(instr->result()), - deferred->entry()); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { - Register string = ToRegister(instr->string()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - __ mov(result, zero_reg); - - PushSafepointRegistersScope scope(this); - __ push(string); - // Push the index as a smi. This is safe because of the checks in - // DoStringCharCodeAt above. 
- if (instr->index()->IsConstantOperand()) { - int const_index = ToInteger32(LConstantOperand::cast(instr->index())); - __ Daddu(scratch, zero_reg, Operand(Smi::FromInt(const_index))); - __ push(scratch); - } else { - Register index = ToRegister(instr->index()); - __ SmiTag(index); - __ push(index); - } - CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr, - instr->context()); - __ AssertSmi(v0); - __ SmiUntag(v0); - __ StoreToSafepointRegisterSlot(v0, result); -} - - -void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { - class DeferredStringCharFromCode final : public LDeferredCode { - public: - DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { - codegen()->DoDeferredStringCharFromCode(instr_); - } - LInstruction* instr() override { return instr_; } - - private: - LStringCharFromCode* instr_; - }; - - DeferredStringCharFromCode* deferred = - new(zone()) DeferredStringCharFromCode(this, instr); - - DCHECK(instr->hydrogen()->value()->representation().IsInteger32()); - Register char_code = ToRegister(instr->char_code()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - DCHECK(!char_code.is(result)); - - __ Branch(deferred->entry(), hi, - char_code, Operand(String::kMaxOneByteCharCode)); - __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); - __ Dlsa(result, result, char_code, kPointerSizeLog2); - __ Ld(result, FieldMemOperand(result, FixedArray::kHeaderSize)); - __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); - __ Branch(deferred->entry(), eq, result, Operand(scratch)); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { - Register char_code = ToRegister(instr->char_code()); - Register result = ToRegister(instr->result()); - - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. 
- __ mov(result, zero_reg); - - PushSafepointRegistersScope scope(this); - __ SmiTag(char_code); - __ push(char_code); - CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr, - instr->context()); - __ StoreToSafepointRegisterSlot(v0, result); -} - - -void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { - LOperand* input = instr->value(); - DCHECK(input->IsRegister() || input->IsStackSlot()); - LOperand* output = instr->result(); - DCHECK(output->IsDoubleRegister()); - FPURegister single_scratch = double_scratch0().low(); - if (input->IsStackSlot()) { - Register scratch = scratch0(); - __ Ld(scratch, ToMemOperand(input)); - __ mtc1(scratch, single_scratch); - } else { - __ mtc1(ToRegister(input), single_scratch); - } - __ cvt_d_w(ToDoubleRegister(output), single_scratch); -} - - -void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { - LOperand* input = instr->value(); - LOperand* output = instr->result(); - - FPURegister dbl_scratch = double_scratch0(); - __ mtc1(ToRegister(input), dbl_scratch); - __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch); -} - - -void LCodeGen::DoNumberTagU(LNumberTagU* instr) { - class DeferredNumberTagU final : public LDeferredCode { - public: - DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { - codegen()->DoDeferredNumberTagIU(instr_, - instr_->value(), - instr_->temp1(), - instr_->temp2(), - UNSIGNED_INT32); - } - LInstruction* instr() override { return instr_; } - - private: - LNumberTagU* instr_; - }; - - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - - DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr); - __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue)); - __ SmiTag(result, input); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, - LOperand* value, - LOperand* temp1, - LOperand* temp2, - IntegerSignedness signedness) { - Label done, slow; - Register src = ToRegister(value); - Register dst = ToRegister(instr->result()); - Register tmp1 = scratch0(); - Register tmp2 = ToRegister(temp1); - Register tmp3 = ToRegister(temp2); - DoubleRegister dbl_scratch = double_scratch0(); - - if (signedness == SIGNED_INT32) { - // There was overflow, so bits 30 and 31 of the original integer - // disagree. Try to allocate a heap number in new space and store - // the value in there. If that fails, call the runtime system. - if (dst.is(src)) { - __ SmiUntag(src, dst); - __ Xor(src, src, Operand(0x80000000)); - } - __ mtc1(src, dbl_scratch); - __ cvt_d_w(dbl_scratch, dbl_scratch); - } else { - __ mtc1(src, dbl_scratch); - __ Cvt_d_uw(dbl_scratch, dbl_scratch); - } - - if (FLAG_inline_new) { - __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow); - __ Branch(&done); - } - - // Slow case: Call the runtime system to do the number allocation. - __ bind(&slow); - { - // TODO(3095996): Put a valid pointer value in the stack slot where the - // result register is stored, as this register is in the pointer map, but - // contains an integer value. - __ mov(dst, zero_reg); - // Preserve the value of all registers. - PushSafepointRegistersScope scope(this); - // Reset the context register. 
- if (!dst.is(cp)) { - __ mov(cp, zero_reg); - } - __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); - RecordSafepointWithRegisters( - instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(v0, dst); - } - - // Done. Put the value in dbl_scratch into the value of the allocated heap - // number. - __ bind(&done); - __ Sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset)); -} - - -void LCodeGen::DoNumberTagD(LNumberTagD* instr) { - class DeferredNumberTagD final : public LDeferredCode { - public: - DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { codegen()->DoDeferredNumberTagD(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LNumberTagD* instr_; - }; - - DoubleRegister input_reg = ToDoubleRegister(instr->value()); - Register scratch = scratch0(); - Register reg = ToRegister(instr->result()); - Register temp1 = ToRegister(instr->temp()); - Register temp2 = ToRegister(instr->temp2()); - - DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); - if (FLAG_inline_new) { - __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); - // We want the untagged address first for performance - __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry()); - } else { - __ Branch(deferred->entry()); - } - __ bind(deferred->exit()); - __ Sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset)); -} - - -void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - Register reg = ToRegister(instr->result()); - __ mov(reg, zero_reg); - - PushSafepointRegistersScope scope(this); - // Reset the context register. - if (!reg.is(cp)) { - __ mov(cp, zero_reg); - } - __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); - RecordSafepointWithRegisters( - instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(v0, reg); -} - - -void LCodeGen::DoSmiTag(LSmiTag* instr) { - HChange* hchange = instr->hydrogen(); - Register input = ToRegister(instr->value()); - Register output = ToRegister(instr->result()); - if (hchange->CheckFlag(HValue::kCanOverflow) && - hchange->value()->CheckFlag(HValue::kUint32)) { - __ And(at, input, Operand(0x80000000)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, at, Operand(zero_reg)); - } - if (hchange->CheckFlag(HValue::kCanOverflow) && - !hchange->value()->CheckFlag(HValue::kUint32)) { - __ SmiTagCheckOverflow(output, input, at); - DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, at, Operand(zero_reg)); - } else { - __ SmiTag(output, input); - } -} - - -void LCodeGen::DoSmiUntag(LSmiUntag* instr) { - Register scratch = scratch0(); - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - if (instr->needs_check()) { - STATIC_ASSERT(kHeapObjectTag == 1); - // If the input is a HeapObject, value of scratch won't be zero. 
- __ And(scratch, input, Operand(kHeapObjectTag)); - __ SmiUntag(result, input); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, scratch, - Operand(zero_reg)); - } else { - __ SmiUntag(result, input); - } -} - - -void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, - DoubleRegister result_reg, - NumberUntagDMode mode) { - bool can_convert_undefined_to_nan = instr->truncating(); - bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); - - Register scratch = scratch0(); - Label convert, load_smi, done; - if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { - // Smi check. - __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); - // Heap number map check. - __ Ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); - __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); - if (can_convert_undefined_to_nan) { - __ Branch(&convert, ne, scratch, Operand(at)); - } else { - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch, - Operand(at)); - } - // Load heap number. - __ Ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); - if (deoptimize_on_minus_zero) { - __ mfc1(at, result_reg); - __ Branch(&done, ne, at, Operand(zero_reg)); - __ mfhc1(scratch, result_reg); // Get exponent/sign bits. - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, scratch, - Operand(HeapNumber::kSignMask)); - } - __ Branch(&done); - if (can_convert_undefined_to_nan) { - __ bind(&convert); - // Convert undefined (and hole) to NaN. - __ LoadRoot(at, Heap::kUndefinedValueRootIndex); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined, - input_reg, Operand(at)); - __ LoadRoot(scratch, Heap::kNanValueRootIndex); - __ Ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset)); - __ Branch(&done); - } - } else { - __ SmiUntag(scratch, input_reg); - DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); - } - // Smi to double register conversion - __ bind(&load_smi); - // scratch: untagged value of input_reg - __ mtc1(scratch, result_reg); - __ cvt_d_w(result_reg, result_reg); - __ bind(&done); -} - - -void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { - Register input_reg = ToRegister(instr->value()); - Register scratch1 = scratch0(); - Register scratch2 = ToRegister(instr->temp()); - DoubleRegister double_scratch = double_scratch0(); - DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2()); - - DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2)); - DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1)); - - Label done; - - // The input is a tagged HeapObject. - // Heap number map check. - __ Ld(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset)); - __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); - // This 'at' value and scratch1 map value are used for tests in both clauses - // of the if. - - if (instr->truncating()) { - Label truncate; - __ Branch(USE_DELAY_SLOT, &truncate, eq, scratch1, Operand(at)); - __ mov(scratch2, input_reg); // In delay slot. - __ Lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball, scratch1, - Operand(ODDBALL_TYPE)); - __ bind(&truncate); - __ TruncateHeapNumberToI(input_reg, scratch2); - } else { - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch1, - Operand(at)); - - // Load the double value. 
- __ Ldc1(double_scratch, - FieldMemOperand(input_reg, HeapNumber::kValueOffset)); - - Register except_flag = scratch2; - __ EmitFPUTruncate(kRoundToZero, - input_reg, - double_scratch, - scratch1, - double_scratch2, - except_flag, - kCheckForInexactConversion); - - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag, - Operand(zero_reg)); - - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - __ Branch(&done, ne, input_reg, Operand(zero_reg)); - - __ mfhc1(scratch1, double_scratch); // Get exponent/sign bits. - __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1, - Operand(zero_reg)); - } - } - __ bind(&done); -} - - -void LCodeGen::DoTaggedToI(LTaggedToI* instr) { - class DeferredTaggedToI final : public LDeferredCode { - public: - DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { codegen()->DoDeferredTaggedToI(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LTaggedToI* instr_; - }; - - LOperand* input = instr->value(); - DCHECK(input->IsRegister()); - DCHECK(input->Equals(instr->result())); - - Register input_reg = ToRegister(input); - - if (instr->hydrogen()->value()->representation().IsSmi()) { - __ SmiUntag(input_reg); - } else { - DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); - - // Let the deferred code handle the HeapObject case. - __ JumpIfNotSmi(input_reg, deferred->entry()); - - // Smi to int32 conversion. - __ SmiUntag(input_reg); - __ bind(deferred->exit()); - } -} - - -void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { - LOperand* input = instr->value(); - DCHECK(input->IsRegister()); - LOperand* result = instr->result(); - DCHECK(result->IsDoubleRegister()); - - Register input_reg = ToRegister(input); - DoubleRegister result_reg = ToDoubleRegister(result); - - HValue* value = instr->hydrogen()->value(); - NumberUntagDMode mode = value->representation().IsSmi() - ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; - - EmitNumberUntagD(instr, input_reg, result_reg, mode); -} - - -void LCodeGen::DoDoubleToI(LDoubleToI* instr) { - Register result_reg = ToRegister(instr->result()); - Register scratch1 = scratch0(); - DoubleRegister double_input = ToDoubleRegister(instr->value()); - - if (instr->truncating()) { - __ TruncateDoubleToI(result_reg, double_input); - } else { - Register except_flag = LCodeGen::scratch1(); - - __ EmitFPUTruncate(kRoundToMinusInf, - result_reg, - double_input, - scratch1, - double_scratch0(), - except_flag, - kCheckForInexactConversion); - - // Deopt if the operation did not succeed (except_flag != 0). - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag, - Operand(zero_reg)); - - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label done; - __ Branch(&done, ne, result_reg, Operand(zero_reg)); - __ mfhc1(scratch1, double_input); // Get exponent/sign bits. 
- __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1, - Operand(zero_reg)); - __ bind(&done); - } - } -} - - -void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { - Register result_reg = ToRegister(instr->result()); - Register scratch1 = LCodeGen::scratch0(); - DoubleRegister double_input = ToDoubleRegister(instr->value()); - - if (instr->truncating()) { - __ TruncateDoubleToI(result_reg, double_input); - } else { - Register except_flag = LCodeGen::scratch1(); - - __ EmitFPUTruncate(kRoundToMinusInf, - result_reg, - double_input, - scratch1, - double_scratch0(), - except_flag, - kCheckForInexactConversion); - - // Deopt if the operation did not succeed (except_flag != 0). - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag, - Operand(zero_reg)); - - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label done; - __ Branch(&done, ne, result_reg, Operand(zero_reg)); - __ mfhc1(scratch1, double_input); // Get exponent/sign bits. - __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1, - Operand(zero_reg)); - __ bind(&done); - } - } - __ SmiTag(result_reg, result_reg); -} - - -void LCodeGen::DoCheckSmi(LCheckSmi* instr) { - LOperand* input = instr->value(); - __ SmiTst(ToRegister(input), at); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, at, Operand(zero_reg)); -} - - -void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - LOperand* input = instr->value(); - __ SmiTst(ToRegister(input), at); - DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg)); - } -} - - -void LCodeGen::DoCheckArrayBufferNotNeutered( - LCheckArrayBufferNotNeutered* instr) { - Register view = ToRegister(instr->view()); - Register scratch = scratch0(); - - __ Ld(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset)); - __ Lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset)); - __ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift); - DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, at, - Operand(zero_reg)); -} - - -void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { - Register input = ToRegister(instr->value()); - Register scratch = scratch0(); - - __ GetObjectType(input, scratch, scratch); - - if (instr->hydrogen()->is_interval_check()) { - InstanceType first; - InstanceType last; - instr->hydrogen()->GetCheckInterval(&first, &last); - - // If there is only one type in the interval check for equality. - if (first == last) { - DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType, scratch, - Operand(first)); - } else { - DeoptimizeIf(lo, instr, DeoptimizeReason::kWrongInstanceType, scratch, - Operand(first)); - // Omit check for the last type. - if (last != LAST_TYPE) { - DeoptimizeIf(hi, instr, DeoptimizeReason::kWrongInstanceType, scratch, - Operand(last)); - } - } - } else { - uint8_t mask; - uint8_t tag; - instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); - - if (base::bits::IsPowerOfTwo32(mask)) { - DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); - __ And(at, scratch, mask); - DeoptimizeIf(tag == 0 ? 
ne : eq, instr, - DeoptimizeReason::kWrongInstanceType, at, Operand(zero_reg)); - } else { - __ And(scratch, scratch, Operand(mask)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType, scratch, - Operand(tag)); - } - } -} - - -void LCodeGen::DoCheckValue(LCheckValue* instr) { - Register reg = ToRegister(instr->value()); - Handle<HeapObject> object = instr->hydrogen()->object().handle(); - AllowDeferredHandleDereference smi_check; - if (isolate()->heap()->InNewSpace(*object)) { - Register reg = ToRegister(instr->value()); - Handle<Cell> cell = isolate()->factory()->NewCell(object); - __ li(at, Operand(cell)); - __ Ld(at, FieldMemOperand(at, Cell::kValueOffset)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg, Operand(at)); - } else { - DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg, - Operand(object)); - } -} - - -void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { - Label deopt, done; - // If the map is not deprecated the migration attempt does not make sense. - __ Ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); - __ Lwu(scratch0(), FieldMemOperand(scratch0(), Map::kBitField3Offset)); - __ And(at, scratch0(), Operand(Map::Deprecated::kMask)); - __ Branch(&deopt, eq, at, Operand(zero_reg)); - - { - PushSafepointRegistersScope scope(this); - __ push(object); - __ mov(cp, zero_reg); - __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); - RecordSafepointWithRegisters( - instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(v0, scratch0()); - } - __ SmiTst(scratch0(), at); - __ Branch(&done, ne, at, Operand(zero_reg)); - - __ bind(&deopt); - // In case of "al" condition the operands are not used so just pass zero_reg - // there. - DeoptimizeIf(al, instr, DeoptimizeReason::kInstanceMigrationFailed, zero_reg, - Operand(zero_reg)); - - __ bind(&done); -} - - -void LCodeGen::DoCheckMaps(LCheckMaps* instr) { - class DeferredCheckMaps final : public LDeferredCode { - public: - DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) - : LDeferredCode(codegen), instr_(instr), object_(object) { - SetExit(check_maps()); - } - void Generate() override { - codegen()->DoDeferredInstanceMigration(instr_, object_); - } - Label* check_maps() { return &check_maps_; } - LInstruction* instr() override { return instr_; } - - private: - LCheckMaps* instr_; - Label check_maps_; - Register object_; - }; - - if (instr->hydrogen()->IsStabilityCheck()) { - const UniqueSet<Map>* maps = instr->hydrogen()->maps(); - for (int i = 0; i < maps->size(); ++i) { - AddStabilityDependency(maps->at(i).handle()); - } - return; - } - - Register map_reg = scratch0(); - LOperand* input = instr->value(); - DCHECK(input->IsRegister()); - Register reg = ToRegister(input); - __ Ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); - - DeferredCheckMaps* deferred = NULL; - if (instr->hydrogen()->HasMigrationTarget()) { - deferred = new(zone()) DeferredCheckMaps(this, instr, reg); - __ bind(deferred->check_maps()); - } - - const UniqueSet<Map>* maps = instr->hydrogen()->maps(); - Label success; - for (int i = 0; i < maps->size() - 1; i++) { - Handle<Map> map = maps->at(i).handle(); - __ CompareMapAndBranch(map_reg, map, &success, eq, &success); - } - Handle<Map> map = maps->at(maps->size() - 1).handle(); - // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
- if (instr->hydrogen()->HasMigrationTarget()) { - __ Branch(deferred->entry(), ne, map_reg, Operand(map)); - } else { - DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map_reg, Operand(map)); - } - - __ bind(&success); -} - - -void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { - DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); - Register result_reg = ToRegister(instr->result()); - DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); - __ ClampDoubleToUint8(result_reg, value_reg, temp_reg); -} - - -void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { - Register unclamped_reg = ToRegister(instr->unclamped()); - Register result_reg = ToRegister(instr->result()); - __ ClampUint8(result_reg, unclamped_reg); -} - - -void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { - Register scratch = scratch0(); - Register input_reg = ToRegister(instr->unclamped()); - Register result_reg = ToRegister(instr->result()); - DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); - Label is_smi, done, heap_number; - - // Both smi and heap number cases are handled. - __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi); - - // Check for heap number - __ Ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); - __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map())); - - // Check for undefined. Undefined is converted to zero for clamping - // conversions. - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined, input_reg, - Operand(factory()->undefined_value())); - __ mov(result_reg, zero_reg); - __ jmp(&done); - - // Heap number - __ bind(&heap_number); - __ Ldc1(double_scratch0(), - FieldMemOperand(input_reg, HeapNumber::kValueOffset)); - __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg); - __ jmp(&done); - - __ bind(&is_smi); - __ ClampUint8(result_reg, scratch); - - __ bind(&done); -} - - -void LCodeGen::DoAllocate(LAllocate* instr) { - class DeferredAllocate final : public LDeferredCode { - public: - DeferredAllocate(LCodeGen* codegen, LAllocate* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { codegen()->DoDeferredAllocate(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LAllocate* instr_; - }; - - DeferredAllocate* deferred = - new(zone()) DeferredAllocate(this, instr); - - Register result = ToRegister(instr->result()); - Register scratch = ToRegister(instr->temp1()); - Register scratch2 = ToRegister(instr->temp2()); - - // Allocate memory for the object. 
- AllocationFlags flags = NO_ALLOCATION_FLAGS; - if (instr->hydrogen()->MustAllocateDoubleAligned()) { - flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); - } - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - flags = static_cast<AllocationFlags>(flags | PRETENURE); - } - - if (instr->hydrogen()->IsAllocationFoldingDominator()) { - flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR); - } - DCHECK(!instr->hydrogen()->IsAllocationFolded()); - - if (instr->size()->IsConstantOperand()) { - int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - CHECK(size <= kMaxRegularHeapObjectSize); - __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); - } else { - Register size = ToRegister(instr->size()); - __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); - } - - __ bind(deferred->exit()); - - if (instr->hydrogen()->MustPrefillWithFiller()) { - STATIC_ASSERT(kHeapObjectTag == 1); - if (instr->size()->IsConstantOperand()) { - int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - __ li(scratch, Operand(size - kHeapObjectTag)); - } else { - __ Dsubu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag)); - } - __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); - Label loop; - __ bind(&loop); - __ Dsubu(scratch, scratch, Operand(kPointerSize)); - __ Daddu(at, result, Operand(scratch)); - __ Sd(scratch2, MemOperand(at)); - __ Branch(&loop, ge, scratch, Operand(zero_reg)); - } -} - - -void LCodeGen::DoDeferredAllocate(LAllocate* instr) { - Register result = ToRegister(instr->result()); - - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - __ mov(result, zero_reg); - - PushSafepointRegistersScope scope(this); - if (instr->size()->IsRegister()) { - Register size = ToRegister(instr->size()); - DCHECK(!size.is(result)); - __ SmiTag(size); - __ push(size); - } else { - int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - if (size >= 0 && size <= Smi::kMaxValue) { - __ li(v0, Operand(Smi::FromInt(size))); - __ Push(v0); - } else { - // We should never get here at runtime => abort - __ stop("invalid allocation size"); - return; - } - } - - int flags = AllocateDoubleAlignFlag::encode( - instr->hydrogen()->MustAllocateDoubleAligned()); - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - flags = AllocateTargetSpace::update(flags, OLD_SPACE); - } else { - flags = AllocateTargetSpace::update(flags, NEW_SPACE); - } - __ li(v0, Operand(Smi::FromInt(flags))); - __ Push(v0); - - CallRuntimeFromDeferred( - Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); - __ StoreToSafepointRegisterSlot(v0, result); - - if (instr->hydrogen()->IsAllocationFoldingDominator()) { - AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS; - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE); - } - // If the allocation folding dominator allocate triggered a GC, allocation - // happend in the runtime. We have to reset the top pointer to virtually - // undo the allocation.
- ExternalReference allocation_top = - AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags); - Register top_address = scratch0(); - __ Dsubu(v0, v0, Operand(kHeapObjectTag)); - __ li(top_address, Operand(allocation_top)); - __ Sd(v0, MemOperand(top_address)); - __ Daddu(v0, v0, Operand(kHeapObjectTag)); - } -} - -void LCodeGen::DoFastAllocate(LFastAllocate* instr) { - DCHECK(instr->hydrogen()->IsAllocationFolded()); - DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator()); - Register result = ToRegister(instr->result()); - Register scratch1 = ToRegister(instr->temp1()); - Register scratch2 = ToRegister(instr->temp2()); - - AllocationFlags flags = ALLOCATION_FOLDED; - if (instr->hydrogen()->MustAllocateDoubleAligned()) { - flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); - } - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - flags = static_cast<AllocationFlags>(flags | PRETENURE); - } - if (instr->size()->IsConstantOperand()) { - int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - CHECK(size <= kMaxRegularHeapObjectSize); - __ FastAllocate(size, result, scratch1, scratch2, flags); - } else { - Register size = ToRegister(instr->size()); - __ FastAllocate(size, result, scratch1, scratch2, flags); - } -} - - -void LCodeGen::DoTypeof(LTypeof* instr) { - DCHECK(ToRegister(instr->value()).is(a3)); - DCHECK(ToRegister(instr->result()).is(v0)); - Label end, do_call; - Register value_register = ToRegister(instr->value()); - __ JumpIfNotSmi(value_register, &do_call); - __ li(v0, Operand(isolate()->factory()->number_string())); - __ jmp(&end); - __ bind(&do_call); - Callable callable = Builtins::CallableFor(isolate(), Builtins::kTypeof); - CallCode(callable.code(), RelocInfo::CODE_TARGET, instr); - __ bind(&end); -} - - -void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { - Register input = ToRegister(instr->value()); - - Register cmp1 = no_reg; - Operand cmp2 = Operand(no_reg); - - Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_), - instr->FalseLabel(chunk_), - input, - instr->type_literal(), - &cmp1, - &cmp2); - - DCHECK(cmp1.is_valid()); - DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid()); - - if (final_branch_condition != kNoCondition) { - EmitBranch(instr, final_branch_condition, cmp1, cmp2); - } -} - - -Condition LCodeGen::EmitTypeofIs(Label* true_label, - Label* false_label, - Register input, - Handle<String> type_name, - Register* cmp1, - Operand* cmp2) { - // This function utilizes the delay slot heavily. This is used to load - // values that are always usable without depending on the type of the input - // register.
- Condition final_branch_condition = kNoCondition; - Register scratch = scratch0(); - Factory* factory = isolate()->factory(); - if (String::Equals(type_name, factory->number_string())) { - __ JumpIfSmi(input, true_label); - __ Ld(input, FieldMemOperand(input, HeapObject::kMapOffset)); - __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); - *cmp1 = input; - *cmp2 = Operand(at); - final_branch_condition = eq; - - } else if (String::Equals(type_name, factory->string_string())) { - __ JumpIfSmi(input, false_label); - __ GetObjectType(input, input, scratch); - *cmp1 = scratch; - *cmp2 = Operand(FIRST_NONSTRING_TYPE); - final_branch_condition = lt; - - } else if (String::Equals(type_name, factory->symbol_string())) { - __ JumpIfSmi(input, false_label); - __ GetObjectType(input, input, scratch); - *cmp1 = scratch; - *cmp2 = Operand(SYMBOL_TYPE); - final_branch_condition = eq; - - } else if (String::Equals(type_name, factory->boolean_string())) { - __ LoadRoot(at, Heap::kTrueValueRootIndex); - __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input)); - __ LoadRoot(at, Heap::kFalseValueRootIndex); - *cmp1 = at; - *cmp2 = Operand(input); - final_branch_condition = eq; - - } else if (String::Equals(type_name, factory->undefined_string())) { - __ LoadRoot(at, Heap::kNullValueRootIndex); - __ Branch(USE_DELAY_SLOT, false_label, eq, at, Operand(input)); - // The first instruction of JumpIfSmi is an And - it is safe in the delay - // slot. - __ JumpIfSmi(input, false_label); - // Check for undetectable objects => true. - __ Ld(input, FieldMemOperand(input, HeapObject::kMapOffset)); - __ Lbu(at, FieldMemOperand(input, Map::kBitFieldOffset)); - __ And(at, at, 1 << Map::kIsUndetectable); - *cmp1 = at; - *cmp2 = Operand(zero_reg); - final_branch_condition = ne; - - } else if (String::Equals(type_name, factory->function_string())) { - __ JumpIfSmi(input, false_label); - __ Ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); - __ Lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); - __ And(scratch, scratch, - Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable))); - *cmp1 = scratch; - *cmp2 = Operand(1 << Map::kIsCallable); - final_branch_condition = eq; - - } else if (String::Equals(type_name, factory->object_string())) { - __ JumpIfSmi(input, false_label); - __ LoadRoot(at, Heap::kNullValueRootIndex); - __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input)); - STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); - __ GetObjectType(input, scratch, scratch1()); - __ Branch(false_label, lt, scratch1(), Operand(FIRST_JS_RECEIVER_TYPE)); - // Check for callable or undetectable objects => false. - __ Lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); - __ And(at, scratch, - Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable))); - *cmp1 = at; - *cmp2 = Operand(zero_reg); - final_branch_condition = eq; - - } else { - *cmp1 = at; - *cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion. - __ Branch(false_label); - } - - return final_branch_condition; -} - - -void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { - if (info()->ShouldEnsureSpaceForLazyDeopt()) { - // Ensure that we have enough space after the previous lazy-bailout - // instruction for patching the code here. 
- int current_pc = masm()->pc_offset(); - if (current_pc < last_lazy_deopt_pc_ + space_needed) { - int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; - DCHECK_EQ(0, padding_size % Assembler::kInstrSize); - while (padding_size > 0) { - __ nop(); - padding_size -= Assembler::kInstrSize; - } - } - } - last_lazy_deopt_pc_ = masm()->pc_offset(); -} - - -void LCodeGen::DoLazyBailout(LLazyBailout* instr) { - last_lazy_deopt_pc_ = masm()->pc_offset(); - DCHECK(instr->HasEnvironment()); - LEnvironment* env = instr->environment(); - RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); - safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); -} - - -void LCodeGen::DoDeoptimize(LDeoptimize* instr) { - Deoptimizer::BailoutType type = instr->hydrogen()->type(); - // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the - // needed return address), even though the implementation of LAZY and EAGER is - // now identical. When LAZY is eventually completely folded into EAGER, remove - // the special case below. - if (info()->IsStub() && type == Deoptimizer::EAGER) { - type = Deoptimizer::LAZY; - } - - DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type, zero_reg, - Operand(zero_reg)); -} - - -void LCodeGen::DoDummy(LDummy* instr) { - // Nothing to see here, move on! -} - - -void LCodeGen::DoDummyUse(LDummyUse* instr) { - // Nothing to see here, move on! -} - - -void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { - PushSafepointRegistersScope scope(this); - LoadContextFromDeferred(instr->context()); - __ CallRuntimeSaveDoubles(Runtime::kStackGuard); - RecordSafepointWithLazyDeopt( - instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - DCHECK(instr->HasEnvironment()); - LEnvironment* env = instr->environment(); - safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); -} - - -void LCodeGen::DoStackCheck(LStackCheck* instr) { - class DeferredStackCheck final : public LDeferredCode { - public: - DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { codegen()->DoDeferredStackCheck(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LStackCheck* instr_; - }; - - DCHECK(instr->HasEnvironment()); - LEnvironment* env = instr->environment(); - // There is no LLazyBailout instruction for stack-checks. We have to - // prepare for lazy deoptimization explicitly here. - if (instr->hydrogen()->is_function_entry()) { - // Perform stack overflow check. - Label done; - __ LoadRoot(at, Heap::kStackLimitRootIndex); - __ Branch(&done, hs, sp, Operand(at)); - DCHECK(instr->context()->IsRegister()); - DCHECK(ToRegister(instr->context()).is(cp)); - CallCode(isolate()->builtins()->StackCheck(), - RelocInfo::CODE_TARGET, - instr); - __ bind(&done); - } else { - DCHECK(instr->hydrogen()->is_backwards_branch()); - // Perform stack overflow check if this goto needs it before jumping. - DeferredStackCheck* deferred_stack_check = - new(zone()) DeferredStackCheck(this, instr); - __ LoadRoot(at, Heap::kStackLimitRootIndex); - __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at)); - EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); - __ bind(instr->done_label()); - deferred_stack_check->SetExit(instr->done_label()); - RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); - // Don't record a deoptimization index for the safepoint here. 
- // This will be done explicitly when emitting call and the safepoint in - // the deferred code. - } -} - - -void LCodeGen::DoOsrEntry(LOsrEntry* instr) { - // This is a pseudo-instruction that ensures that the environment here is - // properly registered for deoptimization and records the assembler's PC - // offset. - LEnvironment* environment = instr->environment(); - - // If the environment were already registered, we would have no way of - // backpatching it with the spill slot operands. - DCHECK(!environment->HasBeenRegistered()); - RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); - - GenerateOsrPrologue(); -} - - -void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { - Register result = ToRegister(instr->result()); - Register object = ToRegister(instr->object()); - - Label use_cache, call_runtime; - DCHECK(object.is(a0)); - __ CheckEnumCache(&call_runtime); - - __ Ld(result, FieldMemOperand(object, HeapObject::kMapOffset)); - __ Branch(&use_cache); - - // Get the set of properties to enumerate. - __ bind(&call_runtime); - __ push(object); - CallRuntime(Runtime::kForInEnumerate, instr); - __ bind(&use_cache); -} - - -void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { - Register map = ToRegister(instr->map()); - Register result = ToRegister(instr->result()); - Label load_cache, done; - __ EnumLength(result, map); - __ Branch(&load_cache, ne, result, Operand(Smi::kZero)); - __ li(result, Operand(isolate()->factory()->empty_fixed_array())); - __ jmp(&done); - - __ bind(&load_cache); - __ LoadInstanceDescriptors(map, result); - __ Ld(result, - FieldMemOperand(result, DescriptorArray::kEnumCacheBridgeOffset)); - __ Ld(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); - DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache, result, - Operand(zero_reg)); - - __ bind(&done); -} - - -void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { - Register object = ToRegister(instr->value()); - Register map = ToRegister(instr->map()); - __ Ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map, - Operand(scratch0())); -} - - -void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, - Register result, - Register object, - Register index) { - PushSafepointRegistersScope scope(this); - __ Push(object, index); - __ mov(cp, zero_reg); - __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); - RecordSafepointWithRegisters( - instr->pointer_map(), 2, Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(v0, result); -} - - -void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { - class DeferredLoadMutableDouble final : public LDeferredCode { - public: - DeferredLoadMutableDouble(LCodeGen* codegen, - LLoadFieldByIndex* instr, - Register result, - Register object, - Register index) - : LDeferredCode(codegen), - instr_(instr), - result_(result), - object_(object), - index_(index) { - } - void Generate() override { - codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_); - } - LInstruction* instr() override { return instr_; } - - private: - LLoadFieldByIndex* instr_; - Register result_; - Register object_; - Register index_; - }; - - Register object = ToRegister(instr->object()); - Register index = ToRegister(instr->index()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - - DeferredLoadMutableDouble* deferred; - deferred = new(zone()) DeferredLoadMutableDouble( - this, instr, result, object, index); - - 
Label out_of_object, done; - - __ And(scratch, index, Operand(Smi::FromInt(1))); - __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg)); - __ dsra(index, index, 1); - - __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg)); - __ SmiScale(scratch, index, kPointerSizeLog2); // In delay slot. - __ Daddu(scratch, object, scratch); - __ Ld(result, FieldMemOperand(scratch, JSObject::kHeaderSize)); - - __ Branch(&done); - - __ bind(&out_of_object); - __ Ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); - // Index is equal to negated out of object property index plus 1. - __ Dsubu(scratch, result, scratch); - __ Ld(result, - FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize)); - __ bind(deferred->exit()); - __ bind(&done); -} - -#undef __ - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/mips64/lithium-codegen-mips64.h b/src/crankshaft/mips64/lithium-codegen-mips64.h deleted file mode 100644 index 58c907e602..0000000000 --- a/src/crankshaft/mips64/lithium-codegen-mips64.h +++ /dev/null @@ -1,408 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_MIPS64_LITHIUM_CODEGEN_MIPS_H_ -#define V8_CRANKSHAFT_MIPS64_LITHIUM_CODEGEN_MIPS_H_ - -#include "src/ast/scopes.h" -#include "src/crankshaft/lithium-codegen.h" -#include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h" -#include "src/crankshaft/mips64/lithium-mips64.h" -#include "src/deoptimizer.h" -#include "src/safepoint-table.h" -#include "src/utils.h" - -namespace v8 { -namespace internal { - -// Forward declarations. -class LDeferredCode; -class SafepointGenerator; - -class LCodeGen: public LCodeGenBase { - public: - LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) - : LCodeGenBase(chunk, assembler, info), - jump_table_(4, info->zone()), - scope_(info->scope()), - deferred_(8, info->zone()), - frame_is_built_(false), - safepoints_(info->zone()), - resolver_(this), - expected_safepoint_kind_(Safepoint::kSimple) { - PopulateDeoptimizationLiteralsWithInlinedFunctions(); - } - - - int LookupDestination(int block_id) const { - return chunk()->LookupDestination(block_id); - } - - bool IsNextEmittedBlock(int block_id) const { - return LookupDestination(block_id) == GetNextEmittedBlock(); - } - - bool NeedsEagerFrame() const { - return HasAllocatedStackSlots() || info()->is_non_deferred_calling() || - !info()->IsStub() || info()->requires_frame(); - } - bool NeedsDeferredFrame() const { - return !NeedsEagerFrame() && info()->is_deferred_calling(); - } - - RAStatus GetRAState() const { - return frame_is_built_ ? kRAHasBeenSaved : kRAHasNotBeenSaved; - } - - // Support for converting LOperands to assembler types. - // LOperand must be a register. - Register ToRegister(LOperand* op) const; - - // LOperand is loaded into scratch, unless already a register. - Register EmitLoadRegister(LOperand* op, Register scratch); - - // LOperand must be a double register. - DoubleRegister ToDoubleRegister(LOperand* op) const; - - // LOperand is loaded into dbl_scratch, unless already a double register. 
- DoubleRegister EmitLoadDoubleRegister(LOperand* op, - FloatRegister flt_scratch, - DoubleRegister dbl_scratch); - int64_t ToRepresentation_donotuse(LConstantOperand* op, - const Representation& r) const; - int32_t ToInteger32(LConstantOperand* op) const; - Smi* ToSmi(LConstantOperand* op) const; - double ToDouble(LConstantOperand* op) const; - Operand ToOperand(LOperand* op); - MemOperand ToMemOperand(LOperand* op) const; - // Returns a MemOperand pointing to the high word of a DoubleStackSlot. - MemOperand ToHighMemOperand(LOperand* op) const; - - bool IsInteger32(LConstantOperand* op) const; - bool IsSmi(LConstantOperand* op) const; - Handle<Object> ToHandle(LConstantOperand* op) const; - - // Try to generate code for the entire chunk, but it may fail if the - // chunk contains constructs we cannot handle. Returns true if the - // code generation attempt succeeded. - bool GenerateCode(); - - // Finish the code by setting stack height, safepoint, and bailout - // information on it. - void FinishCode(Handle<Code> code); - - void DoDeferredNumberTagD(LNumberTagD* instr); - - enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 }; - void DoDeferredNumberTagIU(LInstruction* instr, - LOperand* value, - LOperand* temp1, - LOperand* temp2, - IntegerSignedness signedness); - - void DoDeferredTaggedToI(LTaggedToI* instr); - void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr); - void DoDeferredStackCheck(LStackCheck* instr); - void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr); - void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr); - void DoDeferredStringCharFromCode(LStringCharFromCode* instr); - void DoDeferredAllocate(LAllocate* instr); - - void DoDeferredInstanceMigration(LCheckMaps* instr, Register object); - void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, - Register result, - Register object, - Register index); - - // Parallel move support. - void DoParallelMove(LParallelMove* move); - void DoGap(LGap* instr); - - MemOperand PrepareKeyedOperand(Register key, - Register base, - bool key_is_constant, - int constant_key, - int element_size, - int shift_size, - int base_offset); - - // Emit frame translation commands for an environment. - void WriteTranslation(LEnvironment* environment, Translation* translation); - - // Declare methods that deal with the individual node types. -#define DECLARE_DO(type) void Do##type(L##type* node); - LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) -#undef DECLARE_DO - - private: - Scope* scope() const { return scope_; } - - Register scratch0() { return kLithiumScratchReg; } - Register scratch1() { return kLithiumScratchReg2; } - DoubleRegister double_scratch0() { return kLithiumScratchDouble; } - - LInstruction* GetNextInstruction(); - - void EmitClassOfTest(Label* if_true, Label* if_false, - Handle<String> class_name, Register input, - Register temporary, Register temporary2); - - bool HasAllocatedStackSlots() const { - return chunk()->HasAllocatedStackSlots(); - } - int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); } - int GetTotalFrameSlotCount() const { - return chunk()->GetTotalFrameSlotCount(); - } - - void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); } - - void SaveCallerDoubles(); - void RestoreCallerDoubles(); - - // Code generation passes. Returns true if code generation should - // continue. 
- void GenerateBodyInstructionPre(LInstruction* instr) override; - bool GeneratePrologue(); - bool GenerateDeferredCode(); - bool GenerateJumpTable(); - bool GenerateSafepointTable(); - - // Generates the custom OSR entrypoint and sets the osr_pc_offset. - void GenerateOsrPrologue(); - - enum SafepointMode { - RECORD_SIMPLE_SAFEPOINT, - RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS - }; - - void CallCode(Handle<Code> code, - RelocInfo::Mode mode, - LInstruction* instr); - - void CallCodeGeneric(Handle<Code> code, - RelocInfo::Mode mode, - LInstruction* instr, - SafepointMode safepoint_mode); - - void CallRuntime(const Runtime::Function* function, - int num_arguments, - LInstruction* instr, - SaveFPRegsMode save_doubles = kDontSaveFPRegs); - - void CallRuntime(Runtime::FunctionId id, - int num_arguments, - LInstruction* instr) { - const Runtime::Function* function = Runtime::FunctionForId(id); - CallRuntime(function, num_arguments, instr); - } - - void CallRuntime(Runtime::FunctionId id, LInstruction* instr) { - const Runtime::Function* function = Runtime::FunctionForId(id); - CallRuntime(function, function->nargs, instr); - } - - void LoadContextFromDeferred(LOperand* context); - void CallRuntimeFromDeferred(Runtime::FunctionId id, - int argc, - LInstruction* instr, - LOperand* context); - - void PrepareForTailCall(const ParameterCount& actual, Register scratch1, - Register scratch2, Register scratch3); - - // Generate a direct call to a known function. Expects the function - // to be in a1. - void CallKnownFunction(Handle<JSFunction> function, - int formal_parameter_count, int arity, - bool is_tail_call, LInstruction* instr); - - void RecordSafepointWithLazyDeopt(LInstruction* instr, - SafepointMode safepoint_mode); - - void RegisterEnvironmentForDeoptimization(LEnvironment* environment, - Safepoint::DeoptMode mode); - void DeoptimizeIf(Condition condition, LInstruction* instr, - DeoptimizeReason deopt_reason, - Deoptimizer::BailoutType bailout_type, - Register src1 = zero_reg, - const Operand& src2 = Operand(zero_reg)); - void DeoptimizeIf(Condition condition, LInstruction* instr, - DeoptimizeReason deopt_reason = DeoptimizeReason::kNoReason, - Register src1 = zero_reg, - const Operand& src2 = Operand(zero_reg)); - - void AddToTranslation(LEnvironment* environment, - Translation* translation, - LOperand* op, - bool is_tagged, - bool is_uint32, - int* object_index_pointer, - int* dematerialized_index_pointer); - - Register ToRegister(int index) const; - DoubleRegister ToDoubleRegister(int index) const; - - MemOperand BuildSeqStringOperand(Register string, - LOperand* index, - String::Encoding encoding); - - void EmitIntegerMathAbs(LMathAbs* instr); - void EmitSmiMathAbs(LMathAbs* instr); - - // Support for recording safepoint information. - void RecordSafepoint(LPointerMap* pointers, - Safepoint::Kind kind, - int arguments, - Safepoint::DeoptMode mode); - void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode); - void RecordSafepoint(Safepoint::DeoptMode mode); - void RecordSafepointWithRegisters(LPointerMap* pointers, - int arguments, - Safepoint::DeoptMode mode); - - static Condition TokenToCondition(Token::Value op, bool is_unsigned); - void EmitGoto(int block); - - // EmitBranch expects to be the last instruction of a block. 
- template <class InstrType> - void EmitBranch(InstrType instr, - Condition condition, - Register src1, - const Operand& src2); - template <class InstrType> - void EmitBranchF(InstrType instr, - Condition condition, - FPURegister src1, - FPURegister src2); - template <class InstrType> - void EmitTrueBranch(InstrType instr, Condition condition, Register src1, - const Operand& src2); - template <class InstrType> - void EmitFalseBranch(InstrType instr, Condition condition, Register src1, - const Operand& src2); - template <class InstrType> - void EmitFalseBranchF(InstrType instr, - Condition condition, - FPURegister src1, - FPURegister src2); - void EmitCmpI(LOperand* left, LOperand* right); - void EmitNumberUntagD(LNumberUntagD* instr, Register input, - DoubleRegister result, NumberUntagDMode mode); - - // Emits optimized code for typeof x == "y". Modifies input register. - // Returns the condition on which a final split to - // true and false label should be made, to optimize fallthrough. - // Returns two registers in cmp1 and cmp2 that can be used in the - // Branch instruction after EmitTypeofIs. - Condition EmitTypeofIs(Label* true_label, - Label* false_label, - Register input, - Handle<String> type_name, - Register* cmp1, - Operand* cmp2); - - // Emits optimized code for %_IsString(x). Preserves input register. - // Returns the condition on which a final split to - // true and false label should be made, to optimize fallthrough. - Condition EmitIsString(Register input, - Register temp1, - Label* is_not_string, - SmiCheck check_needed); - - // Emits optimized code to deep-copy the contents of statically known - // object graphs (e.g. object literal boilerplate). - void EmitDeepCopy(Handle<JSObject> object, - Register result, - Register source, - int* offset, - AllocationSiteMode mode); - // Emit optimized code for integer division. - // Inputs are signed. - // All registers are clobbered. - // If 'remainder' is no_reg, it is not computed. - void EmitSignedIntegerDivisionByConstant(Register result, - Register dividend, - int32_t divisor, - Register remainder, - Register scratch, - LEnvironment* environment); - - - void EnsureSpaceForLazyDeopt(int space_needed) override; - void DoLoadKeyedExternalArray(LLoadKeyed* instr); - void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr); - void DoLoadKeyedFixedArray(LLoadKeyed* instr); - void DoStoreKeyedExternalArray(LStoreKeyed* instr); - void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr); - void DoStoreKeyedFixedArray(LStoreKeyed* instr); - - template <class T> - void EmitVectorLoadICRegisters(T* instr); - - ZoneList<Deoptimizer::JumpTableEntry*> jump_table_; - Scope* const scope_; - ZoneList<LDeferredCode*> deferred_; - bool frame_is_built_; - - // Builder that keeps track of safepoints in the code. The table - // itself is emitted at the end of the generated code. - SafepointTableBuilder safepoints_; - - // Compiler from a set of parallel moves to a sequential list of moves. 
- LGapResolver resolver_; - - Safepoint::Kind expected_safepoint_kind_; - - class PushSafepointRegistersScope final BASE_EMBEDDED { - public: - explicit PushSafepointRegistersScope(LCodeGen* codegen); - - ~PushSafepointRegistersScope(); - - private: - LCodeGen* codegen_; - }; - - friend class LDeferredCode; - friend class LEnvironment; - friend class SafepointGenerator; - DISALLOW_COPY_AND_ASSIGN(LCodeGen); -}; - - -class LDeferredCode : public ZoneObject { - public: - explicit LDeferredCode(LCodeGen* codegen) - : codegen_(codegen), - external_exit_(NULL), - instruction_index_(codegen->current_instruction_) { - codegen->AddDeferredCode(this); - } - - virtual ~LDeferredCode() {} - virtual void Generate() = 0; - virtual LInstruction* instr() = 0; - - void SetExit(Label* exit) { external_exit_ = exit; } - Label* entry() { return &entry_; } - Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; } - int instruction_index() const { return instruction_index_; } - - protected: - LCodeGen* codegen() const { return codegen_; } - MacroAssembler* masm() const { return codegen_->masm(); } - - private: - LCodeGen* codegen_; - Label entry_; - Label exit_; - Label* external_exit_; - int instruction_index_; -}; - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_MIPS64_LITHIUM_CODEGEN_MIPS_H_ diff --git a/src/crankshaft/mips64/lithium-gap-resolver-mips64.cc b/src/crankshaft/mips64/lithium-gap-resolver-mips64.cc deleted file mode 100644 index eb50d4b2f1..0000000000 --- a/src/crankshaft/mips64/lithium-gap-resolver-mips64.cc +++ /dev/null @@ -1,299 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h" - -#include "src/crankshaft/mips64/lithium-codegen-mips64.h" - -namespace v8 { -namespace internal { - -LGapResolver::LGapResolver(LCodeGen* owner) - : cgen_(owner), - moves_(32, owner->zone()), - root_index_(0), - in_cycle_(false), - saved_destination_(NULL) {} - - -void LGapResolver::Resolve(LParallelMove* parallel_move) { - DCHECK(moves_.is_empty()); - // Build up a worklist of moves. - BuildInitialMoveList(parallel_move); - - for (int i = 0; i < moves_.length(); ++i) { - LMoveOperands move = moves_[i]; - // Skip constants to perform them last. They don't block other moves - // and skipping such moves with register destinations keeps those - // registers free for the whole algorithm. - if (!move.IsEliminated() && !move.source()->IsConstantOperand()) { - root_index_ = i; // Any cycle is found when by reaching this move again. - PerformMove(i); - if (in_cycle_) { - RestoreValue(); - } - } - } - - // Perform the moves with constant sources. - for (int i = 0; i < moves_.length(); ++i) { - if (!moves_[i].IsEliminated()) { - DCHECK(moves_[i].source()->IsConstantOperand()); - EmitMove(i); - } - } - - moves_.Rewind(0); -} - - -void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) { - // Perform a linear sweep of the moves to add them to the initial list of - // moves to perform, ignoring any move that is redundant (the source is - // the same as the destination, the destination is ignored and - // unallocated, or the move was already eliminated). 
- const ZoneList* moves = parallel_move->move_operands(); - for (int i = 0; i < moves->length(); ++i) { - LMoveOperands move = moves->at(i); - if (!move.IsRedundant()) moves_.Add(move, cgen_->zone()); - } - Verify(); -} - - -void LGapResolver::PerformMove(int index) { - // Each call to this function performs a move and deletes it from the move - // graph. We first recursively perform any move blocking this one. We - // mark a move as "pending" on entry to PerformMove in order to detect - // cycles in the move graph. - - // We can only find a cycle, when doing a depth-first traversal of moves, - // be encountering the starting move again. So by spilling the source of - // the starting move, we break the cycle. All moves are then unblocked, - // and the starting move is completed by writing the spilled value to - // its destination. All other moves from the spilled source have been - // completed prior to breaking the cycle. - // An additional complication is that moves to MemOperands with large - // offsets (more than 1K or 4K) require us to spill this spilled value to - // the stack, to free up the register. - DCHECK(!moves_[index].IsPending()); - DCHECK(!moves_[index].IsRedundant()); - - // Clear this move's destination to indicate a pending move. The actual - // destination is saved in a stack allocated local. Multiple moves can - // be pending because this function is recursive. - DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated. - LOperand* destination = moves_[index].destination(); - moves_[index].set_destination(NULL); - - // Perform a depth-first traversal of the move graph to resolve - // dependencies. Any unperformed, unpending move with a source the same - // as this one's destination blocks this one so recursively perform all - // such moves. - for (int i = 0; i < moves_.length(); ++i) { - LMoveOperands other_move = moves_[i]; - if (other_move.Blocks(destination) && !other_move.IsPending()) { - PerformMove(i); - // If there is a blocking, pending move it must be moves_[root_index_] - // and all other moves with the same source as moves_[root_index_] are - // sucessfully executed (because they are cycle-free) by this loop. - } - } - - // We are about to resolve this move and don't need it marked as - // pending, so restore its destination. - moves_[index].set_destination(destination); - - // The move may be blocked on a pending move, which must be the starting move. - // In this case, we have a cycle, and we save the source of this move to - // a scratch register to break it. - LMoveOperands other_move = moves_[root_index_]; - if (other_move.Blocks(destination)) { - DCHECK(other_move.IsPending()); - BreakCycle(index); - return; - } - - // This move is no longer blocked. - EmitMove(index); -} - - -void LGapResolver::Verify() { -#ifdef ENABLE_SLOW_DCHECKS - // No operand should be the destination for more than one move. - for (int i = 0; i < moves_.length(); ++i) { - LOperand* destination = moves_[i].destination(); - for (int j = i + 1; j < moves_.length(); ++j) { - SLOW_DCHECK(!destination->Equals(moves_[j].destination())); - } - } -#endif -} - -#define __ ACCESS_MASM(cgen_->masm()) - -void LGapResolver::BreakCycle(int index) { - // We save in a register the value that should end up in the source of - // moves_[root_index]. After performing all moves in the tree rooted - // in that move, we save the value to that source. 
- DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source())); - DCHECK(!in_cycle_); - in_cycle_ = true; - LOperand* source = moves_[index].source(); - saved_destination_ = moves_[index].destination(); - if (source->IsRegister()) { - __ mov(kLithiumScratchReg, cgen_->ToRegister(source)); - } else if (source->IsStackSlot()) { - __ Ld(kLithiumScratchReg, cgen_->ToMemOperand(source)); - } else if (source->IsDoubleRegister()) { - __ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source)); - } else if (source->IsDoubleStackSlot()) { - __ Ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source)); - } else { - UNREACHABLE(); - } - // This move will be done by restoring the saved value to the destination. - moves_[index].Eliminate(); -} - - -void LGapResolver::RestoreValue() { - DCHECK(in_cycle_); - DCHECK(saved_destination_ != NULL); - - // Spilled value is in kLithiumScratchReg or kLithiumScratchDouble. - if (saved_destination_->IsRegister()) { - __ mov(cgen_->ToRegister(saved_destination_), kLithiumScratchReg); - } else if (saved_destination_->IsStackSlot()) { - __ Sd(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_)); - } else if (saved_destination_->IsDoubleRegister()) { - __ mov_d(cgen_->ToDoubleRegister(saved_destination_), - kLithiumScratchDouble); - } else if (saved_destination_->IsDoubleStackSlot()) { - __ Sdc1(kLithiumScratchDouble, cgen_->ToMemOperand(saved_destination_)); - } else { - UNREACHABLE(); - } - - in_cycle_ = false; - saved_destination_ = NULL; -} - - -void LGapResolver::EmitMove(int index) { - LOperand* source = moves_[index].source(); - LOperand* destination = moves_[index].destination(); - - // Dispatch on the source and destination operand kinds. Not all - // combinations are possible. - - if (source->IsRegister()) { - Register source_register = cgen_->ToRegister(source); - if (destination->IsRegister()) { - __ mov(cgen_->ToRegister(destination), source_register); - } else { - DCHECK(destination->IsStackSlot()); - __ Sd(source_register, cgen_->ToMemOperand(destination)); - } - } else if (source->IsStackSlot()) { - MemOperand source_operand = cgen_->ToMemOperand(source); - if (destination->IsRegister()) { - __ Ld(cgen_->ToRegister(destination), source_operand); - } else { - DCHECK(destination->IsStackSlot()); - MemOperand destination_operand = cgen_->ToMemOperand(destination); - if (in_cycle_) { - if (!destination_operand.OffsetIsInt16Encodable()) { - // 'at' is overwritten while saving the value to the destination. - // Therefore we can't use 'at'. It is OK if the read from the source - // destroys 'at', since that happens before the value is read. - // This uses only a single reg of the double reg-pair. 
- __ Ldc1(kLithiumScratchDouble, source_operand); - __ Sdc1(kLithiumScratchDouble, destination_operand); - } else { - __ Ld(at, source_operand); - __ Sd(at, destination_operand); - } - } else { - __ Ld(kLithiumScratchReg, source_operand); - __ Sd(kLithiumScratchReg, destination_operand); - } - } - - } else if (source->IsConstantOperand()) { - LConstantOperand* constant_source = LConstantOperand::cast(source); - if (destination->IsRegister()) { - Register dst = cgen_->ToRegister(destination); - if (cgen_->IsSmi(constant_source)) { - __ li(dst, Operand(cgen_->ToSmi(constant_source))); - } else if (cgen_->IsInteger32(constant_source)) { - __ li(dst, Operand(cgen_->ToInteger32(constant_source))); - } else { - __ li(dst, cgen_->ToHandle(constant_source)); - } - } else if (destination->IsDoubleRegister()) { - DoubleRegister result = cgen_->ToDoubleRegister(destination); - double v = cgen_->ToDouble(constant_source); - __ Move(result, v); - } else { - DCHECK(destination->IsStackSlot()); - DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone. - if (cgen_->IsSmi(constant_source)) { - __ li(kLithiumScratchReg, Operand(cgen_->ToSmi(constant_source))); - __ Sd(kLithiumScratchReg, cgen_->ToMemOperand(destination)); - } else if (cgen_->IsInteger32(constant_source)) { - __ li(kLithiumScratchReg, Operand(cgen_->ToInteger32(constant_source))); - __ Sd(kLithiumScratchReg, cgen_->ToMemOperand(destination)); - } else { - __ li(kLithiumScratchReg, cgen_->ToHandle(constant_source)); - __ Sd(kLithiumScratchReg, cgen_->ToMemOperand(destination)); - } - } - - } else if (source->IsDoubleRegister()) { - DoubleRegister source_register = cgen_->ToDoubleRegister(source); - if (destination->IsDoubleRegister()) { - __ mov_d(cgen_->ToDoubleRegister(destination), source_register); - } else { - DCHECK(destination->IsDoubleStackSlot()); - MemOperand destination_operand = cgen_->ToMemOperand(destination); - __ Sdc1(source_register, destination_operand); - } - - } else if (source->IsDoubleStackSlot()) { - MemOperand source_operand = cgen_->ToMemOperand(source); - if (destination->IsDoubleRegister()) { - __ Ldc1(cgen_->ToDoubleRegister(destination), source_operand); - } else { - DCHECK(destination->IsDoubleStackSlot()); - MemOperand destination_operand = cgen_->ToMemOperand(destination); - if (in_cycle_) { - // kLithiumScratchDouble was used to break the cycle, - // but kLithiumScratchReg is free. - MemOperand source_high_operand = - cgen_->ToHighMemOperand(source); - MemOperand destination_high_operand = - cgen_->ToHighMemOperand(destination); - __ Lw(kLithiumScratchReg, source_operand); - __ Sw(kLithiumScratchReg, destination_operand); - __ Lw(kLithiumScratchReg, source_high_operand); - __ Sw(kLithiumScratchReg, destination_high_operand); - } else { - __ Ldc1(kLithiumScratchDouble, source_operand); - __ Sdc1(kLithiumScratchDouble, destination_operand); - } - } - } else { - UNREACHABLE(); - } - - moves_[index].Eliminate(); -} - - -#undef __ - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/mips64/lithium-gap-resolver-mips64.h b/src/crankshaft/mips64/lithium-gap-resolver-mips64.h deleted file mode 100644 index 85d8e2920c..0000000000 --- a/src/crankshaft/mips64/lithium-gap-resolver-mips64.h +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#ifndef V8_CRANKSHAFT_MIPS64_LITHIUM_GAP_RESOLVER_MIPS_H_ -#define V8_CRANKSHAFT_MIPS64_LITHIUM_GAP_RESOLVER_MIPS_H_ - -#include "src/crankshaft/lithium.h" - -namespace v8 { -namespace internal { - -class LCodeGen; -class LGapResolver; - -class LGapResolver final BASE_EMBEDDED { - public: - explicit LGapResolver(LCodeGen* owner); - - // Resolve a set of parallel moves, emitting assembler instructions. - void Resolve(LParallelMove* parallel_move); - - private: - // Build the initial list of moves. - void BuildInitialMoveList(LParallelMove* parallel_move); - - // Perform the move at the moves_ index in question (possibly requiring - // other moves to satisfy dependencies). - void PerformMove(int index); - - // If a cycle is found in the series of moves, save the blocking value to - // a scratch register. The cycle must be found by hitting the root of the - // depth-first search. - void BreakCycle(int index); - - // After a cycle has been resolved, restore the value from the scratch - // register to its proper destination. - void RestoreValue(); - - // Emit a move and remove it from the move graph. - void EmitMove(int index); - - // Verify the move list before performing moves. - void Verify(); - - LCodeGen* cgen_; - - // List of moves not yet resolved. - ZoneList<LMoveOperands> moves_; - - int root_index_; - bool in_cycle_; - LOperand* saved_destination_; -}; - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_MIPS64_LITHIUM_GAP_RESOLVER_MIPS_H_ diff --git a/src/crankshaft/mips64/lithium-mips64.cc b/src/crankshaft/mips64/lithium-mips64.cc deleted file mode 100644 index ba891097ce..0000000000 --- a/src/crankshaft/mips64/lithium-mips64.cc +++ /dev/null @@ -1,2334 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/mips64/lithium-mips64.h" - -#include <sstream> - -#if V8_TARGET_ARCH_MIPS64 - -#include "src/crankshaft/lithium-inl.h" -#include "src/crankshaft/mips64/lithium-codegen-mips64.h" - -namespace v8 { -namespace internal { - -#define DEFINE_COMPILE(type) \ - void L##type::CompileToNative(LCodeGen* generator) { \ - generator->Do##type(this); \ - } -LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE) -#undef DEFINE_COMPILE - -#ifdef DEBUG -void LInstruction::VerifyCall() { - // Call instructions can use only fixed registers as temporaries and - // outputs because all registers are blocked by the calling convention. - // Inputs operands must use a fixed register or use-at-start policy or - // a non-register policy. 
- DCHECK(Output() == NULL || - LUnallocated::cast(Output())->HasFixedPolicy() || - !LUnallocated::cast(Output())->HasRegisterPolicy()); - for (UseIterator it(this); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - DCHECK(operand->HasFixedPolicy() || - operand->IsUsedAtStart()); - } - for (TempIterator it(this); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy()); - } -} -#endif - - -void LInstruction::PrintTo(StringStream* stream) { - stream->Add("%s ", this->Mnemonic()); - - PrintOutputOperandTo(stream); - - PrintDataTo(stream); - - if (HasEnvironment()) { - stream->Add(" "); - environment()->PrintTo(stream); - } - - if (HasPointerMap()) { - stream->Add(" "); - pointer_map()->PrintTo(stream); - } -} - - -void LInstruction::PrintDataTo(StringStream* stream) { - stream->Add("= "); - for (int i = 0; i < InputCount(); i++) { - if (i > 0) stream->Add(" "); - if (InputAt(i) == NULL) { - stream->Add("NULL"); - } else { - InputAt(i)->PrintTo(stream); - } - } -} - - -void LInstruction::PrintOutputOperandTo(StringStream* stream) { - if (HasResult()) result()->PrintTo(stream); -} - - -void LLabel::PrintDataTo(StringStream* stream) { - LGap::PrintDataTo(stream); - LLabel* rep = replacement(); - if (rep != NULL) { - stream->Add(" Dead block replaced with B%d", rep->block_id()); - } -} - - -bool LGap::IsRedundant() const { - for (int i = 0; i < 4; i++) { - if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) { - return false; - } - } - - return true; -} - - -void LGap::PrintDataTo(StringStream* stream) { - for (int i = 0; i < 4; i++) { - stream->Add("("); - if (parallel_moves_[i] != NULL) { - parallel_moves_[i]->PrintDataTo(stream); - } - stream->Add(") "); - } -} - - -const char* LArithmeticD::Mnemonic() const { - switch (op()) { - case Token::ADD: return "add-d"; - case Token::SUB: return "sub-d"; - case Token::MUL: return "mul-d"; - case Token::DIV: return "div-d"; - case Token::MOD: return "mod-d"; - default: - UNREACHABLE(); - } -} - - -const char* LArithmeticT::Mnemonic() const { - switch (op()) { - case Token::ADD: return "add-t"; - case Token::SUB: return "sub-t"; - case Token::MUL: return "mul-t"; - case Token::MOD: return "mod-t"; - case Token::DIV: return "div-t"; - case Token::BIT_AND: return "bit-and-t"; - case Token::BIT_OR: return "bit-or-t"; - case Token::BIT_XOR: return "bit-xor-t"; - case Token::ROR: return "ror-t"; - case Token::SHL: return "sll-t"; - case Token::SAR: return "sra-t"; - case Token::SHR: return "srl-t"; - default: - UNREACHABLE(); - } -} - - -bool LGoto::HasInterestingComment(LCodeGen* gen) const { - return !gen->IsNextEmittedBlock(block_id()); -} - - -void LGoto::PrintDataTo(StringStream* stream) { - stream->Add("B%d", block_id()); -} - - -void LBranch::PrintDataTo(StringStream* stream) { - stream->Add("B%d | B%d on ", true_block_id(), false_block_id()); - value()->PrintTo(stream); -} - - -LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) { - return new(zone()) LDebugBreak(); -} - - -void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if "); - left()->PrintTo(stream); - stream->Add(" %s ", Token::String(op())); - right()->PrintTo(stream); - stream->Add(" then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LIsStringAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_string("); - value()->PrintTo(stream); - stream->Add(") then B%d else 
B%d", true_block_id(), false_block_id()); -} - - -void LIsSmiAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_smi("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_undetectable("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LStringCompareAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if string_compare("); - left()->PrintTo(stream); - right()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if has_instance_type("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - -void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if class_of_test("); - value()->PrintTo(stream); - stream->Add(", \"%o\") then B%d else B%d", *hydrogen()->class_name(), - true_block_id(), false_block_id()); -} - -void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if typeof "); - value()->PrintTo(stream); - stream->Add(" == \"%s\" then B%d else B%d", - hydrogen()->type_literal()->ToCString().get(), - true_block_id(), false_block_id()); -} - - -void LStoreCodeEntry::PrintDataTo(StringStream* stream) { - stream->Add(" = "); - function()->PrintTo(stream); - stream->Add(".code_entry = "); - code_object()->PrintTo(stream); -} - - -void LInnerAllocatedObject::PrintDataTo(StringStream* stream) { - stream->Add(" = "); - base_object()->PrintTo(stream); - stream->Add(" + "); - offset()->PrintTo(stream); -} - - -void LCallWithDescriptor::PrintDataTo(StringStream* stream) { - for (int i = 0; i < InputCount(); i++) { - InputAt(i)->PrintTo(stream); - stream->Add(" "); - } - stream->Add("#%d / ", arity()); -} - - -void LLoadContextSlot::PrintDataTo(StringStream* stream) { - context()->PrintTo(stream); - stream->Add("[%d]", slot_index()); -} - - -void LStoreContextSlot::PrintDataTo(StringStream* stream) { - context()->PrintTo(stream); - stream->Add("[%d] <- ", slot_index()); - value()->PrintTo(stream); -} - - -void LInvokeFunction::PrintDataTo(StringStream* stream) { - stream->Add("= "); - function()->PrintTo(stream); - stream->Add(" #%d / ", arity()); -} - - -void LCallNewArray::PrintDataTo(StringStream* stream) { - stream->Add("= "); - constructor()->PrintTo(stream); - stream->Add(" #%d / ", arity()); - ElementsKind kind = hydrogen()->elements_kind(); - stream->Add(" (%s) ", ElementsKindToString(kind)); -} - - -void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { - arguments()->PrintTo(stream); - stream->Add(" length "); - length()->PrintTo(stream); - stream->Add(" index "); - index()->PrintTo(stream); -} - - -void LStoreNamedField::PrintDataTo(StringStream* stream) { - object()->PrintTo(stream); - std::ostringstream os; - os << hydrogen()->access() << " <- "; - stream->Add(os.str().c_str()); - value()->PrintTo(stream); -} - - -void LLoadKeyed::PrintDataTo(StringStream* stream) { - elements()->PrintTo(stream); - stream->Add("["); - key()->PrintTo(stream); - if (hydrogen()->IsDehoisted()) { - stream->Add(" + %d]", base_offset()); - } else { - stream->Add("]"); - } -} - - -void LStoreKeyed::PrintDataTo(StringStream* stream) { - elements()->PrintTo(stream); - stream->Add("["); - key()->PrintTo(stream); - if (hydrogen()->IsDehoisted()) { - 
stream->Add(" + %d] <-", base_offset()); - } else { - stream->Add("] <- "); - } - - if (value() == NULL) { - DCHECK(hydrogen()->IsConstantHoleStore() && - hydrogen()->value()->representation().IsDouble()); - stream->Add(""); - } else { - value()->PrintTo(stream); - } -} - - -void LTransitionElementsKind::PrintDataTo(StringStream* stream) { - object()->PrintTo(stream); - stream->Add(" %p -> %p", *original_map(), *transitioned_map()); -} - - -int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) { - // Skip a slot if for a double-width slot. - if (kind == DOUBLE_REGISTERS) current_frame_slots_++; - return current_frame_slots_++; -} - - -LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) { - int index = GetNextSpillIndex(kind); - if (kind == DOUBLE_REGISTERS) { - return LDoubleStackSlot::Create(index, zone()); - } else { - DCHECK(kind == GENERAL_REGISTERS); - return LStackSlot::Create(index, zone()); - } -} - - -LPlatformChunk* LChunkBuilder::Build() { - DCHECK(is_unused()); - chunk_ = new(zone()) LPlatformChunk(info(), graph()); - LPhase phase("L_Building chunk", chunk_); - status_ = BUILDING; - - const ZoneList* blocks = graph()->blocks(); - for (int i = 0; i < blocks->length(); i++) { - HBasicBlock* next = NULL; - if (i < blocks->length() - 1) next = blocks->at(i + 1); - DoBasicBlock(blocks->at(i), next); - if (is_aborted()) return NULL; - } - status_ = DONE; - return chunk_; -} - - -LUnallocated* LChunkBuilder::ToUnallocated(Register reg) { - return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code()); -} - - -LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) { - return new (zone()) - LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code()); -} - - -LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) { - return Use(value, ToUnallocated(fixed_register)); -} - - -LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) { - return Use(value, ToUnallocated(reg)); -} - - -LOperand* LChunkBuilder::UseRegister(HValue* value) { - return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER)); -} - - -LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) { - return Use(value, - new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER, - LUnallocated::USED_AT_START)); -} - - -LOperand* LChunkBuilder::UseTempRegister(HValue* value) { - return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER)); -} - - -LOperand* LChunkBuilder::Use(HValue* value) { - return Use(value, new(zone()) LUnallocated(LUnallocated::NONE)); -} - - -LOperand* LChunkBuilder::UseAtStart(HValue* value) { - return Use(value, new(zone()) LUnallocated(LUnallocated::NONE, - LUnallocated::USED_AT_START)); -} - - -LOperand* LChunkBuilder::UseOrConstant(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : Use(value); -} - - -LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseAtStart(value); -} - - -LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseRegister(value); -} - - -LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) { - return value->IsConstant() - ? 
chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseRegisterAtStart(value); -} - - -LOperand* LChunkBuilder::UseConstant(HValue* value) { - return chunk_->DefineConstantOperand(HConstant::cast(value)); -} - - -LOperand* LChunkBuilder::UseAny(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : Use(value, new(zone()) LUnallocated(LUnallocated::ANY)); -} - - -LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) { - if (value->EmitAtUses()) { - HInstruction* instr = HInstruction::cast(value); - VisitInstruction(instr); - } - operand->set_virtual_register(value->id()); - return operand; -} - - -LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr, - LUnallocated* result) { - result->set_virtual_register(current_instruction_->id()); - instr->set_result(result); - return instr; -} - - -LInstruction* LChunkBuilder::DefineAsRegister( - LTemplateResultInstruction<1>* instr) { - return Define(instr, - new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER)); -} - - -LInstruction* LChunkBuilder::DefineAsSpilled( - LTemplateResultInstruction<1>* instr, int index) { - return Define(instr, - new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index)); -} - - -LInstruction* LChunkBuilder::DefineSameAsFirst( - LTemplateResultInstruction<1>* instr) { - return Define(instr, - new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT)); -} - - -LInstruction* LChunkBuilder::DefineFixed( - LTemplateResultInstruction<1>* instr, Register reg) { - return Define(instr, ToUnallocated(reg)); -} - - -LInstruction* LChunkBuilder::DefineFixedDouble( - LTemplateResultInstruction<1>* instr, DoubleRegister reg) { - return Define(instr, ToUnallocated(reg)); -} - - -LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { - HEnvironment* hydrogen_env = current_block_->last_environment(); - return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env); -} - - -LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, - HInstruction* hinstr, - CanDeoptimize can_deoptimize) { - info()->MarkAsNonDeferredCalling(); -#ifdef DEBUG - instr->VerifyCall(); -#endif - instr->MarkAsCall(); - instr = AssignPointerMap(instr); - - // If instruction does not have side-effects lazy deoptimization - // after the call will try to deoptimize to the point before the call. - // Thus we still need to attach environment to this call even if - // call sequence can not deoptimize eagerly. - bool needs_environment = - (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || - !hinstr->HasObservableSideEffects(); - if (needs_environment && !instr->HasEnvironment()) { - instr = AssignEnvironment(instr); - // We can't really figure out if the environment is needed or not. 
- instr->environment()->set_has_been_used(); - } - - return instr; -} - - -LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { - DCHECK(!instr->HasPointerMap()); - instr->set_pointer_map(new(zone()) LPointerMap(zone())); - return instr; -} - - -LUnallocated* LChunkBuilder::TempRegister() { - LUnallocated* operand = - new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER); - int vreg = allocator_->GetVirtualRegister(); - if (!allocator_->AllocationOk()) { - Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister); - vreg = 0; - } - operand->set_virtual_register(vreg); - return operand; -} - - -LUnallocated* LChunkBuilder::TempDoubleRegister() { - LUnallocated* operand = - new(zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER); - int vreg = allocator_->GetVirtualRegister(); - if (!allocator_->AllocationOk()) { - Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister); - vreg = 0; - } - operand->set_virtual_register(vreg); - return operand; -} - - -LOperand* LChunkBuilder::FixedTemp(Register reg) { - LUnallocated* operand = ToUnallocated(reg); - DCHECK(operand->HasFixedPolicy()); - return operand; -} - - -LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) { - LUnallocated* operand = ToUnallocated(reg); - DCHECK(operand->HasFixedPolicy()); - return operand; -} - - -LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) { - return new(zone()) LLabel(instr->block()); -} - - -LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) { - return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value()))); -} - - -LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) { - UNREACHABLE(); -} - - -LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) { - return AssignEnvironment(new(zone()) LDeoptimize); -} - - -LInstruction* LChunkBuilder::DoShift(Token::Value op, - HBitwiseBinaryOperation* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* left = UseRegisterAtStart(instr->left()); - - HValue* right_value = instr->right(); - LOperand* right = NULL; - int constant_value = 0; - bool does_deopt = false; - if (right_value->IsConstant()) { - HConstant* constant = HConstant::cast(right_value); - right = chunk_->DefineConstantOperand(constant); - constant_value = constant->Integer32Value() & 0x1f; - // Left shifts can deoptimize if we shift by > 0 and the result cannot be - // truncated to smi. - if (instr->representation().IsSmi() && constant_value > 0) { - does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi); - } - } else { - right = UseRegisterAtStart(right_value); - } - - // Shift operations can only deoptimize if we do a logical shift - // by 0 and the result cannot be truncated to int32. - if (op == Token::SHR && constant_value == 0) { - does_deopt = !instr->CheckFlag(HInstruction::kUint32); - } - - LInstruction* result = - DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt)); - return does_deopt ? 
AssignEnvironment(result) : result; - } else { - return DoArithmeticT(op, instr); - } -} - - -LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op, - HArithmeticBinaryOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - DCHECK(instr->right()->representation().IsDouble()); - if (op == Token::MOD) { - LOperand* left = UseFixedDouble(instr->left(), f2); - LOperand* right = UseFixedDouble(instr->right(), f4); - LArithmeticD* result = new(zone()) LArithmeticD(op, left, right); - // We call a C function for double modulo. It can't trigger a GC. We need - // to use fixed result register for the call. - // TODO(fschneider): Allow any register as input registers. - return MarkAsCall(DefineFixedDouble(result, f2), instr); - } else { - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterAtStart(instr->right()); - LArithmeticD* result = new(zone()) LArithmeticD(op, left, right); - return DefineAsRegister(result); - } -} - - -LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op, - HBinaryOperation* instr) { - HValue* left = instr->left(); - HValue* right = instr->right(); - DCHECK(left->representation().IsTagged()); - DCHECK(right->representation().IsTagged()); - LOperand* context = UseFixed(instr->context(), cp); - LOperand* left_operand = UseFixed(left, a1); - LOperand* right_operand = UseFixed(right, a0); - LArithmeticT* result = - new(zone()) LArithmeticT(op, context, left_operand, right_operand); - return MarkAsCall(DefineFixed(result, v0), instr); -} - - -void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { - DCHECK(is_building()); - current_block_ = block; - next_block_ = next_block; - if (block->IsStartBlock()) { - block->UpdateEnvironment(graph_->start_environment()); - argument_count_ = 0; - } else if (block->predecessors()->length() == 1) { - // We have a single predecessor => copy environment and outgoing - // argument count from the predecessor. - DCHECK(block->phis()->length() == 0); - HBasicBlock* pred = block->predecessors()->at(0); - HEnvironment* last_environment = pred->last_environment(); - DCHECK(last_environment != NULL); - // Only copy the environment, if it is later used again. - if (pred->end()->SecondSuccessor() == NULL) { - DCHECK(pred->end()->FirstSuccessor() == block); - } else { - if (pred->end()->FirstSuccessor()->block_id() > block->block_id() || - pred->end()->SecondSuccessor()->block_id() > block->block_id()) { - last_environment = last_environment->Copy(); - } - } - block->UpdateEnvironment(last_environment); - DCHECK(pred->argument_count() >= 0); - argument_count_ = pred->argument_count(); - } else { - // We are at a state join => process phis. - HBasicBlock* pred = block->predecessors()->at(0); - // No need to copy the environment, it cannot be used later. - HEnvironment* last_environment = pred->last_environment(); - for (int i = 0; i < block->phis()->length(); ++i) { - HPhi* phi = block->phis()->at(i); - if (phi->HasMergedIndex()) { - last_environment->SetValueAt(phi->merged_index(), phi); - } - } - for (int i = 0; i < block->deleted_phis()->length(); ++i) { - if (block->deleted_phis()->at(i) < last_environment->length()) { - last_environment->SetValueAt(block->deleted_phis()->at(i), - graph_->GetConstantUndefined()); - } - } - block->UpdateEnvironment(last_environment); - // Pick up the outgoing argument count of one of the predecessors. 
- argument_count_ = pred->argument_count(); - } - HInstruction* current = block->first(); - int start = chunk_->instructions()->length(); - while (current != NULL && !is_aborted()) { - // Code for constants in registers is generated lazily. - if (!current->EmitAtUses()) { - VisitInstruction(current); - } - current = current->next(); - } - int end = chunk_->instructions()->length() - 1; - if (end >= start) { - block->set_first_instruction_index(start); - block->set_last_instruction_index(end); - } - block->set_argument_count(argument_count_); - next_block_ = NULL; - current_block_ = NULL; -} - - -void LChunkBuilder::VisitInstruction(HInstruction* current) { - HInstruction* old_current = current_instruction_; - current_instruction_ = current; - - LInstruction* instr = NULL; - if (current->CanReplaceWithDummyUses()) { - if (current->OperandCount() == 0) { - instr = DefineAsRegister(new(zone()) LDummy()); - } else { - DCHECK(!current->OperandAt(0)->IsControlInstruction()); - instr = DefineAsRegister(new(zone()) - LDummyUse(UseAny(current->OperandAt(0)))); - } - for (int i = 1; i < current->OperandCount(); ++i) { - if (current->OperandAt(i)->IsControlInstruction()) continue; - LInstruction* dummy = - new(zone()) LDummyUse(UseAny(current->OperandAt(i))); - dummy->set_hydrogen_value(current); - chunk_->AddInstruction(dummy, current_block_); - } - } else { - HBasicBlock* successor; - if (current->IsControlInstruction() && - HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) && - successor != NULL) { - instr = new(zone()) LGoto(successor); - } else { - instr = current->CompileToLithium(this); - } - } - - argument_count_ += current->argument_delta(); - DCHECK(argument_count_ >= 0); - - if (instr != NULL) { - AddInstruction(instr, current); - } - - current_instruction_ = old_current; -} - - -void LChunkBuilder::AddInstruction(LInstruction* instr, - HInstruction* hydrogen_val) { -// Associate the hydrogen instruction first, since we may need it for - // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below. - instr->set_hydrogen_value(hydrogen_val); - -#if DEBUG - // Make sure that the lithium instruction has either no fixed register - // constraints in temps or the result OR no uses that are only used at - // start. If this invariant doesn't hold, the register allocator can decide - // to insert a split of a range immediately before the instruction due to an - // already allocated register needing to be used for the instruction's fixed - // register constraint. In this case, The register allocator won't see an - // interference between the split child and the use-at-start (it would if - // the it was just a plain use), so it is free to move the split child into - // the same register that is used for the use-at-start. 
- // See https://code.google.com/p/chromium/issues/detail?id=201590 - if (!(instr->ClobbersRegisters() && - instr->ClobbersDoubleRegisters(isolate()))) { - int fixed = 0; - int used_at_start = 0; - for (UseIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->IsUsedAtStart()) ++used_at_start; - } - if (instr->Output() != NULL) { - if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed; - } - for (TempIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->HasFixedPolicy()) ++fixed; - } - DCHECK(fixed == 0 || used_at_start == 0); - } -#endif - - if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) { - instr = AssignPointerMap(instr); - } - if (FLAG_stress_environments && !instr->HasEnvironment()) { - instr = AssignEnvironment(instr); - } - chunk_->AddInstruction(instr, current_block_); - - CreateLazyBailoutForCall(current_block_, instr, hydrogen_val); -} - - -LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) { - LInstruction* result = new (zone()) LPrologue(); - if (info_->scope()->NeedsContext()) { - result = MarkAsCall(result, instr); - } - return result; -} - - -LInstruction* LChunkBuilder::DoGoto(HGoto* instr) { - return new(zone()) LGoto(instr->FirstSuccessor()); -} - - -LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { - HValue* value = instr->value(); - Representation r = value->representation(); - HType type = value->type(); - ToBooleanHints expected = instr->expected_input_types(); - if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny; - - bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() || - type.IsJSArray() || type.IsHeapNumber() || type.IsString(); - LInstruction* branch = new(zone()) LBranch(UseRegister(value)); - if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) && - (expected & ToBooleanHint::kNeedsMap)) || - expected != ToBooleanHint::kAny)) { - branch = AssignEnvironment(branch); - } - return branch; -} - - -LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* temp = TempRegister(); - return new(zone()) LCmpMapAndBranch(value, temp); -} - - -LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) { - info()->MarkAsRequiresFrame(); - return DefineAsRegister( - new(zone()) LArgumentsLength(UseRegister(length->value()))); -} - - -LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) { - info()->MarkAsRequiresFrame(); - return DefineAsRegister(new(zone()) LArgumentsElements); -} - - -LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch( - HHasInPrototypeChainAndBranch* instr) { - LOperand* object = UseRegister(instr->object()); - LOperand* prototype = UseRegister(instr->prototype()); - LHasInPrototypeChainAndBranch* result = - new (zone()) LHasInPrototypeChainAndBranch(object, prototype); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) { - LOperand* receiver = UseRegisterAtStart(instr->receiver()); - LOperand* function = UseRegisterAtStart(instr->function()); - LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function); - return AssignEnvironment(DefineAsRegister(result)); -} - - -LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) { - LOperand* function = UseFixed(instr->function(), a1); - LOperand* 
receiver = UseFixed(instr->receiver(), a0); - LOperand* length = UseFixed(instr->length(), a2); - LOperand* elements = UseFixed(instr->elements(), a3); - LApplyArguments* result = new(zone()) LApplyArguments(function, - receiver, - length, - elements); - return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) { - int argc = instr->OperandCount(); - for (int i = 0; i < argc; ++i) { - LOperand* argument = Use(instr->argument(i)); - AddInstruction(new(zone()) LPushArgument(argument), instr); - } - return NULL; -} - - -LInstruction* LChunkBuilder::DoStoreCodeEntry( - HStoreCodeEntry* store_code_entry) { - LOperand* function = UseRegister(store_code_entry->function()); - LOperand* code_object = UseTempRegister(store_code_entry->code_object()); - return new(zone()) LStoreCodeEntry(function, code_object); -} - - -LInstruction* LChunkBuilder::DoInnerAllocatedObject( - HInnerAllocatedObject* instr) { - LOperand* base_object = UseRegisterAtStart(instr->base_object()); - LOperand* offset = UseRegisterOrConstantAtStart(instr->offset()); - return DefineAsRegister( - new(zone()) LInnerAllocatedObject(base_object, offset)); -} - - -LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) { - return instr->HasNoUses() - ? NULL - : DefineAsRegister(new(zone()) LThisFunction); -} - - -LInstruction* LChunkBuilder::DoContext(HContext* instr) { - if (instr->HasNoUses()) return NULL; - - if (info()->IsStub()) { - return DefineFixed(new(zone()) LContext, cp); - } - - return DefineAsRegister(new(zone()) LContext); -} - - -LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) { - LOperand* context = UseFixed(instr->context(), cp); - return MarkAsCall(new(zone()) LDeclareGlobals(context), instr); -} - - -LInstruction* LChunkBuilder::DoCallWithDescriptor( - HCallWithDescriptor* instr) { - CallInterfaceDescriptor descriptor = instr->descriptor(); - DCHECK_EQ(descriptor.GetParameterCount() + - LCallWithDescriptor::kImplicitRegisterParameterCount, - instr->OperandCount()); - - LOperand* target = UseRegisterOrConstantAtStart(instr->target()); - ZoneList ops(instr->OperandCount(), zone()); - // Target - ops.Add(target, zone()); - // Context - LOperand* op = UseFixed(instr->OperandAt(1), cp); - ops.Add(op, zone()); - // Load register parameters. - int i = 0; - for (; i < descriptor.GetRegisterParameterCount(); i++) { - op = UseFixed(instr->OperandAt( - i + LCallWithDescriptor::kImplicitRegisterParameterCount), - descriptor.GetRegisterParameter(i)); - ops.Add(op, zone()); - } - // Push stack parameters. 
- for (; i < descriptor.GetParameterCount(); i++) { - op = UseAny(instr->OperandAt( - i + LCallWithDescriptor::kImplicitRegisterParameterCount)); - AddInstruction(new (zone()) LPushArgument(op), instr); - } - - LCallWithDescriptor* result = new(zone()) LCallWithDescriptor( - descriptor, ops, zone()); - if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) { - result->MarkAsSyntacticTailCall(); - } - return MarkAsCall(DefineFixed(result, v0), instr); -} - - -LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) { - LOperand* context = UseFixed(instr->context(), cp); - LOperand* function = UseFixed(instr->function(), a1); - LInvokeFunction* result = new(zone()) LInvokeFunction(context, function); - if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) { - result->MarkAsSyntacticTailCall(); - } - return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { - switch (instr->op()) { - case kMathFloor: - return DoMathFloor(instr); - case kMathRound: - return DoMathRound(instr); - case kMathFround: - return DoMathFround(instr); - case kMathAbs: - return DoMathAbs(instr); - case kMathLog: - return DoMathLog(instr); - case kMathCos: - return DoMathCos(instr); - case kMathSin: - return DoMathSin(instr); - case kMathExp: - return DoMathExp(instr); - case kMathSqrt: - return DoMathSqrt(instr); - case kMathPowHalf: - return DoMathPowHalf(instr); - case kMathClz32: - return DoMathClz32(instr); - default: - UNREACHABLE(); - } -} - - -LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseFixedDouble(instr->value(), f4); - return MarkAsCall(DefineFixedDouble(new(zone()) LMathLog(input), f4), instr); -} - - -LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) { - LOperand* input = UseRegisterAtStart(instr->value()); - LMathClz32* result = new(zone()) LMathClz32(input); - return DefineAsRegister(result); -} - -LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseFixedDouble(instr->value(), f4); - return MarkAsCall(DefineFixedDouble(new (zone()) LMathCos(input), f4), instr); -} - -LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseFixedDouble(instr->value(), f4); - return MarkAsCall(DefineFixedDouble(new (zone()) LMathSin(input), f4), instr); -} - -LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseFixedDouble(instr->value(), f4); - return MarkAsCall(DefineFixedDouble(new (zone()) LMathExp(input), f4), instr); -} - - -LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) { - // Input cannot be the same as the result, see LCodeGen::DoMathPowHalf. 
- LOperand* input = UseFixedDouble(instr->value(), f8); - LOperand* temp = TempDoubleRegister(); - LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp); - return DefineFixedDouble(result, f4); -} - - -LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) { - LOperand* input = UseRegister(instr->value()); - LMathFround* result = new (zone()) LMathFround(input); - return DefineAsRegister(result); -} - - -LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) { - Representation r = instr->value()->representation(); - LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32()) - ? NULL - : UseFixed(instr->context(), cp); - LOperand* input = UseRegister(instr->value()); - LInstruction* result = - DefineAsRegister(new(zone()) LMathAbs(context, input)); - if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result); - if (!r.IsDouble()) result = AssignEnvironment(result); - return result; -} - - -LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) { - LOperand* input = UseRegister(instr->value()); - LOperand* temp = TempRegister(); - LMathFloor* result = new(zone()) LMathFloor(input, temp); - return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); -} - - -LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) { - LOperand* input = UseRegister(instr->value()); - LMathSqrt* result = new(zone()) LMathSqrt(input); - return DefineAsRegister(result); -} - - -LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) { - LOperand* input = UseRegister(instr->value()); - LOperand* temp = TempDoubleRegister(); - LMathRound* result = new(zone()) LMathRound(input, temp); - return AssignEnvironment(DefineAsRegister(result)); -} - - -LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) { - LOperand* context = UseFixed(instr->context(), cp); - LOperand* constructor = UseFixed(instr->constructor(), a1); - LCallNewArray* result = new(zone()) LCallNewArray(context, constructor); - return MarkAsCall(DefineFixed(result, v0), instr); -} - - -LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) { - LOperand* context = UseFixed(instr->context(), cp); - return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), v0), instr); -} - - -LInstruction* LChunkBuilder::DoRor(HRor* instr) { - return DoShift(Token::ROR, instr); -} - - -LInstruction* LChunkBuilder::DoShr(HShr* instr) { - return DoShift(Token::SHR, instr); -} - - -LInstruction* LChunkBuilder::DoSar(HSar* instr) { - return DoShift(Token::SAR, instr); -} - - -LInstruction* LChunkBuilder::DoShl(HShl* instr) { - return DoShift(Token::SHL, instr); -} - - -LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32)); - - LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); - LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand()); - return DefineAsRegister(new(zone()) LBitI(left, right)); - } else { - return DoArithmeticT(instr->op(), instr); - } -} - - -LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); 
- int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I( - dividend, divisor)); - if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || - (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) || - (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && - divisor != 1 && divisor != -1)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) { - DCHECK(instr->representation().IsInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI( - dividend, divisor)); - if (divisor == 0 || - (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || - !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoDivI(HDiv* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - LOperand* divisor = UseRegister(instr->right()); - LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) - ? NULL : TempRegister(); - LInstruction* result = - DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp)); - if (instr->CheckFlag(HValue::kCanBeDivByZero) || - instr->CheckFlag(HValue::kBailoutOnMinusZero) || - (instr->CheckFlag(HValue::kCanOverflow) && - !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) || - (!instr->IsMathFloorOfDiv() && - !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoDiv(HDiv* instr) { - if (instr->representation().IsSmiOrInteger32()) { - if (instr->RightIsPowerOf2()) { - return DoDivByPowerOf2I(instr); - } else if (instr->right()->IsConstant()) { - return DoDivByConstI(instr); - } else { - return DoDivI(instr); - } - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::DIV, instr); - } else { - return DoArithmeticT(Token::DIV, instr); - } -} - - -LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) { - LOperand* dividend = UseRegisterAtStart(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I( - dividend, divisor)); - if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || - (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) { - DCHECK(instr->representation().IsInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LOperand* temp = - ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) || - (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ? 
- NULL : TempRegister(); - LInstruction* result = DefineAsRegister( - new(zone()) LFlooringDivByConstI(dividend, divisor, temp)); - if (divisor == 0 || - (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - LOperand* divisor = UseRegister(instr->right()); - LInstruction* result = - DefineAsRegister(new (zone()) LFlooringDivI(dividend, divisor)); - if (instr->CheckFlag(HValue::kCanBeDivByZero) || - instr->CheckFlag(HValue::kBailoutOnMinusZero) || - (instr->CheckFlag(HValue::kCanOverflow))) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) { - if (instr->RightIsPowerOf2()) { - return DoFlooringDivByPowerOf2I(instr); - } else if (instr->right()->IsConstant()) { - return DoFlooringDivByConstI(instr); - } else { - return DoFlooringDivI(instr); - } -} - - -LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegisterAtStart(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I( - dividend, divisor)); - if (instr->CheckFlag(HValue::kLeftCanBeNegative) && - instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = DefineAsRegister(new(zone()) LModByConstI( - dividend, divisor)); - if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoModI(HMod* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - LOperand* divisor = UseRegister(instr->right()); - LInstruction* result = DefineAsRegister(new(zone()) LModI( - dividend, divisor)); - if (instr->CheckFlag(HValue::kCanBeDivByZero) || - instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoMod(HMod* instr) { - if (instr->representation().IsSmiOrInteger32()) { - return instr->RightIsPowerOf2() ? 
DoModByPowerOf2I(instr) : DoModI(instr); - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::MOD, instr); - } else { - return DoArithmeticT(Token::MOD, instr); - } -} - - -LInstruction* LChunkBuilder::DoMul(HMul* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - HValue* left = instr->BetterLeftOperand(); - HValue* right = instr->BetterRightOperand(); - LOperand* left_op; - LOperand* right_op; - bool can_overflow = instr->CheckFlag(HValue::kCanOverflow); - bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero); - - int32_t constant_value = 0; - if (right->IsConstant()) { - HConstant* constant = HConstant::cast(right); - constant_value = constant->Integer32Value(); - // Constants -1, 0 and 1 can be optimized if the result can overflow. - // For other constants, it can be optimized only without overflow. - if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) { - left_op = UseRegisterAtStart(left); - right_op = UseConstant(right); - } else { - if (bailout_on_minus_zero) { - left_op = UseRegister(left); - } else { - left_op = UseRegisterAtStart(left); - } - right_op = UseRegister(right); - } - } else { - if (bailout_on_minus_zero) { - left_op = UseRegister(left); - } else { - left_op = UseRegisterAtStart(left); - } - right_op = UseRegister(right); - } - LInstruction* result = - instr->representation().IsSmi() - ? DefineAsRegister(new (zone()) LMulS(left_op, right_op)) - : DefineAsRegister(new (zone()) LMulI(left_op, right_op)); - if (right_op->IsConstantOperand() - ? ((can_overflow && constant_value == -1) || - (bailout_on_minus_zero && constant_value <= 0)) - : (can_overflow || bailout_on_minus_zero)) { - AssignEnvironment(result); - } - return result; - - } else if (instr->representation().IsDouble()) { - if (kArchVariant == kMips64r2) { - if (instr->HasOneUse() && instr->uses().value()->IsAdd()) { - HAdd* add = HAdd::cast(instr->uses().value()); - if (instr == add->left()) { - // This mul is the lhs of an add. The add and mul will be folded - // into a multiply-add. - return NULL; - } - if (instr == add->right() && !add->left()->IsMul()) { - // This mul is the rhs of an add, where the lhs is not another mul. - // The add and mul will be folded into a multiply-add. - return NULL; - } - } - } - return DoArithmeticD(Token::MUL, instr); - } else { - return DoArithmeticT(Token::MUL, instr); - } -} - - -LInstruction* LChunkBuilder::DoSub(HSub* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterOrConstantAtStart(instr->right()); - LInstruction* result = - instr->representation().IsSmi() - ? 
DefineAsRegister(new (zone()) LSubS(left, right)) - : DefineAsRegister(new (zone()) LSubI(left, right)); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::SUB, instr); - } else { - return DoArithmeticT(Token::SUB, instr); - } -} - - -LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) { - LOperand* multiplier_op = UseRegisterAtStart(mul->left()); - LOperand* multiplicand_op = UseRegisterAtStart(mul->right()); - LOperand* addend_op = UseRegisterAtStart(addend); - return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op, - multiplicand_op)); -} - - -LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); - LOperand* right = UseRegisterOrConstantAtStart(instr->BetterRightOperand()); - LInstruction* result = - instr->representation().IsSmi() - ? DefineAsRegister(new (zone()) LAddS(left, right)) - : DefineAsRegister(new (zone()) LAddI(left, right)); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; - } else if (instr->representation().IsExternal()) { - DCHECK(instr->IsConsistentExternalRepresentation()); - DCHECK(!instr->CheckFlag(HValue::kCanOverflow)); - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterOrConstantAtStart(instr->right()); - return DefineAsRegister(new (zone()) LAddE(left, right)); - } else if (instr->representation().IsDouble()) { - if (kArchVariant == kMips64r2) { - if (instr->left()->IsMul()) - return DoMultiplyAdd(HMul::cast(instr->left()), instr->right()); - - if (instr->right()->IsMul()) { - DCHECK(!instr->left()->IsMul()); - return DoMultiplyAdd(HMul::cast(instr->right()), instr->left()); - } - } - return DoArithmeticD(Token::ADD, instr); - } else { - return DoArithmeticT(Token::ADD, instr); - } -} - - -LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) { - LOperand* left = NULL; - LOperand* right = NULL; - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - left = UseRegisterAtStart(instr->BetterLeftOperand()); - right = UseOrConstantAtStart(instr->BetterRightOperand()); - } else { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - DCHECK(instr->right()->representation().IsDouble()); - left = UseRegisterAtStart(instr->left()); - right = UseRegisterAtStart(instr->right()); - } - return DefineAsRegister(new(zone()) LMathMinMax(left, right)); -} - - -LInstruction* LChunkBuilder::DoPower(HPower* instr) { - DCHECK(instr->representation().IsDouble()); - // We call a C function for double power. It can't trigger a GC. - // We need to use fixed result register for the call. - Representation exponent_type = instr->right()->representation(); - DCHECK(instr->left()->representation().IsDouble()); - LOperand* left = UseFixedDouble(instr->left(), f2); - LOperand* right = - exponent_type.IsDouble() - ? 
UseFixedDouble(instr->right(), f4) - : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent()); - LPower* result = new(zone()) LPower(left, right); - return MarkAsCall(DefineFixedDouble(result, f0), - instr, - CAN_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { - DCHECK(instr->left()->representation().IsTagged()); - DCHECK(instr->right()->representation().IsTagged()); - LOperand* context = UseFixed(instr->context(), cp); - LOperand* left = UseFixed(instr->left(), a1); - LOperand* right = UseFixed(instr->right(), a0); - LCmpT* result = new(zone()) LCmpT(context, left, right); - return MarkAsCall(DefineFixed(result, v0), instr); -} - - -LInstruction* LChunkBuilder::DoCompareNumericAndBranch( - HCompareNumericAndBranch* instr) { - Representation r = instr->representation(); - if (r.IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(r)); - DCHECK(instr->right()->representation().Equals(r)); - LOperand* left = UseRegisterOrConstantAtStart(instr->left()); - LOperand* right = UseRegisterOrConstantAtStart(instr->right()); - return new(zone()) LCompareNumericAndBranch(left, right); - } else { - DCHECK(r.IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - DCHECK(instr->right()->representation().IsDouble()); - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterAtStart(instr->right()); - return new(zone()) LCompareNumericAndBranch(left, right); - } -} - - -LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch( - HCompareObjectEqAndBranch* instr) { - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterAtStart(instr->right()); - return new(zone()) LCmpObjectEqAndBranch(left, right); -} - - -LInstruction* LChunkBuilder::DoCompareHoleAndBranch( - HCompareHoleAndBranch* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - return new(zone()) LCmpHoleAndBranch(value); -} - - -LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* temp = TempRegister(); - return new(zone()) LIsStringAndBranch(UseRegisterAtStart(instr->value()), - temp); -} - - -LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - return new(zone()) LIsSmiAndBranch(Use(instr->value())); -} - - -LInstruction* LChunkBuilder::DoIsUndetectableAndBranch( - HIsUndetectableAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - return new(zone()) LIsUndetectableAndBranch( - UseRegisterAtStart(instr->value()), TempRegister()); -} - - -LInstruction* LChunkBuilder::DoStringCompareAndBranch( - HStringCompareAndBranch* instr) { - DCHECK(instr->left()->representation().IsTagged()); - DCHECK(instr->right()->representation().IsTagged()); - LOperand* context = UseFixed(instr->context(), cp); - LOperand* left = UseFixed(instr->left(), a1); - LOperand* right = UseFixed(instr->right(), a0); - LStringCompareAndBranch* result = - new(zone()) LStringCompareAndBranch(context, left, right); - return MarkAsCall(result, instr); -} - - -LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch( - HHasInstanceTypeAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - return new(zone()) LHasInstanceTypeAndBranch(value); -} - -LInstruction* LChunkBuilder::DoClassOfTestAndBranch( - HClassOfTestAndBranch* instr) { - 
DCHECK(instr->value()->representation().IsTagged()); - return new (zone()) - LClassOfTestAndBranch(UseRegister(instr->value()), TempRegister()); -} - -LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) { - LOperand* string = UseRegisterAtStart(instr->string()); - LOperand* index = UseRegisterOrConstantAtStart(instr->index()); - return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index)); -} - - -LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) { - LOperand* string = UseRegisterAtStart(instr->string()); - LOperand* index = FLAG_debug_code - ? UseRegisterAtStart(instr->index()) - : UseRegisterOrConstantAtStart(instr->index()); - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL; - return new(zone()) LSeqStringSetChar(context, string, index, value); -} - - -LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { - if (!FLAG_debug_code && instr->skip_check()) return NULL; - LOperand* index = UseRegisterOrConstantAtStart(instr->index()); - LOperand* length = !index->IsConstantOperand() - ? UseRegisterOrConstantAtStart(instr->length()) - : UseRegisterAtStart(instr->length()); - LInstruction* result = new(zone()) LBoundsCheck(index, length); - if (!FLAG_debug_code || !instr->skip_check()) { - result = AssignEnvironment(result); - } -return result; -} - - -LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) { - // The control instruction marking the end of a block that completed - // abruptly (e.g., threw an exception). There is nothing specific to do. - return NULL; -} - - -LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) { - return NULL; -} - - -LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) { - // All HForceRepresentation instructions should be eliminated in the - // representation change phase of Hydrogen. 
- UNREACHABLE(); -} - - -LInstruction* LChunkBuilder::DoChange(HChange* instr) { - Representation from = instr->from(); - Representation to = instr->to(); - HValue* val = instr->value(); - if (from.IsSmi()) { - if (to.IsTagged()) { - LOperand* value = UseRegister(val); - return DefineSameAsFirst(new(zone()) LDummyUse(value)); - } - from = Representation::Tagged(); - } - if (from.IsTagged()) { - if (to.IsDouble()) { - LOperand* value = UseRegister(val); - LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value)); - if (!val->representation().IsSmi()) result = AssignEnvironment(result); - return result; - } else if (to.IsSmi()) { - LOperand* value = UseRegister(val); - if (val->type().IsSmi()) { - return DefineSameAsFirst(new(zone()) LDummyUse(value)); - } - return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value))); - } else { - DCHECK(to.IsInteger32()); - if (val->type().IsSmi() || val->representation().IsSmi()) { - LOperand* value = UseRegisterAtStart(val); - return DefineAsRegister(new(zone()) LSmiUntag(value, false)); - } else { - LOperand* value = UseRegister(val); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempDoubleRegister(); - LInstruction* result = - DefineSameAsFirst(new(zone()) LTaggedToI(value, temp1, temp2)); - if (!val->representation().IsSmi()) result = AssignEnvironment(result); - return result; - } - } - } else if (from.IsDouble()) { - if (to.IsTagged()) { - info()->MarkAsDeferredCalling(); - LOperand* value = UseRegister(val); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - - LUnallocated* result_temp = TempRegister(); - LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2); - return AssignPointerMap(Define(result, result_temp)); - } else if (to.IsSmi()) { - LOperand* value = UseRegister(val); - return AssignEnvironment( - DefineAsRegister(new(zone()) LDoubleToSmi(value))); - } else { - DCHECK(to.IsInteger32()); - LOperand* value = UseRegister(val); - LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value)); - if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result); - return result; - } - } else if (from.IsInteger32()) { - info()->MarkAsDeferredCalling(); - if (to.IsTagged()) { - if (val->CheckFlag(HInstruction::kUint32)) { - LOperand* value = UseRegisterAtStart(val); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2); - return AssignPointerMap(DefineAsRegister(result)); - } else { - STATIC_ASSERT((kMinInt == Smi::kMinValue) && - (kMaxInt == Smi::kMaxValue)); - LOperand* value = UseRegisterAtStart(val); - return DefineAsRegister(new(zone()) LSmiTag(value)); - } - } else if (to.IsSmi()) { - LOperand* value = UseRegister(val); - LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value)); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; - } else { - DCHECK(to.IsDouble()); - if (val->CheckFlag(HInstruction::kUint32)) { - return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val))); - } else { - return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val))); - } - } - } - UNREACHABLE(); -} - - -LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - LInstruction* result = new(zone()) LCheckNonSmi(value); - if (!instr->value()->type().IsHeapObject()) { - result = AssignEnvironment(result); - } - return result; -} - - 
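The Integer32-to-Tagged branch of DoChange above emits LSmiTag without an environment because, on this 64-bit port, every int32 value already has a smi representation (hence the STATIC_ASSERT that kMinInt == Smi::kMinValue and kMaxInt == Smi::kMaxValue), so tagging can never overflow and needs no deopt point. Below is a minimal standalone sketch of that invariant, assuming the usual upper-half smi encoding of 64-bit ports; SmiTag/SmiUntag here are hypothetical helpers for illustration, not V8's actual Smi implementation.

#include <cassert>
#include <cstdint>
#include <limits>

// Hypothetical helper (not V8 code): place the 32-bit payload in the upper
// half of a 64-bit word; widening through uint32_t keeps the shift well
// defined for negative inputs.
std::uint64_t SmiTag(std::int32_t value) {
  return static_cast<std::uint64_t>(static_cast<std::uint32_t>(value)) << 32;
}

// Hypothetical helper: taking the upper word back recovers the payload.
std::int32_t SmiUntag(std::uint64_t smi) {
  return static_cast<std::int32_t>(smi >> 32);
}

int main() {
  // The whole int32 range round-trips, so Integer32 -> Smi tagging cannot
  // overflow on such a layout.
  assert(SmiUntag(SmiTag(std::numeric_limits<std::int32_t>::min())) ==
         std::numeric_limits<std::int32_t>::min());
  assert(SmiUntag(SmiTag(std::numeric_limits<std::int32_t>::max())) ==
         std::numeric_limits<std::int32_t>::max());
  return 0;
}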
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - return AssignEnvironment(new(zone()) LCheckSmi(value)); -} - - -LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered( - HCheckArrayBufferNotNeutered* instr) { - LOperand* view = UseRegisterAtStart(instr->value()); - LCheckArrayBufferNotNeutered* result = - new (zone()) LCheckArrayBufferNotNeutered(view); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - LInstruction* result = new(zone()) LCheckInstanceType(value); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - return AssignEnvironment(new(zone()) LCheckValue(value)); -} - - -LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) { - if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps; - LOperand* value = UseRegisterAtStart(instr->value()); - LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value)); - if (instr->HasMigrationTarget()) { - info()->MarkAsDeferredCalling(); - result = AssignPointerMap(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) { - HValue* value = instr->value(); - Representation input_rep = value->representation(); - LOperand* reg = UseRegister(value); - if (input_rep.IsDouble()) { - // Revisit this decision, here and 8 lines below. - return DefineAsRegister(new(zone()) LClampDToUint8(reg, - TempDoubleRegister())); - } else if (input_rep.IsInteger32()) { - return DefineAsRegister(new(zone()) LClampIToUint8(reg)); - } else { - DCHECK(input_rep.IsSmiOrTagged()); - LClampTToUint8* result = - new(zone()) LClampTToUint8(reg, TempDoubleRegister()); - return AssignEnvironment(DefineAsRegister(result)); - } -} - - -LInstruction* LChunkBuilder::DoReturn(HReturn* instr) { - LOperand* context = info()->IsStub() - ? 
UseFixed(instr->context(), cp) - : NULL; - LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count()); - return new(zone()) LReturn(UseFixed(instr->value(), v0), context, - parameter_count); -} - - -LInstruction* LChunkBuilder::DoConstant(HConstant* instr) { - Representation r = instr->representation(); - if (r.IsSmi()) { - return DefineAsRegister(new(zone()) LConstantS); - } else if (r.IsInteger32()) { - return DefineAsRegister(new(zone()) LConstantI); - } else if (r.IsDouble()) { - return DefineAsRegister(new(zone()) LConstantD); - } else if (r.IsExternal()) { - return DefineAsRegister(new(zone()) LConstantE); - } else if (r.IsTagged()) { - return DefineAsRegister(new(zone()) LConstantT); - } else { - UNREACHABLE(); - } -} - - -LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) { - LOperand* context = UseRegisterAtStart(instr->value()); - LInstruction* result = - DefineAsRegister(new(zone()) LLoadContextSlot(context)); - if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) { - LOperand* context; - LOperand* value; - if (instr->NeedsWriteBarrier()) { - context = UseTempRegister(instr->context()); - value = UseTempRegister(instr->value()); - } else { - context = UseRegister(instr->context()); - value = UseRegister(instr->value()); - } - LInstruction* result = new(zone()) LStoreContextSlot(context, value); - if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) { - LOperand* obj = UseRegisterAtStart(instr->object()); - return DefineAsRegister(new(zone()) LLoadNamedField(obj)); -} - - -LInstruction* LChunkBuilder::DoLoadFunctionPrototype( - HLoadFunctionPrototype* instr) { - return AssignEnvironment(DefineAsRegister( - new(zone()) LLoadFunctionPrototype(UseRegister(instr->function())))); -} - - -LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) { - return DefineAsRegister(new(zone()) LLoadRoot); -} - - -LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { - DCHECK(instr->key()->representation().IsSmiOrInteger32()); - ElementsKind elements_kind = instr->elements_kind(); - LOperand* key = UseRegisterOrConstantAtStart(instr->key()); - LInstruction* result = NULL; - - if (!instr->is_fixed_typed_array()) { - LOperand* obj = NULL; - if (instr->representation().IsDouble()) { - obj = UseRegister(instr->elements()); - } else { - DCHECK(instr->representation().IsSmiOrTagged() || - instr->representation().IsInteger32()); - obj = UseRegisterAtStart(instr->elements()); - } - result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr)); - } else { - DCHECK( - (instr->representation().IsInteger32() && - !IsDoubleOrFloatElementsKind(elements_kind)) || - (instr->representation().IsDouble() && - IsDoubleOrFloatElementsKind(elements_kind))); - LOperand* backing_store = UseRegister(instr->elements()); - LOperand* backing_store_owner = UseAny(instr->backing_store_owner()); - result = DefineAsRegister( - new (zone()) LLoadKeyed(backing_store, key, backing_store_owner)); - } - - bool needs_environment; - if (instr->is_fixed_typed_array()) { - // see LCodeGen::DoLoadKeyedExternalArray - needs_environment = elements_kind == UINT32_ELEMENTS && - !instr->CheckFlag(HInstruction::kUint32); - } else { - // see LCodeGen::DoLoadKeyedFixedDoubleArray and - // 
LCodeGen::DoLoadKeyedFixedArray - needs_environment = - instr->RequiresHoleCheck() || - (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && info()->IsStub()); - } - - if (needs_environment) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { - if (!instr->is_fixed_typed_array()) { - DCHECK(instr->elements()->representation().IsTagged()); - bool needs_write_barrier = instr->NeedsWriteBarrier(); - LOperand* object = NULL; - LOperand* val = NULL; - LOperand* key = NULL; - - if (instr->value()->representation().IsDouble()) { - object = UseRegisterAtStart(instr->elements()); - key = UseRegisterOrConstantAtStart(instr->key()); - val = UseRegister(instr->value()); - } else { - DCHECK(instr->value()->representation().IsSmiOrTagged() || - instr->value()->representation().IsInteger32()); - if (needs_write_barrier) { - object = UseTempRegister(instr->elements()); - val = UseTempRegister(instr->value()); - key = UseTempRegister(instr->key()); - } else { - object = UseRegisterAtStart(instr->elements()); - val = UseRegisterAtStart(instr->value()); - key = UseRegisterOrConstantAtStart(instr->key()); - } - } - - return new (zone()) LStoreKeyed(object, key, val, nullptr); - } - - DCHECK( - (instr->value()->representation().IsInteger32() && - !IsDoubleOrFloatElementsKind(instr->elements_kind())) || - (instr->value()->representation().IsDouble() && - IsDoubleOrFloatElementsKind(instr->elements_kind()))); - DCHECK(instr->elements()->representation().IsExternal()); - LOperand* val = UseRegister(instr->value()); - LOperand* key = UseRegisterOrConstantAtStart(instr->key()); - LOperand* backing_store = UseRegister(instr->elements()); - LOperand* backing_store_owner = UseAny(instr->backing_store_owner()); - return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner); -} - - -LInstruction* LChunkBuilder::DoTransitionElementsKind( - HTransitionElementsKind* instr) { - if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) { - LOperand* object = UseRegister(instr->object()); - LOperand* new_map_reg = TempRegister(); - LTransitionElementsKind* result = - new(zone()) LTransitionElementsKind(object, NULL, new_map_reg); - return result; - } else { - LOperand* object = UseFixed(instr->object(), a0); - LOperand* context = UseFixed(instr->context(), cp); - LTransitionElementsKind* result = - new(zone()) LTransitionElementsKind(object, context, NULL); - return MarkAsCall(result, instr); - } -} - - -LInstruction* LChunkBuilder::DoTrapAllocationMemento( - HTrapAllocationMemento* instr) { - LOperand* object = UseRegister(instr->object()); - LOperand* temp = TempRegister(); - LTrapAllocationMemento* result = - new(zone()) LTrapAllocationMemento(object, temp); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) { - info()->MarkAsDeferredCalling(); - LOperand* context = UseFixed(instr->context(), cp); - LOperand* object = Use(instr->object()); - LOperand* elements = Use(instr->elements()); - LOperand* key = UseRegisterOrConstant(instr->key()); - LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity()); - - LMaybeGrowElements* result = new (zone()) - LMaybeGrowElements(context, object, elements, key, current_capacity); - DefineFixed(result, v0); - return AssignPointerMap(AssignEnvironment(result)); -} - - -LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { - bool is_in_object = instr->access().IsInobject(); - 
bool needs_write_barrier = instr->NeedsWriteBarrier(); - bool needs_write_barrier_for_map = instr->has_transition() && - instr->NeedsWriteBarrierForMap(); - - LOperand* obj; - if (needs_write_barrier) { - obj = is_in_object - ? UseRegister(instr->object()) - : UseTempRegister(instr->object()); - } else { - obj = needs_write_barrier_for_map - ? UseRegister(instr->object()) - : UseRegisterAtStart(instr->object()); - } - - LOperand* val; - if (needs_write_barrier) { - val = UseTempRegister(instr->value()); - } else if (instr->field_representation().IsDouble()) { - val = UseRegisterAtStart(instr->value()); - } else { - val = UseRegister(instr->value()); - } - - // We need a temporary register for write barrier of the map field. - LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL; - - return new(zone()) LStoreNamedField(obj, val, temp); -} - - -LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) { - LOperand* context = UseFixed(instr->context(), cp); - LOperand* left = UseFixed(instr->left(), a1); - LOperand* right = UseFixed(instr->right(), a0); - return MarkAsCall( - DefineFixed(new(zone()) LStringAdd(context, left, right), v0), - instr); -} - - -LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) { - LOperand* string = UseTempRegister(instr->string()); - LOperand* index = UseTempRegister(instr->index()); - LOperand* context = UseAny(instr->context()); - LStringCharCodeAt* result = - new(zone()) LStringCharCodeAt(context, string, index); - return AssignPointerMap(DefineAsRegister(result)); -} - - -LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) { - LOperand* char_code = UseRegister(instr->value()); - LOperand* context = UseAny(instr->context()); - LStringCharFromCode* result = - new(zone()) LStringCharFromCode(context, char_code); - return AssignPointerMap(DefineAsRegister(result)); -} - - -LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) { - LOperand* size = UseRegisterOrConstant(instr->size()); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - if (instr->IsAllocationFolded()) { - LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2); - return DefineAsRegister(result); - } else { - info()->MarkAsDeferredCalling(); - LOperand* context = UseAny(instr->context()); - LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2); - return AssignPointerMap(DefineAsRegister(result)); - } -} - - -LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) { - DCHECK(argument_count_ == 0); - allocator_->MarkAsOsrEntry(); - current_block_->last_environment()->set_ast_id(instr->ast_id()); - return AssignEnvironment(new(zone()) LOsrEntry); -} - - -LInstruction* LChunkBuilder::DoParameter(HParameter* instr) { - LParameter* result = new(zone()) LParameter; - if (instr->kind() == HParameter::STACK_PARAMETER) { - int spill_index = chunk()->GetParameterStackSlot(instr->index()); - return DefineAsSpilled(result, spill_index); - } else { - DCHECK(info()->IsStub()); - CallInterfaceDescriptor descriptor = graph()->descriptor(); - int index = static_cast(instr->index()); - Register reg = descriptor.GetRegisterParameter(index); - return DefineFixed(result, reg); - } -} - - -LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) { - // Use an index that corresponds to the location in the unoptimized frame, - // which the optimized frame will subsume. 
- int env_index = instr->index(); - int spill_index = 0; - if (instr->environment()->is_parameter_index(env_index)) { - spill_index = chunk()->GetParameterStackSlot(env_index); - } else { - spill_index = env_index - instr->environment()->first_local_index(); - if (spill_index > LUnallocated::kMaxFixedSlotIndex) { - Retry(kTooManySpillSlotsNeededForOSR); - spill_index = 0; - } - spill_index += StandardFrameConstants::kFixedSlotCount; - } - return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index); -} - - -LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) { - // There are no real uses of the arguments object. - // arguments.length and element access are supported directly on - // stack arguments, and any real arguments object use causes a bailout. - // So this value is never used. - return NULL; -} - - -LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) { - instr->ReplayEnvironment(current_block_->last_environment()); - - // There are no real uses of a captured object. - return NULL; -} - - -LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) { - info()->MarkAsRequiresFrame(); - LOperand* args = UseRegister(instr->arguments()); - LOperand* length = UseRegisterOrConstantAtStart(instr->length()); - LOperand* index = UseRegisterOrConstantAtStart(instr->index()); - return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index)); -} - - -LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) { - LOperand* context = UseFixed(instr->context(), cp); - LOperand* value = UseFixed(instr->value(), a3); - LTypeof* result = new (zone()) LTypeof(context, value); - return MarkAsCall(DefineFixed(result, v0), instr); -} - - -LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) { - return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value())); -} - - -LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) { - instr->ReplayEnvironment(current_block_->last_environment()); - return NULL; -} - - -LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) { - if (instr->is_function_entry()) { - LOperand* context = UseFixed(instr->context(), cp); - return MarkAsCall(new(zone()) LStackCheck(context), instr); - } else { - DCHECK(instr->is_backwards_branch()); - LOperand* context = UseAny(instr->context()); - return AssignEnvironment( - AssignPointerMap(new(zone()) LStackCheck(context))); - } -} - - -LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { - HEnvironment* outer = current_block_->last_environment(); - outer->set_ast_id(instr->ReturnId()); - HConstant* undefined = graph()->GetConstantUndefined(); - HEnvironment* inner = outer->CopyForInlining( - instr->closure(), instr->arguments_count(), instr->function(), undefined, - instr->inlining_kind(), instr->syntactic_tail_call_mode()); - // Only replay binding of arguments object if it wasn't removed from graph. 
- if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) { - inner->Bind(instr->arguments_var(), instr->arguments_object()); - } - inner->BindContext(instr->closure_context()); - inner->set_entry(instr); - current_block_->UpdateEnvironment(inner); - return NULL; -} - - -LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) { - LInstruction* pop = NULL; - - HEnvironment* env = current_block_->last_environment(); - - if (env->entry()->arguments_pushed()) { - int argument_count = env->arguments_environment()->parameter_count(); - pop = new(zone()) LDrop(argument_count); - DCHECK(instr->argument_delta() == -argument_count); - } - - HEnvironment* outer = current_block_->last_environment()-> - DiscardInlined(false); - current_block_->UpdateEnvironment(outer); - - return pop; -} - - -LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) { - LOperand* context = UseFixed(instr->context(), cp); - LOperand* object = UseFixed(instr->enumerable(), a0); - LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object); - return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) { - LOperand* map = UseRegister(instr->map()); - return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map))); -} - - -LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* map = UseRegisterAtStart(instr->map()); - return AssignEnvironment(new(zone()) LCheckMapValue(value, map)); -} - - -LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) { - LOperand* object = UseRegister(instr->object()); - LOperand* index = UseTempRegister(instr->index()); - LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index); - LInstruction* result = DefineSameAsFirst(load); - return AssignPointerMap(result); -} - -} // namespace internal -} // namespace v8 - -#endif // V8_TARGET_ARCH_MIPS64 diff --git a/src/crankshaft/mips64/lithium-mips64.h b/src/crankshaft/mips64/lithium-mips64.h deleted file mode 100644 index c75959a248..0000000000 --- a/src/crankshaft/mips64/lithium-mips64.h +++ /dev/null @@ -1,2496 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_MIPS64_LITHIUM_MIPS_H_ -#define V8_CRANKSHAFT_MIPS64_LITHIUM_MIPS_H_ - -#include "src/crankshaft/hydrogen.h" -#include "src/crankshaft/lithium.h" -#include "src/crankshaft/lithium-allocator.h" -#include "src/safepoint-table.h" -#include "src/utils.h" - -namespace v8 { -namespace internal { - -// Forward declarations. 
-class LCodeGen; - -#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \ - V(AccessArgumentsAt) \ - V(AddE) \ - V(AddI) \ - V(AddS) \ - V(Allocate) \ - V(ApplyArguments) \ - V(ArgumentsElements) \ - V(ArgumentsLength) \ - V(ArithmeticD) \ - V(ArithmeticT) \ - V(BitI) \ - V(BoundsCheck) \ - V(Branch) \ - V(CallWithDescriptor) \ - V(CallNewArray) \ - V(CallRuntime) \ - V(CheckArrayBufferNotNeutered) \ - V(CheckInstanceType) \ - V(CheckMaps) \ - V(CheckMapValue) \ - V(CheckNonSmi) \ - V(CheckSmi) \ - V(CheckValue) \ - V(ClampDToUint8) \ - V(ClampIToUint8) \ - V(ClampTToUint8) \ - V(ClassOfTestAndBranch) \ - V(CompareNumericAndBranch) \ - V(CmpObjectEqAndBranch) \ - V(CmpHoleAndBranch) \ - V(CmpMapAndBranch) \ - V(CmpT) \ - V(ConstantD) \ - V(ConstantE) \ - V(ConstantI) \ - V(ConstantS) \ - V(ConstantT) \ - V(Context) \ - V(DebugBreak) \ - V(DeclareGlobals) \ - V(Deoptimize) \ - V(DivByConstI) \ - V(DivByPowerOf2I) \ - V(DivI) \ - V(DoubleToI) \ - V(DoubleToSmi) \ - V(Drop) \ - V(Dummy) \ - V(DummyUse) \ - V(FastAllocate) \ - V(FlooringDivByConstI) \ - V(FlooringDivByPowerOf2I) \ - V(FlooringDivI) \ - V(ForInCacheArray) \ - V(ForInPrepareMap) \ - V(Goto) \ - V(HasInPrototypeChainAndBranch) \ - V(HasInstanceTypeAndBranch) \ - V(InnerAllocatedObject) \ - V(InstructionGap) \ - V(Integer32ToDouble) \ - V(InvokeFunction) \ - V(IsStringAndBranch) \ - V(IsSmiAndBranch) \ - V(IsUndetectableAndBranch) \ - V(Label) \ - V(LazyBailout) \ - V(LoadContextSlot) \ - V(LoadRoot) \ - V(LoadFieldByIndex) \ - V(LoadFunctionPrototype) \ - V(LoadKeyed) \ - V(LoadNamedField) \ - V(MathAbs) \ - V(MathCos) \ - V(MathSin) \ - V(MathExp) \ - V(MathClz32) \ - V(MathFloor) \ - V(MathFround) \ - V(MathLog) \ - V(MathMinMax) \ - V(MathPowHalf) \ - V(MathRound) \ - V(MathSqrt) \ - V(MaybeGrowElements) \ - V(ModByConstI) \ - V(ModByPowerOf2I) \ - V(ModI) \ - V(MulI) \ - V(MulS) \ - V(MultiplyAddD) \ - V(NumberTagD) \ - V(NumberTagU) \ - V(NumberUntagD) \ - V(OsrEntry) \ - V(Parameter) \ - V(Power) \ - V(Prologue) \ - V(PushArgument) \ - V(Return) \ - V(SeqStringGetChar) \ - V(SeqStringSetChar) \ - V(ShiftI) \ - V(SmiTag) \ - V(SmiUntag) \ - V(StackCheck) \ - V(StoreCodeEntry) \ - V(StoreContextSlot) \ - V(StoreKeyed) \ - V(StoreNamedField) \ - V(StringAdd) \ - V(StringCharCodeAt) \ - V(StringCharFromCode) \ - V(StringCompareAndBranch) \ - V(SubI) \ - V(SubS) \ - V(TaggedToI) \ - V(ThisFunction) \ - V(TransitionElementsKind) \ - V(TrapAllocationMemento) \ - V(Typeof) \ - V(TypeofIsAndBranch) \ - V(Uint32ToDouble) \ - V(UnknownOSRValue) \ - V(WrapReceiver) - -#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \ - Opcode opcode() const final { return LInstruction::k##type; } \ - void CompileToNative(LCodeGen* generator) final; \ - const char* Mnemonic() const final { return mnemonic; } \ - static L##type* cast(LInstruction* instr) { \ - DCHECK(instr->Is##type()); \ - return reinterpret_cast(instr); \ - } - - -#define DECLARE_HYDROGEN_ACCESSOR(type) \ - H##type* hydrogen() const { \ - return H##type::cast(hydrogen_value()); \ - } - - -class LInstruction : public ZoneObject { - public: - LInstruction() - : environment_(NULL), - hydrogen_value_(NULL), - bit_field_(IsCallBits::encode(false)) { - } - - virtual ~LInstruction() {} - - virtual void CompileToNative(LCodeGen* generator) = 0; - virtual const char* Mnemonic() const = 0; - virtual void PrintTo(StringStream* stream); - virtual void PrintDataTo(StringStream* stream); - virtual void PrintOutputOperandTo(StringStream* stream); - - enum Opcode { - // Declare a unique enum value for 
each instruction. -#define DECLARE_OPCODE(type) k##type, - LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) - kNumberOfInstructions -#undef DECLARE_OPCODE - }; - - virtual Opcode opcode() const = 0; - - // Declare non-virtual type testers for all leaf IR classes. -#define DECLARE_PREDICATE(type) \ - bool Is##type() const { return opcode() == k##type; } - LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE) -#undef DECLARE_PREDICATE - - // Declare virtual predicates for instructions that don't have - // an opcode. - virtual bool IsGap() const { return false; } - - virtual bool IsControl() const { return false; } - - // Try deleting this instruction if possible. - virtual bool TryDelete() { return false; } - - void set_environment(LEnvironment* env) { environment_ = env; } - LEnvironment* environment() const { return environment_; } - bool HasEnvironment() const { return environment_ != NULL; } - - void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); } - LPointerMap* pointer_map() const { return pointer_map_.get(); } - bool HasPointerMap() const { return pointer_map_.is_set(); } - - void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; } - HValue* hydrogen_value() const { return hydrogen_value_; } - - void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); } - bool IsCall() const { return IsCallBits::decode(bit_field_); } - - void MarkAsSyntacticTailCall() { - bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true); - } - bool IsSyntacticTailCall() const { - return IsSyntacticTailCallBits::decode(bit_field_); - } - - // Interface to the register allocator and iterators. - bool ClobbersTemps() const { return IsCall(); } - bool ClobbersRegisters() const { return IsCall(); } - virtual bool ClobbersDoubleRegisters(Isolate* isolate) const { - return IsCall(); - } - - // Interface to the register allocator and iterators. - bool IsMarkedAsCall() const { return IsCall(); } - - virtual bool HasResult() const = 0; - virtual LOperand* result() const = 0; - - LOperand* FirstInput() { return InputAt(0); } - LOperand* Output() { return HasResult() ? result() : NULL; } - - virtual bool HasInterestingComment(LCodeGen* gen) const { return true; } - -#ifdef DEBUG - void VerifyCall(); -#endif - - virtual int InputCount() = 0; - virtual LOperand* InputAt(int i) = 0; - - private: - // Iterator interface. - friend class InputIterator; - - friend class TempIterator; - virtual int TempCount() = 0; - virtual LOperand* TempAt(int i) = 0; - - class IsCallBits: public BitField {}; - class IsSyntacticTailCallBits : public BitField { - }; - - LEnvironment* environment_; - SetOncePointer pointer_map_; - HValue* hydrogen_value_; - int bit_field_; -}; - - -// R = number of result operands (0 or 1). -template -class LTemplateResultInstruction : public LInstruction { - public: - // Allow 0 or 1 output operands. - STATIC_ASSERT(R == 0 || R == 1); - bool HasResult() const final { return R != 0 && result() != NULL; } - void set_result(LOperand* operand) { results_[0] = operand; } - LOperand* result() const override { return results_[0]; } - - protected: - EmbeddedContainer results_; -}; - - -// R = number of result operands (0 or 1). -// I = number of input operands. -// T = number of temporary operands. -template -class LTemplateInstruction : public LTemplateResultInstruction { - protected: - EmbeddedContainer inputs_; - EmbeddedContainer temps_; - - private: - // Iterator support. 
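// Illustrative sketch (not V8 code; the list and class names are invented):
// the deleted header drives its Opcode enum and the non-virtual Is##type()
// testers from the single LITHIUM_CONCRETE_INSTRUCTION_LIST X-macro, so the
// instruction set is declared exactly once. The minimal, self-contained
// example below shows how that expansion technique works.
#include <cassert>

#define INSTRUCTION_LIST(V) \
  V(AddI)                   \
  V(Goto)                   \
  V(Return)

class Instruction {
 public:
  enum Opcode {
#define DECLARE_OPCODE(type) k##type,
    INSTRUCTION_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
    kNumberOfInstructions
  };

  virtual ~Instruction() = default;
  virtual Opcode opcode() const = 0;

  // One non-virtual tester per concrete opcode, generated from the same list.
#define DECLARE_PREDICATE(type) \
  bool Is##type() const { return opcode() == k##type; }
  INSTRUCTION_LIST(DECLARE_PREDICATE)
#undef DECLARE_PREDICATE
};

class GotoInstr final : public Instruction {
 public:
  Opcode opcode() const override { return kGoto; }
};

int main() {
  GotoInstr g;
  assert(g.IsGoto() && !g.IsAddI());
  return 0;
}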
- int InputCount() final { return I; } - LOperand* InputAt(int i) final { return inputs_[i]; } - - int TempCount() final { return T; } - LOperand* TempAt(int i) final { return temps_[i]; } -}; - - -class LGap : public LTemplateInstruction<0, 0, 0> { - public: - explicit LGap(HBasicBlock* block) - : block_(block) { - parallel_moves_[BEFORE] = NULL; - parallel_moves_[START] = NULL; - parallel_moves_[END] = NULL; - parallel_moves_[AFTER] = NULL; - } - - // Can't use the DECLARE-macro here because of sub-classes. - bool IsGap() const final { return true; } - void PrintDataTo(StringStream* stream) override; - static LGap* cast(LInstruction* instr) { - DCHECK(instr->IsGap()); - return reinterpret_cast(instr); - } - - bool IsRedundant() const; - - HBasicBlock* block() const { return block_; } - - enum InnerPosition { - BEFORE, - START, - END, - AFTER, - FIRST_INNER_POSITION = BEFORE, - LAST_INNER_POSITION = AFTER - }; - - LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) { - if (parallel_moves_[pos] == NULL) { - parallel_moves_[pos] = new(zone) LParallelMove(zone); - } - return parallel_moves_[pos]; - } - - LParallelMove* GetParallelMove(InnerPosition pos) { - return parallel_moves_[pos]; - } - - private: - LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1]; - HBasicBlock* block_; -}; - - -class LInstructionGap final : public LGap { - public: - explicit LInstructionGap(HBasicBlock* block) : LGap(block) { } - - bool HasInterestingComment(LCodeGen* gen) const override { - return !IsRedundant(); - } - - DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap") -}; - - -class LGoto final : public LTemplateInstruction<0, 0, 0> { - public: - explicit LGoto(HBasicBlock* block) : block_(block) { } - - bool HasInterestingComment(LCodeGen* gen) const override; - DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") - void PrintDataTo(StringStream* stream) override; - bool IsControl() const override { return true; } - - int block_id() const { return block_->block_id(); } - - private: - HBasicBlock* block_; -}; - - -class LPrologue final : public LTemplateInstruction<0, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue") -}; - - -class LLazyBailout final : public LTemplateInstruction<0, 0, 0> { - public: - LLazyBailout() : gap_instructions_size_(0) { } - - DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout") - - void set_gap_instructions_size(int gap_instructions_size) { - gap_instructions_size_ = gap_instructions_size; - } - int gap_instructions_size() { return gap_instructions_size_; } - - private: - int gap_instructions_size_; -}; - - -class LDummy final : public LTemplateInstruction<1, 0, 0> { - public: - LDummy() {} - DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy") -}; - - -class LDummyUse final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LDummyUse(LOperand* value) { - inputs_[0] = value; - } - DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use") -}; - - -class LDeoptimize final : public LTemplateInstruction<0, 0, 0> { - public: - bool IsControl() const override { return true; } - DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize") - DECLARE_HYDROGEN_ACCESSOR(Deoptimize) -}; - - -class LLabel final : public LGap { - public: - explicit LLabel(HBasicBlock* block) - : LGap(block), replacement_(NULL) { } - - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(Label, "label") - - void PrintDataTo(StringStream* stream) override; - - int block_id() const { return block()->block_id(); } - bool is_loop_header() 
const { return block()->IsLoopHeader(); } - bool is_osr_entry() const { return block()->is_osr_entry(); } - Label* label() { return &label_; } - LLabel* replacement() const { return replacement_; } - void set_replacement(LLabel* label) { replacement_ = label; } - bool HasReplacement() const { return replacement_ != NULL; } - - private: - Label label_; - LLabel* replacement_; -}; - - -class LParameter final : public LTemplateInstruction<1, 0, 0> { - public: - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter") -}; - - -class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> { - public: - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value") -}; - - -template -class LControlInstruction : public LTemplateInstruction<0, I, T> { - public: - LControlInstruction() : false_label_(NULL), true_label_(NULL) { } - - bool IsControl() const final { return true; } - - int SuccessorCount() { return hydrogen()->SuccessorCount(); } - HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); } - - int TrueDestination(LChunk* chunk) { - return chunk->LookupDestination(true_block_id()); - } - int FalseDestination(LChunk* chunk) { - return chunk->LookupDestination(false_block_id()); - } - - Label* TrueLabel(LChunk* chunk) { - if (true_label_ == NULL) { - true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk)); - } - return true_label_; - } - Label* FalseLabel(LChunk* chunk) { - if (false_label_ == NULL) { - false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk)); - } - return false_label_; - } - - protected: - int true_block_id() { return SuccessorAt(0)->block_id(); } - int false_block_id() { return SuccessorAt(1)->block_id(); } - - private: - HControlInstruction* hydrogen() { - return HControlInstruction::cast(this->hydrogen_value()); - } - - Label* false_label_; - Label* true_label_; -}; - - -class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> { - public: - LWrapReceiver(LOperand* receiver, LOperand* function) { - inputs_[0] = receiver; - inputs_[1] = function; - } - - DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver") - DECLARE_HYDROGEN_ACCESSOR(WrapReceiver) - - LOperand* receiver() { return inputs_[0]; } - LOperand* function() { return inputs_[1]; } -}; - - -class LApplyArguments final : public LTemplateInstruction<1, 4, 0> { - public: - LApplyArguments(LOperand* function, - LOperand* receiver, - LOperand* length, - LOperand* elements) { - inputs_[0] = function; - inputs_[1] = receiver; - inputs_[2] = length; - inputs_[3] = elements; - } - - DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments") - DECLARE_HYDROGEN_ACCESSOR(ApplyArguments) - - LOperand* function() { return inputs_[0]; } - LOperand* receiver() { return inputs_[1]; } - LOperand* length() { return inputs_[2]; } - LOperand* elements() { return inputs_[3]; } -}; - - -class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> { - public: - LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) { - inputs_[0] = arguments; - inputs_[1] = length; - inputs_[2] = index; - } - - DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at") - - LOperand* arguments() { return inputs_[0]; } - LOperand* length() { return inputs_[1]; } - LOperand* index() { return inputs_[2]; } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LArgumentsLength final : public 
LTemplateInstruction<1, 1, 0> { - public: - explicit LArgumentsLength(LOperand* elements) { - inputs_[0] = elements; - } - - LOperand* elements() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length") -}; - - -class LArgumentsElements final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements") - DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements) -}; - - -class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> { - public: - LModByPowerOf2I(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) - - private: - int32_t divisor_; -}; - - -class LModByConstI final : public LTemplateInstruction<1, 1, 0> { - public: - LModByConstI(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) - - private: - int32_t divisor_; -}; - - -class LModI final : public LTemplateInstruction<1, 2, 3> { - public: - LModI(LOperand* left, - LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) -}; - - -class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> { - public: - LDivByPowerOf2I(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i") - DECLARE_HYDROGEN_ACCESSOR(Div) - - private: - int32_t divisor_; -}; - - -class LDivByConstI final : public LTemplateInstruction<1, 1, 0> { - public: - LDivByConstI(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i") - DECLARE_HYDROGEN_ACCESSOR(Div) - - private: - int32_t divisor_; -}; - - -class LDivI final : public LTemplateInstruction<1, 2, 1> { - public: - LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) { - inputs_[0] = dividend; - inputs_[1] = divisor; - temps_[0] = temp; - } - - LOperand* dividend() { return inputs_[0]; } - LOperand* divisor() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i") - DECLARE_HYDROGEN_ACCESSOR(BinaryOperation) -}; - - -class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> { - public: - LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I, - "flooring-div-by-power-of-2-i") - DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) - - private: - int32_t divisor_; -}; - - -class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 2> { - public: - LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* 
temp) { - inputs_[0] = dividend; - divisor_ = divisor; - temps_[0] = temp; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i") - DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) - - private: - int32_t divisor_; -}; - - -class LFlooringDivI final : public LTemplateInstruction<1, 2, 0> { - public: - LFlooringDivI(LOperand* dividend, LOperand* divisor) { - inputs_[0] = dividend; - inputs_[1] = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - LOperand* divisor() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i") - DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) -}; - - -class LMulS final : public LTemplateInstruction<1, 2, 0> { - public: - LMulS(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(MulS, "mul-s") - DECLARE_HYDROGEN_ACCESSOR(Mul) -}; - - -class LMulI final : public LTemplateInstruction<1, 2, 0> { - public: - LMulI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i") - DECLARE_HYDROGEN_ACCESSOR(Mul) -}; - - -// Instruction for computing multiplier * multiplicand + addend. -class LMultiplyAddD final : public LTemplateInstruction<1, 3, 0> { - public: - LMultiplyAddD(LOperand* addend, LOperand* multiplier, - LOperand* multiplicand) { - inputs_[0] = addend; - inputs_[1] = multiplier; - inputs_[2] = multiplicand; - } - - LOperand* addend() { return inputs_[0]; } - LOperand* multiplier() { return inputs_[1]; } - LOperand* multiplicand() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d") -}; - - -class LDebugBreak final : public LTemplateInstruction<0, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break") -}; - - -class LCompareNumericAndBranch final : public LControlInstruction<2, 0> { - public: - LCompareNumericAndBranch(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch, - "compare-numeric-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch) - - Token::Value op() const { return hydrogen()->token(); } - bool is_double() const { - return hydrogen()->representation().IsDouble(); - } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LMathFloor final : public LTemplateInstruction<1, 1, 1> { - public: - LMathFloor(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - - -class LMathRound final : public LTemplateInstruction<1, 1, 1> { - public: - LMathRound(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - - -class LMathFround final : public LTemplateInstruction<1, 1, 0> { - public: 
- explicit LMathFround(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround") -}; - - -class LMathAbs final : public LTemplateInstruction<1, 2, 0> { - public: - LMathAbs(LOperand* context, LOperand* value) { - inputs_[1] = context; - inputs_[0] = value; - } - - LOperand* context() { return inputs_[1]; } - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - - -class LMathLog final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathLog(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log") -}; - - -class LMathClz32 final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathClz32(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32") -}; - -class LMathCos final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathCos(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos") -}; - -class LMathSin final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathSin(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin") -}; - -class LMathExp final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathExp(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp") -}; - - -class LMathSqrt final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathSqrt(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt") -}; - - -class LMathPowHalf final : public LTemplateInstruction<1, 1, 1> { - public: - LMathPowHalf(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half") -}; - - -class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> { - public: - LCmpObjectEqAndBranch(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch) -}; - - -class LCmpHoleAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LCmpHoleAndBranch(LOperand* object) { - inputs_[0] = object; - } - - LOperand* object() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch) -}; - - -class LIsStringAndBranch final : public LControlInstruction<1, 1> { - public: - LIsStringAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class 
LIsSmiAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LIsSmiAndBranch(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> { - public: - explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch, - "is-undetectable-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LStringCompareAndBranch final : public LControlInstruction<3, 0> { - public: - LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch, - "string-compare-and-branch") - DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch) - - Token::Value op() const { return hydrogen()->token(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LHasInstanceTypeAndBranch(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch, - "has-instance-type-and-branch") - DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - -class LClassOfTestAndBranch final : public LControlInstruction<1, 1> { - public: - LClassOfTestAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, "class-of-test-and-branch") - DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - -class LCmpT final : public LTemplateInstruction<1, 3, 0> { - public: - LCmpT(LOperand* context, LOperand* left, LOperand* right) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t") - DECLARE_HYDROGEN_ACCESSOR(CompareGeneric) - - Token::Value op() const { return hydrogen()->token(); } -}; - - -class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> { - public: - LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) { - inputs_[0] = object; - inputs_[1] = prototype; - } - - LOperand* object() const { return inputs_[0]; } - LOperand* prototype() const { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch, - "has-in-prototype-chain-and-branch") - DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch) -}; - - -class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> { - public: - LBoundsCheck(LOperand* index, LOperand* length) { - inputs_[0] = index; - inputs_[1] = length; - } - - LOperand* index() { 
return inputs_[0]; } - LOperand* length() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check") - DECLARE_HYDROGEN_ACCESSOR(BoundsCheck) -}; - - -class LBitI final : public LTemplateInstruction<1, 2, 0> { - public: - LBitI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - Token::Value op() const { return hydrogen()->op(); } - - DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i") - DECLARE_HYDROGEN_ACCESSOR(Bitwise) -}; - - -class LShiftI final : public LTemplateInstruction<1, 2, 0> { - public: - LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt) - : op_(op), can_deopt_(can_deopt) { - inputs_[0] = left; - inputs_[1] = right; - } - - Token::Value op() const { return op_; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - bool can_deopt() const { return can_deopt_; } - - DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i") - - private: - Token::Value op_; - bool can_deopt_; -}; - - -class LSubI final : public LTemplateInstruction<1, 2, 0> { - public: - LSubI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i") - DECLARE_HYDROGEN_ACCESSOR(Sub) -}; - - -class LSubS final : public LTemplateInstruction<1, 2, 0> { - public: - LSubS(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(SubS, "sub-s") - DECLARE_HYDROGEN_ACCESSOR(Sub) -}; - - -class LConstantI final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - int32_t value() const { return hydrogen()->Integer32Value(); } -}; - - -class LConstantS final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); } -}; - - -class LConstantD final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - double value() const { return hydrogen()->DoubleValue(); } -}; - - -class LConstantE final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - ExternalReference value() const { - return hydrogen()->ExternalReferenceValue(); - } -}; - - -class LConstantT final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - Handle value(Isolate* isolate) const { - return hydrogen()->handle(isolate); - } -}; - - -class LBranch final : public LControlInstruction<1, 0> { - public: - explicit LBranch(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(Branch, "branch") - DECLARE_HYDROGEN_ACCESSOR(Branch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LCmpMapAndBranch final : public LControlInstruction<1, 1> { - public: - LCmpMapAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* 
value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareMap) - - Handle map() const { return hydrogen()->map().handle(); } -}; - - -class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> { - public: - LSeqStringGetChar(LOperand* string, LOperand* index) { - inputs_[0] = string; - inputs_[1] = index; - } - - LOperand* string() const { return inputs_[0]; } - LOperand* index() const { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char") - DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar) -}; - - -class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 0> { - public: - LSeqStringSetChar(LOperand* context, - LOperand* string, - LOperand* index, - LOperand* value) { - inputs_[0] = context; - inputs_[1] = string; - inputs_[2] = index; - inputs_[3] = value; - } - - LOperand* string() { return inputs_[1]; } - LOperand* index() { return inputs_[2]; } - LOperand* value() { return inputs_[3]; } - - DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char") - DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar) -}; - - -class LAddE final : public LTemplateInstruction<1, 2, 0> { - public: - LAddE(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(AddE, "add-e") - DECLARE_HYDROGEN_ACCESSOR(Add) -}; - - -class LAddI final : public LTemplateInstruction<1, 2, 0> { - public: - LAddI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i") - DECLARE_HYDROGEN_ACCESSOR(Add) -}; - - -class LAddS final : public LTemplateInstruction<1, 2, 0> { - public: - LAddS(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(AddS, "add-s") - DECLARE_HYDROGEN_ACCESSOR(Add) -}; - - -class LMathMinMax final : public LTemplateInstruction<1, 2, 0> { - public: - LMathMinMax(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max") - DECLARE_HYDROGEN_ACCESSOR(MathMinMax) -}; - - -class LPower final : public LTemplateInstruction<1, 2, 0> { - public: - LPower(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(Power, "power") - DECLARE_HYDROGEN_ACCESSOR(Power) -}; - - -class LArithmeticD final : public LTemplateInstruction<1, 2, 0> { - public: - LArithmeticD(Token::Value op, LOperand* left, LOperand* right) - : op_(op) { - inputs_[0] = left; - inputs_[1] = right; - } - - Token::Value op() const { return op_; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - Opcode opcode() const override { return LInstruction::kArithmeticD; } - void CompileToNative(LCodeGen* generator) override; - const char* Mnemonic() const override; - - private: - Token::Value op_; -}; - - -class LArithmeticT final : public LTemplateInstruction<1, 3, 0> { - public: - 
LArithmeticT(Token::Value op, - LOperand* context, - LOperand* left, - LOperand* right) - : op_(op) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - Token::Value op() const { return op_; } - - Opcode opcode() const final { return LInstruction::kArithmeticT; } - void CompileToNative(LCodeGen* generator) override; - const char* Mnemonic() const override; - - DECLARE_HYDROGEN_ACCESSOR(BinaryOperation) - - private: - Token::Value op_; -}; - - -class LReturn final : public LTemplateInstruction<0, 3, 0> { - public: - LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) { - inputs_[0] = value; - inputs_[1] = context; - inputs_[2] = parameter_count; - } - - LOperand* value() { return inputs_[0]; } - - bool has_constant_parameter_count() { - return parameter_count()->IsConstantOperand(); - } - LConstantOperand* constant_parameter_count() { - DCHECK(has_constant_parameter_count()); - return LConstantOperand::cast(parameter_count()); - } - LOperand* parameter_count() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(Return, "return") -}; - - -class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LLoadNamedField(LOperand* object) { - inputs_[0] = object; - } - - LOperand* object() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field") - DECLARE_HYDROGEN_ACCESSOR(LoadNamedField) -}; - - -class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LLoadFunctionPrototype(LOperand* function) { - inputs_[0] = function; - } - - LOperand* function() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype") - DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype) -}; - - -class LLoadRoot final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root") - DECLARE_HYDROGEN_ACCESSOR(LoadRoot) - - Heap::RootListIndex index() const { return hydrogen()->index(); } -}; - - -class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> { - public: - LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) { - inputs_[0] = elements; - inputs_[1] = key; - inputs_[2] = backing_store_owner; - } - - LOperand* elements() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - LOperand* backing_store_owner() { return inputs_[2]; } - ElementsKind elements_kind() const { - return hydrogen()->elements_kind(); - } - bool is_fixed_typed_array() const { - return hydrogen()->is_fixed_typed_array(); - } - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed") - DECLARE_HYDROGEN_ACCESSOR(LoadKeyed) - - void PrintDataTo(StringStream* stream) override; - uint32_t base_offset() const { return hydrogen()->base_offset(); } -}; - - -class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LLoadContextSlot(LOperand* context) { - inputs_[0] = context; - } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot") - DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot) - - int slot_index() { return hydrogen()->slot_index(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LStoreContextSlot final : public LTemplateInstruction<0, 2, 0> { - public: - LStoreContextSlot(LOperand* context, LOperand* value) { - inputs_[0] = context; - inputs_[1] 
= value; - } - - LOperand* context() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot") - DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot) - - int slot_index() { return hydrogen()->slot_index(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LPushArgument final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LPushArgument(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument") -}; - - -class LDrop final : public LTemplateInstruction<0, 0, 0> { - public: - explicit LDrop(int count) : count_(count) { } - - int count() const { return count_; } - - DECLARE_CONCRETE_INSTRUCTION(Drop, "drop") - - private: - int count_; -}; - - -class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 0> { - public: - LStoreCodeEntry(LOperand* function, LOperand* code_object) { - inputs_[0] = function; - inputs_[1] = code_object; - } - - LOperand* function() { return inputs_[0]; } - LOperand* code_object() { return inputs_[1]; } - - void PrintDataTo(StringStream* stream) override; - - DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry") - DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry) -}; - - -class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> { - public: - LInnerAllocatedObject(LOperand* base_object, LOperand* offset) { - inputs_[0] = base_object; - inputs_[1] = offset; - } - - LOperand* base_object() const { return inputs_[0]; } - LOperand* offset() const { return inputs_[1]; } - - void PrintDataTo(StringStream* stream) override; - - DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object") -}; - - -class LThisFunction final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function") - DECLARE_HYDROGEN_ACCESSOR(ThisFunction) -}; - - -class LContext final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(Context, "context") - DECLARE_HYDROGEN_ACCESSOR(Context) -}; - - -class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LDeclareGlobals(LOperand* context) { - inputs_[0] = context; - } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals") - DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals) -}; - - -class LCallWithDescriptor final : public LTemplateResultInstruction<1> { - public: - LCallWithDescriptor(CallInterfaceDescriptor descriptor, - const ZoneList& operands, Zone* zone) - : descriptor_(descriptor), - inputs_(descriptor.GetRegisterParameterCount() + - kImplicitRegisterParameterCount, - zone) { - DCHECK(descriptor.GetRegisterParameterCount() + - kImplicitRegisterParameterCount == - operands.length()); - inputs_.AddAll(operands, zone); - } - - LOperand* target() const { return inputs_[0]; } - - const CallInterfaceDescriptor descriptor() { return descriptor_; } - - DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor) - - // The target and context are passed as implicit parameters that are not - // explicitly listed in the descriptor. - static const int kImplicitRegisterParameterCount = 2; - - private: - DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor") - - void PrintDataTo(StringStream* stream) override; - - int arity() const { return hydrogen()->argument_count() - 1; } - - CallInterfaceDescriptor descriptor_; - ZoneList inputs_; - - // Iterator support. 
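// Illustrative sketch (not V8 code; types are invented): most of the deleted
// instructions store operands in fixed-size containers whose sizes come from
// the LTemplateInstruction template parameters, while LCallWithDescriptor
// above keeps a growable list and overrides the InputCount()/InputAt()
// iterator hooks because its operand count depends on the call descriptor.
// The example below shows the same pattern: one iteration interface with a
// fixed-arity and a variable-arity implementation.
#include <array>
#include <cassert>
#include <vector>

struct Operand { int id; };

class Instr {
 public:
  virtual ~Instr() = default;
  virtual int InputCount() const = 0;
  virtual Operand InputAt(int i) const = 0;
};

// Fixed arity: storage size is known at compile time.
template <int N>
class FixedInstr : public Instr {
 public:
  explicit FixedInstr(std::array<Operand, N> inputs) : inputs_(inputs) {}
  int InputCount() const override { return N; }
  Operand InputAt(int i) const override { return inputs_[i]; }

 private:
  std::array<Operand, N> inputs_;
};

// Variable arity: e.g. a call whose operand count comes from a descriptor.
class CallInstr : public Instr {
 public:
  explicit CallInstr(std::vector<Operand> inputs) : inputs_(std::move(inputs)) {}
  int InputCount() const override { return static_cast<int>(inputs_.size()); }
  Operand InputAt(int i) const override { return inputs_[i]; }

 private:
  std::vector<Operand> inputs_;
};

int main() {
  FixedInstr<2> add({{Operand{0}, Operand{1}}});
  CallInstr call({Operand{2}, Operand{3}, Operand{4}});
  assert(add.InputCount() == 2 && call.InputCount() == 3);
  assert(call.InputAt(2).id == 4);
  return 0;
}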
- int InputCount() final { return inputs_.length(); } - LOperand* InputAt(int i) final { return inputs_[i]; } - - int TempCount() final { return 0; } - LOperand* TempAt(int i) final { return NULL; } -}; - - -class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> { - public: - LInvokeFunction(LOperand* context, LOperand* function) { - inputs_[0] = context; - inputs_[1] = function; - } - - LOperand* context() { return inputs_[0]; } - LOperand* function() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function") - DECLARE_HYDROGEN_ACCESSOR(InvokeFunction) - - void PrintDataTo(StringStream* stream) override; - - int arity() const { return hydrogen()->argument_count() - 1; } -}; - - -class LCallNewArray final : public LTemplateInstruction<1, 2, 0> { - public: - LCallNewArray(LOperand* context, LOperand* constructor) { - inputs_[0] = context; - inputs_[1] = constructor; - } - - LOperand* context() { return inputs_[0]; } - LOperand* constructor() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array") - DECLARE_HYDROGEN_ACCESSOR(CallNewArray) - - void PrintDataTo(StringStream* stream) override; - - int arity() const { return hydrogen()->argument_count() - 1; } -}; - - -class LCallRuntime final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LCallRuntime(LOperand* context) { - inputs_[0] = context; - } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime") - DECLARE_HYDROGEN_ACCESSOR(CallRuntime) - - bool ClobbersDoubleRegisters(Isolate* isolate) const override { - return save_doubles() == kDontSaveFPRegs; - } - - const Runtime::Function* function() const { return hydrogen()->function(); } - int arity() const { return hydrogen()->argument_count(); } - SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); } -}; - - -class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LInteger32ToDouble(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double") -}; - - -class LUint32ToDouble final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LUint32ToDouble(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double") -}; - - -class LNumberTagU final : public LTemplateInstruction<1, 1, 2> { - public: - LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u") -}; - - -class LNumberTagD final : public LTemplateInstruction<1, 1, 2> { - public: - LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d") - DECLARE_HYDROGEN_ACCESSOR(Change) -}; - - -class LDoubleToSmi final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LDoubleToSmi(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, 
"double-to-smi") - DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) - - bool truncating() { return hydrogen()->CanTruncateToInt32(); } -}; - - -// Sometimes truncating conversion from a tagged value to an int32. -class LDoubleToI final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LDoubleToI(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i") - DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) - - bool truncating() { return hydrogen()->CanTruncateToInt32(); } -}; - - -// Truncating conversion from a tagged value to an int32. -class LTaggedToI final : public LTemplateInstruction<1, 1, 2> { - public: - LTaggedToI(LOperand* value, - LOperand* temp, - LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i") - DECLARE_HYDROGEN_ACCESSOR(Change) - - bool truncating() { return hydrogen()->CanTruncateToInt32(); } -}; - - -class LSmiTag final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LSmiTag(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag") - DECLARE_HYDROGEN_ACCESSOR(Change) -}; - - -class LNumberUntagD final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LNumberUntagD(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag") - DECLARE_HYDROGEN_ACCESSOR(Change) - - bool truncating() { return hydrogen()->CanTruncateToNumber(); } -}; - - -class LSmiUntag final : public LTemplateInstruction<1, 1, 0> { - public: - LSmiUntag(LOperand* value, bool needs_check) - : needs_check_(needs_check) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - bool needs_check() const { return needs_check_; } - - DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag") - - private: - bool needs_check_; -}; - - -class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> { - public: - LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) { - inputs_[0] = object; - inputs_[1] = value; - temps_[0] = temp; - } - - LOperand* object() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") - DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) - - void PrintDataTo(StringStream* stream) override; - - Representation representation() const { - return hydrogen()->field_representation(); - } -}; - - -class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> { - public: - LStoreKeyed(LOperand* object, LOperand* key, LOperand* value, - LOperand* backing_store_owner) { - inputs_[0] = object; - inputs_[1] = key; - inputs_[2] = value; - inputs_[3] = backing_store_owner; - } - - bool is_fixed_typed_array() const { - return hydrogen()->is_fixed_typed_array(); - } - LOperand* elements() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - LOperand* value() { return inputs_[2]; } - LOperand* backing_store_owner() { return inputs_[3]; } - ElementsKind elements_kind() const { - return hydrogen()->elements_kind(); - } - - DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyed) - - void PrintDataTo(StringStream* 
stream) override; - bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); } - uint32_t base_offset() const { return hydrogen()->base_offset(); } -}; - - -class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> { - public: - LTransitionElementsKind(LOperand* object, - LOperand* context, - LOperand* new_map_temp) { - inputs_[0] = object; - inputs_[1] = context; - temps_[0] = new_map_temp; - } - - LOperand* context() { return inputs_[1]; } - LOperand* object() { return inputs_[0]; } - LOperand* new_map_temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind, - "transition-elements-kind") - DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind) - - void PrintDataTo(StringStream* stream) override; - - Handle original_map() { return hydrogen()->original_map().handle(); } - Handle transitioned_map() { - return hydrogen()->transitioned_map().handle(); - } - ElementsKind from_kind() { return hydrogen()->from_kind(); } - ElementsKind to_kind() { return hydrogen()->to_kind(); } -}; - - -class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> { - public: - LTrapAllocationMemento(LOperand* object, - LOperand* temp) { - inputs_[0] = object; - temps_[0] = temp; - } - - LOperand* object() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, - "trap-allocation-memento") -}; - - -class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> { - public: - LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements, - LOperand* key, LOperand* current_capacity) { - inputs_[0] = context; - inputs_[1] = object; - inputs_[2] = elements; - inputs_[3] = key; - inputs_[4] = current_capacity; - } - - LOperand* context() { return inputs_[0]; } - LOperand* object() { return inputs_[1]; } - LOperand* elements() { return inputs_[2]; } - LOperand* key() { return inputs_[3]; } - LOperand* current_capacity() { return inputs_[4]; } - - bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; } - - DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements) - DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements") -}; - - -class LStringAdd final : public LTemplateInstruction<1, 3, 0> { - public: - LStringAdd(LOperand* context, LOperand* left, LOperand* right) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add") - DECLARE_HYDROGEN_ACCESSOR(StringAdd) -}; - - -class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> { - public: - LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) { - inputs_[0] = context; - inputs_[1] = string; - inputs_[2] = index; - } - - LOperand* context() { return inputs_[0]; } - LOperand* string() { return inputs_[1]; } - LOperand* index() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at") - DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt) -}; - - -class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> { - public: - explicit LStringCharFromCode(LOperand* context, LOperand* char_code) { - inputs_[0] = context; - inputs_[1] = char_code; - } - - LOperand* context() { return inputs_[0]; } - LOperand* char_code() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code") - 
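// Illustrative sketch (not V8 code): the LSmiTag/LSmiUntag and
// NumberTag*/TaggedToI instructions above move values between untagged
// machine integers and tagged small-integer (Smi) form. The example below is
// a generic one-bit tagging scheme -- V8's real Smi layout differs by
// pointer size -- where a low bit of 0 marks a Smi, tagging is a left shift,
// and untagging is an arithmetic right shift.
#include <cassert>
#include <cstdint>

constexpr int kSmiTagSize = 1;
constexpr std::int64_t kSmiTagMask = (std::int64_t{1} << kSmiTagSize) - 1;

std::int64_t SmiTag(std::int64_t value) {
  // Shift on the unsigned type so tagging a negative value is well defined.
  return static_cast<std::int64_t>(static_cast<std::uint64_t>(value)
                                   << kSmiTagSize);
}

std::int64_t SmiUntag(std::int64_t tagged) {
  return tagged >> kSmiTagSize;  // arithmetic right shift on V8's targets
}

bool IsSmi(std::int64_t tagged) { return (tagged & kSmiTagMask) == 0; }

int main() {
  std::int64_t tagged = SmiTag(-42);
  assert(IsSmi(tagged));
  assert(SmiUntag(tagged) == -42);
  return 0;
}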
DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode) -}; - - -class LCheckValue final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckValue(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value") - DECLARE_HYDROGEN_ACCESSOR(CheckValue) -}; - - -class LCheckArrayBufferNotNeutered final - : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckArrayBufferNotNeutered(LOperand* view) { inputs_[0] = view; } - - LOperand* view() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered, - "check-array-buffer-not-neutered") - DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered) -}; - - -class LCheckInstanceType final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckInstanceType(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type") - DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType) -}; - - -class LCheckMaps final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckMaps(LOperand* value = NULL) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps") - DECLARE_HYDROGEN_ACCESSOR(CheckMaps) -}; - - -class LCheckSmi final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LCheckSmi(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi") -}; - - -class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckNonSmi(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi") - DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject) -}; - - -class LClampDToUint8 final : public LTemplateInstruction<1, 1, 1> { - public: - LClampDToUint8(LOperand* unclamped, LOperand* temp) { - inputs_[0] = unclamped; - temps_[0] = temp; - } - - LOperand* unclamped() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8") -}; - - -class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LClampIToUint8(LOperand* unclamped) { - inputs_[0] = unclamped; - } - - LOperand* unclamped() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8") -}; - - -class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> { - public: - LClampTToUint8(LOperand* unclamped, LOperand* temp) { - inputs_[0] = unclamped; - temps_[0] = temp; - } - - LOperand* unclamped() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8") -}; - - -class LAllocate final : public LTemplateInstruction<1, 2, 2> { - public: - LAllocate(LOperand* context, - LOperand* size, - LOperand* temp1, - LOperand* temp2) { - inputs_[0] = context; - inputs_[1] = size; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* context() { return inputs_[0]; } - LOperand* size() { return inputs_[1]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate") - DECLARE_HYDROGEN_ACCESSOR(Allocate) -}; - -class LFastAllocate final : public LTemplateInstruction<1, 1, 2> { - public: - LFastAllocate(LOperand* size, LOperand* 
temp1, LOperand* temp2) { - inputs_[0] = size; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* size() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate") - DECLARE_HYDROGEN_ACCESSOR(Allocate) -}; - -class LTypeof final : public LTemplateInstruction<1, 2, 0> { - public: - LTypeof(LOperand* context, LOperand* value) { - inputs_[0] = context; - inputs_[1] = value; - } - - LOperand* context() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof") -}; - - -class LTypeofIsAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LTypeofIsAndBranch(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch") - DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch) - - Handle type_literal() { return hydrogen()->type_literal(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LOsrEntry final : public LTemplateInstruction<0, 0, 0> { - public: - LOsrEntry() {} - - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry") -}; - - -class LStackCheck final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LStackCheck(LOperand* context) { - inputs_[0] = context; - } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check") - DECLARE_HYDROGEN_ACCESSOR(StackCheck) - - Label* done_label() { return &done_label_; } - - private: - Label done_label_; -}; - - -class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> { - public: - LForInPrepareMap(LOperand* context, LOperand* object) { - inputs_[0] = context; - inputs_[1] = object; - } - - LOperand* context() { return inputs_[0]; } - LOperand* object() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map") -}; - - -class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LForInCacheArray(LOperand* map) { - inputs_[0] = map; - } - - LOperand* map() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array") - - int idx() { - return HForInCacheArray::cast(this->hydrogen_value())->idx(); - } -}; - - -class LCheckMapValue final : public LTemplateInstruction<0, 2, 0> { - public: - LCheckMapValue(LOperand* value, LOperand* map) { - inputs_[0] = value; - inputs_[1] = map; - } - - LOperand* value() { return inputs_[0]; } - LOperand* map() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value") -}; - - -class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> { - public: - LLoadFieldByIndex(LOperand* object, LOperand* index) { - inputs_[0] = object; - inputs_[1] = index; - } - - LOperand* object() { return inputs_[0]; } - LOperand* index() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index") -}; - - -class LChunkBuilder; -class LPlatformChunk final : public LChunk { - public: - LPlatformChunk(CompilationInfo* info, HGraph* graph) - : LChunk(info, graph) { } - - int GetNextSpillIndex(RegisterKind kind); - LOperand* GetNextSpillSlot(RegisterKind kind); -}; - - -class LChunkBuilder final : public LChunkBuilderBase { - public: - LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator) - : 
LChunkBuilderBase(info, graph), - current_instruction_(NULL), - current_block_(NULL), - next_block_(NULL), - allocator_(allocator) {} - - // Build the sequence for the graph. - LPlatformChunk* Build(); - - // Declare methods that deal with the individual node types. -#define DECLARE_DO(type) LInstruction* Do##type(H##type* node); - HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) -#undef DECLARE_DO - - LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend); - - static bool HasMagicNumberForDivisor(int32_t divisor); - - LInstruction* DoMathFloor(HUnaryMathOperation* instr); - LInstruction* DoMathRound(HUnaryMathOperation* instr); - LInstruction* DoMathFround(HUnaryMathOperation* instr); - LInstruction* DoMathAbs(HUnaryMathOperation* instr); - LInstruction* DoMathLog(HUnaryMathOperation* instr); - LInstruction* DoMathCos(HUnaryMathOperation* instr); - LInstruction* DoMathSin(HUnaryMathOperation* instr); - LInstruction* DoMathExp(HUnaryMathOperation* instr); - LInstruction* DoMathSqrt(HUnaryMathOperation* instr); - LInstruction* DoMathPowHalf(HUnaryMathOperation* instr); - LInstruction* DoMathClz32(HUnaryMathOperation* instr); - LInstruction* DoDivByPowerOf2I(HDiv* instr); - LInstruction* DoDivByConstI(HDiv* instr); - LInstruction* DoDivI(HDiv* instr); - LInstruction* DoModByPowerOf2I(HMod* instr); - LInstruction* DoModByConstI(HMod* instr); - LInstruction* DoModI(HMod* instr); - LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr); - LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr); - LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr); - - private: - // Methods for getting operands for Use / Define / Temp. - LUnallocated* ToUnallocated(Register reg); - LUnallocated* ToUnallocated(DoubleRegister reg); - - // Methods for setting up define-use relationships. - MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand); - MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register); - MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value, - DoubleRegister fixed_register); - - // A value that is guaranteed to be allocated to a register. - // Operand created by UseRegister is guaranteed to be live until the end of - // instruction. This means that register allocator will not reuse it's - // register for any other operand inside instruction. - // Operand created by UseRegisterAtStart is guaranteed to be live only at - // instruction start. Register allocator is free to assign the same register - // to some other operand used inside instruction (i.e. temporary or - // output). - MUST_USE_RESULT LOperand* UseRegister(HValue* value); - MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value); - - // An input operand in a register that may be trashed. - MUST_USE_RESULT LOperand* UseTempRegister(HValue* value); - - // An input operand in a register or stack slot. - MUST_USE_RESULT LOperand* Use(HValue* value); - MUST_USE_RESULT LOperand* UseAtStart(HValue* value); - - // An input operand in a register, stack slot or a constant operand. - MUST_USE_RESULT LOperand* UseOrConstant(HValue* value); - MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value); - - // An input operand in a register or a constant operand. - MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value); - MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value); - - // An input operand in a constant operand. - MUST_USE_RESULT LOperand* UseConstant(HValue* value); - - // An input operand in register, stack slot or a constant operand. 
- // Will not be moved to a register even if one is freely available. - MUST_USE_RESULT LOperand* UseAny(HValue* value) override; - - // Temporary operand that must be in a register. - MUST_USE_RESULT LUnallocated* TempRegister(); - MUST_USE_RESULT LUnallocated* TempDoubleRegister(); - MUST_USE_RESULT LOperand* FixedTemp(Register reg); - MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg); - - // Methods for setting up define-use relationships. - // Return the same instruction that they are passed. - LInstruction* Define(LTemplateResultInstruction<1>* instr, - LUnallocated* result); - LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr); - LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr, - int index); - LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr); - LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr, - Register reg); - LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr, - DoubleRegister reg); - LInstruction* AssignEnvironment(LInstruction* instr); - LInstruction* AssignPointerMap(LInstruction* instr); - - enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY }; - - // By default we assume that instruction sequences generated for calls - // cannot deoptimize eagerly and we do not attach environment to this - // instruction. - LInstruction* MarkAsCall( - LInstruction* instr, - HInstruction* hinstr, - CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY); - - void VisitInstruction(HInstruction* current); - void AddInstruction(LInstruction* instr, HInstruction* current); - - void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block); - LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr); - LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr); - LInstruction* DoArithmeticD(Token::Value op, - HArithmeticBinaryOperation* instr); - LInstruction* DoArithmeticT(Token::Value op, - HBinaryOperation* instr); - - HInstruction* current_instruction_; - HBasicBlock* current_block_; - HBasicBlock* next_block_; - LAllocator* allocator_; - - DISALLOW_COPY_AND_ASSIGN(LChunkBuilder); -}; - -#undef DECLARE_HYDROGEN_ACCESSOR -#undef DECLARE_CONCRETE_INSTRUCTION - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_MIPS64_LITHIUM_MIPS_H_ diff --git a/src/crankshaft/ppc/OWNERS b/src/crankshaft/ppc/OWNERS deleted file mode 100644 index 752e8e3d81..0000000000 --- a/src/crankshaft/ppc/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -jyan@ca.ibm.com -dstence@us.ibm.com -joransiu@ca.ibm.com -mbrandy@us.ibm.com -michael_dawson@ca.ibm.com -bjaideep@ca.ibm.com diff --git a/src/crankshaft/ppc/lithium-codegen-ppc.cc b/src/crankshaft/ppc/lithium-codegen-ppc.cc deleted file mode 100644 index b29493bc52..0000000000 --- a/src/crankshaft/ppc/lithium-codegen-ppc.cc +++ /dev/null @@ -1,5637 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#include "src/crankshaft/ppc/lithium-codegen-ppc.h" - -#include "src/base/bits.h" -#include "src/builtins/builtins-constructor.h" -#include "src/code-factory.h" -#include "src/code-stubs.h" -#include "src/crankshaft/ppc/lithium-gap-resolver-ppc.h" -#include "src/ic/ic.h" -#include "src/ic/stub-cache.h" - -namespace v8 { -namespace internal { - - -class SafepointGenerator final : public CallWrapper { - public: - SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers, - Safepoint::DeoptMode mode) - : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {} - virtual ~SafepointGenerator() {} - - void BeforeCall(int call_size) const override {} - - void AfterCall() const override { - codegen_->RecordSafepoint(pointers_, deopt_mode_); - } - - private: - LCodeGen* codegen_; - LPointerMap* pointers_; - Safepoint::DeoptMode deopt_mode_; -}; - -LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope( - LCodeGen* codegen) - : codegen_(codegen) { - DCHECK(codegen_->info()->is_calling()); - DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); - codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters; - StoreRegistersStateStub stub(codegen_->isolate()); - codegen_->masm_->CallStub(&stub); -} - -LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() { - DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters); - RestoreRegistersStateStub stub(codegen_->isolate()); - codegen_->masm_->CallStub(&stub); - codegen_->expected_safepoint_kind_ = Safepoint::kSimple; -} - -#define __ masm()-> - -bool LCodeGen::GenerateCode() { - LPhase phase("Z_Code generation", chunk()); - DCHECK(is_unused()); - status_ = GENERATING; - - // Open a frame scope to indicate that there is a frame on the stack. The - // NONE indicates that the scope shouldn't actually generate code to set up - // the frame (that is done in GeneratePrologue). 
- FrameScope frame_scope(masm_, StackFrame::NONE); - - bool rc = GeneratePrologue() && GenerateBody() && GenerateDeferredCode() && - GenerateJumpTable() && GenerateSafepointTable(); - if (FLAG_enable_embedded_constant_pool && !rc) { - masm()->AbortConstantPoolBuilding(); - } - return rc; -} - - -void LCodeGen::FinishCode(Handle code) { - DCHECK(is_done()); - code->set_stack_slots(GetTotalFrameSlotCount()); - code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); - PopulateDeoptimizationData(code); -} - - -void LCodeGen::SaveCallerDoubles() { - DCHECK(info()->saves_caller_doubles()); - DCHECK(NeedsEagerFrame()); - Comment(";;; Save clobbered callee double registers"); - int count = 0; - BitVector* doubles = chunk()->allocated_double_registers(); - BitVector::Iterator save_iterator(doubles); - while (!save_iterator.Done()) { - __ stfd(DoubleRegister::from_code(save_iterator.Current()), - MemOperand(sp, count * kDoubleSize)); - save_iterator.Advance(); - count++; - } -} - - -void LCodeGen::RestoreCallerDoubles() { - DCHECK(info()->saves_caller_doubles()); - DCHECK(NeedsEagerFrame()); - Comment(";;; Restore clobbered callee double registers"); - BitVector* doubles = chunk()->allocated_double_registers(); - BitVector::Iterator save_iterator(doubles); - int count = 0; - while (!save_iterator.Done()) { - __ lfd(DoubleRegister::from_code(save_iterator.Current()), - MemOperand(sp, count * kDoubleSize)); - save_iterator.Advance(); - count++; - } -} - - -bool LCodeGen::GeneratePrologue() { - DCHECK(is_generating()); - - if (info()->IsOptimizing()) { - ProfileEntryHookStub::MaybeCallEntryHook(masm_); - - // r4: Callee's JS function. - // cp: Callee's context. - // pp: Callee's constant pool pointer (if enabled) - // fp: Caller's frame pointer. - // lr: Caller's pc. - // ip: Our own function entry (required by the prologue) - } - - int prologue_offset = masm_->pc_offset(); - - if (prologue_offset) { - // Prologue logic requires it's starting address in ip and the - // corresponding offset from the function entry. - prologue_offset += Instruction::kInstrSize; - __ addi(ip, ip, Operand(prologue_offset)); - } - info()->set_prologue_offset(prologue_offset); - if (NeedsEagerFrame()) { - if (info()->IsStub()) { - __ StubPrologue(StackFrame::STUB, ip, prologue_offset); - } else { - __ Prologue(info()->GeneratePreagedPrologue(), ip, prologue_offset); - } - frame_is_built_ = true; - } - - // Reserve space for the stack slots needed by the code. - int slots = GetStackSlotCount(); - if (slots > 0) { - __ subi(sp, sp, Operand(slots * kPointerSize)); - if (FLAG_debug_code) { - __ Push(r3, r4); - __ li(r0, Operand(slots)); - __ mtctr(r0); - __ addi(r3, sp, Operand((slots + 2) * kPointerSize)); - __ mov(r4, Operand(kSlotsZapValue)); - Label loop; - __ bind(&loop); - __ StorePU(r4, MemOperand(r3, -kPointerSize)); - __ bdnz(&loop); - __ Pop(r3, r4); - } - } - - if (info()->saves_caller_doubles()) { - SaveCallerDoubles(); - } - return !is_aborted(); -} - - -void LCodeGen::DoPrologue(LPrologue* instr) { - Comment(";;; Prologue begin"); - - // Possibly allocate a local context. - if (info()->scope()->NeedsContext()) { - Comment(";;; Allocate local context"); - bool need_write_barrier = true; - // Argument to NewContext is the function, which is in r4. 
- int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; - Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt; - if (info()->scope()->is_script_scope()) { - __ push(r4); - __ Push(info()->scope()->scope_info()); - __ CallRuntime(Runtime::kNewScriptContext); - deopt_mode = Safepoint::kLazyDeopt; - } else { - if (slots <= ConstructorBuiltins::MaximumFunctionContextSlots()) { - Callable callable = CodeFactory::FastNewFunctionContext( - isolate(), info()->scope()->scope_type()); - __ mov(FastNewFunctionContextDescriptor::SlotsRegister(), - Operand(slots)); - __ Call(callable.code(), RelocInfo::CODE_TARGET); - // Result of the FastNewFunctionContext builtin is always in new space. - need_write_barrier = false; - } else { - __ push(r4); - __ Push(Smi::FromInt(info()->scope()->scope_type())); - __ CallRuntime(Runtime::kNewFunctionContext); - } - } - RecordSafepoint(deopt_mode); - - // Context is returned in both r3 and cp. It replaces the context - // passed to us. It's saved in the stack and kept live in cp. - __ mr(cp, r3); - __ StoreP(r3, MemOperand(fp, StandardFrameConstants::kContextOffset)); - // Copy any necessary parameters into the context. - int num_parameters = info()->scope()->num_parameters(); - int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0; - for (int i = first_parameter; i < num_parameters; i++) { - Variable* var = (i == -1) ? info()->scope()->receiver() - : info()->scope()->parameter(i); - if (var->IsContextSlot()) { - int parameter_offset = StandardFrameConstants::kCallerSPOffset + - (num_parameters - 1 - i) * kPointerSize; - // Load parameter from stack. - __ LoadP(r3, MemOperand(fp, parameter_offset)); - // Store it in the context. - MemOperand target = ContextMemOperand(cp, var->index()); - __ StoreP(r3, target, r0); - // Update the write barrier. This clobbers r6 and r3. 
- if (need_write_barrier) { - __ RecordWriteContextSlot(cp, target.offset(), r3, r6, - GetLinkRegisterState(), kSaveFPRegs); - } else if (FLAG_debug_code) { - Label done; - __ JumpIfInNewSpace(cp, r3, &done); - __ Abort(kExpectedNewSpaceObject); - __ bind(&done); - } - } - } - Comment(";;; End allocate local context"); - } - - Comment(";;; Prologue end"); -} - -void LCodeGen::GenerateOsrPrologue() { UNREACHABLE(); } - -void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { - if (instr->IsCall()) { - EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); - } - if (!instr->IsLazyBailout() && !instr->IsGap()) { - safepoints_.BumpLastLazySafepointIndex(); - } -} - - -bool LCodeGen::GenerateDeferredCode() { - DCHECK(is_generating()); - if (deferred_.length() > 0) { - for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { - LDeferredCode* code = deferred_[i]; - - HValue* value = - instructions_->at(code->instruction_index())->hydrogen_value(); - RecordAndWritePosition(value->position()); - - Comment( - ";;; <@%d,#%d> " - "-------------------- Deferred %s --------------------", - code->instruction_index(), code->instr()->hydrogen_value()->id(), - code->instr()->Mnemonic()); - __ bind(code->entry()); - if (NeedsDeferredFrame()) { - Comment(";;; Build frame"); - DCHECK(!frame_is_built_); - DCHECK(info()->IsStub()); - frame_is_built_ = true; - __ mov(scratch0(), Operand(StackFrame::TypeToMarker(StackFrame::STUB))); - __ PushCommonFrame(scratch0()); - Comment(";;; Deferred code"); - } - code->Generate(); - if (NeedsDeferredFrame()) { - Comment(";;; Destroy frame"); - DCHECK(frame_is_built_); - __ PopCommonFrame(scratch0()); - frame_is_built_ = false; - } - __ b(code->exit()); - } - } - - return !is_aborted(); -} - - -bool LCodeGen::GenerateJumpTable() { - // Check that the jump table is accessible from everywhere in the function - // code, i.e. that offsets to the table can be encoded in the 24bit signed - // immediate of a branch instruction. - // To simplify we consider the code size from the first instruction to the - // end of the jump table. We also don't consider the pc load delta. - // Each entry in the jump table generates one instruction and inlines one - // 32bit data after it. - if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) + - jump_table_.length() * 7)) { - Abort(kGeneratedCodeIsTooLarge); - } - - if (jump_table_.length() > 0) { - Label needs_frame, call_deopt_entry; - - Comment(";;; -------------------- Jump table --------------------"); - Address base = jump_table_[0].address; - - Register entry_offset = scratch0(); - - int length = jump_table_.length(); - for (int i = 0; i < length; i++) { - Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i]; - __ bind(&table_entry->label); - - DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type); - Address entry = table_entry->address; - DeoptComment(table_entry->deopt_info); - - // Second-level deopt table entries are contiguous and small, so instead - // of loading the full, absolute address of each one, load an immediate - // offset which will be added to the base address later. - __ mov(entry_offset, Operand(entry - base)); - - if (table_entry->needs_frame) { - DCHECK(!info()->saves_caller_doubles()); - Comment(";;; call deopt with frame"); - __ PushCommonFrame(); - __ b(&needs_frame, SetLK); - } else { - __ b(&call_deopt_entry, SetLK); - } - } - - if (needs_frame.is_linked()) { - __ bind(&needs_frame); - // This variant of deopt can only be used with stubs. 
Since we don't - // have a function pointer to install in the stack frame that we're - // building, install a special marker there instead. - __ mov(ip, Operand(StackFrame::TypeToMarker(StackFrame::STUB))); - __ push(ip); - DCHECK(info()->IsStub()); - } - - Comment(";;; call deopt"); - __ bind(&call_deopt_entry); - - if (info()->saves_caller_doubles()) { - DCHECK(info()->IsStub()); - RestoreCallerDoubles(); - } - - // Add the base address to the offset previously loaded in entry_offset. - __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base))); - __ add(ip, entry_offset, ip); - __ Jump(ip); - } - - // The deoptimization jump table is the last part of the instruction - // sequence. Mark the generated code as done unless we bailed out. - if (!is_aborted()) status_ = DONE; - return !is_aborted(); -} - - -bool LCodeGen::GenerateSafepointTable() { - DCHECK(is_done()); - safepoints_.Emit(masm(), GetTotalFrameSlotCount()); - return !is_aborted(); -} - - -Register LCodeGen::ToRegister(int code) const { - return Register::from_code(code); -} - - -DoubleRegister LCodeGen::ToDoubleRegister(int code) const { - return DoubleRegister::from_code(code); -} - - -Register LCodeGen::ToRegister(LOperand* op) const { - DCHECK(op->IsRegister()); - return ToRegister(op->index()); -} - - -Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { - if (op->IsRegister()) { - return ToRegister(op->index()); - } else if (op->IsConstantOperand()) { - LConstantOperand* const_op = LConstantOperand::cast(op); - HConstant* constant = chunk_->LookupConstant(const_op); - Handle literal = constant->handle(isolate()); - Representation r = chunk_->LookupLiteralRepresentation(const_op); - if (r.IsInteger32()) { - AllowDeferredHandleDereference get_number; - DCHECK(literal->IsNumber()); - __ LoadIntLiteral(scratch, static_cast(literal->Number())); - } else if (r.IsDouble()) { - Abort(kEmitLoadRegisterUnsupportedDoubleImmediate); - } else { - DCHECK(r.IsSmiOrTagged()); - __ Move(scratch, literal); - } - return scratch; - } else if (op->IsStackSlot()) { - __ LoadP(scratch, ToMemOperand(op)); - return scratch; - } - UNREACHABLE(); -} - - -void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op, - Register dst) { - DCHECK(IsInteger32(const_op)); - HConstant* constant = chunk_->LookupConstant(const_op); - int32_t value = constant->Integer32Value(); - if (IsSmi(const_op)) { - __ LoadSmiLiteral(dst, Smi::FromInt(value)); - } else { - __ LoadIntLiteral(dst, value); - } -} - - -DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const { - DCHECK(op->IsDoubleRegister()); - return ToDoubleRegister(op->index()); -} - - -Handle LCodeGen::ToHandle(LConstantOperand* op) const { - HConstant* constant = chunk_->LookupConstant(op); - DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); - return constant->handle(isolate()); -} - - -bool LCodeGen::IsInteger32(LConstantOperand* op) const { - return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32(); -} - - -bool LCodeGen::IsSmi(LConstantOperand* op) const { - return chunk_->LookupLiteralRepresentation(op).IsSmi(); -} - - -int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { - return ToRepresentation(op, Representation::Integer32()); -} - - -intptr_t LCodeGen::ToRepresentation(LConstantOperand* op, - const Representation& r) const { - HConstant* constant = chunk_->LookupConstant(op); - int32_t value = constant->Integer32Value(); - if (r.IsInteger32()) return value; - DCHECK(r.IsSmiOrTagged()); - return reinterpret_cast(Smi::FromInt(value)); -} 
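The Smi branch of ToRepresentation() above returns the raw bits of a tagged small integer. As an illustrative sketch (not part of the deleted file, and assuming the Smi layout V8 used at the time: a zero tag bit, with the payload shifted up by 1 on 32-bit targets and by 32 on 64-bit targets), the returned value is:

// Illustrative only; mirrors what reinterpret_cast of Smi::FromInt(value) yields
// for the targets handled in this file.
#include <cstdint>

intptr_t SmiBitsFromInt(int32_t value) {
#if V8_TARGET_ARCH_PPC64
  return static_cast<intptr_t>(value) << 32;  // payload in the upper word, tag bit is 0
#else
  return static_cast<intptr_t>(value) << 1;   // 31-bit payload above the zero tag bit
#endif
}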
- - -Smi* LCodeGen::ToSmi(LConstantOperand* op) const { - HConstant* constant = chunk_->LookupConstant(op); - return Smi::FromInt(constant->Integer32Value()); -} - - -double LCodeGen::ToDouble(LConstantOperand* op) const { - HConstant* constant = chunk_->LookupConstant(op); - DCHECK(constant->HasDoubleValue()); - return constant->DoubleValue(); -} - - -Operand LCodeGen::ToOperand(LOperand* op) { - if (op->IsConstantOperand()) { - LConstantOperand* const_op = LConstantOperand::cast(op); - HConstant* constant = chunk()->LookupConstant(const_op); - Representation r = chunk_->LookupLiteralRepresentation(const_op); - if (r.IsSmi()) { - DCHECK(constant->HasSmiValue()); - return Operand(Smi::FromInt(constant->Integer32Value())); - } else if (r.IsInteger32()) { - DCHECK(constant->HasInteger32Value()); - return Operand(constant->Integer32Value()); - } else if (r.IsDouble()) { - Abort(kToOperandUnsupportedDoubleImmediate); - } - DCHECK(r.IsTagged()); - return Operand(constant->handle(isolate())); - } else if (op->IsRegister()) { - return Operand(ToRegister(op)); - } else if (op->IsDoubleRegister()) { - Abort(kToOperandIsDoubleRegisterUnimplemented); - return Operand::Zero(); - } - // Stack slots not implemented, use ToMemOperand instead. - UNREACHABLE(); -} - - -static int ArgumentsOffsetWithoutFrame(int index) { - DCHECK(index < 0); - return -(index + 1) * kPointerSize; -} - - -MemOperand LCodeGen::ToMemOperand(LOperand* op) const { - DCHECK(!op->IsRegister()); - DCHECK(!op->IsDoubleRegister()); - DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); - if (NeedsEagerFrame()) { - return MemOperand(fp, FrameSlotToFPOffset(op->index())); - } else { - // Retrieve parameter without eager stack-frame relative to the - // stack-pointer. - return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index())); - } -} - - -MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const { - DCHECK(op->IsDoubleStackSlot()); - if (NeedsEagerFrame()) { - return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize); - } else { - // Retrieve parameter without eager stack-frame relative to the - // stack-pointer. - return MemOperand(sp, - ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize); - } -} - - -void LCodeGen::WriteTranslation(LEnvironment* environment, - Translation* translation) { - if (environment == NULL) return; - - // The translation includes one command per value in the environment. 
- int translation_size = environment->translation_size(); - - WriteTranslation(environment->outer(), translation); - WriteTranslationFrame(environment, translation); - - int object_index = 0; - int dematerialized_index = 0; - for (int i = 0; i < translation_size; ++i) { - LOperand* value = environment->values()->at(i); - AddToTranslation( - environment, translation, value, environment->HasTaggedValueAt(i), - environment->HasUint32ValueAt(i), &object_index, &dematerialized_index); - } -} - - -void LCodeGen::AddToTranslation(LEnvironment* environment, - Translation* translation, LOperand* op, - bool is_tagged, bool is_uint32, - int* object_index_pointer, - int* dematerialized_index_pointer) { - if (op == LEnvironment::materialization_marker()) { - int object_index = (*object_index_pointer)++; - if (environment->ObjectIsDuplicateAt(object_index)) { - int dupe_of = environment->ObjectDuplicateOfAt(object_index); - translation->DuplicateObject(dupe_of); - return; - } - int object_length = environment->ObjectLengthAt(object_index); - if (environment->ObjectIsArgumentsAt(object_index)) { - translation->BeginArgumentsObject(object_length); - } else { - translation->BeginCapturedObject(object_length); - } - int dematerialized_index = *dematerialized_index_pointer; - int env_offset = environment->translation_size() + dematerialized_index; - *dematerialized_index_pointer += object_length; - for (int i = 0; i < object_length; ++i) { - LOperand* value = environment->values()->at(env_offset + i); - AddToTranslation(environment, translation, value, - environment->HasTaggedValueAt(env_offset + i), - environment->HasUint32ValueAt(env_offset + i), - object_index_pointer, dematerialized_index_pointer); - } - return; - } - - if (op->IsStackSlot()) { - int index = op->index(); - if (is_tagged) { - translation->StoreStackSlot(index); - } else if (is_uint32) { - translation->StoreUint32StackSlot(index); - } else { - translation->StoreInt32StackSlot(index); - } - } else if (op->IsDoubleStackSlot()) { - int index = op->index(); - translation->StoreDoubleStackSlot(index); - } else if (op->IsRegister()) { - Register reg = ToRegister(op); - if (is_tagged) { - translation->StoreRegister(reg); - } else if (is_uint32) { - translation->StoreUint32Register(reg); - } else { - translation->StoreInt32Register(reg); - } - } else if (op->IsDoubleRegister()) { - DoubleRegister reg = ToDoubleRegister(op); - translation->StoreDoubleRegister(reg); - } else if (op->IsConstantOperand()) { - HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op)); - int src_index = DefineDeoptimizationLiteral(constant->handle(isolate())); - translation->StoreLiteral(src_index); - } else { - UNREACHABLE(); - } -} - - -void LCodeGen::CallCode(Handle code, RelocInfo::Mode mode, - LInstruction* instr) { - CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT); -} - - -void LCodeGen::CallCodeGeneric(Handle code, RelocInfo::Mode mode, - LInstruction* instr, - SafepointMode safepoint_mode) { - DCHECK(instr != NULL); - __ Call(code, mode); - RecordSafepointWithLazyDeopt(instr, safepoint_mode); - - // Signal that we don't inline smi code before these stubs in the - // optimizing code generator. 
- if (code->kind() == Code::COMPARE_IC) { - __ nop(); - } -} - - -void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments, - LInstruction* instr, SaveFPRegsMode save_doubles) { - DCHECK(instr != NULL); - - __ CallRuntime(function, num_arguments, save_doubles); - - RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); -} - - -void LCodeGen::LoadContextFromDeferred(LOperand* context) { - if (context->IsRegister()) { - __ Move(cp, ToRegister(context)); - } else if (context->IsStackSlot()) { - __ LoadP(cp, ToMemOperand(context)); - } else if (context->IsConstantOperand()) { - HConstant* constant = - chunk_->LookupConstant(LConstantOperand::cast(context)); - __ Move(cp, Handle::cast(constant->handle(isolate()))); - } else { - UNREACHABLE(); - } -} - - -void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc, - LInstruction* instr, LOperand* context) { - LoadContextFromDeferred(context); - __ CallRuntimeSaveDoubles(id); - RecordSafepointWithRegisters(instr->pointer_map(), argc, - Safepoint::kNoLazyDeopt); -} - - -void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment, - Safepoint::DeoptMode mode) { - environment->set_has_been_used(); - if (!environment->HasBeenRegistered()) { - // Physical stack frame layout: - // -x ............. -4 0 ..................................... y - // [incoming arguments] [spill slots] [pushed outgoing arguments] - - // Layout of the environment: - // 0 ..................................................... size-1 - // [parameters] [locals] [expression stack including arguments] - - // Layout of the translation: - // 0 ........................................................ size - 1 + 4 - // [expression stack including arguments] [locals] [4 words] [parameters] - // |>------------ translation_size ------------<| - - int frame_count = 0; - int jsframe_count = 0; - for (LEnvironment* e = environment; e != NULL; e = e->outer()) { - ++frame_count; - if (e->frame_type() == JS_FUNCTION) { - ++jsframe_count; - } - } - Translation translation(&translations_, frame_count, jsframe_count, zone()); - WriteTranslation(environment, &translation); - int deoptimization_index = deoptimizations_.length(); - int pc_offset = masm()->pc_offset(); - environment->Register(deoptimization_index, translation.index(), - (mode == Safepoint::kLazyDeopt) ? 
pc_offset : -1); - deoptimizations_.Add(environment, zone()); - } -} - -void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr, - DeoptimizeReason deopt_reason, - Deoptimizer::BailoutType bailout_type, - CRegister cr) { - LEnvironment* environment = instr->environment(); - RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); - DCHECK(environment->HasBeenRegistered()); - int id = environment->deoptimization_index(); - Address entry = - Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); - if (entry == NULL) { - Abort(kBailoutWasNotPrepared); - return; - } - - if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) { - CRegister alt_cr = cr6; - Register scratch = scratch0(); - ExternalReference count = ExternalReference::stress_deopt_count(isolate()); - Label no_deopt; - DCHECK(!alt_cr.is(cr)); - __ Push(r4, scratch); - __ mov(scratch, Operand(count)); - __ lwz(r4, MemOperand(scratch)); - __ subi(r4, r4, Operand(1)); - __ cmpi(r4, Operand::Zero(), alt_cr); - __ bne(&no_deopt, alt_cr); - __ li(r4, Operand(FLAG_deopt_every_n_times)); - __ stw(r4, MemOperand(scratch)); - __ Pop(r4, scratch); - - __ Call(entry, RelocInfo::RUNTIME_ENTRY); - __ bind(&no_deopt); - __ stw(r4, MemOperand(scratch)); - __ Pop(r4, scratch); - } - - if (info()->ShouldTrapOnDeopt()) { - __ stop("trap_on_deopt", cond, kDefaultStopCode, cr); - } - - Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id); - - DCHECK(info()->IsStub() || frame_is_built_); - // Go through jump table if we need to handle condition, build frame, or - // restore caller doubles. - if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) { - DeoptComment(deopt_info); - __ Call(entry, RelocInfo::RUNTIME_ENTRY); - } else { - Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type, - !frame_is_built_); - // We often have several deopts to the same entry, reuse the last - // jump entry if this is the case. - if (FLAG_trace_deopt || isolate()->is_profiling() || - jump_table_.is_empty() || - !table_entry.IsEquivalentTo(jump_table_.last())) { - jump_table_.Add(table_entry, zone()); - } - __ b(cond, &jump_table_.last().label, cr); - } -} - -void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, - DeoptimizeReason deopt_reason, CRegister cr) { - Deoptimizer::BailoutType bailout_type = - info()->IsStub() ? 
Deoptimizer::LAZY : Deoptimizer::EAGER; - DeoptimizeIf(condition, instr, deopt_reason, bailout_type, cr); -} - - -void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr, - SafepointMode safepoint_mode) { - if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { - RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); - } else { - DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - RecordSafepointWithRegisters(instr->pointer_map(), 0, - Safepoint::kLazyDeopt); - } -} - - -void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind, - int arguments, Safepoint::DeoptMode deopt_mode) { - DCHECK(expected_safepoint_kind_ == kind); - - const ZoneList* operands = pointers->GetNormalizedOperands(); - Safepoint safepoint = - safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode); - for (int i = 0; i < operands->length(); i++) { - LOperand* pointer = operands->at(i); - if (pointer->IsStackSlot()) { - safepoint.DefinePointerSlot(pointer->index(), zone()); - } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { - safepoint.DefinePointerRegister(ToRegister(pointer), zone()); - } - } -} - - -void LCodeGen::RecordSafepoint(LPointerMap* pointers, - Safepoint::DeoptMode deopt_mode) { - RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode); -} - - -void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) { - LPointerMap empty_pointers(zone()); - RecordSafepoint(&empty_pointers, deopt_mode); -} - - -void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, - int arguments, - Safepoint::DeoptMode deopt_mode) { - RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode); -} - - -static const char* LabelType(LLabel* label) { - if (label->is_loop_header()) return " (loop header)"; - if (label->is_osr_entry()) return " (OSR entry)"; - return ""; -} - - -void LCodeGen::DoLabel(LLabel* label) { - Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------", - current_instruction_, label->hydrogen_value()->id(), - label->block_id(), LabelType(label)); - __ bind(label->label()); - current_block_ = label->block_id(); - DoGap(label); -} - - -void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); } - - -void LCodeGen::DoGap(LGap* gap) { - for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION; - i++) { - LGap::InnerPosition inner_pos = static_cast(i); - LParallelMove* move = gap->GetParallelMove(inner_pos); - if (move != NULL) DoParallelMove(move); - } -} - - -void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); } - - -void LCodeGen::DoParameter(LParameter* instr) { - // Nothing to do. -} - - -void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { - GenerateOsrPrologue(); -} - - -void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - DCHECK(dividend.is(ToRegister(instr->result()))); - - // Theoretically, a variation of the branch-free code for integer division by - // a power of 2 (calculating the remainder via an additional multiplication - // (which gets simplified to an 'and') and subtraction) should be faster, and - // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to - // indicate that positive dividends are heavily favored, so the branching - // version performs better. 
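The branching version argued for in the comment above reduces, in C++ terms, to roughly the following sketch (illustrative, not code from the deleted file; it assumes abs_divisor is a power of two and mirrors the cmpwi/neg/ExtractBitRange/neg sequence that follows):

#include <cstdint>

int32_t ModByPowerOf2(int32_t dividend, uint32_t abs_divisor) {
  uint32_t mask = abs_divisor - 1;  // the low bits hold the remainder
  if (dividend >= 0) {
    return static_cast<int32_t>(static_cast<uint32_t>(dividend) & mask);
  }
  // Negate through unsigned arithmetic so kMinInt does not overflow,
  // mask the magnitude, then negate the masked bits back.
  uint32_t magnitude = 0u - static_cast<uint32_t>(dividend);
  return -static_cast<int32_t>(magnitude & mask);
}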
-  HMod* hmod = instr->hydrogen();
-  int32_t shift = WhichPowerOf2Abs(divisor);
-  Label dividend_is_not_negative, done;
-  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
-    __ cmpwi(dividend, Operand::Zero());
-    __ bge(&dividend_is_not_negative);
-    if (shift) {
-      // Note that this is correct even for kMinInt operands.
-      __ neg(dividend, dividend);
-      __ ExtractBitRange(dividend, dividend, shift - 1, 0);
-      __ neg(dividend, dividend, LeaveOE, SetRC);
-      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-        DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, cr0);
-      }
-    } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      __ li(dividend, Operand::Zero());
-    } else {
-      DeoptimizeIf(al, instr, DeoptimizeReason::kMinusZero);
-    }
-    __ b(&done);
-  }
-
-  __ bind(&dividend_is_not_negative);
-  if (shift) {
-    __ ExtractBitRange(dividend, dividend, shift - 1, 0);
-  } else {
-    __ li(dividend, Operand::Zero());
-  }
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoModByConstI(LModByConstI* instr) {
-  Register dividend = ToRegister(instr->dividend());
-  int32_t divisor = instr->divisor();
-  Register result = ToRegister(instr->result());
-  DCHECK(!dividend.is(result));
-
-  if (divisor == 0) {
-    DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
-    return;
-  }
-
-  __ TruncatingDiv(result, dividend, Abs(divisor));
-  __ mov(ip, Operand(Abs(divisor)));
-  __ mullw(result, result, ip);
-  __ sub(result, dividend, result, LeaveOE, SetRC);
-
-  // Check for negative zero.
-  HMod* hmod = instr->hydrogen();
-  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    Label remainder_not_zero;
-    __ bne(&remainder_not_zero, cr0);
-    __ cmpwi(dividend, Operand::Zero());
-    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
-    __ bind(&remainder_not_zero);
-  }
-}
-
-
-void LCodeGen::DoModI(LModI* instr) {
-  HMod* hmod = instr->hydrogen();
-  Register left_reg = ToRegister(instr->left());
-  Register right_reg = ToRegister(instr->right());
-  Register result_reg = ToRegister(instr->result());
-  Register scratch = scratch0();
-  bool can_overflow = hmod->CheckFlag(HValue::kCanOverflow);
-  Label done;
-
-  if (can_overflow) {
-    __ li(r0, Operand::Zero());  // clear xer
-    __ mtxer(r0);
-  }
-
-  __ divw(scratch, left_reg, right_reg, SetOE, SetRC);
-
-  // Check for x % 0.
-  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
-    __ cmpwi(right_reg, Operand::Zero());
-    DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
-  }
-
-  // Check for kMinInt % -1, divw will return undefined, which is not what we
-  // want. We have to deopt if we care about -0, because we can't return that.
-  if (can_overflow) {
-    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      DeoptimizeIf(overflow, instr, DeoptimizeReason::kMinusZero, cr0);
-    } else {
-      if (CpuFeatures::IsSupported(ISELECT)) {
-        __ isel(overflow, result_reg, r0, result_reg, cr0);
-        __ boverflow(&done, cr0);
-      } else {
-        Label no_overflow_possible;
-        __ bnooverflow(&no_overflow_possible, cr0);
-        __ li(result_reg, Operand::Zero());
-        __ b(&done);
-        __ bind(&no_overflow_possible);
-      }
-    }
-  }
-
-  __ mullw(scratch, right_reg, scratch);
-  __ sub(result_reg, left_reg, scratch, LeaveOE, SetRC);
-
-  // If we care about -0, test if the dividend is <0 and the result is 0.
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { - __ bne(&done, cr0); - __ cmpwi(left_reg, Operand::Zero()); - DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); - } - - __ bind(&done); -} - - -void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - Register result = ToRegister(instr->result()); - DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); - DCHECK(!result.is(dividend)); - - // Check for (0 / -x) that will produce negative zero. - HDiv* hdiv = instr->hydrogen(); - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { - __ cmpwi(dividend, Operand::Zero()); - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); - } - // Check for (kMinInt / -1). - if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { - __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); - __ cmpw(dividend, r0); - DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow); - } - - int32_t shift = WhichPowerOf2Abs(divisor); - - // Deoptimize if remainder will not be 0. - if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) { - __ TestBitRange(dividend, shift - 1, 0, r0); - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, cr0); - } - - if (divisor == -1) { // Nice shortcut, not needed for correctness. - __ neg(result, dividend); - return; - } - if (shift == 0) { - __ mr(result, dividend); - } else { - if (shift == 1) { - __ srwi(result, dividend, Operand(31)); - } else { - __ srawi(result, dividend, 31); - __ srwi(result, result, Operand(32 - shift)); - } - __ add(result, dividend, result); - __ srawi(result, result, shift); - } - if (divisor < 0) __ neg(result, result); -} - - -void LCodeGen::DoDivByConstI(LDivByConstI* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - Register result = ToRegister(instr->result()); - DCHECK(!dividend.is(result)); - - if (divisor == 0) { - DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero); - return; - } - - // Check for (0 / -x) that will produce negative zero. - HDiv* hdiv = instr->hydrogen(); - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { - __ cmpwi(dividend, Operand::Zero()); - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); - } - - __ TruncatingDiv(result, dividend, Abs(divisor)); - if (divisor < 0) __ neg(result, result); - - if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { - Register scratch = scratch0(); - __ mov(ip, Operand(divisor)); - __ mullw(scratch, result, ip); - __ cmpw(scratch, dividend); - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision); - } -} - - -// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. -void LCodeGen::DoDivI(LDivI* instr) { - HBinaryOperation* hdiv = instr->hydrogen(); - const Register dividend = ToRegister(instr->dividend()); - const Register divisor = ToRegister(instr->divisor()); - Register result = ToRegister(instr->result()); - bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow); - - DCHECK(!dividend.is(result)); - DCHECK(!divisor.is(result)); - - if (can_overflow) { - __ li(r0, Operand::Zero()); // clear xer - __ mtxer(r0); - } - - __ divw(result, dividend, divisor, SetOE, SetRC); - - // Check for x / 0. - if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { - __ cmpwi(divisor, Operand::Zero()); - DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero); - } - - // Check for (0 / -x) that will produce negative zero. 
-  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    Label dividend_not_zero;
-    __ cmpwi(dividend, Operand::Zero());
-    __ bne(&dividend_not_zero);
-    __ cmpwi(divisor, Operand::Zero());
-    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
-    __ bind(&dividend_not_zero);
-  }
-
-  // Check for (kMinInt / -1).
-  if (can_overflow) {
-    if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
-      DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
-    } else {
-      // When truncating, we want kMinInt / -1 = kMinInt.
-      if (CpuFeatures::IsSupported(ISELECT)) {
-        __ isel(overflow, result, dividend, result, cr0);
-      } else {
-        Label no_overflow_possible;
-        __ bnooverflow(&no_overflow_possible, cr0);
-        __ mr(result, dividend);
-        __ bind(&no_overflow_possible);
-      }
-    }
-  }
-
-#if V8_TARGET_ARCH_PPC64
-  __ extsw(result, result);
-#endif
-
-  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
-    // Deoptimize if remainder is not 0.
-    Register scratch = scratch0();
-    __ mullw(scratch, divisor, result);
-    __ cmpw(dividend, scratch);
-    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
-  }
-}
-
-
-void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
-  HBinaryOperation* hdiv = instr->hydrogen();
-  Register dividend = ToRegister(instr->dividend());
-  Register result = ToRegister(instr->result());
-  int32_t divisor = instr->divisor();
-  bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt);
-
-  // If the divisor is positive, things are easy: There can be no deopts and we
-  // can simply do an arithmetic right shift.
-  int32_t shift = WhichPowerOf2Abs(divisor);
-  if (divisor > 0) {
-    if (shift || !result.is(dividend)) {
-      __ srawi(result, dividend, shift);
-    }
-    return;
-  }
-
-  // If the divisor is negative, we have to negate and handle edge cases.
-  OEBit oe = LeaveOE;
-#if V8_TARGET_ARCH_PPC64
-  if (divisor == -1 && can_overflow) {
-    __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
-    __ cmpw(dividend, r0);
-    DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
-  }
-#else
-  if (can_overflow) {
-    __ li(r0, Operand::Zero());  // clear xer
-    __ mtxer(r0);
-    oe = SetOE;
-  }
-#endif
-
-  __ neg(result, dividend, oe, SetRC);
-  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, cr0);
-  }
-
-// If the negation could not overflow, simply shifting is OK.
-#if !V8_TARGET_ARCH_PPC64
-  if (!can_overflow) {
-#endif
-    if (shift) {
-      __ ShiftRightArithImm(result, result, shift);
-    }
-    return;
-#if !V8_TARGET_ARCH_PPC64
-  }
-
-  // Dividing by -1 is basically negation, unless we overflow.
-  if (divisor == -1) {
-    DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
-    return;
-  }
-
-  Label overflow, done;
-  __ boverflow(&overflow, cr0);
-  __ srawi(result, result, shift);
-  __ b(&done);
-  __ bind(&overflow);
-  __ mov(result, Operand(kMinInt / divisor));
-  __ bind(&done);
-#endif
-}
-
-
-void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
-  Register dividend = ToRegister(instr->dividend());
-  int32_t divisor = instr->divisor();
-  Register result = ToRegister(instr->result());
-  DCHECK(!dividend.is(result));
-
-  if (divisor == 0) {
-    DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
-    return;
-  }
-
-  // Check for (0 / -x) that will produce negative zero.
-  HMathFloorOfDiv* hdiv = instr->hydrogen();
-  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
-    __ cmpwi(dividend, Operand::Zero());
-    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
-  }
-
-  // Easy case: We need no dynamic check for the dividend and the flooring
-  // division is the same as the truncating division.
-  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
-      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
-    __ TruncatingDiv(result, dividend, Abs(divisor));
-    if (divisor < 0) __ neg(result, result);
-    return;
-  }
-
-  // In the general case we may need to adjust before and after the truncating
-  // division to get a flooring division.
-  Register temp = ToRegister(instr->temp());
-  DCHECK(!temp.is(dividend) && !temp.is(result));
-  Label needs_adjustment, done;
-  __ cmpwi(dividend, Operand::Zero());
-  __ b(divisor > 0 ? lt : gt, &needs_adjustment);
-  __ TruncatingDiv(result, dividend, Abs(divisor));
-  if (divisor < 0) __ neg(result, result);
-  __ b(&done);
-  __ bind(&needs_adjustment);
-  __ addi(temp, dividend, Operand(divisor > 0 ? 1 : -1));
-  __ TruncatingDiv(result, temp, Abs(divisor));
-  if (divisor < 0) __ neg(result, result);
-  __ subi(result, result, Operand(1));
-  __ bind(&done);
-}
-
-
-// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
-void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
-  HBinaryOperation* hdiv = instr->hydrogen();
-  const Register dividend = ToRegister(instr->dividend());
-  const Register divisor = ToRegister(instr->divisor());
-  Register result = ToRegister(instr->result());
-  bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);
-
-  DCHECK(!dividend.is(result));
-  DCHECK(!divisor.is(result));
-
-  if (can_overflow) {
-    __ li(r0, Operand::Zero());  // clear xer
-    __ mtxer(r0);
-  }
-
-  __ divw(result, dividend, divisor, SetOE, SetRC);
-
-  // Check for x / 0.
-  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
-    __ cmpwi(divisor, Operand::Zero());
-    DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
-  }
-
-  // Check for (0 / -x) that will produce negative zero.
-  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    Label dividend_not_zero;
-    __ cmpwi(dividend, Operand::Zero());
-    __ bne(&dividend_not_zero);
-    __ cmpwi(divisor, Operand::Zero());
-    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
-    __ bind(&dividend_not_zero);
-  }
-
-  // Check for (kMinInt / -1).
-  if (can_overflow) {
-    if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
-      DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
-    } else {
-      // When truncating, we want kMinInt / -1 = kMinInt.
-      if (CpuFeatures::IsSupported(ISELECT)) {
-        __ isel(overflow, result, dividend, result, cr0);
-      } else {
-        Label no_overflow_possible;
-        __ bnooverflow(&no_overflow_possible, cr0);
-        __ mr(result, dividend);
-        __ bind(&no_overflow_possible);
-      }
-    }
-  }
-
-  Label done;
-  Register scratch = scratch0();
-// If both operands have the same sign then we are done.
-#if V8_TARGET_ARCH_PPC64
-  __ xor_(scratch, dividend, divisor);
-  __ cmpwi(scratch, Operand::Zero());
-  __ bge(&done);
-#else
-  __ xor_(scratch, dividend, divisor, SetRC);
-  __ bge(&done, cr0);
-#endif
-
-  // If there is no remainder then we are done.
-  __ mullw(scratch, divisor, result);
-  __ cmpw(dividend, scratch);
-  __ beq(&done);
-
-  // We performed a truncating division. Correct the result.
- __ subi(result, result, Operand(1)); - __ bind(&done); -#if V8_TARGET_ARCH_PPC64 - __ extsw(result, result); -#endif -} - - -void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { - DoubleRegister addend = ToDoubleRegister(instr->addend()); - DoubleRegister multiplier = ToDoubleRegister(instr->multiplier()); - DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand()); - DoubleRegister result = ToDoubleRegister(instr->result()); - - __ fmadd(result, multiplier, multiplicand, addend); -} - - -void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) { - DoubleRegister minuend = ToDoubleRegister(instr->minuend()); - DoubleRegister multiplier = ToDoubleRegister(instr->multiplier()); - DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand()); - DoubleRegister result = ToDoubleRegister(instr->result()); - - __ fmsub(result, multiplier, multiplicand, minuend); -} - - -void LCodeGen::DoMulI(LMulI* instr) { - Register scratch = scratch0(); - Register result = ToRegister(instr->result()); - // Note that result may alias left. - Register left = ToRegister(instr->left()); - LOperand* right_op = instr->right(); - - bool bailout_on_minus_zero = - instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); - bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); - - if (right_op->IsConstantOperand()) { - int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); - - if (bailout_on_minus_zero && (constant < 0)) { - // The case of a null constant will be handled separately. - // If constant is negative and left is null, the result should be -0. - __ cmpi(left, Operand::Zero()); - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); - } - - switch (constant) { - case -1: - if (can_overflow) { -#if V8_TARGET_ARCH_PPC64 - if (instr->hydrogen()->representation().IsSmi()) { -#endif - __ li(r0, Operand::Zero()); // clear xer - __ mtxer(r0); - __ neg(result, left, SetOE, SetRC); - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0); -#if V8_TARGET_ARCH_PPC64 - } else { - __ neg(result, left); - __ TestIfInt32(result, r0); - DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow); - } -#endif - } else { - __ neg(result, left); - } - break; - case 0: - if (bailout_on_minus_zero) { -// If left is strictly negative and the constant is null, the -// result is -0. Deoptimize if required, otherwise return 0. -#if V8_TARGET_ARCH_PPC64 - if (instr->hydrogen()->representation().IsSmi()) { -#endif - __ cmpi(left, Operand::Zero()); -#if V8_TARGET_ARCH_PPC64 - } else { - __ cmpwi(left, Operand::Zero()); - } -#endif - DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); - } - __ li(result, Operand::Zero()); - break; - case 1: - __ Move(result, left); - break; - default: - // Multiplying by powers of two and powers of two plus or minus - // one can be done faster with shifted operands. - // For other constants we emit standard code. - int32_t mask = constant >> 31; - uint32_t constant_abs = (constant + mask) ^ mask; - - if (base::bits::IsPowerOfTwo32(constant_abs)) { - int32_t shift = WhichPowerOf2(constant_abs); - __ ShiftLeftImm(result, left, Operand(shift)); - // Correct the sign of the result if the constant is negative. - if (constant < 0) __ neg(result, result); - } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) { - int32_t shift = WhichPowerOf2(constant_abs - 1); - __ ShiftLeftImm(scratch, left, Operand(shift)); - __ add(result, scratch, left); - // Correct the sign of the result if the constant is negative. 
- if (constant < 0) __ neg(result, result); - } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) { - int32_t shift = WhichPowerOf2(constant_abs + 1); - __ ShiftLeftImm(scratch, left, Operand(shift)); - __ sub(result, scratch, left); - // Correct the sign of the result if the constant is negative. - if (constant < 0) __ neg(result, result); - } else { - // Generate standard code. - __ mov(ip, Operand(constant)); - __ Mul(result, left, ip); - } - } - - } else { - DCHECK(right_op->IsRegister()); - Register right = ToRegister(right_op); - - if (can_overflow) { -#if V8_TARGET_ARCH_PPC64 - // result = left * right. - if (instr->hydrogen()->representation().IsSmi()) { - __ SmiUntag(result, left); - __ SmiUntag(scratch, right); - __ Mul(result, result, scratch); - } else { - __ Mul(result, left, right); - } - __ TestIfInt32(result, r0); - DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow); - if (instr->hydrogen()->representation().IsSmi()) { - __ SmiTag(result); - } -#else - // scratch:result = left * right. - if (instr->hydrogen()->representation().IsSmi()) { - __ SmiUntag(result, left); - __ mulhw(scratch, result, right); - __ mullw(result, result, right); - } else { - __ mulhw(scratch, left, right); - __ mullw(result, left, right); - } - __ TestIfInt32(scratch, result, r0); - DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow); -#endif - } else { - if (instr->hydrogen()->representation().IsSmi()) { - __ SmiUntag(result, left); - __ Mul(result, result, right); - } else { - __ Mul(result, left, right); - } - } - - if (bailout_on_minus_zero) { - Label done; -#if V8_TARGET_ARCH_PPC64 - if (instr->hydrogen()->representation().IsSmi()) { -#endif - __ xor_(r0, left, right, SetRC); - __ bge(&done, cr0); -#if V8_TARGET_ARCH_PPC64 - } else { - __ xor_(r0, left, right); - __ cmpwi(r0, Operand::Zero()); - __ bge(&done); - } -#endif - // Bail out if the result is minus zero. - __ cmpi(result, Operand::Zero()); - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); - __ bind(&done); - } - } -} - - -void LCodeGen::DoBitI(LBitI* instr) { - LOperand* left_op = instr->left(); - LOperand* right_op = instr->right(); - DCHECK(left_op->IsRegister()); - Register left = ToRegister(left_op); - Register result = ToRegister(instr->result()); - Operand right(no_reg); - - if (right_op->IsStackSlot()) { - right = Operand(EmitLoadRegister(right_op, ip)); - } else { - DCHECK(right_op->IsRegister() || right_op->IsConstantOperand()); - right = ToOperand(right_op); - - if (right_op->IsConstantOperand() && is_uint16(right.immediate())) { - switch (instr->op()) { - case Token::BIT_AND: - __ andi(result, left, right); - break; - case Token::BIT_OR: - __ ori(result, left, right); - break; - case Token::BIT_XOR: - __ xori(result, left, right); - break; - default: - UNREACHABLE(); - break; - } - return; - } - } - - switch (instr->op()) { - case Token::BIT_AND: - __ And(result, left, right); - break; - case Token::BIT_OR: - __ Or(result, left, right); - break; - case Token::BIT_XOR: - if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) { - __ notx(result, left); - } else { - __ Xor(result, left, right); - } - break; - default: - UNREACHABLE(); - break; - } -} - - -void LCodeGen::DoShiftI(LShiftI* instr) { - // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so - // result may alias either of them. 
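DoShiftI implements JavaScript shift semantics: the count is masked to five bits, and an unsigned right shift whose result does not fit in an int32 must deoptimize when the instruction's result has to be an int32 (instr->can_deopt()). A rough C++ sketch of that SHR case (illustrative, not code from the deleted file):

#include <cstdint>
#include <optional>

// Illustrative sketch of the Token::SHR case in the body that follows: JS takes
// shift counts mod 32, and a logical shift whose uint32 result has the top bit
// set cannot be represented as an int32, which is the kNegativeValue deopt case.
std::optional<int32_t> ShrOrDeopt(uint32_t left, uint32_t right) {
  uint32_t result = left >> (right & 0x1F);
  if (result > 0x7FFFFFFFu) return std::nullopt;  // would need a heap number -> deopt
  return static_cast<int32_t>(result);
}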
- LOperand* right_op = instr->right(); - Register left = ToRegister(instr->left()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - if (right_op->IsRegister()) { - // Mask the right_op operand. - __ andi(scratch, ToRegister(right_op), Operand(0x1F)); - switch (instr->op()) { - case Token::ROR: - // rotate_right(a, b) == rotate_left(a, 32 - b) - __ subfic(scratch, scratch, Operand(32)); - __ rotlw(result, left, scratch); - break; - case Token::SAR: - __ sraw(result, left, scratch); - break; - case Token::SHR: - if (instr->can_deopt()) { - __ srw(result, left, scratch, SetRC); -#if V8_TARGET_ARCH_PPC64 - __ extsw(result, result, SetRC); -#endif - DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, cr0); - } else { - __ srw(result, left, scratch); - } - break; - case Token::SHL: - __ slw(result, left, scratch); -#if V8_TARGET_ARCH_PPC64 - __ extsw(result, result); -#endif - break; - default: - UNREACHABLE(); - break; - } - } else { - // Mask the right_op operand. - int value = ToInteger32(LConstantOperand::cast(right_op)); - uint8_t shift_count = static_cast(value & 0x1F); - switch (instr->op()) { - case Token::ROR: - if (shift_count != 0) { - __ rotrwi(result, left, shift_count); - } else { - __ Move(result, left); - } - break; - case Token::SAR: - if (shift_count != 0) { - __ srawi(result, left, shift_count); - } else { - __ Move(result, left); - } - break; - case Token::SHR: - if (shift_count != 0) { - __ srwi(result, left, Operand(shift_count)); - } else { - if (instr->can_deopt()) { - __ cmpwi(left, Operand::Zero()); - DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue); - } - __ Move(result, left); - } - break; - case Token::SHL: - if (shift_count != 0) { -#if V8_TARGET_ARCH_PPC64 - if (instr->hydrogen_value()->representation().IsSmi()) { - __ sldi(result, left, Operand(shift_count)); -#else - if (instr->hydrogen_value()->representation().IsSmi() && - instr->can_deopt()) { - if (shift_count != 1) { - __ slwi(result, left, Operand(shift_count - 1)); - __ SmiTagCheckOverflow(result, result, scratch); - } else { - __ SmiTagCheckOverflow(result, left, scratch); - } - DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0); -#endif - } else { - __ slwi(result, left, Operand(shift_count)); -#if V8_TARGET_ARCH_PPC64 - __ extsw(result, result); -#endif - } - } else { - __ Move(result, left); - } - break; - default: - UNREACHABLE(); - break; - } - } -} - - -void LCodeGen::DoSubI(LSubI* instr) { - LOperand* right = instr->right(); - Register left = ToRegister(instr->left()); - Register result = ToRegister(instr->result()); - bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); -#if V8_TARGET_ARCH_PPC64 - const bool isInteger = !instr->hydrogen()->representation().IsSmi(); -#else - const bool isInteger = false; -#endif - if (!can_overflow || isInteger) { - if (right->IsConstantOperand()) { - __ Add(result, left, -(ToOperand(right).immediate()), r0); - } else { - __ sub(result, left, EmitLoadRegister(right, ip)); - } - if (can_overflow) { -#if V8_TARGET_ARCH_PPC64 - __ TestIfInt32(result, r0); -#else - __ TestIfInt32(scratch0(), result, r0); -#endif - DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow); - } - - } else { - if (right->IsConstantOperand()) { - __ AddAndCheckForOverflow(result, left, -(ToOperand(right).immediate()), - scratch0(), r0); - } else { - __ SubAndCheckForOverflow(result, left, EmitLoadRegister(right, ip), - scratch0(), r0); - } - DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0); - } -} - - -void 
LCodeGen::DoRSubI(LRSubI* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - LOperand* result = instr->result(); - - DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) && - right->IsConstantOperand()); - - Operand right_operand = ToOperand(right); - if (is_int16(right_operand.immediate())) { - __ subfic(ToRegister(result), ToRegister(left), right_operand); - } else { - __ mov(r0, right_operand); - __ sub(ToRegister(result), r0, ToRegister(left)); - } -} - - -void LCodeGen::DoConstantI(LConstantI* instr) { - __ mov(ToRegister(instr->result()), Operand(instr->value())); -} - - -void LCodeGen::DoConstantS(LConstantS* instr) { - __ LoadSmiLiteral(ToRegister(instr->result()), instr->value()); -} - - -void LCodeGen::DoConstantD(LConstantD* instr) { - DCHECK(instr->result()->IsDoubleRegister()); - DoubleRegister result = ToDoubleRegister(instr->result()); -#if V8_HOST_ARCH_IA32 - // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator - // builds. - uint64_t bits = instr->bits(); - if ((bits & V8_UINT64_C(0x7FF8000000000000)) == - V8_UINT64_C(0x7FF0000000000000)) { - uint32_t lo = static_cast(bits); - uint32_t hi = static_cast(bits >> 32); - __ mov(ip, Operand(lo)); - __ mov(scratch0(), Operand(hi)); - __ MovInt64ToDouble(result, scratch0(), ip); - return; - } -#endif - double v = instr->value(); - __ LoadDoubleLiteral(result, v, scratch0()); -} - - -void LCodeGen::DoConstantE(LConstantE* instr) { - __ mov(ToRegister(instr->result()), Operand(instr->value())); -} - - -void LCodeGen::DoConstantT(LConstantT* instr) { - Handle object = instr->value(isolate()); - AllowDeferredHandleDereference smi_check; - __ Move(ToRegister(instr->result()), object); -} - - -MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index, - String::Encoding encoding) { - if (index->IsConstantOperand()) { - int offset = ToInteger32(LConstantOperand::cast(index)); - if (encoding == String::TWO_BYTE_ENCODING) { - offset *= kUC16Size; - } - STATIC_ASSERT(kCharSize == 1); - return FieldMemOperand(string, SeqString::kHeaderSize + offset); - } - Register scratch = scratch0(); - DCHECK(!scratch.is(string)); - DCHECK(!scratch.is(ToRegister(index))); - if (encoding == String::ONE_BYTE_ENCODING) { - __ add(scratch, string, ToRegister(index)); - } else { - STATIC_ASSERT(kUC16Size == 2); - __ ShiftLeftImm(scratch, ToRegister(index), Operand(1)); - __ add(scratch, string, scratch); - } - return FieldMemOperand(scratch, SeqString::kHeaderSize); -} - - -void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { - String::Encoding encoding = instr->hydrogen()->encoding(); - Register string = ToRegister(instr->string()); - Register result = ToRegister(instr->result()); - - if (FLAG_debug_code) { - Register scratch = scratch0(); - __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset)); - __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); - - __ andi(scratch, scratch, - Operand(kStringRepresentationMask | kStringEncodingMask)); - static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; - static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; - __ cmpi(scratch, - Operand(encoding == String::ONE_BYTE_ENCODING ? 
one_byte_seq_type - : two_byte_seq_type)); - __ Check(eq, kUnexpectedStringType); - } - - MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); - if (encoding == String::ONE_BYTE_ENCODING) { - __ lbz(result, operand); - } else { - __ lhz(result, operand); - } -} - - -void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { - String::Encoding encoding = instr->hydrogen()->encoding(); - Register string = ToRegister(instr->string()); - Register value = ToRegister(instr->value()); - - if (FLAG_debug_code) { - Register index = ToRegister(instr->index()); - static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; - static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; - int encoding_mask = - instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING - ? one_byte_seq_type - : two_byte_seq_type; - __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask); - } - - MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); - if (encoding == String::ONE_BYTE_ENCODING) { - __ stb(value, operand); - } else { - __ sth(value, operand); - } -} - - -void LCodeGen::DoAddI(LAddI* instr) { - LOperand* right = instr->right(); - Register left = ToRegister(instr->left()); - Register result = ToRegister(instr->result()); - bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); -#if V8_TARGET_ARCH_PPC64 - const bool isInteger = !(instr->hydrogen()->representation().IsSmi() || - instr->hydrogen()->representation().IsExternal()); -#else - const bool isInteger = false; -#endif - - if (!can_overflow || isInteger) { - if (right->IsConstantOperand()) { - __ Add(result, left, ToOperand(right).immediate(), r0); - } else { - __ add(result, left, EmitLoadRegister(right, ip)); - } -#if V8_TARGET_ARCH_PPC64 - if (can_overflow) { - __ TestIfInt32(result, r0); - DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow); - } -#endif - } else { - if (right->IsConstantOperand()) { - __ AddAndCheckForOverflow(result, left, ToOperand(right).immediate(), - scratch0(), r0); - } else { - __ AddAndCheckForOverflow(result, left, EmitLoadRegister(right, ip), - scratch0(), r0); - } - DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0); - } -} - - -void LCodeGen::DoMathMinMax(LMathMinMax* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - HMathMinMax::Operation operation = instr->hydrogen()->operation(); - Condition cond = (operation == HMathMinMax::kMathMin) ? 
le : ge; - if (instr->hydrogen()->representation().IsSmiOrInteger32()) { - Register left_reg = ToRegister(left); - Register right_reg = EmitLoadRegister(right, ip); - Register result_reg = ToRegister(instr->result()); - Label return_left, done; -#if V8_TARGET_ARCH_PPC64 - if (instr->hydrogen_value()->representation().IsSmi()) { -#endif - __ cmp(left_reg, right_reg); -#if V8_TARGET_ARCH_PPC64 - } else { - __ cmpw(left_reg, right_reg); - } -#endif - if (CpuFeatures::IsSupported(ISELECT)) { - __ isel(cond, result_reg, left_reg, right_reg); - } else { - __ b(cond, &return_left); - __ Move(result_reg, right_reg); - __ b(&done); - __ bind(&return_left); - __ Move(result_reg, left_reg); - __ bind(&done); - } - } else { - DCHECK(instr->hydrogen()->representation().IsDouble()); - DoubleRegister left_reg = ToDoubleRegister(left); - DoubleRegister right_reg = ToDoubleRegister(right); - DoubleRegister result_reg = ToDoubleRegister(instr->result()); - Label check_nan_left, check_zero, return_left, return_right, done; - __ fcmpu(left_reg, right_reg); - __ bunordered(&check_nan_left); - __ beq(&check_zero); - __ b(cond, &return_left); - __ b(&return_right); - - __ bind(&check_zero); - __ fcmpu(left_reg, kDoubleRegZero); - __ bne(&return_left); // left == right != 0. - - // At this point, both left and right are either 0 or -0. - if (operation == HMathMinMax::kMathMin) { - // Min: The algorithm is: -((-L) + (-R)), which in case of L and R being - // different registers is most efficiently expressed as -((-L) - R). - __ fneg(left_reg, left_reg); - if (left_reg.is(right_reg)) { - __ fadd(result_reg, left_reg, right_reg); - } else { - __ fsub(result_reg, left_reg, right_reg); - } - __ fneg(result_reg, result_reg); - } else { - // Max: The following works because +0 + -0 == +0 - __ fadd(result_reg, left_reg, right_reg); - } - __ b(&done); - - __ bind(&check_nan_left); - __ fcmpu(left_reg, left_reg); - __ bunordered(&return_left); // left == NaN. - - __ bind(&return_right); - if (!right_reg.is(result_reg)) { - __ fmr(result_reg, right_reg); - } - __ b(&done); - - __ bind(&return_left); - if (!left_reg.is(result_reg)) { - __ fmr(result_reg, left_reg); - } - __ bind(&done); - } -} - - -void LCodeGen::DoArithmeticD(LArithmeticD* instr) { - DoubleRegister left = ToDoubleRegister(instr->left()); - DoubleRegister right = ToDoubleRegister(instr->right()); - DoubleRegister result = ToDoubleRegister(instr->result()); - switch (instr->op()) { - case Token::ADD: - if (CpuFeatures::IsSupported(VSX)) { - __ xsadddp(result, left, right); - } else { - __ fadd(result, left, right); - } - break; - case Token::SUB: - if (CpuFeatures::IsSupported(VSX)) { - __ xssubdp(result, left, right); - } else { - __ fsub(result, left, right); - } - break; - case Token::MUL: - if (CpuFeatures::IsSupported(VSX)) { - __ xsmuldp(result, left, right); - } else { - __ fmul(result, left, right); - } - break; - case Token::DIV: - if (CpuFeatures::IsSupported(VSX)) { - __ xsdivdp(result, left, right); - } else { - __ fdiv(result, left, right); - } - break; - case Token::MOD: { - __ PrepareCallCFunction(0, 2, scratch0()); - __ MovToFloatParameters(left, right); - __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), - 0, 2); - // Move the result in the double result register. 
- __ MovFromFloatResult(result); - break; - } - default: - UNREACHABLE(); - break; - } -} - - -void LCodeGen::DoArithmeticT(LArithmeticT* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - DCHECK(ToRegister(instr->left()).is(r4)); - DCHECK(ToRegister(instr->right()).is(r3)); - DCHECK(ToRegister(instr->result()).is(r3)); - - UNREACHABLE(); -} - - -template <class InstrType> -void LCodeGen::EmitBranch(InstrType instr, Condition cond, CRegister cr) { - int left_block = instr->TrueDestination(chunk_); - int right_block = instr->FalseDestination(chunk_); - - int next_block = GetNextEmittedBlock(); - - if (right_block == left_block || cond == al) { - EmitGoto(left_block); - } else if (left_block == next_block) { - __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block), cr); - } else if (right_block == next_block) { - __ b(cond, chunk_->GetAssemblyLabel(left_block), cr); - } else { - __ b(cond, chunk_->GetAssemblyLabel(left_block), cr); - __ b(chunk_->GetAssemblyLabel(right_block)); - } -} - - -template <class InstrType> -void LCodeGen::EmitTrueBranch(InstrType instr, Condition cond, CRegister cr) { - int true_block = instr->TrueDestination(chunk_); - __ b(cond, chunk_->GetAssemblyLabel(true_block), cr); -} - - -template <class InstrType> -void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond, CRegister cr) { - int false_block = instr->FalseDestination(chunk_); - __ b(cond, chunk_->GetAssemblyLabel(false_block), cr); -} - - -void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); } - - -void LCodeGen::DoBranch(LBranch* instr) { - Representation r = instr->hydrogen()->value()->representation(); - DoubleRegister dbl_scratch = double_scratch0(); - const uint crZOrNaNBits = (1 << (31 - Assembler::encode_crbit(cr7, CR_EQ)) | - 1 << (31 - Assembler::encode_crbit(cr7, CR_FU))); - - if (r.IsInteger32()) { - DCHECK(!info()->IsStub()); - Register reg = ToRegister(instr->value()); - __ cmpwi(reg, Operand::Zero()); - EmitBranch(instr, ne); - } else if (r.IsSmi()) { - DCHECK(!info()->IsStub()); - Register reg = ToRegister(instr->value()); - __ cmpi(reg, Operand::Zero()); - EmitBranch(instr, ne); - } else if (r.IsDouble()) { - DCHECK(!info()->IsStub()); - DoubleRegister reg = ToDoubleRegister(instr->value()); - // Test the double value. Zero and NaN are false. - __ fcmpu(reg, kDoubleRegZero, cr7); - __ mfcr(r0); - __ andi(r0, r0, Operand(crZOrNaNBits)); - EmitBranch(instr, eq, cr0); - } else { - DCHECK(r.IsTagged()); - Register reg = ToRegister(instr->value()); - HType type = instr->hydrogen()->value()->type(); - if (type.IsBoolean()) { - DCHECK(!info()->IsStub()); - __ CompareRoot(reg, Heap::kTrueValueRootIndex); - EmitBranch(instr, eq); - } else if (type.IsSmi()) { - DCHECK(!info()->IsStub()); - __ cmpi(reg, Operand::Zero()); - EmitBranch(instr, ne); - } else if (type.IsJSArray()) { - DCHECK(!info()->IsStub()); - EmitBranch(instr, al); - } else if (type.IsHeapNumber()) { - DCHECK(!info()->IsStub()); - __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); - // Test the double value. Zero and NaN are false. - __ fcmpu(dbl_scratch, kDoubleRegZero, cr7); - __ mfcr(r0); - __ andi(r0, r0, Operand(crZOrNaNBits)); - EmitBranch(instr, eq, cr0); - } else if (type.IsString()) { - DCHECK(!info()->IsStub()); - __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset)); - __ cmpi(ip, Operand::Zero()); - EmitBranch(instr, ne); - } else { - ToBooleanHints expected = instr->hydrogen()->expected_input_types(); - // Avoid deopts in the case where we've never executed this path before. 
- if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny; - - if (expected & ToBooleanHint::kUndefined) { - // undefined -> false. - __ CompareRoot(reg, Heap::kUndefinedValueRootIndex); - __ beq(instr->FalseLabel(chunk_)); - } - if (expected & ToBooleanHint::kBoolean) { - // Boolean -> its value. - __ CompareRoot(reg, Heap::kTrueValueRootIndex); - __ beq(instr->TrueLabel(chunk_)); - __ CompareRoot(reg, Heap::kFalseValueRootIndex); - __ beq(instr->FalseLabel(chunk_)); - } - if (expected & ToBooleanHint::kNull) { - // 'null' -> false. - __ CompareRoot(reg, Heap::kNullValueRootIndex); - __ beq(instr->FalseLabel(chunk_)); - } - - if (expected & ToBooleanHint::kSmallInteger) { - // Smis: 0 -> false, all other -> true. - __ cmpi(reg, Operand::Zero()); - __ beq(instr->FalseLabel(chunk_)); - __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); - } else if (expected & ToBooleanHint::kNeedsMap) { - // If we need a map later and have a Smi -> deopt. - __ TestIfSmi(reg, r0); - DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0); - } - - const Register map = scratch0(); - if (expected & ToBooleanHint::kNeedsMap) { - __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset)); - - if (expected & ToBooleanHint::kCanBeUndetectable) { - // Undetectable -> false. - __ lbz(ip, FieldMemOperand(map, Map::kBitFieldOffset)); - __ TestBit(ip, Map::kIsUndetectable, r0); - __ bne(instr->FalseLabel(chunk_), cr0); - } - } - - if (expected & ToBooleanHint::kReceiver) { - // spec object -> true. - __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE); - __ bge(instr->TrueLabel(chunk_)); - } - - if (expected & ToBooleanHint::kString) { - // String value -> false iff empty. - Label not_string; - __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE); - __ bge(&not_string); - __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset)); - __ cmpi(ip, Operand::Zero()); - __ bne(instr->TrueLabel(chunk_)); - __ b(instr->FalseLabel(chunk_)); - __ bind(&not_string); - } - - if (expected & ToBooleanHint::kSymbol) { - // Symbol value -> true. - __ CompareInstanceType(map, ip, SYMBOL_TYPE); - __ beq(instr->TrueLabel(chunk_)); - } - - if (expected & ToBooleanHint::kHeapNumber) { - // heap number -> false iff +0, -0, or NaN. - Label not_heap_number; - __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); - __ bne(&not_heap_number); - __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); - // Test the double value. Zero and NaN are false. - __ fcmpu(dbl_scratch, kDoubleRegZero, cr7); - __ mfcr(r0); - __ andi(r0, r0, Operand(crZOrNaNBits)); - __ bne(instr->FalseLabel(chunk_), cr0); - __ b(instr->TrueLabel(chunk_)); - __ bind(&not_heap_number); - } - - if (expected != ToBooleanHint::kAny) { - // We've seen something for the first time -> deopt. - // This can only happen if we are not generic already. 
- DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject); - } - } - } -} - - -void LCodeGen::EmitGoto(int block) { - if (!IsNextEmittedBlock(block)) { - __ b(chunk_->GetAssemblyLabel(LookupDestination(block))); - } -} - - -void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); } - - -Condition LCodeGen::TokenToCondition(Token::Value op) { - Condition cond = kNoCondition; - switch (op) { - case Token::EQ: - case Token::EQ_STRICT: - cond = eq; - break; - case Token::NE: - case Token::NE_STRICT: - cond = ne; - break; - case Token::LT: - cond = lt; - break; - case Token::GT: - cond = gt; - break; - case Token::LTE: - cond = le; - break; - case Token::GTE: - cond = ge; - break; - case Token::IN: - case Token::INSTANCEOF: - default: - UNREACHABLE(); - } - return cond; -} - - -void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - bool is_unsigned = - instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) || - instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32); - Condition cond = TokenToCondition(instr->op()); - - if (left->IsConstantOperand() && right->IsConstantOperand()) { - // We can statically evaluate the comparison. - double left_val = ToDouble(LConstantOperand::cast(left)); - double right_val = ToDouble(LConstantOperand::cast(right)); - int next_block = Token::EvalComparison(instr->op(), left_val, right_val) - ? instr->TrueDestination(chunk_) - : instr->FalseDestination(chunk_); - EmitGoto(next_block); - } else { - if (instr->is_double()) { - // Compare left and right operands as doubles and load the - // resulting flags into the normal status register. - __ fcmpu(ToDoubleRegister(left), ToDoubleRegister(right)); - // If a NaN is involved, i.e. the result is unordered, - // jump to false block label. - __ bunordered(instr->FalseLabel(chunk_)); - } else { - if (right->IsConstantOperand()) { - int32_t value = ToInteger32(LConstantOperand::cast(right)); - if (instr->hydrogen_value()->representation().IsSmi()) { - if (is_unsigned) { - __ CmplSmiLiteral(ToRegister(left), Smi::FromInt(value), r0); - } else { - __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0); - } - } else { - if (is_unsigned) { - __ Cmplwi(ToRegister(left), Operand(value), r0); - } else { - __ Cmpwi(ToRegister(left), Operand(value), r0); - } - } - } else if (left->IsConstantOperand()) { - int32_t value = ToInteger32(LConstantOperand::cast(left)); - if (instr->hydrogen_value()->representation().IsSmi()) { - if (is_unsigned) { - __ CmplSmiLiteral(ToRegister(right), Smi::FromInt(value), r0); - } else { - __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0); - } - } else { - if (is_unsigned) { - __ Cmplwi(ToRegister(right), Operand(value), r0); - } else { - __ Cmpwi(ToRegister(right), Operand(value), r0); - } - } - // We commuted the operands, so commute the condition. 
- cond = CommuteCondition(cond); - } else if (instr->hydrogen_value()->representation().IsSmi()) { - if (is_unsigned) { - __ cmpl(ToRegister(left), ToRegister(right)); - } else { - __ cmp(ToRegister(left), ToRegister(right)); - } - } else { - if (is_unsigned) { - __ cmplw(ToRegister(left), ToRegister(right)); - } else { - __ cmpw(ToRegister(left), ToRegister(right)); - } - } - } - EmitBranch(instr, cond); - } -} - - -void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { - Register left = ToRegister(instr->left()); - Register right = ToRegister(instr->right()); - - __ cmp(left, right); - EmitBranch(instr, eq); -} - - -void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { - if (instr->hydrogen()->representation().IsTagged()) { - Register input_reg = ToRegister(instr->object()); - __ mov(ip, Operand(factory()->the_hole_value())); - __ cmp(input_reg, ip); - EmitBranch(instr, eq); - return; - } - - DoubleRegister input_reg = ToDoubleRegister(instr->object()); - __ fcmpu(input_reg, input_reg); - EmitFalseBranch(instr, ordered); - - Register scratch = scratch0(); - __ MovDoubleHighToInt(scratch, input_reg); - __ Cmpi(scratch, Operand(kHoleNanUpper32), r0); - EmitBranch(instr, eq); -} - - -Condition LCodeGen::EmitIsString(Register input, Register temp1, - Label* is_not_string, - SmiCheck check_needed = INLINE_SMI_CHECK) { - if (check_needed == INLINE_SMI_CHECK) { - __ JumpIfSmi(input, is_not_string); - } - __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE); - - return lt; -} - - -void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { - Register reg = ToRegister(instr->value()); - Register temp1 = ToRegister(instr->temp()); - - SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject() - ? OMIT_SMI_CHECK - : INLINE_SMI_CHECK; - Condition true_cond = - EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed); - - EmitBranch(instr, true_cond); -} - - -void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { - Register input_reg = EmitLoadRegister(instr->value(), ip); - __ TestIfSmi(input_reg, r0); - EmitBranch(instr, eq, cr0); -} - - -void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { - Register input = ToRegister(instr->value()); - Register temp = ToRegister(instr->temp()); - - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - __ JumpIfSmi(input, instr->FalseLabel(chunk_)); - } - __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset)); - __ lbz(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); - __ TestBit(temp, Map::kIsUndetectable, r0); - EmitBranch(instr, ne, cr0); -} - - -static Condition ComputeCompareCondition(Token::Value op) { - switch (op) { - case Token::EQ_STRICT: - case Token::EQ: - return eq; - case Token::LT: - return lt; - case Token::GT: - return gt; - case Token::LTE: - return le; - case Token::GTE: - return ge; - default: - UNREACHABLE(); - } -} - - -void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - DCHECK(ToRegister(instr->left()).is(r4)); - DCHECK(ToRegister(instr->right()).is(r3)); - - Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code(); - CallCode(code, RelocInfo::CODE_TARGET, instr); - __ CompareRoot(r3, Heap::kTrueValueRootIndex); - EmitBranch(instr, eq); -} - - -static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { - InstanceType from = instr->from(); - InstanceType to = instr->to(); - if (from == FIRST_TYPE) return to; - DCHECK(from == to || to == LAST_TYPE); 
- return from; -} - - -static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { - InstanceType from = instr->from(); - InstanceType to = instr->to(); - if (from == to) return eq; - if (to == LAST_TYPE) return ge; - if (from == FIRST_TYPE) return le; - UNREACHABLE(); -} - - -void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { - Register scratch = scratch0(); - Register input = ToRegister(instr->value()); - - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - __ JumpIfSmi(input, instr->FalseLabel(chunk_)); - } - - __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen())); - EmitBranch(instr, BranchCondition(instr->hydrogen())); -} - -// Branches to a label or falls through with the answer in flags. Trashes -// the temp registers, but not the input. -void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false, - Handle<String> class_name, Register input, - Register temp, Register temp2) { - DCHECK(!input.is(temp)); - DCHECK(!input.is(temp2)); - DCHECK(!temp.is(temp2)); - - __ JumpIfSmi(input, is_false); - - __ CompareObjectType(input, temp, temp2, FIRST_FUNCTION_TYPE); - STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE); - if (String::Equals(isolate()->factory()->Function_string(), class_name)) { - __ bge(is_true); - } else { - __ bge(is_false); - } - - // Check if the constructor in the map is a function. - Register instance_type = ip; - __ GetMapConstructor(temp, temp, temp2, instance_type); - - // Objects with a non-function constructor have class 'Object'. - __ cmpi(instance_type, Operand(JS_FUNCTION_TYPE)); - if (String::Equals(isolate()->factory()->Object_string(), class_name)) { - __ bne(is_true); - } else { - __ bne(is_false); - } - - // temp now contains the constructor function. Grab the - // instance class name from there. - __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset)); - __ LoadP(temp, - FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset)); - // The class name we are testing against is internalized since it's a literal. - // The name in the constructor is internalized because of the way the context - // is booted. This routine isn't expected to work for random API-created - // classes and it doesn't have to because you can't access it with natives - // syntax. Since both sides are internalized it is sufficient to use an - // identity comparison. - __ Cmpi(temp, Operand(class_name), r0); - // End with the answer in flags. -} - -void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { - Register input = ToRegister(instr->value()); - Register temp = scratch0(); - Register temp2 = ToRegister(instr->temp()); - Handle<String> class_name = instr->hydrogen()->class_name(); - - EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), - class_name, input, temp, temp2); - - EmitBranch(instr, eq); -} - -void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { - Register reg = ToRegister(instr->value()); - Register temp = ToRegister(instr->temp()); - - __ LoadP(temp, FieldMemOperand(reg, HeapObject::kMapOffset)); - __ Cmpi(temp, Operand(instr->map()), r0); - EmitBranch(instr, eq); -} - - -void LCodeGen::DoHasInPrototypeChainAndBranch( - LHasInPrototypeChainAndBranch* instr) { - Register const object = ToRegister(instr->object()); - Register const object_map = scratch0(); - Register const object_instance_type = ip; - Register const object_prototype = object_map; - Register const prototype = ToRegister(instr->prototype()); - - // The {object} must be a spec object. 
It's sufficient to know that {object} - // is not a smi, since all other non-spec objects have {null} prototypes and - // will be ruled out below. - if (instr->hydrogen()->ObjectNeedsSmiCheck()) { - __ TestIfSmi(object, r0); - EmitFalseBranch(instr, eq, cr0); - } - - // Loop through the {object}s prototype chain looking for the {prototype}. - __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); - Label loop; - __ bind(&loop); - - // Deoptimize if the object needs to be access checked. - __ lbz(object_instance_type, - FieldMemOperand(object_map, Map::kBitFieldOffset)); - __ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0); - DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, cr0); - // Deoptimize for proxies. - __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE); - DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy); - __ LoadP(object_prototype, - FieldMemOperand(object_map, Map::kPrototypeOffset)); - __ CompareRoot(object_prototype, Heap::kNullValueRootIndex); - EmitFalseBranch(instr, eq); - __ cmp(object_prototype, prototype); - EmitTrueBranch(instr, eq); - __ LoadP(object_map, - FieldMemOperand(object_prototype, HeapObject::kMapOffset)); - __ b(&loop); -} - - -void LCodeGen::DoCmpT(LCmpT* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - Token::Value op = instr->op(); - - Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code(); - CallCode(ic, RelocInfo::CODE_TARGET, instr); - // This instruction also signals no smi code inlined - __ cmpi(r3, Operand::Zero()); - - Condition condition = ComputeCompareCondition(op); - if (CpuFeatures::IsSupported(ISELECT)) { - __ LoadRoot(r4, Heap::kTrueValueRootIndex); - __ LoadRoot(r5, Heap::kFalseValueRootIndex); - __ isel(condition, ToRegister(instr->result()), r4, r5); - } else { - Label true_value, done; - - __ b(condition, &true_value); - - __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); - __ b(&done); - - __ bind(&true_value); - __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex); - - __ bind(&done); - } -} - - -void LCodeGen::DoReturn(LReturn* instr) { - if (FLAG_trace && info()->IsOptimizing()) { - // Push the return value on the stack as the parameter. - // Runtime::TraceExit returns its parameter in r3. We're leaving the code - // managed by the register allocator and tearing down the frame, it's - // safe to write to the context register. - __ push(r3); - __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - __ CallRuntime(Runtime::kTraceExit); - } - if (info()->saves_caller_doubles()) { - RestoreCallerDoubles(); - } - if (instr->has_constant_parameter_count()) { - int parameter_count = ToInteger32(instr->constant_parameter_count()); - int32_t sp_delta = (parameter_count + 1) * kPointerSize; - if (NeedsEagerFrame()) { - masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta); - } else if (sp_delta != 0) { - __ addi(sp, sp, Operand(sp_delta)); - } - } else { - DCHECK(info()->IsStub()); // Functions would need to drop one more value. 
- Register reg = ToRegister(instr->parameter_count()); - // The argument count parameter is a smi - if (NeedsEagerFrame()) { - masm_->LeaveFrame(StackFrame::JAVA_SCRIPT); - } - __ SmiToPtrArrayOffset(r0, reg); - __ add(sp, sp, r0); - } - - __ blr(); -} - - -void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { - Register context = ToRegister(instr->context()); - Register result = ToRegister(instr->result()); - __ LoadP(result, ContextMemOperand(context, instr->slot_index())); - if (instr->hydrogen()->RequiresHoleCheck()) { - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - if (instr->hydrogen()->DeoptimizesOnHole()) { - __ cmp(result, ip); - DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); - } else { - if (CpuFeatures::IsSupported(ISELECT)) { - Register scratch = scratch0(); - __ mov(scratch, Operand(factory()->undefined_value())); - __ cmp(result, ip); - __ isel(eq, result, scratch, result); - } else { - Label skip; - __ cmp(result, ip); - __ bne(&skip); - __ mov(result, Operand(factory()->undefined_value())); - __ bind(&skip); - } - } - } -} - - -void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { - Register context = ToRegister(instr->context()); - Register value = ToRegister(instr->value()); - Register scratch = scratch0(); - MemOperand target = ContextMemOperand(context, instr->slot_index()); - - Label skip_assignment; - - if (instr->hydrogen()->RequiresHoleCheck()) { - __ LoadP(scratch, target); - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(scratch, ip); - if (instr->hydrogen()->DeoptimizesOnHole()) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); - } else { - __ bne(&skip_assignment); - } - } - - __ StoreP(value, target, r0); - if (instr->hydrogen()->NeedsWriteBarrier()) { - SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject() - ? OMIT_SMI_CHECK - : INLINE_SMI_CHECK; - __ RecordWriteContextSlot(context, target.offset(), value, scratch, - GetLinkRegisterState(), kSaveFPRegs, - EMIT_REMEMBERED_SET, check_needed); - } - - __ bind(&skip_assignment); -} - - -void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { - HObjectAccess access = instr->hydrogen()->access(); - int offset = access.offset(); - Register object = ToRegister(instr->object()); - - if (access.IsExternalMemory()) { - Register result = ToRegister(instr->result()); - MemOperand operand = MemOperand(object, offset); - __ LoadRepresentation(result, operand, access.representation(), r0); - return; - } - - if (instr->hydrogen()->representation().IsDouble()) { - DCHECK(access.IsInobject()); - DoubleRegister result = ToDoubleRegister(instr->result()); - __ lfd(result, FieldMemOperand(object, offset)); - return; - } - - Register result = ToRegister(instr->result()); - if (!access.IsInobject()) { - __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); - object = result; - } - - Representation representation = access.representation(); - -#if V8_TARGET_ARCH_PPC64 - // 64-bit Smi optimization - if (representation.IsSmi() && - instr->hydrogen()->representation().IsInteger32()) { - // Read int value directly from upper half of the smi. 
- offset = SmiWordOffset(offset); - representation = Representation::Integer32(); - } -#endif - - __ LoadRepresentation(result, FieldMemOperand(object, offset), representation, - r0); -} - - -void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { - Register scratch = scratch0(); - Register function = ToRegister(instr->function()); - Register result = ToRegister(instr->result()); - - // Get the prototype or initial map from the function. - __ LoadP(result, - FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); - - // Check that the function has a prototype or an initial map. - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(result, ip); - DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); - - // If the function does not have an initial map, we're done. - if (CpuFeatures::IsSupported(ISELECT)) { - // Get the prototype from the initial map (optimistic). - __ LoadP(ip, FieldMemOperand(result, Map::kPrototypeOffset)); - __ CompareObjectType(result, scratch, scratch, MAP_TYPE); - __ isel(eq, result, ip, result); - } else { - Label done; - __ CompareObjectType(result, scratch, scratch, MAP_TYPE); - __ bne(&done); - - // Get the prototype from the initial map. - __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset)); - - // All done. - __ bind(&done); - } -} - - -void LCodeGen::DoLoadRoot(LLoadRoot* instr) { - Register result = ToRegister(instr->result()); - __ LoadRoot(result, instr->index()); -} - - -void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { - Register arguments = ToRegister(instr->arguments()); - Register result = ToRegister(instr->result()); - // There are two words between the frame pointer and the last argument. - // Subtracting from length accounts for one of them add one more. 
- if (instr->length()->IsConstantOperand()) { - int const_length = ToInteger32(LConstantOperand::cast(instr->length())); - if (instr->index()->IsConstantOperand()) { - int const_index = ToInteger32(LConstantOperand::cast(instr->index())); - int index = (const_length - const_index) + 1; - __ LoadP(result, MemOperand(arguments, index * kPointerSize), r0); - } else { - Register index = ToRegister(instr->index()); - __ subfic(result, index, Operand(const_length + 1)); - __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2)); - __ LoadPX(result, MemOperand(arguments, result)); - } - } else if (instr->index()->IsConstantOperand()) { - Register length = ToRegister(instr->length()); - int const_index = ToInteger32(LConstantOperand::cast(instr->index())); - int loc = const_index - 1; - if (loc != 0) { - __ subi(result, length, Operand(loc)); - __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2)); - __ LoadPX(result, MemOperand(arguments, result)); - } else { - __ ShiftLeftImm(result, length, Operand(kPointerSizeLog2)); - __ LoadPX(result, MemOperand(arguments, result)); - } - } else { - Register length = ToRegister(instr->length()); - Register index = ToRegister(instr->index()); - __ sub(result, length, index); - __ addi(result, result, Operand(1)); - __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2)); - __ LoadPX(result, MemOperand(arguments, result)); - } -} - - -void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { - Register external_pointer = ToRegister(instr->elements()); - Register key = no_reg; - ElementsKind elements_kind = instr->elements_kind(); - bool key_is_constant = instr->key()->IsConstantOperand(); - int constant_key = 0; - if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort(kArrayIndexConstantValueTooBig); - } - } else { - key = ToRegister(instr->key()); - } - int element_size_shift = ElementsKindToShiftSize(elements_kind); - bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); - int base_offset = instr->base_offset(); - - if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) { - DoubleRegister result = ToDoubleRegister(instr->result()); - if (key_is_constant) { - __ Add(scratch0(), external_pointer, constant_key << element_size_shift, - r0); - } else { - __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi); - __ add(scratch0(), external_pointer, r0); - } - if (elements_kind == FLOAT32_ELEMENTS) { - __ lfs(result, MemOperand(scratch0(), base_offset)); - } else { // i.e. 
elements_kind == EXTERNAL_DOUBLE_ELEMENTS - __ lfd(result, MemOperand(scratch0(), base_offset)); - } - } else { - Register result = ToRegister(instr->result()); - MemOperand mem_operand = - PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi, - constant_key, element_size_shift, base_offset); - switch (elements_kind) { - case INT8_ELEMENTS: - if (key_is_constant) { - __ LoadByte(result, mem_operand, r0); - } else { - __ lbzx(result, mem_operand); - } - __ extsb(result, result); - break; - case UINT8_ELEMENTS: - case UINT8_CLAMPED_ELEMENTS: - if (key_is_constant) { - __ LoadByte(result, mem_operand, r0); - } else { - __ lbzx(result, mem_operand); - } - break; - case INT16_ELEMENTS: - if (key_is_constant) { - __ LoadHalfWordArith(result, mem_operand, r0); - } else { - __ lhax(result, mem_operand); - } - break; - case UINT16_ELEMENTS: - if (key_is_constant) { - __ LoadHalfWord(result, mem_operand, r0); - } else { - __ lhzx(result, mem_operand); - } - break; - case INT32_ELEMENTS: - if (key_is_constant) { - __ LoadWordArith(result, mem_operand, r0); - } else { - __ lwax(result, mem_operand); - } - break; - case UINT32_ELEMENTS: - if (key_is_constant) { - __ LoadWord(result, mem_operand, r0); - } else { - __ lwzx(result, mem_operand); - } - if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { - __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); - __ cmplw(result, r0); - DeoptimizeIf(ge, instr, DeoptimizeReason::kNegativeValue); - } - break; - case FLOAT32_ELEMENTS: - case FLOAT64_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case DICTIONARY_ELEMENTS: - case FAST_SLOPPY_ARGUMENTS_ELEMENTS: - case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: - case FAST_STRING_WRAPPER_ELEMENTS: - case SLOW_STRING_WRAPPER_ELEMENTS: - case NO_ELEMENTS: - UNREACHABLE(); - break; - } - } -} - - -void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { - Register elements = ToRegister(instr->elements()); - bool key_is_constant = instr->key()->IsConstantOperand(); - Register key = no_reg; - DoubleRegister result = ToDoubleRegister(instr->result()); - Register scratch = scratch0(); - - int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); - bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); - int constant_key = 0; - if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort(kArrayIndexConstantValueTooBig); - } - } else { - key = ToRegister(instr->key()); - } - - int base_offset = instr->base_offset() + constant_key * kDoubleSize; - if (!key_is_constant) { - __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi); - __ add(scratch, elements, r0); - elements = scratch; - } - if (!is_int16(base_offset)) { - __ Add(scratch, elements, base_offset, r0); - base_offset = 0; - elements = scratch; - } - __ lfd(result, MemOperand(elements, base_offset)); - - if (instr->hydrogen()->RequiresHoleCheck()) { - if (is_int16(base_offset + Register::kExponentOffset)) { - __ lwz(scratch, - MemOperand(elements, base_offset + Register::kExponentOffset)); - } else { - __ addi(scratch, elements, Operand(base_offset)); - __ lwz(scratch, MemOperand(scratch, Register::kExponentOffset)); - } - __ Cmpi(scratch, Operand(kHoleNanUpper32), r0); - DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); - } -} - - -void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { - HLoadKeyed* hinstr = 
instr->hydrogen(); - Register elements = ToRegister(instr->elements()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - Register store_base = scratch; - int offset = instr->base_offset(); - - if (instr->key()->IsConstantOperand()) { - LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); - offset += ToInteger32(const_operand) * kPointerSize; - store_base = elements; - } else { - Register key = ToRegister(instr->key()); - // Even though the HLoadKeyed instruction forces the input - // representation for the key to be an integer, the input gets replaced - // during bound check elimination with the index argument to the bounds - // check, which can be tagged, so that case must be handled here, too. - if (hinstr->key()->representation().IsSmi()) { - __ SmiToPtrArrayOffset(r0, key); - } else { - __ ShiftLeftImm(r0, key, Operand(kPointerSizeLog2)); - } - __ add(scratch, elements, r0); - } - - bool requires_hole_check = hinstr->RequiresHoleCheck(); - Representation representation = hinstr->representation(); - -#if V8_TARGET_ARCH_PPC64 - // 64-bit Smi optimization - if (representation.IsInteger32() && - hinstr->elements_kind() == FAST_SMI_ELEMENTS) { - DCHECK(!requires_hole_check); - // Read int value directly from upper half of the smi. - offset = SmiWordOffset(offset); - } -#endif - - __ LoadRepresentation(result, MemOperand(store_base, offset), representation, - r0); - - // Check for the hole value. - if (requires_hole_check) { - if (IsFastSmiElementsKind(hinstr->elements_kind())) { - __ TestIfSmi(result, r0); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0); - } else { - __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); - __ cmp(result, scratch); - DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); - } - } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) { - DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS); - Label done; - __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); - __ cmp(result, scratch); - __ bne(&done); - if (info()->IsStub()) { - // A stub can safely convert the hole to undefined only if the array - // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise - // it needs to bail out. - __ LoadRoot(result, Heap::kArrayProtectorRootIndex); - __ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset)); - __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kProtectorValid), r0); - DeoptimizeIf(ne, instr, DeoptimizeReason::kHole); - } - __ LoadRoot(result, Heap::kUndefinedValueRootIndex); - __ bind(&done); - } -} - - -void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { - if (instr->is_fixed_typed_array()) { - DoLoadKeyedExternalArray(instr); - } else if (instr->hydrogen()->representation().IsDouble()) { - DoLoadKeyedFixedDoubleArray(instr); - } else { - DoLoadKeyedFixedArray(instr); - } -} - - -MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base, - bool key_is_constant, bool key_is_smi, - int constant_key, - int element_size_shift, - int base_offset) { - Register scratch = scratch0(); - - if (key_is_constant) { - return MemOperand(base, (constant_key << element_size_shift) + base_offset); - } - - bool needs_shift = - (element_size_shift != (key_is_smi ? 
kSmiTagSize + kSmiShiftSize : 0)); - - if (!(base_offset || needs_shift)) { - return MemOperand(base, key); - } - - if (needs_shift) { - __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi); - key = scratch; - } - - if (base_offset) { - __ Add(scratch, key, base_offset, r0); - } - - return MemOperand(base, scratch); -} - - -void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { - Register scratch = scratch0(); - Register result = ToRegister(instr->result()); - - if (instr->hydrogen()->from_inlined()) { - __ subi(result, sp, Operand(2 * kPointerSize)); - } else if (instr->hydrogen()->arguments_adaptor()) { - // Check if the calling frame is an arguments adaptor frame. - __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ LoadP( - result, - MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset)); - __ cmpi(result, - Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); - - // Result is the frame pointer for the frame if not adapted and for the real - // frame below the adaptor frame if adapted. - if (CpuFeatures::IsSupported(ISELECT)) { - __ isel(eq, result, scratch, fp); - } else { - Label done, adapted; - __ beq(&adapted); - __ mr(result, fp); - __ b(&done); - - __ bind(&adapted); - __ mr(result, scratch); - __ bind(&done); - } - } else { - __ mr(result, fp); - } -} - - -void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { - Register elem = ToRegister(instr->elements()); - Register result = ToRegister(instr->result()); - - Label done; - - // If no arguments adaptor frame the number of arguments is fixed. - __ cmp(fp, elem); - __ mov(result, Operand(scope()->num_parameters())); - __ beq(&done); - - // Arguments adaptor frame present. Get argument length from there. - __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ LoadP(result, - MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ SmiUntag(result); - - // Argument length is in result register. - __ bind(&done); -} - - -void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { - Register receiver = ToRegister(instr->receiver()); - Register function = ToRegister(instr->function()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - - // If the receiver is null or undefined, we have to pass the global - // object as a receiver to normal functions. Values have to be - // passed unchanged to builtins and strict-mode functions. - Label global_object, result_in_receiver; - - if (!instr->hydrogen()->known_function()) { - // Do not transform the receiver to object for strict mode - // functions or builtins. - __ LoadP(scratch, - FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); - __ lwz(scratch, - FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); - __ andi(r0, scratch, - Operand(SharedFunctionInfo::IsStrictBit::kMask | - SharedFunctionInfo::IsNativeBit::kMask)); - __ bne(&result_in_receiver, cr0); - } - - // Normal function. Replace undefined or null with global receiver. - __ LoadRoot(scratch, Heap::kNullValueRootIndex); - __ cmp(receiver, scratch); - __ beq(&global_object); - __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); - __ cmp(receiver, scratch); - __ beq(&global_object); - - // Deoptimize if the receiver is not a JS object. 
- __ TestIfSmi(receiver, r0); - DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0); - __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE); - DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject); - - __ b(&result_in_receiver); - __ bind(&global_object); - __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset)); - __ LoadP(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX)); - __ LoadP(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX)); - - if (result.is(receiver)) { - __ bind(&result_in_receiver); - } else { - Label result_ok; - __ b(&result_ok); - __ bind(&result_in_receiver); - __ mr(result, receiver); - __ bind(&result_ok); - } -} - - -void LCodeGen::DoApplyArguments(LApplyArguments* instr) { - Register receiver = ToRegister(instr->receiver()); - Register function = ToRegister(instr->function()); - Register length = ToRegister(instr->length()); - Register elements = ToRegister(instr->elements()); - Register scratch = scratch0(); - DCHECK(receiver.is(r3)); // Used for parameter count. - DCHECK(function.is(r4)); // Required by InvokeFunction. - DCHECK(ToRegister(instr->result()).is(r3)); - - // Copy the arguments to this function possibly from the - // adaptor frame below it. - const uint32_t kArgumentsLimit = 1 * KB; - __ cmpli(length, Operand(kArgumentsLimit)); - DeoptimizeIf(gt, instr, DeoptimizeReason::kTooManyArguments); - - // Push the receiver and use the register to keep the original - // number of arguments. - __ push(receiver); - __ mr(receiver, length); - // The arguments are at a one pointer size offset from elements. - __ addi(elements, elements, Operand(1 * kPointerSize)); - - // Loop through the arguments pushing them onto the execution - // stack. - Label invoke, loop; - // length is a small non-negative integer, due to the test above. - __ cmpi(length, Operand::Zero()); - __ beq(&invoke); - __ mtctr(length); - __ bind(&loop); - __ ShiftLeftImm(r0, length, Operand(kPointerSizeLog2)); - __ LoadPX(scratch, MemOperand(elements, r0)); - __ push(scratch); - __ addi(length, length, Operand(-1)); - __ bdnz(&loop); - - __ bind(&invoke); - - InvokeFlag flag = CALL_FUNCTION; - if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) { - DCHECK(!info()->saves_caller_doubles()); - // TODO(ishell): drop current frame before pushing arguments to the stack. - flag = JUMP_FUNCTION; - ParameterCount actual(r3); - // It is safe to use r6, r7 and r8 as scratch registers here given that - // 1) we are not going to return to caller function anyway, - // 2) r6 (new.target) will be initialized below. - PrepareForTailCall(actual, r6, r7, r8); - } - - DCHECK(instr->HasPointerMap()); - LPointerMap* pointers = instr->pointer_map(); - SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt); - // The number of arguments is stored in receiver which is r3, as expected - // by InvokeFunction. 
- ParameterCount actual(receiver); - __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator); -} - - -void LCodeGen::DoPushArgument(LPushArgument* instr) { - LOperand* argument = instr->value(); - if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) { - Abort(kDoPushArgumentNotImplementedForDoubleType); - } else { - Register argument_reg = EmitLoadRegister(argument, ip); - __ push(argument_reg); - } -} - - -void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); } - - -void LCodeGen::DoThisFunction(LThisFunction* instr) { - Register result = ToRegister(instr->result()); - __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); -} - - -void LCodeGen::DoContext(LContext* instr) { - // If there is a non-return use, the context must be moved to a register. - Register result = ToRegister(instr->result()); - if (info()->IsOptimizing()) { - __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); - } else { - // If there is no frame, the context must be in cp. - DCHECK(result.is(cp)); - } -} - - -void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - __ Move(scratch0(), instr->hydrogen()->declarations()); - __ push(scratch0()); - __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags())); - __ push(scratch0()); - __ Move(scratch0(), instr->hydrogen()->feedback_vector()); - __ push(scratch0()); - CallRuntime(Runtime::kDeclareGlobals, instr); -} - -void LCodeGen::CallKnownFunction(Handle<JSFunction> function, - int formal_parameter_count, int arity, - bool is_tail_call, LInstruction* instr) { - bool dont_adapt_arguments = - formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; - bool can_invoke_directly = - dont_adapt_arguments || formal_parameter_count == arity; - - Register function_reg = r4; - - LPointerMap* pointers = instr->pointer_map(); - - if (can_invoke_directly) { - // Change context. - __ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset)); - - // Always initialize new target and number of actual arguments. - __ LoadRoot(r6, Heap::kUndefinedValueRootIndex); - __ mov(r3, Operand(arity)); - - bool is_self_call = function.is_identical_to(info()->closure()); - - // Invoke function. - if (is_self_call) { - Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location())); - if (is_tail_call) { - __ Jump(self, RelocInfo::CODE_TARGET); - } else { - __ Call(self, RelocInfo::CODE_TARGET); - } - } else { - __ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset)); - if (is_tail_call) { - __ JumpToJSEntry(ip); - } else { - __ CallJSEntry(ip); - } - } - - if (!is_tail_call) { - // Set up deoptimization. - RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); - } - } else { - SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); - ParameterCount actual(arity); - ParameterCount expected(formal_parameter_count); - InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION; - __ InvokeFunction(function_reg, expected, actual, flag, generator); - } -} - - -void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { - DCHECK(instr->context() != NULL); - DCHECK(ToRegister(instr->context()).is(cp)); - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - - // Deoptimize if not a heap number. 
- __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); - __ cmp(scratch, ip); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber); - - Label done; - Register exponent = scratch0(); - scratch = no_reg; - __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); - // Check the sign of the argument. If the argument is positive, just - // return it. - __ cmpwi(exponent, Operand::Zero()); - // Move the input to the result if necessary. - __ Move(result, input); - __ bge(&done); - - // Input is negative. Reverse its sign. - // Preserve the value of all registers. - { - PushSafepointRegistersScope scope(this); - - // Registers were saved at the safepoint, so we can use - // many scratch registers. - Register tmp1 = input.is(r4) ? r3 : r4; - Register tmp2 = input.is(r5) ? r3 : r5; - Register tmp3 = input.is(r6) ? r3 : r6; - Register tmp4 = input.is(r7) ? r3 : r7; - - // exponent: floating point exponent value. - - Label allocated, slow; - __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow); - __ b(&allocated); - - // Slow case: Call the runtime system to do the number allocation. - __ bind(&slow); - - CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr, - instr->context()); - // Set the pointer to the new heap number in tmp. - if (!tmp1.is(r3)) __ mr(tmp1, r3); - // Restore input_reg after call to runtime. - __ LoadFromSafepointRegisterSlot(input, input); - __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); - - __ bind(&allocated); - // exponent: floating point exponent value. - // tmp1: allocated heap number. - STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); - __ clrlwi(exponent, exponent, Operand(1)); // clear sign bit - __ stw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset)); - __ lwz(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset)); - __ stw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset)); - - __ StoreToSafepointRegisterSlot(tmp1, result); - } - - __ bind(&done); -} - - -void LCodeGen::EmitMathAbs(LMathAbs* instr) { - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - Label done; - __ cmpi(input, Operand::Zero()); - __ Move(result, input); - __ bge(&done); - __ li(r0, Operand::Zero()); // clear xer - __ mtxer(r0); - __ neg(result, result, SetOE, SetRC); - // Deoptimize on overflow. - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0); - __ bind(&done); -} - - -#if V8_TARGET_ARCH_PPC64 -void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) { - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - Label done; - __ cmpwi(input, Operand::Zero()); - __ Move(result, input); - __ bge(&done); - - // Deoptimize on overflow. - __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); - __ cmpw(input, r0); - DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow); - - __ neg(result, result); - __ bind(&done); -} -#endif - - -void LCodeGen::DoMathAbs(LMathAbs* instr) { - // Class for deferred case. 
- class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode { - public: - DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) - : LDeferredCode(codegen), instr_(instr) {} - void Generate() override { - codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); - } - LInstruction* instr() override { return instr_; } - - private: - LMathAbs* instr_; - }; - - Representation r = instr->hydrogen()->value()->representation(); - if (r.IsDouble()) { - DoubleRegister input = ToDoubleRegister(instr->value()); - DoubleRegister result = ToDoubleRegister(instr->result()); - __ fabs(result, input); -#if V8_TARGET_ARCH_PPC64 - } else if (r.IsInteger32()) { - EmitInteger32MathAbs(instr); - } else if (r.IsSmi()) { -#else - } else if (r.IsSmiOrInteger32()) { -#endif - EmitMathAbs(instr); - } else { - // Representation is tagged. - DeferredMathAbsTaggedHeapNumber* deferred = - new (zone()) DeferredMathAbsTaggedHeapNumber(this, instr); - Register input = ToRegister(instr->value()); - // Smi check. - __ JumpIfNotSmi(input, deferred->entry()); - // If smi, handle it directly. - EmitMathAbs(instr); - __ bind(deferred->exit()); - } -} - -void LCodeGen::DoMathFloorD(LMathFloorD* instr) { - DoubleRegister input_reg = ToDoubleRegister(instr->value()); - DoubleRegister output_reg = ToDoubleRegister(instr->result()); - __ frim(output_reg, input_reg); -} - -void LCodeGen::DoMathFloorI(LMathFloorI* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - Register result = ToRegister(instr->result()); - Register input_high = scratch0(); - Register scratch = ip; - Label done, exact; - - __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done, - &exact); - DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN); - - __ bind(&exact); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // Test for -0. - __ cmpi(result, Operand::Zero()); - __ bne(&done); - __ cmpwi(input_high, Operand::Zero()); - DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); - } - __ bind(&done); -} - -void LCodeGen::DoMathRoundD(LMathRoundD* instr) { - DoubleRegister input_reg = ToDoubleRegister(instr->value()); - DoubleRegister output_reg = ToDoubleRegister(instr->result()); - DoubleRegister dot_five = double_scratch0(); - Label done; - - __ frin(output_reg, input_reg); - __ fcmpu(input_reg, kDoubleRegZero); - __ bge(&done); - __ fcmpu(output_reg, input_reg); - __ beq(&done); - - // Negative, non-integer case - __ LoadDoubleLiteral(dot_five, 0.5, r0); - __ fadd(output_reg, input_reg, dot_five); - __ frim(output_reg, output_reg); - // The range [-0.5, -0.0[ yielded +0.0. Force the sign to negative. - __ fabs(output_reg, output_reg); - __ fneg(output_reg, output_reg); - - __ bind(&done); -} - -void LCodeGen::DoMathRoundI(LMathRoundI* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - Register result = ToRegister(instr->result()); - DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp()); - DoubleRegister input_plus_dot_five = double_scratch1; - Register scratch1 = scratch0(); - Register scratch2 = ip; - DoubleRegister dot_five = double_scratch0(); - Label convert, done; - - __ LoadDoubleLiteral(dot_five, 0.5, r0); - __ fabs(double_scratch1, input); - __ fcmpu(double_scratch1, dot_five); - DeoptimizeIf(unordered, instr, DeoptimizeReason::kLostPrecisionOrNaN); - // If input is in [-0.5, -0], the result is -0. - // If input is in [+0, +0.5[, the result is +0. - // If the input is +0.5, the result is 1. - __ bgt(&convert); // Out of [-0.5, +0.5]. 
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // [-0.5, -0] (negative) yields minus zero. - __ TestDoubleSign(input, scratch1); - DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); - } - __ fcmpu(input, dot_five); - if (CpuFeatures::IsSupported(ISELECT)) { - __ li(result, Operand(1)); - __ isel(lt, result, r0, result); - __ b(&done); - } else { - Label return_zero; - __ bne(&return_zero); - __ li(result, Operand(1)); // +0.5. - __ b(&done); - // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on - // flag kBailoutOnMinusZero. - __ bind(&return_zero); - __ li(result, Operand::Zero()); - __ b(&done); - } - - __ bind(&convert); - __ fadd(input_plus_dot_five, input, dot_five); - // Reuse dot_five (double_scratch0) as we no longer need this value. - __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2, - double_scratch0(), &done, &done); - DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN); - __ bind(&done); -} - - -void LCodeGen::DoMathFround(LMathFround* instr) { - DoubleRegister input_reg = ToDoubleRegister(instr->value()); - DoubleRegister output_reg = ToDoubleRegister(instr->result()); - __ frsp(output_reg, input_reg); -} - - -void LCodeGen::DoMathSqrt(LMathSqrt* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - DoubleRegister result = ToDoubleRegister(instr->result()); - __ fsqrt(result, input); -} - - -void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - DoubleRegister result = ToDoubleRegister(instr->result()); - DoubleRegister temp = double_scratch0(); - - // Note that according to ECMA-262 15.8.2.13: - // Math.pow(-Infinity, 0.5) == Infinity - // Math.sqrt(-Infinity) == NaN - Label skip, done; - - __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0()); - __ fcmpu(input, temp); - __ bne(&skip); - __ fneg(result, temp); - __ b(&done); - - // Add +0 to convert -0 to +0. - __ bind(&skip); - __ fadd(result, input, kDoubleRegZero); - __ fsqrt(result, result); - __ bind(&done); -} - - -void LCodeGen::DoPower(LPower* instr) { - Representation exponent_type = instr->hydrogen()->right()->representation(); -// Having marked this as a call, we can use any registers. -// Just make sure that the input/output registers are the expected ones. 
- Register tagged_exponent = MathPowTaggedDescriptor::exponent(); - DCHECK(!instr->right()->IsDoubleRegister() || - ToDoubleRegister(instr->right()).is(d2)); - DCHECK(!instr->right()->IsRegister() || - ToRegister(instr->right()).is(tagged_exponent)); - DCHECK(ToDoubleRegister(instr->left()).is(d1)); - DCHECK(ToDoubleRegister(instr->result()).is(d3)); - - if (exponent_type.IsSmi()) { - MathPowStub stub(isolate(), MathPowStub::TAGGED); - __ CallStub(&stub); - } else if (exponent_type.IsTagged()) { - Label no_deopt; - __ JumpIfSmi(tagged_exponent, &no_deopt); - DCHECK(!r10.is(tagged_exponent)); - __ LoadP(r10, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); - __ cmp(r10, ip); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber); - __ bind(&no_deopt); - MathPowStub stub(isolate(), MathPowStub::TAGGED); - __ CallStub(&stub); - } else if (exponent_type.IsInteger32()) { - MathPowStub stub(isolate(), MathPowStub::INTEGER); - __ CallStub(&stub); - } else { - DCHECK(exponent_type.IsDouble()); - MathPowStub stub(isolate(), MathPowStub::DOUBLE); - __ CallStub(&stub); - } -} - -void LCodeGen::DoMathCos(LMathCos* instr) { - __ PrepareCallCFunction(0, 1, scratch0()); - __ MovToFloatParameter(ToDoubleRegister(instr->value())); - __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1); - __ MovFromFloatResult(ToDoubleRegister(instr->result())); -} - -void LCodeGen::DoMathSin(LMathSin* instr) { - __ PrepareCallCFunction(0, 1, scratch0()); - __ MovToFloatParameter(ToDoubleRegister(instr->value())); - __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1); - __ MovFromFloatResult(ToDoubleRegister(instr->result())); -} - -void LCodeGen::DoMathExp(LMathExp* instr) { - __ PrepareCallCFunction(0, 1, scratch0()); - __ MovToFloatParameter(ToDoubleRegister(instr->value())); - __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1); - __ MovFromFloatResult(ToDoubleRegister(instr->result())); -} - -void LCodeGen::DoMathLog(LMathLog* instr) { - __ PrepareCallCFunction(0, 1, scratch0()); - __ MovToFloatParameter(ToDoubleRegister(instr->value())); - __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1); - __ MovFromFloatResult(ToDoubleRegister(instr->result())); -} - -void LCodeGen::DoMathClz32(LMathClz32* instr) { - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - __ cntlzw(result, input); -} - -void LCodeGen::PrepareForTailCall(const ParameterCount& actual, - Register scratch1, Register scratch2, - Register scratch3) { -#if DEBUG - if (actual.is_reg()) { - DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3)); - } else { - DCHECK(!AreAliased(scratch1, scratch2, scratch3)); - } -#endif - if (FLAG_code_comments) { - if (actual.is_reg()) { - Comment(";;; PrepareForTailCall, actual: %s {", - RegisterConfiguration::Crankshaft()->GetGeneralRegisterName( - actual.reg().code())); - } else { - Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate()); - } - } - - // Check if next frame is an arguments adaptor frame. 
- Register caller_args_count_reg = scratch1; - Label no_arguments_adaptor, formal_parameter_count_loaded; - __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ LoadP(scratch3, - MemOperand(scratch2, StandardFrameConstants::kContextOffset)); - __ cmpi(scratch3, - Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); - __ bne(&no_arguments_adaptor); - - // Drop current frame and load arguments count from arguments adaptor frame. - __ mr(fp, scratch2); - __ LoadP(caller_args_count_reg, - MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ SmiUntag(caller_args_count_reg); - __ b(&formal_parameter_count_loaded); - - __ bind(&no_arguments_adaptor); - // Load caller's formal parameter count - __ mov(caller_args_count_reg, Operand(info()->literal()->parameter_count())); - - __ bind(&formal_parameter_count_loaded); - __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3); - - Comment(";;; }"); -} - -void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { - HInvokeFunction* hinstr = instr->hydrogen(); - DCHECK(ToRegister(instr->context()).is(cp)); - DCHECK(ToRegister(instr->function()).is(r4)); - DCHECK(instr->HasPointerMap()); - - bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow; - - if (is_tail_call) { - DCHECK(!info()->saves_caller_doubles()); - ParameterCount actual(instr->arity()); - // It is safe to use r6, r7 and r8 as scratch registers here given that - // 1) we are not going to return to caller function anyway, - // 2) r6 (new.target) will be initialized below. - PrepareForTailCall(actual, r6, r7, r8); - } - - Handle<JSFunction> known_function = hinstr->known_function(); - if (known_function.is_null()) { - LPointerMap* pointers = instr->pointer_map(); - SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); - ParameterCount actual(instr->arity()); - InvokeFlag flag = is_tail_call ?
JUMP_FUNCTION : CALL_FUNCTION; - __ InvokeFunction(r4, no_reg, actual, flag, generator); - } else { - CallKnownFunction(known_function, hinstr->formal_parameter_count(), - instr->arity(), is_tail_call, instr); - } -} - - -void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { - DCHECK(ToRegister(instr->result()).is(r3)); - - if (instr->hydrogen()->IsTailCall()) { - if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL); - - if (instr->target()->IsConstantOperand()) { - LConstantOperand* target = LConstantOperand::cast(instr->target()); - Handle<Code> code = Handle<Code>::cast(ToHandle(target)); - __ Jump(code, RelocInfo::CODE_TARGET); - } else { - DCHECK(instr->target()->IsRegister()); - Register target = ToRegister(instr->target()); - __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag)); - __ JumpToJSEntry(ip); - } - } else { - LPointerMap* pointers = instr->pointer_map(); - SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); - - if (instr->target()->IsConstantOperand()) { - LConstantOperand* target = LConstantOperand::cast(instr->target()); - Handle<Code> code = Handle<Code>::cast(ToHandle(target)); - generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET)); - __ Call(code, RelocInfo::CODE_TARGET); - } else { - DCHECK(instr->target()->IsRegister()); - Register target = ToRegister(instr->target()); - generator.BeforeCall(__ CallSize(target)); - __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag)); - __ CallJSEntry(ip); - } - generator.AfterCall(); - } -} - - -void LCodeGen::DoCallNewArray(LCallNewArray* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - DCHECK(ToRegister(instr->constructor()).is(r4)); - DCHECK(ToRegister(instr->result()).is(r3)); - - __ mov(r3, Operand(instr->arity())); - __ Move(r5, instr->hydrogen()->site()); - - ElementsKind kind = instr->hydrogen()->elements_kind(); - AllocationSiteOverrideMode override_mode = AllocationSite::ShouldTrack(kind) - ?
DISABLE_ALLOCATION_SITES - : DONT_OVERRIDE; - - if (instr->arity() == 0) { - ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - } else if (instr->arity() == 1) { - Label done; - if (IsFastPackedElementsKind(kind)) { - Label packed_case; - // We might need a change here - // look at the first argument - __ LoadP(r8, MemOperand(sp, 0)); - __ cmpi(r8, Operand::Zero()); - __ beq(&packed_case); - - ElementsKind holey_kind = GetHoleyElementsKind(kind); - ArraySingleArgumentConstructorStub stub(isolate(), holey_kind, - override_mode); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - __ b(&done); - __ bind(&packed_case); - } - - ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - __ bind(&done); - } else { - ArrayNArgumentsConstructorStub stub(isolate()); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - } -} - - -void LCodeGen::DoCallRuntime(LCallRuntime* instr) { - CallRuntime(instr->function(), instr->arity(), instr); -} - - -void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { - Register function = ToRegister(instr->function()); - Register code_object = ToRegister(instr->code_object()); - __ addi(code_object, code_object, - Operand(Code::kHeaderSize - kHeapObjectTag)); - __ StoreP(code_object, - FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0); -} - - -void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { - Register result = ToRegister(instr->result()); - Register base = ToRegister(instr->base_object()); - if (instr->offset()->IsConstantOperand()) { - LConstantOperand* offset = LConstantOperand::cast(instr->offset()); - __ Add(result, base, ToInteger32(offset), r0); - } else { - Register offset = ToRegister(instr->offset()); - __ add(result, base, offset); - } -} - - -void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { - HStoreNamedField* hinstr = instr->hydrogen(); - Representation representation = instr->representation(); - - Register object = ToRegister(instr->object()); - Register scratch = scratch0(); - HObjectAccess access = hinstr->access(); - int offset = access.offset(); - - if (access.IsExternalMemory()) { - Register value = ToRegister(instr->value()); - MemOperand operand = MemOperand(object, offset); - __ StoreRepresentation(value, operand, representation, r0); - return; - } - - __ AssertNotSmi(object); - -#if V8_TARGET_ARCH_PPC64 - DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() || - IsInteger32(LConstantOperand::cast(instr->value()))); -#else - DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() || - IsSmi(LConstantOperand::cast(instr->value()))); -#endif - if (!FLAG_unbox_double_fields && representation.IsDouble()) { - DCHECK(access.IsInobject()); - DCHECK(!hinstr->has_transition()); - DCHECK(!hinstr->NeedsWriteBarrier()); - DoubleRegister value = ToDoubleRegister(instr->value()); - __ stfd(value, FieldMemOperand(object, offset)); - return; - } - - if (hinstr->has_transition()) { - Handle<Map> transition = hinstr->transition_map(); - AddDeprecationDependency(transition); - __ mov(scratch, Operand(transition)); - __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0); - if (hinstr->NeedsWriteBarrierForMap()) { - Register temp = ToRegister(instr->temp()); - // Update the write barrier for the map field.
- __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(), - kSaveFPRegs); - } - } - - // Do the store. - Register record_dest = object; - Register record_value = no_reg; - Register record_scratch = scratch; -#if V8_TARGET_ARCH_PPC64 - if (FLAG_unbox_double_fields && representation.IsDouble()) { - DCHECK(access.IsInobject()); - DoubleRegister value = ToDoubleRegister(instr->value()); - __ stfd(value, FieldMemOperand(object, offset)); - if (hinstr->NeedsWriteBarrier()) { - record_value = ToRegister(instr->value()); - } - } else { - if (representation.IsSmi() && - hinstr->value()->representation().IsInteger32()) { - DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY); - // 64-bit Smi optimization - // Store int value directly to upper half of the smi. - offset = SmiWordOffset(offset); - representation = Representation::Integer32(); - } -#endif - if (access.IsInobject()) { - Register value = ToRegister(instr->value()); - MemOperand operand = FieldMemOperand(object, offset); - __ StoreRepresentation(value, operand, representation, r0); - record_value = value; - } else { - Register value = ToRegister(instr->value()); - __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset)); - MemOperand operand = FieldMemOperand(scratch, offset); - __ StoreRepresentation(value, operand, representation, r0); - record_dest = scratch; - record_value = value; - record_scratch = object; - } -#if V8_TARGET_ARCH_PPC64 - } -#endif - - if (hinstr->NeedsWriteBarrier()) { - __ RecordWriteField(record_dest, offset, record_value, record_scratch, - GetLinkRegisterState(), kSaveFPRegs, - EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(), - hinstr->PointersToHereCheckForValue()); - } -} - - -void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { - Representation representation = instr->hydrogen()->length()->representation(); - DCHECK(representation.Equals(instr->hydrogen()->index()->representation())); - DCHECK(representation.IsSmiOrInteger32()); - - Condition cc = instr->hydrogen()->allow_equality() ? 
lt : le; - if (instr->length()->IsConstantOperand()) { - int32_t length = ToInteger32(LConstantOperand::cast(instr->length())); - Register index = ToRegister(instr->index()); - if (representation.IsSmi()) { - __ CmplSmiLiteral(index, Smi::FromInt(length), r0); - } else { - __ Cmplwi(index, Operand(length), r0); - } - cc = CommuteCondition(cc); - } else if (instr->index()->IsConstantOperand()) { - int32_t index = ToInteger32(LConstantOperand::cast(instr->index())); - Register length = ToRegister(instr->length()); - if (representation.IsSmi()) { - __ CmplSmiLiteral(length, Smi::FromInt(index), r0); - } else { - __ Cmplwi(length, Operand(index), r0); - } - } else { - Register index = ToRegister(instr->index()); - Register length = ToRegister(instr->length()); - if (representation.IsSmi()) { - __ cmpl(length, index); - } else { - __ cmplw(length, index); - } - } - if (FLAG_debug_code && instr->hydrogen()->skip_check()) { - Label done; - __ b(NegateCondition(cc), &done); - __ stop("eliminated bounds check failed"); - __ bind(&done); - } else { - DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds); - } -} - - -void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { - Register external_pointer = ToRegister(instr->elements()); - Register key = no_reg; - ElementsKind elements_kind = instr->elements_kind(); - bool key_is_constant = instr->key()->IsConstantOperand(); - int constant_key = 0; - if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort(kArrayIndexConstantValueTooBig); - } - } else { - key = ToRegister(instr->key()); - } - int element_size_shift = ElementsKindToShiftSize(elements_kind); - bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); - int base_offset = instr->base_offset(); - - if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) { - Register address = scratch0(); - DoubleRegister value(ToDoubleRegister(instr->value())); - if (key_is_constant) { - if (constant_key != 0) { - __ Add(address, external_pointer, constant_key << element_size_shift, - r0); - } else { - address = external_pointer; - } - } else { - __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi); - __ add(address, external_pointer, r0); - } - if (elements_kind == FLOAT32_ELEMENTS) { - __ frsp(double_scratch0(), value); - __ stfs(double_scratch0(), MemOperand(address, base_offset)); - } else { // Storing doubles, not floats. 
- __ stfd(value, MemOperand(address, base_offset)); - } - } else { - Register value(ToRegister(instr->value())); - MemOperand mem_operand = - PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi, - constant_key, element_size_shift, base_offset); - switch (elements_kind) { - case UINT8_ELEMENTS: - case UINT8_CLAMPED_ELEMENTS: - case INT8_ELEMENTS: - if (key_is_constant) { - __ StoreByte(value, mem_operand, r0); - } else { - __ stbx(value, mem_operand); - } - break; - case INT16_ELEMENTS: - case UINT16_ELEMENTS: - if (key_is_constant) { - __ StoreHalfWord(value, mem_operand, r0); - } else { - __ sthx(value, mem_operand); - } - break; - case INT32_ELEMENTS: - case UINT32_ELEMENTS: - if (key_is_constant) { - __ StoreWord(value, mem_operand, r0); - } else { - __ stwx(value, mem_operand); - } - break; - case FLOAT32_ELEMENTS: - case FLOAT64_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case DICTIONARY_ELEMENTS: - case FAST_SLOPPY_ARGUMENTS_ELEMENTS: - case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: - case FAST_STRING_WRAPPER_ELEMENTS: - case SLOW_STRING_WRAPPER_ELEMENTS: - case NO_ELEMENTS: - UNREACHABLE(); - break; - } - } -} - - -void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { - DoubleRegister value = ToDoubleRegister(instr->value()); - Register elements = ToRegister(instr->elements()); - Register key = no_reg; - Register scratch = scratch0(); - DoubleRegister double_scratch = double_scratch0(); - bool key_is_constant = instr->key()->IsConstantOperand(); - int constant_key = 0; - - // Calculate the effective address of the slot in the array to store the - // double value. - if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort(kArrayIndexConstantValueTooBig); - } - } else { - key = ToRegister(instr->key()); - } - int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); - bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); - int base_offset = instr->base_offset() + constant_key * kDoubleSize; - if (!key_is_constant) { - __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi); - __ add(scratch, elements, scratch); - elements = scratch; - } - if (!is_int16(base_offset)) { - __ Add(scratch, elements, base_offset, r0); - base_offset = 0; - elements = scratch; - } - - if (instr->NeedsCanonicalization()) { - // Turn potential sNaN value into qNaN. - __ CanonicalizeNaN(double_scratch, value); - __ stfd(double_scratch, MemOperand(elements, base_offset)); - } else { - __ stfd(value, MemOperand(elements, base_offset)); - } -} - - -void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { - HStoreKeyed* hinstr = instr->hydrogen(); - Register value = ToRegister(instr->value()); - Register elements = ToRegister(instr->elements()); - Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; - Register scratch = scratch0(); - Register store_base = scratch; - int offset = instr->base_offset(); - - // Do the store. 
- if (instr->key()->IsConstantOperand()) { - DCHECK(!hinstr->NeedsWriteBarrier()); - LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); - offset += ToInteger32(const_operand) * kPointerSize; - store_base = elements; - } else { - // Even though the HLoadKeyed instruction forces the input - // representation for the key to be an integer, the input gets replaced - // during bound check elimination with the index argument to the bounds - // check, which can be tagged, so that case must be handled here, too. - if (hinstr->key()->representation().IsSmi()) { - __ SmiToPtrArrayOffset(scratch, key); - } else { - __ ShiftLeftImm(scratch, key, Operand(kPointerSizeLog2)); - } - __ add(scratch, elements, scratch); - } - - Representation representation = hinstr->value()->representation(); - -#if V8_TARGET_ARCH_PPC64 - // 64-bit Smi optimization - if (representation.IsInteger32()) { - DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY); - DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS); - // Store int value directly to upper half of the smi. - offset = SmiWordOffset(offset); - } -#endif - - __ StoreRepresentation(value, MemOperand(store_base, offset), representation, - r0); - - if (hinstr->NeedsWriteBarrier()) { - SmiCheck check_needed = hinstr->value()->type().IsHeapObject() - ? OMIT_SMI_CHECK - : INLINE_SMI_CHECK; - // Compute address of modified element and store it into key register. - __ Add(key, store_base, offset, r0); - __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs, - EMIT_REMEMBERED_SET, check_needed, - hinstr->PointersToHereCheckForValue()); - } -} - - -void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { - // By cases: external, fast double - if (instr->is_fixed_typed_array()) { - DoStoreKeyedExternalArray(instr); - } else if (instr->hydrogen()->value()->representation().IsDouble()) { - DoStoreKeyedFixedDoubleArray(instr); - } else { - DoStoreKeyedFixedArray(instr); - } -} - - -void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) { - class DeferredMaybeGrowElements final : public LDeferredCode { - public: - DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr) - : LDeferredCode(codegen), instr_(instr) {} - void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LMaybeGrowElements* instr_; - }; - - Register result = r3; - DeferredMaybeGrowElements* deferred = - new (zone()) DeferredMaybeGrowElements(this, instr); - LOperand* key = instr->key(); - LOperand* current_capacity = instr->current_capacity(); - - DCHECK(instr->hydrogen()->key()->representation().IsInteger32()); - DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32()); - DCHECK(key->IsConstantOperand() || key->IsRegister()); - DCHECK(current_capacity->IsConstantOperand() || - current_capacity->IsRegister()); - - if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) { - int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); - int32_t constant_capacity = - ToInteger32(LConstantOperand::cast(current_capacity)); - if (constant_key >= constant_capacity) { - // Deferred case. 
- __ b(deferred->entry()); - } - } else if (key->IsConstantOperand()) { - int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); - __ Cmpwi(ToRegister(current_capacity), Operand(constant_key), r0); - __ ble(deferred->entry()); - } else if (current_capacity->IsConstantOperand()) { - int32_t constant_capacity = - ToInteger32(LConstantOperand::cast(current_capacity)); - __ Cmpwi(ToRegister(key), Operand(constant_capacity), r0); - __ bge(deferred->entry()); - } else { - __ cmpw(ToRegister(key), ToRegister(current_capacity)); - __ bge(deferred->entry()); - } - - if (instr->elements()->IsRegister()) { - __ Move(result, ToRegister(instr->elements())); - } else { - __ LoadP(result, ToMemOperand(instr->elements())); - } - - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) { - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - Register result = r3; - __ li(result, Operand::Zero()); - - // We have to call a stub. - { - PushSafepointRegistersScope scope(this); - if (instr->object()->IsRegister()) { - __ Move(result, ToRegister(instr->object())); - } else { - __ LoadP(result, ToMemOperand(instr->object())); - } - - LOperand* key = instr->key(); - if (key->IsConstantOperand()) { - LConstantOperand* constant_key = LConstantOperand::cast(key); - int32_t int_key = ToInteger32(constant_key); - if (Smi::IsValid(int_key)) { - __ LoadSmiLiteral(r6, Smi::FromInt(int_key)); - } else { - Abort(kArrayIndexConstantValueTooBig); - } - } else { - Label is_smi; -#if V8_TARGET_ARCH_PPC64 - __ SmiTag(r6, ToRegister(key)); -#else - // Deopt if the key is outside Smi range. The stub expects Smi and would - // bump the elements into dictionary mode (and trigger a deopt) anyways. - __ SmiTagCheckOverflow(r6, ToRegister(key), r0); - __ BranchOnNoOverflow(&is_smi); - __ PopSafepointRegisters(); - DeoptimizeIf(al, instr, DeoptimizeReason::kOverflow, cr0); - __ bind(&is_smi); -#endif - } - - GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind()); - __ CallStub(&stub); - RecordSafepointWithLazyDeopt( - instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - __ StoreToSafepointRegisterSlot(result, result); - } - - // Deopt on smi, which means the elements array changed to dictionary mode. 
- __ TestIfSmi(result, r0); - DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0); -} - - -void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { - UNREACHABLE(); -} - - -void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { - Register object = ToRegister(instr->object()); - Register temp1 = ToRegister(instr->temp1()); - Register temp2 = ToRegister(instr->temp2()); - Label no_memento_found; - __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found); - DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound); - __ bind(&no_memento_found); -} - - -void LCodeGen::DoStringAdd(LStringAdd* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - DCHECK(ToRegister(instr->left()).is(r4)); - DCHECK(ToRegister(instr->right()).is(r3)); - StringAddStub stub(isolate(), instr->hydrogen()->flags(), - instr->hydrogen()->pretenure_flag()); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); -} - - -void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { - class DeferredStringCharCodeAt final : public LDeferredCode { - public: - DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) - : LDeferredCode(codegen), instr_(instr) {} - void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LStringCharCodeAt* instr_; - }; - - DeferredStringCharCodeAt* deferred = - new (zone()) DeferredStringCharCodeAt(this, instr); - - StringCharLoadGenerator::Generate( - masm(), ToRegister(instr->string()), ToRegister(instr->index()), - ToRegister(instr->result()), deferred->entry()); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { - Register string = ToRegister(instr->string()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - __ li(result, Operand::Zero()); - - PushSafepointRegistersScope scope(this); - __ push(string); - // Push the index as a smi. This is safe because of the checks in - // DoStringCharCodeAt above. 
- if (instr->index()->IsConstantOperand()) { - int const_index = ToInteger32(LConstantOperand::cast(instr->index())); - __ LoadSmiLiteral(scratch, Smi::FromInt(const_index)); - __ push(scratch); - } else { - Register index = ToRegister(instr->index()); - __ SmiTag(index); - __ push(index); - } - CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr, - instr->context()); - __ AssertSmi(r3); - __ SmiUntag(r3); - __ StoreToSafepointRegisterSlot(r3, result); -} - - -void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { - class DeferredStringCharFromCode final : public LDeferredCode { - public: - DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) - : LDeferredCode(codegen), instr_(instr) {} - void Generate() override { - codegen()->DoDeferredStringCharFromCode(instr_); - } - LInstruction* instr() override { return instr_; } - - private: - LStringCharFromCode* instr_; - }; - - DeferredStringCharFromCode* deferred = - new (zone()) DeferredStringCharFromCode(this, instr); - - DCHECK(instr->hydrogen()->value()->representation().IsInteger32()); - Register char_code = ToRegister(instr->char_code()); - Register result = ToRegister(instr->result()); - DCHECK(!char_code.is(result)); - - __ cmpli(char_code, Operand(String::kMaxOneByteCharCode)); - __ bgt(deferred->entry()); - __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); - __ ShiftLeftImm(r0, char_code, Operand(kPointerSizeLog2)); - __ add(result, result, r0); - __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize)); - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(result, ip); - __ beq(deferred->entry()); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { - Register char_code = ToRegister(instr->char_code()); - Register result = ToRegister(instr->result()); - - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. 
- __ li(result, Operand::Zero()); - - PushSafepointRegistersScope scope(this); - __ SmiTag(char_code); - __ push(char_code); - CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr, - instr->context()); - __ StoreToSafepointRegisterSlot(r3, result); -} - - -void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { - LOperand* input = instr->value(); - DCHECK(input->IsRegister() || input->IsStackSlot()); - LOperand* output = instr->result(); - DCHECK(output->IsDoubleRegister()); - if (input->IsStackSlot()) { - Register scratch = scratch0(); - __ LoadP(scratch, ToMemOperand(input)); - __ ConvertIntToDouble(scratch, ToDoubleRegister(output)); - } else { - __ ConvertIntToDouble(ToRegister(input), ToDoubleRegister(output)); - } -} - - -void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { - LOperand* input = instr->value(); - LOperand* output = instr->result(); - __ ConvertUnsignedIntToDouble(ToRegister(input), ToDoubleRegister(output)); -} - - -void LCodeGen::DoNumberTagI(LNumberTagI* instr) { - class DeferredNumberTagI final : public LDeferredCode { - public: - DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr) - : LDeferredCode(codegen), instr_(instr) {} - void Generate() override { - codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(), - instr_->temp2(), SIGNED_INT32); - } - LInstruction* instr() override { return instr_; } - - private: - LNumberTagI* instr_; - }; - - Register src = ToRegister(instr->value()); - Register dst = ToRegister(instr->result()); - - DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr); -#if V8_TARGET_ARCH_PPC64 - __ SmiTag(dst, src); -#else - __ SmiTagCheckOverflow(dst, src, r0); - __ BranchOnOverflow(deferred->entry()); -#endif - __ bind(deferred->exit()); -} - - -void LCodeGen::DoNumberTagU(LNumberTagU* instr) { - class DeferredNumberTagU final : public LDeferredCode { - public: - DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) - : LDeferredCode(codegen), instr_(instr) {} - void Generate() override { - codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(), - instr_->temp2(), UNSIGNED_INT32); - } - LInstruction* instr() override { return instr_; } - - private: - LNumberTagU* instr_; - }; - - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - - DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr); - __ Cmpli(input, Operand(Smi::kMaxValue), r0); - __ bgt(deferred->entry()); - __ SmiTag(result, input); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value, - LOperand* temp1, LOperand* temp2, - IntegerSignedness signedness) { - Label done, slow; - Register src = ToRegister(value); - Register dst = ToRegister(instr->result()); - Register tmp1 = scratch0(); - Register tmp2 = ToRegister(temp1); - Register tmp3 = ToRegister(temp2); - DoubleRegister dbl_scratch = double_scratch0(); - - if (signedness == SIGNED_INT32) { - // There was overflow, so bits 30 and 31 of the original integer - // disagree. Try to allocate a heap number in new space and store - // the value in there. If that fails, call the runtime system. 
- if (dst.is(src)) { - __ SmiUntag(src, dst); - __ xoris(src, src, Operand(HeapNumber::kSignMask >> 16)); - } - __ ConvertIntToDouble(src, dbl_scratch); - } else { - __ ConvertUnsignedIntToDouble(src, dbl_scratch); - } - - if (FLAG_inline_new) { - __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow); - __ b(&done); - } - - // Slow case: Call the runtime system to do the number allocation. - __ bind(&slow); - { - // TODO(3095996): Put a valid pointer value in the stack slot where the - // result register is stored, as this register is in the pointer map, but - // contains an integer value. - __ li(dst, Operand::Zero()); - - // Preserve the value of all registers. - PushSafepointRegistersScope scope(this); - // Reset the context register. - if (!dst.is(cp)) { - __ li(cp, Operand::Zero()); - } - __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); - RecordSafepointWithRegisters(instr->pointer_map(), 0, - Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(r3, dst); - } - - // Done. Put the value in dbl_scratch into the value of the allocated heap - // number. - __ bind(&done); - __ stfd(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset)); -} - - -void LCodeGen::DoNumberTagD(LNumberTagD* instr) { - class DeferredNumberTagD final : public LDeferredCode { - public: - DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) - : LDeferredCode(codegen), instr_(instr) {} - void Generate() override { codegen()->DoDeferredNumberTagD(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LNumberTagD* instr_; - }; - - DoubleRegister input_reg = ToDoubleRegister(instr->value()); - Register scratch = scratch0(); - Register reg = ToRegister(instr->result()); - Register temp1 = ToRegister(instr->temp()); - Register temp2 = ToRegister(instr->temp2()); - - DeferredNumberTagD* deferred = new (zone()) DeferredNumberTagD(this, instr); - if (FLAG_inline_new) { - __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry()); - } else { - __ b(deferred->entry()); - } - __ bind(deferred->exit()); - __ stfd(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset)); -} - - -void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - Register reg = ToRegister(instr->result()); - __ li(reg, Operand::Zero()); - - PushSafepointRegistersScope scope(this); - // Reset the context register. 
- if (!reg.is(cp)) { - __ li(cp, Operand::Zero()); - } - __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); - RecordSafepointWithRegisters(instr->pointer_map(), 0, - Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(r3, reg); -} - - -void LCodeGen::DoSmiTag(LSmiTag* instr) { - HChange* hchange = instr->hydrogen(); - Register input = ToRegister(instr->value()); - Register output = ToRegister(instr->result()); - if (hchange->CheckFlag(HValue::kCanOverflow) && - hchange->value()->CheckFlag(HValue::kUint32)) { - __ TestUnsignedSmiCandidate(input, r0); - DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, cr0); - } -#if !V8_TARGET_ARCH_PPC64 - if (hchange->CheckFlag(HValue::kCanOverflow) && - !hchange->value()->CheckFlag(HValue::kUint32)) { - __ SmiTagCheckOverflow(output, input, r0); - DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0); - } else { -#endif - __ SmiTag(output, input); -#if !V8_TARGET_ARCH_PPC64 - } -#endif -} - - -void LCodeGen::DoSmiUntag(LSmiUntag* instr) { - Register scratch = scratch0(); - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - if (instr->needs_check()) { - // If the input is a HeapObject, value of scratch won't be zero. - __ andi(scratch, input, Operand(kHeapObjectTag)); - __ SmiUntag(result, input); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0); - } else { - __ SmiUntag(result, input); - } -} - - -void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, - DoubleRegister result_reg, - NumberUntagDMode mode) { - bool can_convert_undefined_to_nan = instr->truncating(); - bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); - - Register scratch = scratch0(); - DCHECK(!result_reg.is(double_scratch0())); - - Label convert, load_smi, done; - - if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { - // Smi check. - __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); - - // Heap number map check. - __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); - __ cmp(scratch, ip); - if (can_convert_undefined_to_nan) { - __ bne(&convert); - } else { - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber); - } - // load heap number - __ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); - if (deoptimize_on_minus_zero) { - __ TestDoubleIsMinusZero(result_reg, scratch, ip); - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); - } - __ b(&done); - if (can_convert_undefined_to_nan) { - __ bind(&convert); - // Convert undefined (and hole) to NaN. 
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(input_reg, ip); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined); - __ LoadRoot(scratch, Heap::kNanValueRootIndex); - __ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset)); - __ b(&done); - } - } else { - __ SmiUntag(scratch, input_reg); - DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); - } - // Smi to double register conversion - __ bind(&load_smi); - // scratch: untagged value of input_reg - __ ConvertIntToDouble(scratch, result_reg); - __ bind(&done); -} - - -void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { - Register input_reg = ToRegister(instr->value()); - Register scratch1 = scratch0(); - Register scratch2 = ToRegister(instr->temp()); - DoubleRegister double_scratch = double_scratch0(); - DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2()); - - DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2)); - DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1)); - - Label done; - - // Heap number map check. - __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); - __ cmp(scratch1, ip); - - if (instr->truncating()) { - Label truncate; - __ beq(&truncate); - __ CompareInstanceType(scratch1, scratch1, ODDBALL_TYPE); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball); - __ bind(&truncate); - __ mr(scratch2, input_reg); - __ TruncateHeapNumberToI(input_reg, scratch2); - } else { - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber); - - __ lfd(double_scratch2, - FieldMemOperand(input_reg, HeapNumber::kValueOffset)); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // preserve heap number pointer in scratch2 for minus zero check below - __ mr(scratch2, input_reg); - } - __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1, - double_scratch); - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN); - - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - __ cmpi(input_reg, Operand::Zero()); - __ bne(&done); - __ TestHeapNumberSign(scratch2, scratch1); - DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); - } - } - __ bind(&done); -} - - -void LCodeGen::DoTaggedToI(LTaggedToI* instr) { - class DeferredTaggedToI final : public LDeferredCode { - public: - DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) - : LDeferredCode(codegen), instr_(instr) {} - void Generate() override { codegen()->DoDeferredTaggedToI(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LTaggedToI* instr_; - }; - - LOperand* input = instr->value(); - DCHECK(input->IsRegister()); - DCHECK(input->Equals(instr->result())); - - Register input_reg = ToRegister(input); - - if (instr->hydrogen()->value()->representation().IsSmi()) { - __ SmiUntag(input_reg); - } else { - DeferredTaggedToI* deferred = new (zone()) DeferredTaggedToI(this, instr); - - // Branch to deferred code if the input is a HeapObject. - __ JumpIfNotSmi(input_reg, deferred->entry()); - - __ SmiUntag(input_reg); - __ bind(deferred->exit()); - } -} - - -void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { - LOperand* input = instr->value(); - DCHECK(input->IsRegister()); - LOperand* result = instr->result(); - DCHECK(result->IsDoubleRegister()); - - Register input_reg = ToRegister(input); - DoubleRegister result_reg = ToDoubleRegister(result); - - HValue* value = instr->hydrogen()->value(); - NumberUntagDMode mode = value->representation().IsSmi() - ? 
NUMBER_CANDIDATE_IS_SMI - : NUMBER_CANDIDATE_IS_ANY_TAGGED; - - EmitNumberUntagD(instr, input_reg, result_reg, mode); -} - - -void LCodeGen::DoDoubleToI(LDoubleToI* instr) { - Register result_reg = ToRegister(instr->result()); - Register scratch1 = scratch0(); - DoubleRegister double_input = ToDoubleRegister(instr->value()); - DoubleRegister double_scratch = double_scratch0(); - - if (instr->truncating()) { - __ TruncateDoubleToI(result_reg, double_input); - } else { - __ TryDoubleToInt32Exact(result_reg, double_input, scratch1, - double_scratch); - // Deoptimize if the input wasn't a int32 (inside a double). - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label done; - __ cmpi(result_reg, Operand::Zero()); - __ bne(&done); - __ TestDoubleSign(double_input, scratch1); - DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); - __ bind(&done); - } - } -} - - -void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { - Register result_reg = ToRegister(instr->result()); - Register scratch1 = scratch0(); - DoubleRegister double_input = ToDoubleRegister(instr->value()); - DoubleRegister double_scratch = double_scratch0(); - - if (instr->truncating()) { - __ TruncateDoubleToI(result_reg, double_input); - } else { - __ TryDoubleToInt32Exact(result_reg, double_input, scratch1, - double_scratch); - // Deoptimize if the input wasn't a int32 (inside a double). - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label done; - __ cmpi(result_reg, Operand::Zero()); - __ bne(&done); - __ TestDoubleSign(double_input, scratch1); - DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); - __ bind(&done); - } - } -#if V8_TARGET_ARCH_PPC64 - __ SmiTag(result_reg); -#else - __ SmiTagCheckOverflow(result_reg, r0); - DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0); -#endif -} - - -void LCodeGen::DoCheckSmi(LCheckSmi* instr) { - LOperand* input = instr->value(); - __ TestIfSmi(ToRegister(input), r0); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0); -} - - -void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - LOperand* input = instr->value(); - __ TestIfSmi(ToRegister(input), r0); - DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0); - } -} - - -void LCodeGen::DoCheckArrayBufferNotNeutered( - LCheckArrayBufferNotNeutered* instr) { - Register view = ToRegister(instr->view()); - Register scratch = scratch0(); - - __ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset)); - __ lwz(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset)); - __ andi(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, cr0); -} - - -void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { - Register input = ToRegister(instr->value()); - Register scratch = scratch0(); - - __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); - __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); - - if (instr->hydrogen()->is_interval_check()) { - InstanceType first; - InstanceType last; - instr->hydrogen()->GetCheckInterval(&first, &last); - - __ cmpli(scratch, Operand(first)); - - // If there is only one type in the interval check for equality. 
- if (first == last) { - DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType); - } else { - DeoptimizeIf(lt, instr, DeoptimizeReason::kWrongInstanceType); - // Omit check for the last type. - if (last != LAST_TYPE) { - __ cmpli(scratch, Operand(last)); - DeoptimizeIf(gt, instr, DeoptimizeReason::kWrongInstanceType); - } - } - } else { - uint8_t mask; - uint8_t tag; - instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); - - if (base::bits::IsPowerOfTwo32(mask)) { - DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); - __ andi(r0, scratch, Operand(mask)); - DeoptimizeIf(tag == 0 ? ne : eq, instr, - DeoptimizeReason::kWrongInstanceType, cr0); - } else { - __ andi(scratch, scratch, Operand(mask)); - __ cmpi(scratch, Operand(tag)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType); - } - } -} - - -void LCodeGen::DoCheckValue(LCheckValue* instr) { - Register reg = ToRegister(instr->value()); - Handle<HeapObject> object = instr->hydrogen()->object().handle(); - AllowDeferredHandleDereference smi_check; - if (isolate()->heap()->InNewSpace(*object)) { - Register reg = ToRegister(instr->value()); - Handle<Cell> cell = isolate()->factory()->NewCell(object); - __ mov(ip, Operand(cell)); - __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset)); - __ cmp(reg, ip); - } else { - __ Cmpi(reg, Operand(object), r0); - } - DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch); -} - - -void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { - Register temp = ToRegister(instr->temp()); - Label deopt, done; - // If the map is not deprecated the migration attempt does not make sense. - __ LoadP(temp, FieldMemOperand(object, HeapObject::kMapOffset)); - __ lwz(temp, FieldMemOperand(temp, Map::kBitField3Offset)); - __ TestBitMask(temp, Map::Deprecated::kMask, r0); - __ beq(&deopt, cr0); - - { - PushSafepointRegistersScope scope(this); - __ push(object); - __ li(cp, Operand::Zero()); - __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); - RecordSafepointWithRegisters(instr->pointer_map(), 1, - Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(r3, temp); - } - __ TestIfSmi(temp, r0); - __ bne(&done, cr0); - - __ bind(&deopt); - // In case of "al" condition the operand is not used so just pass cr0 there.
- DeoptimizeIf(al, instr, DeoptimizeReason::kInstanceMigrationFailed, cr0); - - __ bind(&done); -} - - -void LCodeGen::DoCheckMaps(LCheckMaps* instr) { - class DeferredCheckMaps final : public LDeferredCode { - public: - DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) - : LDeferredCode(codegen), instr_(instr), object_(object) { - SetExit(check_maps()); - } - void Generate() override { - codegen()->DoDeferredInstanceMigration(instr_, object_); - } - Label* check_maps() { return &check_maps_; } - LInstruction* instr() override { return instr_; } - - private: - LCheckMaps* instr_; - Label check_maps_; - Register object_; - }; - - if (instr->hydrogen()->IsStabilityCheck()) { - const UniqueSet<Map>* maps = instr->hydrogen()->maps(); - for (int i = 0; i < maps->size(); ++i) { - AddStabilityDependency(maps->at(i).handle()); - } - return; - } - - Register object = ToRegister(instr->value()); - Register map_reg = ToRegister(instr->temp()); - - __ LoadP(map_reg, FieldMemOperand(object, HeapObject::kMapOffset)); - - DeferredCheckMaps* deferred = NULL; - if (instr->hydrogen()->HasMigrationTarget()) { - deferred = new (zone()) DeferredCheckMaps(this, instr, object); - __ bind(deferred->check_maps()); - } - - const UniqueSet<Map>* maps = instr->hydrogen()->maps(); - Label success; - for (int i = 0; i < maps->size() - 1; i++) { - Handle<Map> map = maps->at(i).handle(); - __ CompareMap(map_reg, map, &success); - __ beq(&success); - } - - Handle<Map> map = maps->at(maps->size() - 1).handle(); - __ CompareMap(map_reg, map, &success); - if (instr->hydrogen()->HasMigrationTarget()) { - __ bne(deferred->entry()); - } else { - DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap); - } - - __ bind(&success); -} - - -void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { - DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); - Register result_reg = ToRegister(instr->result()); - __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0()); -} - - -void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { - Register unclamped_reg = ToRegister(instr->unclamped()); - Register result_reg = ToRegister(instr->result()); - __ ClampUint8(result_reg, unclamped_reg); -} - - -void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { - Register scratch = scratch0(); - Register input_reg = ToRegister(instr->unclamped()); - Register result_reg = ToRegister(instr->result()); - DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); - Label is_smi, done, heap_number; - - // Both smi and heap number cases are handled. - __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); - - // Check for heap number - __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); - __ Cmpi(scratch, Operand(factory()->heap_number_map()), r0); - __ beq(&heap_number); - - // Check for undefined. Undefined is converted to zero for clamping - // conversions.
- __ Cmpi(input_reg, Operand(factory()->undefined_value()), r0); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined); - __ li(result_reg, Operand::Zero()); - __ b(&done); - - // Heap number - __ bind(&heap_number); - __ lfd(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); - __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0()); - __ b(&done); - - // smi - __ bind(&is_smi); - __ ClampUint8(result_reg, result_reg); - - __ bind(&done); -} - - -void LCodeGen::DoAllocate(LAllocate* instr) { - class DeferredAllocate final : public LDeferredCode { - public: - DeferredAllocate(LCodeGen* codegen, LAllocate* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { codegen()->DoDeferredAllocate(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LAllocate* instr_; - }; - - DeferredAllocate* deferred = - new(zone()) DeferredAllocate(this, instr); - - Register result = ToRegister(instr->result()); - Register scratch = ToRegister(instr->temp1()); - Register scratch2 = ToRegister(instr->temp2()); - - // Allocate memory for the object. - AllocationFlags flags = NO_ALLOCATION_FLAGS; - if (instr->hydrogen()->MustAllocateDoubleAligned()) { - flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); - } - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - flags = static_cast<AllocationFlags>(flags | PRETENURE); - } - - if (instr->hydrogen()->IsAllocationFoldingDominator()) { - flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR); - } - - DCHECK(!instr->hydrogen()->IsAllocationFolded()); - - if (instr->size()->IsConstantOperand()) { - int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - CHECK(size <= kMaxRegularHeapObjectSize); - __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); - } else { - Register size = ToRegister(instr->size()); - __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); - } - - __ bind(deferred->exit()); - - if (instr->hydrogen()->MustPrefillWithFiller()) { - if (instr->size()->IsConstantOperand()) { - int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - __ LoadIntLiteral(scratch, size - kHeapObjectTag); - } else { - __ subi(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag)); - } - __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); - Label loop; - __ bind(&loop); - __ subi(scratch, scratch, Operand(kPointerSize)); - __ StorePX(scratch2, MemOperand(result, scratch)); - __ cmpi(scratch, Operand::Zero()); - __ bge(&loop); - } -} - - -void LCodeGen::DoDeferredAllocate(LAllocate* instr) { - Register result = ToRegister(instr->result()); - - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map.
- __ LoadSmiLiteral(result, Smi::kZero); - - PushSafepointRegistersScope scope(this); - if (instr->size()->IsRegister()) { - Register size = ToRegister(instr->size()); - DCHECK(!size.is(result)); - __ SmiTag(size); - __ push(size); - } else { - int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); -#if !V8_TARGET_ARCH_PPC64 - if (size >= 0 && size <= Smi::kMaxValue) { -#endif - __ Push(Smi::FromInt(size)); -#if !V8_TARGET_ARCH_PPC64 - } else { - // We should never get here at runtime => abort - __ stop("invalid allocation size"); - return; - } -#endif - } - - int flags = AllocateDoubleAlignFlag::encode( - instr->hydrogen()->MustAllocateDoubleAligned()); - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - flags = AllocateTargetSpace::update(flags, OLD_SPACE); - } else { - flags = AllocateTargetSpace::update(flags, NEW_SPACE); - } - __ Push(Smi::FromInt(flags)); - - CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr, - instr->context()); - __ StoreToSafepointRegisterSlot(r3, result); - - if (instr->hydrogen()->IsAllocationFoldingDominator()) { - AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS; - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE); - } - // If the allocation folding dominator allocate triggered a GC, allocation - // happend in the runtime. We have to reset the top pointer to virtually - // undo the allocation. - ExternalReference allocation_top = - AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags); - Register top_address = scratch0(); - __ subi(r3, r3, Operand(kHeapObjectTag)); - __ mov(top_address, Operand(allocation_top)); - __ StoreP(r3, MemOperand(top_address)); - __ addi(r3, r3, Operand(kHeapObjectTag)); - } -} - -void LCodeGen::DoFastAllocate(LFastAllocate* instr) { - DCHECK(instr->hydrogen()->IsAllocationFolded()); - DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator()); - Register result = ToRegister(instr->result()); - Register scratch1 = ToRegister(instr->temp1()); - Register scratch2 = ToRegister(instr->temp2()); - - AllocationFlags flags = ALLOCATION_FOLDED; - if (instr->hydrogen()->MustAllocateDoubleAligned()) { - flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); - } - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - flags = static_cast<AllocationFlags>(flags | PRETENURE); - } - if (instr->size()->IsConstantOperand()) { - int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - CHECK(size <= kMaxRegularHeapObjectSize); - __ FastAllocate(size, result, scratch1, scratch2, flags); - } else { - Register size = ToRegister(instr->size()); - __ FastAllocate(size, result, scratch1, scratch2, flags); - } -} - - -void LCodeGen::DoTypeof(LTypeof* instr) { - DCHECK(ToRegister(instr->value()).is(r6)); - DCHECK(ToRegister(instr->result()).is(r3)); - Label end, do_call; - Register value_register = ToRegister(instr->value()); - __ JumpIfNotSmi(value_register, &do_call); - __ mov(r3, Operand(isolate()->factory()->number_string())); - __ b(&end); - __ bind(&do_call); - Callable callable = Builtins::CallableFor(isolate(), Builtins::kTypeof); - CallCode(callable.code(), RelocInfo::CODE_TARGET, instr); - __ bind(&end); -} - - -void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { - Register input = ToRegister(instr->value()); - - Condition final_branch_condition = - EmitTypeofIs(instr->TrueLabel(chunk_),
instr->FalseLabel(chunk_), input, - instr->type_literal()); - if (final_branch_condition != kNoCondition) { - EmitBranch(instr, final_branch_condition); - } -} - - -Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label, - Register input, Handle<String> type_name) { - Condition final_branch_condition = kNoCondition; - Register scratch = scratch0(); - Factory* factory = isolate()->factory(); - if (String::Equals(type_name, factory->number_string())) { - __ JumpIfSmi(input, true_label); - __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); - __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); - final_branch_condition = eq; - - } else if (String::Equals(type_name, factory->string_string())) { - __ JumpIfSmi(input, false_label); - __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE); - final_branch_condition = lt; - - } else if (String::Equals(type_name, factory->symbol_string())) { - __ JumpIfSmi(input, false_label); - __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE); - final_branch_condition = eq; - - } else if (String::Equals(type_name, factory->boolean_string())) { - __ CompareRoot(input, Heap::kTrueValueRootIndex); - __ beq(true_label); - __ CompareRoot(input, Heap::kFalseValueRootIndex); - final_branch_condition = eq; - - } else if (String::Equals(type_name, factory->undefined_string())) { - __ CompareRoot(input, Heap::kNullValueRootIndex); - __ beq(false_label); - __ JumpIfSmi(input, false_label); - // Check for undetectable objects => true. - __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); - __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); - __ ExtractBit(r0, scratch, Map::kIsUndetectable); - __ cmpi(r0, Operand::Zero()); - final_branch_condition = ne; - - } else if (String::Equals(type_name, factory->function_string())) { - __ JumpIfSmi(input, false_label); - __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); - __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); - __ andi(scratch, scratch, - Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable))); - __ cmpi(scratch, Operand(1 << Map::kIsCallable)); - final_branch_condition = eq; - - } else if (String::Equals(type_name, factory->object_string())) { - __ JumpIfSmi(input, false_label); - __ CompareRoot(input, Heap::kNullValueRootIndex); - __ beq(true_label); - STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); - __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE); - __ blt(false_label); - // Check for callable or undetectable objects => false. - __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); - __ andi(r0, scratch, - Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable))); - __ cmpi(r0, Operand::Zero()); - final_branch_condition = eq; - - } else { - __ b(false_label); - } - - return final_branch_condition; -} - - -void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { - if (info()->ShouldEnsureSpaceForLazyDeopt()) { - // Ensure that we have enough space after the previous lazy-bailout - // instruction for patching the code here.
- int current_pc = masm()->pc_offset(); - if (current_pc < last_lazy_deopt_pc_ + space_needed) { - int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; - DCHECK_EQ(0, padding_size % Assembler::kInstrSize); - while (padding_size > 0) { - __ nop(); - padding_size -= Assembler::kInstrSize; - } - } - } - last_lazy_deopt_pc_ = masm()->pc_offset(); -} - - -void LCodeGen::DoLazyBailout(LLazyBailout* instr) { - last_lazy_deopt_pc_ = masm()->pc_offset(); - DCHECK(instr->HasEnvironment()); - LEnvironment* env = instr->environment(); - RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); - safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); -} - - -void LCodeGen::DoDeoptimize(LDeoptimize* instr) { - Deoptimizer::BailoutType type = instr->hydrogen()->type(); - // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the - // needed return address), even though the implementation of LAZY and EAGER is - // now identical. When LAZY is eventually completely folded into EAGER, remove - // the special case below. - if (info()->IsStub() && type == Deoptimizer::EAGER) { - type = Deoptimizer::LAZY; - } - - DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type); -} - - -void LCodeGen::DoDummy(LDummy* instr) { - // Nothing to see here, move on! -} - - -void LCodeGen::DoDummyUse(LDummyUse* instr) { - // Nothing to see here, move on! -} - - -void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { - PushSafepointRegistersScope scope(this); - LoadContextFromDeferred(instr->context()); - __ CallRuntimeSaveDoubles(Runtime::kStackGuard); - RecordSafepointWithLazyDeopt( - instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - DCHECK(instr->HasEnvironment()); - LEnvironment* env = instr->environment(); - safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); -} - - -void LCodeGen::DoStackCheck(LStackCheck* instr) { - class DeferredStackCheck final : public LDeferredCode { - public: - DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) - : LDeferredCode(codegen), instr_(instr) {} - void Generate() override { codegen()->DoDeferredStackCheck(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LStackCheck* instr_; - }; - - DCHECK(instr->HasEnvironment()); - LEnvironment* env = instr->environment(); - // There is no LLazyBailout instruction for stack-checks. We have to - // prepare for lazy deoptimization explicitly here. - if (instr->hydrogen()->is_function_entry()) { - // Perform stack overflow check. - Label done; - __ LoadRoot(ip, Heap::kStackLimitRootIndex); - __ cmpl(sp, ip); - __ bge(&done); - DCHECK(instr->context()->IsRegister()); - DCHECK(ToRegister(instr->context()).is(cp)); - CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET, - instr); - __ bind(&done); - } else { - DCHECK(instr->hydrogen()->is_backwards_branch()); - // Perform stack overflow check if this goto needs it before jumping. - DeferredStackCheck* deferred_stack_check = - new (zone()) DeferredStackCheck(this, instr); - __ LoadRoot(ip, Heap::kStackLimitRootIndex); - __ cmpl(sp, ip); - __ blt(deferred_stack_check->entry()); - EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); - __ bind(instr->done_label()); - deferred_stack_check->SetExit(instr->done_label()); - RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); - // Don't record a deoptimization index for the safepoint here. - // This will be done explicitly when emitting call and the safepoint in - // the deferred code. 
- } -} - - -void LCodeGen::DoOsrEntry(LOsrEntry* instr) { - // This is a pseudo-instruction that ensures that the environment here is - // properly registered for deoptimization and records the assembler's PC - // offset. - LEnvironment* environment = instr->environment(); - - // If the environment were already registered, we would have no way of - // backpatching it with the spill slot operands. - DCHECK(!environment->HasBeenRegistered()); - RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); - - GenerateOsrPrologue(); -} - - -void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { - Label use_cache, call_runtime; - __ CheckEnumCache(&call_runtime); - - __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset)); - __ b(&use_cache); - - // Get the set of properties to enumerate. - __ bind(&call_runtime); - __ push(r3); - CallRuntime(Runtime::kForInEnumerate, instr); - __ bind(&use_cache); -} - - -void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { - Register map = ToRegister(instr->map()); - Register result = ToRegister(instr->result()); - Label load_cache, done; - __ EnumLength(result, map); - __ CmpSmiLiteral(result, Smi::kZero, r0); - __ bne(&load_cache); - __ mov(result, Operand(isolate()->factory()->empty_fixed_array())); - __ b(&done); - - __ bind(&load_cache); - __ LoadInstanceDescriptors(map, result); - __ LoadP(result, - FieldMemOperand(result, DescriptorArray::kEnumCacheBridgeOffset)); - __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); - __ cmpi(result, Operand::Zero()); - DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache); - - __ bind(&done); -} - - -void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { - Register object = ToRegister(instr->value()); - Register map = ToRegister(instr->map()); - __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); - __ cmp(map, scratch0()); - DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap); -} - - -void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, - Register result, Register object, - Register index) { - PushSafepointRegistersScope scope(this); - __ Push(object, index); - __ li(cp, Operand::Zero()); - __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); - RecordSafepointWithRegisters(instr->pointer_map(), 2, - Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(r3, result); -} - - -void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { - class DeferredLoadMutableDouble final : public LDeferredCode { - public: - DeferredLoadMutableDouble(LCodeGen* codegen, LLoadFieldByIndex* instr, - Register result, Register object, Register index) - : LDeferredCode(codegen), - instr_(instr), - result_(result), - object_(object), - index_(index) {} - void Generate() override { - codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_); - } - LInstruction* instr() override { return instr_; } - - private: - LLoadFieldByIndex* instr_; - Register result_; - Register object_; - Register index_; - }; - - Register object = ToRegister(instr->object()); - Register index = ToRegister(instr->index()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - - DeferredLoadMutableDouble* deferred; - deferred = new (zone()) - DeferredLoadMutableDouble(this, instr, result, object, index); - - Label out_of_object, done; - - __ TestBitMask(index, reinterpret_cast(Smi::FromInt(1)), r0); - __ bne(deferred->entry(), cr0); - __ ShiftRightArithImm(index, index, 1); - - __ cmpi(index, Operand::Zero()); - __ 
blt(&out_of_object); - - __ SmiToPtrArrayOffset(r0, index); - __ add(scratch, object, r0); - __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize)); - - __ b(&done); - - __ bind(&out_of_object); - __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); - // Index is equal to negated out of object property index plus 1. - __ SmiToPtrArrayOffset(r0, index); - __ sub(scratch, result, r0); - __ LoadP(result, - FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize)); - __ bind(deferred->exit()); - __ bind(&done); -} - -#undef __ - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/ppc/lithium-codegen-ppc.h b/src/crankshaft/ppc/lithium-codegen-ppc.h deleted file mode 100644 index 32b9e18487..0000000000 --- a/src/crankshaft/ppc/lithium-codegen-ppc.h +++ /dev/null @@ -1,344 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_PPC_LITHIUM_CODEGEN_PPC_H_ -#define V8_CRANKSHAFT_PPC_LITHIUM_CODEGEN_PPC_H_ - -#include "src/ast/scopes.h" -#include "src/crankshaft/lithium-codegen.h" -#include "src/crankshaft/ppc/lithium-gap-resolver-ppc.h" -#include "src/crankshaft/ppc/lithium-ppc.h" -#include "src/deoptimizer.h" -#include "src/safepoint-table.h" -#include "src/utils.h" - -namespace v8 { -namespace internal { - -// Forward declarations. -class LDeferredCode; -class SafepointGenerator; - -class LCodeGen : public LCodeGenBase { - public: - LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) - : LCodeGenBase(chunk, assembler, info), - jump_table_(4, info->zone()), - scope_(info->scope()), - deferred_(8, info->zone()), - frame_is_built_(false), - safepoints_(info->zone()), - resolver_(this), - expected_safepoint_kind_(Safepoint::kSimple) { - PopulateDeoptimizationLiteralsWithInlinedFunctions(); - } - - - int LookupDestination(int block_id) const { - return chunk()->LookupDestination(block_id); - } - - bool IsNextEmittedBlock(int block_id) const { - return LookupDestination(block_id) == GetNextEmittedBlock(); - } - - bool NeedsEagerFrame() const { - return HasAllocatedStackSlots() || info()->is_non_deferred_calling() || - !info()->IsStub() || info()->requires_frame(); - } - bool NeedsDeferredFrame() const { - return !NeedsEagerFrame() && info()->is_deferred_calling(); - } - - LinkRegisterStatus GetLinkRegisterState() const { - return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved; - } - - // Support for converting LOperands to assembler types. - // LOperand must be a register. - Register ToRegister(LOperand* op) const; - - // LOperand is loaded into scratch, unless already a register. - Register EmitLoadRegister(LOperand* op, Register scratch); - - // LConstantOperand must be an Integer32 or Smi - void EmitLoadIntegerConstant(LConstantOperand* const_op, Register dst); - - // LOperand must be a double register. - DoubleRegister ToDoubleRegister(LOperand* op) const; - - intptr_t ToRepresentation(LConstantOperand* op, - const Representation& r) const; - int32_t ToInteger32(LConstantOperand* op) const; - Smi* ToSmi(LConstantOperand* op) const; - double ToDouble(LConstantOperand* op) const; - Operand ToOperand(LOperand* op); - MemOperand ToMemOperand(LOperand* op) const; - // Returns a MemOperand pointing to the high word of a DoubleStackSlot. 
- MemOperand ToHighMemOperand(LOperand* op) const; - - bool IsInteger32(LConstantOperand* op) const; - bool IsSmi(LConstantOperand* op) const; - Handle ToHandle(LConstantOperand* op) const; - - // Try to generate code for the entire chunk, but it may fail if the - // chunk contains constructs we cannot handle. Returns true if the - // code generation attempt succeeded. - bool GenerateCode(); - - // Finish the code by setting stack height, safepoint, and bailout - // information on it. - void FinishCode(Handle code); - - // Deferred code support. - void DoDeferredNumberTagD(LNumberTagD* instr); - - enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 }; - void DoDeferredNumberTagIU(LInstruction* instr, LOperand* value, - LOperand* temp1, LOperand* temp2, - IntegerSignedness signedness); - - void DoDeferredTaggedToI(LTaggedToI* instr); - void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr); - void DoDeferredStackCheck(LStackCheck* instr); - void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr); - void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr); - void DoDeferredStringCharFromCode(LStringCharFromCode* instr); - void DoDeferredAllocate(LAllocate* instr); - void DoDeferredInstanceMigration(LCheckMaps* instr, Register object); - void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, Register result, - Register object, Register index); - - // Parallel move support. - void DoParallelMove(LParallelMove* move); - void DoGap(LGap* instr); - - MemOperand PrepareKeyedOperand(Register key, Register base, - bool key_is_constant, bool key_is_tagged, - int constant_key, int element_size_shift, - int base_offset); - - // Emit frame translation commands for an environment. - void WriteTranslation(LEnvironment* environment, Translation* translation); - -// Declare methods that deal with the individual node types. -#define DECLARE_DO(type) void Do##type(L##type* node); - LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) -#undef DECLARE_DO - - private: - Scope* scope() const { return scope_; } - - Register scratch0() { return kLithiumScratch; } - DoubleRegister double_scratch0() { return kScratchDoubleReg; } - - LInstruction* GetNextInstruction(); - - void EmitClassOfTest(Label* if_true, Label* if_false, - Handle class_name, Register input, - Register temporary, Register temporary2); - - bool HasAllocatedStackSlots() const { - return chunk()->HasAllocatedStackSlots(); - } - int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); } - int GetTotalFrameSlotCount() const { - return chunk()->GetTotalFrameSlotCount(); - } - - void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); } - - void SaveCallerDoubles(); - void RestoreCallerDoubles(); - - // Code generation passes. Returns true if code generation should - // continue. - void GenerateBodyInstructionPre(LInstruction* instr) override; - bool GeneratePrologue(); - bool GenerateDeferredCode(); - bool GenerateJumpTable(); - bool GenerateSafepointTable(); - - // Generates the custom OSR entrypoint and sets the osr_pc_offset. 
- void GenerateOsrPrologue(); - - enum SafepointMode { - RECORD_SIMPLE_SAFEPOINT, - RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS - }; - - void CallCode(Handle code, RelocInfo::Mode mode, LInstruction* instr); - - void CallCodeGeneric(Handle code, RelocInfo::Mode mode, - LInstruction* instr, SafepointMode safepoint_mode); - - void CallRuntime(const Runtime::Function* function, int num_arguments, - LInstruction* instr, - SaveFPRegsMode save_doubles = kDontSaveFPRegs); - - void CallRuntime(Runtime::FunctionId id, int num_arguments, - LInstruction* instr) { - const Runtime::Function* function = Runtime::FunctionForId(id); - CallRuntime(function, num_arguments, instr); - } - - void CallRuntime(Runtime::FunctionId id, LInstruction* instr) { - const Runtime::Function* function = Runtime::FunctionForId(id); - CallRuntime(function, function->nargs, instr); - } - - void LoadContextFromDeferred(LOperand* context); - void CallRuntimeFromDeferred(Runtime::FunctionId id, int argc, - LInstruction* instr, LOperand* context); - - void PrepareForTailCall(const ParameterCount& actual, Register scratch1, - Register scratch2, Register scratch3); - - // Generate a direct call to a known function. Expects the function - // to be in r4. - void CallKnownFunction(Handle function, - int formal_parameter_count, int arity, - bool is_tail_call, LInstruction* instr); - - void RecordSafepointWithLazyDeopt(LInstruction* instr, - SafepointMode safepoint_mode); - - void RegisterEnvironmentForDeoptimization(LEnvironment* environment, - Safepoint::DeoptMode mode); - void DeoptimizeIf(Condition condition, LInstruction* instr, - DeoptimizeReason deopt_reason, - Deoptimizer::BailoutType bailout_type, CRegister cr = cr7); - void DeoptimizeIf(Condition condition, LInstruction* instr, - DeoptimizeReason deopt_reason, CRegister cr = cr7); - - void AddToTranslation(LEnvironment* environment, Translation* translation, - LOperand* op, bool is_tagged, bool is_uint32, - int* object_index_pointer, - int* dematerialized_index_pointer); - - Register ToRegister(int index) const; - DoubleRegister ToDoubleRegister(int index) const; - - MemOperand BuildSeqStringOperand(Register string, LOperand* index, - String::Encoding encoding); - - void EmitMathAbs(LMathAbs* instr); -#if V8_TARGET_ARCH_PPC64 - void EmitInteger32MathAbs(LMathAbs* instr); -#endif - - // Support for recording safepoint information. - void RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind, - int arguments, Safepoint::DeoptMode mode); - void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode); - void RecordSafepoint(Safepoint::DeoptMode mode); - void RecordSafepointWithRegisters(LPointerMap* pointers, int arguments, - Safepoint::DeoptMode mode); - - static Condition TokenToCondition(Token::Value op); - void EmitGoto(int block); - - // EmitBranch expects to be the last instruction of a block. - template - void EmitBranch(InstrType instr, Condition condition, CRegister cr = cr7); - template - void EmitTrueBranch(InstrType instr, Condition condition, CRegister cr = cr7); - template - void EmitFalseBranch(InstrType instr, Condition condition, - CRegister cr = cr7); - void EmitNumberUntagD(LNumberUntagD* instr, Register input, - DoubleRegister result, NumberUntagDMode mode); - - // Emits optimized code for typeof x == "y". Modifies input register. - // Returns the condition on which a final split to - // true and false label should be made, to optimize fallthrough. 
- Condition EmitTypeofIs(Label* true_label, Label* false_label, Register input, - Handle type_name); - - // Emits optimized code for %_IsString(x). Preserves input register. - // Returns the condition on which a final split to - // true and false label should be made, to optimize fallthrough. - Condition EmitIsString(Register input, Register temp1, Label* is_not_string, - SmiCheck check_needed); - - // Emits optimized code to deep-copy the contents of statically known - // object graphs (e.g. object literal boilerplate). - void EmitDeepCopy(Handle object, Register result, Register source, - int* offset, AllocationSiteMode mode); - - void EnsureSpaceForLazyDeopt(int space_needed) override; - void DoLoadKeyedExternalArray(LLoadKeyed* instr); - void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr); - void DoLoadKeyedFixedArray(LLoadKeyed* instr); - void DoStoreKeyedExternalArray(LStoreKeyed* instr); - void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr); - void DoStoreKeyedFixedArray(LStoreKeyed* instr); - - template - void EmitVectorLoadICRegisters(T* instr); - - ZoneList jump_table_; - Scope* const scope_; - ZoneList deferred_; - bool frame_is_built_; - - // Builder that keeps track of safepoints in the code. The table - // itself is emitted at the end of the generated code. - SafepointTableBuilder safepoints_; - - // Compiler from a set of parallel moves to a sequential list of moves. - LGapResolver resolver_; - - Safepoint::Kind expected_safepoint_kind_; - - class PushSafepointRegistersScope final BASE_EMBEDDED { - public: - explicit PushSafepointRegistersScope(LCodeGen* codegen); - - ~PushSafepointRegistersScope(); - - private: - LCodeGen* codegen_; - }; - - friend class LDeferredCode; - friend class LEnvironment; - friend class SafepointGenerator; - DISALLOW_COPY_AND_ASSIGN(LCodeGen); -}; - - -class LDeferredCode : public ZoneObject { - public: - explicit LDeferredCode(LCodeGen* codegen) - : codegen_(codegen), - external_exit_(NULL), - instruction_index_(codegen->current_instruction_) { - codegen->AddDeferredCode(this); - } - - virtual ~LDeferredCode() {} - virtual void Generate() = 0; - virtual LInstruction* instr() = 0; - - void SetExit(Label* exit) { external_exit_ = exit; } - Label* entry() { return &entry_; } - Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; } - int instruction_index() const { return instruction_index_; } - - protected: - LCodeGen* codegen() const { return codegen_; } - MacroAssembler* masm() const { return codegen_->masm(); } - - private: - LCodeGen* codegen_; - Label entry_; - Label exit_; - Label* external_exit_; - int instruction_index_; -}; -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_PPC_LITHIUM_CODEGEN_PPC_H_ diff --git a/src/crankshaft/ppc/lithium-gap-resolver-ppc.cc b/src/crankshaft/ppc/lithium-gap-resolver-ppc.cc deleted file mode 100644 index 4e249808f7..0000000000 --- a/src/crankshaft/ppc/lithium-gap-resolver-ppc.cc +++ /dev/null @@ -1,287 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
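The file deleted below, src/crankshaft/ppc/lithium-gap-resolver-ppc.cc, resolved the parallel moves recorded in Lithium gap instructions: it performed a depth-first walk of the move graph and, when that walk re-encountered a pending move, broke the cycle by spilling through a scratch register (kSavedValueRegister, or kScratchDoubleReg for doubles). The following is a minimal, self-contained sketch of that cycle-breaking idea; the Move/PerformMove names and the textual "mov" output are hypothetical stand-ins for V8's LGapResolver and MacroAssembler, and the sketch only handles the simple cycles the real resolver also breaks with a single scratch location.

    #include <cstdio>
    #include <vector>

    // Hypothetical stand-in for LMoveOperands: one pending parallel move.
    struct Move {
      int src;       // source register index
      int dst;       // destination register index
      bool done;     // move already emitted
      bool pending;  // move is on the current depth-first path
    };

    // Emit moves so every destination receives its source's *original* value,
    // spilling to |scratch| when a cycle (e.g. an r0 <-> r1 swap) is hit.
    void PerformMove(std::vector<Move>& moves, size_t index, int scratch) {
      Move& m = moves[index];
      m.pending = true;
      // First perform every move that still reads the location we will clobber.
      for (size_t i = 0; i < moves.size(); ++i) {
        Move& other = moves[i];
        if (other.done || i == index) continue;
        if (other.src == m.dst) {
          if (other.pending) {
            // Cycle: save the blocked value in the scratch register and let the
            // blocking move read it from there later.
            std::printf("mov r%d, r%d  ; break cycle via scratch\n", scratch,
                        other.src);
            other.src = scratch;
          } else {
            PerformMove(moves, i, scratch);
          }
        }
      }
      std::printf("mov r%d, r%d\n", m.dst, m.src);
      m.pending = false;
      m.done = true;
    }

    int main() {
      // Swap r0 and r1, plus an independent move r2 -> r3.
      std::vector<Move> moves = {{0, 1, false, false},
                                 {1, 0, false, false},
                                 {2, 3, false, false}};
      for (size_t i = 0; i < moves.size(); ++i) {
        if (!moves[i].done) PerformMove(moves, i, /*scratch=*/11);
      }
      return 0;
    }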
- -#include "src/crankshaft/ppc/lithium-gap-resolver-ppc.h" - -#include "src/crankshaft/ppc/lithium-codegen-ppc.h" - -namespace v8 { -namespace internal { - -static const Register kSavedValueRegister = {11}; - -LGapResolver::LGapResolver(LCodeGen* owner) - : cgen_(owner), - moves_(32, owner->zone()), - root_index_(0), - in_cycle_(false), - saved_destination_(NULL) {} - - -void LGapResolver::Resolve(LParallelMove* parallel_move) { - DCHECK(moves_.is_empty()); - // Build up a worklist of moves. - BuildInitialMoveList(parallel_move); - - for (int i = 0; i < moves_.length(); ++i) { - LMoveOperands move = moves_[i]; - // Skip constants to perform them last. They don't block other moves - // and skipping such moves with register destinations keeps those - // registers free for the whole algorithm. - if (!move.IsEliminated() && !move.source()->IsConstantOperand()) { - root_index_ = i; // Any cycle is found when by reaching this move again. - PerformMove(i); - if (in_cycle_) { - RestoreValue(); - } - } - } - - // Perform the moves with constant sources. - for (int i = 0; i < moves_.length(); ++i) { - if (!moves_[i].IsEliminated()) { - DCHECK(moves_[i].source()->IsConstantOperand()); - EmitMove(i); - } - } - - moves_.Rewind(0); -} - - -void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) { - // Perform a linear sweep of the moves to add them to the initial list of - // moves to perform, ignoring any move that is redundant (the source is - // the same as the destination, the destination is ignored and - // unallocated, or the move was already eliminated). - const ZoneList* moves = parallel_move->move_operands(); - for (int i = 0; i < moves->length(); ++i) { - LMoveOperands move = moves->at(i); - if (!move.IsRedundant()) moves_.Add(move, cgen_->zone()); - } - Verify(); -} - - -void LGapResolver::PerformMove(int index) { - // Each call to this function performs a move and deletes it from the move - // graph. We first recursively perform any move blocking this one. We - // mark a move as "pending" on entry to PerformMove in order to detect - // cycles in the move graph. - - // We can only find a cycle, when doing a depth-first traversal of moves, - // be encountering the starting move again. So by spilling the source of - // the starting move, we break the cycle. All moves are then unblocked, - // and the starting move is completed by writing the spilled value to - // its destination. All other moves from the spilled source have been - // completed prior to breaking the cycle. - // An additional complication is that moves to MemOperands with large - // offsets (more than 1K or 4K) require us to spill this spilled value to - // the stack, to free up the register. - DCHECK(!moves_[index].IsPending()); - DCHECK(!moves_[index].IsRedundant()); - - // Clear this move's destination to indicate a pending move. The actual - // destination is saved in a stack allocated local. Multiple moves can - // be pending because this function is recursive. - DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated. - LOperand* destination = moves_[index].destination(); - moves_[index].set_destination(NULL); - - // Perform a depth-first traversal of the move graph to resolve - // dependencies. Any unperformed, unpending move with a source the same - // as this one's destination blocks this one so recursively perform all - // such moves. 
- for (int i = 0; i < moves_.length(); ++i) { - LMoveOperands other_move = moves_[i]; - if (other_move.Blocks(destination) && !other_move.IsPending()) { - PerformMove(i); - // If there is a blocking, pending move it must be moves_[root_index_] - // and all other moves with the same source as moves_[root_index_] are - // sucessfully executed (because they are cycle-free) by this loop. - } - } - - // We are about to resolve this move and don't need it marked as - // pending, so restore its destination. - moves_[index].set_destination(destination); - - // The move may be blocked on a pending move, which must be the starting move. - // In this case, we have a cycle, and we save the source of this move to - // a scratch register to break it. - LMoveOperands other_move = moves_[root_index_]; - if (other_move.Blocks(destination)) { - DCHECK(other_move.IsPending()); - BreakCycle(index); - return; - } - - // This move is no longer blocked. - EmitMove(index); -} - - -void LGapResolver::Verify() { -#ifdef ENABLE_SLOW_DCHECKS - // No operand should be the destination for more than one move. - for (int i = 0; i < moves_.length(); ++i) { - LOperand* destination = moves_[i].destination(); - for (int j = i + 1; j < moves_.length(); ++j) { - SLOW_DCHECK(!destination->Equals(moves_[j].destination())); - } - } -#endif -} - -#define __ ACCESS_MASM(cgen_->masm()) - -void LGapResolver::BreakCycle(int index) { - // We save in a register the value that should end up in the source of - // moves_[root_index]. After performing all moves in the tree rooted - // in that move, we save the value to that source. - DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source())); - DCHECK(!in_cycle_); - in_cycle_ = true; - LOperand* source = moves_[index].source(); - saved_destination_ = moves_[index].destination(); - if (source->IsRegister()) { - __ mr(kSavedValueRegister, cgen_->ToRegister(source)); - } else if (source->IsStackSlot()) { - __ LoadP(kSavedValueRegister, cgen_->ToMemOperand(source)); - } else if (source->IsDoubleRegister()) { - __ fmr(kScratchDoubleReg, cgen_->ToDoubleRegister(source)); - } else if (source->IsDoubleStackSlot()) { - __ lfd(kScratchDoubleReg, cgen_->ToMemOperand(source)); - } else { - UNREACHABLE(); - } - // This move will be done by restoring the saved value to the destination. - moves_[index].Eliminate(); -} - - -void LGapResolver::RestoreValue() { - DCHECK(in_cycle_); - DCHECK(saved_destination_ != NULL); - - // Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister. - if (saved_destination_->IsRegister()) { - __ mr(cgen_->ToRegister(saved_destination_), kSavedValueRegister); - } else if (saved_destination_->IsStackSlot()) { - __ StoreP(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_)); - } else if (saved_destination_->IsDoubleRegister()) { - __ fmr(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg); - } else if (saved_destination_->IsDoubleStackSlot()) { - __ stfd(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_)); - } else { - UNREACHABLE(); - } - - in_cycle_ = false; - saved_destination_ = NULL; -} - - -void LGapResolver::EmitMove(int index) { - LOperand* source = moves_[index].source(); - LOperand* destination = moves_[index].destination(); - - // Dispatch on the source and destination operand kinds. Not all - // combinations are possible. 
- - if (source->IsRegister()) { - Register source_register = cgen_->ToRegister(source); - if (destination->IsRegister()) { - __ mr(cgen_->ToRegister(destination), source_register); - } else { - DCHECK(destination->IsStackSlot()); - __ StoreP(source_register, cgen_->ToMemOperand(destination)); - } - } else if (source->IsStackSlot()) { - MemOperand source_operand = cgen_->ToMemOperand(source); - if (destination->IsRegister()) { - __ LoadP(cgen_->ToRegister(destination), source_operand); - } else { - DCHECK(destination->IsStackSlot()); - MemOperand destination_operand = cgen_->ToMemOperand(destination); - if (in_cycle_) { - __ LoadP(ip, source_operand); - __ StoreP(ip, destination_operand); - } else { - __ LoadP(kSavedValueRegister, source_operand); - __ StoreP(kSavedValueRegister, destination_operand); - } - } - - } else if (source->IsConstantOperand()) { - LConstantOperand* constant_source = LConstantOperand::cast(source); - if (destination->IsRegister()) { - Register dst = cgen_->ToRegister(destination); - if (cgen_->IsInteger32(constant_source)) { - cgen_->EmitLoadIntegerConstant(constant_source, dst); - } else { - __ Move(dst, cgen_->ToHandle(constant_source)); - } - } else if (destination->IsDoubleRegister()) { - DoubleRegister result = cgen_->ToDoubleRegister(destination); - double v = cgen_->ToDouble(constant_source); - __ LoadDoubleLiteral(result, v, ip); - } else { - DCHECK(destination->IsStackSlot()); - DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone. - if (cgen_->IsInteger32(constant_source)) { - cgen_->EmitLoadIntegerConstant(constant_source, kSavedValueRegister); - } else { - __ Move(kSavedValueRegister, cgen_->ToHandle(constant_source)); - } - __ StoreP(kSavedValueRegister, cgen_->ToMemOperand(destination)); - } - - } else if (source->IsDoubleRegister()) { - DoubleRegister source_register = cgen_->ToDoubleRegister(source); - if (destination->IsDoubleRegister()) { - __ fmr(cgen_->ToDoubleRegister(destination), source_register); - } else { - DCHECK(destination->IsDoubleStackSlot()); - __ stfd(source_register, cgen_->ToMemOperand(destination)); - } - - } else if (source->IsDoubleStackSlot()) { - MemOperand source_operand = cgen_->ToMemOperand(source); - if (destination->IsDoubleRegister()) { - __ lfd(cgen_->ToDoubleRegister(destination), source_operand); - } else { - DCHECK(destination->IsDoubleStackSlot()); - MemOperand destination_operand = cgen_->ToMemOperand(destination); - if (in_cycle_) { -// kSavedDoubleValueRegister was used to break the cycle, -// but kSavedValueRegister is free. 
-#if V8_TARGET_ARCH_PPC64 - __ ld(kSavedValueRegister, source_operand); - __ std(kSavedValueRegister, destination_operand); -#else - MemOperand source_high_operand = cgen_->ToHighMemOperand(source); - MemOperand destination_high_operand = - cgen_->ToHighMemOperand(destination); - __ lwz(kSavedValueRegister, source_operand); - __ stw(kSavedValueRegister, destination_operand); - __ lwz(kSavedValueRegister, source_high_operand); - __ stw(kSavedValueRegister, destination_high_operand); -#endif - } else { - __ lfd(kScratchDoubleReg, source_operand); - __ stfd(kScratchDoubleReg, destination_operand); - } - } - } else { - UNREACHABLE(); - } - - moves_[index].Eliminate(); -} - - -#undef __ -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/ppc/lithium-gap-resolver-ppc.h b/src/crankshaft/ppc/lithium-gap-resolver-ppc.h deleted file mode 100644 index 6eeea5eee5..0000000000 --- a/src/crankshaft/ppc/lithium-gap-resolver-ppc.h +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_PPC_LITHIUM_GAP_RESOLVER_PPC_H_ -#define V8_CRANKSHAFT_PPC_LITHIUM_GAP_RESOLVER_PPC_H_ - -#include "src/crankshaft/lithium.h" - -namespace v8 { -namespace internal { - -class LCodeGen; -class LGapResolver; - -class LGapResolver final BASE_EMBEDDED { - public: - explicit LGapResolver(LCodeGen* owner); - - // Resolve a set of parallel moves, emitting assembler instructions. - void Resolve(LParallelMove* parallel_move); - - private: - // Build the initial list of moves. - void BuildInitialMoveList(LParallelMove* parallel_move); - - // Perform the move at the moves_ index in question (possibly requiring - // other moves to satisfy dependencies). - void PerformMove(int index); - - // If a cycle is found in the series of moves, save the blocking value to - // a scratch register. The cycle must be found by hitting the root of the - // depth-first search. - void BreakCycle(int index); - - // After a cycle has been resolved, restore the value from the scratch - // register to its proper destination. - void RestoreValue(); - - // Emit a move and remove it from the move graph. - void EmitMove(int index); - - // Verify the move list before performing moves. - void Verify(); - - LCodeGen* cgen_; - - // List of moves not yet resolved. - ZoneList moves_; - - int root_index_; - bool in_cycle_; - LOperand* saved_destination_; -}; -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_PPC_LITHIUM_GAP_RESOLVER_PPC_H_ diff --git a/src/crankshaft/ppc/lithium-ppc.cc b/src/crankshaft/ppc/lithium-ppc.cc deleted file mode 100644 index 95ebb99334..0000000000 --- a/src/crankshaft/ppc/lithium-ppc.cc +++ /dev/null @@ -1,2352 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
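The largest deletion in this change follows: src/crankshaft/ppc/lithium-ppc.cc, the PPC LChunkBuilder, which lowered Hydrogen instructions to Lithium by attaching register constraints (UseRegister, UseFixed, UseAny, DefineAsRegister, DefineFixed, and so on) that Crankshaft's allocator resolved afterwards. A rough sketch of that constraint-first style, using hypothetical Policy/Operand names in place of LUnallocated and its policies:

    #include <cstdio>

    // Hypothetical stand-ins for LUnallocated and its allocation policies.
    enum class Policy { kAny, kMustHaveRegister, kFixedRegister };

    struct Operand {
      Policy policy;
      int virtual_register;  // id of the value being used or defined
      int fixed_code;        // concrete register, only for kFixedRegister
    };

    // Instruction selection only records constraints; no register is chosen yet.
    Operand UseAny(int vreg) { return {Policy::kAny, vreg, -1}; }
    Operand UseRegister(int vreg) { return {Policy::kMustHaveRegister, vreg, -1}; }
    Operand UseFixed(int vreg, int reg_code) {
      return {Policy::kFixedRegister, vreg, reg_code};
    }

    int main() {
      // An arithmetic input may live in any register, while a call-like
      // instruction pins its operand, mirroring UseFixed(instr->left(), r4).
      Operand lhs = UseRegister(/*vreg=*/1);
      Operand arg = UseFixed(/*vreg=*/2, /*r4=*/4);
      std::printf("lhs: policy=%d vreg=%d\n", static_cast<int>(lhs.policy),
                  lhs.virtual_register);
      std::printf("arg: policy=%d vreg=%d fixed=r%d\n",
                  static_cast<int>(arg.policy), arg.virtual_register,
                  arg.fixed_code);
      return 0;
    }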
- -#include "src/crankshaft/ppc/lithium-ppc.h" - -#include - -#include "src/crankshaft/lithium-inl.h" -#include "src/crankshaft/ppc/lithium-codegen-ppc.h" - -namespace v8 { -namespace internal { - -#define DEFINE_COMPILE(type) \ - void L##type::CompileToNative(LCodeGen* generator) { \ - generator->Do##type(this); \ - } -LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE) -#undef DEFINE_COMPILE - -#ifdef DEBUG -void LInstruction::VerifyCall() { - // Call instructions can use only fixed registers as temporaries and - // outputs because all registers are blocked by the calling convention. - // Inputs operands must use a fixed register or use-at-start policy or - // a non-register policy. - DCHECK(Output() == NULL || LUnallocated::cast(Output())->HasFixedPolicy() || - !LUnallocated::cast(Output())->HasRegisterPolicy()); - for (UseIterator it(this); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - DCHECK(operand->HasFixedPolicy() || operand->IsUsedAtStart()); - } - for (TempIterator it(this); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - DCHECK(operand->HasFixedPolicy() || !operand->HasRegisterPolicy()); - } -} -#endif - - -void LInstruction::PrintTo(StringStream* stream) { - stream->Add("%s ", this->Mnemonic()); - - PrintOutputOperandTo(stream); - - PrintDataTo(stream); - - if (HasEnvironment()) { - stream->Add(" "); - environment()->PrintTo(stream); - } - - if (HasPointerMap()) { - stream->Add(" "); - pointer_map()->PrintTo(stream); - } -} - - -void LInstruction::PrintDataTo(StringStream* stream) { - stream->Add("= "); - for (int i = 0; i < InputCount(); i++) { - if (i > 0) stream->Add(" "); - if (InputAt(i) == NULL) { - stream->Add("NULL"); - } else { - InputAt(i)->PrintTo(stream); - } - } -} - - -void LInstruction::PrintOutputOperandTo(StringStream* stream) { - if (HasResult()) result()->PrintTo(stream); -} - - -void LLabel::PrintDataTo(StringStream* stream) { - LGap::PrintDataTo(stream); - LLabel* rep = replacement(); - if (rep != NULL) { - stream->Add(" Dead block replaced with B%d", rep->block_id()); - } -} - - -bool LGap::IsRedundant() const { - for (int i = 0; i < 4; i++) { - if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) { - return false; - } - } - - return true; -} - - -void LGap::PrintDataTo(StringStream* stream) { - for (int i = 0; i < 4; i++) { - stream->Add("("); - if (parallel_moves_[i] != NULL) { - parallel_moves_[i]->PrintDataTo(stream); - } - stream->Add(") "); - } -} - - -const char* LArithmeticD::Mnemonic() const { - switch (op()) { - case Token::ADD: - return "add-d"; - case Token::SUB: - return "sub-d"; - case Token::MUL: - return "mul-d"; - case Token::DIV: - return "div-d"; - case Token::MOD: - return "mod-d"; - default: - UNREACHABLE(); - } -} - - -const char* LArithmeticT::Mnemonic() const { - switch (op()) { - case Token::ADD: - return "add-t"; - case Token::SUB: - return "sub-t"; - case Token::MUL: - return "mul-t"; - case Token::MOD: - return "mod-t"; - case Token::DIV: - return "div-t"; - case Token::BIT_AND: - return "bit-and-t"; - case Token::BIT_OR: - return "bit-or-t"; - case Token::BIT_XOR: - return "bit-xor-t"; - case Token::ROR: - return "ror-t"; - case Token::SHL: - return "shl-t"; - case Token::SAR: - return "sar-t"; - case Token::SHR: - return "shr-t"; - default: - UNREACHABLE(); - } -} - - -bool LGoto::HasInterestingComment(LCodeGen* gen) const { - return !gen->IsNextEmittedBlock(block_id()); -} - - -void LGoto::PrintDataTo(StringStream* stream) { - 
stream->Add("B%d", block_id()); -} - - -void LBranch::PrintDataTo(StringStream* stream) { - stream->Add("B%d | B%d on ", true_block_id(), false_block_id()); - value()->PrintTo(stream); -} - - -void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if "); - left()->PrintTo(stream); - stream->Add(" %s ", Token::String(op())); - right()->PrintTo(stream); - stream->Add(" then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LIsStringAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_string("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LIsSmiAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_smi("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_undetectable("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LStringCompareAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if string_compare("); - left()->PrintTo(stream); - right()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if has_instance_type("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - -void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if class_of_test("); - value()->PrintTo(stream); - stream->Add(", \"%o\") then B%d else B%d", *hydrogen()->class_name(), - true_block_id(), false_block_id()); -} - -void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if typeof "); - value()->PrintTo(stream); - stream->Add(" == \"%s\" then B%d else B%d", - hydrogen()->type_literal()->ToCString().get(), true_block_id(), - false_block_id()); -} - - -void LStoreCodeEntry::PrintDataTo(StringStream* stream) { - stream->Add(" = "); - function()->PrintTo(stream); - stream->Add(".code_entry = "); - code_object()->PrintTo(stream); -} - - -void LInnerAllocatedObject::PrintDataTo(StringStream* stream) { - stream->Add(" = "); - base_object()->PrintTo(stream); - stream->Add(" + "); - offset()->PrintTo(stream); -} - - -void LCallWithDescriptor::PrintDataTo(StringStream* stream) { - for (int i = 0; i < InputCount(); i++) { - InputAt(i)->PrintTo(stream); - stream->Add(" "); - } - stream->Add("#%d / ", arity()); -} - - -void LLoadContextSlot::PrintDataTo(StringStream* stream) { - context()->PrintTo(stream); - stream->Add("[%d]", slot_index()); -} - - -void LStoreContextSlot::PrintDataTo(StringStream* stream) { - context()->PrintTo(stream); - stream->Add("[%d] <- ", slot_index()); - value()->PrintTo(stream); -} - - -void LInvokeFunction::PrintDataTo(StringStream* stream) { - stream->Add("= "); - function()->PrintTo(stream); - stream->Add(" #%d / ", arity()); -} - - -void LCallNewArray::PrintDataTo(StringStream* stream) { - stream->Add("= "); - constructor()->PrintTo(stream); - stream->Add(" #%d / ", arity()); - ElementsKind kind = hydrogen()->elements_kind(); - stream->Add(" (%s) ", ElementsKindToString(kind)); -} - - -void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { - arguments()->PrintTo(stream); - stream->Add(" length "); - length()->PrintTo(stream); - stream->Add(" index "); - index()->PrintTo(stream); -} - - -void 
LStoreNamedField::PrintDataTo(StringStream* stream) { - object()->PrintTo(stream); - std::ostringstream os; - os << hydrogen()->access() << " <- "; - stream->Add(os.str().c_str()); - value()->PrintTo(stream); -} - - -void LLoadKeyed::PrintDataTo(StringStream* stream) { - elements()->PrintTo(stream); - stream->Add("["); - key()->PrintTo(stream); - if (hydrogen()->IsDehoisted()) { - stream->Add(" + %d]", base_offset()); - } else { - stream->Add("]"); - } -} - - -void LStoreKeyed::PrintDataTo(StringStream* stream) { - elements()->PrintTo(stream); - stream->Add("["); - key()->PrintTo(stream); - if (hydrogen()->IsDehoisted()) { - stream->Add(" + %d] <-", base_offset()); - } else { - stream->Add("] <- "); - } - - if (value() == NULL) { - DCHECK(hydrogen()->IsConstantHoleStore() && - hydrogen()->value()->representation().IsDouble()); - stream->Add(""); - } else { - value()->PrintTo(stream); - } -} - - -void LTransitionElementsKind::PrintDataTo(StringStream* stream) { - object()->PrintTo(stream); - stream->Add(" %p -> %p", *original_map(), *transitioned_map()); -} - - -int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) { - // Skip a slot if for a double-width slot. - if (kind == DOUBLE_REGISTERS) current_frame_slots_++; - return current_frame_slots_++; -} - - -LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) { - int index = GetNextSpillIndex(kind); - if (kind == DOUBLE_REGISTERS) { - return LDoubleStackSlot::Create(index, zone()); - } else { - DCHECK(kind == GENERAL_REGISTERS); - return LStackSlot::Create(index, zone()); - } -} - - -LPlatformChunk* LChunkBuilder::Build() { - DCHECK(is_unused()); - chunk_ = new (zone()) LPlatformChunk(info(), graph()); - LPhase phase("L_Building chunk", chunk_); - status_ = BUILDING; - - const ZoneList* blocks = graph()->blocks(); - for (int i = 0; i < blocks->length(); i++) { - HBasicBlock* next = NULL; - if (i < blocks->length() - 1) next = blocks->at(i + 1); - DoBasicBlock(blocks->at(i), next); - if (is_aborted()) return NULL; - } - status_ = DONE; - return chunk_; -} - - -LUnallocated* LChunkBuilder::ToUnallocated(Register reg) { - return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code()); -} - - -LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) { - return new (zone()) - LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code()); -} - - -LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) { - return Use(value, ToUnallocated(fixed_register)); -} - - -LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) { - return Use(value, ToUnallocated(reg)); -} - - -LOperand* LChunkBuilder::UseRegister(HValue* value) { - return Use(value, - new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER)); -} - - -LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) { - return Use(value, new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER, - LUnallocated::USED_AT_START)); -} - - -LOperand* LChunkBuilder::UseTempRegister(HValue* value) { - return Use(value, new (zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER)); -} - - -LOperand* LChunkBuilder::Use(HValue* value) { - return Use(value, new (zone()) LUnallocated(LUnallocated::NONE)); -} - - -LOperand* LChunkBuilder::UseAtStart(HValue* value) { - return Use(value, new (zone()) - LUnallocated(LUnallocated::NONE, LUnallocated::USED_AT_START)); -} - - -LOperand* LChunkBuilder::UseOrConstant(HValue* value) { - return value->IsConstant() - ? 
chunk_->DefineConstantOperand(HConstant::cast(value)) - : Use(value); -} - - -LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseAtStart(value); -} - - -LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseRegister(value); -} - - -LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseRegisterAtStart(value); -} - - -LOperand* LChunkBuilder::UseConstant(HValue* value) { - return chunk_->DefineConstantOperand(HConstant::cast(value)); -} - - -LOperand* LChunkBuilder::UseAny(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : Use(value, new (zone()) LUnallocated(LUnallocated::ANY)); -} - - -LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) { - if (value->EmitAtUses()) { - HInstruction* instr = HInstruction::cast(value); - VisitInstruction(instr); - } - operand->set_virtual_register(value->id()); - return operand; -} - - -LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr, - LUnallocated* result) { - result->set_virtual_register(current_instruction_->id()); - instr->set_result(result); - return instr; -} - - -LInstruction* LChunkBuilder::DefineAsRegister( - LTemplateResultInstruction<1>* instr) { - return Define(instr, - new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER)); -} - - -LInstruction* LChunkBuilder::DefineAsSpilled( - LTemplateResultInstruction<1>* instr, int index) { - return Define(instr, - new (zone()) LUnallocated(LUnallocated::FIXED_SLOT, index)); -} - - -LInstruction* LChunkBuilder::DefineSameAsFirst( - LTemplateResultInstruction<1>* instr) { - return Define(instr, - new (zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT)); -} - - -LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr, - Register reg) { - return Define(instr, ToUnallocated(reg)); -} - - -LInstruction* LChunkBuilder::DefineFixedDouble( - LTemplateResultInstruction<1>* instr, DoubleRegister reg) { - return Define(instr, ToUnallocated(reg)); -} - - -LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { - HEnvironment* hydrogen_env = current_block_->last_environment(); - return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env); -} - - -LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, - HInstruction* hinstr, - CanDeoptimize can_deoptimize) { - info()->MarkAsNonDeferredCalling(); -#ifdef DEBUG - instr->VerifyCall(); -#endif - instr->MarkAsCall(); - instr = AssignPointerMap(instr); - - // If instruction does not have side-effects lazy deoptimization - // after the call will try to deoptimize to the point before the call. - // Thus we still need to attach environment to this call even if - // call sequence can not deoptimize eagerly. - bool needs_environment = (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || - !hinstr->HasObservableSideEffects(); - if (needs_environment && !instr->HasEnvironment()) { - instr = AssignEnvironment(instr); - // We can't really figure out if the environment is needed or not. 
- instr->environment()->set_has_been_used(); - } - - return instr; -} - - -LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { - DCHECK(!instr->HasPointerMap()); - instr->set_pointer_map(new (zone()) LPointerMap(zone())); - return instr; -} - - -LUnallocated* LChunkBuilder::TempRegister() { - LUnallocated* operand = - new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER); - int vreg = allocator_->GetVirtualRegister(); - if (!allocator_->AllocationOk()) { - Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister); - vreg = 0; - } - operand->set_virtual_register(vreg); - return operand; -} - - -LUnallocated* LChunkBuilder::TempDoubleRegister() { - LUnallocated* operand = - new (zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER); - int vreg = allocator_->GetVirtualRegister(); - if (!allocator_->AllocationOk()) { - Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister); - vreg = 0; - } - operand->set_virtual_register(vreg); - return operand; -} - - -LOperand* LChunkBuilder::FixedTemp(Register reg) { - LUnallocated* operand = ToUnallocated(reg); - DCHECK(operand->HasFixedPolicy()); - return operand; -} - - -LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) { - LUnallocated* operand = ToUnallocated(reg); - DCHECK(operand->HasFixedPolicy()); - return operand; -} - - -LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) { - return new (zone()) LLabel(instr->block()); -} - - -LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) { - return DefineAsRegister(new (zone()) LDummyUse(UseAny(instr->value()))); -} - - -LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) { - UNREACHABLE(); -} - - -LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) { - return AssignEnvironment(new (zone()) LDeoptimize); -} - - -LInstruction* LChunkBuilder::DoShift(Token::Value op, - HBitwiseBinaryOperation* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* left = UseRegisterAtStart(instr->left()); - - HValue* right_value = instr->right(); - LOperand* right = NULL; - int constant_value = 0; - bool does_deopt = false; - if (right_value->IsConstant()) { - HConstant* constant = HConstant::cast(right_value); - right = chunk_->DefineConstantOperand(constant); - constant_value = constant->Integer32Value() & 0x1f; - // Left shifts can deoptimize if we shift by > 0 and the result cannot be - // truncated to smi. - if (instr->representation().IsSmi() && constant_value > 0) { - does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi); - } - } else { - right = UseRegisterAtStart(right_value); - } - - // Shift operations can only deoptimize if we do a logical shift - // by 0 and the result cannot be truncated to int32. - if (op == Token::SHR && constant_value == 0) { - does_deopt = !instr->CheckFlag(HInstruction::kUint32); - } - - LInstruction* result = - DefineAsRegister(new (zone()) LShiftI(op, left, right, does_deopt)); - return does_deopt ? 
AssignEnvironment(result) : result; - } else { - return DoArithmeticT(op, instr); - } -} - - -LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op, - HArithmeticBinaryOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - DCHECK(instr->right()->representation().IsDouble()); - if (op == Token::MOD) { - LOperand* left = UseFixedDouble(instr->left(), d1); - LOperand* right = UseFixedDouble(instr->right(), d2); - LArithmeticD* result = new (zone()) LArithmeticD(op, left, right); - // We call a C function for double modulo. It can't trigger a GC. We need - // to use fixed result register for the call. - // TODO(fschneider): Allow any register as input registers. - return MarkAsCall(DefineFixedDouble(result, d1), instr); - } else { - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterAtStart(instr->right()); - LArithmeticD* result = new (zone()) LArithmeticD(op, left, right); - return DefineAsRegister(result); - } -} - - -LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op, - HBinaryOperation* instr) { - HValue* left = instr->left(); - HValue* right = instr->right(); - DCHECK(left->representation().IsTagged()); - DCHECK(right->representation().IsTagged()); - LOperand* context = UseFixed(instr->context(), cp); - LOperand* left_operand = UseFixed(left, r4); - LOperand* right_operand = UseFixed(right, r3); - LArithmeticT* result = - new (zone()) LArithmeticT(op, context, left_operand, right_operand); - return MarkAsCall(DefineFixed(result, r3), instr); -} - - -void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { - DCHECK(is_building()); - current_block_ = block; - next_block_ = next_block; - if (block->IsStartBlock()) { - block->UpdateEnvironment(graph_->start_environment()); - argument_count_ = 0; - } else if (block->predecessors()->length() == 1) { - // We have a single predecessor => copy environment and outgoing - // argument count from the predecessor. - DCHECK(block->phis()->length() == 0); - HBasicBlock* pred = block->predecessors()->at(0); - HEnvironment* last_environment = pred->last_environment(); - DCHECK(last_environment != NULL); - // Only copy the environment, if it is later used again. - if (pred->end()->SecondSuccessor() == NULL) { - DCHECK(pred->end()->FirstSuccessor() == block); - } else { - if (pred->end()->FirstSuccessor()->block_id() > block->block_id() || - pred->end()->SecondSuccessor()->block_id() > block->block_id()) { - last_environment = last_environment->Copy(); - } - } - block->UpdateEnvironment(last_environment); - DCHECK(pred->argument_count() >= 0); - argument_count_ = pred->argument_count(); - } else { - // We are at a state join => process phis. - HBasicBlock* pred = block->predecessors()->at(0); - // No need to copy the environment, it cannot be used later. - HEnvironment* last_environment = pred->last_environment(); - for (int i = 0; i < block->phis()->length(); ++i) { - HPhi* phi = block->phis()->at(i); - if (phi->HasMergedIndex()) { - last_environment->SetValueAt(phi->merged_index(), phi); - } - } - for (int i = 0; i < block->deleted_phis()->length(); ++i) { - if (block->deleted_phis()->at(i) < last_environment->length()) { - last_environment->SetValueAt(block->deleted_phis()->at(i), - graph_->GetConstantUndefined()); - } - } - block->UpdateEnvironment(last_environment); - // Pick up the outgoing argument count of one of the predecessors. 
- argument_count_ = pred->argument_count(); - } - HInstruction* current = block->first(); - int start = chunk_->instructions()->length(); - while (current != NULL && !is_aborted()) { - // Code for constants in registers is generated lazily. - if (!current->EmitAtUses()) { - VisitInstruction(current); - } - current = current->next(); - } - int end = chunk_->instructions()->length() - 1; - if (end >= start) { - block->set_first_instruction_index(start); - block->set_last_instruction_index(end); - } - block->set_argument_count(argument_count_); - next_block_ = NULL; - current_block_ = NULL; -} - - -void LChunkBuilder::VisitInstruction(HInstruction* current) { - HInstruction* old_current = current_instruction_; - current_instruction_ = current; - - LInstruction* instr = NULL; - if (current->CanReplaceWithDummyUses()) { - if (current->OperandCount() == 0) { - instr = DefineAsRegister(new (zone()) LDummy()); - } else { - DCHECK(!current->OperandAt(0)->IsControlInstruction()); - instr = DefineAsRegister(new (zone()) - LDummyUse(UseAny(current->OperandAt(0)))); - } - for (int i = 1; i < current->OperandCount(); ++i) { - if (current->OperandAt(i)->IsControlInstruction()) continue; - LInstruction* dummy = - new (zone()) LDummyUse(UseAny(current->OperandAt(i))); - dummy->set_hydrogen_value(current); - chunk_->AddInstruction(dummy, current_block_); - } - } else { - HBasicBlock* successor; - if (current->IsControlInstruction() && - HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) && - successor != NULL) { - instr = new (zone()) LGoto(successor); - } else { - instr = current->CompileToLithium(this); - } - } - - argument_count_ += current->argument_delta(); - DCHECK(argument_count_ >= 0); - - if (instr != NULL) { - AddInstruction(instr, current); - } - - current_instruction_ = old_current; -} - - -void LChunkBuilder::AddInstruction(LInstruction* instr, - HInstruction* hydrogen_val) { - // Associate the hydrogen instruction first, since we may need it for - // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below. - instr->set_hydrogen_value(hydrogen_val); - -#if DEBUG - // Make sure that the lithium instruction has either no fixed register - // constraints in temps or the result OR no uses that are only used at - // start. If this invariant doesn't hold, the register allocator can decide - // to insert a split of a range immediately before the instruction due to an - // already allocated register needing to be used for the instruction's fixed - // register constraint. In this case, The register allocator won't see an - // interference between the split child and the use-at-start (it would if - // the it was just a plain use), so it is free to move the split child into - // the same register that is used for the use-at-start. 
- // See https://code.google.com/p/chromium/issues/detail?id=201590 - if (!(instr->ClobbersRegisters() && - instr->ClobbersDoubleRegisters(isolate()))) { - int fixed = 0; - int used_at_start = 0; - for (UseIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->IsUsedAtStart()) ++used_at_start; - } - if (instr->Output() != NULL) { - if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed; - } - for (TempIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->HasFixedPolicy()) ++fixed; - } - DCHECK(fixed == 0 || used_at_start == 0); - } -#endif - - if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) { - instr = AssignPointerMap(instr); - } - if (FLAG_stress_environments && !instr->HasEnvironment()) { - instr = AssignEnvironment(instr); - } - chunk_->AddInstruction(instr, current_block_); - - CreateLazyBailoutForCall(current_block_, instr, hydrogen_val); -} - - -LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) { - LInstruction* result = new (zone()) LPrologue(); - if (info_->scope()->NeedsContext()) { - result = MarkAsCall(result, instr); - } - return result; -} - - -LInstruction* LChunkBuilder::DoGoto(HGoto* instr) { - return new (zone()) LGoto(instr->FirstSuccessor()); -} - - -LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { - HValue* value = instr->value(); - Representation r = value->representation(); - HType type = value->type(); - ToBooleanHints expected = instr->expected_input_types(); - if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny; - - bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() || - type.IsJSArray() || type.IsHeapNumber() || type.IsString(); - LInstruction* branch = new (zone()) LBranch(UseRegister(value)); - if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) && - (expected & ToBooleanHint::kNeedsMap)) || - expected != ToBooleanHint::kAny)) { - branch = AssignEnvironment(branch); - } - return branch; -} - - -LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) { - return new (zone()) LDebugBreak(); -} - - -LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* temp = TempRegister(); - return new (zone()) LCmpMapAndBranch(value, temp); -} - - -LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) { - info()->MarkAsRequiresFrame(); - LOperand* value = UseRegister(instr->value()); - return DefineAsRegister(new (zone()) LArgumentsLength(value)); -} - - -LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) { - info()->MarkAsRequiresFrame(); - return DefineAsRegister(new (zone()) LArgumentsElements); -} - - -LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch( - HHasInPrototypeChainAndBranch* instr) { - LOperand* object = UseRegister(instr->object()); - LOperand* prototype = UseRegister(instr->prototype()); - LHasInPrototypeChainAndBranch* result = - new (zone()) LHasInPrototypeChainAndBranch(object, prototype); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) { - LOperand* receiver = UseRegisterAtStart(instr->receiver()); - LOperand* function = UseRegisterAtStart(instr->function()); - LWrapReceiver* result = new (zone()) LWrapReceiver(receiver, function); - return AssignEnvironment(DefineAsRegister(result)); -} - - 
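The debug-only block in AddInstruction above checks a register-allocation invariant: an instruction may have fixed-register temps or a fixed-register output, or it may have inputs that are used-at-start, but never both; otherwise the allocator could place a split child (inserted for the fixed constraint) into the very register already claimed by a use-at-start. A small sketch of that check, with a hypothetical Operand type in place of LUnallocated:

    #include <cassert>
    #include <vector>

    // Hypothetical operand: just the two flags the invariant cares about.
    struct Operand {
      bool has_fixed_policy;
      bool used_at_start;
    };

    // Mirrors DCHECK(fixed == 0 || used_at_start == 0) in AddInstruction.
    bool FixedAndUseAtStartDontMix(const std::vector<Operand>& uses,
                                   const std::vector<Operand>& temps_and_output) {
      int fixed = 0;
      int used_at_start = 0;
      for (const Operand& use : uses) {
        if (use.used_at_start) ++used_at_start;
      }
      for (const Operand& op : temps_and_output) {
        if (op.has_fixed_policy) ++fixed;
      }
      return fixed == 0 || used_at_start == 0;
    }

    int main() {
      // Use-at-start input with an unconstrained output: allowed.
      assert(FixedAndUseAtStartDontMix({{false, true}}, {{false, false}}));
      // Use-at-start input combined with a fixed output: rejected.
      assert(!FixedAndUseAtStartDontMix({{false, true}}, {{true, false}}));
      return 0;
    }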
-LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) { - LOperand* function = UseFixed(instr->function(), r4); - LOperand* receiver = UseFixed(instr->receiver(), r3); - LOperand* length = UseFixed(instr->length(), r5); - LOperand* elements = UseFixed(instr->elements(), r6); - LApplyArguments* result = - new (zone()) LApplyArguments(function, receiver, length, elements); - return MarkAsCall(DefineFixed(result, r3), instr, CAN_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) { - int argc = instr->OperandCount(); - for (int i = 0; i < argc; ++i) { - LOperand* argument = Use(instr->argument(i)); - AddInstruction(new (zone()) LPushArgument(argument), instr); - } - return NULL; -} - - -LInstruction* LChunkBuilder::DoStoreCodeEntry( - HStoreCodeEntry* store_code_entry) { - LOperand* function = UseRegister(store_code_entry->function()); - LOperand* code_object = UseTempRegister(store_code_entry->code_object()); - return new (zone()) LStoreCodeEntry(function, code_object); -} - - -LInstruction* LChunkBuilder::DoInnerAllocatedObject( - HInnerAllocatedObject* instr) { - LOperand* base_object = UseRegisterAtStart(instr->base_object()); - LOperand* offset = UseRegisterOrConstantAtStart(instr->offset()); - return DefineAsRegister(new (zone()) - LInnerAllocatedObject(base_object, offset)); -} - - -LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) { - return instr->HasNoUses() ? NULL - : DefineAsRegister(new (zone()) LThisFunction); -} - - -LInstruction* LChunkBuilder::DoContext(HContext* instr) { - if (instr->HasNoUses()) return NULL; - - if (info()->IsStub()) { - return DefineFixed(new (zone()) LContext, cp); - } - - return DefineAsRegister(new (zone()) LContext); -} - - -LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) { - LOperand* context = UseFixed(instr->context(), cp); - return MarkAsCall(new (zone()) LDeclareGlobals(context), instr); -} - - -LInstruction* LChunkBuilder::DoCallWithDescriptor(HCallWithDescriptor* instr) { - CallInterfaceDescriptor descriptor = instr->descriptor(); - DCHECK_EQ(descriptor.GetParameterCount() + - LCallWithDescriptor::kImplicitRegisterParameterCount, - instr->OperandCount()); - - LOperand* target = UseRegisterOrConstantAtStart(instr->target()); - ZoneList ops(instr->OperandCount(), zone()); - // Target - ops.Add(target, zone()); - // Context - LOperand* op = UseFixed(instr->OperandAt(1), cp); - ops.Add(op, zone()); - // Load register parameters. - int i = 0; - for (; i < descriptor.GetRegisterParameterCount(); i++) { - op = UseFixed(instr->OperandAt( - i + LCallWithDescriptor::kImplicitRegisterParameterCount), - descriptor.GetRegisterParameter(i)); - ops.Add(op, zone()); - } - // Push stack parameters. 
- for (; i < descriptor.GetParameterCount(); i++) { - op = UseAny(instr->OperandAt( - i + LCallWithDescriptor::kImplicitRegisterParameterCount)); - AddInstruction(new (zone()) LPushArgument(op), instr); - } - - LCallWithDescriptor* result = - new (zone()) LCallWithDescriptor(descriptor, ops, zone()); - if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) { - result->MarkAsSyntacticTailCall(); - } - return MarkAsCall(DefineFixed(result, r3), instr); -} - - -LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) { - LOperand* context = UseFixed(instr->context(), cp); - LOperand* function = UseFixed(instr->function(), r4); - LInvokeFunction* result = new (zone()) LInvokeFunction(context, function); - if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) { - result->MarkAsSyntacticTailCall(); - } - return MarkAsCall(DefineFixed(result, r3), instr, CANNOT_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { - switch (instr->op()) { - case kMathFloor: - return DoMathFloor(instr); - case kMathRound: - return DoMathRound(instr); - case kMathFround: - return DoMathFround(instr); - case kMathAbs: - return DoMathAbs(instr); - case kMathLog: - return DoMathLog(instr); - case kMathCos: - return DoMathCos(instr); - case kMathSin: - return DoMathSin(instr); - case kMathExp: - return DoMathExp(instr); - case kMathSqrt: - return DoMathSqrt(instr); - case kMathPowHalf: - return DoMathPowHalf(instr); - case kMathClz32: - return DoMathClz32(instr); - default: - UNREACHABLE(); - } -} - - -LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) { - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseRegister(instr->value()); - if (instr->representation().IsInteger32()) { - LMathFloorI* result = new (zone()) LMathFloorI(input); - return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); - } else { - DCHECK(instr->representation().IsDouble()); - LMathFloorD* result = new (zone()) LMathFloorD(input); - return DefineAsRegister(result); - } -} - -LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) { - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseRegister(instr->value()); - if (instr->representation().IsInteger32()) { - LOperand* temp = TempDoubleRegister(); - LMathRoundI* result = new (zone()) LMathRoundI(input, temp); - return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); - } else { - DCHECK(instr->representation().IsDouble()); - LMathRoundD* result = new (zone()) LMathRoundD(input); - return DefineAsRegister(result); - } -} - -LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) { - LOperand* input = UseRegister(instr->value()); - LMathFround* result = new (zone()) LMathFround(input); - return DefineAsRegister(result); -} - - -LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) { - Representation r = instr->value()->representation(); - LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32()) - ? 
NULL - : UseFixed(instr->context(), cp); - LOperand* input = UseRegister(instr->value()); - LInstruction* result = - DefineAsRegister(new (zone()) LMathAbs(context, input)); - if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result); - if (!r.IsDouble()) result = AssignEnvironment(result); - return result; -} - - -LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseFixedDouble(instr->value(), d0); - return MarkAsCall(DefineFixedDouble(new (zone()) LMathLog(input), d0), instr); -} - - -LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) { - LOperand* input = UseRegisterAtStart(instr->value()); - LMathClz32* result = new (zone()) LMathClz32(input); - return DefineAsRegister(result); -} - -LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseFixedDouble(instr->value(), d0); - return MarkAsCall(DefineFixedDouble(new (zone()) LMathCos(input), d0), instr); -} - -LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseFixedDouble(instr->value(), d0); - return MarkAsCall(DefineFixedDouble(new (zone()) LMathSin(input), d0), instr); -} - -LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseFixedDouble(instr->value(), d0); - return MarkAsCall(DefineFixedDouble(new (zone()) LMathExp(input), d0), instr); -} - - -LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) { - LOperand* input = UseRegisterAtStart(instr->value()); - LMathSqrt* result = new (zone()) LMathSqrt(input); - return DefineAsRegister(result); -} - - -LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) { - LOperand* input = UseRegisterAtStart(instr->value()); - LMathPowHalf* result = new (zone()) LMathPowHalf(input); - return DefineAsRegister(result); -} - - -LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) { - LOperand* context = UseFixed(instr->context(), cp); - LOperand* constructor = UseFixed(instr->constructor(), r4); - LCallNewArray* result = new (zone()) LCallNewArray(context, constructor); - return MarkAsCall(DefineFixed(result, r3), instr); -} - - -LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) { - LOperand* context = UseFixed(instr->context(), cp); - return MarkAsCall(DefineFixed(new (zone()) LCallRuntime(context), r3), instr); -} - - -LInstruction* LChunkBuilder::DoRor(HRor* instr) { - return DoShift(Token::ROR, instr); -} - - -LInstruction* LChunkBuilder::DoShr(HShr* instr) { - return DoShift(Token::SHR, instr); -} - - -LInstruction* LChunkBuilder::DoSar(HSar* instr) { - return DoShift(Token::SAR, instr); -} - - -LInstruction* LChunkBuilder::DoShl(HShl* instr) { - return DoShift(Token::SHL, instr); -} - - -LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32)); - - LOperand* left = 
UseRegisterAtStart(instr->BetterLeftOperand()); - LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand()); - return DefineAsRegister(new (zone()) LBitI(left, right)); - } else { - return DoArithmeticT(instr->op(), instr); - } -} - - -LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = - DefineAsRegister(new (zone()) LDivByPowerOf2I(dividend, divisor)); - if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || - (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) || - (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && - divisor != 1 && divisor != -1)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) { - DCHECK(instr->representation().IsInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = - DefineAsRegister(new (zone()) LDivByConstI(dividend, divisor)); - if (divisor == 0 || - (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || - !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoDivI(HDiv* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - LOperand* divisor = UseRegister(instr->right()); - LInstruction* result = - DefineAsRegister(new (zone()) LDivI(dividend, divisor)); - if (instr->CheckFlag(HValue::kCanBeDivByZero) || - instr->CheckFlag(HValue::kBailoutOnMinusZero) || - (instr->CheckFlag(HValue::kCanOverflow) && - !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) || - (!instr->IsMathFloorOfDiv() && - !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoDiv(HDiv* instr) { - if (instr->representation().IsSmiOrInteger32()) { - if (instr->RightIsPowerOf2()) { - return DoDivByPowerOf2I(instr); - } else if (instr->right()->IsConstant()) { - return DoDivByConstI(instr); - } else { - return DoDivI(instr); - } - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::DIV, instr); - } else { - return DoArithmeticT(Token::DIV, instr); - } -} - - -LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) { - LOperand* dividend = UseRegisterAtStart(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = - DefineAsRegister(new (zone()) LFlooringDivByPowerOf2I(dividend, divisor)); - if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || - (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) { - 
DCHECK(instr->representation().IsInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LOperand* temp = - ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) || - (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) - ? NULL - : TempRegister(); - LInstruction* result = DefineAsRegister( - new (zone()) LFlooringDivByConstI(dividend, divisor, temp)); - if (divisor == 0 || - (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - LOperand* divisor = UseRegister(instr->right()); - LInstruction* result = - DefineAsRegister(new (zone()) LFlooringDivI(dividend, divisor)); - if (instr->CheckFlag(HValue::kCanBeDivByZero) || - instr->CheckFlag(HValue::kBailoutOnMinusZero) || - (instr->CheckFlag(HValue::kCanOverflow) && - !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) { - if (instr->RightIsPowerOf2()) { - return DoFlooringDivByPowerOf2I(instr); - } else if (instr->right()->IsConstant()) { - return DoFlooringDivByConstI(instr); - } else { - return DoFlooringDivI(instr); - } -} - - -LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegisterAtStart(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = - DefineSameAsFirst(new (zone()) LModByPowerOf2I(dividend, divisor)); - if (instr->CheckFlag(HValue::kLeftCanBeNegative) && - instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = - DefineAsRegister(new (zone()) LModByConstI(dividend, divisor)); - if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoModI(HMod* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - LOperand* divisor = UseRegister(instr->right()); - LInstruction* result = - DefineAsRegister(new (zone()) LModI(dividend, divisor)); - if (instr->CheckFlag(HValue::kCanBeDivByZero) || - 
instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoMod(HMod* instr) { - if (instr->representation().IsSmiOrInteger32()) { - if (instr->RightIsPowerOf2()) { - return DoModByPowerOf2I(instr); - } else if (instr->right()->IsConstant()) { - return DoModByConstI(instr); - } else { - return DoModI(instr); - } - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::MOD, instr); - } else { - return DoArithmeticT(Token::MOD, instr); - } -} - - -LInstruction* LChunkBuilder::DoMul(HMul* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - HValue* left = instr->BetterLeftOperand(); - HValue* right = instr->BetterRightOperand(); - LOperand* left_op; - LOperand* right_op; - bool can_overflow = instr->CheckFlag(HValue::kCanOverflow); - bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero); - - int32_t constant_value = 0; - if (right->IsConstant()) { - HConstant* constant = HConstant::cast(right); - constant_value = constant->Integer32Value(); - // Constants -1, 0 and 1 can be optimized if the result can overflow. - // For other constants, it can be optimized only without overflow. - if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) { - left_op = UseRegisterAtStart(left); - right_op = UseConstant(right); - } else { - if (bailout_on_minus_zero) { - left_op = UseRegister(left); - } else { - left_op = UseRegisterAtStart(left); - } - right_op = UseRegister(right); - } - } else { - if (bailout_on_minus_zero) { - left_op = UseRegister(left); - } else { - left_op = UseRegisterAtStart(left); - } - right_op = UseRegister(right); - } - LMulI* mul = new (zone()) LMulI(left_op, right_op); - if (right_op->IsConstantOperand() - ? ((can_overflow && constant_value == -1) || - (bailout_on_minus_zero && constant_value <= 0)) - : (can_overflow || bailout_on_minus_zero)) { - AssignEnvironment(mul); - } - return DefineAsRegister(mul); - - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::MUL, instr); - } else { - return DoArithmeticT(Token::MUL, instr); - } -} - - -LInstruction* LChunkBuilder::DoSub(HSub* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - - if (instr->left()->IsConstant() && - !instr->CheckFlag(HValue::kCanOverflow)) { - // If lhs is constant, do reverse subtraction instead. 
- return DoRSub(instr); - } - - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseOrConstantAtStart(instr->right()); - LSubI* sub = new (zone()) LSubI(left, right); - LInstruction* result = DefineAsRegister(sub); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::SUB, instr); - } else { - return DoArithmeticT(Token::SUB, instr); - } -} - - -LInstruction* LChunkBuilder::DoRSub(HSub* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - DCHECK(!instr->CheckFlag(HValue::kCanOverflow)); - - // Note: The lhs of the subtraction becomes the rhs of the - // reverse-subtraction. - LOperand* left = UseRegisterAtStart(instr->right()); - LOperand* right = UseOrConstantAtStart(instr->left()); - LRSubI* rsb = new (zone()) LRSubI(left, right); - LInstruction* result = DefineAsRegister(rsb); - return result; -} - - -LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) { - LOperand* multiplier_op = UseRegisterAtStart(mul->left()); - LOperand* multiplicand_op = UseRegisterAtStart(mul->right()); - LOperand* addend_op = UseRegisterAtStart(addend); - return DefineSameAsFirst( - new (zone()) LMultiplyAddD(addend_op, multiplier_op, multiplicand_op)); -} - - -LInstruction* LChunkBuilder::DoMultiplySub(HValue* minuend, HMul* mul) { - LOperand* minuend_op = UseRegisterAtStart(minuend); - LOperand* multiplier_op = UseRegisterAtStart(mul->left()); - LOperand* multiplicand_op = UseRegisterAtStart(mul->right()); - - return DefineSameAsFirst( - new (zone()) LMultiplySubD(minuend_op, multiplier_op, multiplicand_op)); -} - - -LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); - LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand()); - LAddI* add = new (zone()) LAddI(left, right); - LInstruction* result = DefineAsRegister(add); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; - } else if (instr->representation().IsExternal()) { - DCHECK(instr->IsConsistentExternalRepresentation()); - DCHECK(!instr->CheckFlag(HValue::kCanOverflow)); - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseOrConstantAtStart(instr->right()); - LAddI* add = new (zone()) LAddI(left, right); - LInstruction* result = DefineAsRegister(add); - return result; - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::ADD, instr); - } else { - return DoArithmeticT(Token::ADD, instr); - } -} - - -LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) { - LOperand* left = NULL; - LOperand* right = NULL; - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - left = UseRegisterAtStart(instr->BetterLeftOperand()); - right = UseOrConstantAtStart(instr->BetterRightOperand()); - } else { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - 
DCHECK(instr->right()->representation().IsDouble()); - left = UseRegisterAtStart(instr->left()); - right = UseRegisterAtStart(instr->right()); - } - return DefineAsRegister(new (zone()) LMathMinMax(left, right)); -} - - -LInstruction* LChunkBuilder::DoPower(HPower* instr) { - DCHECK(instr->representation().IsDouble()); - // We call a C function for double power. It can't trigger a GC. - // We need to use fixed result register for the call. - Representation exponent_type = instr->right()->representation(); - DCHECK(instr->left()->representation().IsDouble()); - LOperand* left = UseFixedDouble(instr->left(), d1); - LOperand* right = - exponent_type.IsDouble() - ? UseFixedDouble(instr->right(), d2) - : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent()); - LPower* result = new (zone()) LPower(left, right); - return MarkAsCall(DefineFixedDouble(result, d3), instr, - CAN_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { - DCHECK(instr->left()->representation().IsTagged()); - DCHECK(instr->right()->representation().IsTagged()); - LOperand* context = UseFixed(instr->context(), cp); - LOperand* left = UseFixed(instr->left(), r4); - LOperand* right = UseFixed(instr->right(), r3); - LCmpT* result = new (zone()) LCmpT(context, left, right); - return MarkAsCall(DefineFixed(result, r3), instr); -} - - -LInstruction* LChunkBuilder::DoCompareNumericAndBranch( - HCompareNumericAndBranch* instr) { - Representation r = instr->representation(); - if (r.IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(r)); - DCHECK(instr->right()->representation().Equals(r)); - LOperand* left = UseRegisterOrConstantAtStart(instr->left()); - LOperand* right = UseRegisterOrConstantAtStart(instr->right()); - return new (zone()) LCompareNumericAndBranch(left, right); - } else { - DCHECK(r.IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - DCHECK(instr->right()->representation().IsDouble()); - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterAtStart(instr->right()); - return new (zone()) LCompareNumericAndBranch(left, right); - } -} - - -LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch( - HCompareObjectEqAndBranch* instr) { - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterAtStart(instr->right()); - return new (zone()) LCmpObjectEqAndBranch(left, right); -} - - -LInstruction* LChunkBuilder::DoCompareHoleAndBranch( - HCompareHoleAndBranch* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - return new (zone()) LCmpHoleAndBranch(value); -} - - -LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* temp = TempRegister(); - return new (zone()) LIsStringAndBranch(value, temp); -} - - -LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - return new (zone()) LIsSmiAndBranch(Use(instr->value())); -} - - -LInstruction* LChunkBuilder::DoIsUndetectableAndBranch( - HIsUndetectableAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - return new (zone()) LIsUndetectableAndBranch(value, TempRegister()); -} - - -LInstruction* LChunkBuilder::DoStringCompareAndBranch( - HStringCompareAndBranch* instr) { - DCHECK(instr->left()->representation().IsTagged()); - 
DCHECK(instr->right()->representation().IsTagged()); - LOperand* context = UseFixed(instr->context(), cp); - LOperand* left = UseFixed(instr->left(), r4); - LOperand* right = UseFixed(instr->right(), r3); - LStringCompareAndBranch* result = - new (zone()) LStringCompareAndBranch(context, left, right); - return MarkAsCall(result, instr); -} - - -LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch( - HHasInstanceTypeAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - return new (zone()) LHasInstanceTypeAndBranch(value); -} - -LInstruction* LChunkBuilder::DoClassOfTestAndBranch( - HClassOfTestAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegister(instr->value()); - return new (zone()) LClassOfTestAndBranch(value, TempRegister()); -} - -LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) { - LOperand* string = UseRegisterAtStart(instr->string()); - LOperand* index = UseRegisterOrConstantAtStart(instr->index()); - return DefineAsRegister(new (zone()) LSeqStringGetChar(string, index)); -} - - -LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) { - LOperand* string = UseRegisterAtStart(instr->string()); - LOperand* index = FLAG_debug_code - ? UseRegisterAtStart(instr->index()) - : UseRegisterOrConstantAtStart(instr->index()); - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL; - return new (zone()) LSeqStringSetChar(context, string, index, value); -} - - -LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { - if (!FLAG_debug_code && instr->skip_check()) return NULL; - LOperand* index = UseRegisterOrConstantAtStart(instr->index()); - LOperand* length = !index->IsConstantOperand() - ? UseRegisterOrConstantAtStart(instr->length()) - : UseRegisterAtStart(instr->length()); - LInstruction* result = new (zone()) LBoundsCheck(index, length); - if (!FLAG_debug_code || !instr->skip_check()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) { - // The control instruction marking the end of a block that completed - // abruptly (e.g., threw an exception). There is nothing specific to do. - return NULL; -} - - -LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) { return NULL; } - - -LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) { - // All HForceRepresentation instructions should be eliminated in the - // representation change phase of Hydrogen. 
- UNREACHABLE(); -} - - -LInstruction* LChunkBuilder::DoChange(HChange* instr) { - Representation from = instr->from(); - Representation to = instr->to(); - HValue* val = instr->value(); - if (from.IsSmi()) { - if (to.IsTagged()) { - LOperand* value = UseRegister(val); - return DefineSameAsFirst(new (zone()) LDummyUse(value)); - } - from = Representation::Tagged(); - } - if (from.IsTagged()) { - if (to.IsDouble()) { - LOperand* value = UseRegister(val); - LInstruction* result = - DefineAsRegister(new (zone()) LNumberUntagD(value)); - if (!val->representation().IsSmi()) result = AssignEnvironment(result); - return result; - } else if (to.IsSmi()) { - LOperand* value = UseRegister(val); - if (val->type().IsSmi()) { - return DefineSameAsFirst(new (zone()) LDummyUse(value)); - } - return AssignEnvironment( - DefineSameAsFirst(new (zone()) LCheckSmi(value))); - } else { - DCHECK(to.IsInteger32()); - if (val->type().IsSmi() || val->representation().IsSmi()) { - LOperand* value = UseRegisterAtStart(val); - return DefineAsRegister(new (zone()) LSmiUntag(value, false)); - } else { - LOperand* value = UseRegister(val); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempDoubleRegister(); - LInstruction* result = - DefineSameAsFirst(new (zone()) LTaggedToI(value, temp1, temp2)); - if (!val->representation().IsSmi()) result = AssignEnvironment(result); - return result; - } - } - } else if (from.IsDouble()) { - if (to.IsTagged()) { - info()->MarkAsDeferredCalling(); - LOperand* value = UseRegister(val); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - LUnallocated* result_temp = TempRegister(); - LNumberTagD* result = new (zone()) LNumberTagD(value, temp1, temp2); - return AssignPointerMap(Define(result, result_temp)); - } else if (to.IsSmi()) { - LOperand* value = UseRegister(val); - return AssignEnvironment( - DefineAsRegister(new (zone()) LDoubleToSmi(value))); - } else { - DCHECK(to.IsInteger32()); - LOperand* value = UseRegister(val); - LInstruction* result = DefineAsRegister(new (zone()) LDoubleToI(value)); - if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result); - return result; - } - } else if (from.IsInteger32()) { - info()->MarkAsDeferredCalling(); - if (to.IsTagged()) { - if (!instr->CheckFlag(HValue::kCanOverflow)) { - LOperand* value = UseRegisterAtStart(val); - return DefineAsRegister(new (zone()) LSmiTag(value)); - } else if (val->CheckFlag(HInstruction::kUint32)) { - LOperand* value = UseRegisterAtStart(val); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - LNumberTagU* result = new (zone()) LNumberTagU(value, temp1, temp2); - return AssignPointerMap(DefineAsRegister(result)); - } else { - LOperand* value = UseRegisterAtStart(val); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - LNumberTagI* result = new (zone()) LNumberTagI(value, temp1, temp2); - return AssignPointerMap(DefineAsRegister(result)); - } - } else if (to.IsSmi()) { - LOperand* value = UseRegister(val); - LInstruction* result = DefineAsRegister(new (zone()) LSmiTag(value)); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; - } else { - DCHECK(to.IsDouble()); - if (val->CheckFlag(HInstruction::kUint32)) { - return DefineAsRegister(new (zone()) LUint32ToDouble(UseRegister(val))); - } else { - return DefineAsRegister(new (zone()) LInteger32ToDouble(Use(val))); - } - } - } - UNREACHABLE(); -} - - -LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) { - 
LOperand* value = UseRegisterAtStart(instr->value()); - LInstruction* result = new (zone()) LCheckNonSmi(value); - if (!instr->value()->type().IsHeapObject()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - return AssignEnvironment(new (zone()) LCheckSmi(value)); -} - - -LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered( - HCheckArrayBufferNotNeutered* instr) { - LOperand* view = UseRegisterAtStart(instr->value()); - LCheckArrayBufferNotNeutered* result = - new (zone()) LCheckArrayBufferNotNeutered(view); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - LInstruction* result = new (zone()) LCheckInstanceType(value); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - return AssignEnvironment(new (zone()) LCheckValue(value)); -} - - -LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) { - if (instr->IsStabilityCheck()) return new (zone()) LCheckMaps; - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* temp = TempRegister(); - LInstruction* result = - AssignEnvironment(new (zone()) LCheckMaps(value, temp)); - if (instr->HasMigrationTarget()) { - info()->MarkAsDeferredCalling(); - result = AssignPointerMap(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) { - HValue* value = instr->value(); - Representation input_rep = value->representation(); - LOperand* reg = UseRegister(value); - if (input_rep.IsDouble()) { - return DefineAsRegister(new (zone()) LClampDToUint8(reg)); - } else if (input_rep.IsInteger32()) { - return DefineAsRegister(new (zone()) LClampIToUint8(reg)); - } else { - DCHECK(input_rep.IsSmiOrTagged()); - LClampTToUint8* result = - new (zone()) LClampTToUint8(reg, TempDoubleRegister()); - return AssignEnvironment(DefineAsRegister(result)); - } -} - - -LInstruction* LChunkBuilder::DoReturn(HReturn* instr) { - LOperand* context = info()->IsStub() ? 
UseFixed(instr->context(), cp) : NULL; - LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count()); - return new (zone()) - LReturn(UseFixed(instr->value(), r3), context, parameter_count); -} - - -LInstruction* LChunkBuilder::DoConstant(HConstant* instr) { - Representation r = instr->representation(); - if (r.IsSmi()) { - return DefineAsRegister(new (zone()) LConstantS); - } else if (r.IsInteger32()) { - return DefineAsRegister(new (zone()) LConstantI); - } else if (r.IsDouble()) { - return DefineAsRegister(new (zone()) LConstantD); - } else if (r.IsExternal()) { - return DefineAsRegister(new (zone()) LConstantE); - } else if (r.IsTagged()) { - return DefineAsRegister(new (zone()) LConstantT); - } else { - UNREACHABLE(); - } -} - - -LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) { - LOperand* context = UseRegisterAtStart(instr->value()); - LInstruction* result = - DefineAsRegister(new (zone()) LLoadContextSlot(context)); - if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) { - LOperand* context; - LOperand* value; - if (instr->NeedsWriteBarrier()) { - context = UseTempRegister(instr->context()); - value = UseTempRegister(instr->value()); - } else { - context = UseRegister(instr->context()); - value = UseRegister(instr->value()); - } - LInstruction* result = new (zone()) LStoreContextSlot(context, value); - if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) { - LOperand* obj = UseRegisterAtStart(instr->object()); - return DefineAsRegister(new (zone()) LLoadNamedField(obj)); -} - - -LInstruction* LChunkBuilder::DoLoadFunctionPrototype( - HLoadFunctionPrototype* instr) { - return AssignEnvironment(DefineAsRegister( - new (zone()) LLoadFunctionPrototype(UseRegister(instr->function())))); -} - - -LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) { - return DefineAsRegister(new (zone()) LLoadRoot); -} - - -LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { - DCHECK(instr->key()->representation().IsSmiOrInteger32()); - ElementsKind elements_kind = instr->elements_kind(); - LOperand* key = UseRegisterOrConstantAtStart(instr->key()); - LInstruction* result = NULL; - - if (!instr->is_fixed_typed_array()) { - LOperand* obj = NULL; - if (instr->representation().IsDouble()) { - obj = UseRegister(instr->elements()); - } else { - obj = UseRegisterAtStart(instr->elements()); - } - result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr)); - } else { - DCHECK((instr->representation().IsInteger32() && - !IsDoubleOrFloatElementsKind(elements_kind)) || - (instr->representation().IsDouble() && - IsDoubleOrFloatElementsKind(elements_kind))); - LOperand* backing_store = UseRegister(instr->elements()); - LOperand* backing_store_owner = UseAny(instr->backing_store_owner()); - result = DefineAsRegister( - new (zone()) LLoadKeyed(backing_store, key, backing_store_owner)); - } - - bool needs_environment; - if (instr->is_fixed_typed_array()) { - // see LCodeGen::DoLoadKeyedExternalArray - needs_environment = elements_kind == UINT32_ELEMENTS && - !instr->CheckFlag(HInstruction::kUint32); - } else { - // see LCodeGen::DoLoadKeyedFixedDoubleArray and - // LCodeGen::DoLoadKeyedFixedArray - needs_environment = - instr->RequiresHoleCheck() || - 
(instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && info()->IsStub()); - } - - if (needs_environment) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { - if (!instr->is_fixed_typed_array()) { - DCHECK(instr->elements()->representation().IsTagged()); - bool needs_write_barrier = instr->NeedsWriteBarrier(); - LOperand* object = NULL; - LOperand* key = NULL; - LOperand* val = NULL; - - if (instr->value()->representation().IsDouble()) { - object = UseRegisterAtStart(instr->elements()); - val = UseRegister(instr->value()); - key = UseRegisterOrConstantAtStart(instr->key()); - } else { - if (needs_write_barrier) { - object = UseTempRegister(instr->elements()); - val = UseTempRegister(instr->value()); - key = UseTempRegister(instr->key()); - } else { - object = UseRegisterAtStart(instr->elements()); - val = UseRegisterAtStart(instr->value()); - key = UseRegisterOrConstantAtStart(instr->key()); - } - } - - return new (zone()) LStoreKeyed(object, key, val, nullptr); - } - - DCHECK((instr->value()->representation().IsInteger32() && - !IsDoubleOrFloatElementsKind(instr->elements_kind())) || - (instr->value()->representation().IsDouble() && - IsDoubleOrFloatElementsKind(instr->elements_kind()))); - DCHECK(instr->elements()->representation().IsExternal()); - LOperand* val = UseRegister(instr->value()); - LOperand* key = UseRegisterOrConstantAtStart(instr->key()); - LOperand* backing_store = UseRegister(instr->elements()); - LOperand* backing_store_owner = UseAny(instr->backing_store_owner()); - return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner); -} - - -LInstruction* LChunkBuilder::DoTransitionElementsKind( - HTransitionElementsKind* instr) { - if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) { - LOperand* object = UseRegister(instr->object()); - LOperand* new_map_reg = TempRegister(); - LTransitionElementsKind* result = - new (zone()) LTransitionElementsKind(object, NULL, new_map_reg); - return result; - } else { - LOperand* object = UseFixed(instr->object(), r3); - LOperand* context = UseFixed(instr->context(), cp); - LTransitionElementsKind* result = - new (zone()) LTransitionElementsKind(object, context, NULL); - return MarkAsCall(result, instr); - } -} - - -LInstruction* LChunkBuilder::DoTrapAllocationMemento( - HTrapAllocationMemento* instr) { - LOperand* object = UseRegister(instr->object()); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - LTrapAllocationMemento* result = - new (zone()) LTrapAllocationMemento(object, temp1, temp2); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) { - info()->MarkAsDeferredCalling(); - LOperand* context = UseFixed(instr->context(), cp); - LOperand* object = Use(instr->object()); - LOperand* elements = Use(instr->elements()); - LOperand* key = UseRegisterOrConstant(instr->key()); - LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity()); - - LMaybeGrowElements* result = new (zone()) - LMaybeGrowElements(context, object, elements, key, current_capacity); - DefineFixed(result, r3); - return AssignPointerMap(AssignEnvironment(result)); -} - - -LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { - bool is_in_object = instr->access().IsInobject(); - bool needs_write_barrier = instr->NeedsWriteBarrier(); - bool needs_write_barrier_for_map = - instr->has_transition() && instr->NeedsWriteBarrierForMap(); 
- - LOperand* obj; - if (needs_write_barrier) { - obj = is_in_object ? UseRegister(instr->object()) - : UseTempRegister(instr->object()); - } else { - obj = needs_write_barrier_for_map ? UseRegister(instr->object()) - : UseRegisterAtStart(instr->object()); - } - - LOperand* val; - if (needs_write_barrier) { - val = UseTempRegister(instr->value()); - } else if (instr->field_representation().IsDouble()) { - val = UseRegisterAtStart(instr->value()); - } else { - val = UseRegister(instr->value()); - } - - // We need a temporary register for write barrier of the map field. - LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL; - - return new (zone()) LStoreNamedField(obj, val, temp); -} - - -LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) { - LOperand* context = UseFixed(instr->context(), cp); - LOperand* left = UseFixed(instr->left(), r4); - LOperand* right = UseFixed(instr->right(), r3); - return MarkAsCall( - DefineFixed(new (zone()) LStringAdd(context, left, right), r3), instr); -} - - -LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) { - LOperand* string = UseTempRegister(instr->string()); - LOperand* index = UseTempRegister(instr->index()); - LOperand* context = UseAny(instr->context()); - LStringCharCodeAt* result = - new (zone()) LStringCharCodeAt(context, string, index); - return AssignPointerMap(DefineAsRegister(result)); -} - - -LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) { - LOperand* char_code = UseRegister(instr->value()); - LOperand* context = UseAny(instr->context()); - LStringCharFromCode* result = - new (zone()) LStringCharFromCode(context, char_code); - return AssignPointerMap(DefineAsRegister(result)); -} - - -LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) { - LOperand* size = UseRegisterOrConstant(instr->size()); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - if (instr->IsAllocationFolded()) { - LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2); - return DefineAsRegister(result); - } else { - info()->MarkAsDeferredCalling(); - LOperand* context = UseAny(instr->context()); - LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2); - return AssignPointerMap(DefineAsRegister(result)); - } -} - - -LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) { - DCHECK(argument_count_ == 0); - allocator_->MarkAsOsrEntry(); - current_block_->last_environment()->set_ast_id(instr->ast_id()); - return AssignEnvironment(new (zone()) LOsrEntry); -} - - -LInstruction* LChunkBuilder::DoParameter(HParameter* instr) { - LParameter* result = new (zone()) LParameter; - if (instr->kind() == HParameter::STACK_PARAMETER) { - int spill_index = chunk()->GetParameterStackSlot(instr->index()); - return DefineAsSpilled(result, spill_index); - } else { - DCHECK(info()->IsStub()); - CallInterfaceDescriptor descriptor = graph()->descriptor(); - int index = static_cast(instr->index()); - Register reg = descriptor.GetRegisterParameter(index); - return DefineFixed(result, reg); - } -} - - -LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) { - // Use an index that corresponds to the location in the unoptimized frame, - // which the optimized frame will subsume. 
- int env_index = instr->index(); - int spill_index = 0; - if (instr->environment()->is_parameter_index(env_index)) { - spill_index = chunk()->GetParameterStackSlot(env_index); - } else { - spill_index = env_index - instr->environment()->first_local_index(); - if (spill_index > LUnallocated::kMaxFixedSlotIndex) { - Retry(kTooManySpillSlotsNeededForOSR); - spill_index = 0; - } - spill_index += StandardFrameConstants::kFixedSlotCount; - } - return DefineAsSpilled(new (zone()) LUnknownOSRValue, spill_index); -} - - -LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) { - // There are no real uses of the arguments object. - // arguments.length and element access are supported directly on - // stack arguments, and any real arguments object use causes a bailout. - // So this value is never used. - return NULL; -} - - -LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) { - instr->ReplayEnvironment(current_block_->last_environment()); - - // There are no real uses of a captured object. - return NULL; -} - - -LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) { - info()->MarkAsRequiresFrame(); - LOperand* args = UseRegister(instr->arguments()); - LOperand* length = UseRegisterOrConstantAtStart(instr->length()); - LOperand* index = UseRegisterOrConstantAtStart(instr->index()); - return DefineAsRegister(new (zone()) LAccessArgumentsAt(args, length, index)); -} - - -LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) { - LOperand* context = UseFixed(instr->context(), cp); - LOperand* value = UseFixed(instr->value(), r6); - LTypeof* result = new (zone()) LTypeof(context, value); - return MarkAsCall(DefineFixed(result, r3), instr); -} - - -LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) { - return new (zone()) LTypeofIsAndBranch(UseRegister(instr->value())); -} - - -LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) { - instr->ReplayEnvironment(current_block_->last_environment()); - return NULL; -} - - -LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) { - if (instr->is_function_entry()) { - LOperand* context = UseFixed(instr->context(), cp); - return MarkAsCall(new (zone()) LStackCheck(context), instr); - } else { - DCHECK(instr->is_backwards_branch()); - LOperand* context = UseAny(instr->context()); - return AssignEnvironment( - AssignPointerMap(new (zone()) LStackCheck(context))); - } -} - - -LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { - HEnvironment* outer = current_block_->last_environment(); - outer->set_ast_id(instr->ReturnId()); - HConstant* undefined = graph()->GetConstantUndefined(); - HEnvironment* inner = outer->CopyForInlining( - instr->closure(), instr->arguments_count(), instr->function(), undefined, - instr->inlining_kind(), instr->syntactic_tail_call_mode()); - // Only replay binding of arguments object if it wasn't removed from graph. 
- if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) { - inner->Bind(instr->arguments_var(), instr->arguments_object()); - } - inner->BindContext(instr->closure_context()); - inner->set_entry(instr); - current_block_->UpdateEnvironment(inner); - return NULL; -} - - -LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) { - LInstruction* pop = NULL; - - HEnvironment* env = current_block_->last_environment(); - - if (env->entry()->arguments_pushed()) { - int argument_count = env->arguments_environment()->parameter_count(); - pop = new (zone()) LDrop(argument_count); - DCHECK(instr->argument_delta() == -argument_count); - } - - HEnvironment* outer = - current_block_->last_environment()->DiscardInlined(false); - current_block_->UpdateEnvironment(outer); - - return pop; -} - - -LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) { - LOperand* context = UseFixed(instr->context(), cp); - LOperand* object = UseFixed(instr->enumerable(), r3); - LForInPrepareMap* result = new (zone()) LForInPrepareMap(context, object); - return MarkAsCall(DefineFixed(result, r3), instr, CAN_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) { - LOperand* map = UseRegister(instr->map()); - return AssignEnvironment( - DefineAsRegister(new (zone()) LForInCacheArray(map))); -} - - -LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* map = UseRegisterAtStart(instr->map()); - return AssignEnvironment(new (zone()) LCheckMapValue(value, map)); -} - - -LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) { - LOperand* object = UseRegister(instr->object()); - LOperand* index = UseTempRegister(instr->index()); - LLoadFieldByIndex* load = new (zone()) LLoadFieldByIndex(object, index); - LInstruction* result = DefineSameAsFirst(load); - return AssignPointerMap(result); -} - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/ppc/lithium-ppc.h b/src/crankshaft/ppc/lithium-ppc.h deleted file mode 100644 index a504c67e48..0000000000 --- a/src/crankshaft/ppc/lithium-ppc.h +++ /dev/null @@ -1,2415 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_PPC_LITHIUM_PPC_H_ -#define V8_CRANKSHAFT_PPC_LITHIUM_PPC_H_ - -#include "src/crankshaft/hydrogen.h" -#include "src/crankshaft/lithium.h" -#include "src/crankshaft/lithium-allocator.h" -#include "src/safepoint-table.h" -#include "src/utils.h" - -namespace v8 { -namespace internal { - -// Forward declarations. 
-class LCodeGen; - -#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \ - V(AccessArgumentsAt) \ - V(AddI) \ - V(Allocate) \ - V(ApplyArguments) \ - V(ArgumentsElements) \ - V(ArgumentsLength) \ - V(ArithmeticD) \ - V(ArithmeticT) \ - V(BitI) \ - V(BoundsCheck) \ - V(Branch) \ - V(CallWithDescriptor) \ - V(CallNewArray) \ - V(CallRuntime) \ - V(CheckArrayBufferNotNeutered) \ - V(CheckInstanceType) \ - V(CheckNonSmi) \ - V(CheckMaps) \ - V(CheckMapValue) \ - V(CheckSmi) \ - V(CheckValue) \ - V(ClampDToUint8) \ - V(ClampIToUint8) \ - V(ClampTToUint8) \ - V(ClassOfTestAndBranch) \ - V(CompareNumericAndBranch) \ - V(CmpObjectEqAndBranch) \ - V(CmpHoleAndBranch) \ - V(CmpMapAndBranch) \ - V(CmpT) \ - V(ConstantD) \ - V(ConstantE) \ - V(ConstantI) \ - V(ConstantS) \ - V(ConstantT) \ - V(Context) \ - V(DebugBreak) \ - V(DeclareGlobals) \ - V(Deoptimize) \ - V(DivByConstI) \ - V(DivByPowerOf2I) \ - V(DivI) \ - V(DoubleToI) \ - V(DoubleToSmi) \ - V(Drop) \ - V(Dummy) \ - V(DummyUse) \ - V(FastAllocate) \ - V(FlooringDivByConstI) \ - V(FlooringDivByPowerOf2I) \ - V(FlooringDivI) \ - V(ForInCacheArray) \ - V(ForInPrepareMap) \ - V(Goto) \ - V(HasInPrototypeChainAndBranch) \ - V(HasInstanceTypeAndBranch) \ - V(InnerAllocatedObject) \ - V(InstructionGap) \ - V(Integer32ToDouble) \ - V(InvokeFunction) \ - V(IsStringAndBranch) \ - V(IsSmiAndBranch) \ - V(IsUndetectableAndBranch) \ - V(Label) \ - V(LazyBailout) \ - V(LoadContextSlot) \ - V(LoadRoot) \ - V(LoadFieldByIndex) \ - V(LoadFunctionPrototype) \ - V(LoadKeyed) \ - V(LoadNamedField) \ - V(MathAbs) \ - V(MathClz32) \ - V(MathCos) \ - V(MathSin) \ - V(MathExp) \ - V(MathFloorD) \ - V(MathFloorI) \ - V(MathFround) \ - V(MathLog) \ - V(MathMinMax) \ - V(MathPowHalf) \ - V(MathRoundD) \ - V(MathRoundI) \ - V(MathSqrt) \ - V(MaybeGrowElements) \ - V(ModByConstI) \ - V(ModByPowerOf2I) \ - V(ModI) \ - V(MulI) \ - V(MultiplyAddD) \ - V(MultiplySubD) \ - V(NumberTagD) \ - V(NumberTagI) \ - V(NumberTagU) \ - V(NumberUntagD) \ - V(OsrEntry) \ - V(Parameter) \ - V(Power) \ - V(Prologue) \ - V(PushArgument) \ - V(Return) \ - V(SeqStringGetChar) \ - V(SeqStringSetChar) \ - V(ShiftI) \ - V(SmiTag) \ - V(SmiUntag) \ - V(StackCheck) \ - V(StoreCodeEntry) \ - V(StoreContextSlot) \ - V(StoreKeyed) \ - V(StoreNamedField) \ - V(StringAdd) \ - V(StringCharCodeAt) \ - V(StringCharFromCode) \ - V(StringCompareAndBranch) \ - V(SubI) \ - V(RSubI) \ - V(TaggedToI) \ - V(ThisFunction) \ - V(TransitionElementsKind) \ - V(TrapAllocationMemento) \ - V(Typeof) \ - V(TypeofIsAndBranch) \ - V(Uint32ToDouble) \ - V(UnknownOSRValue) \ - V(WrapReceiver) - -#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \ - Opcode opcode() const final { return LInstruction::k##type; } \ - void CompileToNative(LCodeGen* generator) final; \ - const char* Mnemonic() const final { return mnemonic; } \ - static L##type* cast(LInstruction* instr) { \ - DCHECK(instr->Is##type()); \ - return reinterpret_cast(instr); \ - } - - -#define DECLARE_HYDROGEN_ACCESSOR(type) \ - H##type* hydrogen() const { return H##type::cast(hydrogen_value()); } - - -class LInstruction : public ZoneObject { - public: - LInstruction() - : environment_(NULL), - hydrogen_value_(NULL), - bit_field_(IsCallBits::encode(false)) {} - - virtual ~LInstruction() {} - - virtual void CompileToNative(LCodeGen* generator) = 0; - virtual const char* Mnemonic() const = 0; - virtual void PrintTo(StringStream* stream); - virtual void PrintDataTo(StringStream* stream); - virtual void PrintOutputOperandTo(StringStream* stream); - - enum Opcode { -// 
Declare a unique enum value for each instruction. -#define DECLARE_OPCODE(type) k##type, - LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) kNumberOfInstructions -#undef DECLARE_OPCODE - }; - - virtual Opcode opcode() const = 0; - -// Declare non-virtual type testers for all leaf IR classes. -#define DECLARE_PREDICATE(type) \ - bool Is##type() const { return opcode() == k##type; } - LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE) -#undef DECLARE_PREDICATE - - // Declare virtual predicates for instructions that don't have - // an opcode. - virtual bool IsGap() const { return false; } - - virtual bool IsControl() const { return false; } - - // Try deleting this instruction if possible. - virtual bool TryDelete() { return false; } - - void set_environment(LEnvironment* env) { environment_ = env; } - LEnvironment* environment() const { return environment_; } - bool HasEnvironment() const { return environment_ != NULL; } - - void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); } - LPointerMap* pointer_map() const { return pointer_map_.get(); } - bool HasPointerMap() const { return pointer_map_.is_set(); } - - void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; } - HValue* hydrogen_value() const { return hydrogen_value_; } - - void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); } - bool IsCall() const { return IsCallBits::decode(bit_field_); } - - void MarkAsSyntacticTailCall() { - bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true); - } - bool IsSyntacticTailCall() const { - return IsSyntacticTailCallBits::decode(bit_field_); - } - - // Interface to the register allocator and iterators. - bool ClobbersTemps() const { return IsCall(); } - bool ClobbersRegisters() const { return IsCall(); } - virtual bool ClobbersDoubleRegisters(Isolate* isolate) const { - return IsCall(); - } - - // Interface to the register allocator and iterators. - bool IsMarkedAsCall() const { return IsCall(); } - - virtual bool HasResult() const = 0; - virtual LOperand* result() const = 0; - - LOperand* FirstInput() { return InputAt(0); } - LOperand* Output() { return HasResult() ? result() : NULL; } - - virtual bool HasInterestingComment(LCodeGen* gen) const { return true; } - -#ifdef DEBUG - void VerifyCall(); -#endif - - virtual int InputCount() = 0; - virtual LOperand* InputAt(int i) = 0; - - private: - // Iterator support. - friend class InputIterator; - - friend class TempIterator; - virtual int TempCount() = 0; - virtual LOperand* TempAt(int i) = 0; - - class IsCallBits : public BitField {}; - class IsSyntacticTailCallBits : public BitField { - }; - - LEnvironment* environment_; - SetOncePointer pointer_map_; - HValue* hydrogen_value_; - int bit_field_; -}; - - -// R = number of result operands (0 or 1). -template -class LTemplateResultInstruction : public LInstruction { - public: - // Allow 0 or 1 output operands. - STATIC_ASSERT(R == 0 || R == 1); - bool HasResult() const final { return R != 0 && result() != NULL; } - void set_result(LOperand* operand) { results_[0] = operand; } - LOperand* result() const override { return results_[0]; } - - protected: - EmbeddedContainer results_; -}; - - -// R = number of result operands (0 or 1). -// I = number of input operands. -// T = number of temporary operands. -template -class LTemplateInstruction : public LTemplateResultInstruction { - protected: - EmbeddedContainer inputs_; - EmbeddedContainer temps_; - - private: - // Iterator support. 
- int InputCount() final { return I; } - LOperand* InputAt(int i) final { return inputs_[i]; } - - int TempCount() final { return T; } - LOperand* TempAt(int i) final { return temps_[i]; } -}; - - -class LGap : public LTemplateInstruction<0, 0, 0> { - public: - explicit LGap(HBasicBlock* block) : block_(block) { - parallel_moves_[BEFORE] = NULL; - parallel_moves_[START] = NULL; - parallel_moves_[END] = NULL; - parallel_moves_[AFTER] = NULL; - } - - // Can't use the DECLARE-macro here because of sub-classes. - bool IsGap() const override { return true; } - void PrintDataTo(StringStream* stream) override; - static LGap* cast(LInstruction* instr) { - DCHECK(instr->IsGap()); - return reinterpret_cast(instr); - } - - bool IsRedundant() const; - - HBasicBlock* block() const { return block_; } - - enum InnerPosition { - BEFORE, - START, - END, - AFTER, - FIRST_INNER_POSITION = BEFORE, - LAST_INNER_POSITION = AFTER - }; - - LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) { - if (parallel_moves_[pos] == NULL) { - parallel_moves_[pos] = new (zone) LParallelMove(zone); - } - return parallel_moves_[pos]; - } - - LParallelMove* GetParallelMove(InnerPosition pos) { - return parallel_moves_[pos]; - } - - private: - LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1]; - HBasicBlock* block_; -}; - - -class LInstructionGap final : public LGap { - public: - explicit LInstructionGap(HBasicBlock* block) : LGap(block) {} - - bool HasInterestingComment(LCodeGen* gen) const override { - return !IsRedundant(); - } - - DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap") -}; - - -class LGoto final : public LTemplateInstruction<0, 0, 0> { - public: - explicit LGoto(HBasicBlock* block) : block_(block) {} - - bool HasInterestingComment(LCodeGen* gen) const override; - DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") - void PrintDataTo(StringStream* stream) override; - bool IsControl() const override { return true; } - - int block_id() const { return block_->block_id(); } - - private: - HBasicBlock* block_; -}; - - -class LPrologue final : public LTemplateInstruction<0, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue") -}; - - -class LLazyBailout final : public LTemplateInstruction<0, 0, 0> { - public: - LLazyBailout() : gap_instructions_size_(0) {} - - DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout") - - void set_gap_instructions_size(int gap_instructions_size) { - gap_instructions_size_ = gap_instructions_size; - } - int gap_instructions_size() { return gap_instructions_size_; } - - private: - int gap_instructions_size_; -}; - - -class LDummy final : public LTemplateInstruction<1, 0, 0> { - public: - LDummy() {} - DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy") -}; - - -class LDummyUse final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LDummyUse(LOperand* value) { inputs_[0] = value; } - DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use") -}; - - -class LDeoptimize final : public LTemplateInstruction<0, 0, 0> { - public: - bool IsControl() const override { return true; } - DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize") - DECLARE_HYDROGEN_ACCESSOR(Deoptimize) -}; - - -class LLabel final : public LGap { - public: - explicit LLabel(HBasicBlock* block) : LGap(block), replacement_(NULL) {} - - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(Label, "label") - - void PrintDataTo(StringStream* stream) override; - - int block_id() const { return block()->block_id(); } - bool is_loop_header() const { 
return block()->IsLoopHeader(); } - bool is_osr_entry() const { return block()->is_osr_entry(); } - Label* label() { return &label_; } - LLabel* replacement() const { return replacement_; } - void set_replacement(LLabel* label) { replacement_ = label; } - bool HasReplacement() const { return replacement_ != NULL; } - - private: - Label label_; - LLabel* replacement_; -}; - - -class LParameter final : public LTemplateInstruction<1, 0, 0> { - public: - virtual bool HasInterestingComment(LCodeGen* gen) const { return false; } - DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter") -}; - - -class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> { - public: - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value") -}; - - -template -class LControlInstruction : public LTemplateInstruction<0, I, T> { - public: - LControlInstruction() : false_label_(NULL), true_label_(NULL) {} - - bool IsControl() const final { return true; } - - int SuccessorCount() { return hydrogen()->SuccessorCount(); } - HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); } - - int TrueDestination(LChunk* chunk) { - return chunk->LookupDestination(true_block_id()); - } - int FalseDestination(LChunk* chunk) { - return chunk->LookupDestination(false_block_id()); - } - - Label* TrueLabel(LChunk* chunk) { - if (true_label_ == NULL) { - true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk)); - } - return true_label_; - } - Label* FalseLabel(LChunk* chunk) { - if (false_label_ == NULL) { - false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk)); - } - return false_label_; - } - - protected: - int true_block_id() { return SuccessorAt(0)->block_id(); } - int false_block_id() { return SuccessorAt(1)->block_id(); } - - private: - HControlInstruction* hydrogen() { - return HControlInstruction::cast(this->hydrogen_value()); - } - - Label* false_label_; - Label* true_label_; -}; - - -class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> { - public: - LWrapReceiver(LOperand* receiver, LOperand* function) { - inputs_[0] = receiver; - inputs_[1] = function; - } - - DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver") - DECLARE_HYDROGEN_ACCESSOR(WrapReceiver) - - LOperand* receiver() { return inputs_[0]; } - LOperand* function() { return inputs_[1]; } -}; - - -class LApplyArguments final : public LTemplateInstruction<1, 4, 0> { - public: - LApplyArguments(LOperand* function, LOperand* receiver, LOperand* length, - LOperand* elements) { - inputs_[0] = function; - inputs_[1] = receiver; - inputs_[2] = length; - inputs_[3] = elements; - } - - DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments") - DECLARE_HYDROGEN_ACCESSOR(ApplyArguments) - - LOperand* function() { return inputs_[0]; } - LOperand* receiver() { return inputs_[1]; } - LOperand* length() { return inputs_[2]; } - LOperand* elements() { return inputs_[3]; } -}; - - -class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> { - public: - LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) { - inputs_[0] = arguments; - inputs_[1] = length; - inputs_[2] = index; - } - - DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at") - - LOperand* arguments() { return inputs_[0]; } - LOperand* length() { return inputs_[1]; } - LOperand* index() { return inputs_[2]; } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LArgumentsLength final : public LTemplateInstruction<1, 
1, 0> { - public: - explicit LArgumentsLength(LOperand* elements) { inputs_[0] = elements; } - - LOperand* elements() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length") -}; - - -class LArgumentsElements final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements") - DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements) -}; - - -class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> { - public: - LModByPowerOf2I(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) - - private: - int32_t divisor_; -}; - - -class LModByConstI final : public LTemplateInstruction<1, 1, 0> { - public: - LModByConstI(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) - - private: - int32_t divisor_; -}; - - -class LModI final : public LTemplateInstruction<1, 2, 0> { - public: - LModI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) -}; - - -class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> { - public: - LDivByPowerOf2I(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i") - DECLARE_HYDROGEN_ACCESSOR(Div) - - private: - int32_t divisor_; -}; - - -class LDivByConstI final : public LTemplateInstruction<1, 1, 0> { - public: - LDivByConstI(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i") - DECLARE_HYDROGEN_ACCESSOR(Div) - - private: - int32_t divisor_; -}; - - -class LDivI final : public LTemplateInstruction<1, 2, 0> { - public: - LDivI(LOperand* dividend, LOperand* divisor) { - inputs_[0] = dividend; - inputs_[1] = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - LOperand* divisor() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i") - DECLARE_HYDROGEN_ACCESSOR(BinaryOperation) -}; - - -class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> { - public: - LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I, - "flooring-div-by-power-of-2-i") - DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) - - private: - int32_t divisor_; -}; - - -class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 1> { - public: - LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) { - inputs_[0] = dividend; - divisor_ = divisor; - temps_[0] = temp; - } - - LOperand* dividend() { 
return inputs_[0]; } - int32_t divisor() const { return divisor_; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i") - DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) - - private: - int32_t divisor_; -}; - - -class LFlooringDivI final : public LTemplateInstruction<1, 2, 0> { - public: - LFlooringDivI(LOperand* dividend, LOperand* divisor) { - inputs_[0] = dividend; - inputs_[1] = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - LOperand* divisor() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i") - DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) -}; - - -class LMulI final : public LTemplateInstruction<1, 2, 0> { - public: - LMulI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i") - DECLARE_HYDROGEN_ACCESSOR(Mul) -}; - - -// Instruction for computing multiplier * multiplicand + addend. -class LMultiplyAddD final : public LTemplateInstruction<1, 3, 0> { - public: - LMultiplyAddD(LOperand* addend, LOperand* multiplier, - LOperand* multiplicand) { - inputs_[0] = addend; - inputs_[1] = multiplier; - inputs_[2] = multiplicand; - } - - LOperand* addend() { return inputs_[0]; } - LOperand* multiplier() { return inputs_[1]; } - LOperand* multiplicand() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d") -}; - - -// Instruction for computing minuend - multiplier * multiplicand. -class LMultiplySubD final : public LTemplateInstruction<1, 3, 0> { - public: - LMultiplySubD(LOperand* minuend, LOperand* multiplier, - LOperand* multiplicand) { - inputs_[0] = minuend; - inputs_[1] = multiplier; - inputs_[2] = multiplicand; - } - - LOperand* minuend() { return inputs_[0]; } - LOperand* multiplier() { return inputs_[1]; } - LOperand* multiplicand() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(MultiplySubD, "multiply-sub-d") -}; - - -class LDebugBreak final : public LTemplateInstruction<0, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break") -}; - - -class LCompareNumericAndBranch final : public LControlInstruction<2, 0> { - public: - LCompareNumericAndBranch(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch, - "compare-numeric-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch) - - Token::Value op() const { return hydrogen()->token(); } - bool is_double() const { return hydrogen()->representation().IsDouble(); } - - void PrintDataTo(StringStream* stream) override; -}; - -// Math.floor with a double result. -class LMathFloorD final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathFloorD(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathFloorD, "math-floor-d") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - -// Math.floor with an integer result. -class LMathFloorI final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathFloorI(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathFloorI, "math-floor-i") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - -// Math.round with a double result. 
-class LMathRoundD final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathRoundD(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathRoundD, "math-round-d") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - -// Math.round with an integer result. -class LMathRoundI final : public LTemplateInstruction<1, 1, 1> { - public: - LMathRoundI(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathRoundI, "math-round-i") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - - -class LMathFround final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathFround(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround") -}; - - -class LMathAbs final : public LTemplateInstruction<1, 2, 0> { - public: - LMathAbs(LOperand* context, LOperand* value) { - inputs_[1] = context; - inputs_[0] = value; - } - - LOperand* context() { return inputs_[1]; } - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - - -class LMathLog final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathLog(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log") -}; - - -class LMathClz32 final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathClz32(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32") -}; - -class LMathCos final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathCos(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos") -}; - - -class LMathSin final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathSin(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin") -}; - - -class LMathExp final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathExp(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp") -}; - - -class LMathSqrt final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathSqrt(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt") -}; - - -class LMathPowHalf final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathPowHalf(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half") -}; - - -class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> { - public: - LCmpObjectEqAndBranch(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch) -}; - - -class LCmpHoleAndBranch final : public LControlInstruction<1, 0> { - public: - 
explicit LCmpHoleAndBranch(LOperand* object) { inputs_[0] = object; } - - LOperand* object() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch) -}; - - -class LIsStringAndBranch final : public LControlInstruction<1, 1> { - public: - LIsStringAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LIsSmiAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LIsSmiAndBranch(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> { - public: - explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch, - "is-undetectable-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LStringCompareAndBranch final : public LControlInstruction<3, 0> { - public: - LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch, - "string-compare-and-branch") - DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch) - - Token::Value op() const { return hydrogen()->token(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LHasInstanceTypeAndBranch(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch, - "has-instance-type-and-branch") - DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - -class LClassOfTestAndBranch final : public LControlInstruction<1, 1> { - public: - LClassOfTestAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, "class-of-test-and-branch") - DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - -class LCmpT final : public LTemplateInstruction<1, 3, 0> { - public: - LCmpT(LOperand* context, LOperand* left, LOperand* right) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t") - DECLARE_HYDROGEN_ACCESSOR(CompareGeneric) - - Token::Value op() const { return hydrogen()->token(); } -}; 
- - -class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> { - public: - LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) { - inputs_[0] = object; - inputs_[1] = prototype; - } - - LOperand* object() const { return inputs_[0]; } - LOperand* prototype() const { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch, - "has-in-prototype-chain-and-branch") - DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch) -}; - - -class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> { - public: - LBoundsCheck(LOperand* index, LOperand* length) { - inputs_[0] = index; - inputs_[1] = length; - } - - LOperand* index() { return inputs_[0]; } - LOperand* length() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check") - DECLARE_HYDROGEN_ACCESSOR(BoundsCheck) -}; - - -class LBitI final : public LTemplateInstruction<1, 2, 0> { - public: - LBitI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - Token::Value op() const { return hydrogen()->op(); } - - DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i") - DECLARE_HYDROGEN_ACCESSOR(Bitwise) -}; - - -class LShiftI final : public LTemplateInstruction<1, 2, 0> { - public: - LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt) - : op_(op), can_deopt_(can_deopt) { - inputs_[0] = left; - inputs_[1] = right; - } - - Token::Value op() const { return op_; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - bool can_deopt() const { return can_deopt_; } - - DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i") - - private: - Token::Value op_; - bool can_deopt_; -}; - - -class LSubI final : public LTemplateInstruction<1, 2, 0> { - public: - LSubI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i") - DECLARE_HYDROGEN_ACCESSOR(Sub) -}; - - -class LRSubI final : public LTemplateInstruction<1, 2, 0> { - public: - LRSubI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(RSubI, "rsub-i") - DECLARE_HYDROGEN_ACCESSOR(Sub) -}; - - -class LConstantI final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - int32_t value() const { return hydrogen()->Integer32Value(); } -}; - - -class LConstantS final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); } -}; - - -class LConstantD final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - double value() const { return hydrogen()->DoubleValue(); } - uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); } -}; - - -class LConstantE final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - ExternalReference value() const { - return hydrogen()->ExternalReferenceValue(); - } -}; - - -class 
LConstantT final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - Handle value(Isolate* isolate) const { - return hydrogen()->handle(isolate); - } -}; - - -class LBranch final : public LControlInstruction<1, 0> { - public: - explicit LBranch(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(Branch, "branch") - DECLARE_HYDROGEN_ACCESSOR(Branch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LCmpMapAndBranch final : public LControlInstruction<1, 1> { - public: - LCmpMapAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareMap) - - Handle map() const { return hydrogen()->map().handle(); } -}; - - -class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> { - public: - LSeqStringGetChar(LOperand* string, LOperand* index) { - inputs_[0] = string; - inputs_[1] = index; - } - - LOperand* string() const { return inputs_[0]; } - LOperand* index() const { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char") - DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar) -}; - - -class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 0> { - public: - LSeqStringSetChar(LOperand* context, LOperand* string, LOperand* index, - LOperand* value) { - inputs_[0] = context; - inputs_[1] = string; - inputs_[2] = index; - inputs_[3] = value; - } - - LOperand* string() { return inputs_[1]; } - LOperand* index() { return inputs_[2]; } - LOperand* value() { return inputs_[3]; } - - DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char") - DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar) -}; - - -class LAddI final : public LTemplateInstruction<1, 2, 0> { - public: - LAddI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i") - DECLARE_HYDROGEN_ACCESSOR(Add) -}; - - -class LMathMinMax final : public LTemplateInstruction<1, 2, 0> { - public: - LMathMinMax(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max") - DECLARE_HYDROGEN_ACCESSOR(MathMinMax) -}; - - -class LPower final : public LTemplateInstruction<1, 2, 0> { - public: - LPower(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(Power, "power") - DECLARE_HYDROGEN_ACCESSOR(Power) -}; - - -class LArithmeticD final : public LTemplateInstruction<1, 2, 0> { - public: - LArithmeticD(Token::Value op, LOperand* left, LOperand* right) : op_(op) { - inputs_[0] = left; - inputs_[1] = right; - } - - Token::Value op() const { return op_; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - Opcode opcode() const override { return LInstruction::kArithmeticD; } - void CompileToNative(LCodeGen* generator) override; - const char* Mnemonic() const override; - - private: - Token::Value op_; -}; - - 
-class LArithmeticT final : public LTemplateInstruction<1, 3, 0> { - public: - LArithmeticT(Token::Value op, LOperand* context, LOperand* left, - LOperand* right) - : op_(op) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - Token::Value op() const { return op_; } - - Opcode opcode() const override { return LInstruction::kArithmeticT; } - void CompileToNative(LCodeGen* generator) override; - const char* Mnemonic() const override; - - DECLARE_HYDROGEN_ACCESSOR(BinaryOperation) - - private: - Token::Value op_; -}; - - -class LReturn final : public LTemplateInstruction<0, 3, 0> { - public: - LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) { - inputs_[0] = value; - inputs_[1] = context; - inputs_[2] = parameter_count; - } - - LOperand* value() { return inputs_[0]; } - - bool has_constant_parameter_count() { - return parameter_count()->IsConstantOperand(); - } - LConstantOperand* constant_parameter_count() { - DCHECK(has_constant_parameter_count()); - return LConstantOperand::cast(parameter_count()); - } - LOperand* parameter_count() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(Return, "return") -}; - - -class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LLoadNamedField(LOperand* object) { inputs_[0] = object; } - - LOperand* object() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field") - DECLARE_HYDROGEN_ACCESSOR(LoadNamedField) -}; - - -class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LLoadFunctionPrototype(LOperand* function) { inputs_[0] = function; } - - LOperand* function() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype") - DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype) -}; - - -class LLoadRoot final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root") - DECLARE_HYDROGEN_ACCESSOR(LoadRoot) - - Heap::RootListIndex index() const { return hydrogen()->index(); } -}; - - -class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> { - public: - LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) { - inputs_[0] = elements; - inputs_[1] = key; - inputs_[2] = backing_store_owner; - } - - LOperand* elements() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - LOperand* backing_store_owner() { return inputs_[2]; } - ElementsKind elements_kind() const { return hydrogen()->elements_kind(); } - bool is_fixed_typed_array() const { - return hydrogen()->is_fixed_typed_array(); - } - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed") - DECLARE_HYDROGEN_ACCESSOR(LoadKeyed) - - void PrintDataTo(StringStream* stream) override; - uint32_t base_offset() const { return hydrogen()->base_offset(); } -}; - - -class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LLoadContextSlot(LOperand* context) { inputs_[0] = context; } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot") - DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot) - - int slot_index() { return hydrogen()->slot_index(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LStoreContextSlot final : public LTemplateInstruction<0, 2, 0> { - public: - LStoreContextSlot(LOperand* 
context, LOperand* value) { - inputs_[0] = context; - inputs_[1] = value; - } - - LOperand* context() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot") - DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot) - - int slot_index() { return hydrogen()->slot_index(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LPushArgument final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LPushArgument(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument") -}; - - -class LDrop final : public LTemplateInstruction<0, 0, 0> { - public: - explicit LDrop(int count) : count_(count) {} - - int count() const { return count_; } - - DECLARE_CONCRETE_INSTRUCTION(Drop, "drop") - - private: - int count_; -}; - - -class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 0> { - public: - LStoreCodeEntry(LOperand* function, LOperand* code_object) { - inputs_[0] = function; - inputs_[1] = code_object; - } - - LOperand* function() { return inputs_[0]; } - LOperand* code_object() { return inputs_[1]; } - - void PrintDataTo(StringStream* stream) override; - - DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry") - DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry) -}; - - -class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> { - public: - LInnerAllocatedObject(LOperand* base_object, LOperand* offset) { - inputs_[0] = base_object; - inputs_[1] = offset; - } - - LOperand* base_object() const { return inputs_[0]; } - LOperand* offset() const { return inputs_[1]; } - - void PrintDataTo(StringStream* stream) override; - - DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object") -}; - - -class LThisFunction final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function") - DECLARE_HYDROGEN_ACCESSOR(ThisFunction) -}; - - -class LContext final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(Context, "context") - DECLARE_HYDROGEN_ACCESSOR(Context) -}; - - -class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LDeclareGlobals(LOperand* context) { inputs_[0] = context; } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals") - DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals) -}; - - -class LCallWithDescriptor final : public LTemplateResultInstruction<1> { - public: - LCallWithDescriptor(CallInterfaceDescriptor descriptor, - const ZoneList& operands, Zone* zone) - : descriptor_(descriptor), - inputs_(descriptor.GetRegisterParameterCount() + - kImplicitRegisterParameterCount, - zone) { - DCHECK(descriptor.GetRegisterParameterCount() + - kImplicitRegisterParameterCount == - operands.length()); - inputs_.AddAll(operands, zone); - } - - LOperand* target() const { return inputs_[0]; } - - const CallInterfaceDescriptor descriptor() { return descriptor_; } - - DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor) - - // The target and context are passed as implicit parameters that are not - // explicitly listed in the descriptor. 
- static const int kImplicitRegisterParameterCount = 2; - - private: - DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor") - - void PrintDataTo(StringStream* stream) override; - - int arity() const { return hydrogen()->argument_count() - 1; } - - CallInterfaceDescriptor descriptor_; - ZoneList inputs_; - - // Iterator support. - int InputCount() final { return inputs_.length(); } - LOperand* InputAt(int i) final { return inputs_[i]; } - - int TempCount() final { return 0; } - LOperand* TempAt(int i) final { return NULL; } -}; - - -class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> { - public: - LInvokeFunction(LOperand* context, LOperand* function) { - inputs_[0] = context; - inputs_[1] = function; - } - - LOperand* context() { return inputs_[0]; } - LOperand* function() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function") - DECLARE_HYDROGEN_ACCESSOR(InvokeFunction) - - void PrintDataTo(StringStream* stream) override; - - int arity() const { return hydrogen()->argument_count() - 1; } -}; - - -class LCallNewArray final : public LTemplateInstruction<1, 2, 0> { - public: - LCallNewArray(LOperand* context, LOperand* constructor) { - inputs_[0] = context; - inputs_[1] = constructor; - } - - LOperand* context() { return inputs_[0]; } - LOperand* constructor() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array") - DECLARE_HYDROGEN_ACCESSOR(CallNewArray) - - void PrintDataTo(StringStream* stream) override; - - int arity() const { return hydrogen()->argument_count() - 1; } -}; - - -class LCallRuntime final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LCallRuntime(LOperand* context) { inputs_[0] = context; } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime") - DECLARE_HYDROGEN_ACCESSOR(CallRuntime) - - bool ClobbersDoubleRegisters(Isolate* isolate) const override { - return save_doubles() == kDontSaveFPRegs; - } - - const Runtime::Function* function() const { return hydrogen()->function(); } - int arity() const { return hydrogen()->argument_count(); } - SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); } -}; - - -class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LInteger32ToDouble(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double") -}; - - -class LUint32ToDouble final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LUint32ToDouble(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double") -}; - - -class LNumberTagI final : public LTemplateInstruction<1, 1, 2> { - public: - LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i") -}; - - -class LNumberTagU final : public LTemplateInstruction<1, 1, 2> { - public: - LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - 
DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u") -}; - - -class LNumberTagD final : public LTemplateInstruction<1, 1, 2> { - public: - LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d") - DECLARE_HYDROGEN_ACCESSOR(Change) -}; - - -class LDoubleToSmi final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LDoubleToSmi(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi") - DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) - - bool truncating() { return hydrogen()->CanTruncateToInt32(); } -}; - - -// Sometimes truncating conversion from a tagged value to an int32. -class LDoubleToI final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LDoubleToI(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i") - DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) - - bool truncating() { return hydrogen()->CanTruncateToInt32(); } -}; - - -// Truncating conversion from a tagged value to an int32. -class LTaggedToI final : public LTemplateInstruction<1, 1, 2> { - public: - LTaggedToI(LOperand* value, LOperand* temp, LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i") - DECLARE_HYDROGEN_ACCESSOR(Change) - - bool truncating() { return hydrogen()->CanTruncateToInt32(); } -}; - - -class LSmiTag final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LSmiTag(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag") - DECLARE_HYDROGEN_ACCESSOR(Change) -}; - - -class LNumberUntagD final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LNumberUntagD(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag") - DECLARE_HYDROGEN_ACCESSOR(Change) - - bool truncating() { return hydrogen()->CanTruncateToNumber(); } -}; - - -class LSmiUntag final : public LTemplateInstruction<1, 1, 0> { - public: - LSmiUntag(LOperand* value, bool needs_check) : needs_check_(needs_check) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - bool needs_check() const { return needs_check_; } - - DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag") - - private: - bool needs_check_; -}; - - -class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> { - public: - LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) { - inputs_[0] = object; - inputs_[1] = value; - temps_[0] = temp; - } - - LOperand* object() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") - DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) - - void PrintDataTo(StringStream* stream) override; - - Representation representation() const { - return hydrogen()->field_representation(); - } -}; - - -class LStoreKeyed final : public 
LTemplateInstruction<0, 4, 0> { - public: - LStoreKeyed(LOperand* object, LOperand* key, LOperand* value, - LOperand* backing_store_owner) { - inputs_[0] = object; - inputs_[1] = key; - inputs_[2] = value; - inputs_[3] = backing_store_owner; - } - - bool is_fixed_typed_array() const { - return hydrogen()->is_fixed_typed_array(); - } - LOperand* elements() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - LOperand* value() { return inputs_[2]; } - LOperand* backing_store_owner() { return inputs_[3]; } - ElementsKind elements_kind() const { return hydrogen()->elements_kind(); } - - DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyed) - - void PrintDataTo(StringStream* stream) override; - bool NeedsCanonicalization() { - if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() || - hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) { - return false; - } - return hydrogen()->NeedsCanonicalization(); - } - uint32_t base_offset() const { return hydrogen()->base_offset(); } -}; - - -class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> { - public: - LTransitionElementsKind(LOperand* object, LOperand* context, - LOperand* new_map_temp) { - inputs_[0] = object; - inputs_[1] = context; - temps_[0] = new_map_temp; - } - - LOperand* context() { return inputs_[1]; } - LOperand* object() { return inputs_[0]; } - LOperand* new_map_temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind, - "transition-elements-kind") - DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind) - - void PrintDataTo(StringStream* stream) override; - - Handle original_map() { return hydrogen()->original_map().handle(); } - Handle transitioned_map() { - return hydrogen()->transitioned_map().handle(); - } - ElementsKind from_kind() { return hydrogen()->from_kind(); } - ElementsKind to_kind() { return hydrogen()->to_kind(); } -}; - -class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 2> { - public: - LTrapAllocationMemento(LOperand* object, LOperand* temp1, LOperand* temp2) { - inputs_[0] = object; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* object() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, "trap-allocation-memento") -}; - - -class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> { - public: - LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements, - LOperand* key, LOperand* current_capacity) { - inputs_[0] = context; - inputs_[1] = object; - inputs_[2] = elements; - inputs_[3] = key; - inputs_[4] = current_capacity; - } - - LOperand* context() { return inputs_[0]; } - LOperand* object() { return inputs_[1]; } - LOperand* elements() { return inputs_[2]; } - LOperand* key() { return inputs_[3]; } - LOperand* current_capacity() { return inputs_[4]; } - - bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; } - - DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements) - DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements") -}; - - -class LStringAdd final : public LTemplateInstruction<1, 3, 0> { - public: - LStringAdd(LOperand* context, LOperand* left, LOperand* right) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - - 
DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add") - DECLARE_HYDROGEN_ACCESSOR(StringAdd) -}; - - -class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> { - public: - LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) { - inputs_[0] = context; - inputs_[1] = string; - inputs_[2] = index; - } - - LOperand* context() { return inputs_[0]; } - LOperand* string() { return inputs_[1]; } - LOperand* index() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at") - DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt) -}; - - -class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> { - public: - explicit LStringCharFromCode(LOperand* context, LOperand* char_code) { - inputs_[0] = context; - inputs_[1] = char_code; - } - - LOperand* context() { return inputs_[0]; } - LOperand* char_code() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code") - DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode) -}; - - -class LCheckValue final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckValue(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value") - DECLARE_HYDROGEN_ACCESSOR(CheckValue) -}; - - -class LCheckArrayBufferNotNeutered final - : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckArrayBufferNotNeutered(LOperand* view) { inputs_[0] = view; } - - LOperand* view() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered, - "check-array-buffer-not-neutered") - DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered) -}; - - -class LCheckInstanceType final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckInstanceType(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type") - DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType) -}; - - -class LCheckMaps final : public LTemplateInstruction<0, 1, 1> { - public: - explicit LCheckMaps(LOperand* value = NULL, LOperand* temp = NULL) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps") - DECLARE_HYDROGEN_ACCESSOR(CheckMaps) -}; - - -class LCheckSmi final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LCheckSmi(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi") -}; - - -class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckNonSmi(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi") - DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject) -}; - - -class LClampDToUint8 final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LClampDToUint8(LOperand* unclamped) { inputs_[0] = unclamped; } - - LOperand* unclamped() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8") -}; - - -class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LClampIToUint8(LOperand* unclamped) { inputs_[0] = unclamped; } - - LOperand* unclamped() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8") -}; - - -class LClampTToUint8 final 
: public LTemplateInstruction<1, 1, 1> { - public: - LClampTToUint8(LOperand* unclamped, LOperand* temp) { - inputs_[0] = unclamped; - temps_[0] = temp; - } - - LOperand* unclamped() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8") -}; - - -class LAllocate final : public LTemplateInstruction<1, 2, 2> { - public: - LAllocate(LOperand* context, LOperand* size, LOperand* temp1, - LOperand* temp2) { - inputs_[0] = context; - inputs_[1] = size; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* context() { return inputs_[0]; } - LOperand* size() { return inputs_[1]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate") - DECLARE_HYDROGEN_ACCESSOR(Allocate) -}; - -class LFastAllocate final : public LTemplateInstruction<1, 1, 2> { - public: - LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) { - inputs_[0] = size; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* size() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate") - DECLARE_HYDROGEN_ACCESSOR(Allocate) -}; - - -class LTypeof final : public LTemplateInstruction<1, 2, 0> { - public: - LTypeof(LOperand* context, LOperand* value) { - inputs_[0] = context; - inputs_[1] = value; - } - - LOperand* context() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof") -}; - - -class LTypeofIsAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LTypeofIsAndBranch(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch") - DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch) - - Handle type_literal() { return hydrogen()->type_literal(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LOsrEntry final : public LTemplateInstruction<0, 0, 0> { - public: - LOsrEntry() {} - - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry") -}; - - -class LStackCheck final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LStackCheck(LOperand* context) { inputs_[0] = context; } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check") - DECLARE_HYDROGEN_ACCESSOR(StackCheck) - - Label* done_label() { return &done_label_; } - - private: - Label done_label_; -}; - - -class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> { - public: - LForInPrepareMap(LOperand* context, LOperand* object) { - inputs_[0] = context; - inputs_[1] = object; - } - - LOperand* context() { return inputs_[0]; } - LOperand* object() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map") -}; - - -class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LForInCacheArray(LOperand* map) { inputs_[0] = map; } - - LOperand* map() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array") - - int idx() { return HForInCacheArray::cast(this->hydrogen_value())->idx(); } -}; - - -class LCheckMapValue final : public LTemplateInstruction<0, 2, 0> { - public: - LCheckMapValue(LOperand* value, LOperand* map) { - inputs_[0] = value; - inputs_[1] = map; 
- } - - LOperand* value() { return inputs_[0]; } - LOperand* map() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value") -}; - - -class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> { - public: - LLoadFieldByIndex(LOperand* object, LOperand* index) { - inputs_[0] = object; - inputs_[1] = index; - } - - LOperand* object() { return inputs_[0]; } - LOperand* index() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index") -}; - - -class LChunkBuilder; -class LPlatformChunk final : public LChunk { - public: - LPlatformChunk(CompilationInfo* info, HGraph* graph) : LChunk(info, graph) {} - - int GetNextSpillIndex(RegisterKind kind); - LOperand* GetNextSpillSlot(RegisterKind kind); -}; - - -class LChunkBuilder final : public LChunkBuilderBase { - public: - LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator) - : LChunkBuilderBase(info, graph), - current_instruction_(NULL), - current_block_(NULL), - next_block_(NULL), - allocator_(allocator) {} - - // Build the sequence for the graph. - LPlatformChunk* Build(); - -// Declare methods that deal with the individual node types. -#define DECLARE_DO(type) LInstruction* Do##type(H##type* node); - HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) -#undef DECLARE_DO - - LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend); - LInstruction* DoMultiplySub(HValue* minuend, HMul* mul); - LInstruction* DoRSub(HSub* instr); - - static bool HasMagicNumberForDivisor(int32_t divisor); - - LInstruction* DoMathFloor(HUnaryMathOperation* instr); - LInstruction* DoMathRound(HUnaryMathOperation* instr); - LInstruction* DoMathFround(HUnaryMathOperation* instr); - LInstruction* DoMathAbs(HUnaryMathOperation* instr); - LInstruction* DoMathLog(HUnaryMathOperation* instr); - LInstruction* DoMathCos(HUnaryMathOperation* instr); - LInstruction* DoMathSin(HUnaryMathOperation* instr); - LInstruction* DoMathExp(HUnaryMathOperation* instr); - LInstruction* DoMathSqrt(HUnaryMathOperation* instr); - LInstruction* DoMathPowHalf(HUnaryMathOperation* instr); - LInstruction* DoMathClz32(HUnaryMathOperation* instr); - LInstruction* DoDivByPowerOf2I(HDiv* instr); - LInstruction* DoDivByConstI(HDiv* instr); - LInstruction* DoDivI(HDiv* instr); - LInstruction* DoModByPowerOf2I(HMod* instr); - LInstruction* DoModByConstI(HMod* instr); - LInstruction* DoModI(HMod* instr); - LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr); - LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr); - LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr); - - private: - // Methods for getting operands for Use / Define / Temp. - LUnallocated* ToUnallocated(Register reg); - LUnallocated* ToUnallocated(DoubleRegister reg); - - // Methods for setting up define-use relationships. - MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand); - MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register); - MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value, - DoubleRegister fixed_register); - - // A value that is guaranteed to be allocated to a register. - // Operand created by UseRegister is guaranteed to be live until the end of - // instruction. This means that register allocator will not reuse it's - // register for any other operand inside instruction. - // Operand created by UseRegisterAtStart is guaranteed to be live only at - // instruction start. 
Register allocator is free to assign the same register - // to some other operand used inside instruction (i.e. temporary or - // output). - MUST_USE_RESULT LOperand* UseRegister(HValue* value); - MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value); - - // An input operand in a register that may be trashed. - MUST_USE_RESULT LOperand* UseTempRegister(HValue* value); - - // An input operand in a register or stack slot. - MUST_USE_RESULT LOperand* Use(HValue* value); - MUST_USE_RESULT LOperand* UseAtStart(HValue* value); - - // An input operand in a register, stack slot or a constant operand. - MUST_USE_RESULT LOperand* UseOrConstant(HValue* value); - MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value); - - // An input operand in a register or a constant operand. - MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value); - MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value); - - // An input operand in a constant operand. - MUST_USE_RESULT LOperand* UseConstant(HValue* value); - - // An input operand in register, stack slot or a constant operand. - // Will not be moved to a register even if one is freely available. - MUST_USE_RESULT LOperand* UseAny(HValue* value) override; - - // Temporary operand that must be in a register. - MUST_USE_RESULT LUnallocated* TempRegister(); - MUST_USE_RESULT LUnallocated* TempDoubleRegister(); - MUST_USE_RESULT LOperand* FixedTemp(Register reg); - MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg); - - // Methods for setting up define-use relationships. - // Return the same instruction that they are passed. - LInstruction* Define(LTemplateResultInstruction<1>* instr, - LUnallocated* result); - LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr); - LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr, - int index); - LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr); - LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr, Register reg); - LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr, - DoubleRegister reg); - LInstruction* AssignEnvironment(LInstruction* instr); - LInstruction* AssignPointerMap(LInstruction* instr); - - enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY }; - - // By default we assume that instruction sequences generated for calls - // cannot deoptimize eagerly and we do not attach environment to this - // instruction. 
- LInstruction* MarkAsCall( - LInstruction* instr, HInstruction* hinstr, - CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY); - - void VisitInstruction(HInstruction* current); - void AddInstruction(LInstruction* instr, HInstruction* current); - - void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block); - LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr); - LInstruction* DoArithmeticD(Token::Value op, - HArithmeticBinaryOperation* instr); - LInstruction* DoArithmeticT(Token::Value op, HBinaryOperation* instr); - - HInstruction* current_instruction_; - HBasicBlock* current_block_; - HBasicBlock* next_block_; - LAllocator* allocator_; - - DISALLOW_COPY_AND_ASSIGN(LChunkBuilder); -}; - -#undef DECLARE_HYDROGEN_ACCESSOR -#undef DECLARE_CONCRETE_INSTRUCTION -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_PPC_LITHIUM_PPC_H_ diff --git a/src/crankshaft/s390/OWNERS b/src/crankshaft/s390/OWNERS deleted file mode 100644 index 752e8e3d81..0000000000 --- a/src/crankshaft/s390/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -jyan@ca.ibm.com -dstence@us.ibm.com -joransiu@ca.ibm.com -mbrandy@us.ibm.com -michael_dawson@ca.ibm.com -bjaideep@ca.ibm.com diff --git a/src/crankshaft/s390/lithium-codegen-s390.cc b/src/crankshaft/s390/lithium-codegen-s390.cc deleted file mode 100644 index 06d3dbbdee..0000000000 --- a/src/crankshaft/s390/lithium-codegen-s390.cc +++ /dev/null @@ -1,5568 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/s390/lithium-codegen-s390.h" - -#include "src/base/bits.h" -#include "src/builtins/builtins-constructor.h" -#include "src/code-factory.h" -#include "src/code-stubs.h" -#include "src/crankshaft/s390/lithium-gap-resolver-s390.h" -#include "src/ic/ic.h" -#include "src/ic/stub-cache.h" - -namespace v8 { -namespace internal { - -class SafepointGenerator final : public CallWrapper { - public: - SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers, - Safepoint::DeoptMode mode) - : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {} - virtual ~SafepointGenerator() {} - - void BeforeCall(int call_size) const override {} - - void AfterCall() const override { - codegen_->RecordSafepoint(pointers_, deopt_mode_); - } - - private: - LCodeGen* codegen_; - LPointerMap* pointers_; - Safepoint::DeoptMode deopt_mode_; -}; - -LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope( - LCodeGen* codegen) - : codegen_(codegen) { - DCHECK(codegen_->info()->is_calling()); - DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); - codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters; - StoreRegistersStateStub stub(codegen_->isolate()); - codegen_->masm_->CallStub(&stub); -} - -LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() { - DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters); - RestoreRegistersStateStub stub(codegen_->isolate()); - codegen_->masm_->CallStub(&stub); - codegen_->expected_safepoint_kind_ = Safepoint::kSimple; -} - -#define __ masm()-> - -bool LCodeGen::GenerateCode() { - LPhase phase("Z_Code generation", chunk()); - DCHECK(is_unused()); - status_ = GENERATING; - - // Open a frame scope to indicate that there is a frame on the stack. The - // NONE indicates that the scope shouldn't actually generate code to set up - // the frame (that is done in GeneratePrologue). 
- FrameScope frame_scope(masm_, StackFrame::NONE); - - return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() && - GenerateJumpTable() && GenerateSafepointTable(); -} - -void LCodeGen::FinishCode(Handle code) { - DCHECK(is_done()); - code->set_stack_slots(GetTotalFrameSlotCount()); - code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); - PopulateDeoptimizationData(code); -} - -void LCodeGen::SaveCallerDoubles() { - DCHECK(info()->saves_caller_doubles()); - DCHECK(NeedsEagerFrame()); - Comment(";;; Save clobbered callee double registers"); - int count = 0; - BitVector* doubles = chunk()->allocated_double_registers(); - BitVector::Iterator save_iterator(doubles); - while (!save_iterator.Done()) { - __ StoreDouble(DoubleRegister::from_code(save_iterator.Current()), - MemOperand(sp, count * kDoubleSize)); - save_iterator.Advance(); - count++; - } -} - -void LCodeGen::RestoreCallerDoubles() { - DCHECK(info()->saves_caller_doubles()); - DCHECK(NeedsEagerFrame()); - Comment(";;; Restore clobbered callee double registers"); - BitVector* doubles = chunk()->allocated_double_registers(); - BitVector::Iterator save_iterator(doubles); - int count = 0; - while (!save_iterator.Done()) { - __ LoadDouble(DoubleRegister::from_code(save_iterator.Current()), - MemOperand(sp, count * kDoubleSize)); - save_iterator.Advance(); - count++; - } -} - -bool LCodeGen::GeneratePrologue() { - DCHECK(is_generating()); - - if (info()->IsOptimizing()) { - ProfileEntryHookStub::MaybeCallEntryHook(masm_); - - // r3: Callee's JS function. - // cp: Callee's context. - // fp: Caller's frame pointer. - // lr: Caller's pc. - // ip: Our own function entry (required by the prologue) - } - - int prologue_offset = masm_->pc_offset(); - - if (prologue_offset) { - // Prologue logic requires its starting address in ip and the - // corresponding offset from the function entry. Need to add - // 4 bytes for the size of AHI/AGHI that AddP expands into. - prologue_offset += sizeof(FourByteInstr); - __ AddP(ip, ip, Operand(prologue_offset)); - } - info()->set_prologue_offset(prologue_offset); - if (NeedsEagerFrame()) { - if (info()->IsStub()) { - __ StubPrologue(StackFrame::STUB, ip, prologue_offset); - } else { - __ Prologue(info()->GeneratePreagedPrologue(), ip, prologue_offset); - } - frame_is_built_ = true; - } - - // Reserve space for the stack slots needed by the code. - int slots = GetStackSlotCount(); - if (slots > 0) { - __ lay(sp, MemOperand(sp, -(slots * kPointerSize))); - if (FLAG_debug_code) { - __ Push(r2, r3); - __ mov(r2, Operand(slots * kPointerSize)); - __ mov(r3, Operand(kSlotsZapValue)); - Label loop; - __ bind(&loop); - __ StoreP(r3, MemOperand(sp, r2, kPointerSize)); - __ lay(r2, MemOperand(r2, -kPointerSize)); - __ CmpP(r2, Operand::Zero()); - __ bne(&loop); - __ Pop(r2, r3); - } - } - - if (info()->saves_caller_doubles()) { - SaveCallerDoubles(); - } - return !is_aborted(); -} - -void LCodeGen::DoPrologue(LPrologue* instr) { - Comment(";;; Prologue begin"); - - // Possibly allocate a local context. - if (info()->scope()->NeedsContext()) { - Comment(";;; Allocate local context"); - bool need_write_barrier = true; - // Argument to NewContext is the function, which is in r3. 
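A note on the debug-only slot zapping in GeneratePrologue above: when FLAG_debug_code is set, every freshly reserved spill slot is overwritten with kSlotsZapValue so that reads from stale slots are easy to spot in a debugger. Below is a minimal stand-alone sketch of the same idea; the sentinel value and the slot count are invented for illustration and are not V8's constants.

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Hypothetical zap pattern; V8 has its own kSlotsZapValue constant.
      constexpr uint64_t kZap = 0xbeefdeadbeefdeadULL;
      constexpr int kSlots = 4;  // pretend the frame needs four spill slots
      uint64_t frame[kSlots];
      // Debug-only: mark every reserved slot so uninitialized reads stand out.
      for (int i = 0; i < kSlots; i++) frame[i] = kZap;
      for (int i = 0; i < kSlots; i++)
        std::printf("slot %d = %016llx\n", i,
                    static_cast<unsigned long long>(frame[i]));
      return 0;
    }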
- int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; - Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt; - if (info()->scope()->is_script_scope()) { - __ push(r3); - __ Push(info()->scope()->scope_info()); - __ CallRuntime(Runtime::kNewScriptContext); - deopt_mode = Safepoint::kLazyDeopt; - } else { - if (slots <= ConstructorBuiltins::MaximumFunctionContextSlots()) { - Callable callable = CodeFactory::FastNewFunctionContext( - isolate(), info()->scope()->scope_type()); - __ mov(FastNewFunctionContextDescriptor::SlotsRegister(), - Operand(slots)); - __ Call(callable.code(), RelocInfo::CODE_TARGET); - // Result of the FastNewFunctionContext builtin is always in new space. - need_write_barrier = false; - } else { - __ push(r3); - __ Push(Smi::FromInt(info()->scope()->scope_type())); - __ CallRuntime(Runtime::kNewFunctionContext); - } - } - RecordSafepoint(deopt_mode); - - // Context is returned in both r2 and cp. It replaces the context - // passed to us. It's saved in the stack and kept live in cp. - __ LoadRR(cp, r2); - __ StoreP(r2, MemOperand(fp, StandardFrameConstants::kContextOffset)); - // Copy any necessary parameters into the context. - int num_parameters = info()->scope()->num_parameters(); - int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0; - for (int i = first_parameter; i < num_parameters; i++) { - Variable* var = (i == -1) ? info()->scope()->receiver() - : info()->scope()->parameter(i); - if (var->IsContextSlot()) { - int parameter_offset = StandardFrameConstants::kCallerSPOffset + - (num_parameters - 1 - i) * kPointerSize; - // Load parameter from stack. - __ LoadP(r2, MemOperand(fp, parameter_offset)); - // Store it in the context. - MemOperand target = ContextMemOperand(cp, var->index()); - __ StoreP(r2, target); - // Update the write barrier. This clobbers r5 and r2. 
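The parameter-copy loop in DoPrologue above reads each context-allocated parameter from the caller's stack at kCallerSPOffset + (num_parameters - 1 - i) * kPointerSize. Here is a small sketch of that offset arithmetic; the pointer size and the caller-SP offset are assumptions chosen for illustration, not the real StandardFrameConstants values.

    #include <cstdio>

    int main() {
      // Assumed values; the real ones come from StandardFrameConstants and
      // depend on the target architecture.
      constexpr int kPointerSize = 8;
      constexpr int kCallerSPOffset = 2 * kPointerSize;  // assumption
      constexpr int num_parameters = 3;
      // Parameter 0 was pushed first, so it sits furthest from the caller's SP.
      for (int i = 0; i < num_parameters; i++) {
        int parameter_offset =
            kCallerSPOffset + (num_parameters - 1 - i) * kPointerSize;
        std::printf("parameter %d is read from fp + %d\n", i, parameter_offset);
      }
      return 0;
    }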
- if (need_write_barrier) { - __ RecordWriteContextSlot(cp, target.offset(), r2, r5, - GetLinkRegisterState(), kSaveFPRegs); - } else if (FLAG_debug_code) { - Label done; - __ JumpIfInNewSpace(cp, r2, &done); - __ Abort(kExpectedNewSpaceObject); - __ bind(&done); - } - } - } - Comment(";;; End allocate local context"); - } - - Comment(";;; Prologue end"); -} - -void LCodeGen::GenerateOsrPrologue() { UNREACHABLE(); } - -void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { - if (instr->IsCall()) { - EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); - } - if (!instr->IsLazyBailout() && !instr->IsGap()) { - safepoints_.BumpLastLazySafepointIndex(); - } -} - -bool LCodeGen::GenerateDeferredCode() { - DCHECK(is_generating()); - if (deferred_.length() > 0) { - for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { - LDeferredCode* code = deferred_[i]; - - HValue* value = - instructions_->at(code->instruction_index())->hydrogen_value(); - RecordAndWritePosition(value->position()); - - Comment( - ";;; <@%d,#%d> " - "-------------------- Deferred %s --------------------", - code->instruction_index(), code->instr()->hydrogen_value()->id(), - code->instr()->Mnemonic()); - __ bind(code->entry()); - if (NeedsDeferredFrame()) { - Comment(";;; Build frame"); - DCHECK(!frame_is_built_); - DCHECK(info()->IsStub()); - frame_is_built_ = true; - __ Load(scratch0(), - Operand(StackFrame::TypeToMarker(StackFrame::STUB))); - __ PushCommonFrame(scratch0()); - Comment(";;; Deferred code"); - } - code->Generate(); - if (NeedsDeferredFrame()) { - Comment(";;; Destroy frame"); - DCHECK(frame_is_built_); - __ PopCommonFrame(scratch0()); - frame_is_built_ = false; - } - __ b(code->exit()); - } - } - - return !is_aborted(); -} - -bool LCodeGen::GenerateJumpTable() { - // Check that the jump table is accessible from everywhere in the function - // code, i.e. that offsets in halfworld to the table can be encoded in the - // 32-bit signed immediate of a branch instruction. - // To simplify we consider the code size from the first instruction to the - // end of the jump table. We also don't consider the pc load delta. - // Each entry in the jump table generates one instruction and inlines one - // 32bit data after it. - // TODO(joransiu): The Int24 condition can likely be relaxed for S390 - if (!is_int24(masm()->pc_offset() + jump_table_.length() * 7)) { - Abort(kGeneratedCodeIsTooLarge); - } - - if (jump_table_.length() > 0) { - Label needs_frame, call_deopt_entry; - - Comment(";;; -------------------- Jump table --------------------"); - Address base = jump_table_[0].address; - - Register entry_offset = scratch0(); - - int length = jump_table_.length(); - for (int i = 0; i < length; i++) { - Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i]; - __ bind(&table_entry->label); - - DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type); - Address entry = table_entry->address; - DeoptComment(table_entry->deopt_info); - - // Second-level deopt table entries are contiguous and small, so instead - // of loading the full, absolute address of each one, load an immediate - // offset which will be added to the base address later. 
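The GenerateJumpTable comment above describes the size trick used for deopt entries: each table entry materializes only the 32-bit offset entry - base, and the base address is added back once at the shared call site. The sketch below shows the same base-plus-offset dispatch under the assumption that all targets sit within a 32-bit range of the base (true for a contiguous entry table); the two stand-in functions are hypothetical and are not Deoptimizer entries.

    #include <cstdint>
    #include <cstdio>

    static void entry0() { std::puts("deopt entry 0"); }
    static void entry1() { std::puts("deopt entry 1"); }

    int main() {
      using Entry = void (*)();
      Entry entries[] = {entry0, entry1};

      // Encode each target as a 32-bit offset from one base address...
      uintptr_t base = reinterpret_cast<uintptr_t>(entries[0]);
      int32_t offsets[2];
      for (int i = 0; i < 2; i++)
        offsets[i] =
            static_cast<int32_t>(reinterpret_cast<uintptr_t>(entries[i]) - base);

      // ...and add the base back exactly once when dispatching.
      for (int i = 0; i < 2; i++) {
        Entry target = reinterpret_cast<Entry>(base + offsets[i]);
        target();
      }
      return 0;
    }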
- __ mov(entry_offset, Operand(entry - base)); - - if (table_entry->needs_frame) { - DCHECK(!info()->saves_caller_doubles()); - Comment(";;; call deopt with frame"); - __ PushCommonFrame(); - __ b(r14, &needs_frame); - } else { - __ b(r14, &call_deopt_entry); - } - } - - if (needs_frame.is_linked()) { - __ bind(&needs_frame); - // This variant of deopt can only be used with stubs. Since we don't - // have a function pointer to install in the stack frame that we're - // building, install a special marker there instead. - DCHECK(info()->IsStub()); - __ Load(ip, Operand(StackFrame::TypeToMarker(StackFrame::STUB))); - __ push(ip); - DCHECK(info()->IsStub()); - } - - Comment(";;; call deopt"); - __ bind(&call_deopt_entry); - - if (info()->saves_caller_doubles()) { - DCHECK(info()->IsStub()); - RestoreCallerDoubles(); - } - - // Add the base address to the offset previously loaded in entry_offset. - __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base))); - __ AddP(ip, entry_offset, ip); - __ Jump(ip); - } - - // The deoptimization jump table is the last part of the instruction - // sequence. Mark the generated code as done unless we bailed out. - if (!is_aborted()) status_ = DONE; - return !is_aborted(); -} - -bool LCodeGen::GenerateSafepointTable() { - DCHECK(is_done()); - safepoints_.Emit(masm(), GetTotalFrameSlotCount()); - return !is_aborted(); -} - -Register LCodeGen::ToRegister(int code) const { - return Register::from_code(code); -} - -DoubleRegister LCodeGen::ToDoubleRegister(int code) const { - return DoubleRegister::from_code(code); -} - -Register LCodeGen::ToRegister(LOperand* op) const { - DCHECK(op->IsRegister()); - return ToRegister(op->index()); -} - -Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { - if (op->IsRegister()) { - return ToRegister(op->index()); - } else if (op->IsConstantOperand()) { - LConstantOperand* const_op = LConstantOperand::cast(op); - HConstant* constant = chunk_->LookupConstant(const_op); - Handle literal = constant->handle(isolate()); - Representation r = chunk_->LookupLiteralRepresentation(const_op); - if (r.IsInteger32()) { - AllowDeferredHandleDereference get_number; - DCHECK(literal->IsNumber()); - __ LoadIntLiteral(scratch, static_cast(literal->Number())); - } else if (r.IsDouble()) { - Abort(kEmitLoadRegisterUnsupportedDoubleImmediate); - } else { - DCHECK(r.IsSmiOrTagged()); - __ Move(scratch, literal); - } - return scratch; - } else if (op->IsStackSlot()) { - __ LoadP(scratch, ToMemOperand(op)); - return scratch; - } - UNREACHABLE(); -} - -void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op, - Register dst) { - DCHECK(IsInteger32(const_op)); - HConstant* constant = chunk_->LookupConstant(const_op); - int32_t value = constant->Integer32Value(); - if (IsSmi(const_op)) { - __ LoadSmiLiteral(dst, Smi::FromInt(value)); - } else { - __ LoadIntLiteral(dst, value); - } -} - -DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const { - DCHECK(op->IsDoubleRegister()); - return ToDoubleRegister(op->index()); -} - -Handle LCodeGen::ToHandle(LConstantOperand* op) const { - HConstant* constant = chunk_->LookupConstant(op); - DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); - return constant->handle(isolate()); -} - -bool LCodeGen::IsInteger32(LConstantOperand* op) const { - return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32(); -} - -bool LCodeGen::IsSmi(LConstantOperand* op) const { - return chunk_->LookupLiteralRepresentation(op).IsSmi(); -} - -int32_t 
LCodeGen::ToInteger32(LConstantOperand* op) const { - return ToRepresentation(op, Representation::Integer32()); -} - -intptr_t LCodeGen::ToRepresentation(LConstantOperand* op, - const Representation& r) const { - HConstant* constant = chunk_->LookupConstant(op); - int32_t value = constant->Integer32Value(); - if (r.IsInteger32()) return value; - DCHECK(r.IsSmiOrTagged()); - return reinterpret_cast(Smi::FromInt(value)); -} - -Smi* LCodeGen::ToSmi(LConstantOperand* op) const { - HConstant* constant = chunk_->LookupConstant(op); - return Smi::FromInt(constant->Integer32Value()); -} - -double LCodeGen::ToDouble(LConstantOperand* op) const { - HConstant* constant = chunk_->LookupConstant(op); - DCHECK(constant->HasDoubleValue()); - return constant->DoubleValue(); -} - -Operand LCodeGen::ToOperand(LOperand* op) { - if (op->IsConstantOperand()) { - LConstantOperand* const_op = LConstantOperand::cast(op); - HConstant* constant = chunk()->LookupConstant(const_op); - Representation r = chunk_->LookupLiteralRepresentation(const_op); - if (r.IsSmi()) { - DCHECK(constant->HasSmiValue()); - return Operand(Smi::FromInt(constant->Integer32Value())); - } else if (r.IsInteger32()) { - DCHECK(constant->HasInteger32Value()); - return Operand(constant->Integer32Value()); - } else if (r.IsDouble()) { - Abort(kToOperandUnsupportedDoubleImmediate); - } - DCHECK(r.IsTagged()); - return Operand(constant->handle(isolate())); - } else if (op->IsRegister()) { - return Operand(ToRegister(op)); - } else if (op->IsDoubleRegister()) { - Abort(kToOperandIsDoubleRegisterUnimplemented); - return Operand::Zero(); - } - // Stack slots not implemented, use ToMemOperand instead. - UNREACHABLE(); -} - -static int ArgumentsOffsetWithoutFrame(int index) { - DCHECK(index < 0); - return -(index + 1) * kPointerSize; -} - -MemOperand LCodeGen::ToMemOperand(LOperand* op) const { - DCHECK(!op->IsRegister()); - DCHECK(!op->IsDoubleRegister()); - DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); - if (NeedsEagerFrame()) { - return MemOperand(fp, FrameSlotToFPOffset(op->index())); - } else { - // Retrieve parameter without eager stack-frame relative to the - // stack-pointer. - return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index())); - } -} - -MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const { - DCHECK(op->IsDoubleStackSlot()); - if (NeedsEagerFrame()) { - return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize); - } else { - // Retrieve parameter without eager stack-frame relative to the - // stack-pointer. - return MemOperand(sp, - ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize); - } -} - -void LCodeGen::WriteTranslation(LEnvironment* environment, - Translation* translation) { - if (environment == NULL) return; - - // The translation includes one command per value in the environment. 
- int translation_size = environment->translation_size(); - - WriteTranslation(environment->outer(), translation); - WriteTranslationFrame(environment, translation); - - int object_index = 0; - int dematerialized_index = 0; - for (int i = 0; i < translation_size; ++i) { - LOperand* value = environment->values()->at(i); - AddToTranslation( - environment, translation, value, environment->HasTaggedValueAt(i), - environment->HasUint32ValueAt(i), &object_index, &dematerialized_index); - } -} - -void LCodeGen::AddToTranslation(LEnvironment* environment, - Translation* translation, LOperand* op, - bool is_tagged, bool is_uint32, - int* object_index_pointer, - int* dematerialized_index_pointer) { - if (op == LEnvironment::materialization_marker()) { - int object_index = (*object_index_pointer)++; - if (environment->ObjectIsDuplicateAt(object_index)) { - int dupe_of = environment->ObjectDuplicateOfAt(object_index); - translation->DuplicateObject(dupe_of); - return; - } - int object_length = environment->ObjectLengthAt(object_index); - if (environment->ObjectIsArgumentsAt(object_index)) { - translation->BeginArgumentsObject(object_length); - } else { - translation->BeginCapturedObject(object_length); - } - int dematerialized_index = *dematerialized_index_pointer; - int env_offset = environment->translation_size() + dematerialized_index; - *dematerialized_index_pointer += object_length; - for (int i = 0; i < object_length; ++i) { - LOperand* value = environment->values()->at(env_offset + i); - AddToTranslation(environment, translation, value, - environment->HasTaggedValueAt(env_offset + i), - environment->HasUint32ValueAt(env_offset + i), - object_index_pointer, dematerialized_index_pointer); - } - return; - } - - if (op->IsStackSlot()) { - int index = op->index(); - if (is_tagged) { - translation->StoreStackSlot(index); - } else if (is_uint32) { - translation->StoreUint32StackSlot(index); - } else { - translation->StoreInt32StackSlot(index); - } - } else if (op->IsDoubleStackSlot()) { - int index = op->index(); - translation->StoreDoubleStackSlot(index); - } else if (op->IsRegister()) { - Register reg = ToRegister(op); - if (is_tagged) { - translation->StoreRegister(reg); - } else if (is_uint32) { - translation->StoreUint32Register(reg); - } else { - translation->StoreInt32Register(reg); - } - } else if (op->IsDoubleRegister()) { - DoubleRegister reg = ToDoubleRegister(op); - translation->StoreDoubleRegister(reg); - } else if (op->IsConstantOperand()) { - HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op)); - int src_index = DefineDeoptimizationLiteral(constant->handle(isolate())); - translation->StoreLiteral(src_index); - } else { - UNREACHABLE(); - } -} - -void LCodeGen::CallCode(Handle code, RelocInfo::Mode mode, - LInstruction* instr) { - CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT); -} - -void LCodeGen::CallCodeGeneric(Handle code, RelocInfo::Mode mode, - LInstruction* instr, - SafepointMode safepoint_mode) { - DCHECK(instr != NULL); - __ Call(code, mode); - RecordSafepointWithLazyDeopt(instr, safepoint_mode); - - // Signal that we don't inline smi code before these stubs in the - // optimizing code generator. 
- if (code->kind() == Code::COMPARE_IC) { - __ nop(); - } -} - -void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments, - LInstruction* instr, SaveFPRegsMode save_doubles) { - DCHECK(instr != NULL); - - __ CallRuntime(function, num_arguments, save_doubles); - - RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); -} - -void LCodeGen::LoadContextFromDeferred(LOperand* context) { - if (context->IsRegister()) { - __ Move(cp, ToRegister(context)); - } else if (context->IsStackSlot()) { - __ LoadP(cp, ToMemOperand(context)); - } else if (context->IsConstantOperand()) { - HConstant* constant = - chunk_->LookupConstant(LConstantOperand::cast(context)); - __ Move(cp, Handle::cast(constant->handle(isolate()))); - } else { - UNREACHABLE(); - } -} - -void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc, - LInstruction* instr, LOperand* context) { - LoadContextFromDeferred(context); - __ CallRuntimeSaveDoubles(id); - RecordSafepointWithRegisters(instr->pointer_map(), argc, - Safepoint::kNoLazyDeopt); -} - -void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment, - Safepoint::DeoptMode mode) { - environment->set_has_been_used(); - if (!environment->HasBeenRegistered()) { - // Physical stack frame layout: - // -x ............. -4 0 ..................................... y - // [incoming arguments] [spill slots] [pushed outgoing arguments] - - // Layout of the environment: - // 0 ..................................................... size-1 - // [parameters] [locals] [expression stack including arguments] - - // Layout of the translation: - // 0 ........................................................ size - 1 + 4 - // [expression stack including arguments] [locals] [4 words] [parameters] - // |>------------ translation_size ------------<| - - int frame_count = 0; - int jsframe_count = 0; - for (LEnvironment* e = environment; e != NULL; e = e->outer()) { - ++frame_count; - if (e->frame_type() == JS_FUNCTION) { - ++jsframe_count; - } - } - Translation translation(&translations_, frame_count, jsframe_count, zone()); - WriteTranslation(environment, &translation); - int deoptimization_index = deoptimizations_.length(); - int pc_offset = masm()->pc_offset(); - environment->Register(deoptimization_index, translation.index(), - (mode == Safepoint::kLazyDeopt) ? 
pc_offset : -1); - deoptimizations_.Add(environment, zone()); - } -} - -void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr, - DeoptimizeReason deopt_reason, - Deoptimizer::BailoutType bailout_type, - CRegister cr) { - LEnvironment* environment = instr->environment(); - RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); - DCHECK(environment->HasBeenRegistered()); - int id = environment->deoptimization_index(); - Address entry = - Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); - if (entry == NULL) { - Abort(kBailoutWasNotPrepared); - return; - } - - if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) { - Register scratch = scratch0(); - ExternalReference count = ExternalReference::stress_deopt_count(isolate()); - Label no_deopt; - - // Store the condition on the stack if necessary - if (cond != al) { - Label done; - __ LoadImmP(scratch, Operand::Zero()); - __ b(NegateCondition(cond), &done, Label::kNear); - __ LoadImmP(scratch, Operand(1)); - __ bind(&done); - __ push(scratch); - } - - Label done; - __ Push(r3); - __ mov(scratch, Operand(count)); - __ LoadW(r3, MemOperand(scratch)); - __ Sub32(r3, r3, Operand(1)); - __ Cmp32(r3, Operand::Zero()); - __ bne(&no_deopt, Label::kNear); - - __ LoadImmP(r3, Operand(FLAG_deopt_every_n_times)); - __ StoreW(r3, MemOperand(scratch)); - __ Pop(r3); - - if (cond != al) { - // Clean up the stack before the deoptimizer call - __ pop(scratch); - } - - __ Call(entry, RelocInfo::RUNTIME_ENTRY); - - __ b(&done); - - __ bind(&no_deopt); - __ StoreW(r3, MemOperand(scratch)); - __ Pop(r3); - - if (cond != al) { - // Clean up the stack before the deoptimizer call - __ pop(scratch); - } - - __ bind(&done); - - if (cond != al) { - cond = ne; - __ CmpP(scratch, Operand::Zero()); - } - } - - if (info()->ShouldTrapOnDeopt()) { - __ stop("trap_on_deopt", cond, kDefaultStopCode, cr); - } - - Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id); - - DCHECK(info()->IsStub() || frame_is_built_); - // Go through jump table if we need to handle condition, build frame, or - // restore caller doubles. - if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) { - __ Call(entry, RelocInfo::RUNTIME_ENTRY); - } else { - Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type, - !frame_is_built_); - // We often have several deopts to the same entry, reuse the last - // jump entry if this is the case. - if (FLAG_trace_deopt || isolate()->is_profiling() || - jump_table_.is_empty() || - !table_entry.IsEquivalentTo(jump_table_.last())) { - jump_table_.Add(table_entry, zone()); - } - __ b(cond, &jump_table_.last().label /*, cr*/); - } -} - -void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr, - DeoptimizeReason deopt_reason, CRegister cr) { - Deoptimizer::BailoutType bailout_type = - info()->IsStub() ? 
Deoptimizer::LAZY : Deoptimizer::EAGER; - DeoptimizeIf(cond, instr, deopt_reason, bailout_type, cr); -} - -void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr, - SafepointMode safepoint_mode) { - if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { - RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); - } else { - DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - RecordSafepointWithRegisters(instr->pointer_map(), 0, - Safepoint::kLazyDeopt); - } -} - -void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind, - int arguments, Safepoint::DeoptMode deopt_mode) { - DCHECK(expected_safepoint_kind_ == kind); - - const ZoneList* operands = pointers->GetNormalizedOperands(); - Safepoint safepoint = - safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode); - for (int i = 0; i < operands->length(); i++) { - LOperand* pointer = operands->at(i); - if (pointer->IsStackSlot()) { - safepoint.DefinePointerSlot(pointer->index(), zone()); - } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { - safepoint.DefinePointerRegister(ToRegister(pointer), zone()); - } - } -} - -void LCodeGen::RecordSafepoint(LPointerMap* pointers, - Safepoint::DeoptMode deopt_mode) { - RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode); -} - -void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) { - LPointerMap empty_pointers(zone()); - RecordSafepoint(&empty_pointers, deopt_mode); -} - -void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, - int arguments, - Safepoint::DeoptMode deopt_mode) { - RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode); -} - -static const char* LabelType(LLabel* label) { - if (label->is_loop_header()) return " (loop header)"; - if (label->is_osr_entry()) return " (OSR entry)"; - return ""; -} - -void LCodeGen::DoLabel(LLabel* label) { - Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------", - current_instruction_, label->hydrogen_value()->id(), - label->block_id(), LabelType(label)); - __ bind(label->label()); - current_block_ = label->block_id(); - DoGap(label); -} - -void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); } - -void LCodeGen::DoGap(LGap* gap) { - for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION; - i++) { - LGap::InnerPosition inner_pos = static_cast(i); - LParallelMove* move = gap->GetParallelMove(inner_pos); - if (move != NULL) DoParallelMove(move); - } -} - -void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); } - -void LCodeGen::DoParameter(LParameter* instr) { - // Nothing to do. -} - -void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { - GenerateOsrPrologue(); -} - -void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - DCHECK(dividend.is(ToRegister(instr->result()))); - - // Theoretically, a variation of the branch-free code for integer division by - // a power of 2 (calculating the remainder via an additional multiplication - // (which gets simplified to an 'and') and subtraction) should be faster, and - // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to - // indicate that positive dividends are heavily favored, so the branching - // version performs better. 
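The comment that closes the hunk above explains why DoModByPowerOf2I (next hunk) uses the branching scheme: non-negative dividends take a plain bit-mask, while negative dividends are negated, masked, and negated back, which stays correct even for kMinInt. Below is a stand-alone sketch of that scheme; the negation is done in unsigned arithmetic so INT32_MIN does not overflow, and the kBailoutOnMinusZero deopt has no counterpart here since C++ integers have no negative zero.

    #include <cassert>
    #include <cstdint>

    // x % (1 << shift) with the usual sign convention: the result takes the
    // sign of the dividend, as the removed code needs for HMod.
    int32_t ModByPowerOf2(int32_t x, int shift) {
      uint32_t mask = (1u << shift) - 1;
      if (x >= 0) return static_cast<int32_t>(static_cast<uint32_t>(x) & mask);
      // Negate, mask, negate back; unsigned arithmetic keeps INT32_MIN defined.
      uint32_t neg = 0u - static_cast<uint32_t>(x);
      return -static_cast<int32_t>(neg & mask);
    }

    int main() {
      assert(ModByPowerOf2(13, 2) == 13 % 4);
      assert(ModByPowerOf2(-13, 2) == -13 % 4);  // -1, sign of the dividend
      assert(ModByPowerOf2(INT32_MIN, 3) == 0);  // the kMinInt case stays correct
      return 0;
    }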
- HMod* hmod = instr->hydrogen(); - int32_t shift = WhichPowerOf2Abs(divisor); - Label dividend_is_not_negative, done; - if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { - __ CmpP(dividend, Operand::Zero()); - __ bge(÷nd_is_not_negative, Label::kNear); - if (shift) { - // Note that this is correct even for kMinInt operands. - __ LoadComplementRR(dividend, dividend); - __ ExtractBitRange(dividend, dividend, shift - 1, 0); - __ LoadComplementRR(dividend, dividend); - if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); - } - } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { - __ mov(dividend, Operand::Zero()); - } else { - DeoptimizeIf(al, instr, DeoptimizeReason::kMinusZero); - } - __ b(&done, Label::kNear); - } - - __ bind(÷nd_is_not_negative); - if (shift) { - __ ExtractBitRange(dividend, dividend, shift - 1, 0); - } else { - __ mov(dividend, Operand::Zero()); - } - __ bind(&done); -} - -void LCodeGen::DoModByConstI(LModByConstI* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - Register result = ToRegister(instr->result()); - DCHECK(!dividend.is(result)); - - if (divisor == 0) { - DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero); - return; - } - - __ TruncatingDiv(result, dividend, Abs(divisor)); - __ mov(ip, Operand(Abs(divisor))); - __ Mul(result, result, ip); - __ SubP(result, dividend, result /*, LeaveOE, SetRC*/); - - // Check for negative zero. - HMod* hmod = instr->hydrogen(); - if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label remainder_not_zero; - __ bne(&remainder_not_zero, Label::kNear /*, cr0*/); - __ Cmp32(dividend, Operand::Zero()); - DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); - __ bind(&remainder_not_zero); - } -} - -void LCodeGen::DoModI(LModI* instr) { - HMod* hmod = instr->hydrogen(); - Register left_reg = ToRegister(instr->left()); - Register right_reg = ToRegister(instr->right()); - Register result_reg = ToRegister(instr->result()); - Label done; - - // Check for x % 0. - if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { - __ Cmp32(right_reg, Operand::Zero()); - DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero); - } - - // Check for kMinInt % -1, dr will return undefined, which is not what we - // want. We have to deopt if we care about -0, because we can't return that. - if (hmod->CheckFlag(HValue::kCanOverflow)) { - Label no_overflow_possible; - __ Cmp32(left_reg, Operand(kMinInt)); - __ bne(&no_overflow_possible, Label::kNear); - __ Cmp32(right_reg, Operand(-1)); - if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); - } else { - __ b(ne, &no_overflow_possible, Label::kNear); - __ mov(result_reg, Operand::Zero()); - __ b(&done, Label::kNear); - } - __ bind(&no_overflow_possible); - } - - // Divide instruction dr will implicity use register pair - // r0 & r1 below. - DCHECK(!left_reg.is(r1)); - DCHECK(!right_reg.is(r1)); - DCHECK(!result_reg.is(r1)); - __ LoadRR(r0, left_reg); - __ srda(r0, Operand(32)); - __ dr(r0, right_reg); // R0:R1 = R1 / divisor - R0 remainder - - __ LoadAndTestP_ExtendSrc(result_reg, r0); // Copy remainder to resultreg - - // If we care about -0, test if the dividend is <0 and the result is 0. 
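DoModByConstI and DoDivByConstI above both build on TruncatingDiv (division by an invariant constant via a precomputed multiplier) and then validate the result: the modulo path reconstructs the remainder as dividend - |divisor| * quotient, and the division path deoptimizes with kLostPrecision if multiplying the quotient back does not reproduce the dividend. Here is a sketch of both checks, with plain '/' standing in for the magic-number multiply.

    #include <cassert>
    #include <cstdint>
    #include <cstdlib>

    // Stand-in for TruncatingDiv: quotient by |divisor|, truncated toward zero.
    int32_t TruncatingDivByAbs(int32_t dividend, int32_t divisor) {
      return dividend / std::abs(divisor);
    }

    int main() {
      int32_t dividend = 29, divisor = -7;

      // DoModByConstI: remainder = dividend - |divisor| * quotient.
      int32_t q = TruncatingDivByAbs(dividend, divisor);
      int32_t remainder = dividend - std::abs(divisor) * q;
      assert(remainder == dividend % divisor);

      // DoDivByConstI: flip the sign for a negative divisor, then detect
      // (the removed code deoptimizes) if the division was not exact.
      int32_t result = divisor < 0 ? -q : q;
      bool lost_precision = result * divisor != dividend;
      assert(lost_precision == (dividend % divisor != 0));
      return 0;
    }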
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { - __ bne(&done, Label::kNear); - __ Cmp32(left_reg, Operand::Zero()); - DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); - } - - __ bind(&done); -} - -void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - Register result = ToRegister(instr->result()); - DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); - DCHECK(!result.is(dividend)); - - // Check for (0 / -x) that will produce negative zero. - HDiv* hdiv = instr->hydrogen(); - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { - __ Cmp32(dividend, Operand::Zero()); - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); - } - // Check for (kMinInt / -1). - if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { - __ Cmp32(dividend, Operand(0x80000000)); - DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow); - } - - int32_t shift = WhichPowerOf2Abs(divisor); - - // Deoptimize if remainder will not be 0. - if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) { - __ TestBitRange(dividend, shift - 1, 0, r0); - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, cr0); - } - - if (divisor == -1) { // Nice shortcut, not needed for correctness. - __ LoadComplementRR(result, dividend); - return; - } - if (shift == 0) { - __ LoadRR(result, dividend); - } else { - if (shift == 1) { - __ ShiftRight(result, dividend, Operand(31)); - } else { - __ ShiftRightArith(result, dividend, Operand(31)); - __ ShiftRight(result, result, Operand(32 - shift)); - } - __ AddP(result, dividend, result); - __ ShiftRightArith(result, result, Operand(shift)); -#if V8_TARGET_ARCH_S390X - __ lgfr(result, result); -#endif - } - if (divisor < 0) __ LoadComplementRR(result, result); -} - -void LCodeGen::DoDivByConstI(LDivByConstI* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - Register result = ToRegister(instr->result()); - DCHECK(!dividend.is(result)); - - if (divisor == 0) { - DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero); - return; - } - - // Check for (0 / -x) that will produce negative zero. - HDiv* hdiv = instr->hydrogen(); - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { - __ Cmp32(dividend, Operand::Zero()); - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); - } - - __ TruncatingDiv(result, dividend, Abs(divisor)); - if (divisor < 0) __ LoadComplementRR(result, result); - - if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { - Register scratch = scratch0(); - __ mov(ip, Operand(divisor)); - __ Mul(scratch, result, ip); - __ Cmp32(scratch, dividend); - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision); - } -} - -// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. -void LCodeGen::DoDivI(LDivI* instr) { - HBinaryOperation* hdiv = instr->hydrogen(); - const Register dividend = ToRegister(instr->dividend()); - const Register divisor = ToRegister(instr->divisor()); - Register result = ToRegister(instr->result()); - - DCHECK(!dividend.is(result)); - DCHECK(!divisor.is(result)); - - // Check for x / 0. - if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { - __ Cmp32(divisor, Operand::Zero()); - DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero); - } - - // Check for (0 / -x) that will produce negative zero. 
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label dividend_not_zero; - __ Cmp32(dividend, Operand::Zero()); - __ bne(÷nd_not_zero, Label::kNear); - __ Cmp32(divisor, Operand::Zero()); - DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); - __ bind(÷nd_not_zero); - } - - // Check for (kMinInt / -1). - if (hdiv->CheckFlag(HValue::kCanOverflow)) { - Label dividend_not_min_int; - __ Cmp32(dividend, Operand(kMinInt)); - __ bne(÷nd_not_min_int, Label::kNear); - __ Cmp32(divisor, Operand(-1)); - DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow); - __ bind(÷nd_not_min_int); - } - - __ LoadRR(r0, dividend); - __ srda(r0, Operand(32)); - __ dr(r0, divisor); // R0:R1 = R1 / divisor - R0 remainder - R1 quotient - - __ LoadAndTestP_ExtendSrc(result, r1); // Move quotient to result register - - if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { - // Deoptimize if remainder is not 0. - __ Cmp32(r0, Operand::Zero()); - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision); - } -} - -void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { - HBinaryOperation* hdiv = instr->hydrogen(); - Register dividend = ToRegister(instr->dividend()); - Register result = ToRegister(instr->result()); - int32_t divisor = instr->divisor(); - bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt); - - // If the divisor is positive, things are easy: There can be no deopts and we - // can simply do an arithmetic right shift. - int32_t shift = WhichPowerOf2Abs(divisor); - if (divisor > 0) { - if (shift || !result.is(dividend)) { - __ ShiftRightArith(result, dividend, Operand(shift)); -#if V8_TARGET_ARCH_S390X - __ lgfr(result, result); -#endif - } - return; - } - -// If the divisor is negative, we have to negate and handle edge cases. -#if V8_TARGET_ARCH_S390X - if (divisor == -1 && can_overflow) { - __ Cmp32(dividend, Operand(0x80000000)); - DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow); - } -#endif - - __ LoadComplementRR(result, dividend); - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, cr0); - } - -// If the negation could not overflow, simply shifting is OK. -#if !V8_TARGET_ARCH_S390X - if (!can_overflow) { -#endif - if (shift) { - __ ShiftRightArithP(result, result, Operand(shift)); - } - return; -#if !V8_TARGET_ARCH_S390X - } - - // Dividing by -1 is basically negation, unless we overflow. - if (divisor == -1) { - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0); - return; - } - - Label overflow_label, done; - __ b(overflow, &overflow_label, Label::kNear); - __ ShiftRightArith(result, result, Operand(shift)); -#if V8_TARGET_ARCH_S390X - __ lgfr(result, result); -#endif - __ b(&done, Label::kNear); - __ bind(&overflow_label); - __ mov(result, Operand(kMinInt / divisor)); - __ bind(&done); -#endif -} - -void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - Register result = ToRegister(instr->result()); - DCHECK(!dividend.is(result)); - - if (divisor == 0) { - DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero); - return; - } - - // Check for (0 / -x) that will produce negative zero. 
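As the comment in DoFlooringDivByPowerOf2I above says, a positive power-of-two divisor needs no deopts at all: flooring division is a single arithmetic right shift, unlike truncating '/'. A two-assert illustration (right-shifting a negative value is defined as an arithmetic shift from C++20 on and behaves that way on mainstream compilers):

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t x = -7;
      assert((x >> 1) == -4);  // arithmetic shift rounds toward -infinity: floor(-7 / 2)
      assert((-7 / 2) == -3);  // truncating '/' rounds toward zero instead
      return 0;
    }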
- HMathFloorOfDiv* hdiv = instr->hydrogen(); - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { - __ Cmp32(dividend, Operand::Zero()); - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); - } - - // Easy case: We need no dynamic check for the dividend and the flooring - // division is the same as the truncating division. - if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || - (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { - __ TruncatingDiv(result, dividend, Abs(divisor)); - if (divisor < 0) __ LoadComplementRR(result, result); - return; - } - - // In the general case we may need to adjust before and after the truncating - // division to get a flooring division. - Register temp = ToRegister(instr->temp()); - DCHECK(!temp.is(dividend) && !temp.is(result)); - Label needs_adjustment, done; - __ Cmp32(dividend, Operand::Zero()); - __ b(divisor > 0 ? lt : gt, &needs_adjustment); - __ TruncatingDiv(result, dividend, Abs(divisor)); - if (divisor < 0) __ LoadComplementRR(result, result); - __ b(&done, Label::kNear); - __ bind(&needs_adjustment); - __ AddP(temp, dividend, Operand(divisor > 0 ? 1 : -1)); - __ TruncatingDiv(result, temp, Abs(divisor)); - if (divisor < 0) __ LoadComplementRR(result, result); - __ SubP(result, result, Operand(1)); - __ bind(&done); -} - -// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. -void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { - HBinaryOperation* hdiv = instr->hydrogen(); - const Register dividend = ToRegister(instr->dividend()); - const Register divisor = ToRegister(instr->divisor()); - Register result = ToRegister(instr->result()); - - DCHECK(!dividend.is(result)); - DCHECK(!divisor.is(result)); - - // Check for x / 0. - if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { - __ Cmp32(divisor, Operand::Zero()); - DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero); - } - - // Check for (0 / -x) that will produce negative zero. - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label dividend_not_zero; - __ Cmp32(dividend, Operand::Zero()); - __ bne(÷nd_not_zero, Label::kNear); - __ Cmp32(divisor, Operand::Zero()); - DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); - __ bind(÷nd_not_zero); - } - - // Check for (kMinInt / -1). - if (hdiv->CheckFlag(HValue::kCanOverflow)) { - Label no_overflow_possible; - __ Cmp32(dividend, Operand(kMinInt)); - __ bne(&no_overflow_possible, Label::kNear); - __ Cmp32(divisor, Operand(-1)); - if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow); - } else { - __ bne(&no_overflow_possible, Label::kNear); - __ LoadRR(result, dividend); - } - __ bind(&no_overflow_possible); - } - - __ LoadRR(r0, dividend); - __ srda(r0, Operand(32)); - __ dr(r0, divisor); // R0:R1 = R1 / divisor - R0 remainder - R1 quotient - - __ lr(result, r1); // Move quotient to result register - - Label done; - Register scratch = scratch0(); - // If both operands have the same sign then we are done. - __ Xor(scratch, dividend, divisor); - __ ltr(scratch, scratch); // use 32 bit version LoadAndTestRR even in 64 bit - __ bge(&done, Label::kNear); - - // If there is no remainder then we are done. - if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) { - __ msrkc(scratch, result, divisor); - } else { - __ lr(scratch, result); - __ msr(scratch, divisor); - } - __ Cmp32(dividend, scratch); - __ beq(&done, Label::kNear); - - // We performed a truncating division. Correct the result. 
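DoFlooringDivI above turns the truncating quotient into a flooring one: if dividend and divisor have opposite signs (the Xor test) and the division left a remainder, the Sub32 just below subtracts one from the quotient. The same adjustment as portable C++ (kMinInt / -1 is not exercised here; the removed code deoptimizes on that case separately):

    #include <cassert>
    #include <cstdint>

    // Floored division built on truncating '/', mirroring the sign-xor plus
    // remainder check of DoFlooringDivI.
    int32_t FlooringDiv(int32_t dividend, int32_t divisor) {
      int32_t result = dividend / divisor;    // truncating quotient
      if (((dividend ^ divisor) < 0) &&       // operands have opposite signs
          result * divisor != dividend) {     // and there was a remainder
        result -= 1;                          // correct toward -infinity
      }
      return result;
    }

    int main() {
      assert(FlooringDiv(7, 2) == 3);
      assert(FlooringDiv(-7, 2) == -4);
      assert(FlooringDiv(7, -2) == -4);
      assert(FlooringDiv(-7, -2) == 3);
      return 0;
    }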
- __ Sub32(result, result, Operand(1)); - __ bind(&done); -} - -void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { - DoubleRegister addend = ToDoubleRegister(instr->addend()); - DoubleRegister multiplier = ToDoubleRegister(instr->multiplier()); - DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand()); - DoubleRegister result = ToDoubleRegister(instr->result()); - - // Unable to use madbr as the intermediate value is not rounded - // to proper precision - __ ldr(result, multiplier); - __ mdbr(result, multiplicand); - __ adbr(result, addend); -} - -void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) { - DoubleRegister minuend = ToDoubleRegister(instr->minuend()); - DoubleRegister multiplier = ToDoubleRegister(instr->multiplier()); - DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand()); - DoubleRegister result = ToDoubleRegister(instr->result()); - - // Unable to use msdbr as the intermediate value is not rounded - // to proper precision - __ ldr(result, multiplier); - __ mdbr(result, multiplicand); - __ sdbr(result, minuend); -} - -void LCodeGen::DoMulI(LMulI* instr) { - Register scratch = scratch0(); - Register result = ToRegister(instr->result()); - // Note that result may alias left. - Register left = ToRegister(instr->left()); - LOperand* right_op = instr->right(); - - bool bailout_on_minus_zero = - instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); - bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); - - if (right_op->IsConstantOperand()) { - int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); - - if (bailout_on_minus_zero && (constant < 0)) { - // The case of a null constant will be handled separately. - // If constant is negative and left is null, the result should be -0. - __ CmpP(left, Operand::Zero()); - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); - } - - switch (constant) { - case -1: - if (can_overflow) { -#if V8_TARGET_ARCH_S390X - if (instr->hydrogen()->representation().IsSmi()) { -#endif - __ LoadComplementRR(result, left); - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); -#if V8_TARGET_ARCH_S390X - } else { - __ LoadComplementRR(result, left); - __ TestIfInt32(result); - DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow); - } -#endif - } else { - __ LoadComplementRR(result, left); - } - break; - case 0: - if (bailout_on_minus_zero) { -// If left is strictly negative and the constant is null, the -// result is -0. Deoptimize if required, otherwise return 0. -#if V8_TARGET_ARCH_S390X - if (instr->hydrogen()->representation().IsSmi()) { -#endif - __ Cmp32(left, Operand::Zero()); -#if V8_TARGET_ARCH_S390X - } else { - __ Cmp32(left, Operand::Zero()); - } -#endif - DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); - } - __ LoadImmP(result, Operand::Zero()); - break; - case 1: - __ Move(result, left); - break; - default: - // Multiplying by powers of two and powers of two plus or minus - // one can be done faster with shifted operands. - // For other constants we emit standard code. - int32_t mask = constant >> 31; - uint32_t constant_abs = (constant + mask) ^ mask; - - if (base::bits::IsPowerOfTwo32(constant_abs)) { - int32_t shift = WhichPowerOf2(constant_abs); - __ ShiftLeftP(result, left, Operand(shift)); - // Correct the sign of the result if the constant is negative. 
- if (constant < 0) __ LoadComplementRR(result, result); - } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) { - int32_t shift = WhichPowerOf2(constant_abs - 1); - __ ShiftLeftP(scratch, left, Operand(shift)); - __ AddP(result, scratch, left); - // Correct the sign of the result if the constant is negative. - if (constant < 0) __ LoadComplementRR(result, result); - } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) { - int32_t shift = WhichPowerOf2(constant_abs + 1); - __ ShiftLeftP(scratch, left, Operand(shift)); - __ SubP(result, scratch, left); - // Correct the sign of the result if the constant is negative. - if (constant < 0) __ LoadComplementRR(result, result); - } else { - // Generate standard code. - __ Move(result, left); - __ MulP(result, Operand(constant)); - } - } - - } else { - DCHECK(right_op->IsRegister()); - Register right = ToRegister(right_op); - - if (can_overflow) { - if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) { - // result = left * right. - if (instr->hydrogen()->representation().IsSmi()) { - __ SmiUntag(scratch, right); - __ MulPWithCondition(result, left, scratch); - } else { - __ msrkc(result, left, right); - __ LoadW(result, result); - } - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); - } else { -#if V8_TARGET_ARCH_S390X - // result = left * right. - if (instr->hydrogen()->representation().IsSmi()) { - __ SmiUntag(result, left); - __ SmiUntag(scratch, right); - __ msgr(result, scratch); - } else { - __ LoadRR(result, left); - __ msgr(result, right); - } - __ TestIfInt32(result); - DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow); - if (instr->hydrogen()->representation().IsSmi()) { - __ SmiTag(result); - } -#else - // r0:scratch = scratch * right - if (instr->hydrogen()->representation().IsSmi()) { - __ SmiUntag(result, left); - __ lgfr(result, result); - __ msgfr(result, right); - } else { - // r0:scratch = scratch * right - __ lgfr(result, left); - __ msgfr(result, right); - } - __ TestIfInt32(result); - DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow); -#endif - } - } else { - if (instr->hydrogen()->representation().IsSmi()) { - __ SmiUntag(result, left); - __ Mul(result, result, right); - } else { - __ Mul(result, left, right); - } - } - - if (bailout_on_minus_zero) { - Label done; -#if V8_TARGET_ARCH_S390X - if (instr->hydrogen()->representation().IsSmi()) { -#endif - __ XorP(r0, left, right); - __ LoadAndTestRR(r0, r0); - __ bge(&done, Label::kNear); -#if V8_TARGET_ARCH_S390X - } else { - __ XorP(r0, left, right); - __ Cmp32(r0, Operand::Zero()); - __ bge(&done, Label::kNear); - } -#endif - // Bail out if the result is minus zero. - __ CmpP(result, Operand::Zero()); - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); - __ bind(&done); - } - } -} - -void LCodeGen::DoBitI(LBitI* instr) { - LOperand* left_op = instr->left(); - LOperand* right_op = instr->right(); - DCHECK(left_op->IsRegister()); - Register left = ToRegister(left_op); - Register result = ToRegister(instr->result()); - - if (right_op->IsConstantOperand()) { - switch (instr->op()) { - case Token::BIT_AND: - __ AndP(result, left, Operand(ToOperand(right_op))); - break; - case Token::BIT_OR: - __ OrP(result, left, Operand(ToOperand(right_op))); - break; - case Token::BIT_XOR: - __ XorP(result, left, Operand(ToOperand(right_op))); - break; - default: - UNREACHABLE(); - break; - } - } else if (right_op->IsStackSlot()) { - // Reg-Mem instruction clobbers, so copy src to dst first. 
- if (!left.is(result)) __ LoadRR(result, left); - switch (instr->op()) { - case Token::BIT_AND: - __ AndP(result, ToMemOperand(right_op)); - break; - case Token::BIT_OR: - __ OrP(result, ToMemOperand(right_op)); - break; - case Token::BIT_XOR: - __ XorP(result, ToMemOperand(right_op)); - break; - default: - UNREACHABLE(); - break; - } - } else { - DCHECK(right_op->IsRegister()); - - switch (instr->op()) { - case Token::BIT_AND: - __ AndP(result, left, ToRegister(right_op)); - break; - case Token::BIT_OR: - __ OrP(result, left, ToRegister(right_op)); - break; - case Token::BIT_XOR: - __ XorP(result, left, ToRegister(right_op)); - break; - default: - UNREACHABLE(); - break; - } - } -} - -void LCodeGen::DoShiftI(LShiftI* instr) { - // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so - // result may alias either of them. - LOperand* right_op = instr->right(); - Register left = ToRegister(instr->left()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - if (right_op->IsRegister()) { - // Mask the right_op operand. - __ AndP(scratch, ToRegister(right_op), Operand(0x1F)); - switch (instr->op()) { - case Token::ROR: - // rotate_right(a, b) == rotate_left(a, 32 - b) - __ LoadComplementRR(scratch, scratch); - __ rll(result, left, scratch, Operand(32)); -#if V8_TARGET_ARCH_S390X - __ lgfr(result, result); -#endif - break; - case Token::SAR: - __ ShiftRightArith(result, left, scratch); -#if V8_TARGET_ARCH_S390X - __ lgfr(result, result); -#endif - break; - case Token::SHR: - __ ShiftRight(result, left, scratch); -#if V8_TARGET_ARCH_S390X - __ lgfr(result, result); -#endif - if (instr->can_deopt()) { -#if V8_TARGET_ARCH_S390X - __ ltgfr(result, result /*, SetRC*/); -#else - __ ltr(result, result); // Set the <,==,> condition -#endif - DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, cr0); - } - break; - case Token::SHL: - __ ShiftLeft(result, left, scratch); -#if V8_TARGET_ARCH_S390X - __ lgfr(result, result); -#endif - break; - default: - UNREACHABLE(); - break; - } - } else { - // Mask the right_op operand. 
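DoShiftI above masks a register shift count to five bits (AndP with 0x1F) and, because the machine only rotates left, implements Token::ROR as rotate-left by 32 - n, exactly as its comment states. A small sketch of both details; the masking also keeps every shift count below 32, so the sketch never performs an undefined shift.

    #include <cassert>
    #include <cstdint>

    // Rotate right by n, expressed as rotate-left by (32 - n) like the ROR
    // case above; shift counts are masked to five bits first.
    uint32_t RotateRight32(uint32_t x, uint32_t n) {
      n &= 0x1F;                              // only the low five bits matter
      uint32_t left_count = (32 - n) & 0x1F;  // rotate_right(a, n) == rotate_left(a, 32 - n)
      return (x << left_count) | (x >> ((32 - left_count) & 0x1F));
    }

    int main() {
      assert(RotateRight32(0x80000001u, 1) == 0xC0000000u);
      assert(RotateRight32(0x12345678u, 0) == 0x12345678u);
      assert(RotateRight32(0x12345678u, 32) == 0x12345678u);  // count masked to 0
      return 0;
    }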
- int value = ToInteger32(LConstantOperand::cast(right_op)); - uint8_t shift_count = static_cast(value & 0x1F); - switch (instr->op()) { - case Token::ROR: - if (shift_count != 0) { - __ rll(result, left, Operand(32 - shift_count)); -#if V8_TARGET_ARCH_S390X - __ lgfr(result, result); -#endif - } else { - __ Move(result, left); - } - break; - case Token::SAR: - if (shift_count != 0) { - __ ShiftRightArith(result, left, Operand(shift_count)); -#if V8_TARGET_ARCH_S390X - __ lgfr(result, result); -#endif - } else { - __ Move(result, left); - } - break; - case Token::SHR: - if (shift_count != 0) { - __ ShiftRight(result, left, Operand(shift_count)); -#if V8_TARGET_ARCH_S390X - __ lgfr(result, result); -#endif - } else { - if (instr->can_deopt()) { - __ Cmp32(left, Operand::Zero()); - DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue); - } - __ Move(result, left); - } - break; - case Token::SHL: - if (shift_count != 0) { -#if V8_TARGET_ARCH_S390X - if (instr->hydrogen_value()->representation().IsSmi()) { - __ ShiftLeftP(result, left, Operand(shift_count)); -#else - if (instr->hydrogen_value()->representation().IsSmi() && - instr->can_deopt()) { - if (shift_count != 1) { - __ ShiftLeft(result, left, Operand(shift_count - 1)); -#if V8_TARGET_ARCH_S390X - __ lgfr(result, result); -#endif - __ SmiTagCheckOverflow(result, result, scratch); - } else { - __ SmiTagCheckOverflow(result, left, scratch); - } - DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0); -#endif - } else { - __ ShiftLeft(result, left, Operand(shift_count)); -#if V8_TARGET_ARCH_S390X - __ lgfr(result, result); -#endif - } - } else { - __ Move(result, left); - } - break; - default: - UNREACHABLE(); - break; - } - } -} - -void LCodeGen::DoSubI(LSubI* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - LOperand* result = instr->result(); - - bool isInteger = !(instr->hydrogen()->representation().IsSmi() || - instr->hydrogen()->representation().IsExternal()); - -#if V8_TARGET_ARCH_S390X - // The overflow detection needs to be tested on the lower 32-bits. - // As a result, on 64-bit, we need to force 32-bit arithmetic operations - // to set the CC overflow bit properly. The result is then sign-extended. 
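The comment that ends the hunk above notes that, on the 64-bit build, overflow has to be observed on the low 32 bits: the operation is issued as a 32-bit Add32/Sub32 so the condition code reflects 32-bit overflow, and the result is then sign-extended (lgfr). One portable way to express the same predicate is to compute in 64 bits and test whether the result still fits in an int32; the sketch below is that equivalent check, not the removed code's condition-code mechanism.

    #include <cassert>
    #include <cstdint>

    // Returns true (overflow) if a - b does not fit in int32. The narrowing
    // cast is modular on mainstream targets and defined by the standard
    // since C++20.
    bool Sub32Overflows(int32_t a, int32_t b, int32_t* out) {
      int64_t wide = static_cast<int64_t>(a) - static_cast<int64_t>(b);
      *out = static_cast<int32_t>(wide);          // the sign-extended 32-bit result
      return wide != static_cast<int64_t>(*out);  // lost bits mean 32-bit overflow
    }

    int main() {
      int32_t r;
      assert(!Sub32Overflows(5, 3, &r) && r == 2);
      assert(Sub32Overflows(INT32_MIN, 1, &r));  // would wrap in 32 bits: deopt case
      return 0;
    }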
- bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); -#else - bool checkOverflow = true; -#endif - - if (right->IsConstantOperand()) { - if (!isInteger || !checkOverflow) { - __ SubP(ToRegister(result), ToRegister(left), ToOperand(right)); - } else { - // -(MinInt) will overflow - if (ToInteger32(LConstantOperand::cast(right)) == kMinInt) { - __ Load(scratch0(), ToOperand(right)); - __ Sub32(ToRegister(result), ToRegister(left), scratch0()); - } else { - __ Sub32(ToRegister(result), ToRegister(left), ToOperand(right)); - } - } - } else if (right->IsRegister()) { - if (!isInteger) - __ SubP(ToRegister(result), ToRegister(left), ToRegister(right)); - else if (!checkOverflow) - __ SubP_ExtendSrc(ToRegister(result), ToRegister(left), - ToRegister(right)); - else - __ Sub32(ToRegister(result), ToRegister(left), ToRegister(right)); - } else { - if (!left->Equals(instr->result())) - __ LoadRR(ToRegister(result), ToRegister(left)); - - MemOperand mem = ToMemOperand(right); - if (!isInteger) { - __ SubP(ToRegister(result), mem); - } else { -#if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN - // We want to read the 32-bits directly from memory - MemOperand Upper32Mem = MemOperand(mem.rb(), mem.rx(), mem.offset() + 4); -#else - MemOperand Upper32Mem = ToMemOperand(right); -#endif - if (checkOverflow) { - __ Sub32(ToRegister(result), Upper32Mem); - } else { - __ SubP_ExtendSrc(ToRegister(result), Upper32Mem); - } - } - } - -#if V8_TARGET_ARCH_S390X - if (isInteger && checkOverflow) - __ lgfr(ToRegister(result), ToRegister(result)); -#endif - if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); - } -} - -void LCodeGen::DoConstantI(LConstantI* instr) { - Register dst = ToRegister(instr->result()); - if (instr->value() == 0) - __ XorP(dst, dst); - else - __ Load(dst, Operand(instr->value())); -} - -void LCodeGen::DoConstantS(LConstantS* instr) { - __ LoadSmiLiteral(ToRegister(instr->result()), instr->value()); -} - -void LCodeGen::DoConstantD(LConstantD* instr) { - DCHECK(instr->result()->IsDoubleRegister()); - DoubleRegister result = ToDoubleRegister(instr->result()); - uint64_t bits = instr->bits(); - __ LoadDoubleLiteral(result, bits, scratch0()); -} - -void LCodeGen::DoConstantE(LConstantE* instr) { - __ mov(ToRegister(instr->result()), Operand(instr->value())); -} - -void LCodeGen::DoConstantT(LConstantT* instr) { - Handle object = instr->value(isolate()); - AllowDeferredHandleDereference smi_check; - __ Move(ToRegister(instr->result()), object); -} - -MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index, - String::Encoding encoding) { - if (index->IsConstantOperand()) { - int offset = ToInteger32(LConstantOperand::cast(index)); - if (encoding == String::TWO_BYTE_ENCODING) { - offset *= kUC16Size; - } - STATIC_ASSERT(kCharSize == 1); - return FieldMemOperand(string, SeqString::kHeaderSize + offset); - } - Register scratch = scratch0(); - DCHECK(!scratch.is(string)); - DCHECK(!scratch.is(ToRegister(index))); - // TODO(joransiu) : Fold Add into FieldMemOperand - if (encoding == String::ONE_BYTE_ENCODING) { - __ AddP(scratch, string, ToRegister(index)); - } else { - STATIC_ASSERT(kUC16Size == 2); - __ ShiftLeftP(scratch, ToRegister(index), Operand(1)); - __ AddP(scratch, string, scratch); - } - return FieldMemOperand(scratch, SeqString::kHeaderSize); -} - -void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { - String::Encoding encoding = instr->hydrogen()->encoding(); - Register 
string = ToRegister(instr->string()); - Register result = ToRegister(instr->result()); - - if (FLAG_debug_code) { - Register scratch = scratch0(); - __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset)); - __ llc(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); - - __ AndP(scratch, scratch, - Operand(kStringRepresentationMask | kStringEncodingMask)); - static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; - static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; - __ CmpP(scratch, - Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type - : two_byte_seq_type)); - __ Check(eq, kUnexpectedStringType); - } - - MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); - if (encoding == String::ONE_BYTE_ENCODING) { - __ llc(result, operand); - } else { - __ llh(result, operand); - } -} - -void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { - String::Encoding encoding = instr->hydrogen()->encoding(); - Register string = ToRegister(instr->string()); - Register value = ToRegister(instr->value()); - - if (FLAG_debug_code) { - Register index = ToRegister(instr->index()); - static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; - static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; - int encoding_mask = - instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING - ? one_byte_seq_type - : two_byte_seq_type; - __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask); - } - - MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); - if (encoding == String::ONE_BYTE_ENCODING) { - __ stc(value, operand); - } else { - __ sth(value, operand); - } -} - -void LCodeGen::DoAddI(LAddI* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - LOperand* result = instr->result(); - bool isInteger = !(instr->hydrogen()->representation().IsSmi() || - instr->hydrogen()->representation().IsExternal()); -#if V8_TARGET_ARCH_S390X - // The overflow detection needs to be tested on the lower 32-bits. - // As a result, on 64-bit, we need to force 32-bit arithmetic operations - // to set the CC overflow bit properly. The result is then sign-extended. 
-  bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
-#else
-  bool checkOverflow = true;
-#endif
-
-  if (right->IsConstantOperand()) {
-    if (!isInteger || !checkOverflow)
-      __ AddP(ToRegister(result), ToRegister(left), ToOperand(right));
-    else
-      __ Add32(ToRegister(result), ToRegister(left), ToOperand(right));
-  } else if (right->IsRegister()) {
-    if (!isInteger)
-      __ AddP(ToRegister(result), ToRegister(left), ToRegister(right));
-    else if (!checkOverflow)
-      __ AddP_ExtendSrc(ToRegister(result), ToRegister(left),
-                        ToRegister(right));
-    else
-      __ Add32(ToRegister(result), ToRegister(left), ToRegister(right));
-  } else {
-    if (!left->Equals(instr->result()))
-      __ LoadRR(ToRegister(result), ToRegister(left));
-
-    MemOperand mem = ToMemOperand(right);
-    if (!isInteger) {
-      __ AddP(ToRegister(result), mem);
-    } else {
-#if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
-      // We want to read the 32-bits directly from memory
-      MemOperand Upper32Mem = MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
-#else
-      MemOperand Upper32Mem = ToMemOperand(right);
-#endif
-      if (checkOverflow) {
-        __ Add32(ToRegister(result), Upper32Mem);
-      } else {
-        __ AddP_ExtendSrc(ToRegister(result), Upper32Mem);
-      }
-    }
-  }
-
-#if V8_TARGET_ARCH_S390X
-  if (isInteger && checkOverflow)
-    __ lgfr(ToRegister(result), ToRegister(result));
-#endif
-  // Deoptimize on overflow
-  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-    DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
-  }
-}
-
-void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
-  LOperand* left = instr->left();
-  LOperand* right = instr->right();
-  HMathMinMax::Operation operation = instr->hydrogen()->operation();
-  Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge;
-  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
-    Register left_reg = ToRegister(left);
-    Register right_reg = EmitLoadRegister(right, ip);
-    Register result_reg = ToRegister(instr->result());
-    Label return_left, done;
-#if V8_TARGET_ARCH_S390X
-    if (instr->hydrogen_value()->representation().IsSmi()) {
-#endif
-      __ CmpP(left_reg, right_reg);
-#if V8_TARGET_ARCH_S390X
-    } else {
-      __ Cmp32(left_reg, right_reg);
-    }
-#endif
-    __ b(cond, &return_left, Label::kNear);
-    __ Move(result_reg, right_reg);
-    __ b(&done, Label::kNear);
-    __ bind(&return_left);
-    __ Move(result_reg, left_reg);
-    __ bind(&done);
-  } else {
-    DCHECK(instr->hydrogen()->representation().IsDouble());
-    DoubleRegister left_reg = ToDoubleRegister(left);
-    DoubleRegister right_reg = ToDoubleRegister(right);
-    DoubleRegister result_reg = ToDoubleRegister(instr->result());
-    Label check_nan_left, check_zero, return_left, return_right, done;
-    __ cdbr(left_reg, right_reg);
-    __ bunordered(&check_nan_left, Label::kNear);
-    __ beq(&check_zero);
-    __ b(cond, &return_left, Label::kNear);
-    __ b(&return_right, Label::kNear);
-
-    __ bind(&check_zero);
-    __ lzdr(kDoubleRegZero);
-    __ cdbr(left_reg, kDoubleRegZero);
-    __ bne(&return_left, Label::kNear);  // left == right != 0.
-
-    // At this point, both left and right are either 0 or -0.
-    // N.B.
The following works because +0 + -0 == +0 - if (operation == HMathMinMax::kMathMin) { - // For min we want logical-or of sign bit: -(-L + -R) - __ lcdbr(left_reg, left_reg); - __ ldr(result_reg, left_reg); - if (left_reg.is(right_reg)) { - __ adbr(result_reg, right_reg); - } else { - __ sdbr(result_reg, right_reg); - } - __ lcdbr(result_reg, result_reg); - } else { - // For max we want logical-and of sign bit: (L + R) - __ ldr(result_reg, left_reg); - __ adbr(result_reg, right_reg); - } - __ b(&done, Label::kNear); - - __ bind(&check_nan_left); - __ cdbr(left_reg, left_reg); - __ bunordered(&return_left, Label::kNear); // left == NaN. - - __ bind(&return_right); - if (!right_reg.is(result_reg)) { - __ ldr(result_reg, right_reg); - } - __ b(&done, Label::kNear); - - __ bind(&return_left); - if (!left_reg.is(result_reg)) { - __ ldr(result_reg, left_reg); - } - __ bind(&done); - } -} - -void LCodeGen::DoArithmeticD(LArithmeticD* instr) { - DoubleRegister left = ToDoubleRegister(instr->left()); - DoubleRegister right = ToDoubleRegister(instr->right()); - DoubleRegister result = ToDoubleRegister(instr->result()); - switch (instr->op()) { - case Token::ADD: - if (CpuFeatures::IsSupported(VECTOR_FACILITY)) { - __ vfa(result, left, right); - } else { - DCHECK(result.is(left)); - __ adbr(result, right); - } - break; - case Token::SUB: - if (CpuFeatures::IsSupported(VECTOR_FACILITY)) { - __ vfs(result, left, right); - } else { - DCHECK(result.is(left)); - __ sdbr(result, right); - } - break; - case Token::MUL: - if (CpuFeatures::IsSupported(VECTOR_FACILITY)) { - __ vfm(result, left, right); - } else { - DCHECK(result.is(left)); - __ mdbr(result, right); - } - break; - case Token::DIV: - if (CpuFeatures::IsSupported(VECTOR_FACILITY)) { - __ vfd(result, left, right); - } else { - DCHECK(result.is(left)); - __ ddbr(result, right); - } - break; - case Token::MOD: { - __ PrepareCallCFunction(0, 2, scratch0()); - __ MovToFloatParameters(left, right); - __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), - 0, 2); - // Move the result in the double result register. 
-      __ MovFromFloatResult(result);
-      break;
-    }
-    default:
-      UNREACHABLE();
-      break;
-  }
-}
-
-void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->left()).is(r3));
-  DCHECK(ToRegister(instr->right()).is(r2));
-  DCHECK(ToRegister(instr->result()).is(r2));
-
-  UNREACHABLE();
-}
-
-template <class InstrType>
-void LCodeGen::EmitBranch(InstrType instr, Condition cond) {
-  int left_block = instr->TrueDestination(chunk_);
-  int right_block = instr->FalseDestination(chunk_);
-
-  int next_block = GetNextEmittedBlock();
-
-  if (right_block == left_block || cond == al) {
-    EmitGoto(left_block);
-  } else if (left_block == next_block) {
-    __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block));
-  } else if (right_block == next_block) {
-    __ b(cond, chunk_->GetAssemblyLabel(left_block));
-  } else {
-    __ b(cond, chunk_->GetAssemblyLabel(left_block));
-    __ b(chunk_->GetAssemblyLabel(right_block));
-  }
-}
-
-template <class InstrType>
-void LCodeGen::EmitTrueBranch(InstrType instr, Condition cond) {
-  int true_block = instr->TrueDestination(chunk_);
-  __ b(cond, chunk_->GetAssemblyLabel(true_block));
-}
-
-template <class InstrType>
-void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond) {
-  int false_block = instr->FalseDestination(chunk_);
-  __ b(cond, chunk_->GetAssemblyLabel(false_block));
-}
-
-void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); }
-
-void LCodeGen::DoBranch(LBranch* instr) {
-  Representation r = instr->hydrogen()->value()->representation();
-  DoubleRegister dbl_scratch = double_scratch0();
-
-  if (r.IsInteger32()) {
-    DCHECK(!info()->IsStub());
-    Register reg = ToRegister(instr->value());
-    __ Cmp32(reg, Operand::Zero());
-    EmitBranch(instr, ne);
-  } else if (r.IsSmi()) {
-    DCHECK(!info()->IsStub());
-    Register reg = ToRegister(instr->value());
-    __ CmpP(reg, Operand::Zero());
-    EmitBranch(instr, ne);
-  } else if (r.IsDouble()) {
-    DCHECK(!info()->IsStub());
-    DoubleRegister reg = ToDoubleRegister(instr->value());
-    __ lzdr(kDoubleRegZero);
-    __ cdbr(reg, kDoubleRegZero);
-    // Test the double value. Zero and NaN are false.
-    Condition lt_gt = static_cast<Condition>(lt | gt);
-
-    EmitBranch(instr, lt_gt);
-  } else {
-    DCHECK(r.IsTagged());
-    Register reg = ToRegister(instr->value());
-    HType type = instr->hydrogen()->value()->type();
-    if (type.IsBoolean()) {
-      DCHECK(!info()->IsStub());
-      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
-      EmitBranch(instr, eq);
-    } else if (type.IsSmi()) {
-      DCHECK(!info()->IsStub());
-      __ CmpP(reg, Operand::Zero());
-      EmitBranch(instr, ne);
-    } else if (type.IsJSArray()) {
-      DCHECK(!info()->IsStub());
-      EmitBranch(instr, al);
-    } else if (type.IsHeapNumber()) {
-      DCHECK(!info()->IsStub());
-      __ LoadDouble(dbl_scratch,
-                    FieldMemOperand(reg, HeapNumber::kValueOffset));
-      // Test the double value. Zero and NaN are false.
-      __ lzdr(kDoubleRegZero);
-      __ cdbr(dbl_scratch, kDoubleRegZero);
-      Condition lt_gt = static_cast<Condition>(lt | gt);
-      EmitBranch(instr, lt_gt);
-    } else if (type.IsString()) {
-      DCHECK(!info()->IsStub());
-      __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
-      __ CmpP(ip, Operand::Zero());
-      EmitBranch(instr, ne);
-    } else {
-      ToBooleanHints expected = instr->hydrogen()->expected_input_types();
-      // Avoid deopts in the case where we've never executed this path before.
-      if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
-
-      if (expected & ToBooleanHint::kUndefined) {
-        // undefined -> false.
-        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
-        __ beq(instr->FalseLabel(chunk_));
-      }
-      if (expected & ToBooleanHint::kBoolean) {
-        // Boolean -> its value.
-        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
-        __ beq(instr->TrueLabel(chunk_));
-        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
-        __ beq(instr->FalseLabel(chunk_));
-      }
-      if (expected & ToBooleanHint::kNull) {
-        // 'null' -> false.
-        __ CompareRoot(reg, Heap::kNullValueRootIndex);
-        __ beq(instr->FalseLabel(chunk_));
-      }
-
-      if (expected & ToBooleanHint::kSmallInteger) {
-        // Smis: 0 -> false, all other -> true.
-        __ CmpP(reg, Operand::Zero());
-        __ beq(instr->FalseLabel(chunk_));
-        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
-      } else if (expected & ToBooleanHint::kNeedsMap) {
-        // If we need a map later and have a Smi -> deopt.
-        __ TestIfSmi(reg);
-        DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
-      }
-
-      const Register map = scratch0();
-      if (expected & ToBooleanHint::kNeedsMap) {
-        __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));
-
-        if (expected & ToBooleanHint::kCanBeUndetectable) {
-          // Undetectable -> false.
-          __ tm(FieldMemOperand(map, Map::kBitFieldOffset),
-                Operand(1 << Map::kIsUndetectable));
-          __ bne(instr->FalseLabel(chunk_));
-        }
-      }
-
-      if (expected & ToBooleanHint::kReceiver) {
-        // spec object -> true.
-        __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
-        __ bge(instr->TrueLabel(chunk_));
-      }
-
-      if (expected & ToBooleanHint::kString) {
-        // String value -> false iff empty.
-        Label not_string;
-        __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
-        __ bge(&not_string, Label::kNear);
-        __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
-        __ CmpP(ip, Operand::Zero());
-        __ bne(instr->TrueLabel(chunk_));
-        __ b(instr->FalseLabel(chunk_));
-        __ bind(&not_string);
-      }
-
-      if (expected & ToBooleanHint::kSymbol) {
-        // Symbol value -> true.
-        __ CompareInstanceType(map, ip, SYMBOL_TYPE);
-        __ beq(instr->TrueLabel(chunk_));
-      }
-
-      if (expected & ToBooleanHint::kHeapNumber) {
-        // heap number -> false iff +0, -0, or NaN.
-        Label not_heap_number;
-        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
-        __ bne(&not_heap_number, Label::kNear);
-        __ LoadDouble(dbl_scratch,
-                      FieldMemOperand(reg, HeapNumber::kValueOffset));
-        __ lzdr(kDoubleRegZero);
-        __ cdbr(dbl_scratch, kDoubleRegZero);
-        __ bunordered(instr->FalseLabel(chunk_));  // NaN -> false.
-        __ beq(instr->FalseLabel(chunk_));  // +0, -0 -> false.
-        __ b(instr->TrueLabel(chunk_));
-        __ bind(&not_heap_number);
-      }
-
-      if (expected != ToBooleanHint::kAny) {
-        // We've seen something for the first time -> deopt.
-        // This can only happen if we are not generic already.
- DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject); - } - } - } -} - -void LCodeGen::EmitGoto(int block) { - if (!IsNextEmittedBlock(block)) { - __ b(chunk_->GetAssemblyLabel(LookupDestination(block))); - } -} - -void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); } - -Condition LCodeGen::TokenToCondition(Token::Value op) { - Condition cond = kNoCondition; - switch (op) { - case Token::EQ: - case Token::EQ_STRICT: - cond = eq; - break; - case Token::NE: - case Token::NE_STRICT: - cond = ne; - break; - case Token::LT: - cond = lt; - break; - case Token::GT: - cond = gt; - break; - case Token::LTE: - cond = le; - break; - case Token::GTE: - cond = ge; - break; - case Token::IN: - case Token::INSTANCEOF: - default: - UNREACHABLE(); - } - return cond; -} - -void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - bool is_unsigned = - instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) || - instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32); - Condition cond = TokenToCondition(instr->op()); - - if (left->IsConstantOperand() && right->IsConstantOperand()) { - // We can statically evaluate the comparison. - double left_val = ToDouble(LConstantOperand::cast(left)); - double right_val = ToDouble(LConstantOperand::cast(right)); - int next_block = Token::EvalComparison(instr->op(), left_val, right_val) - ? instr->TrueDestination(chunk_) - : instr->FalseDestination(chunk_); - EmitGoto(next_block); - } else { - if (instr->is_double()) { - // Compare left and right operands as doubles and load the - // resulting flags into the normal status register. - __ cdbr(ToDoubleRegister(left), ToDoubleRegister(right)); - // If a NaN is involved, i.e. the result is unordered, - // jump to false block label. - __ bunordered(instr->FalseLabel(chunk_)); - } else { - if (right->IsConstantOperand()) { - int32_t value = ToInteger32(LConstantOperand::cast(right)); - if (instr->hydrogen_value()->representation().IsSmi()) { - if (is_unsigned) { - __ CmpLogicalSmiLiteral(ToRegister(left), Smi::FromInt(value), r0); - } else { - __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0); - } - } else { - if (is_unsigned) { - __ CmpLogical32(ToRegister(left), ToOperand(right)); - } else { - __ Cmp32(ToRegister(left), ToOperand(right)); - } - } - } else if (left->IsConstantOperand()) { - int32_t value = ToInteger32(LConstantOperand::cast(left)); - if (instr->hydrogen_value()->representation().IsSmi()) { - if (is_unsigned) { - __ CmpLogicalSmiLiteral(ToRegister(right), Smi::FromInt(value), r0); - } else { - __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0); - } - } else { - if (is_unsigned) { - __ CmpLogical32(ToRegister(right), ToOperand(left)); - } else { - __ Cmp32(ToRegister(right), ToOperand(left)); - } - } - // We commuted the operands, so commute the condition. 
- cond = CommuteCondition(cond); - } else if (instr->hydrogen_value()->representation().IsSmi()) { - if (is_unsigned) { - __ CmpLogicalP(ToRegister(left), ToRegister(right)); - } else { - __ CmpP(ToRegister(left), ToRegister(right)); - } - } else { - if (is_unsigned) { - __ CmpLogical32(ToRegister(left), ToRegister(right)); - } else { - __ Cmp32(ToRegister(left), ToRegister(right)); - } - } - } - EmitBranch(instr, cond); - } -} - -void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { - Register left = ToRegister(instr->left()); - Register right = ToRegister(instr->right()); - - __ CmpP(left, right); - EmitBranch(instr, eq); -} - -void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { - if (instr->hydrogen()->representation().IsTagged()) { - Register input_reg = ToRegister(instr->object()); - __ CmpP(input_reg, Operand(factory()->the_hole_value())); - EmitBranch(instr, eq); - return; - } - - DoubleRegister input_reg = ToDoubleRegister(instr->object()); - __ cdbr(input_reg, input_reg); - EmitFalseBranch(instr, ordered); - - Register scratch = scratch0(); - // Convert to GPR and examine the upper 32 bits - __ lgdr(scratch, input_reg); - __ srlg(scratch, scratch, Operand(32)); - __ Cmp32(scratch, Operand(kHoleNanUpper32)); - EmitBranch(instr, eq); -} - -Condition LCodeGen::EmitIsString(Register input, Register temp1, - Label* is_not_string, - SmiCheck check_needed = INLINE_SMI_CHECK) { - if (check_needed == INLINE_SMI_CHECK) { - __ JumpIfSmi(input, is_not_string); - } - __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE); - - return lt; -} - -void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { - Register reg = ToRegister(instr->value()); - Register temp1 = ToRegister(instr->temp()); - - SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject() - ? 
OMIT_SMI_CHECK - : INLINE_SMI_CHECK; - Condition true_cond = - EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed); - - EmitBranch(instr, true_cond); -} - -void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { - Register input_reg = EmitLoadRegister(instr->value(), ip); - __ TestIfSmi(input_reg); - EmitBranch(instr, eq); -} - -void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { - Register input = ToRegister(instr->value()); - Register temp = ToRegister(instr->temp()); - - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - __ JumpIfSmi(input, instr->FalseLabel(chunk_)); - } - __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset)); - __ tm(FieldMemOperand(temp, Map::kBitFieldOffset), - Operand(1 << Map::kIsUndetectable)); - EmitBranch(instr, ne); -} - -static Condition ComputeCompareCondition(Token::Value op) { - switch (op) { - case Token::EQ_STRICT: - case Token::EQ: - return eq; - case Token::LT: - return lt; - case Token::GT: - return gt; - case Token::LTE: - return le; - case Token::GTE: - return ge; - default: - UNREACHABLE(); - } -} - -void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - DCHECK(ToRegister(instr->left()).is(r3)); - DCHECK(ToRegister(instr->right()).is(r2)); - - Handle code = CodeFactory::StringCompare(isolate(), instr->op()).code(); - CallCode(code, RelocInfo::CODE_TARGET, instr); - __ CompareRoot(r2, Heap::kTrueValueRootIndex); - EmitBranch(instr, eq); -} - -static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { - InstanceType from = instr->from(); - InstanceType to = instr->to(); - if (from == FIRST_TYPE) return to; - DCHECK(from == to || to == LAST_TYPE); - return from; -} - -static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { - InstanceType from = instr->from(); - InstanceType to = instr->to(); - if (from == to) return eq; - if (to == LAST_TYPE) return ge; - if (from == FIRST_TYPE) return le; - UNREACHABLE(); -} - -void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { - Register scratch = scratch0(); - Register input = ToRegister(instr->value()); - - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - __ JumpIfSmi(input, instr->FalseLabel(chunk_)); - } - - __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen())); - EmitBranch(instr, BranchCondition(instr->hydrogen())); -} - -// Branches to a label or falls through with the answer in flags. Trashes -// the temp registers, but not the input. -void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false, - Handle class_name, Register input, - Register temp, Register temp2) { - DCHECK(!input.is(temp)); - DCHECK(!input.is(temp2)); - DCHECK(!temp.is(temp2)); - - __ JumpIfSmi(input, is_false); - - __ CompareObjectType(input, temp, temp2, FIRST_FUNCTION_TYPE); - STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE); - if (String::Equals(isolate()->factory()->Function_string(), class_name)) { - __ bge(is_true); - } else { - __ bge(is_false); - } - - // Check if the constructor in the map is a function. - Register instance_type = ip; - __ GetMapConstructor(temp, temp, temp2, instance_type); - - // Objects with a non-function constructor have class 'Object'. - __ CmpP(instance_type, Operand(JS_FUNCTION_TYPE)); - if (String::Equals(isolate()->factory()->Object_string(), class_name)) { - __ bne(is_true); - } else { - __ bne(is_false); - } - - // temp now contains the constructor function. 
Grab the - // instance class name from there. - __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset)); - __ LoadP(temp, - FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset)); - // The class name we are testing against is internalized since it's a literal. - // The name in the constructor is internalized because of the way the context - // is booted. This routine isn't expected to work for random API-created - // classes and it doesn't have to because you can't access it with natives - // syntax. Since both sides are internalized it is sufficient to use an - // identity comparison. - __ CmpP(temp, Operand(class_name)); - // End with the answer in flags. -} - -void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { - Register input = ToRegister(instr->value()); - Register temp = scratch0(); - Register temp2 = ToRegister(instr->temp()); - Handle class_name = instr->hydrogen()->class_name(); - - EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), - class_name, input, temp, temp2); - - EmitBranch(instr, eq); -} - -void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { - Register reg = ToRegister(instr->value()); - Register temp = ToRegister(instr->temp()); - - __ mov(temp, Operand(instr->map())); - __ CmpP(temp, FieldMemOperand(reg, HeapObject::kMapOffset)); - EmitBranch(instr, eq); -} - -void LCodeGen::DoHasInPrototypeChainAndBranch( - LHasInPrototypeChainAndBranch* instr) { - Register const object = ToRegister(instr->object()); - Register const object_map = scratch0(); - Register const object_instance_type = ip; - Register const object_prototype = object_map; - Register const prototype = ToRegister(instr->prototype()); - - // The {object} must be a spec object. It's sufficient to know that {object} - // is not a smi, since all other non-spec objects have {null} prototypes and - // will be ruled out below. - if (instr->hydrogen()->ObjectNeedsSmiCheck()) { - __ TestIfSmi(object); - EmitFalseBranch(instr, eq); - } - // Loop through the {object}s prototype chain looking for the {prototype}. - __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); - Label loop; - __ bind(&loop); - - // Deoptimize if the object needs to be access checked. - __ LoadlB(object_instance_type, - FieldMemOperand(object_map, Map::kBitFieldOffset)); - __ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0); - DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, cr0); - // Deoptimize for proxies. 
- __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE); - DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy); - __ LoadP(object_prototype, - FieldMemOperand(object_map, Map::kPrototypeOffset)); - __ CompareRoot(object_prototype, Heap::kNullValueRootIndex); - EmitFalseBranch(instr, eq); - __ CmpP(object_prototype, prototype); - EmitTrueBranch(instr, eq); - __ LoadP(object_map, - FieldMemOperand(object_prototype, HeapObject::kMapOffset)); - __ b(&loop); -} - -void LCodeGen::DoCmpT(LCmpT* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - Token::Value op = instr->op(); - - Handle ic = CodeFactory::CompareIC(isolate(), op).code(); - CallCode(ic, RelocInfo::CODE_TARGET, instr); - // This instruction also signals no smi code inlined - __ CmpP(r2, Operand::Zero()); - - Condition condition = ComputeCompareCondition(op); - Label true_value, done; - - __ b(condition, &true_value, Label::kNear); - - __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); - __ b(&done, Label::kNear); - - __ bind(&true_value); - __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex); - - __ bind(&done); -} - -void LCodeGen::DoReturn(LReturn* instr) { - if (FLAG_trace && info()->IsOptimizing()) { - // Push the return value on the stack as the parameter. - // Runtime::TraceExit returns its parameter in r2. We're leaving the code - // managed by the register allocator and tearing down the frame, it's - // safe to write to the context register. - __ push(r2); - __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - __ CallRuntime(Runtime::kTraceExit); - } - if (info()->saves_caller_doubles()) { - RestoreCallerDoubles(); - } - if (instr->has_constant_parameter_count()) { - int parameter_count = ToInteger32(instr->constant_parameter_count()); - int32_t sp_delta = (parameter_count + 1) * kPointerSize; - if (NeedsEagerFrame()) { - masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta); - } else if (sp_delta != 0) { - // TODO(joransiu): Clean this up into Macro Assembler - if (sp_delta >= 0 && sp_delta < 4096) - __ la(sp, MemOperand(sp, sp_delta)); - else - __ lay(sp, MemOperand(sp, sp_delta)); - } - } else { - DCHECK(info()->IsStub()); // Functions would need to drop one more value. 
- Register reg = ToRegister(instr->parameter_count()); - // The argument count parameter is a smi - if (NeedsEagerFrame()) { - masm_->LeaveFrame(StackFrame::JAVA_SCRIPT); - } - __ SmiToPtrArrayOffset(r0, reg); - __ AddP(sp, sp, r0); - } - - __ Ret(); -} - -void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { - Register context = ToRegister(instr->context()); - Register result = ToRegister(instr->result()); - __ LoadP(result, ContextMemOperand(context, instr->slot_index())); - if (instr->hydrogen()->RequiresHoleCheck()) { - __ CompareRoot(result, Heap::kTheHoleValueRootIndex); - if (instr->hydrogen()->DeoptimizesOnHole()) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); - } else { - Label skip; - __ bne(&skip, Label::kNear); - __ mov(result, Operand(factory()->undefined_value())); - __ bind(&skip); - } - } -} - -void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { - Register context = ToRegister(instr->context()); - Register value = ToRegister(instr->value()); - Register scratch = scratch0(); - MemOperand target = ContextMemOperand(context, instr->slot_index()); - - Label skip_assignment; - - if (instr->hydrogen()->RequiresHoleCheck()) { - __ LoadP(scratch, target); - __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex); - if (instr->hydrogen()->DeoptimizesOnHole()) { - DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); - } else { - __ bne(&skip_assignment); - } - } - - __ StoreP(value, target); - if (instr->hydrogen()->NeedsWriteBarrier()) { - SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject() - ? OMIT_SMI_CHECK - : INLINE_SMI_CHECK; - __ RecordWriteContextSlot(context, target.offset(), value, scratch, - GetLinkRegisterState(), kSaveFPRegs, - EMIT_REMEMBERED_SET, check_needed); - } - - __ bind(&skip_assignment); -} - -void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { - HObjectAccess access = instr->hydrogen()->access(); - int offset = access.offset(); - Register object = ToRegister(instr->object()); - - if (access.IsExternalMemory()) { - Register result = ToRegister(instr->result()); - MemOperand operand = MemOperand(object, offset); - __ LoadRepresentation(result, operand, access.representation(), r0); - return; - } - - if (instr->hydrogen()->representation().IsDouble()) { - DCHECK(access.IsInobject()); - DoubleRegister result = ToDoubleRegister(instr->result()); - __ LoadDouble(result, FieldMemOperand(object, offset)); - return; - } - - Register result = ToRegister(instr->result()); - if (!access.IsInobject()) { - __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); - object = result; - } - - Representation representation = access.representation(); - -#if V8_TARGET_ARCH_S390X - // 64-bit Smi optimization - if (representation.IsSmi() && - instr->hydrogen()->representation().IsInteger32()) { - // Read int value directly from upper half of the smi. - offset = SmiWordOffset(offset); - representation = Representation::Integer32(); - } -#endif - - __ LoadRepresentation(result, FieldMemOperand(object, offset), representation, - r0); -} - -void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { - Register scratch = scratch0(); - Register function = ToRegister(instr->function()); - Register result = ToRegister(instr->result()); - - // Get the prototype or initial map from the function. - __ LoadP(result, - FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); - - // Check that the function has a prototype or an initial map. 
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex); - DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); - - // If the function does not have an initial map, we're done. - Label done; - __ CompareObjectType(result, scratch, scratch, MAP_TYPE); - __ bne(&done, Label::kNear); - - // Get the prototype from the initial map. - __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset)); - - // All done. - __ bind(&done); -} - -void LCodeGen::DoLoadRoot(LLoadRoot* instr) { - Register result = ToRegister(instr->result()); - __ LoadRoot(result, instr->index()); -} - -void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { - Register arguments = ToRegister(instr->arguments()); - Register result = ToRegister(instr->result()); - // There are two words between the frame pointer and the last argument. - // Subtracting from length accounts for one of them add one more. - if (instr->length()->IsConstantOperand()) { - int const_length = ToInteger32(LConstantOperand::cast(instr->length())); - if (instr->index()->IsConstantOperand()) { - int const_index = ToInteger32(LConstantOperand::cast(instr->index())); - int index = (const_length - const_index) + 1; - __ LoadP(result, MemOperand(arguments, index * kPointerSize)); - } else { - Register index = ToRegister(instr->index()); - __ SubP(result, index, Operand(const_length + 1)); - __ LoadComplementRR(result, result); - __ ShiftLeftP(result, result, Operand(kPointerSizeLog2)); - __ LoadP(result, MemOperand(arguments, result)); - } - } else if (instr->index()->IsConstantOperand()) { - Register length = ToRegister(instr->length()); - int const_index = ToInteger32(LConstantOperand::cast(instr->index())); - int loc = const_index - 1; - if (loc != 0) { - __ SubP(result, length, Operand(loc)); - __ ShiftLeftP(result, result, Operand(kPointerSizeLog2)); - __ LoadP(result, MemOperand(arguments, result)); - } else { - __ ShiftLeftP(result, length, Operand(kPointerSizeLog2)); - __ LoadP(result, MemOperand(arguments, result)); - } - } else { - Register length = ToRegister(instr->length()); - Register index = ToRegister(instr->index()); - __ SubP(result, length, index); - __ AddP(result, result, Operand(1)); - __ ShiftLeftP(result, result, Operand(kPointerSizeLog2)); - __ LoadP(result, MemOperand(arguments, result)); - } -} - -void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { - Register external_pointer = ToRegister(instr->elements()); - Register key = no_reg; - ElementsKind elements_kind = instr->elements_kind(); - bool key_is_constant = instr->key()->IsConstantOperand(); - int constant_key = 0; - if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort(kArrayIndexConstantValueTooBig); - } - } else { - key = ToRegister(instr->key()); - } - int element_size_shift = ElementsKindToShiftSize(elements_kind); - bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); - bool keyMaybeNegative = instr->hydrogen()->IsDehoisted(); - int base_offset = instr->base_offset(); - bool use_scratch = false; - - if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) { - DoubleRegister result = ToDoubleRegister(instr->result()); - if (key_is_constant) { - base_offset += constant_key << element_size_shift; - if (!is_int20(base_offset)) { - __ mov(scratch0(), Operand(base_offset)); - base_offset = 0; - use_scratch = true; - } - } else { - __ IndexToArrayOffset(scratch0(), key, element_size_shift, key_is_smi, - keyMaybeNegative); - use_scratch = true; - } - 
if (elements_kind == FLOAT32_ELEMENTS) { - if (!use_scratch) { - __ ldeb(result, MemOperand(external_pointer, base_offset)); - } else { - __ ldeb(result, MemOperand(scratch0(), external_pointer, base_offset)); - } - } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS - if (!use_scratch) { - __ LoadDouble(result, MemOperand(external_pointer, base_offset)); - } else { - __ LoadDouble(result, - MemOperand(scratch0(), external_pointer, base_offset)); - } - } - } else { - Register result = ToRegister(instr->result()); - MemOperand mem_operand = - PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi, - constant_key, element_size_shift, base_offset, - keyMaybeNegative); - switch (elements_kind) { - case INT8_ELEMENTS: - __ LoadB(result, mem_operand); - break; - case UINT8_ELEMENTS: - case UINT8_CLAMPED_ELEMENTS: - __ LoadlB(result, mem_operand); - break; - case INT16_ELEMENTS: - __ LoadHalfWordP(result, mem_operand); - break; - case UINT16_ELEMENTS: - __ LoadLogicalHalfWordP(result, mem_operand); - break; - case INT32_ELEMENTS: - __ LoadW(result, mem_operand, r0); - break; - case UINT32_ELEMENTS: - __ LoadlW(result, mem_operand, r0); - if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { - __ CmpLogical32(result, Operand(0x80000000)); - DeoptimizeIf(ge, instr, DeoptimizeReason::kNegativeValue); - } - break; - case FLOAT32_ELEMENTS: - case FLOAT64_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case DICTIONARY_ELEMENTS: - case FAST_SLOPPY_ARGUMENTS_ELEMENTS: - case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: - case FAST_STRING_WRAPPER_ELEMENTS: - case SLOW_STRING_WRAPPER_ELEMENTS: - case NO_ELEMENTS: - UNREACHABLE(); - break; - } - } -} - -void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { - Register elements = ToRegister(instr->elements()); - bool key_is_constant = instr->key()->IsConstantOperand(); - Register key = no_reg; - DoubleRegister result = ToDoubleRegister(instr->result()); - Register scratch = scratch0(); - - int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); - bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); - bool keyMaybeNegative = instr->hydrogen()->IsDehoisted(); - int constant_key = 0; - if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort(kArrayIndexConstantValueTooBig); - } - } else { - key = ToRegister(instr->key()); - } - - bool use_scratch = false; - intptr_t base_offset = instr->base_offset() + constant_key * kDoubleSize; - if (!key_is_constant) { - use_scratch = true; - __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi, - keyMaybeNegative); - } - - // Memory references support up to 20-bits signed displacement in RXY form - // Include Register::kExponentOffset in check, so we are guaranteed not to - // overflow displacement later. 
- if (!is_int20(base_offset + Register::kExponentOffset)) { - use_scratch = true; - if (key_is_constant) { - __ mov(scratch, Operand(base_offset)); - } else { - __ AddP(scratch, Operand(base_offset)); - } - base_offset = 0; - } - - if (!use_scratch) { - __ LoadDouble(result, MemOperand(elements, base_offset)); - } else { - __ LoadDouble(result, MemOperand(scratch, elements, base_offset)); - } - - if (instr->hydrogen()->RequiresHoleCheck()) { - if (!use_scratch) { - __ LoadlW(r0, - MemOperand(elements, base_offset + Register::kExponentOffset)); - } else { - __ LoadlW(r0, MemOperand(scratch, elements, - base_offset + Register::kExponentOffset)); - } - __ Cmp32(r0, Operand(kHoleNanUpper32)); - DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); - } -} - -void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { - HLoadKeyed* hinstr = instr->hydrogen(); - Register elements = ToRegister(instr->elements()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - int offset = instr->base_offset(); - - if (instr->key()->IsConstantOperand()) { - LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); - offset += ToInteger32(const_operand) * kPointerSize; - } else { - Register key = ToRegister(instr->key()); - // Even though the HLoadKeyed instruction forces the input - // representation for the key to be an integer, the input gets replaced - // during bound check elimination with the index argument to the bounds - // check, which can be tagged, so that case must be handled here, too. - if (hinstr->key()->representation().IsSmi()) { - __ SmiToPtrArrayOffset(scratch, key); - } else { - __ ShiftLeftP(scratch, key, Operand(kPointerSizeLog2)); - } - } - - bool requires_hole_check = hinstr->RequiresHoleCheck(); - Representation representation = hinstr->representation(); - -#if V8_TARGET_ARCH_S390X - // 64-bit Smi optimization - if (representation.IsInteger32() && - hinstr->elements_kind() == FAST_SMI_ELEMENTS) { - DCHECK(!requires_hole_check); - // Read int value directly from upper half of the smi. - offset = SmiWordOffset(offset); - } -#endif - - if (instr->key()->IsConstantOperand()) { - __ LoadRepresentation(result, MemOperand(elements, offset), representation, - r1); - } else { - __ LoadRepresentation(result, MemOperand(scratch, elements, offset), - representation, r1); - } - - // Check for the hole value. - if (requires_hole_check) { - if (IsFastSmiElementsKind(hinstr->elements_kind())) { - __ TestIfSmi(result); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0); - } else { - __ CompareRoot(result, Heap::kTheHoleValueRootIndex); - DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); - } - } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) { - DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS); - Label done; - __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); - __ CmpP(result, scratch); - __ bne(&done); - if (info()->IsStub()) { - // A stub can safely convert the hole to undefined only if the array - // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise - // it needs to bail out. 
- __ LoadRoot(result, Heap::kArrayProtectorRootIndex); - __ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset)); - __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kProtectorValid), r0); - DeoptimizeIf(ne, instr, DeoptimizeReason::kHole); - } - __ LoadRoot(result, Heap::kUndefinedValueRootIndex); - __ bind(&done); - } -} - -void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { - if (instr->is_fixed_typed_array()) { - DoLoadKeyedExternalArray(instr); - } else if (instr->hydrogen()->representation().IsDouble()) { - DoLoadKeyedFixedDoubleArray(instr); - } else { - DoLoadKeyedFixedArray(instr); - } -} - -MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base, - bool key_is_constant, bool key_is_smi, - int constant_key, - int element_size_shift, - int base_offset, - bool keyMaybeNegative) { - Register scratch = scratch0(); - - if (key_is_constant) { - int offset = (base_offset + (constant_key << element_size_shift)); - if (!is_int20(offset)) { - __ mov(scratch, Operand(offset)); - return MemOperand(base, scratch); - } else { - return MemOperand(base, - (constant_key << element_size_shift) + base_offset); - } - } - - bool needs_shift = - (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0)); - - if (needs_shift) { - __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi, - keyMaybeNegative); - } else { - scratch = key; - } - - if (!is_int20(base_offset)) { - __ AddP(scratch, Operand(base_offset)); - base_offset = 0; - } - return MemOperand(scratch, base, base_offset); -} - -void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { - Register scratch = scratch0(); - Register result = ToRegister(instr->result()); - - if (instr->hydrogen()->from_inlined()) { - __ lay(result, MemOperand(sp, -2 * kPointerSize)); - } else if (instr->hydrogen()->arguments_adaptor()) { - // Check if the calling frame is an arguments adaptor frame. - Label done, adapted; - __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ LoadP( - result, - MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset)); - __ CmpP(result, - Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); - - // Result is the frame pointer for the frame if not adapted and for the real - // frame below the adaptor frame if adapted. - __ beq(&adapted, Label::kNear); - __ LoadRR(result, fp); - __ b(&done, Label::kNear); - - __ bind(&adapted); - __ LoadRR(result, scratch); - __ bind(&done); - } else { - __ LoadRR(result, fp); - } -} - -void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { - Register elem = ToRegister(instr->elements()); - Register result = ToRegister(instr->result()); - - Label done; - - // If no arguments adaptor frame the number of arguments is fixed. - __ CmpP(fp, elem); - __ mov(result, Operand(scope()->num_parameters())); - __ beq(&done, Label::kNear); - - // Arguments adaptor frame present. Get argument length from there. - __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ LoadP(result, - MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ SmiUntag(result); - - // Argument length is in result register. 
- __ bind(&done); -} - -void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { - Register receiver = ToRegister(instr->receiver()); - Register function = ToRegister(instr->function()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - - // If the receiver is null or undefined, we have to pass the global - // object as a receiver to normal functions. Values have to be - // passed unchanged to builtins and strict-mode functions. - Label global_object, result_in_receiver; - - if (!instr->hydrogen()->known_function()) { - // Do not transform the receiver to object for strict mode - // functions or builtins. - __ LoadP(scratch, - FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); - __ LoadlW(scratch, FieldMemOperand( - scratch, SharedFunctionInfo::kCompilerHintsOffset)); - __ AndP(r0, scratch, - Operand(SharedFunctionInfo::IsStrictBit::kMask | - SharedFunctionInfo::IsNativeBit::kMask)); - __ bne(&result_in_receiver, Label::kNear); - } - - // Normal function. Replace undefined or null with global receiver. - __ CompareRoot(receiver, Heap::kNullValueRootIndex); - __ beq(&global_object, Label::kNear); - __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex); - __ beq(&global_object, Label::kNear); - - // Deoptimize if the receiver is not a JS object. - __ TestIfSmi(receiver); - DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0); - __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE); - DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject); - - __ b(&result_in_receiver, Label::kNear); - __ bind(&global_object); - __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset)); - __ LoadP(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX)); - __ LoadP(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX)); - - if (result.is(receiver)) { - __ bind(&result_in_receiver); - } else { - Label result_ok; - __ b(&result_ok, Label::kNear); - __ bind(&result_in_receiver); - __ LoadRR(result, receiver); - __ bind(&result_ok); - } -} - -void LCodeGen::DoApplyArguments(LApplyArguments* instr) { - Register receiver = ToRegister(instr->receiver()); - Register function = ToRegister(instr->function()); - Register length = ToRegister(instr->length()); - Register elements = ToRegister(instr->elements()); - Register scratch = scratch0(); - DCHECK(receiver.is(r2)); // Used for parameter count. - DCHECK(function.is(r3)); // Required by InvokeFunction. - DCHECK(ToRegister(instr->result()).is(r2)); - - // Copy the arguments to this function possibly from the - // adaptor frame below it. - const uint32_t kArgumentsLimit = 1 * KB; - __ CmpLogicalP(length, Operand(kArgumentsLimit)); - DeoptimizeIf(gt, instr, DeoptimizeReason::kTooManyArguments); - - // Push the receiver and use the register to keep the original - // number of arguments. - __ push(receiver); - __ LoadRR(receiver, length); - // The arguments are at a one pointer size offset from elements. - __ AddP(elements, Operand(1 * kPointerSize)); - - // Loop through the arguments pushing them onto the execution - // stack. - Label invoke, loop; - // length is a small non-negative integer, due to the test above. 
- __ CmpP(length, Operand::Zero()); - __ beq(&invoke, Label::kNear); - __ bind(&loop); - __ ShiftLeftP(r1, length, Operand(kPointerSizeLog2)); - __ LoadP(scratch, MemOperand(elements, r1)); - __ push(scratch); - __ BranchOnCount(length, &loop); - - __ bind(&invoke); - - InvokeFlag flag = CALL_FUNCTION; - if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) { - DCHECK(!info()->saves_caller_doubles()); - // TODO(ishell): drop current frame before pushing arguments to the stack. - flag = JUMP_FUNCTION; - ParameterCount actual(r2); - // It is safe to use r5, r6 and r7 as scratch registers here given that - // 1) we are not going to return to caller function anyway, - // 2) r5 (new.target) will be initialized below. - PrepareForTailCall(actual, r5, r6, r7); - } - - DCHECK(instr->HasPointerMap()); - LPointerMap* pointers = instr->pointer_map(); - SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt); - // The number of arguments is stored in receiver which is r2, as expected - // by InvokeFunction. - ParameterCount actual(receiver); - __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator); -} - -void LCodeGen::DoPushArgument(LPushArgument* instr) { - LOperand* argument = instr->value(); - if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) { - Abort(kDoPushArgumentNotImplementedForDoubleType); - } else { - Register argument_reg = EmitLoadRegister(argument, ip); - __ push(argument_reg); - } -} - -void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); } - -void LCodeGen::DoThisFunction(LThisFunction* instr) { - Register result = ToRegister(instr->result()); - __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); -} - -void LCodeGen::DoContext(LContext* instr) { - // If there is a non-return use, the context must be moved to a register. - Register result = ToRegister(instr->result()); - if (info()->IsOptimizing()) { - __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); - } else { - // If there is no frame, the context must be in cp. - DCHECK(result.is(cp)); - } -} - -void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - __ Move(scratch0(), instr->hydrogen()->declarations()); - __ push(scratch0()); - __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags())); - __ push(scratch0()); - __ Move(scratch0(), instr->hydrogen()->feedback_vector()); - __ push(scratch0()); - CallRuntime(Runtime::kDeclareGlobals, instr); -} - -void LCodeGen::CallKnownFunction(Handle function, - int formal_parameter_count, int arity, - bool is_tail_call, LInstruction* instr) { - bool dont_adapt_arguments = - formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; - bool can_invoke_directly = - dont_adapt_arguments || formal_parameter_count == arity; - - Register function_reg = r3; - - LPointerMap* pointers = instr->pointer_map(); - - if (can_invoke_directly) { - // Change context. - __ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset)); - - // Always initialize new target and number of actual arguments. - __ LoadRoot(r5, Heap::kUndefinedValueRootIndex); - __ mov(r2, Operand(arity)); - - bool is_self_call = function.is_identical_to(info()->closure()); - - // Invoke function. 
- if (is_self_call) { - Handle self(reinterpret_cast(__ CodeObject().location())); - if (is_tail_call) { - __ Jump(self, RelocInfo::CODE_TARGET); - } else { - __ Call(self, RelocInfo::CODE_TARGET); - } - } else { - __ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset)); - if (is_tail_call) { - __ JumpToJSEntry(ip); - } else { - __ CallJSEntry(ip); - } - } - - if (!is_tail_call) { - // Set up deoptimization. - RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); - } - } else { - SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); - ParameterCount actual(arity); - ParameterCount expected(formal_parameter_count); - InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION; - __ InvokeFunction(function_reg, expected, actual, flag, generator); - } -} - -void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { - DCHECK(instr->context() != NULL); - DCHECK(ToRegister(instr->context()).is(cp)); - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - - // Deoptimize if not a heap number. - __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); - __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber); - - Label done; - Register exponent = scratch0(); - scratch = no_reg; - __ LoadlW(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); - // Check the sign of the argument. If the argument is positive, just - // return it. - __ Cmp32(exponent, Operand::Zero()); - // Move the input to the result if necessary. - __ Move(result, input); - __ bge(&done); - - // Input is negative. Reverse its sign. - // Preserve the value of all registers. - { - PushSafepointRegistersScope scope(this); - - // Registers were saved at the safepoint, so we can use - // many scratch registers. - Register tmp1 = input.is(r3) ? r2 : r3; - Register tmp2 = input.is(r4) ? r2 : r4; - Register tmp3 = input.is(r5) ? r2 : r5; - Register tmp4 = input.is(r6) ? r2 : r6; - - // exponent: floating point exponent value. - - Label allocated, slow; - __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow); - __ b(&allocated); - - // Slow case: Call the runtime system to do the number allocation. - __ bind(&slow); - - CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr, - instr->context()); - // Set the pointer to the new heap number in tmp. - if (!tmp1.is(r2)) __ LoadRR(tmp1, r2); - // Restore input_reg after call to runtime. - __ LoadFromSafepointRegisterSlot(input, input); - __ LoadlW(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); - - __ bind(&allocated); - // exponent: floating point exponent value. - // tmp1: allocated heap number. - - // Clear the sign bit. - __ nilf(exponent, Operand(~HeapNumber::kSignMask)); - __ StoreW(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset)); - __ LoadlW(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset)); - __ StoreW(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset)); - - __ StoreToSafepointRegisterSlot(tmp1, result); - } - - __ bind(&done); -} - -void LCodeGen::EmitMathAbs(LMathAbs* instr) { - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - __ LoadPositiveP(result, input); - // Deoptimize on overflow. 
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0); -} - -#if V8_TARGET_ARCH_S390X -void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) { - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - __ LoadPositive32(result, input); - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); -} -#endif - -void LCodeGen::DoMathAbs(LMathAbs* instr) { - // Class for deferred case. - class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode { - public: - DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) - : LDeferredCode(codegen), instr_(instr) {} - void Generate() override { - codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); - } - LInstruction* instr() override { return instr_; } - - private: - LMathAbs* instr_; - }; - - Representation r = instr->hydrogen()->value()->representation(); - if (r.IsDouble()) { - DoubleRegister input = ToDoubleRegister(instr->value()); - DoubleRegister result = ToDoubleRegister(instr->result()); - __ lpdbr(result, input); -#if V8_TARGET_ARCH_S390X - } else if (r.IsInteger32()) { - EmitInteger32MathAbs(instr); - } else if (r.IsSmi()) { -#else - } else if (r.IsSmiOrInteger32()) { -#endif - EmitMathAbs(instr); - } else { - // Representation is tagged. - DeferredMathAbsTaggedHeapNumber* deferred = - new (zone()) DeferredMathAbsTaggedHeapNumber(this, instr); - Register input = ToRegister(instr->value()); - // Smi check. - __ JumpIfNotSmi(input, deferred->entry()); - // If smi, handle it directly. - EmitMathAbs(instr); - __ bind(deferred->exit()); - } -} - -void LCodeGen::DoMathFloor(LMathFloor* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - Register result = ToRegister(instr->result()); - Register input_high = scratch0(); - Register scratch = ip; - Label done, exact; - - __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done, - &exact); - DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN); - - __ bind(&exact); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // Test for -0. - __ CmpP(result, Operand::Zero()); - __ bne(&done, Label::kNear); - __ Cmp32(input_high, Operand::Zero()); - DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); - } - __ bind(&done); -} - -void LCodeGen::DoMathRound(LMathRound* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - Register result = ToRegister(instr->result()); - DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp()); - DoubleRegister input_plus_dot_five = double_scratch1; - Register scratch1 = scratch0(); - Register scratch2 = ip; - DoubleRegister dot_five = double_scratch0(); - Label convert, done; - - __ LoadDoubleLiteral(dot_five, 0.5, r0); - __ lpdbr(double_scratch1, input); - __ cdbr(double_scratch1, dot_five); - DeoptimizeIf(unordered, instr, DeoptimizeReason::kLostPrecisionOrNaN); - // If input is in [-0.5, -0], the result is -0. - // If input is in [+0, +0.5[, the result is +0. - // If the input is +0.5, the result is 1. - __ bgt(&convert, Label::kNear); // Out of [-0.5, +0.5]. - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // [-0.5, -0] (negative) yields minus zero. - __ TestDoubleSign(input, scratch1); - DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); - } - Label return_zero; - __ cdbr(input, dot_five); - __ bne(&return_zero, Label::kNear); - __ LoadImmP(result, Operand(1)); // +0.5. 
- __ b(&done, Label::kNear); - // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on - // flag kBailoutOnMinusZero. - __ bind(&return_zero); - __ LoadImmP(result, Operand::Zero()); - __ b(&done, Label::kNear); - - __ bind(&convert); - __ ldr(input_plus_dot_five, input); - __ adbr(input_plus_dot_five, dot_five); - // Reuse dot_five (double_scratch0) as we no longer need this value. - __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2, - double_scratch0(), &done, &done); - DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN); - __ bind(&done); -} - -void LCodeGen::DoMathFround(LMathFround* instr) { - DoubleRegister input_reg = ToDoubleRegister(instr->value()); - DoubleRegister output_reg = ToDoubleRegister(instr->result()); - - // Round double to float - __ ledbr(output_reg, input_reg); - // Extend from float to double - __ ldebr(output_reg, output_reg); -} - -void LCodeGen::DoMathSqrt(LMathSqrt* instr) { - DoubleRegister result = ToDoubleRegister(instr->result()); - LOperand* input = instr->value(); - if (input->IsDoubleRegister()) { - __ Sqrt(result, ToDoubleRegister(instr->value())); - } else { - __ Sqrt(result, ToMemOperand(input)); - } -} - -void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - DoubleRegister result = ToDoubleRegister(instr->result()); - DoubleRegister temp = double_scratch0(); - - // Note that according to ECMA-262 15.8.2.13: - // Math.pow(-Infinity, 0.5) == Infinity - // Math.sqrt(-Infinity) == NaN - Label skip, done; - - __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0()); - __ cdbr(input, temp); - __ bne(&skip, Label::kNear); - __ lcdbr(result, temp); - __ b(&done, Label::kNear); - - // Add +0 to convert -0 to +0. - __ bind(&skip); - __ ldr(result, input); - __ lzdr(kDoubleRegZero); - __ adbr(result, kDoubleRegZero); - __ sqdbr(result, result); - __ bind(&done); -} - -void LCodeGen::DoPower(LPower* instr) { - Representation exponent_type = instr->hydrogen()->right()->representation(); - // Having marked this as a call, we can use any registers. - // Just make sure that the input/output registers are the expected ones. 
- Register tagged_exponent = MathPowTaggedDescriptor::exponent(); - DCHECK(!instr->right()->IsDoubleRegister() || - ToDoubleRegister(instr->right()).is(d2)); - DCHECK(!instr->right()->IsRegister() || - ToRegister(instr->right()).is(tagged_exponent)); - DCHECK(ToDoubleRegister(instr->left()).is(d1)); - DCHECK(ToDoubleRegister(instr->result()).is(d3)); - - if (exponent_type.IsSmi()) { - MathPowStub stub(isolate(), MathPowStub::TAGGED); - __ CallStub(&stub); - } else if (exponent_type.IsTagged()) { - Label no_deopt; - __ JumpIfSmi(tagged_exponent, &no_deopt); - __ LoadP(r9, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); - __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber); - __ bind(&no_deopt); - MathPowStub stub(isolate(), MathPowStub::TAGGED); - __ CallStub(&stub); - } else if (exponent_type.IsInteger32()) { - MathPowStub stub(isolate(), MathPowStub::INTEGER); - __ CallStub(&stub); - } else { - DCHECK(exponent_type.IsDouble()); - MathPowStub stub(isolate(), MathPowStub::DOUBLE); - __ CallStub(&stub); - } -} - -void LCodeGen::DoMathCos(LMathCos* instr) { - __ PrepareCallCFunction(0, 1, scratch0()); - __ MovToFloatParameter(ToDoubleRegister(instr->value())); - __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1); - __ MovFromFloatResult(ToDoubleRegister(instr->result())); -} - -void LCodeGen::DoMathSin(LMathSin* instr) { - __ PrepareCallCFunction(0, 1, scratch0()); - __ MovToFloatParameter(ToDoubleRegister(instr->value())); - __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1); - __ MovFromFloatResult(ToDoubleRegister(instr->result())); -} - -void LCodeGen::DoMathExp(LMathExp* instr) { - __ PrepareCallCFunction(0, 1, scratch0()); - __ MovToFloatParameter(ToDoubleRegister(instr->value())); - __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1); - __ MovFromFloatResult(ToDoubleRegister(instr->result())); -} - -void LCodeGen::DoMathLog(LMathLog* instr) { - __ PrepareCallCFunction(0, 1, scratch0()); - __ MovToFloatParameter(ToDoubleRegister(instr->value())); - __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1); - __ MovFromFloatResult(ToDoubleRegister(instr->result())); -} - -void LCodeGen::DoMathClz32(LMathClz32* instr) { - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - Label done; - __ llgfr(result, input); - __ flogr(r0, result); - __ LoadRR(result, r0); - __ CmpP(r0, Operand::Zero()); - __ beq(&done, Label::kNear); - __ SubP(result, Operand(32)); - __ bind(&done); -} - -void LCodeGen::PrepareForTailCall(const ParameterCount& actual, - Register scratch1, Register scratch2, - Register scratch3) { -#if DEBUG - if (actual.is_reg()) { - DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3)); - } else { - DCHECK(!AreAliased(scratch1, scratch2, scratch3)); - } -#endif - if (FLAG_code_comments) { - if (actual.is_reg()) { - Comment(";;; PrepareForTailCall, actual: %s {", - RegisterConfiguration::Crankshaft()->GetGeneralRegisterName( - actual.reg().code())); - } else { - Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate()); - } - } - - // Check if next frame is an arguments adaptor frame. 
- Register caller_args_count_reg = scratch1; - Label no_arguments_adaptor, formal_parameter_count_loaded; - __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ LoadP(scratch3, - MemOperand(scratch2, StandardFrameConstants::kContextOffset)); - __ CmpP(scratch3, - Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); - __ bne(&no_arguments_adaptor); - - // Drop current frame and load arguments count from arguments adaptor frame. - __ LoadRR(fp, scratch2); - __ LoadP(caller_args_count_reg, - MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ SmiUntag(caller_args_count_reg); - __ b(&formal_parameter_count_loaded); - - __ bind(&no_arguments_adaptor); - // Load caller's formal parameter count - __ mov(caller_args_count_reg, Operand(info()->literal()->parameter_count())); - - __ bind(&formal_parameter_count_loaded); - __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3); - - Comment(";;; }"); -} - -void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { - HInvokeFunction* hinstr = instr->hydrogen(); - DCHECK(ToRegister(instr->context()).is(cp)); - DCHECK(ToRegister(instr->function()).is(r3)); - DCHECK(instr->HasPointerMap()); - - bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow; - - if (is_tail_call) { - DCHECK(!info()->saves_caller_doubles()); - ParameterCount actual(instr->arity()); - // It is safe to use r5, r6 and r7 as scratch registers here given that - // 1) we are not going to return to caller function anyway, - // 2) r5 (new.target) will be initialized below. - PrepareForTailCall(actual, r5, r6, r7); - } - - Handle<JSFunction> known_function = hinstr->known_function(); - if (known_function.is_null()) { - LPointerMap* pointers = instr->pointer_map(); - SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); - ParameterCount actual(instr->arity()); - InvokeFlag flag = is_tail_call ?
JUMP_FUNCTION : CALL_FUNCTION; - __ InvokeFunction(r3, no_reg, actual, flag, generator); - } else { - CallKnownFunction(known_function, hinstr->formal_parameter_count(), - instr->arity(), is_tail_call, instr); - } -} - -void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { - DCHECK(ToRegister(instr->result()).is(r2)); - - if (instr->hydrogen()->IsTailCall()) { - if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL); - - if (instr->target()->IsConstantOperand()) { - LConstantOperand* target = LConstantOperand::cast(instr->target()); - Handle<Code> code = Handle<Code>::cast(ToHandle(target)); - __ Jump(code, RelocInfo::CODE_TARGET); - } else { - DCHECK(instr->target()->IsRegister()); - Register target = ToRegister(instr->target()); - __ AddP(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag)); - __ JumpToJSEntry(ip); - } - } else { - LPointerMap* pointers = instr->pointer_map(); - SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); - - if (instr->target()->IsConstantOperand()) { - LConstantOperand* target = LConstantOperand::cast(instr->target()); - Handle<Code> code = Handle<Code>::cast(ToHandle(target)); - generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET)); - __ Call(code, RelocInfo::CODE_TARGET); - } else { - DCHECK(instr->target()->IsRegister()); - Register target = ToRegister(instr->target()); - generator.BeforeCall(__ CallSize(target)); - __ AddP(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag)); - __ CallJSEntry(ip); - } - generator.AfterCall(); - } -} - -void LCodeGen::DoCallNewArray(LCallNewArray* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - DCHECK(ToRegister(instr->constructor()).is(r3)); - DCHECK(ToRegister(instr->result()).is(r2)); - - __ mov(r2, Operand(instr->arity())); - __ Move(r4, instr->hydrogen()->site()); - - ElementsKind kind = instr->hydrogen()->elements_kind(); - AllocationSiteOverrideMode override_mode = AllocationSite::ShouldTrack(kind) - ?
DISABLE_ALLOCATION_SITES - : DONT_OVERRIDE; - - if (instr->arity() == 0) { - ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - } else if (instr->arity() == 1) { - Label done; - if (IsFastPackedElementsKind(kind)) { - Label packed_case; - // We might need a change here - // look at the first argument - __ LoadP(r7, MemOperand(sp, 0)); - __ CmpP(r7, Operand::Zero()); - __ beq(&packed_case, Label::kNear); - - ElementsKind holey_kind = GetHoleyElementsKind(kind); - ArraySingleArgumentConstructorStub stub(isolate(), holey_kind, - override_mode); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - __ b(&done, Label::kNear); - __ bind(&packed_case); - } - - ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - __ bind(&done); - } else { - ArrayNArgumentsConstructorStub stub(isolate()); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - } -} - -void LCodeGen::DoCallRuntime(LCallRuntime* instr) { - CallRuntime(instr->function(), instr->arity(), instr); -} - -void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { - Register function = ToRegister(instr->function()); - Register code_object = ToRegister(instr->code_object()); - __ lay(code_object, - MemOperand(code_object, Code::kHeaderSize - kHeapObjectTag)); - __ StoreP(code_object, - FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0); -} - -void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { - Register result = ToRegister(instr->result()); - Register base = ToRegister(instr->base_object()); - if (instr->offset()->IsConstantOperand()) { - LConstantOperand* offset = LConstantOperand::cast(instr->offset()); - __ lay(result, MemOperand(base, ToInteger32(offset))); - } else { - Register offset = ToRegister(instr->offset()); - __ lay(result, MemOperand(base, offset)); - } -} - -void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { - HStoreNamedField* hinstr = instr->hydrogen(); - Representation representation = instr->representation(); - - Register object = ToRegister(instr->object()); - Register scratch = scratch0(); - HObjectAccess access = hinstr->access(); - int offset = access.offset(); - - if (access.IsExternalMemory()) { - Register value = ToRegister(instr->value()); - MemOperand operand = MemOperand(object, offset); - __ StoreRepresentation(value, operand, representation, r0); - return; - } - - __ AssertNotSmi(object); - -#if V8_TARGET_ARCH_S390X - DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() || - IsInteger32(LConstantOperand::cast(instr->value()))); -#else - DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() || - IsSmi(LConstantOperand::cast(instr->value()))); -#endif - if (!FLAG_unbox_double_fields && representation.IsDouble()) { - DCHECK(access.IsInobject()); - DCHECK(!hinstr->has_transition()); - DCHECK(!hinstr->NeedsWriteBarrier()); - DoubleRegister value = ToDoubleRegister(instr->value()); - DCHECK(offset >= 0); - __ StoreDouble(value, FieldMemOperand(object, offset)); - return; - } - - if (hinstr->has_transition()) { - Handle<Map> transition = hinstr->transition_map(); - AddDeprecationDependency(transition); - __ mov(scratch, Operand(transition)); - __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0); - if (hinstr->NeedsWriteBarrierForMap()) { - Register temp = ToRegister(instr->temp()); - // Update the write barrier for the map field.
- __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(), - kSaveFPRegs); - } - } - - // Do the store. - Register record_dest = object; - Register record_value = no_reg; - Register record_scratch = scratch; -#if V8_TARGET_ARCH_S390X - if (FLAG_unbox_double_fields && representation.IsDouble()) { - DCHECK(access.IsInobject()); - DoubleRegister value = ToDoubleRegister(instr->value()); - __ StoreDouble(value, FieldMemOperand(object, offset)); - if (hinstr->NeedsWriteBarrier()) { - record_value = ToRegister(instr->value()); - } - } else { - if (representation.IsSmi() && - hinstr->value()->representation().IsInteger32()) { - DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY); - // 64-bit Smi optimization - // Store int value directly to upper half of the smi. - offset = SmiWordOffset(offset); - representation = Representation::Integer32(); - } -#endif - if (access.IsInobject()) { - Register value = ToRegister(instr->value()); - MemOperand operand = FieldMemOperand(object, offset); - __ StoreRepresentation(value, operand, representation, r0); - record_value = value; - } else { - Register value = ToRegister(instr->value()); - __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset)); - MemOperand operand = FieldMemOperand(scratch, offset); - __ StoreRepresentation(value, operand, representation, r0); - record_dest = scratch; - record_value = value; - record_scratch = object; - } -#if V8_TARGET_ARCH_S390X - } -#endif - - if (hinstr->NeedsWriteBarrier()) { - __ RecordWriteField(record_dest, offset, record_value, record_scratch, - GetLinkRegisterState(), kSaveFPRegs, - EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(), - hinstr->PointersToHereCheckForValue()); - } -} - -void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { - Representation representation = instr->hydrogen()->length()->representation(); - DCHECK(representation.Equals(instr->hydrogen()->index()->representation())); - DCHECK(representation.IsSmiOrInteger32()); - Register temp = scratch0(); - - Condition cc = instr->hydrogen()->allow_equality() ? 
lt : le; - if (instr->length()->IsConstantOperand()) { - int32_t length = ToInteger32(LConstantOperand::cast(instr->length())); - Register index = ToRegister(instr->index()); - if (representation.IsSmi()) { - __ CmpLogicalSmiLiteral(index, Smi::FromInt(length), temp); - } else { - __ CmpLogical32(index, Operand(length)); - } - cc = CommuteCondition(cc); - } else if (instr->index()->IsConstantOperand()) { - int32_t index = ToInteger32(LConstantOperand::cast(instr->index())); - Register length = ToRegister(instr->length()); - if (representation.IsSmi()) { - __ CmpLogicalSmiLiteral(length, Smi::FromInt(index), temp); - } else { - __ CmpLogical32(length, Operand(index)); - } - } else { - Register index = ToRegister(instr->index()); - Register length = ToRegister(instr->length()); - if (representation.IsSmi()) { - __ CmpLogicalP(length, index); - } else { - __ CmpLogical32(length, index); - } - } - if (FLAG_debug_code && instr->hydrogen()->skip_check()) { - Label done; - __ b(NegateCondition(cc), &done, Label::kNear); - __ stop("eliminated bounds check failed"); - __ bind(&done); - } else { - DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds); - } -} - -void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { - Register external_pointer = ToRegister(instr->elements()); - Register key = no_reg; - ElementsKind elements_kind = instr->elements_kind(); - bool key_is_constant = instr->key()->IsConstantOperand(); - int constant_key = 0; - if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort(kArrayIndexConstantValueTooBig); - } - } else { - key = ToRegister(instr->key()); - } - int element_size_shift = ElementsKindToShiftSize(elements_kind); - bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); - bool keyMaybeNegative = instr->hydrogen()->IsDehoisted(); - int base_offset = instr->base_offset(); - - if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) { - Register address = scratch0(); - DoubleRegister value(ToDoubleRegister(instr->value())); - if (key_is_constant) { - if (constant_key != 0) { - base_offset += constant_key << element_size_shift; - if (!is_int20(base_offset)) { - __ mov(address, Operand(base_offset)); - __ AddP(address, external_pointer); - } else { - __ AddP(address, external_pointer, Operand(base_offset)); - } - base_offset = 0; - } else { - address = external_pointer; - } - } else { - __ IndexToArrayOffset(address, key, element_size_shift, key_is_smi, - keyMaybeNegative); - __ AddP(address, external_pointer); - } - if (elements_kind == FLOAT32_ELEMENTS) { - __ ledbr(double_scratch0(), value); - __ StoreFloat32(double_scratch0(), MemOperand(address, base_offset)); - } else { // Storing doubles, not floats. 
- __ StoreDouble(value, MemOperand(address, base_offset)); - } - } else { - Register value(ToRegister(instr->value())); - MemOperand mem_operand = - PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi, - constant_key, element_size_shift, base_offset, - keyMaybeNegative); - switch (elements_kind) { - case UINT8_ELEMENTS: - case UINT8_CLAMPED_ELEMENTS: - case INT8_ELEMENTS: - if (key_is_constant) { - __ StoreByte(value, mem_operand, r0); - } else { - __ StoreByte(value, mem_operand); - } - break; - case INT16_ELEMENTS: - case UINT16_ELEMENTS: - if (key_is_constant) { - __ StoreHalfWord(value, mem_operand, r0); - } else { - __ StoreHalfWord(value, mem_operand); - } - break; - case INT32_ELEMENTS: - case UINT32_ELEMENTS: - if (key_is_constant) { - __ StoreW(value, mem_operand, r0); - } else { - __ StoreW(value, mem_operand); - } - break; - case FLOAT32_ELEMENTS: - case FLOAT64_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case DICTIONARY_ELEMENTS: - case FAST_SLOPPY_ARGUMENTS_ELEMENTS: - case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: - case FAST_STRING_WRAPPER_ELEMENTS: - case SLOW_STRING_WRAPPER_ELEMENTS: - case NO_ELEMENTS: - UNREACHABLE(); - break; - } - } -} - -void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { - DoubleRegister value = ToDoubleRegister(instr->value()); - Register elements = ToRegister(instr->elements()); - Register key = no_reg; - Register scratch = scratch0(); - DoubleRegister double_scratch = double_scratch0(); - bool key_is_constant = instr->key()->IsConstantOperand(); - int constant_key = 0; - - // Calculate the effective address of the slot in the array to store the - // double value. - if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort(kArrayIndexConstantValueTooBig); - } - } else { - key = ToRegister(instr->key()); - } - int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); - bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); - bool keyMaybeNegative = instr->hydrogen()->IsDehoisted(); - int base_offset = instr->base_offset() + constant_key * kDoubleSize; - bool use_scratch = false; - intptr_t address_offset = base_offset; - - if (key_is_constant) { - // Memory references support up to 20-bits signed displacement in RXY form - if (!is_int20((address_offset))) { - __ mov(scratch, Operand(address_offset)); - address_offset = 0; - use_scratch = true; - } - } else { - use_scratch = true; - __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi, - keyMaybeNegative); - // Memory references support up to 20-bits signed displacement in RXY form - if (!is_int20((address_offset))) { - __ AddP(scratch, Operand(address_offset)); - address_offset = 0; - } - } - - if (instr->NeedsCanonicalization()) { - // Turn potential sNaN value into qNaN. 
- __ CanonicalizeNaN(double_scratch, value); - DCHECK(address_offset >= 0); - if (use_scratch) - __ StoreDouble(double_scratch, - MemOperand(scratch, elements, address_offset)); - else - __ StoreDouble(double_scratch, MemOperand(elements, address_offset)); - } else { - if (use_scratch) - __ StoreDouble(value, MemOperand(scratch, elements, address_offset)); - else - __ StoreDouble(value, MemOperand(elements, address_offset)); - } -} - -void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { - HStoreKeyed* hinstr = instr->hydrogen(); - Register value = ToRegister(instr->value()); - Register elements = ToRegister(instr->elements()); - Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; - Register scratch = scratch0(); - int offset = instr->base_offset(); - - // Do the store. - if (instr->key()->IsConstantOperand()) { - DCHECK(!hinstr->NeedsWriteBarrier()); - LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); - offset += ToInteger32(const_operand) * kPointerSize; - } else { - // Even though the HLoadKeyed instruction forces the input - // representation for the key to be an integer, the input gets replaced - // during bound check elimination with the index argument to the bounds - // check, which can be tagged, so that case must be handled here, too. - if (hinstr->key()->representation().IsSmi()) { - __ SmiToPtrArrayOffset(scratch, key); - } else { - if (instr->hydrogen()->IsDehoisted() || - !CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) { -#if V8_TARGET_ARCH_S390X - // If array access is dehoisted, the key, being an int32, can contain - // a negative value, as needs to be sign-extended to 64-bit for - // memory access. - __ lgfr(key, key); -#endif - __ ShiftLeftP(scratch, key, Operand(kPointerSizeLog2)); - } else { - // Small optimization to reduce pathlength. After Bounds Check, - // the key is guaranteed to be non-negative. Leverage RISBG, - // which also performs zero-extension. - __ risbg(scratch, key, Operand(32 - kPointerSizeLog2), - Operand(63 - kPointerSizeLog2), Operand(kPointerSizeLog2), - true); - } - } - } - - Representation representation = hinstr->value()->representation(); - -#if V8_TARGET_ARCH_S390X - // 64-bit Smi optimization - if (representation.IsInteger32()) { - DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY); - DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS); - // Store int value directly to upper half of the smi. - offset = SmiWordOffset(offset); - } -#endif - - if (instr->key()->IsConstantOperand()) { - __ StoreRepresentation(value, MemOperand(elements, offset), representation, - scratch); - } else { - __ StoreRepresentation(value, MemOperand(scratch, elements, offset), - representation, r0); - } - - if (hinstr->NeedsWriteBarrier()) { - SmiCheck check_needed = hinstr->value()->type().IsHeapObject() - ? OMIT_SMI_CHECK - : INLINE_SMI_CHECK; - // Compute address of modified element and store it into key register. 
- if (instr->key()->IsConstantOperand()) { - __ lay(key, MemOperand(elements, offset)); - } else { - __ lay(key, MemOperand(scratch, elements, offset)); - } - __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs, - EMIT_REMEMBERED_SET, check_needed, - hinstr->PointersToHereCheckForValue()); - } -} - -void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { - // By cases: external, fast double - if (instr->is_fixed_typed_array()) { - DoStoreKeyedExternalArray(instr); - } else if (instr->hydrogen()->value()->representation().IsDouble()) { - DoStoreKeyedFixedDoubleArray(instr); - } else { - DoStoreKeyedFixedArray(instr); - } -} - -void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) { - class DeferredMaybeGrowElements final : public LDeferredCode { - public: - DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr) - : LDeferredCode(codegen), instr_(instr) {} - void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LMaybeGrowElements* instr_; - }; - - Register result = r2; - DeferredMaybeGrowElements* deferred = - new (zone()) DeferredMaybeGrowElements(this, instr); - LOperand* key = instr->key(); - LOperand* current_capacity = instr->current_capacity(); - - DCHECK(instr->hydrogen()->key()->representation().IsInteger32()); - DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32()); - DCHECK(key->IsConstantOperand() || key->IsRegister()); - DCHECK(current_capacity->IsConstantOperand() || - current_capacity->IsRegister()); - - if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) { - int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); - int32_t constant_capacity = - ToInteger32(LConstantOperand::cast(current_capacity)); - if (constant_key >= constant_capacity) { - // Deferred case. - __ b(deferred->entry()); - } - } else if (key->IsConstantOperand()) { - int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); - __ Cmp32(ToRegister(current_capacity), Operand(constant_key)); - __ ble(deferred->entry()); - } else if (current_capacity->IsConstantOperand()) { - int32_t constant_capacity = - ToInteger32(LConstantOperand::cast(current_capacity)); - __ Cmp32(ToRegister(key), Operand(constant_capacity)); - __ bge(deferred->entry()); - } else { - __ Cmp32(ToRegister(key), ToRegister(current_capacity)); - __ bge(deferred->entry()); - } - - if (instr->elements()->IsRegister()) { - __ Move(result, ToRegister(instr->elements())); - } else { - __ LoadP(result, ToMemOperand(instr->elements())); - } - - __ bind(deferred->exit()); -} - -void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) { - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - Register result = r2; - __ LoadImmP(result, Operand::Zero()); - - // We have to call a stub. 
- { - PushSafepointRegistersScope scope(this); - if (instr->object()->IsRegister()) { - __ Move(result, ToRegister(instr->object())); - } else { - __ LoadP(result, ToMemOperand(instr->object())); - } - - LOperand* key = instr->key(); - if (key->IsConstantOperand()) { - LConstantOperand* constant_key = LConstantOperand::cast(key); - int32_t int_key = ToInteger32(constant_key); - if (Smi::IsValid(int_key)) { - __ LoadSmiLiteral(r5, Smi::FromInt(int_key)); - } else { - Abort(kArrayIndexConstantValueTooBig); - } - } else { - Label is_smi; -#if V8_TARGET_ARCH_S390X - __ SmiTag(r5, ToRegister(key)); -#else - // Deopt if the key is outside Smi range. The stub expects Smi and would - // bump the elements into dictionary mode (and trigger a deopt) anyways. - __ Add32(r5, ToRegister(key), ToRegister(key)); - __ b(nooverflow, &is_smi); - __ PopSafepointRegisters(); - DeoptimizeIf(al, instr, DeoptimizeReason::kOverflow, cr0); - __ bind(&is_smi); -#endif - } - - GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind()); - __ CallStub(&stub); - RecordSafepointWithLazyDeopt( - instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - __ StoreToSafepointRegisterSlot(result, result); - } - - // Deopt on smi, which means the elements array changed to dictionary mode. - __ TestIfSmi(result); - DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0); -} - -void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { - UNREACHABLE(); -} - -void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { - Register object = ToRegister(instr->object()); - Register temp1 = ToRegister(instr->temp1()); - Register temp2 = ToRegister(instr->temp2()); - Label no_memento_found; - __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found); - DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound); - __ bind(&no_memento_found); -} - -void LCodeGen::DoStringAdd(LStringAdd* instr) { - DCHECK(ToRegister(instr->context()).is(cp)); - DCHECK(ToRegister(instr->left()).is(r3)); - DCHECK(ToRegister(instr->right()).is(r2)); - StringAddStub stub(isolate(), instr->hydrogen()->flags(), - instr->hydrogen()->pretenure_flag()); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); -} - -void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { - class DeferredStringCharCodeAt final : public LDeferredCode { - public: - DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) - : LDeferredCode(codegen), instr_(instr) {} - void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LStringCharCodeAt* instr_; - }; - - DeferredStringCharCodeAt* deferred = - new (zone()) DeferredStringCharCodeAt(this, instr); - - StringCharLoadGenerator::Generate( - masm(), ToRegister(instr->string()), ToRegister(instr->index()), - ToRegister(instr->result()), deferred->entry()); - __ bind(deferred->exit()); -} - -void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { - Register string = ToRegister(instr->string()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - __ LoadImmP(result, Operand::Zero()); - - PushSafepointRegistersScope scope(this); - __ push(string); - // Push the index as a smi. This is safe because of the checks in - // DoStringCharCodeAt above. 
- if (instr->index()->IsConstantOperand()) { - int const_index = ToInteger32(LConstantOperand::cast(instr->index())); - __ LoadSmiLiteral(scratch, Smi::FromInt(const_index)); - __ push(scratch); - } else { - Register index = ToRegister(instr->index()); - __ SmiTag(index); - __ push(index); - } - CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr, - instr->context()); - __ AssertSmi(r2); - __ SmiUntag(r2); - __ StoreToSafepointRegisterSlot(r2, result); -} - -void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { - class DeferredStringCharFromCode final : public LDeferredCode { - public: - DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) - : LDeferredCode(codegen), instr_(instr) {} - void Generate() override { - codegen()->DoDeferredStringCharFromCode(instr_); - } - LInstruction* instr() override { return instr_; } - - private: - LStringCharFromCode* instr_; - }; - - DeferredStringCharFromCode* deferred = - new (zone()) DeferredStringCharFromCode(this, instr); - - DCHECK(instr->hydrogen()->value()->representation().IsInteger32()); - Register char_code = ToRegister(instr->char_code()); - Register result = ToRegister(instr->result()); - DCHECK(!char_code.is(result)); - - __ CmpLogicalP(char_code, Operand(String::kMaxOneByteCharCode)); - __ bgt(deferred->entry()); - __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); - __ ShiftLeftP(r0, char_code, Operand(kPointerSizeLog2)); - __ AddP(result, r0); - __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize)); - __ CompareRoot(result, Heap::kUndefinedValueRootIndex); - __ beq(deferred->entry()); - __ bind(deferred->exit()); -} - -void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { - Register char_code = ToRegister(instr->char_code()); - Register result = ToRegister(instr->result()); - - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. 
- __ LoadImmP(result, Operand::Zero()); - - PushSafepointRegistersScope scope(this); - __ SmiTag(char_code); - __ push(char_code); - CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr, - instr->context()); - __ StoreToSafepointRegisterSlot(r2, result); -} - -void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { - LOperand* input = instr->value(); - DCHECK(input->IsRegister() || input->IsStackSlot()); - LOperand* output = instr->result(); - DCHECK(output->IsDoubleRegister()); - if (input->IsStackSlot()) { - Register scratch = scratch0(); - __ LoadP(scratch, ToMemOperand(input)); - __ ConvertIntToDouble(ToDoubleRegister(output), scratch); - } else { - __ ConvertIntToDouble(ToDoubleRegister(output), ToRegister(input)); - } -} - -void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { - LOperand* input = instr->value(); - LOperand* output = instr->result(); - __ ConvertUnsignedIntToDouble(ToDoubleRegister(output), ToRegister(input)); -} - -void LCodeGen::DoNumberTagI(LNumberTagI* instr) { - class DeferredNumberTagI final : public LDeferredCode { - public: - DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr) - : LDeferredCode(codegen), instr_(instr) {} - void Generate() override { - codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(), - instr_->temp2(), SIGNED_INT32); - } - LInstruction* instr() override { return instr_; } - - private: - LNumberTagI* instr_; - }; - - Register src = ToRegister(instr->value()); - Register dst = ToRegister(instr->result()); - - DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr); -#if V8_TARGET_ARCH_S390X - __ SmiTag(dst, src); -#else - // Add src to itself to defect SMI overflow. - __ Add32(dst, src, src); - __ b(overflow, deferred->entry()); -#endif - __ bind(deferred->exit()); -} - -void LCodeGen::DoNumberTagU(LNumberTagU* instr) { - class DeferredNumberTagU final : public LDeferredCode { - public: - DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) - : LDeferredCode(codegen), instr_(instr) {} - void Generate() override { - codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(), - instr_->temp2(), UNSIGNED_INT32); - } - LInstruction* instr() override { return instr_; } - - private: - LNumberTagU* instr_; - }; - - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - - DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr); - __ CmpLogicalP(input, Operand(Smi::kMaxValue)); - __ bgt(deferred->entry()); - __ SmiTag(result, input); - __ bind(deferred->exit()); -} - -void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value, - LOperand* temp1, LOperand* temp2, - IntegerSignedness signedness) { - Label done, slow; - Register src = ToRegister(value); - Register dst = ToRegister(instr->result()); - Register tmp1 = scratch0(); - Register tmp2 = ToRegister(temp1); - Register tmp3 = ToRegister(temp2); - DoubleRegister dbl_scratch = double_scratch0(); - - if (signedness == SIGNED_INT32) { - // There was overflow, so bits 30 and 31 of the original integer - // disagree. Try to allocate a heap number in new space and store - // the value in there. If that fails, call the runtime system. 
- if (dst.is(src)) { - __ SmiUntag(src, dst); - __ xilf(src, Operand(HeapNumber::kSignMask)); - } - __ ConvertIntToDouble(dbl_scratch, src); - } else { - __ ConvertUnsignedIntToDouble(dbl_scratch, src); - } - - if (FLAG_inline_new) { - __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow); - __ b(&done); - } - - // Slow case: Call the runtime system to do the number allocation. - __ bind(&slow); - { - // TODO(3095996): Put a valid pointer value in the stack slot where the - // result register is stored, as this register is in the pointer map, but - // contains an integer value. - __ LoadImmP(dst, Operand::Zero()); - - // Preserve the value of all registers. - PushSafepointRegistersScope scope(this); - // Reset the context register. - if (!dst.is(cp)) { - __ LoadImmP(cp, Operand::Zero()); - } - __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); - RecordSafepointWithRegisters(instr->pointer_map(), 0, - Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(r2, dst); - } - - // Done. Put the value in dbl_scratch into the value of the allocated heap - // number. - __ bind(&done); - __ StoreDouble(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset)); -} - -void LCodeGen::DoNumberTagD(LNumberTagD* instr) { - class DeferredNumberTagD final : public LDeferredCode { - public: - DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) - : LDeferredCode(codegen), instr_(instr) {} - void Generate() override { codegen()->DoDeferredNumberTagD(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LNumberTagD* instr_; - }; - - DoubleRegister input_reg = ToDoubleRegister(instr->value()); - Register scratch = scratch0(); - Register reg = ToRegister(instr->result()); - Register temp1 = ToRegister(instr->temp()); - Register temp2 = ToRegister(instr->temp2()); - - DeferredNumberTagD* deferred = new (zone()) DeferredNumberTagD(this, instr); - if (FLAG_inline_new) { - __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry()); - } else { - __ b(deferred->entry()); - } - __ bind(deferred->exit()); - __ StoreDouble(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset)); -} - -void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - Register reg = ToRegister(instr->result()); - __ LoadImmP(reg, Operand::Zero()); - - PushSafepointRegistersScope scope(this); - // Reset the context register. 
- if (!reg.is(cp)) { - __ LoadImmP(cp, Operand::Zero()); - } - __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); - RecordSafepointWithRegisters(instr->pointer_map(), 0, - Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(r2, reg); -} - -void LCodeGen::DoSmiTag(LSmiTag* instr) { - HChange* hchange = instr->hydrogen(); - Register input = ToRegister(instr->value()); - Register output = ToRegister(instr->result()); - if (hchange->CheckFlag(HValue::kCanOverflow) && - hchange->value()->CheckFlag(HValue::kUint32)) { - __ TestUnsignedSmiCandidate(input, r0); - DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, cr0); - } -#if !V8_TARGET_ARCH_S390X - if (hchange->CheckFlag(HValue::kCanOverflow) && - !hchange->value()->CheckFlag(HValue::kUint32)) { - __ SmiTagCheckOverflow(output, input, r0); - DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0); - } else { -#endif - __ SmiTag(output, input); -#if !V8_TARGET_ARCH_S390X - } -#endif -} - -void LCodeGen::DoSmiUntag(LSmiUntag* instr) { - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - if (instr->needs_check()) { - __ tmll(input, Operand(kHeapObjectTag)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0); - __ SmiUntag(result, input); - } else { - __ SmiUntag(result, input); - } -} - -void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, - DoubleRegister result_reg, - NumberUntagDMode mode) { - bool can_convert_undefined_to_nan = instr->truncating(); - bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); - - Register scratch = scratch0(); - DCHECK(!result_reg.is(double_scratch0())); - - Label convert, load_smi, done; - - if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { - // Smi check. - __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); - - // Heap number map check. - __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); - __ CmpP(scratch, RootMemOperand(Heap::kHeapNumberMapRootIndex)); - - if (can_convert_undefined_to_nan) { - __ bne(&convert, Label::kNear); - } else { - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber); - } - // load heap number - __ LoadDouble(result_reg, - FieldMemOperand(input_reg, HeapNumber::kValueOffset)); - if (deoptimize_on_minus_zero) { - __ TestDoubleIsMinusZero(result_reg, scratch, ip); - DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); - } - __ b(&done, Label::kNear); - if (can_convert_undefined_to_nan) { - __ bind(&convert); - // Convert undefined (and hole) to NaN. 
- __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined); - __ LoadRoot(scratch, Heap::kNanValueRootIndex); - __ LoadDouble(result_reg, - FieldMemOperand(scratch, HeapNumber::kValueOffset)); - __ b(&done, Label::kNear); - } - } else { - __ SmiUntag(scratch, input_reg); - DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); - } - // Smi to double register conversion - __ bind(&load_smi); - // scratch: untagged value of input_reg - __ ConvertIntToDouble(result_reg, scratch); - __ bind(&done); -} - -void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { - Register input_reg = ToRegister(instr->value()); - Register scratch1 = scratch0(); - Register scratch2 = ToRegister(instr->temp()); - DoubleRegister double_scratch = double_scratch0(); - DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2()); - - DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2)); - DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1)); - - Label done; - - // Heap number map check. - __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset)); - __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex); - - if (instr->truncating()) { - Label truncate; - __ beq(&truncate); - __ CompareInstanceType(scratch1, scratch1, ODDBALL_TYPE); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball); - __ bind(&truncate); - __ LoadRR(scratch2, input_reg); - __ TruncateHeapNumberToI(input_reg, scratch2); - } else { - // Deoptimize if we don't have a heap number. - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber); - - __ LoadDouble(double_scratch2, - FieldMemOperand(input_reg, HeapNumber::kValueOffset)); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // preserve heap number pointer in scratch2 for minus zero check below - __ LoadRR(scratch2, input_reg); - } - __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1, - double_scratch); - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN); - - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - __ CmpP(input_reg, Operand::Zero()); - __ bne(&done, Label::kNear); - __ TestHeapNumberSign(scratch2, scratch1); - DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); - } - } - __ bind(&done); -} - -void LCodeGen::DoTaggedToI(LTaggedToI* instr) { - class DeferredTaggedToI final : public LDeferredCode { - public: - DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) - : LDeferredCode(codegen), instr_(instr) {} - void Generate() override { codegen()->DoDeferredTaggedToI(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LTaggedToI* instr_; - }; - - LOperand* input = instr->value(); - DCHECK(input->IsRegister()); - DCHECK(input->Equals(instr->result())); - - Register input_reg = ToRegister(input); - - if (instr->hydrogen()->value()->representation().IsSmi()) { - __ SmiUntag(input_reg); - } else { - DeferredTaggedToI* deferred = new (zone()) DeferredTaggedToI(this, instr); - - // Branch to deferred code if the input is a HeapObject. 
- __ JumpIfNotSmi(input_reg, deferred->entry()); - - __ SmiUntag(input_reg); - __ bind(deferred->exit()); - } -} - -void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { - LOperand* input = instr->value(); - DCHECK(input->IsRegister()); - LOperand* result = instr->result(); - DCHECK(result->IsDoubleRegister()); - - Register input_reg = ToRegister(input); - DoubleRegister result_reg = ToDoubleRegister(result); - - HValue* value = instr->hydrogen()->value(); - NumberUntagDMode mode = value->representation().IsSmi() - ? NUMBER_CANDIDATE_IS_SMI - : NUMBER_CANDIDATE_IS_ANY_TAGGED; - - EmitNumberUntagD(instr, input_reg, result_reg, mode); -} - -void LCodeGen::DoDoubleToI(LDoubleToI* instr) { - Register result_reg = ToRegister(instr->result()); - Register scratch1 = scratch0(); - DoubleRegister double_input = ToDoubleRegister(instr->value()); - DoubleRegister double_scratch = double_scratch0(); - - if (instr->truncating()) { - __ TruncateDoubleToI(result_reg, double_input); - } else { - __ TryDoubleToInt32Exact(result_reg, double_input, scratch1, - double_scratch); - // Deoptimize if the input wasn't a int32 (inside a double). - DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label done; - __ CmpP(result_reg, Operand::Zero()); - __ bne(&done, Label::kNear); - __ TestDoubleSign(double_input, scratch1); - DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); - __ bind(&done); - } - } -} - -void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { - Register result_reg = ToRegister(instr->result()); - Register scratch1 = scratch0(); - DoubleRegister double_input = ToDoubleRegister(instr->value()); - DoubleRegister double_scratch = double_scratch0(); - - if (instr->truncating()) { - __ TruncateDoubleToI(result_reg, double_input); - } else { - __ TryDoubleToInt32Exact(result_reg, double_input, scratch1, - double_scratch); - // Deoptimize if the input wasn't a int32 (inside a double). 
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label done; - __ CmpP(result_reg, Operand::Zero()); - __ bne(&done, Label::kNear); - __ TestDoubleSign(double_input, scratch1); - DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); - __ bind(&done); - } - } -#if V8_TARGET_ARCH_S390X - __ SmiTag(result_reg); -#else - __ SmiTagCheckOverflow(result_reg, r0); - DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0); -#endif -} - -void LCodeGen::DoCheckSmi(LCheckSmi* instr) { - LOperand* input = instr->value(); - if (input->IsRegister()) { - __ TestIfSmi(ToRegister(input)); - } else if (input->IsStackSlot()) { - MemOperand value = ToMemOperand(input); -#if !V8_TARGET_LITTLE_ENDIAN -#if V8_TARGET_ARCH_S390X - __ TestIfSmi(MemOperand(value.rb(), value.offset() + 7)); -#else - __ TestIfSmi(MemOperand(value.rb(), value.offset() + 3)); -#endif -#else - __ TestIfSmi(value); -#endif - } - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0); -} - -void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - LOperand* input = instr->value(); - if (input->IsRegister()) { - __ TestIfSmi(ToRegister(input)); - } else if (input->IsStackSlot()) { - MemOperand value = ToMemOperand(input); -#if !V8_TARGET_LITTLE_ENDIAN -#if V8_TARGET_ARCH_S390X - __ TestIfSmi(MemOperand(value.rb(), value.offset() + 7)); -#else - __ TestIfSmi(MemOperand(value.rb(), value.offset() + 3)); -#endif -#else - __ TestIfSmi(value); -#endif - } else { - UNIMPLEMENTED(); - } - DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0); - } -} - -void LCodeGen::DoCheckArrayBufferNotNeutered( - LCheckArrayBufferNotNeutered* instr) { - Register view = ToRegister(instr->view()); - Register scratch = scratch0(); - - __ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset)); - __ LoadlW(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset)); - __ And(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, cr0); -} - -void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { - Register input = ToRegister(instr->value()); - Register scratch = scratch0(); - - __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); - - if (instr->hydrogen()->is_interval_check()) { - InstanceType first; - InstanceType last; - instr->hydrogen()->GetCheckInterval(&first, &last); - - __ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset), - Operand(first)); - - // If there is only one type in the interval check for equality. - if (first == last) { - DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType); - } else { - DeoptimizeIf(lt, instr, DeoptimizeReason::kWrongInstanceType); - // Omit check for the last type. - if (last != LAST_TYPE) { - __ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset), - Operand(last)); - DeoptimizeIf(gt, instr, DeoptimizeReason::kWrongInstanceType); - } - } - } else { - uint8_t mask; - uint8_t tag; - instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); - - __ LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); - - if (base::bits::IsPowerOfTwo32(mask)) { - DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); - __ AndP(scratch, Operand(mask)); - DeoptimizeIf(tag == 0 ? 
ne : eq, instr, - DeoptimizeReason::kWrongInstanceType); - } else { - __ AndP(scratch, Operand(mask)); - __ CmpP(scratch, Operand(tag)); - DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType); - } - } -} - -void LCodeGen::DoCheckValue(LCheckValue* instr) { - Register reg = ToRegister(instr->value()); - Handle<HeapObject> object = instr->hydrogen()->object().handle(); - AllowDeferredHandleDereference smi_check; - if (isolate()->heap()->InNewSpace(*object)) { - Register reg = ToRegister(instr->value()); - Handle<Cell> cell = isolate()->factory()->NewCell(object); - __ mov(ip, Operand(cell)); - __ CmpP(reg, FieldMemOperand(ip, Cell::kValueOffset)); - } else { - __ CmpP(reg, Operand(object)); - } - DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch); -} - -void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { - Register temp = ToRegister(instr->temp()); - Label deopt, done; - // If the map is not deprecated the migration attempt does not make sense. - __ LoadP(temp, FieldMemOperand(object, HeapObject::kMapOffset)); - __ LoadlW(temp, FieldMemOperand(temp, Map::kBitField3Offset)); - __ TestBitMask(temp, Map::Deprecated::kMask, r0); - __ beq(&deopt); - - { - PushSafepointRegistersScope scope(this); - __ push(object); - __ LoadImmP(cp, Operand::Zero()); - __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); - RecordSafepointWithRegisters(instr->pointer_map(), 1, - Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(r2, temp); - } - __ TestIfSmi(temp); - __ bne(&done); - - __ bind(&deopt); - // In case of "al" condition the operand is not used so just pass cr0 there. - DeoptimizeIf(al, instr, DeoptimizeReason::kInstanceMigrationFailed, cr0); - - __ bind(&done); -} - -void LCodeGen::DoCheckMaps(LCheckMaps* instr) { - class DeferredCheckMaps final : public LDeferredCode { - public: - DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) - : LDeferredCode(codegen), instr_(instr), object_(object) { - SetExit(check_maps()); - } - void Generate() override { - codegen()->DoDeferredInstanceMigration(instr_, object_); - } - Label* check_maps() { return &check_maps_; } - LInstruction* instr() override { return instr_; } - - private: - LCheckMaps* instr_; - Label check_maps_; - Register object_; - }; - - if (instr->hydrogen()->IsStabilityCheck()) { - const UniqueSet<Map>* maps = instr->hydrogen()->maps(); - for (int i = 0; i < maps->size(); ++i) { - AddStabilityDependency(maps->at(i).handle()); - } - return; - } - - LOperand* input = instr->value(); - DCHECK(input->IsRegister()); - Register reg = ToRegister(input); - - DeferredCheckMaps* deferred = NULL; - if (instr->hydrogen()->HasMigrationTarget()) { - deferred = new (zone()) DeferredCheckMaps(this, instr, reg); - __ bind(deferred->check_maps()); - } - - const UniqueSet<Map>* maps = instr->hydrogen()->maps(); - Label success; - for (int i = 0; i < maps->size() - 1; i++) { - Handle<Map> map = maps->at(i).handle(); - __ CompareMap(reg, map, &success); - __ beq(&success); - } - - Handle<Map> map = maps->at(maps->size() - 1).handle(); - __ CompareMap(reg, map, &success); - if (instr->hydrogen()->HasMigrationTarget()) { - __ bne(deferred->entry()); - } else { - DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap); - } - - __ bind(&success); -} - -void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { - DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); - Register result_reg = ToRegister(instr->result()); - __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0()); -} - -void
LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { - Register unclamped_reg = ToRegister(instr->unclamped()); - Register result_reg = ToRegister(instr->result()); - __ ClampUint8(result_reg, unclamped_reg); -} - -void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { - Register scratch = scratch0(); - Register input_reg = ToRegister(instr->unclamped()); - Register result_reg = ToRegister(instr->result()); - DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); - Label is_smi, done, heap_number; - - // Both smi and heap number cases are handled. - __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); - - // Check for heap number - __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); - __ CmpP(scratch, Operand(factory()->heap_number_map())); - __ beq(&heap_number, Label::kNear); - - // Check for undefined. Undefined is converted to zero for clamping - // conversions. - __ CmpP(input_reg, Operand(factory()->undefined_value())); - DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined); - __ LoadImmP(result_reg, Operand::Zero()); - __ b(&done, Label::kNear); - - // Heap number - __ bind(&heap_number); - __ LoadDouble(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); - __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0()); - __ b(&done, Label::kNear); - - // smi - __ bind(&is_smi); - __ ClampUint8(result_reg, result_reg); - - __ bind(&done); -} - -void LCodeGen::DoAllocate(LAllocate* instr) { - class DeferredAllocate final : public LDeferredCode { - public: - DeferredAllocate(LCodeGen* codegen, LAllocate* instr) - : LDeferredCode(codegen), instr_(instr) {} - void Generate() override { codegen()->DoDeferredAllocate(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LAllocate* instr_; - }; - - DeferredAllocate* deferred = new (zone()) DeferredAllocate(this, instr); - - Register result = ToRegister(instr->result()); - Register scratch = ToRegister(instr->temp1()); - Register scratch2 = ToRegister(instr->temp2()); - - // Allocate memory for the object. 
- AllocationFlags flags = NO_ALLOCATION_FLAGS; - if (instr->hydrogen()->MustAllocateDoubleAligned()) { - flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); - } - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - flags = static_cast<AllocationFlags>(flags | PRETENURE); - } - - if (instr->hydrogen()->IsAllocationFoldingDominator()) { - flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR); - } - - DCHECK(!instr->hydrogen()->IsAllocationFolded()); - - if (instr->size()->IsConstantOperand()) { - int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - CHECK(size <= kMaxRegularHeapObjectSize); - __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); - } else { - Register size = ToRegister(instr->size()); - __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); - } - - __ bind(deferred->exit()); - - if (instr->hydrogen()->MustPrefillWithFiller()) { - if (instr->size()->IsConstantOperand()) { - int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - __ LoadIntLiteral(scratch, size); - } else { - scratch = ToRegister(instr->size()); - } - __ lay(scratch, MemOperand(scratch, -kPointerSize)); - Label loop; - __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); - __ bind(&loop); - __ StoreP(scratch2, MemOperand(scratch, result, -kHeapObjectTag)); -#if V8_TARGET_ARCH_S390X - __ lay(scratch, MemOperand(scratch, -kPointerSize)); -#else - // TODO(joransiu): Improve the following sequence. - // Need to use AHI instead of LAY as top nibble is not set with LAY, causing - // incorrect result with the signed compare - __ AddP(scratch, Operand(-kPointerSize)); -#endif - __ CmpP(scratch, Operand::Zero()); - __ bge(&loop); - } -} - -void LCodeGen::DoDeferredAllocate(LAllocate* instr) { - Register result = ToRegister(instr->result()); - - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - __ LoadSmiLiteral(result, Smi::kZero); - - PushSafepointRegistersScope scope(this); - if (instr->size()->IsRegister()) { - Register size = ToRegister(instr->size()); - DCHECK(!size.is(result)); - __ SmiTag(size); - __ push(size); - } else { - int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); -#if !V8_TARGET_ARCH_S390X - if (size >= 0 && size <= Smi::kMaxValue) { -#endif - __ Push(Smi::FromInt(size)); -#if !V8_TARGET_ARCH_S390X - } else { - // We should never get here at runtime => abort - __ stop("invalid allocation size"); - return; - } -#endif - } - - int flags = AllocateDoubleAlignFlag::encode( - instr->hydrogen()->MustAllocateDoubleAligned()); - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - flags = AllocateTargetSpace::update(flags, OLD_SPACE); - } else { - flags = AllocateTargetSpace::update(flags, NEW_SPACE); - } - __ Push(Smi::FromInt(flags)); - - CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr, - instr->context()); - __ StoreToSafepointRegisterSlot(r2, result); - - if (instr->hydrogen()->IsAllocationFoldingDominator()) { - AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS; - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE); - } - // If the allocation folding dominator allocate triggered a GC, allocation - // happend in the runtime.
We have to reset the top pointer to virtually - undo the allocation. - ExternalReference allocation_top = - AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags); - Register top_address = scratch0(); - __ SubP(r2, r2, Operand(kHeapObjectTag)); - __ mov(top_address, Operand(allocation_top)); - __ StoreP(r2, MemOperand(top_address)); - __ AddP(r2, r2, Operand(kHeapObjectTag)); - } -} - -void LCodeGen::DoFastAllocate(LFastAllocate* instr) { - DCHECK(instr->hydrogen()->IsAllocationFolded()); - DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator()); - Register result = ToRegister(instr->result()); - Register scratch1 = ToRegister(instr->temp1()); - Register scratch2 = ToRegister(instr->temp2()); - - AllocationFlags flags = ALLOCATION_FOLDED; - if (instr->hydrogen()->MustAllocateDoubleAligned()) { - flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); - } - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - flags = static_cast<AllocationFlags>(flags | PRETENURE); - } - if (instr->size()->IsConstantOperand()) { - int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - CHECK(size <= kMaxRegularHeapObjectSize); - __ FastAllocate(size, result, scratch1, scratch2, flags); - } else { - Register size = ToRegister(instr->size()); - __ FastAllocate(size, result, scratch1, scratch2, flags); - } -} - -void LCodeGen::DoTypeof(LTypeof* instr) { - DCHECK(ToRegister(instr->value()).is(r5)); - DCHECK(ToRegister(instr->result()).is(r2)); - Label end, do_call; - Register value_register = ToRegister(instr->value()); - __ JumpIfNotSmi(value_register, &do_call); - __ mov(r2, Operand(isolate()->factory()->number_string())); - __ b(&end); - __ bind(&do_call); - Callable callable = Builtins::CallableFor(isolate(), Builtins::kTypeof); - CallCode(callable.code(), RelocInfo::CODE_TARGET, instr); - __ bind(&end); -} - -void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { - Register input = ToRegister(instr->value()); - - Condition final_branch_condition = - EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), input, - instr->type_literal()); - if (final_branch_condition != kNoCondition) { - EmitBranch(instr, final_branch_condition); - } -} - -Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label, - Register input, Handle<String> type_name) { - Condition final_branch_condition = kNoCondition; - Register scratch = scratch0(); - Factory* factory = isolate()->factory(); - if (String::Equals(type_name, factory->number_string())) { - __ JumpIfSmi(input, true_label); - __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); - __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); - final_branch_condition = eq; - - } else if (String::Equals(type_name, factory->string_string())) { - __ JumpIfSmi(input, false_label); - __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE); - final_branch_condition = lt; - - } else if (String::Equals(type_name, factory->symbol_string())) { - __ JumpIfSmi(input, false_label); - __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE); - final_branch_condition = eq; - - } else if (String::Equals(type_name, factory->boolean_string())) { - __ CompareRoot(input, Heap::kTrueValueRootIndex); - __ beq(true_label); - __ CompareRoot(input, Heap::kFalseValueRootIndex); - final_branch_condition = eq; - - } else if (String::Equals(type_name, factory->undefined_string())) { - __ CompareRoot(input, Heap::kNullValueRootIndex); - __ beq(false_label); - __ JumpIfSmi(input, false_label); - //
Check for undetectable objects => true. - __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); - __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); - __ ExtractBit(r0, scratch, Map::kIsUndetectable); - __ CmpP(r0, Operand::Zero()); - final_branch_condition = ne; - - } else if (String::Equals(type_name, factory->function_string())) { - __ JumpIfSmi(input, false_label); - __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); - __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); - __ AndP(scratch, scratch, - Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable))); - __ CmpP(scratch, Operand(1 << Map::kIsCallable)); - final_branch_condition = eq; - - } else if (String::Equals(type_name, factory->object_string())) { - __ JumpIfSmi(input, false_label); - __ CompareRoot(input, Heap::kNullValueRootIndex); - __ beq(true_label); - STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); - __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE); - __ blt(false_label); - // Check for callable or undetectable objects => false. - __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); - __ AndP(r0, scratch, - Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable))); - __ CmpP(r0, Operand::Zero()); - final_branch_condition = eq; - - } else { - __ b(false_label); - } - - return final_branch_condition; -} - -void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { - if (info()->ShouldEnsureSpaceForLazyDeopt()) { - // Ensure that we have enough space after the previous lazy-bailout - // instruction for patching the code here. - int current_pc = masm()->pc_offset(); - if (current_pc < last_lazy_deopt_pc_ + space_needed) { - int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; - DCHECK_EQ(0, padding_size % 2); - while (padding_size > 0) { - __ nop(); - padding_size -= 2; - } - } - } - last_lazy_deopt_pc_ = masm()->pc_offset(); -} - -void LCodeGen::DoLazyBailout(LLazyBailout* instr) { - last_lazy_deopt_pc_ = masm()->pc_offset(); - DCHECK(instr->HasEnvironment()); - LEnvironment* env = instr->environment(); - RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); - safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); -} - -void LCodeGen::DoDeoptimize(LDeoptimize* instr) { - Deoptimizer::BailoutType type = instr->hydrogen()->type(); - // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the - // needed return address), even though the implementation of LAZY and EAGER is - // now identical. When LAZY is eventually completely folded into EAGER, remove - // the special case below. - if (info()->IsStub() && type == Deoptimizer::EAGER) { - type = Deoptimizer::LAZY; - } - - DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type); -} - -void LCodeGen::DoDummy(LDummy* instr) { - // Nothing to see here, move on! -} - -void LCodeGen::DoDummyUse(LDummyUse* instr) { - // Nothing to see here, move on! 
-} - -void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { - PushSafepointRegistersScope scope(this); - LoadContextFromDeferred(instr->context()); - __ CallRuntimeSaveDoubles(Runtime::kStackGuard); - RecordSafepointWithLazyDeopt( - instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - DCHECK(instr->HasEnvironment()); - LEnvironment* env = instr->environment(); - safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); -} - -void LCodeGen::DoStackCheck(LStackCheck* instr) { - class DeferredStackCheck final : public LDeferredCode { - public: - DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) - : LDeferredCode(codegen), instr_(instr) {} - void Generate() override { codegen()->DoDeferredStackCheck(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LStackCheck* instr_; - }; - - DCHECK(instr->HasEnvironment()); - LEnvironment* env = instr->environment(); - // There is no LLazyBailout instruction for stack-checks. We have to - // prepare for lazy deoptimization explicitly here. - if (instr->hydrogen()->is_function_entry()) { - // Perform stack overflow check. - Label done; - __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex)); - __ bge(&done, Label::kNear); - DCHECK(instr->context()->IsRegister()); - DCHECK(ToRegister(instr->context()).is(cp)); - CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET, - instr); - __ bind(&done); - } else { - DCHECK(instr->hydrogen()->is_backwards_branch()); - // Perform stack overflow check if this goto needs it before jumping. - DeferredStackCheck* deferred_stack_check = - new (zone()) DeferredStackCheck(this, instr); - __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex)); - __ blt(deferred_stack_check->entry()); - EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); - __ bind(instr->done_label()); - deferred_stack_check->SetExit(instr->done_label()); - RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); - // Don't record a deoptimization index for the safepoint here. - // This will be done explicitly when emitting call and the safepoint in - // the deferred code. - } -} - -void LCodeGen::DoOsrEntry(LOsrEntry* instr) { - // This is a pseudo-instruction that ensures that the environment here is - // properly registered for deoptimization and records the assembler's PC - // offset. - LEnvironment* environment = instr->environment(); - - // If the environment were already registered, we would have no way of - // backpatching it with the spill slot operands. - DCHECK(!environment->HasBeenRegistered()); - RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); - - GenerateOsrPrologue(); -} - -void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { - Label use_cache, call_runtime; - __ CheckEnumCache(&call_runtime); - - __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset)); - __ b(&use_cache); - - // Get the set of properties to enumerate. 
- __ bind(&call_runtime); - __ push(r2); - CallRuntime(Runtime::kForInEnumerate, instr); - __ bind(&use_cache); -} - -void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { - Register map = ToRegister(instr->map()); - Register result = ToRegister(instr->result()); - Label load_cache, done; - __ EnumLength(result, map); - __ CmpSmiLiteral(result, Smi::kZero, r0); - __ bne(&load_cache, Label::kNear); - __ mov(result, Operand(isolate()->factory()->empty_fixed_array())); - __ b(&done, Label::kNear); - - __ bind(&load_cache); - __ LoadInstanceDescriptors(map, result); - __ LoadP(result, - FieldMemOperand(result, DescriptorArray::kEnumCacheBridgeOffset)); - __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); - __ CmpP(result, Operand::Zero()); - DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache); - - __ bind(&done); -} - -void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { - Register object = ToRegister(instr->value()); - Register map = ToRegister(instr->map()); - __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); - __ CmpP(map, scratch0()); - DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap); -} - -void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, - Register result, Register object, - Register index) { - PushSafepointRegistersScope scope(this); - __ Push(object, index); - __ LoadImmP(cp, Operand::Zero()); - __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); - RecordSafepointWithRegisters(instr->pointer_map(), 2, - Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(r2, result); -} - -void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { - class DeferredLoadMutableDouble final : public LDeferredCode { - public: - DeferredLoadMutableDouble(LCodeGen* codegen, LLoadFieldByIndex* instr, - Register result, Register object, Register index) - : LDeferredCode(codegen), - instr_(instr), - result_(result), - object_(object), - index_(index) {} - void Generate() override { - codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_); - } - LInstruction* instr() override { return instr_; } - - private: - LLoadFieldByIndex* instr_; - Register result_; - Register object_; - Register index_; - }; - - Register object = ToRegister(instr->object()); - Register index = ToRegister(instr->index()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - - DeferredLoadMutableDouble* deferred; - deferred = new (zone()) - DeferredLoadMutableDouble(this, instr, result, object, index); - - Label out_of_object, done; - - __ TestBitMask(index, reinterpret_cast(Smi::FromInt(1)), r0); - __ bne(deferred->entry()); - __ ShiftRightArithP(index, index, Operand(1)); - - __ CmpP(index, Operand::Zero()); - __ blt(&out_of_object, Label::kNear); - - __ SmiToPtrArrayOffset(r0, index); - __ AddP(scratch, object, r0); - __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize)); - - __ b(&done, Label::kNear); - - __ bind(&out_of_object); - __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); - // Index is equal to negated out of object property index plus 1. 
- __ SmiToPtrArrayOffset(r0, index); - __ SubP(scratch, result, r0); - __ LoadP(result, - FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize)); - __ bind(deferred->exit()); - __ bind(&done); -} - -#undef __ - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/s390/lithium-codegen-s390.h b/src/crankshaft/s390/lithium-codegen-s390.h deleted file mode 100644 index a8d59ff5b1..0000000000 --- a/src/crankshaft/s390/lithium-codegen-s390.h +++ /dev/null @@ -1,342 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_S390_LITHIUM_CODEGEN_S390_H_ -#define V8_CRANKSHAFT_S390_LITHIUM_CODEGEN_S390_H_ - -#include "src/ast/scopes.h" -#include "src/crankshaft/lithium-codegen.h" -#include "src/crankshaft/s390/lithium-gap-resolver-s390.h" -#include "src/crankshaft/s390/lithium-s390.h" -#include "src/deoptimizer.h" -#include "src/safepoint-table.h" -#include "src/utils.h" - -namespace v8 { -namespace internal { - -// Forward declarations. -class LDeferredCode; -class SafepointGenerator; - -class LCodeGen : public LCodeGenBase { - public: - LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) - : LCodeGenBase(chunk, assembler, info), - jump_table_(4, info->zone()), - scope_(info->scope()), - deferred_(8, info->zone()), - frame_is_built_(false), - safepoints_(info->zone()), - resolver_(this), - expected_safepoint_kind_(Safepoint::kSimple) { - PopulateDeoptimizationLiteralsWithInlinedFunctions(); - } - - int LookupDestination(int block_id) const { - return chunk()->LookupDestination(block_id); - } - - bool IsNextEmittedBlock(int block_id) const { - return LookupDestination(block_id) == GetNextEmittedBlock(); - } - - bool NeedsEagerFrame() const { - return HasAllocatedStackSlots() || info()->is_non_deferred_calling() || - !info()->IsStub() || info()->requires_frame(); - } - bool NeedsDeferredFrame() const { - return !NeedsEagerFrame() && info()->is_deferred_calling(); - } - - LinkRegisterStatus GetLinkRegisterState() const { - return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved; - } - - // Support for converting LOperands to assembler types. - // LOperand must be a register. - Register ToRegister(LOperand* op) const; - - // LOperand is loaded into scratch, unless already a register. - Register EmitLoadRegister(LOperand* op, Register scratch); - - // LConstantOperand must be an Integer32 or Smi - void EmitLoadIntegerConstant(LConstantOperand* const_op, Register dst); - - // LOperand must be a double register. - DoubleRegister ToDoubleRegister(LOperand* op) const; - - intptr_t ToRepresentation(LConstantOperand* op, - const Representation& r) const; - int32_t ToInteger32(LConstantOperand* op) const; - Smi* ToSmi(LConstantOperand* op) const; - double ToDouble(LConstantOperand* op) const; - Operand ToOperand(LOperand* op); - MemOperand ToMemOperand(LOperand* op) const; - // Returns a MemOperand pointing to the high word of a DoubleStackSlot. - MemOperand ToHighMemOperand(LOperand* op) const; - - bool IsInteger32(LConstantOperand* op) const; - bool IsSmi(LConstantOperand* op) const; - Handle ToHandle(LConstantOperand* op) const; - - // Try to generate code for the entire chunk, but it may fail if the - // chunk contains constructs we cannot handle. Returns true if the - // code generation attempt succeeded. 
- bool GenerateCode(); - - // Finish the code by setting stack height, safepoint, and bailout - // information on it. - void FinishCode(Handle code); - - // Deferred code support. - void DoDeferredNumberTagD(LNumberTagD* instr); - - enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 }; - void DoDeferredNumberTagIU(LInstruction* instr, LOperand* value, - LOperand* temp1, LOperand* temp2, - IntegerSignedness signedness); - - void DoDeferredTaggedToI(LTaggedToI* instr); - void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr); - void DoDeferredStackCheck(LStackCheck* instr); - void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr); - void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr); - void DoDeferredStringCharFromCode(LStringCharFromCode* instr); - void DoDeferredAllocate(LAllocate* instr); - void DoDeferredInstanceMigration(LCheckMaps* instr, Register object); - void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, Register result, - Register object, Register index); - - // Parallel move support. - void DoParallelMove(LParallelMove* move); - void DoGap(LGap* instr); - - MemOperand PrepareKeyedOperand(Register key, Register base, - bool key_is_constant, bool key_is_tagged, - int constant_key, int element_size_shift, - int base_offset, - bool keyMaybeNegative = true); - - // Emit frame translation commands for an environment. - void WriteTranslation(LEnvironment* environment, Translation* translation); - -// Declare methods that deal with the individual node types. -#define DECLARE_DO(type) void Do##type(L##type* node); - LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) -#undef DECLARE_DO - - private: - Scope* scope() const { return scope_; } - - Register scratch0() { return kLithiumScratch; } - DoubleRegister double_scratch0() { return kScratchDoubleReg; } - - LInstruction* GetNextInstruction(); - - void EmitClassOfTest(Label* if_true, Label* if_false, - Handle class_name, Register input, - Register temporary, Register temporary2); - - bool HasAllocatedStackSlots() const { - return chunk()->HasAllocatedStackSlots(); - } - int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); } - int GetTotalFrameSlotCount() const { - return chunk()->GetTotalFrameSlotCount(); - } - - void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); } - - void SaveCallerDoubles(); - void RestoreCallerDoubles(); - - // Code generation passes. Returns true if code generation should - // continue. - void GenerateBodyInstructionPre(LInstruction* instr) override; - bool GeneratePrologue(); - bool GenerateDeferredCode(); - bool GenerateJumpTable(); - bool GenerateSafepointTable(); - - // Generates the custom OSR entrypoint and sets the osr_pc_offset. 
- void GenerateOsrPrologue(); - - enum SafepointMode { - RECORD_SIMPLE_SAFEPOINT, - RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS - }; - - void CallCode(Handle code, RelocInfo::Mode mode, LInstruction* instr); - - void CallCodeGeneric(Handle code, RelocInfo::Mode mode, - LInstruction* instr, SafepointMode safepoint_mode); - - void CallRuntime(const Runtime::Function* function, int num_arguments, - LInstruction* instr, - SaveFPRegsMode save_doubles = kDontSaveFPRegs); - - void CallRuntime(Runtime::FunctionId id, int num_arguments, - LInstruction* instr) { - const Runtime::Function* function = Runtime::FunctionForId(id); - CallRuntime(function, num_arguments, instr); - } - - void CallRuntime(Runtime::FunctionId id, LInstruction* instr) { - const Runtime::Function* function = Runtime::FunctionForId(id); - CallRuntime(function, function->nargs, instr); - } - - void LoadContextFromDeferred(LOperand* context); - void CallRuntimeFromDeferred(Runtime::FunctionId id, int argc, - LInstruction* instr, LOperand* context); - - void PrepareForTailCall(const ParameterCount& actual, Register scratch1, - Register scratch2, Register scratch3); - - // Generate a direct call to a known function. Expects the function - // to be in r4. - void CallKnownFunction(Handle function, - int formal_parameter_count, int arity, - bool is_tail_call, LInstruction* instr); - - void RecordSafepointWithLazyDeopt(LInstruction* instr, - SafepointMode safepoint_mode); - - void RegisterEnvironmentForDeoptimization(LEnvironment* environment, - Safepoint::DeoptMode mode); - void DeoptimizeIf(Condition condition, LInstruction* instr, - DeoptimizeReason deopt_reason, - Deoptimizer::BailoutType bailout_type, CRegister cr = cr7); - void DeoptimizeIf(Condition condition, LInstruction* instr, - DeoptimizeReason deopt_reason, CRegister cr = cr7); - - void AddToTranslation(LEnvironment* environment, Translation* translation, - LOperand* op, bool is_tagged, bool is_uint32, - int* object_index_pointer, - int* dematerialized_index_pointer); - - Register ToRegister(int index) const; - DoubleRegister ToDoubleRegister(int index) const; - - MemOperand BuildSeqStringOperand(Register string, LOperand* index, - String::Encoding encoding); - - void EmitMathAbs(LMathAbs* instr); -#if V8_TARGET_ARCH_S390X - void EmitInteger32MathAbs(LMathAbs* instr); -#endif - - // Support for recording safepoint information. - void RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind, - int arguments, Safepoint::DeoptMode mode); - void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode); - void RecordSafepoint(Safepoint::DeoptMode mode); - void RecordSafepointWithRegisters(LPointerMap* pointers, int arguments, - Safepoint::DeoptMode mode); - - static Condition TokenToCondition(Token::Value op); - void EmitGoto(int block); - - // EmitBranch expects to be the last instruction of a block. - template - void EmitBranch(InstrType instr, Condition condition); - template - void EmitTrueBranch(InstrType instr, Condition condition); - template - void EmitFalseBranch(InstrType instr, Condition condition); - void EmitNumberUntagD(LNumberUntagD* instr, Register input, - DoubleRegister result, NumberUntagDMode mode); - - // Emits optimized code for typeof x == "y". Modifies input register. - // Returns the condition on which a final split to - // true and false label should be made, to optimize fallthrough. - Condition EmitTypeofIs(Label* true_label, Label* false_label, Register input, - Handle type_name); - - // Emits optimized code for %_IsString(x). 
Preserves input register. - // Returns the condition on which a final split to - // true and false label should be made, to optimize fallthrough. - Condition EmitIsString(Register input, Register temp1, Label* is_not_string, - SmiCheck check_needed); - - // Emits optimized code to deep-copy the contents of statically known - // object graphs (e.g. object literal boilerplate). - void EmitDeepCopy(Handle object, Register result, Register source, - int* offset, AllocationSiteMode mode); - - void EnsureSpaceForLazyDeopt(int space_needed) override; - void DoLoadKeyedExternalArray(LLoadKeyed* instr); - void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr); - void DoLoadKeyedFixedArray(LLoadKeyed* instr); - void DoStoreKeyedExternalArray(LStoreKeyed* instr); - void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr); - void DoStoreKeyedFixedArray(LStoreKeyed* instr); - - template - void EmitVectorLoadICRegisters(T* instr); - - ZoneList jump_table_; - Scope* const scope_; - ZoneList deferred_; - bool frame_is_built_; - - // Builder that keeps track of safepoints in the code. The table - // itself is emitted at the end of the generated code. - SafepointTableBuilder safepoints_; - - // Compiler from a set of parallel moves to a sequential list of moves. - LGapResolver resolver_; - - Safepoint::Kind expected_safepoint_kind_; - - class PushSafepointRegistersScope final BASE_EMBEDDED { - public: - explicit PushSafepointRegistersScope(LCodeGen* codegen); - - ~PushSafepointRegistersScope(); - - private: - LCodeGen* codegen_; - }; - - friend class LDeferredCode; - friend class LEnvironment; - friend class SafepointGenerator; - DISALLOW_COPY_AND_ASSIGN(LCodeGen); -}; - -class LDeferredCode : public ZoneObject { - public: - explicit LDeferredCode(LCodeGen* codegen) - : codegen_(codegen), - external_exit_(NULL), - instruction_index_(codegen->current_instruction_) { - codegen->AddDeferredCode(this); - } - - virtual ~LDeferredCode() {} - virtual void Generate() = 0; - virtual LInstruction* instr() = 0; - - void SetExit(Label* exit) { external_exit_ = exit; } - Label* entry() { return &entry_; } - Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; } - int instruction_index() const { return instruction_index_; } - - protected: - LCodeGen* codegen() const { return codegen_; } - MacroAssembler* masm() const { return codegen_->masm(); } - - private: - LCodeGen* codegen_; - Label entry_; - Label exit_; - Label* external_exit_; - int instruction_index_; -}; -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_S390_LITHIUM_CODEGEN_S390_H_ diff --git a/src/crankshaft/s390/lithium-gap-resolver-s390.cc b/src/crankshaft/s390/lithium-gap-resolver-s390.cc deleted file mode 100644 index cffcede226..0000000000 --- a/src/crankshaft/s390/lithium-gap-resolver-s390.cc +++ /dev/null @@ -1,280 +0,0 @@ -// Copyright 2015 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/s390/lithium-gap-resolver-s390.h" - -#include "src/crankshaft/s390/lithium-codegen-s390.h" - -namespace v8 { -namespace internal { - -static const Register kSavedValueRegister = {1}; - -LGapResolver::LGapResolver(LCodeGen* owner) - : cgen_(owner), - moves_(32, owner->zone()), - root_index_(0), - in_cycle_(false), - saved_destination_(NULL) {} - -void LGapResolver::Resolve(LParallelMove* parallel_move) { - DCHECK(moves_.is_empty()); - // Build up a worklist of moves. 
- BuildInitialMoveList(parallel_move); - - for (int i = 0; i < moves_.length(); ++i) { - LMoveOperands move = moves_[i]; - // Skip constants to perform them last. They don't block other moves - // and skipping such moves with register destinations keeps those - // registers free for the whole algorithm. - if (!move.IsEliminated() && !move.source()->IsConstantOperand()) { - root_index_ = i; // Any cycle is found when by reaching this move again. - PerformMove(i); - if (in_cycle_) { - RestoreValue(); - } - } - } - - // Perform the moves with constant sources. - for (int i = 0; i < moves_.length(); ++i) { - if (!moves_[i].IsEliminated()) { - DCHECK(moves_[i].source()->IsConstantOperand()); - EmitMove(i); - } - } - - moves_.Rewind(0); -} - -void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) { - // Perform a linear sweep of the moves to add them to the initial list of - // moves to perform, ignoring any move that is redundant (the source is - // the same as the destination, the destination is ignored and - // unallocated, or the move was already eliminated). - const ZoneList* moves = parallel_move->move_operands(); - for (int i = 0; i < moves->length(); ++i) { - LMoveOperands move = moves->at(i); - if (!move.IsRedundant()) moves_.Add(move, cgen_->zone()); - } - Verify(); -} - -void LGapResolver::PerformMove(int index) { - // Each call to this function performs a move and deletes it from the move - // graph. We first recursively perform any move blocking this one. We - // mark a move as "pending" on entry to PerformMove in order to detect - // cycles in the move graph. - - // We can only find a cycle, when doing a depth-first traversal of moves, - // be encountering the starting move again. So by spilling the source of - // the starting move, we break the cycle. All moves are then unblocked, - // and the starting move is completed by writing the spilled value to - // its destination. All other moves from the spilled source have been - // completed prior to breaking the cycle. - // An additional complication is that moves to MemOperands with large - // offsets (more than 1K or 4K) require us to spill this spilled value to - // the stack, to free up the register. - DCHECK(!moves_[index].IsPending()); - DCHECK(!moves_[index].IsRedundant()); - - // Clear this move's destination to indicate a pending move. The actual - // destination is saved in a stack allocated local. Multiple moves can - // be pending because this function is recursive. - DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated. - LOperand* destination = moves_[index].destination(); - moves_[index].set_destination(NULL); - - // Perform a depth-first traversal of the move graph to resolve - // dependencies. Any unperformed, unpending move with a source the same - // as this one's destination blocks this one so recursively perform all - // such moves. - for (int i = 0; i < moves_.length(); ++i) { - LMoveOperands other_move = moves_[i]; - if (other_move.Blocks(destination) && !other_move.IsPending()) { - PerformMove(i); - // If there is a blocking, pending move it must be moves_[root_index_] - // and all other moves with the same source as moves_[root_index_] are - // sucessfully executed (because they are cycle-free) by this loop. - } - } - - // We are about to resolve this move and don't need it marked as - // pending, so restore its destination. - moves_[index].set_destination(destination); - - // The move may be blocked on a pending move, which must be the starting move. 
- // In this case, we have a cycle, and we save the source of this move to - // a scratch register to break it. - LMoveOperands other_move = moves_[root_index_]; - if (other_move.Blocks(destination)) { - DCHECK(other_move.IsPending()); - BreakCycle(index); - return; - } - - // This move is no longer blocked. - EmitMove(index); -} - -void LGapResolver::Verify() { -#ifdef ENABLE_SLOW_DCHECKS - // No operand should be the destination for more than one move. - for (int i = 0; i < moves_.length(); ++i) { - LOperand* destination = moves_[i].destination(); - for (int j = i + 1; j < moves_.length(); ++j) { - SLOW_DCHECK(!destination->Equals(moves_[j].destination())); - } - } -#endif -} - -#define __ ACCESS_MASM(cgen_->masm()) - -void LGapResolver::BreakCycle(int index) { - // We save in a register the value that should end up in the source of - // moves_[root_index]. After performing all moves in the tree rooted - // in that move, we save the value to that source. - DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source())); - DCHECK(!in_cycle_); - in_cycle_ = true; - LOperand* source = moves_[index].source(); - saved_destination_ = moves_[index].destination(); - if (source->IsRegister()) { - __ LoadRR(kSavedValueRegister, cgen_->ToRegister(source)); - } else if (source->IsStackSlot()) { - __ LoadP(kSavedValueRegister, cgen_->ToMemOperand(source)); - } else if (source->IsDoubleRegister()) { - __ ldr(kScratchDoubleReg, cgen_->ToDoubleRegister(source)); - } else if (source->IsDoubleStackSlot()) { - __ LoadDouble(kScratchDoubleReg, cgen_->ToMemOperand(source)); - } else { - UNREACHABLE(); - } - // This move will be done by restoring the saved value to the destination. - moves_[index].Eliminate(); -} - -void LGapResolver::RestoreValue() { - DCHECK(in_cycle_); - DCHECK(saved_destination_ != NULL); - - // Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister. - if (saved_destination_->IsRegister()) { - __ LoadRR(cgen_->ToRegister(saved_destination_), kSavedValueRegister); - } else if (saved_destination_->IsStackSlot()) { - __ StoreP(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_)); - } else if (saved_destination_->IsDoubleRegister()) { - __ ldr(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg); - } else if (saved_destination_->IsDoubleStackSlot()) { - __ StoreDouble(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_)); - } else { - UNREACHABLE(); - } - - in_cycle_ = false; - saved_destination_ = NULL; -} - -void LGapResolver::EmitMove(int index) { - LOperand* source = moves_[index].source(); - LOperand* destination = moves_[index].destination(); - - // Dispatch on the source and destination operand kinds. Not all - // combinations are possible. 
- - if (source->IsRegister()) { - Register source_register = cgen_->ToRegister(source); - if (destination->IsRegister()) { - __ LoadRR(cgen_->ToRegister(destination), source_register); - } else { - DCHECK(destination->IsStackSlot()); - __ StoreP(source_register, cgen_->ToMemOperand(destination)); - } - } else if (source->IsStackSlot()) { - MemOperand source_operand = cgen_->ToMemOperand(source); - if (destination->IsRegister()) { - __ LoadP(cgen_->ToRegister(destination), source_operand); - } else { - DCHECK(destination->IsStackSlot()); - MemOperand destination_operand = cgen_->ToMemOperand(destination); - if (in_cycle_) { - __ LoadP(ip, source_operand); - __ StoreP(ip, destination_operand); - } else { - __ LoadP(kSavedValueRegister, source_operand); - __ StoreP(kSavedValueRegister, destination_operand); - } - } - - } else if (source->IsConstantOperand()) { - LConstantOperand* constant_source = LConstantOperand::cast(source); - if (destination->IsRegister()) { - Register dst = cgen_->ToRegister(destination); - if (cgen_->IsInteger32(constant_source)) { - cgen_->EmitLoadIntegerConstant(constant_source, dst); - } else { - __ Move(dst, cgen_->ToHandle(constant_source)); - } - } else if (destination->IsDoubleRegister()) { - DoubleRegister result = cgen_->ToDoubleRegister(destination); - double v = cgen_->ToDouble(constant_source); - __ LoadDoubleLiteral(result, v, ip); - } else { - DCHECK(destination->IsStackSlot()); - DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone. - if (cgen_->IsInteger32(constant_source)) { - cgen_->EmitLoadIntegerConstant(constant_source, kSavedValueRegister); - } else { - __ Move(kSavedValueRegister, cgen_->ToHandle(constant_source)); - } - __ StoreP(kSavedValueRegister, cgen_->ToMemOperand(destination)); - } - - } else if (source->IsDoubleRegister()) { - DoubleRegister source_register = cgen_->ToDoubleRegister(source); - if (destination->IsDoubleRegister()) { - __ ldr(cgen_->ToDoubleRegister(destination), source_register); - } else { - DCHECK(destination->IsDoubleStackSlot()); - __ StoreDouble(source_register, cgen_->ToMemOperand(destination)); - } - - } else if (source->IsDoubleStackSlot()) { - MemOperand source_operand = cgen_->ToMemOperand(source); - if (destination->IsDoubleRegister()) { - __ LoadDouble(cgen_->ToDoubleRegister(destination), source_operand); - } else { - DCHECK(destination->IsDoubleStackSlot()); - MemOperand destination_operand = cgen_->ToMemOperand(destination); - if (in_cycle_) { -// kSavedDoubleValueRegister was used to break the cycle, -// but kSavedValueRegister is free. 
-#if V8_TARGET_ARCH_S390X - __ lg(kSavedValueRegister, source_operand); - __ stg(kSavedValueRegister, destination_operand); -#else - MemOperand source_high_operand = cgen_->ToHighMemOperand(source); - MemOperand destination_high_operand = - cgen_->ToHighMemOperand(destination); - __ LoadlW(kSavedValueRegister, source_operand); - __ StoreW(kSavedValueRegister, destination_operand); - __ LoadlW(kSavedValueRegister, source_high_operand); - __ StoreW(kSavedValueRegister, destination_high_operand); -#endif - } else { - __ LoadDouble(kScratchDoubleReg, source_operand); - __ StoreDouble(kScratchDoubleReg, destination_operand); - } - } - } else { - UNREACHABLE(); - } - - moves_[index].Eliminate(); -} - -#undef __ -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/s390/lithium-gap-resolver-s390.h b/src/crankshaft/s390/lithium-gap-resolver-s390.h deleted file mode 100644 index 087224c861..0000000000 --- a/src/crankshaft/s390/lithium-gap-resolver-s390.h +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_S390_LITHIUM_GAP_RESOLVER_S390_H_ -#define V8_CRANKSHAFT_S390_LITHIUM_GAP_RESOLVER_S390_H_ - -#include "src/crankshaft/lithium.h" - -namespace v8 { -namespace internal { - -class LCodeGen; -class LGapResolver; - -class LGapResolver final BASE_EMBEDDED { - public: - explicit LGapResolver(LCodeGen* owner); - - // Resolve a set of parallel moves, emitting assembler instructions. - void Resolve(LParallelMove* parallel_move); - - private: - // Build the initial list of moves. - void BuildInitialMoveList(LParallelMove* parallel_move); - - // Perform the move at the moves_ index in question (possibly requiring - // other moves to satisfy dependencies). - void PerformMove(int index); - - // If a cycle is found in the series of moves, save the blocking value to - // a scratch register. The cycle must be found by hitting the root of the - // depth-first search. - void BreakCycle(int index); - - // After a cycle has been resolved, restore the value from the scratch - // register to its proper destination. - void RestoreValue(); - - // Emit a move and remove it from the move graph. - void EmitMove(int index); - - // Verify the move list before performing moves. - void Verify(); - - LCodeGen* cgen_; - - // List of moves not yet resolved. - ZoneList moves_; - - int root_index_; - bool in_cycle_; - LOperand* saved_destination_; -}; -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_S390_LITHIUM_GAP_RESOLVER_S390_H_ diff --git a/src/crankshaft/s390/lithium-s390.cc b/src/crankshaft/s390/lithium-s390.cc deleted file mode 100644 index 198a98e702..0000000000 --- a/src/crankshaft/s390/lithium-s390.cc +++ /dev/null @@ -1,2140 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
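[Illustrative note, not part of the patch] The LGapResolver deleted above turns a parallel move (all moves conceptually happen at once) into a sequence of single moves, using a depth-first walk that marks a move "pending", performs every move that reads from its destination first, and breaks a cycle by spilling the destination to one scratch location. The following is a minimal standalone sketch of that idea under simplifying assumptions: locations are plain integer ids, -1 stands for the single scratch, and constant/double operands are omitted. GapResolver and Move here are illustrative names, not V8 API.

#include <cstdio>
#include <vector>

// A "location" is just an integer register/slot id; -1 names the single
// scratch location used to break cycles.
struct Move {
  int src;
  int dst;
  bool pending;
  bool done;
};

class GapResolver {
 public:
  explicit GapResolver(const std::vector<Move>& moves) : moves_(moves) {}

  void Resolve() {
    for (size_t i = 0; i < moves_.size(); ++i) {
      if (!moves_[i].done) Perform(i);
    }
  }

 private:
  void Perform(size_t index) {
    // Mark as pending so that reaching this move again means we found a cycle.
    moves_[index].pending = true;
    // Depth-first: perform every unfinished move that reads from our
    // destination before we clobber it.  A pending blocker closes a cycle;
    // spill the destination to scratch and redirect the blocker to read it.
    for (size_t i = 0; i < moves_.size(); ++i) {
      if (i == index || moves_[i].done) continue;
      if (moves_[i].src != moves_[index].dst) continue;
      if (moves_[i].pending) {
        std::printf("scratch <- L%d\n", moves_[index].dst);
        moves_[i].src = -1;  // The blocked move now reads the saved copy.
      } else {
        Perform(i);
      }
    }
    moves_[index].pending = false;
    if (moves_[index].src == -1) {
      std::printf("L%d <- scratch\n", moves_[index].dst);
    } else {
      std::printf("L%d <- L%d\n", moves_[index].dst, moves_[index].src);
    }
    moves_[index].done = true;
  }

  std::vector<Move> moves_;
};

int main() {
  // A swap plus an independent copy: {L0 <- L1, L1 <- L0, L2 <- L0}.
  GapResolver resolver({{1, 0, false, false},
                        {0, 1, false, false},
                        {0, 2, false, false}});
  resolver.Resolve();  // scratch<-L1, L1<-L0, L2<-L0, L0<-scratch
  return 0;
}

The deleted s390 resolver additionally defers moves with constant sources to the end of the sequence (they block nothing) and, as the surrounding hunk shows, breaks double-stack-slot cycles on 32-bit targets by copying the slot word by word through the saved-value register.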
- -#include "src/crankshaft/s390/lithium-s390.h" - -#include - -#include "src/crankshaft/lithium-inl.h" -#include "src/crankshaft/s390/lithium-codegen-s390.h" - -namespace v8 { -namespace internal { - -#define DEFINE_COMPILE(type) \ - void L##type::CompileToNative(LCodeGen* generator) { \ - generator->Do##type(this); \ - } -LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE) -#undef DEFINE_COMPILE - -#ifdef DEBUG -void LInstruction::VerifyCall() { - // Call instructions can use only fixed registers as temporaries and - // outputs because all registers are blocked by the calling convention. - // Inputs operands must use a fixed register or use-at-start policy or - // a non-register policy. - DCHECK(Output() == NULL || LUnallocated::cast(Output())->HasFixedPolicy() || - !LUnallocated::cast(Output())->HasRegisterPolicy()); - for (UseIterator it(this); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - DCHECK(operand->HasFixedPolicy() || operand->IsUsedAtStart()); - } - for (TempIterator it(this); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - DCHECK(operand->HasFixedPolicy() || !operand->HasRegisterPolicy()); - } -} -#endif - -void LInstruction::PrintTo(StringStream* stream) { - stream->Add("%s ", this->Mnemonic()); - - PrintOutputOperandTo(stream); - - PrintDataTo(stream); - - if (HasEnvironment()) { - stream->Add(" "); - environment()->PrintTo(stream); - } - - if (HasPointerMap()) { - stream->Add(" "); - pointer_map()->PrintTo(stream); - } -} - -void LInstruction::PrintDataTo(StringStream* stream) { - stream->Add("= "); - for (int i = 0; i < InputCount(); i++) { - if (i > 0) stream->Add(" "); - if (InputAt(i) == NULL) { - stream->Add("NULL"); - } else { - InputAt(i)->PrintTo(stream); - } - } -} - -void LInstruction::PrintOutputOperandTo(StringStream* stream) { - if (HasResult()) result()->PrintTo(stream); -} - -void LLabel::PrintDataTo(StringStream* stream) { - LGap::PrintDataTo(stream); - LLabel* rep = replacement(); - if (rep != NULL) { - stream->Add(" Dead block replaced with B%d", rep->block_id()); - } -} - -bool LGap::IsRedundant() const { - for (int i = 0; i < 4; i++) { - if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) { - return false; - } - } - - return true; -} - -void LGap::PrintDataTo(StringStream* stream) { - for (int i = 0; i < 4; i++) { - stream->Add("("); - if (parallel_moves_[i] != NULL) { - parallel_moves_[i]->PrintDataTo(stream); - } - stream->Add(") "); - } -} - -const char* LArithmeticD::Mnemonic() const { - switch (op()) { - case Token::ADD: - return "add-d"; - case Token::SUB: - return "sub-d"; - case Token::MUL: - return "mul-d"; - case Token::DIV: - return "div-d"; - case Token::MOD: - return "mod-d"; - default: - UNREACHABLE(); - } -} - -const char* LArithmeticT::Mnemonic() const { - switch (op()) { - case Token::ADD: - return "add-t"; - case Token::SUB: - return "sub-t"; - case Token::MUL: - return "mul-t"; - case Token::MOD: - return "mod-t"; - case Token::DIV: - return "div-t"; - case Token::BIT_AND: - return "bit-and-t"; - case Token::BIT_OR: - return "bit-or-t"; - case Token::BIT_XOR: - return "bit-xor-t"; - case Token::ROR: - return "ror-t"; - case Token::SHL: - return "shl-t"; - case Token::SAR: - return "sar-t"; - case Token::SHR: - return "shr-t"; - default: - UNREACHABLE(); - } -} - -bool LGoto::HasInterestingComment(LCodeGen* gen) const { - return !gen->IsNextEmittedBlock(block_id()); -} - -void LGoto::PrintDataTo(StringStream* stream) { - 
stream->Add("B%d", block_id()); -} - -void LBranch::PrintDataTo(StringStream* stream) { - stream->Add("B%d | B%d on ", true_block_id(), false_block_id()); - value()->PrintTo(stream); -} - -void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if "); - left()->PrintTo(stream); - stream->Add(" %s ", Token::String(op())); - right()->PrintTo(stream); - stream->Add(" then B%d else B%d", true_block_id(), false_block_id()); -} - -void LIsStringAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_string("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - -void LIsSmiAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_smi("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - -void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_undetectable("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - -void LStringCompareAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if string_compare("); - left()->PrintTo(stream); - right()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - -void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if has_instance_type("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - -void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if class_of_test("); - value()->PrintTo(stream); - stream->Add(", \"%o\") then B%d else B%d", *hydrogen()->class_name(), - true_block_id(), false_block_id()); -} - -void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if typeof "); - value()->PrintTo(stream); - stream->Add(" == \"%s\" then B%d else B%d", - hydrogen()->type_literal()->ToCString().get(), true_block_id(), - false_block_id()); -} - -void LStoreCodeEntry::PrintDataTo(StringStream* stream) { - stream->Add(" = "); - function()->PrintTo(stream); - stream->Add(".code_entry = "); - code_object()->PrintTo(stream); -} - -void LInnerAllocatedObject::PrintDataTo(StringStream* stream) { - stream->Add(" = "); - base_object()->PrintTo(stream); - stream->Add(" + "); - offset()->PrintTo(stream); -} - -void LCallWithDescriptor::PrintDataTo(StringStream* stream) { - for (int i = 0; i < InputCount(); i++) { - InputAt(i)->PrintTo(stream); - stream->Add(" "); - } - stream->Add("#%d / ", arity()); -} - -void LLoadContextSlot::PrintDataTo(StringStream* stream) { - context()->PrintTo(stream); - stream->Add("[%d]", slot_index()); -} - -void LStoreContextSlot::PrintDataTo(StringStream* stream) { - context()->PrintTo(stream); - stream->Add("[%d] <- ", slot_index()); - value()->PrintTo(stream); -} - -void LInvokeFunction::PrintDataTo(StringStream* stream) { - stream->Add("= "); - function()->PrintTo(stream); - stream->Add(" #%d / ", arity()); -} - -void LCallNewArray::PrintDataTo(StringStream* stream) { - stream->Add("= "); - constructor()->PrintTo(stream); - stream->Add(" #%d / ", arity()); - ElementsKind kind = hydrogen()->elements_kind(); - stream->Add(" (%s) ", ElementsKindToString(kind)); -} - -void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { - arguments()->PrintTo(stream); - stream->Add(" length "); - length()->PrintTo(stream); - stream->Add(" index "); - index()->PrintTo(stream); -} - -void LStoreNamedField::PrintDataTo(StringStream* stream) { - 
object()->PrintTo(stream); - std::ostringstream os; - os << hydrogen()->access() << " <- "; - stream->Add(os.str().c_str()); - value()->PrintTo(stream); -} - -void LLoadKeyed::PrintDataTo(StringStream* stream) { - elements()->PrintTo(stream); - stream->Add("["); - key()->PrintTo(stream); - if (hydrogen()->IsDehoisted()) { - stream->Add(" + %d]", base_offset()); - } else { - stream->Add("]"); - } -} - -void LStoreKeyed::PrintDataTo(StringStream* stream) { - elements()->PrintTo(stream); - stream->Add("["); - key()->PrintTo(stream); - if (hydrogen()->IsDehoisted()) { - stream->Add(" + %d] <-", base_offset()); - } else { - stream->Add("] <- "); - } - - if (value() == NULL) { - DCHECK(hydrogen()->IsConstantHoleStore() && - hydrogen()->value()->representation().IsDouble()); - stream->Add(""); - } else { - value()->PrintTo(stream); - } -} - -void LTransitionElementsKind::PrintDataTo(StringStream* stream) { - object()->PrintTo(stream); - stream->Add(" %p -> %p", *original_map(), *transitioned_map()); -} - -int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) { - // Skip a slot if for a double-width slot. - if (kind == DOUBLE_REGISTERS) current_frame_slots_++; - return current_frame_slots_++; -} - -LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) { - int index = GetNextSpillIndex(kind); - if (kind == DOUBLE_REGISTERS) { - return LDoubleStackSlot::Create(index, zone()); - } else { - DCHECK(kind == GENERAL_REGISTERS); - return LStackSlot::Create(index, zone()); - } -} - -LPlatformChunk* LChunkBuilder::Build() { - DCHECK(is_unused()); - chunk_ = new (zone()) LPlatformChunk(info(), graph()); - LPhase phase("L_Building chunk", chunk_); - status_ = BUILDING; - - const ZoneList* blocks = graph()->blocks(); - for (int i = 0; i < blocks->length(); i++) { - HBasicBlock* next = NULL; - if (i < blocks->length() - 1) next = blocks->at(i + 1); - DoBasicBlock(blocks->at(i), next); - if (is_aborted()) return NULL; - } - status_ = DONE; - return chunk_; -} - -LUnallocated* LChunkBuilder::ToUnallocated(Register reg) { - return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code()); -} - -LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) { - return new (zone()) - LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code()); -} - -LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) { - return Use(value, ToUnallocated(fixed_register)); -} - -LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) { - return Use(value, ToUnallocated(reg)); -} - -LOperand* LChunkBuilder::UseRegister(HValue* value) { - return Use(value, - new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER)); -} - -LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) { - return Use(value, new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER, - LUnallocated::USED_AT_START)); -} - -LOperand* LChunkBuilder::UseTempRegister(HValue* value) { - return Use(value, new (zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER)); -} - -LOperand* LChunkBuilder::Use(HValue* value) { - return Use(value, new (zone()) LUnallocated(LUnallocated::NONE)); -} - -LOperand* LChunkBuilder::UseAtStart(HValue* value) { - return Use(value, new (zone()) LUnallocated(LUnallocated::NONE, - LUnallocated::USED_AT_START)); -} - -LOperand* LChunkBuilder::UseOrConstant(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : Use(value); -} - -LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) { - return value->IsConstant() - ? 
chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseAtStart(value); -} - -LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseRegister(value); -} - -LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseRegisterAtStart(value); -} - -LOperand* LChunkBuilder::UseConstant(HValue* value) { - return chunk_->DefineConstantOperand(HConstant::cast(value)); -} - -LOperand* LChunkBuilder::UseAny(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : Use(value, new (zone()) LUnallocated(LUnallocated::ANY)); -} - -LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) { - if (value->EmitAtUses()) { - HInstruction* instr = HInstruction::cast(value); - VisitInstruction(instr); - } - operand->set_virtual_register(value->id()); - return operand; -} - -LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr, - LUnallocated* result) { - result->set_virtual_register(current_instruction_->id()); - instr->set_result(result); - return instr; -} - -LInstruction* LChunkBuilder::DefineAsRegister( - LTemplateResultInstruction<1>* instr) { - return Define(instr, - new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER)); -} - -LInstruction* LChunkBuilder::DefineAsSpilled( - LTemplateResultInstruction<1>* instr, int index) { - return Define(instr, - new (zone()) LUnallocated(LUnallocated::FIXED_SLOT, index)); -} - -LInstruction* LChunkBuilder::DefineSameAsFirst( - LTemplateResultInstruction<1>* instr) { - return Define(instr, - new (zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT)); -} - -LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr, - Register reg) { - return Define(instr, ToUnallocated(reg)); -} - -LInstruction* LChunkBuilder::DefineFixedDouble( - LTemplateResultInstruction<1>* instr, DoubleRegister reg) { - return Define(instr, ToUnallocated(reg)); -} - -LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { - HEnvironment* hydrogen_env = current_block_->last_environment(); - return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env); -} - -LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, - HInstruction* hinstr, - CanDeoptimize can_deoptimize) { - info()->MarkAsNonDeferredCalling(); -#ifdef DEBUG - instr->VerifyCall(); -#endif - instr->MarkAsCall(); - instr = AssignPointerMap(instr); - - // If instruction does not have side-effects lazy deoptimization - // after the call will try to deoptimize to the point before the call. - // Thus we still need to attach environment to this call even if - // call sequence can not deoptimize eagerly. - bool needs_environment = (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || - !hinstr->HasObservableSideEffects(); - if (needs_environment && !instr->HasEnvironment()) { - instr = AssignEnvironment(instr); - // We can't really figure out if the environment is needed or not. 
- instr->environment()->set_has_been_used(); - } - - return instr; -} - -LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { - DCHECK(!instr->HasPointerMap()); - instr->set_pointer_map(new (zone()) LPointerMap(zone())); - return instr; -} - -LUnallocated* LChunkBuilder::TempRegister() { - LUnallocated* operand = - new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER); - int vreg = allocator_->GetVirtualRegister(); - if (!allocator_->AllocationOk()) { - Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister); - vreg = 0; - } - operand->set_virtual_register(vreg); - return operand; -} - -LUnallocated* LChunkBuilder::TempDoubleRegister() { - LUnallocated* operand = - new (zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER); - int vreg = allocator_->GetVirtualRegister(); - if (!allocator_->AllocationOk()) { - Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister); - vreg = 0; - } - operand->set_virtual_register(vreg); - return operand; -} - -LOperand* LChunkBuilder::FixedTemp(Register reg) { - LUnallocated* operand = ToUnallocated(reg); - DCHECK(operand->HasFixedPolicy()); - return operand; -} - -LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) { - LUnallocated* operand = ToUnallocated(reg); - DCHECK(operand->HasFixedPolicy()); - return operand; -} - -LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) { - return new (zone()) LLabel(instr->block()); -} - -LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) { - return DefineAsRegister(new (zone()) LDummyUse(UseAny(instr->value()))); -} - -LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) { - UNREACHABLE(); -} - -LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) { - return AssignEnvironment(new (zone()) LDeoptimize); -} - -LInstruction* LChunkBuilder::DoShift(Token::Value op, - HBitwiseBinaryOperation* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* left = UseRegisterAtStart(instr->left()); - - HValue* right_value = instr->right(); - LOperand* right = NULL; - int constant_value = 0; - bool does_deopt = false; - if (right_value->IsConstant()) { - HConstant* constant = HConstant::cast(right_value); - right = chunk_->DefineConstantOperand(constant); - constant_value = constant->Integer32Value() & 0x1f; - // Left shifts can deoptimize if we shift by > 0 and the result cannot be - // truncated to smi. - if (instr->representation().IsSmi() && constant_value > 0) { - does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi); - } - } else { - right = UseRegisterAtStart(right_value); - } - - // Shift operations can only deoptimize if we do a logical shift - // by 0 and the result cannot be truncated to int32. - if (op == Token::SHR && constant_value == 0) { - does_deopt = !instr->CheckFlag(HInstruction::kUint32); - } - - LInstruction* result = - DefineAsRegister(new (zone()) LShiftI(op, left, right, does_deopt)); - return does_deopt ? 
AssignEnvironment(result) : result; - } else { - return DoArithmeticT(op, instr); - } -} - -LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op, - HArithmeticBinaryOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - DCHECK(instr->right()->representation().IsDouble()); - if (op == Token::MOD) { - LOperand* left = UseFixedDouble(instr->left(), d1); - LOperand* right = UseFixedDouble(instr->right(), d2); - LArithmeticD* result = new (zone()) LArithmeticD(op, left, right); - // We call a C function for double modulo. It can't trigger a GC. We need - // to use fixed result register for the call. - // TODO(fschneider): Allow any register as input registers. - return MarkAsCall(DefineFixedDouble(result, d1), instr); - } else { - LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); - LOperand* right = UseRegisterAtStart(instr->BetterRightOperand()); - LArithmeticD* result = new (zone()) LArithmeticD(op, left, right); - return CpuFeatures::IsSupported(VECTOR_FACILITY) - ? DefineAsRegister(result) - : DefineSameAsFirst(result); - } -} - -LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op, - HBinaryOperation* instr) { - HValue* left = instr->left(); - HValue* right = instr->right(); - DCHECK(left->representation().IsTagged()); - DCHECK(right->representation().IsTagged()); - LOperand* context = UseFixed(instr->context(), cp); - LOperand* left_operand = UseFixed(left, r3); - LOperand* right_operand = UseFixed(right, r2); - LArithmeticT* result = - new (zone()) LArithmeticT(op, context, left_operand, right_operand); - return MarkAsCall(DefineFixed(result, r2), instr); -} - -void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { - DCHECK(is_building()); - current_block_ = block; - next_block_ = next_block; - if (block->IsStartBlock()) { - block->UpdateEnvironment(graph_->start_environment()); - argument_count_ = 0; - } else if (block->predecessors()->length() == 1) { - // We have a single predecessor => copy environment and outgoing - // argument count from the predecessor. - DCHECK(block->phis()->length() == 0); - HBasicBlock* pred = block->predecessors()->at(0); - HEnvironment* last_environment = pred->last_environment(); - DCHECK(last_environment != NULL); - // Only copy the environment, if it is later used again. - if (pred->end()->SecondSuccessor() == NULL) { - DCHECK(pred->end()->FirstSuccessor() == block); - } else { - if (pred->end()->FirstSuccessor()->block_id() > block->block_id() || - pred->end()->SecondSuccessor()->block_id() > block->block_id()) { - last_environment = last_environment->Copy(); - } - } - block->UpdateEnvironment(last_environment); - DCHECK(pred->argument_count() >= 0); - argument_count_ = pred->argument_count(); - } else { - // We are at a state join => process phis. - HBasicBlock* pred = block->predecessors()->at(0); - // No need to copy the environment, it cannot be used later. 
- HEnvironment* last_environment = pred->last_environment(); - for (int i = 0; i < block->phis()->length(); ++i) { - HPhi* phi = block->phis()->at(i); - if (phi->HasMergedIndex()) { - last_environment->SetValueAt(phi->merged_index(), phi); - } - } - for (int i = 0; i < block->deleted_phis()->length(); ++i) { - if (block->deleted_phis()->at(i) < last_environment->length()) { - last_environment->SetValueAt(block->deleted_phis()->at(i), - graph_->GetConstantUndefined()); - } - } - block->UpdateEnvironment(last_environment); - // Pick up the outgoing argument count of one of the predecessors. - argument_count_ = pred->argument_count(); - } - HInstruction* current = block->first(); - int start = chunk_->instructions()->length(); - while (current != NULL && !is_aborted()) { - // Code for constants in registers is generated lazily. - if (!current->EmitAtUses()) { - VisitInstruction(current); - } - current = current->next(); - } - int end = chunk_->instructions()->length() - 1; - if (end >= start) { - block->set_first_instruction_index(start); - block->set_last_instruction_index(end); - } - block->set_argument_count(argument_count_); - next_block_ = NULL; - current_block_ = NULL; -} - -void LChunkBuilder::VisitInstruction(HInstruction* current) { - HInstruction* old_current = current_instruction_; - current_instruction_ = current; - - LInstruction* instr = NULL; - if (current->CanReplaceWithDummyUses()) { - if (current->OperandCount() == 0) { - instr = DefineAsRegister(new (zone()) LDummy()); - } else { - DCHECK(!current->OperandAt(0)->IsControlInstruction()); - instr = DefineAsRegister(new (zone()) - LDummyUse(UseAny(current->OperandAt(0)))); - } - for (int i = 1; i < current->OperandCount(); ++i) { - if (current->OperandAt(i)->IsControlInstruction()) continue; - LInstruction* dummy = - new (zone()) LDummyUse(UseAny(current->OperandAt(i))); - dummy->set_hydrogen_value(current); - chunk_->AddInstruction(dummy, current_block_); - } - } else { - HBasicBlock* successor; - if (current->IsControlInstruction() && - HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) && - successor != NULL) { - instr = new (zone()) LGoto(successor); - } else { - instr = current->CompileToLithium(this); - } - } - - argument_count_ += current->argument_delta(); - DCHECK(argument_count_ >= 0); - - if (instr != NULL) { - AddInstruction(instr, current); - } - - current_instruction_ = old_current; -} - -void LChunkBuilder::AddInstruction(LInstruction* instr, - HInstruction* hydrogen_val) { - // Associate the hydrogen instruction first, since we may need it for - // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below. - instr->set_hydrogen_value(hydrogen_val); - -#if DEBUG - // Make sure that the lithium instruction has either no fixed register - // constraints in temps or the result OR no uses that are only used at - // start. If this invariant doesn't hold, the register allocator can decide - // to insert a split of a range immediately before the instruction due to an - // already allocated register needing to be used for the instruction's fixed - // register constraint. In this case, The register allocator won't see an - // interference between the split child and the use-at-start (it would if - // the it was just a plain use), so it is free to move the split child into - // the same register that is used for the use-at-start. 
- // See https://code.google.com/p/chromium/issues/detail?id=201590 - if (!(instr->ClobbersRegisters() && - instr->ClobbersDoubleRegisters(isolate()))) { - int fixed = 0; - int used_at_start = 0; - for (UseIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->IsUsedAtStart()) ++used_at_start; - } - if (instr->Output() != NULL) { - if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed; - } - for (TempIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->HasFixedPolicy()) ++fixed; - } - DCHECK(fixed == 0 || used_at_start == 0); - } -#endif - - if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) { - instr = AssignPointerMap(instr); - } - if (FLAG_stress_environments && !instr->HasEnvironment()) { - instr = AssignEnvironment(instr); - } - chunk_->AddInstruction(instr, current_block_); - - CreateLazyBailoutForCall(current_block_, instr, hydrogen_val); -} - -LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) { - LInstruction* result = new (zone()) LPrologue(); - if (info_->scope()->NeedsContext()) { - result = MarkAsCall(result, instr); - } - return result; -} - -LInstruction* LChunkBuilder::DoGoto(HGoto* instr) { - return new (zone()) LGoto(instr->FirstSuccessor()); -} - -LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { - HValue* value = instr->value(); - Representation r = value->representation(); - HType type = value->type(); - ToBooleanHints expected = instr->expected_input_types(); - if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny; - - bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() || - type.IsJSArray() || type.IsHeapNumber() || type.IsString(); - LInstruction* branch = new (zone()) LBranch(UseRegister(value)); - if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) && - (expected & ToBooleanHint::kNeedsMap)) || - expected != ToBooleanHint::kAny)) { - branch = AssignEnvironment(branch); - } - return branch; -} - -LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) { - return new (zone()) LDebugBreak(); -} - -LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegister(instr->value()); - LOperand* temp = TempRegister(); - return new (zone()) LCmpMapAndBranch(value, temp); -} - -LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) { - info()->MarkAsRequiresFrame(); - LOperand* value = UseRegister(instr->value()); - return DefineAsRegister(new (zone()) LArgumentsLength(value)); -} - -LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) { - info()->MarkAsRequiresFrame(); - return DefineAsRegister(new (zone()) LArgumentsElements); -} - -LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch( - HHasInPrototypeChainAndBranch* instr) { - LOperand* object = UseRegister(instr->object()); - LOperand* prototype = UseRegister(instr->prototype()); - LHasInPrototypeChainAndBranch* result = - new (zone()) LHasInPrototypeChainAndBranch(object, prototype); - return AssignEnvironment(result); -} - -LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) { - LOperand* receiver = UseRegisterAtStart(instr->receiver()); - LOperand* function = UseRegisterAtStart(instr->function()); - LWrapReceiver* result = new (zone()) LWrapReceiver(receiver, function); - return AssignEnvironment(DefineAsRegister(result)); -} - -LInstruction* 
LChunkBuilder::DoApplyArguments(HApplyArguments* instr) { - LOperand* function = UseFixed(instr->function(), r3); - LOperand* receiver = UseFixed(instr->receiver(), r2); - LOperand* length = UseFixed(instr->length(), r4); - LOperand* elements = UseFixed(instr->elements(), r5); - LApplyArguments* result = - new (zone()) LApplyArguments(function, receiver, length, elements); - return MarkAsCall(DefineFixed(result, r2), instr, CAN_DEOPTIMIZE_EAGERLY); -} - -LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) { - int argc = instr->OperandCount(); - for (int i = 0; i < argc; ++i) { - LOperand* argument = Use(instr->argument(i)); - AddInstruction(new (zone()) LPushArgument(argument), instr); - } - return NULL; -} - -LInstruction* LChunkBuilder::DoStoreCodeEntry( - HStoreCodeEntry* store_code_entry) { - LOperand* function = UseRegister(store_code_entry->function()); - LOperand* code_object = UseTempRegister(store_code_entry->code_object()); - return new (zone()) LStoreCodeEntry(function, code_object); -} - -LInstruction* LChunkBuilder::DoInnerAllocatedObject( - HInnerAllocatedObject* instr) { - LOperand* base_object = UseRegisterAtStart(instr->base_object()); - LOperand* offset = UseRegisterOrConstantAtStart(instr->offset()); - return DefineAsRegister(new (zone()) - LInnerAllocatedObject(base_object, offset)); -} - -LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) { - return instr->HasNoUses() ? NULL - : DefineAsRegister(new (zone()) LThisFunction); -} - -LInstruction* LChunkBuilder::DoContext(HContext* instr) { - if (instr->HasNoUses()) return NULL; - - if (info()->IsStub()) { - return DefineFixed(new (zone()) LContext, cp); - } - - return DefineAsRegister(new (zone()) LContext); -} - -LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) { - LOperand* context = UseFixed(instr->context(), cp); - return MarkAsCall(new (zone()) LDeclareGlobals(context), instr); -} - -LInstruction* LChunkBuilder::DoCallWithDescriptor(HCallWithDescriptor* instr) { - CallInterfaceDescriptor descriptor = instr->descriptor(); - DCHECK_EQ(descriptor.GetParameterCount() + - LCallWithDescriptor::kImplicitRegisterParameterCount, - instr->OperandCount()); - - LOperand* target = UseRegisterOrConstantAtStart(instr->target()); - ZoneList<LOperand*> ops(instr->OperandCount(), zone()); - // Target - ops.Add(target, zone()); - // Context - LOperand* op = UseFixed(instr->OperandAt(1), cp); - ops.Add(op, zone()); - // Load register parameters. - int i = 0; - for (; i < descriptor.GetRegisterParameterCount(); i++) { - op = UseFixed(instr->OperandAt( - i + LCallWithDescriptor::kImplicitRegisterParameterCount), - descriptor.GetRegisterParameter(i)); - ops.Add(op, zone()); - } - // Push stack parameters.
- for (; i < descriptor.GetParameterCount(); i++) { - op = UseAny(instr->OperandAt( - i + LCallWithDescriptor::kImplicitRegisterParameterCount)); - AddInstruction(new (zone()) LPushArgument(op), instr); - } - - LCallWithDescriptor* result = - new (zone()) LCallWithDescriptor(descriptor, ops, zone()); - if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) { - result->MarkAsSyntacticTailCall(); - } - return MarkAsCall(DefineFixed(result, r2), instr); -} - -LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) { - LOperand* context = UseFixed(instr->context(), cp); - LOperand* function = UseFixed(instr->function(), r3); - LInvokeFunction* result = new (zone()) LInvokeFunction(context, function); - if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) { - result->MarkAsSyntacticTailCall(); - } - return MarkAsCall(DefineFixed(result, r2), instr, CANNOT_DEOPTIMIZE_EAGERLY); -} - -LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { - switch (instr->op()) { - case kMathFloor: - return DoMathFloor(instr); - case kMathRound: - return DoMathRound(instr); - case kMathFround: - return DoMathFround(instr); - case kMathAbs: - return DoMathAbs(instr); - case kMathLog: - return DoMathLog(instr); - case kMathCos: - return DoMathCos(instr); - case kMathSin: - return DoMathSin(instr); - case kMathExp: - return DoMathExp(instr); - case kMathSqrt: - return DoMathSqrt(instr); - case kMathPowHalf: - return DoMathPowHalf(instr); - case kMathClz32: - return DoMathClz32(instr); - default: - UNREACHABLE(); - } -} - -LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) { - LOperand* input = UseRegister(instr->value()); - LMathFloor* result = new (zone()) LMathFloor(input); - return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); -} - -LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) { - LOperand* input = UseRegister(instr->value()); - LOperand* temp = TempDoubleRegister(); - LMathRound* result = new (zone()) LMathRound(input, temp); - return AssignEnvironment(DefineAsRegister(result)); -} - -LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) { - LOperand* input = UseRegister(instr->value()); - LMathFround* result = new (zone()) LMathFround(input); - return DefineAsRegister(result); -} - -LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) { - Representation r = instr->value()->representation(); - LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32()) - ? 
NULL - : UseFixed(instr->context(), cp); - LOperand* input = UseRegister(instr->value()); - LInstruction* result = - DefineAsRegister(new (zone()) LMathAbs(context, input)); - if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result); - if (!r.IsDouble()) result = AssignEnvironment(result); - return result; -} - -LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseFixedDouble(instr->value(), d0); - return MarkAsCall(DefineFixedDouble(new (zone()) LMathLog(input), d0), instr); -} - -LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) { - LOperand* input = UseRegisterAtStart(instr->value()); - LMathClz32* result = new (zone()) LMathClz32(input); - return DefineAsRegister(result); -} - -LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseFixedDouble(instr->value(), d0); - return MarkAsCall(DefineFixedDouble(new (zone()) LMathCos(input), d0), instr); -} - -LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseFixedDouble(instr->value(), d0); - return MarkAsCall(DefineFixedDouble(new (zone()) LMathSin(input), d0), instr); -} - -LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseFixedDouble(instr->value(), d0); - return MarkAsCall(DefineFixedDouble(new (zone()) LMathExp(input), d0), instr); -} - -LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) { - LOperand* input = UseAtStart(instr->value()); - LMathSqrt* result = new (zone()) LMathSqrt(input); - return DefineAsRegister(result); -} - -LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) { - LOperand* input = UseRegisterAtStart(instr->value()); - LMathPowHalf* result = new (zone()) LMathPowHalf(input); - return DefineAsRegister(result); -} - -LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) { - LOperand* context = UseFixed(instr->context(), cp); - LOperand* constructor = UseFixed(instr->constructor(), r3); - LCallNewArray* result = new (zone()) LCallNewArray(context, constructor); - return MarkAsCall(DefineFixed(result, r2), instr); -} - -LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) { - LOperand* context = UseFixed(instr->context(), cp); - return MarkAsCall(DefineFixed(new (zone()) LCallRuntime(context), r2), instr); -} - -LInstruction* LChunkBuilder::DoRor(HRor* instr) { - return DoShift(Token::ROR, instr); -} - -LInstruction* LChunkBuilder::DoShr(HShr* instr) { - return DoShift(Token::SHR, instr); -} - -LInstruction* LChunkBuilder::DoSar(HSar* instr) { - return DoShift(Token::SAR, instr); -} - -LInstruction* LChunkBuilder::DoShl(HShl* instr) { - return DoShift(Token::SHL, instr); -} - -LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32)); - - LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); - LOperand* right = 
UseOrConstantAtStart(instr->BetterRightOperand()); - return DefineAsRegister(new (zone()) LBitI(left, right)); - } else { - return DoArithmeticT(instr->op(), instr); - } -} - -LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = - DefineAsRegister(new (zone()) LDivByPowerOf2I(dividend, divisor)); - if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || - (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) || - (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && - divisor != 1 && divisor != -1)) { - result = AssignEnvironment(result); - } - return result; -} - -LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) { - DCHECK(instr->representation().IsInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = - DefineAsRegister(new (zone()) LDivByConstI(dividend, divisor)); - if (divisor == 0 || - (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || - !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { - result = AssignEnvironment(result); - } - return result; -} - -LInstruction* LChunkBuilder::DoDivI(HDiv* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - LOperand* divisor = UseRegister(instr->right()); - LInstruction* result = - DefineAsRegister(new (zone()) LDivI(dividend, divisor)); - if (instr->CheckFlag(HValue::kCanBeDivByZero) || - instr->CheckFlag(HValue::kBailoutOnMinusZero) || - (instr->CheckFlag(HValue::kCanOverflow) && - !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) || - (!instr->IsMathFloorOfDiv() && - !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { - result = AssignEnvironment(result); - } - return result; -} - -LInstruction* LChunkBuilder::DoDiv(HDiv* instr) { - if (instr->representation().IsSmiOrInteger32()) { - if (instr->RightIsPowerOf2()) { - return DoDivByPowerOf2I(instr); - } else if (instr->right()->IsConstant()) { - return DoDivByConstI(instr); - } else { - return DoDivI(instr); - } - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::DIV, instr); - } else { - return DoArithmeticT(Token::DIV, instr); - } -} - -LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) { - LOperand* dividend = UseRegisterAtStart(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = - DefineAsRegister(new (zone()) LFlooringDivByPowerOf2I(dividend, divisor)); - if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || - (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) { - result = AssignEnvironment(result); - } - return result; -} - -LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) { - DCHECK(instr->representation().IsInteger32()); - 
DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LOperand* temp = - ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) || - (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) - ? NULL - : TempRegister(); - LInstruction* result = DefineAsRegister( - new (zone()) LFlooringDivByConstI(dividend, divisor, temp)); - if (divisor == 0 || - (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) { - result = AssignEnvironment(result); - } - return result; -} - -LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - LOperand* divisor = UseRegister(instr->right()); - LInstruction* result = - DefineAsRegister(new (zone()) LFlooringDivI(dividend, divisor)); - if (instr->CheckFlag(HValue::kCanBeDivByZero) || - instr->CheckFlag(HValue::kBailoutOnMinusZero) || - (instr->CheckFlag(HValue::kCanOverflow) && - !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { - result = AssignEnvironment(result); - } - return result; -} - -LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) { - if (instr->RightIsPowerOf2()) { - return DoFlooringDivByPowerOf2I(instr); - } else if (instr->right()->IsConstant()) { - return DoFlooringDivByConstI(instr); - } else { - return DoFlooringDivI(instr); - } -} - -LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegisterAtStart(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = - DefineSameAsFirst(new (zone()) LModByPowerOf2I(dividend, divisor)); - if (instr->CheckFlag(HValue::kLeftCanBeNegative) && - instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - result = AssignEnvironment(result); - } - return result; -} - -LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = - DefineAsRegister(new (zone()) LModByConstI(dividend, divisor)); - if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - result = AssignEnvironment(result); - } - return result; -} - -LInstruction* LChunkBuilder::DoModI(HMod* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - LOperand* divisor = UseRegister(instr->right()); - LInstruction* result = - DefineAsRegister(new (zone()) LModI(dividend, divisor)); - if (instr->CheckFlag(HValue::kCanBeDivByZero) || - instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - result = 
AssignEnvironment(result); - } - return result; -} - -LInstruction* LChunkBuilder::DoMod(HMod* instr) { - if (instr->representation().IsSmiOrInteger32()) { - if (instr->RightIsPowerOf2()) { - return DoModByPowerOf2I(instr); - } else if (instr->right()->IsConstant()) { - return DoModByConstI(instr); - } else { - return DoModI(instr); - } - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::MOD, instr); - } else { - return DoArithmeticT(Token::MOD, instr); - } -} - -LInstruction* LChunkBuilder::DoMul(HMul* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - HValue* left = instr->BetterLeftOperand(); - HValue* right = instr->BetterRightOperand(); - LOperand* left_op; - LOperand* right_op; - bool can_overflow = instr->CheckFlag(HValue::kCanOverflow); - bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero); - - int32_t constant_value = 0; - if (right->IsConstant()) { - HConstant* constant = HConstant::cast(right); - constant_value = constant->Integer32Value(); - // Constants -1, 0 and 1 can be optimized if the result can overflow. - // For other constants, it can be optimized only without overflow. - if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) { - left_op = UseRegisterAtStart(left); - right_op = UseConstant(right); - } else { - if (bailout_on_minus_zero) { - left_op = UseRegister(left); - } else { - left_op = UseRegisterAtStart(left); - } - right_op = UseRegister(right); - } - } else { - if (bailout_on_minus_zero) { - left_op = UseRegister(left); - } else { - left_op = UseRegisterAtStart(left); - } - right_op = UseRegister(right); - } - LMulI* mul = new (zone()) LMulI(left_op, right_op); - if (right_op->IsConstantOperand() - ? 
((can_overflow && constant_value == -1) || - (bailout_on_minus_zero && constant_value <= 0)) - : (can_overflow || bailout_on_minus_zero)) { - AssignEnvironment(mul); - } - return DefineAsRegister(mul); - - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::MUL, instr); - } else { - return DoArithmeticT(Token::MUL, instr); - } -} - -LInstruction* LChunkBuilder::DoSub(HSub* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseOrConstantAtStart(instr->right()); - LSubI* sub = new (zone()) LSubI(left, right); - LInstruction* result = DefineAsRegister(sub); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::SUB, instr); - } else { - return DoArithmeticT(Token::SUB, instr); - } -} - -LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) { - LOperand* multiplier_op = UseRegister(mul->left()); - LOperand* multiplicand_op = UseRegister(mul->right()); - LOperand* addend_op = UseRegister(addend); - return DefineAsRegister( - new (zone()) LMultiplyAddD(addend_op, multiplier_op, multiplicand_op)); -} - -LInstruction* LChunkBuilder::DoMultiplySub(HValue* minuend, HMul* mul) { - LOperand* minuend_op = UseRegister(minuend); - LOperand* multiplier_op = UseRegister(mul->left()); - LOperand* multiplicand_op = UseRegister(mul->right()); - - return DefineAsRegister( - new (zone()) LMultiplySubD(minuend_op, multiplier_op, multiplicand_op)); -} - -LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); - LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand()); - LAddI* add = new (zone()) LAddI(left, right); - LInstruction* result = DefineAsRegister(add); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; - } else if (instr->representation().IsExternal()) { - DCHECK(instr->IsConsistentExternalRepresentation()); - DCHECK(!instr->CheckFlag(HValue::kCanOverflow)); - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseOrConstantAtStart(instr->right()); - LAddI* add = new (zone()) LAddI(left, right); - LInstruction* result = DefineAsRegister(add); - return result; - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::ADD, instr); - } else { - return DoArithmeticT(Token::ADD, instr); - } -} - -LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) { - LOperand* left = NULL; - LOperand* right = NULL; - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - left = UseRegisterAtStart(instr->BetterLeftOperand()); - right = UseOrConstantAtStart(instr->BetterRightOperand()); - } else { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - DCHECK(instr->right()->representation().IsDouble()); - left = UseRegister(instr->left()); - right = 
UseRegister(instr->right()); - } - return DefineAsRegister(new (zone()) LMathMinMax(left, right)); -} - -LInstruction* LChunkBuilder::DoPower(HPower* instr) { - DCHECK(instr->representation().IsDouble()); - // We call a C function for double power. It can't trigger a GC. - // We need to use fixed result register for the call. - Representation exponent_type = instr->right()->representation(); - DCHECK(instr->left()->representation().IsDouble()); - LOperand* left = UseFixedDouble(instr->left(), d1); - LOperand* right = exponent_type.IsDouble() - ? UseFixedDouble(instr->right(), d2) - : UseFixed(instr->right(), r4); - LPower* result = new (zone()) LPower(left, right); - return MarkAsCall(DefineFixedDouble(result, d3), instr, - CAN_DEOPTIMIZE_EAGERLY); -} - -LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { - DCHECK(instr->left()->representation().IsTagged()); - DCHECK(instr->right()->representation().IsTagged()); - LOperand* context = UseFixed(instr->context(), cp); - LOperand* left = UseFixed(instr->left(), r3); - LOperand* right = UseFixed(instr->right(), r2); - LCmpT* result = new (zone()) LCmpT(context, left, right); - return MarkAsCall(DefineFixed(result, r2), instr); -} - -LInstruction* LChunkBuilder::DoCompareNumericAndBranch( - HCompareNumericAndBranch* instr) { - Representation r = instr->representation(); - if (r.IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(r)); - DCHECK(instr->right()->representation().Equals(r)); - LOperand* left = UseRegisterOrConstantAtStart(instr->left()); - LOperand* right = UseRegisterOrConstantAtStart(instr->right()); - return new (zone()) LCompareNumericAndBranch(left, right); - } else { - DCHECK(r.IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - DCHECK(instr->right()->representation().IsDouble()); - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterAtStart(instr->right()); - return new (zone()) LCompareNumericAndBranch(left, right); - } -} - -LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch( - HCompareObjectEqAndBranch* instr) { - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterAtStart(instr->right()); - return new (zone()) LCmpObjectEqAndBranch(left, right); -} - -LInstruction* LChunkBuilder::DoCompareHoleAndBranch( - HCompareHoleAndBranch* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - return new (zone()) LCmpHoleAndBranch(value); -} - -LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* temp = TempRegister(); - return new (zone()) LIsStringAndBranch(value, temp); -} - -LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - return new (zone()) LIsSmiAndBranch(Use(instr->value())); -} - -LInstruction* LChunkBuilder::DoIsUndetectableAndBranch( - HIsUndetectableAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - return new (zone()) LIsUndetectableAndBranch(value, TempRegister()); -} - -LInstruction* LChunkBuilder::DoStringCompareAndBranch( - HStringCompareAndBranch* instr) { - DCHECK(instr->left()->representation().IsTagged()); - DCHECK(instr->right()->representation().IsTagged()); - LOperand* context = UseFixed(instr->context(), cp); - LOperand* left = UseFixed(instr->left(), r3); - LOperand* right 
= UseFixed(instr->right(), r2); - LStringCompareAndBranch* result = - new (zone()) LStringCompareAndBranch(context, left, right); - return MarkAsCall(result, instr); -} - -LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch( - HHasInstanceTypeAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - return new (zone()) LHasInstanceTypeAndBranch(value); -} - -LInstruction* LChunkBuilder::DoClassOfTestAndBranch( - HClassOfTestAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegister(instr->value()); - return new (zone()) LClassOfTestAndBranch(value, TempRegister()); -} - -LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) { - LOperand* string = UseRegisterAtStart(instr->string()); - LOperand* index = UseRegisterOrConstantAtStart(instr->index()); - return DefineAsRegister(new (zone()) LSeqStringGetChar(string, index)); -} - -LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) { - LOperand* string = UseRegisterAtStart(instr->string()); - LOperand* index = FLAG_debug_code - ? UseRegisterAtStart(instr->index()) - : UseRegisterOrConstantAtStart(instr->index()); - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL; - return new (zone()) LSeqStringSetChar(context, string, index, value); -} - -LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { - if (!FLAG_debug_code && instr->skip_check()) return NULL; - LOperand* index = UseRegisterOrConstantAtStart(instr->index()); - LOperand* length = !index->IsConstantOperand() - ? UseRegisterOrConstantAtStart(instr->length()) - : UseRegisterAtStart(instr->length()); - LInstruction* result = new (zone()) LBoundsCheck(index, length); - if (!FLAG_debug_code || !instr->skip_check()) { - result = AssignEnvironment(result); - } - return result; -} - -LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) { - // The control instruction marking the end of a block that completed - // abruptly (e.g., threw an exception). There is nothing specific to do. - return NULL; -} - -LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) { return NULL; } - -LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) { - // All HForceRepresentation instructions should be eliminated in the - // representation change phase of Hydrogen. 
- UNREACHABLE(); -} - -LInstruction* LChunkBuilder::DoChange(HChange* instr) { - Representation from = instr->from(); - Representation to = instr->to(); - HValue* val = instr->value(); - if (from.IsSmi()) { - if (to.IsTagged()) { - LOperand* value = UseRegister(val); - return DefineSameAsFirst(new (zone()) LDummyUse(value)); - } - from = Representation::Tagged(); - } - if (from.IsTagged()) { - if (to.IsDouble()) { - LOperand* value = UseRegister(val); - LInstruction* result = - DefineAsRegister(new (zone()) LNumberUntagD(value)); - if (!val->representation().IsSmi()) result = AssignEnvironment(result); - return result; - } else if (to.IsSmi()) { - LOperand* value = UseRegister(val); - if (val->type().IsSmi()) { - return DefineSameAsFirst(new (zone()) LDummyUse(value)); - } - return AssignEnvironment( - DefineSameAsFirst(new (zone()) LCheckSmi(value))); - } else { - DCHECK(to.IsInteger32()); - if (val->type().IsSmi() || val->representation().IsSmi()) { - LOperand* value = UseRegisterAtStart(val); - return DefineAsRegister(new (zone()) LSmiUntag(value, false)); - } else { - LOperand* value = UseRegister(val); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempDoubleRegister(); - LInstruction* result = - DefineSameAsFirst(new (zone()) LTaggedToI(value, temp1, temp2)); - if (!val->representation().IsSmi()) result = AssignEnvironment(result); - return result; - } - } - } else if (from.IsDouble()) { - if (to.IsTagged()) { - info()->MarkAsDeferredCalling(); - LOperand* value = UseRegister(val); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - LUnallocated* result_temp = TempRegister(); - LNumberTagD* result = new (zone()) LNumberTagD(value, temp1, temp2); - return AssignPointerMap(Define(result, result_temp)); - } else if (to.IsSmi()) { - LOperand* value = UseRegister(val); - return AssignEnvironment( - DefineAsRegister(new (zone()) LDoubleToSmi(value))); - } else { - DCHECK(to.IsInteger32()); - LOperand* value = UseRegister(val); - LInstruction* result = DefineAsRegister(new (zone()) LDoubleToI(value)); - if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result); - return result; - } - } else if (from.IsInteger32()) { - info()->MarkAsDeferredCalling(); - if (to.IsTagged()) { - if (!instr->CheckFlag(HValue::kCanOverflow)) { - LOperand* value = UseRegisterAtStart(val); - return DefineAsRegister(new (zone()) LSmiTag(value)); - } else if (val->CheckFlag(HInstruction::kUint32)) { - LOperand* value = UseRegisterAtStart(val); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - LNumberTagU* result = new (zone()) LNumberTagU(value, temp1, temp2); - return AssignPointerMap(DefineAsRegister(result)); - } else { - LOperand* value = UseRegisterAtStart(val); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - LNumberTagI* result = new (zone()) LNumberTagI(value, temp1, temp2); - return AssignPointerMap(DefineAsRegister(result)); - } - } else if (to.IsSmi()) { - LOperand* value = UseRegister(val); - LInstruction* result = DefineAsRegister(new (zone()) LSmiTag(value)); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; - } else { - DCHECK(to.IsDouble()); - if (val->CheckFlag(HInstruction::kUint32)) { - return DefineAsRegister(new (zone()) LUint32ToDouble(UseRegister(val))); - } else { - return DefineAsRegister(new (zone()) LInteger32ToDouble(Use(val))); - } - } - } - UNREACHABLE(); -} - -LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) { - 
LOperand* value = UseAtStart(instr->value()); - LInstruction* result = new (zone()) LCheckNonSmi(value); - if (!instr->value()->type().IsHeapObject()) { - result = AssignEnvironment(result); - } - return result; -} - -LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) { - LOperand* value = UseAtStart(instr->value()); - return AssignEnvironment(new (zone()) LCheckSmi(value)); -} - -LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered( - HCheckArrayBufferNotNeutered* instr) { - LOperand* view = UseRegisterAtStart(instr->value()); - LCheckArrayBufferNotNeutered* result = - new (zone()) LCheckArrayBufferNotNeutered(view); - return AssignEnvironment(result); -} - -LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - LInstruction* result = new (zone()) LCheckInstanceType(value); - return AssignEnvironment(result); -} - -LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - return AssignEnvironment(new (zone()) LCheckValue(value)); -} - -LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) { - if (instr->IsStabilityCheck()) return new (zone()) LCheckMaps; - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* temp = TempRegister(); - LInstruction* result = - AssignEnvironment(new (zone()) LCheckMaps(value, temp)); - if (instr->HasMigrationTarget()) { - info()->MarkAsDeferredCalling(); - result = AssignPointerMap(result); - } - return result; -} - -LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) { - HValue* value = instr->value(); - Representation input_rep = value->representation(); - LOperand* reg = UseRegister(value); - if (input_rep.IsDouble()) { - return DefineAsRegister(new (zone()) LClampDToUint8(reg)); - } else if (input_rep.IsInteger32()) { - return DefineAsRegister(new (zone()) LClampIToUint8(reg)); - } else { - DCHECK(input_rep.IsSmiOrTagged()); - LClampTToUint8* result = - new (zone()) LClampTToUint8(reg, TempDoubleRegister()); - return AssignEnvironment(DefineAsRegister(result)); - } -} - -LInstruction* LChunkBuilder::DoReturn(HReturn* instr) { - LOperand* context = info()->IsStub() ? 
UseFixed(instr->context(), cp) : NULL; - LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count()); - return new (zone()) - LReturn(UseFixed(instr->value(), r2), context, parameter_count); -} - -LInstruction* LChunkBuilder::DoConstant(HConstant* instr) { - Representation r = instr->representation(); - if (r.IsSmi()) { - return DefineAsRegister(new (zone()) LConstantS); - } else if (r.IsInteger32()) { - return DefineAsRegister(new (zone()) LConstantI); - } else if (r.IsDouble()) { - return DefineAsRegister(new (zone()) LConstantD); - } else if (r.IsExternal()) { - return DefineAsRegister(new (zone()) LConstantE); - } else if (r.IsTagged()) { - return DefineAsRegister(new (zone()) LConstantT); - } else { - UNREACHABLE(); - } -} - -LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) { - LOperand* context = UseRegisterAtStart(instr->value()); - LInstruction* result = - DefineAsRegister(new (zone()) LLoadContextSlot(context)); - if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { - result = AssignEnvironment(result); - } - return result; -} - -LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) { - LOperand* context; - LOperand* value; - if (instr->NeedsWriteBarrier()) { - context = UseTempRegister(instr->context()); - value = UseTempRegister(instr->value()); - } else { - context = UseRegister(instr->context()); - value = UseRegister(instr->value()); - } - LInstruction* result = new (zone()) LStoreContextSlot(context, value); - if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { - result = AssignEnvironment(result); - } - return result; -} - -LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) { - LOperand* obj = UseRegisterAtStart(instr->object()); - return DefineAsRegister(new (zone()) LLoadNamedField(obj)); -} - -LInstruction* LChunkBuilder::DoLoadFunctionPrototype( - HLoadFunctionPrototype* instr) { - return AssignEnvironment(DefineAsRegister( - new (zone()) LLoadFunctionPrototype(UseRegister(instr->function())))); -} - -LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) { - return DefineAsRegister(new (zone()) LLoadRoot); -} - -LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { - DCHECK(instr->key()->representation().IsSmiOrInteger32()); - ElementsKind elements_kind = instr->elements_kind(); - LOperand* key = UseRegisterOrConstantAtStart(instr->key()); - LInstruction* result = NULL; - - if (!instr->is_fixed_typed_array()) { - LOperand* obj = NULL; - if (instr->representation().IsDouble()) { - obj = UseRegister(instr->elements()); - } else { - obj = UseRegisterAtStart(instr->elements()); - } - result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr)); - } else { - DCHECK((instr->representation().IsInteger32() && - !IsDoubleOrFloatElementsKind(elements_kind)) || - (instr->representation().IsDouble() && - IsDoubleOrFloatElementsKind(elements_kind))); - LOperand* backing_store = UseRegister(instr->elements()); - LOperand* backing_store_owner = UseAny(instr->backing_store_owner()); - result = DefineAsRegister( - new (zone()) LLoadKeyed(backing_store, key, backing_store_owner)); - } - - bool needs_environment; - if (instr->is_fixed_typed_array()) { - // see LCodeGen::DoLoadKeyedExternalArray - needs_environment = elements_kind == UINT32_ELEMENTS && - !instr->CheckFlag(HInstruction::kUint32); - } else { - // see LCodeGen::DoLoadKeyedFixedDoubleArray and - // LCodeGen::DoLoadKeyedFixedArray - needs_environment = - instr->RequiresHoleCheck() || - (instr->hole_mode() == 
CONVERT_HOLE_TO_UNDEFINED && info()->IsStub()); - } - - if (needs_environment) { - result = AssignEnvironment(result); - } - return result; -} - -LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { - if (!instr->is_fixed_typed_array()) { - DCHECK(instr->elements()->representation().IsTagged()); - bool needs_write_barrier = instr->NeedsWriteBarrier(); - LOperand* object = NULL; - LOperand* key = NULL; - LOperand* val = NULL; - - if (instr->value()->representation().IsDouble()) { - object = UseRegisterAtStart(instr->elements()); - val = UseRegister(instr->value()); - key = UseRegisterOrConstantAtStart(instr->key()); - } else { - if (needs_write_barrier) { - object = UseTempRegister(instr->elements()); - val = UseTempRegister(instr->value()); - key = UseTempRegister(instr->key()); - } else { - object = UseRegisterAtStart(instr->elements()); - val = UseRegisterAtStart(instr->value()); - key = UseRegisterOrConstantAtStart(instr->key()); - } - } - - return new (zone()) LStoreKeyed(object, key, val, nullptr); - } - - DCHECK((instr->value()->representation().IsInteger32() && - !IsDoubleOrFloatElementsKind(instr->elements_kind())) || - (instr->value()->representation().IsDouble() && - IsDoubleOrFloatElementsKind(instr->elements_kind()))); - DCHECK(instr->elements()->representation().IsExternal()); - LOperand* val = UseRegister(instr->value()); - LOperand* key = UseRegisterOrConstantAtStart(instr->key()); - LOperand* backing_store = UseRegister(instr->elements()); - LOperand* backing_store_owner = UseAny(instr->backing_store_owner()); - return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner); -} - -LInstruction* LChunkBuilder::DoTransitionElementsKind( - HTransitionElementsKind* instr) { - if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) { - LOperand* object = UseRegister(instr->object()); - LOperand* new_map_reg = TempRegister(); - LTransitionElementsKind* result = - new (zone()) LTransitionElementsKind(object, NULL, new_map_reg); - return result; - } else { - LOperand* object = UseFixed(instr->object(), r2); - LOperand* context = UseFixed(instr->context(), cp); - LTransitionElementsKind* result = - new (zone()) LTransitionElementsKind(object, context, NULL); - return MarkAsCall(result, instr); - } -} - -LInstruction* LChunkBuilder::DoTrapAllocationMemento( - HTrapAllocationMemento* instr) { - LOperand* object = UseRegister(instr->object()); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - LTrapAllocationMemento* result = - new (zone()) LTrapAllocationMemento(object, temp1, temp2); - return AssignEnvironment(result); -} - -LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) { - info()->MarkAsDeferredCalling(); - LOperand* context = UseFixed(instr->context(), cp); - LOperand* object = Use(instr->object()); - LOperand* elements = Use(instr->elements()); - LOperand* key = UseRegisterOrConstant(instr->key()); - LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity()); - - LMaybeGrowElements* result = new (zone()) - LMaybeGrowElements(context, object, elements, key, current_capacity); - DefineFixed(result, r2); - return AssignPointerMap(AssignEnvironment(result)); -} - -LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { - bool is_in_object = instr->access().IsInobject(); - bool needs_write_barrier = instr->NeedsWriteBarrier(); - bool needs_write_barrier_for_map = - instr->has_transition() && instr->NeedsWriteBarrierForMap(); - - LOperand* obj; - if 
(needs_write_barrier) { - obj = is_in_object ? UseRegister(instr->object()) - : UseTempRegister(instr->object()); - } else { - obj = needs_write_barrier_for_map ? UseRegister(instr->object()) - : UseRegisterAtStart(instr->object()); - } - - LOperand* val; - if (needs_write_barrier) { - val = UseTempRegister(instr->value()); - } else if (instr->field_representation().IsDouble()) { - val = UseRegisterAtStart(instr->value()); - } else { - val = UseRegister(instr->value()); - } - - // We need a temporary register for write barrier of the map field. - LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL; - - return new (zone()) LStoreNamedField(obj, val, temp); -} - -LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) { - LOperand* context = UseFixed(instr->context(), cp); - LOperand* left = UseFixed(instr->left(), r3); - LOperand* right = UseFixed(instr->right(), r2); - return MarkAsCall( - DefineFixed(new (zone()) LStringAdd(context, left, right), r2), instr); -} - -LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) { - LOperand* string = UseTempRegister(instr->string()); - LOperand* index = UseTempRegister(instr->index()); - LOperand* context = UseAny(instr->context()); - LStringCharCodeAt* result = - new (zone()) LStringCharCodeAt(context, string, index); - return AssignPointerMap(DefineAsRegister(result)); -} - -LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) { - LOperand* char_code = UseRegister(instr->value()); - LOperand* context = UseAny(instr->context()); - LStringCharFromCode* result = - new (zone()) LStringCharFromCode(context, char_code); - return AssignPointerMap(DefineAsRegister(result)); -} - -LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) { - LOperand* size = UseRegisterOrConstant(instr->size()); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - if (instr->IsAllocationFolded()) { - LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2); - return DefineAsRegister(result); - } else { - info()->MarkAsDeferredCalling(); - LOperand* context = UseAny(instr->context()); - LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2); - return AssignPointerMap(DefineAsRegister(result)); - } -} - -LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) { - DCHECK(argument_count_ == 0); - allocator_->MarkAsOsrEntry(); - current_block_->last_environment()->set_ast_id(instr->ast_id()); - return AssignEnvironment(new (zone()) LOsrEntry); -} - -LInstruction* LChunkBuilder::DoParameter(HParameter* instr) { - LParameter* result = new (zone()) LParameter; - if (instr->kind() == HParameter::STACK_PARAMETER) { - int spill_index = chunk()->GetParameterStackSlot(instr->index()); - return DefineAsSpilled(result, spill_index); - } else { - DCHECK(info()->IsStub()); - CallInterfaceDescriptor descriptor = graph()->descriptor(); - int index = static_cast<int>(instr->index()); - Register reg = descriptor.GetRegisterParameter(index); - return DefineFixed(result, reg); - } -} - -LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) { - // Use an index that corresponds to the location in the unoptimized frame, - // which the optimized frame will subsume.
- int env_index = instr->index(); - int spill_index = 0; - if (instr->environment()->is_parameter_index(env_index)) { - spill_index = chunk()->GetParameterStackSlot(env_index); - } else { - spill_index = env_index - instr->environment()->first_local_index(); - if (spill_index > LUnallocated::kMaxFixedSlotIndex) { - Retry(kTooManySpillSlotsNeededForOSR); - spill_index = 0; - } - spill_index += StandardFrameConstants::kFixedSlotCount; - } - return DefineAsSpilled(new (zone()) LUnknownOSRValue, spill_index); -} - -LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) { - // There are no real uses of the arguments object. - // arguments.length and element access are supported directly on - // stack arguments, and any real arguments object use causes a bailout. - // So this value is never used. - return NULL; -} - -LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) { - instr->ReplayEnvironment(current_block_->last_environment()); - - // There are no real uses of a captured object. - return NULL; -} - -LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) { - info()->MarkAsRequiresFrame(); - LOperand* args = UseRegister(instr->arguments()); - LOperand* length = UseRegisterOrConstantAtStart(instr->length()); - LOperand* index = UseRegisterOrConstantAtStart(instr->index()); - return DefineAsRegister(new (zone()) LAccessArgumentsAt(args, length, index)); -} - -LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) { - LOperand* context = UseFixed(instr->context(), cp); - LOperand* value = UseFixed(instr->value(), r5); - LTypeof* result = new (zone()) LTypeof(context, value); - return MarkAsCall(DefineFixed(result, r2), instr); -} - -LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) { - return new (zone()) LTypeofIsAndBranch(UseRegister(instr->value())); -} - -LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) { - instr->ReplayEnvironment(current_block_->last_environment()); - return NULL; -} - -LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) { - if (instr->is_function_entry()) { - LOperand* context = UseFixed(instr->context(), cp); - return MarkAsCall(new (zone()) LStackCheck(context), instr); - } else { - DCHECK(instr->is_backwards_branch()); - LOperand* context = UseAny(instr->context()); - return AssignEnvironment( - AssignPointerMap(new (zone()) LStackCheck(context))); - } -} - -LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { - HEnvironment* outer = current_block_->last_environment(); - outer->set_ast_id(instr->ReturnId()); - HConstant* undefined = graph()->GetConstantUndefined(); - HEnvironment* inner = outer->CopyForInlining( - instr->closure(), instr->arguments_count(), instr->function(), undefined, - instr->inlining_kind(), instr->syntactic_tail_call_mode()); - // Only replay binding of arguments object if it wasn't removed from graph. 
- if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) { - inner->Bind(instr->arguments_var(), instr->arguments_object()); - } - inner->BindContext(instr->closure_context()); - inner->set_entry(instr); - current_block_->UpdateEnvironment(inner); - return NULL; -} - -LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) { - LInstruction* pop = NULL; - - HEnvironment* env = current_block_->last_environment(); - - if (env->entry()->arguments_pushed()) { - int argument_count = env->arguments_environment()->parameter_count(); - pop = new (zone()) LDrop(argument_count); - DCHECK(instr->argument_delta() == -argument_count); - } - - HEnvironment* outer = - current_block_->last_environment()->DiscardInlined(false); - current_block_->UpdateEnvironment(outer); - - return pop; -} - -LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) { - LOperand* context = UseFixed(instr->context(), cp); - LOperand* object = UseFixed(instr->enumerable(), r2); - LForInPrepareMap* result = new (zone()) LForInPrepareMap(context, object); - return MarkAsCall(DefineFixed(result, r2), instr, CAN_DEOPTIMIZE_EAGERLY); -} - -LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) { - LOperand* map = UseRegister(instr->map()); - return AssignEnvironment( - DefineAsRegister(new (zone()) LForInCacheArray(map))); -} - -LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* map = UseRegisterAtStart(instr->map()); - return AssignEnvironment(new (zone()) LCheckMapValue(value, map)); -} - -LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) { - LOperand* object = UseRegister(instr->object()); - LOperand* index = UseTempRegister(instr->index()); - LLoadFieldByIndex* load = new (zone()) LLoadFieldByIndex(object, index); - LInstruction* result = DefineSameAsFirst(load); - return AssignPointerMap(result); -} - -} // namespace internal -} // namespace v8 diff --git a/src/crankshaft/s390/lithium-s390.h b/src/crankshaft/s390/lithium-s390.h deleted file mode 100644 index f9710b1092..0000000000 --- a/src/crankshaft/s390/lithium-s390.h +++ /dev/null @@ -1,2248 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_S390_LITHIUM_S390_H_ -#define V8_CRANKSHAFT_S390_LITHIUM_S390_H_ - -#include "src/crankshaft/hydrogen.h" -#include "src/crankshaft/lithium.h" -#include "src/crankshaft/lithium-allocator.h" -#include "src/safepoint-table.h" -#include "src/utils.h" - -namespace v8 { -namespace internal { - -// Forward declarations. 
-class LCodeGen; - -#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \ - V(AccessArgumentsAt) \ - V(AddI) \ - V(Allocate) \ - V(ApplyArguments) \ - V(ArgumentsElements) \ - V(ArgumentsLength) \ - V(ArithmeticD) \ - V(ArithmeticT) \ - V(BitI) \ - V(BoundsCheck) \ - V(Branch) \ - V(CallWithDescriptor) \ - V(CallNewArray) \ - V(CallRuntime) \ - V(CheckArrayBufferNotNeutered) \ - V(CheckInstanceType) \ - V(CheckNonSmi) \ - V(CheckMaps) \ - V(CheckMapValue) \ - V(CheckSmi) \ - V(CheckValue) \ - V(ClampDToUint8) \ - V(ClampIToUint8) \ - V(ClampTToUint8) \ - V(ClassOfTestAndBranch) \ - V(CompareNumericAndBranch) \ - V(CmpObjectEqAndBranch) \ - V(CmpHoleAndBranch) \ - V(CmpMapAndBranch) \ - V(CmpT) \ - V(ConstantD) \ - V(ConstantE) \ - V(ConstantI) \ - V(ConstantS) \ - V(ConstantT) \ - V(Context) \ - V(DebugBreak) \ - V(DeclareGlobals) \ - V(Deoptimize) \ - V(DivByConstI) \ - V(DivByPowerOf2I) \ - V(DivI) \ - V(DoubleToI) \ - V(DoubleToSmi) \ - V(Drop) \ - V(Dummy) \ - V(DummyUse) \ - V(FastAllocate) \ - V(FlooringDivByConstI) \ - V(FlooringDivByPowerOf2I) \ - V(FlooringDivI) \ - V(ForInCacheArray) \ - V(ForInPrepareMap) \ - V(Goto) \ - V(HasInPrototypeChainAndBranch) \ - V(HasInstanceTypeAndBranch) \ - V(InnerAllocatedObject) \ - V(InstructionGap) \ - V(Integer32ToDouble) \ - V(InvokeFunction) \ - V(IsStringAndBranch) \ - V(IsSmiAndBranch) \ - V(IsUndetectableAndBranch) \ - V(Label) \ - V(LazyBailout) \ - V(LoadContextSlot) \ - V(LoadRoot) \ - V(LoadFieldByIndex) \ - V(LoadFunctionPrototype) \ - V(LoadKeyed) \ - V(LoadNamedField) \ - V(MathAbs) \ - V(MathClz32) \ - V(MathCos) \ - V(MathSin) \ - V(MathExp) \ - V(MathFloor) \ - V(MathFround) \ - V(MathLog) \ - V(MathMinMax) \ - V(MathPowHalf) \ - V(MathRound) \ - V(MathSqrt) \ - V(MaybeGrowElements) \ - V(ModByConstI) \ - V(ModByPowerOf2I) \ - V(ModI) \ - V(MulI) \ - V(MultiplyAddD) \ - V(MultiplySubD) \ - V(NumberTagD) \ - V(NumberTagI) \ - V(NumberTagU) \ - V(NumberUntagD) \ - V(OsrEntry) \ - V(Parameter) \ - V(Power) \ - V(Prologue) \ - V(PushArgument) \ - V(Return) \ - V(SeqStringGetChar) \ - V(SeqStringSetChar) \ - V(ShiftI) \ - V(SmiTag) \ - V(SmiUntag) \ - V(StackCheck) \ - V(StoreCodeEntry) \ - V(StoreContextSlot) \ - V(StoreKeyed) \ - V(StoreNamedField) \ - V(StringAdd) \ - V(StringCharCodeAt) \ - V(StringCharFromCode) \ - V(StringCompareAndBranch) \ - V(SubI) \ - V(TaggedToI) \ - V(ThisFunction) \ - V(TransitionElementsKind) \ - V(TrapAllocationMemento) \ - V(Typeof) \ - V(TypeofIsAndBranch) \ - V(Uint32ToDouble) \ - V(UnknownOSRValue) \ - V(WrapReceiver) - -#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \ - Opcode opcode() const final { return LInstruction::k##type; } \ - void CompileToNative(LCodeGen* generator) final; \ - const char* Mnemonic() const final { return mnemonic; } \ - static L##type* cast(LInstruction* instr) { \ - DCHECK(instr->Is##type()); \ - return reinterpret_cast<L##type*>(instr); \ - } - -#define DECLARE_HYDROGEN_ACCESSOR(type) \ - H##type* hydrogen() const { return H##type::cast(hydrogen_value()); } - -class LInstruction : public ZoneObject { - public: - LInstruction() - : environment_(NULL), - hydrogen_value_(NULL), - bit_field_(IsCallBits::encode(false)) {} - - virtual ~LInstruction() {} - - virtual void CompileToNative(LCodeGen* generator) = 0; - virtual const char* Mnemonic() const = 0; - virtual void PrintTo(StringStream* stream); - virtual void PrintDataTo(StringStream* stream); - virtual void PrintOutputOperandTo(StringStream* stream); - - enum Opcode { -// Declare a unique enum value for each instruction.
-#define DECLARE_OPCODE(type) k##type, - LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) kNumberOfInstructions -#undef DECLARE_OPCODE - }; - - virtual Opcode opcode() const = 0; - -// Declare non-virtual type testers for all leaf IR classes. -#define DECLARE_PREDICATE(type) \ - bool Is##type() const { return opcode() == k##type; } - LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE) -#undef DECLARE_PREDICATE - - // Declare virtual predicates for instructions that don't have - // an opcode. - virtual bool IsGap() const { return false; } - - virtual bool IsControl() const { return false; } - - // Try deleting this instruction if possible. - virtual bool TryDelete() { return false; } - - void set_environment(LEnvironment* env) { environment_ = env; } - LEnvironment* environment() const { return environment_; } - bool HasEnvironment() const { return environment_ != NULL; } - - void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); } - LPointerMap* pointer_map() const { return pointer_map_.get(); } - bool HasPointerMap() const { return pointer_map_.is_set(); } - - void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; } - HValue* hydrogen_value() const { return hydrogen_value_; } - - void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); } - bool IsCall() const { return IsCallBits::decode(bit_field_); } - - void MarkAsSyntacticTailCall() { - bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true); - } - bool IsSyntacticTailCall() const { - return IsSyntacticTailCallBits::decode(bit_field_); - } - - // Interface to the register allocator and iterators. - bool ClobbersTemps() const { return IsCall(); } - bool ClobbersRegisters() const { return IsCall(); } - virtual bool ClobbersDoubleRegisters(Isolate* isolate) const { - return IsCall(); - } - - // Interface to the register allocator and iterators. - bool IsMarkedAsCall() const { return IsCall(); } - - virtual bool HasResult() const = 0; - virtual LOperand* result() const = 0; - - LOperand* FirstInput() { return InputAt(0); } - LOperand* Output() { return HasResult() ? result() : NULL; } - - virtual bool HasInterestingComment(LCodeGen* gen) const { return true; } - -#ifdef DEBUG - void VerifyCall(); -#endif - - virtual int InputCount() = 0; - virtual LOperand* InputAt(int i) = 0; - - private: - // Iterator support. - friend class InputIterator; - - friend class TempIterator; - virtual int TempCount() = 0; - virtual LOperand* TempAt(int i) = 0; - - class IsCallBits : public BitField<bool, 0, 1> {}; - class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> { - }; - - LEnvironment* environment_; - SetOncePointer<LPointerMap> pointer_map_; - HValue* hydrogen_value_; - int bit_field_; -}; - -// R = number of result operands (0 or 1). -template <int R> -class LTemplateResultInstruction : public LInstruction { - public: - // Allow 0 or 1 output operands. - STATIC_ASSERT(R == 0 || R == 1); - bool HasResult() const final { return R != 0 && result() != NULL; } - void set_result(LOperand* operand) { results_[0] = operand; } - LOperand* result() const override { return results_[0]; } - - protected: - EmbeddedContainer<LOperand*, R> results_; -}; - -// R = number of result operands (0 or 1). -// I = number of input operands. -// T = number of temporary operands. -template <int R, int I, int T> -class LTemplateInstruction : public LTemplateResultInstruction<R> { - protected: - EmbeddedContainer<LOperand*, I> inputs_; - EmbeddedContainer<LOperand*, T> temps_; - - private: - // Iterator support.
- int InputCount() final { return I; } - LOperand* InputAt(int i) final { return inputs_[i]; } - - int TempCount() final { return T; } - LOperand* TempAt(int i) final { return temps_[i]; } -}; - -class LGap : public LTemplateInstruction<0, 0, 0> { - public: - explicit LGap(HBasicBlock* block) : block_(block) { - parallel_moves_[BEFORE] = NULL; - parallel_moves_[START] = NULL; - parallel_moves_[END] = NULL; - parallel_moves_[AFTER] = NULL; - } - - // Can't use the DECLARE-macro here because of sub-classes. - bool IsGap() const override { return true; } - void PrintDataTo(StringStream* stream) override; - static LGap* cast(LInstruction* instr) { - DCHECK(instr->IsGap()); - return reinterpret_cast(instr); - } - - bool IsRedundant() const; - - HBasicBlock* block() const { return block_; } - - enum InnerPosition { - BEFORE, - START, - END, - AFTER, - FIRST_INNER_POSITION = BEFORE, - LAST_INNER_POSITION = AFTER - }; - - LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) { - if (parallel_moves_[pos] == NULL) { - parallel_moves_[pos] = new (zone) LParallelMove(zone); - } - return parallel_moves_[pos]; - } - - LParallelMove* GetParallelMove(InnerPosition pos) { - return parallel_moves_[pos]; - } - - private: - LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1]; - HBasicBlock* block_; -}; - -class LInstructionGap final : public LGap { - public: - explicit LInstructionGap(HBasicBlock* block) : LGap(block) {} - - bool HasInterestingComment(LCodeGen* gen) const override { - return !IsRedundant(); - } - - DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap") -}; - -class LGoto final : public LTemplateInstruction<0, 0, 0> { - public: - explicit LGoto(HBasicBlock* block) : block_(block) {} - - bool HasInterestingComment(LCodeGen* gen) const override; - DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") - void PrintDataTo(StringStream* stream) override; - bool IsControl() const override { return true; } - - int block_id() const { return block_->block_id(); } - - private: - HBasicBlock* block_; -}; - -class LPrologue final : public LTemplateInstruction<0, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue") -}; - -class LLazyBailout final : public LTemplateInstruction<0, 0, 0> { - public: - LLazyBailout() : gap_instructions_size_(0) {} - - DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout") - - void set_gap_instructions_size(int gap_instructions_size) { - gap_instructions_size_ = gap_instructions_size; - } - int gap_instructions_size() { return gap_instructions_size_; } - - private: - int gap_instructions_size_; -}; - -class LDummy final : public LTemplateInstruction<1, 0, 0> { - public: - LDummy() {} - DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy") -}; - -class LDummyUse final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LDummyUse(LOperand* value) { inputs_[0] = value; } - DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use") -}; - -class LDeoptimize final : public LTemplateInstruction<0, 0, 0> { - public: - bool IsControl() const override { return true; } - DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize") - DECLARE_HYDROGEN_ACCESSOR(Deoptimize) -}; - -class LLabel final : public LGap { - public: - explicit LLabel(HBasicBlock* block) : LGap(block), replacement_(NULL) {} - - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(Label, "label") - - void PrintDataTo(StringStream* stream) override; - - int block_id() const { return block()->block_id(); } - bool is_loop_header() const { return 
block()->IsLoopHeader(); } - bool is_osr_entry() const { return block()->is_osr_entry(); } - Label* label() { return &label_; } - LLabel* replacement() const { return replacement_; } - void set_replacement(LLabel* label) { replacement_ = label; } - bool HasReplacement() const { return replacement_ != NULL; } - - private: - Label label_; - LLabel* replacement_; -}; - -class LParameter final : public LTemplateInstruction<1, 0, 0> { - public: - virtual bool HasInterestingComment(LCodeGen* gen) const { return false; } - DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter") -}; - -class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> { - public: - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value") -}; - -template -class LControlInstruction : public LTemplateInstruction<0, I, T> { - public: - LControlInstruction() : false_label_(NULL), true_label_(NULL) {} - - bool IsControl() const final { return true; } - - int SuccessorCount() { return hydrogen()->SuccessorCount(); } - HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); } - - int TrueDestination(LChunk* chunk) { - return chunk->LookupDestination(true_block_id()); - } - int FalseDestination(LChunk* chunk) { - return chunk->LookupDestination(false_block_id()); - } - - Label* TrueLabel(LChunk* chunk) { - if (true_label_ == NULL) { - true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk)); - } - return true_label_; - } - Label* FalseLabel(LChunk* chunk) { - if (false_label_ == NULL) { - false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk)); - } - return false_label_; - } - - protected: - int true_block_id() { return SuccessorAt(0)->block_id(); } - int false_block_id() { return SuccessorAt(1)->block_id(); } - - private: - HControlInstruction* hydrogen() { - return HControlInstruction::cast(this->hydrogen_value()); - } - - Label* false_label_; - Label* true_label_; -}; - -class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> { - public: - LWrapReceiver(LOperand* receiver, LOperand* function) { - inputs_[0] = receiver; - inputs_[1] = function; - } - - DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver") - DECLARE_HYDROGEN_ACCESSOR(WrapReceiver) - - LOperand* receiver() { return inputs_[0]; } - LOperand* function() { return inputs_[1]; } -}; - -class LApplyArguments final : public LTemplateInstruction<1, 4, 0> { - public: - LApplyArguments(LOperand* function, LOperand* receiver, LOperand* length, - LOperand* elements) { - inputs_[0] = function; - inputs_[1] = receiver; - inputs_[2] = length; - inputs_[3] = elements; - } - - DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments") - DECLARE_HYDROGEN_ACCESSOR(ApplyArguments) - - LOperand* function() { return inputs_[0]; } - LOperand* receiver() { return inputs_[1]; } - LOperand* length() { return inputs_[2]; } - LOperand* elements() { return inputs_[3]; } -}; - -class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> { - public: - LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) { - inputs_[0] = arguments; - inputs_[1] = length; - inputs_[2] = index; - } - - DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at") - - LOperand* arguments() { return inputs_[0]; } - LOperand* length() { return inputs_[1]; } - LOperand* index() { return inputs_[2]; } - - void PrintDataTo(StringStream* stream) override; -}; - -class LArgumentsLength final : public LTemplateInstruction<1, 1, 0> { - public: - 
explicit LArgumentsLength(LOperand* elements) { inputs_[0] = elements; } - - LOperand* elements() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length") -}; - -class LArgumentsElements final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements") - DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements) -}; - -class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> { - public: - LModByPowerOf2I(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) - - private: - int32_t divisor_; -}; - -class LModByConstI final : public LTemplateInstruction<1, 1, 0> { - public: - LModByConstI(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) - - private: - int32_t divisor_; -}; - -class LModI final : public LTemplateInstruction<1, 2, 0> { - public: - LModI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) -}; - -class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> { - public: - LDivByPowerOf2I(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i") - DECLARE_HYDROGEN_ACCESSOR(Div) - - private: - int32_t divisor_; -}; - -class LDivByConstI final : public LTemplateInstruction<1, 1, 0> { - public: - LDivByConstI(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i") - DECLARE_HYDROGEN_ACCESSOR(Div) - - private: - int32_t divisor_; -}; - -class LDivI final : public LTemplateInstruction<1, 2, 0> { - public: - LDivI(LOperand* dividend, LOperand* divisor) { - inputs_[0] = dividend; - inputs_[1] = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - LOperand* divisor() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i") - DECLARE_HYDROGEN_ACCESSOR(BinaryOperation) -}; - -class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> { - public: - LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I, - "flooring-div-by-power-of-2-i") - DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) - - private: - int32_t divisor_; -}; - -class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 1> { - public: - LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) { - inputs_[0] = dividend; - divisor_ = divisor; - temps_[0] = temp; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() 
const { return divisor_; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i") - DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) - - private: - int32_t divisor_; -}; - -class LFlooringDivI final : public LTemplateInstruction<1, 2, 0> { - public: - LFlooringDivI(LOperand* dividend, LOperand* divisor) { - inputs_[0] = dividend; - inputs_[1] = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - LOperand* divisor() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i") - DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) -}; - -class LMulI final : public LTemplateInstruction<1, 2, 0> { - public: - LMulI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i") - DECLARE_HYDROGEN_ACCESSOR(Mul) -}; - -// Instruction for computing multiplier * multiplicand + addend. -class LMultiplyAddD final : public LTemplateInstruction<1, 3, 0> { - public: - LMultiplyAddD(LOperand* addend, LOperand* multiplier, - LOperand* multiplicand) { - inputs_[0] = addend; - inputs_[1] = multiplier; - inputs_[2] = multiplicand; - } - - LOperand* addend() { return inputs_[0]; } - LOperand* multiplier() { return inputs_[1]; } - LOperand* multiplicand() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d") -}; - -// Instruction for computing minuend - multiplier * multiplicand. -class LMultiplySubD final : public LTemplateInstruction<1, 3, 0> { - public: - LMultiplySubD(LOperand* minuend, LOperand* multiplier, - LOperand* multiplicand) { - inputs_[0] = minuend; - inputs_[1] = multiplier; - inputs_[2] = multiplicand; - } - - LOperand* minuend() { return inputs_[0]; } - LOperand* multiplier() { return inputs_[1]; } - LOperand* multiplicand() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(MultiplySubD, "multiply-sub-d") -}; - -class LDebugBreak final : public LTemplateInstruction<0, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break") -}; - -class LCompareNumericAndBranch final : public LControlInstruction<2, 0> { - public: - LCompareNumericAndBranch(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch, - "compare-numeric-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch) - - Token::Value op() const { return hydrogen()->token(); } - bool is_double() const { return hydrogen()->representation().IsDouble(); } - - void PrintDataTo(StringStream* stream) override; -}; - -class LMathFloor final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathFloor(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - -class LMathRound final : public LTemplateInstruction<1, 1, 1> { - public: - LMathRound(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - -class LMathFround final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathFround(LOperand* value) { 
inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround") -}; - -class LMathAbs final : public LTemplateInstruction<1, 2, 0> { - public: - LMathAbs(LOperand* context, LOperand* value) { - inputs_[1] = context; - inputs_[0] = value; - } - - LOperand* context() { return inputs_[1]; } - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - -class LMathLog final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathLog(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log") -}; - -class LMathClz32 final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathClz32(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32") -}; - -class LMathCos final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathCos(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos") -}; - -class LMathSin final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathSin(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin") -}; - -class LMathExp final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathExp(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp") -}; - -class LMathSqrt final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathSqrt(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt") -}; - -class LMathPowHalf final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathPowHalf(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half") -}; - -class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> { - public: - LCmpObjectEqAndBranch(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch) -}; - -class LCmpHoleAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LCmpHoleAndBranch(LOperand* object) { inputs_[0] = object; } - - LOperand* object() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch) -}; - -class LIsStringAndBranch final : public LControlInstruction<1, 1> { - public: - LIsStringAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - -class LIsSmiAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LIsSmiAndBranch(LOperand* value) { inputs_[0] = value; } - - 
LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - -class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> { - public: - explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch, - "is-undetectable-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - -class LStringCompareAndBranch final : public LControlInstruction<3, 0> { - public: - LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch, - "string-compare-and-branch") - DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch) - - Token::Value op() const { return hydrogen()->token(); } - - void PrintDataTo(StringStream* stream) override; -}; - -class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LHasInstanceTypeAndBranch(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch, - "has-instance-type-and-branch") - DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - -class LClassOfTestAndBranch final : public LControlInstruction<1, 1> { - public: - LClassOfTestAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, "class-of-test-and-branch") - DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - -class LCmpT final : public LTemplateInstruction<1, 3, 0> { - public: - LCmpT(LOperand* context, LOperand* left, LOperand* right) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t") - DECLARE_HYDROGEN_ACCESSOR(CompareGeneric) - - Token::Value op() const { return hydrogen()->token(); } -}; - -class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> { - public: - LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) { - inputs_[0] = object; - inputs_[1] = prototype; - } - - LOperand* object() const { return inputs_[0]; } - LOperand* prototype() const { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch, - "has-in-prototype-chain-and-branch") - DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch) -}; - -class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> { - public: - LBoundsCheck(LOperand* index, LOperand* length) { - inputs_[0] = index; - inputs_[1] = length; - } - - LOperand* index() { return inputs_[0]; } - LOperand* length() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check") - 
DECLARE_HYDROGEN_ACCESSOR(BoundsCheck) -}; - -class LBitI final : public LTemplateInstruction<1, 2, 0> { - public: - LBitI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - Token::Value op() const { return hydrogen()->op(); } - - DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i") - DECLARE_HYDROGEN_ACCESSOR(Bitwise) -}; - -class LShiftI final : public LTemplateInstruction<1, 2, 0> { - public: - LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt) - : op_(op), can_deopt_(can_deopt) { - inputs_[0] = left; - inputs_[1] = right; - } - - Token::Value op() const { return op_; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - bool can_deopt() const { return can_deopt_; } - - DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i") - - private: - Token::Value op_; - bool can_deopt_; -}; - -class LSubI final : public LTemplateInstruction<1, 2, 0> { - public: - LSubI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i") - DECLARE_HYDROGEN_ACCESSOR(Sub) -}; - -class LConstantI final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - int32_t value() const { return hydrogen()->Integer32Value(); } -}; - -class LConstantS final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); } -}; - -class LConstantD final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - double value() const { return hydrogen()->DoubleValue(); } - - uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); } -}; - -class LConstantE final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - ExternalReference value() const { - return hydrogen()->ExternalReferenceValue(); - } -}; - -class LConstantT final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - Handle value(Isolate* isolate) const { - return hydrogen()->handle(isolate); - } -}; - -class LBranch final : public LControlInstruction<1, 0> { - public: - explicit LBranch(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(Branch, "branch") - DECLARE_HYDROGEN_ACCESSOR(Branch) - - void PrintDataTo(StringStream* stream) override; -}; - -class LCmpMapAndBranch final : public LControlInstruction<1, 1> { - public: - LCmpMapAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareMap) - - Handle map() const { return hydrogen()->map().handle(); } -}; - -class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> { - public: - LSeqStringGetChar(LOperand* string, LOperand* index) { - inputs_[0] = string; - 
inputs_[1] = index; - } - - LOperand* string() const { return inputs_[0]; } - LOperand* index() const { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char") - DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar) -}; - -class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 0> { - public: - LSeqStringSetChar(LOperand* context, LOperand* string, LOperand* index, - LOperand* value) { - inputs_[0] = context; - inputs_[1] = string; - inputs_[2] = index; - inputs_[3] = value; - } - - LOperand* string() { return inputs_[1]; } - LOperand* index() { return inputs_[2]; } - LOperand* value() { return inputs_[3]; } - - DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char") - DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar) -}; - -class LAddI final : public LTemplateInstruction<1, 2, 0> { - public: - LAddI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i") - DECLARE_HYDROGEN_ACCESSOR(Add) -}; - -class LMathMinMax final : public LTemplateInstruction<1, 2, 0> { - public: - LMathMinMax(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max") - DECLARE_HYDROGEN_ACCESSOR(MathMinMax) -}; - -class LPower final : public LTemplateInstruction<1, 2, 0> { - public: - LPower(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(Power, "power") - DECLARE_HYDROGEN_ACCESSOR(Power) -}; - -class LArithmeticD final : public LTemplateInstruction<1, 2, 0> { - public: - LArithmeticD(Token::Value op, LOperand* left, LOperand* right) : op_(op) { - inputs_[0] = left; - inputs_[1] = right; - } - - Token::Value op() const { return op_; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - Opcode opcode() const override { return LInstruction::kArithmeticD; } - void CompileToNative(LCodeGen* generator) override; - const char* Mnemonic() const override; - - private: - Token::Value op_; -}; - -class LArithmeticT final : public LTemplateInstruction<1, 3, 0> { - public: - LArithmeticT(Token::Value op, LOperand* context, LOperand* left, - LOperand* right) - : op_(op) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - Token::Value op() const { return op_; } - - Opcode opcode() const override { return LInstruction::kArithmeticT; } - void CompileToNative(LCodeGen* generator) override; - const char* Mnemonic() const override; - - DECLARE_HYDROGEN_ACCESSOR(BinaryOperation) - - private: - Token::Value op_; -}; - -class LReturn final : public LTemplateInstruction<0, 3, 0> { - public: - LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) { - inputs_[0] = value; - inputs_[1] = context; - inputs_[2] = parameter_count; - } - - LOperand* value() { return inputs_[0]; } - - bool has_constant_parameter_count() { - return parameter_count()->IsConstantOperand(); - } - LConstantOperand* constant_parameter_count() { - DCHECK(has_constant_parameter_count()); - return LConstantOperand::cast(parameter_count()); - } - 
LOperand* parameter_count() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(Return, "return") -}; - -class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LLoadNamedField(LOperand* object) { inputs_[0] = object; } - - LOperand* object() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field") - DECLARE_HYDROGEN_ACCESSOR(LoadNamedField) -}; - -class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LLoadFunctionPrototype(LOperand* function) { inputs_[0] = function; } - - LOperand* function() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype") - DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype) -}; - -class LLoadRoot final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root") - DECLARE_HYDROGEN_ACCESSOR(LoadRoot) - - Heap::RootListIndex index() const { return hydrogen()->index(); } -}; - -class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> { - public: - LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) { - inputs_[0] = elements; - inputs_[1] = key; - inputs_[2] = backing_store_owner; - } - - LOperand* elements() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - LOperand* backing_store_owner() { return inputs_[2]; } - ElementsKind elements_kind() const { return hydrogen()->elements_kind(); } - bool is_fixed_typed_array() const { - return hydrogen()->is_fixed_typed_array(); - } - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed") - DECLARE_HYDROGEN_ACCESSOR(LoadKeyed) - - void PrintDataTo(StringStream* stream) override; - uint32_t base_offset() const { return hydrogen()->base_offset(); } -}; - -class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LLoadContextSlot(LOperand* context) { inputs_[0] = context; } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot") - DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot) - - int slot_index() { return hydrogen()->slot_index(); } - - void PrintDataTo(StringStream* stream) override; -}; - -class LStoreContextSlot final : public LTemplateInstruction<0, 2, 0> { - public: - LStoreContextSlot(LOperand* context, LOperand* value) { - inputs_[0] = context; - inputs_[1] = value; - } - - LOperand* context() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot") - DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot) - - int slot_index() { return hydrogen()->slot_index(); } - - void PrintDataTo(StringStream* stream) override; -}; - -class LPushArgument final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LPushArgument(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument") -}; - -class LDrop final : public LTemplateInstruction<0, 0, 0> { - public: - explicit LDrop(int count) : count_(count) {} - - int count() const { return count_; } - - DECLARE_CONCRETE_INSTRUCTION(Drop, "drop") - - private: - int count_; -}; - -class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 0> { - public: - LStoreCodeEntry(LOperand* function, LOperand* code_object) { - inputs_[0] = function; - inputs_[1] = code_object; - } - - LOperand* function() { return inputs_[0]; } - LOperand* code_object() { return inputs_[1]; } - - void 
PrintDataTo(StringStream* stream) override; - - DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry") - DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry) -}; - -class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> { - public: - LInnerAllocatedObject(LOperand* base_object, LOperand* offset) { - inputs_[0] = base_object; - inputs_[1] = offset; - } - - LOperand* base_object() const { return inputs_[0]; } - LOperand* offset() const { return inputs_[1]; } - - void PrintDataTo(StringStream* stream) override; - - DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object") -}; - -class LThisFunction final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function") - DECLARE_HYDROGEN_ACCESSOR(ThisFunction) -}; - -class LContext final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(Context, "context") - DECLARE_HYDROGEN_ACCESSOR(Context) -}; - -class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LDeclareGlobals(LOperand* context) { inputs_[0] = context; } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals") - DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals) -}; - -class LCallWithDescriptor final : public LTemplateResultInstruction<1> { - public: - LCallWithDescriptor(CallInterfaceDescriptor descriptor, - const ZoneList& operands, Zone* zone) - : descriptor_(descriptor), - inputs_(descriptor.GetRegisterParameterCount() + - kImplicitRegisterParameterCount, - zone) { - DCHECK(descriptor.GetRegisterParameterCount() + - kImplicitRegisterParameterCount == - operands.length()); - inputs_.AddAll(operands, zone); - } - - LOperand* target() const { return inputs_[0]; } - - const CallInterfaceDescriptor descriptor() { return descriptor_; } - - DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor) - - // The target and context are passed as implicit parameters that are not - // explicitly listed in the descriptor. - static const int kImplicitRegisterParameterCount = 2; - - private: - DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor") - - void PrintDataTo(StringStream* stream) override; - - int arity() const { return hydrogen()->argument_count() - 1; } - - CallInterfaceDescriptor descriptor_; - ZoneList inputs_; - - // Iterator support. 
- int InputCount() final { return inputs_.length(); } - LOperand* InputAt(int i) final { return inputs_[i]; } - - int TempCount() final { return 0; } - LOperand* TempAt(int i) final { return NULL; } -}; - -class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> { - public: - LInvokeFunction(LOperand* context, LOperand* function) { - inputs_[0] = context; - inputs_[1] = function; - } - - LOperand* context() { return inputs_[0]; } - LOperand* function() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function") - DECLARE_HYDROGEN_ACCESSOR(InvokeFunction) - - void PrintDataTo(StringStream* stream) override; - - int arity() const { return hydrogen()->argument_count() - 1; } -}; - -class LCallNewArray final : public LTemplateInstruction<1, 2, 0> { - public: - LCallNewArray(LOperand* context, LOperand* constructor) { - inputs_[0] = context; - inputs_[1] = constructor; - } - - LOperand* context() { return inputs_[0]; } - LOperand* constructor() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array") - DECLARE_HYDROGEN_ACCESSOR(CallNewArray) - - void PrintDataTo(StringStream* stream) override; - - int arity() const { return hydrogen()->argument_count() - 1; } -}; - -class LCallRuntime final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LCallRuntime(LOperand* context) { inputs_[0] = context; } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime") - DECLARE_HYDROGEN_ACCESSOR(CallRuntime) - - bool ClobbersDoubleRegisters(Isolate* isolate) const override { - return save_doubles() == kDontSaveFPRegs; - } - - const Runtime::Function* function() const { return hydrogen()->function(); } - int arity() const { return hydrogen()->argument_count(); } - SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); } -}; - -class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LInteger32ToDouble(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double") -}; - -class LUint32ToDouble final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LUint32ToDouble(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double") -}; - -class LNumberTagI final : public LTemplateInstruction<1, 1, 2> { - public: - LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i") -}; - -class LNumberTagU final : public LTemplateInstruction<1, 1, 2> { - public: - LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u") -}; - -class LNumberTagD final : public LTemplateInstruction<1, 1, 2> { - public: - LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - LOperand* 
temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d") - DECLARE_HYDROGEN_ACCESSOR(Change) -}; - -class LDoubleToSmi final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LDoubleToSmi(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi") - DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) - - bool truncating() { return hydrogen()->CanTruncateToInt32(); } -}; - -// Sometimes truncating conversion from a tagged value to an int32. -class LDoubleToI final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LDoubleToI(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i") - DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) - - bool truncating() { return hydrogen()->CanTruncateToInt32(); } -}; - -// Truncating conversion from a tagged value to an int32. -class LTaggedToI final : public LTemplateInstruction<1, 1, 2> { - public: - LTaggedToI(LOperand* value, LOperand* temp, LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i") - DECLARE_HYDROGEN_ACCESSOR(Change) - - bool truncating() { return hydrogen()->CanTruncateToInt32(); } -}; - -class LSmiTag final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LSmiTag(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag") - DECLARE_HYDROGEN_ACCESSOR(Change) -}; - -class LNumberUntagD final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LNumberUntagD(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag") - DECLARE_HYDROGEN_ACCESSOR(Change) - - bool truncating() { return hydrogen()->CanTruncateToNumber(); } -}; - -class LSmiUntag final : public LTemplateInstruction<1, 1, 0> { - public: - LSmiUntag(LOperand* value, bool needs_check) : needs_check_(needs_check) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - bool needs_check() const { return needs_check_; } - - DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag") - - private: - bool needs_check_; -}; - -class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> { - public: - LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) { - inputs_[0] = object; - inputs_[1] = value; - temps_[0] = temp; - } - - LOperand* object() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") - DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) - - void PrintDataTo(StringStream* stream) override; - - Representation representation() const { - return hydrogen()->field_representation(); - } -}; - -class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> { - public: - LStoreKeyed(LOperand* object, LOperand* key, LOperand* value, - LOperand* backing_store_owner) { - inputs_[0] = object; - inputs_[1] = key; - inputs_[2] = value; - inputs_[3] = backing_store_owner; - } - - bool is_fixed_typed_array() const { - return hydrogen()->is_fixed_typed_array(); - } - LOperand* elements() { return inputs_[0]; } - LOperand* key() { return 
inputs_[1]; } - LOperand* value() { return inputs_[2]; } - LOperand* backing_store_owner() { return inputs_[3]; } - ElementsKind elements_kind() const { return hydrogen()->elements_kind(); } - - DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyed) - - void PrintDataTo(StringStream* stream) override; - bool NeedsCanonicalization() { - if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() || - hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) { - return false; - } - return hydrogen()->NeedsCanonicalization(); - } - uint32_t base_offset() const { return hydrogen()->base_offset(); } -}; - -class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> { - public: - LTransitionElementsKind(LOperand* object, LOperand* context, - LOperand* new_map_temp) { - inputs_[0] = object; - inputs_[1] = context; - temps_[0] = new_map_temp; - } - - LOperand* context() { return inputs_[1]; } - LOperand* object() { return inputs_[0]; } - LOperand* new_map_temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind, - "transition-elements-kind") - DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind) - - void PrintDataTo(StringStream* stream) override; - - Handle original_map() { return hydrogen()->original_map().handle(); } - Handle transitioned_map() { - return hydrogen()->transitioned_map().handle(); - } - ElementsKind from_kind() { return hydrogen()->from_kind(); } - ElementsKind to_kind() { return hydrogen()->to_kind(); } -}; - -class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 2> { - public: - LTrapAllocationMemento(LOperand* object, LOperand* temp1, LOperand* temp2) { - inputs_[0] = object; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* object() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, "trap-allocation-memento") -}; - -class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> { - public: - LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements, - LOperand* key, LOperand* current_capacity) { - inputs_[0] = context; - inputs_[1] = object; - inputs_[2] = elements; - inputs_[3] = key; - inputs_[4] = current_capacity; - } - - LOperand* context() { return inputs_[0]; } - LOperand* object() { return inputs_[1]; } - LOperand* elements() { return inputs_[2]; } - LOperand* key() { return inputs_[3]; } - LOperand* current_capacity() { return inputs_[4]; } - - bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; } - - DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements) - DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements") -}; - -class LStringAdd final : public LTemplateInstruction<1, 3, 0> { - public: - LStringAdd(LOperand* context, LOperand* left, LOperand* right) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add") - DECLARE_HYDROGEN_ACCESSOR(StringAdd) -}; - -class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> { - public: - LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) { - inputs_[0] = context; - inputs_[1] = string; - inputs_[2] = index; - } - - LOperand* context() { return inputs_[0]; } - LOperand* string() { return inputs_[1]; } - LOperand* 
index() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at") - DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt) -}; - -class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> { - public: - explicit LStringCharFromCode(LOperand* context, LOperand* char_code) { - inputs_[0] = context; - inputs_[1] = char_code; - } - - LOperand* context() { return inputs_[0]; } - LOperand* char_code() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code") - DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode) -}; - -class LCheckValue final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckValue(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value") - DECLARE_HYDROGEN_ACCESSOR(CheckValue) -}; - -class LCheckArrayBufferNotNeutered final - : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckArrayBufferNotNeutered(LOperand* view) { inputs_[0] = view; } - - LOperand* view() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered, - "check-array-buffer-not-neutered") - DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered) -}; - -class LCheckInstanceType final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckInstanceType(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type") - DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType) -}; - -class LCheckMaps final : public LTemplateInstruction<0, 1, 1> { - public: - explicit LCheckMaps(LOperand* value = NULL, LOperand* temp = NULL) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps") - DECLARE_HYDROGEN_ACCESSOR(CheckMaps) -}; - -class LCheckSmi final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LCheckSmi(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi") -}; - -class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckNonSmi(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi") - DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject) -}; - -class LClampDToUint8 final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LClampDToUint8(LOperand* unclamped) { inputs_[0] = unclamped; } - - LOperand* unclamped() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8") -}; - -class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LClampIToUint8(LOperand* unclamped) { inputs_[0] = unclamped; } - - LOperand* unclamped() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8") -}; - -class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> { - public: - LClampTToUint8(LOperand* unclamped, LOperand* temp) { - inputs_[0] = unclamped; - temps_[0] = temp; - } - - LOperand* unclamped() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8") -}; - -class LAllocate final : public LTemplateInstruction<1, 2, 2> { - public: - LAllocate(LOperand* context, LOperand* size, LOperand* 
temp1, - LOperand* temp2) { - inputs_[0] = context; - inputs_[1] = size; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* context() { return inputs_[0]; } - LOperand* size() { return inputs_[1]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate") - DECLARE_HYDROGEN_ACCESSOR(Allocate) -}; - -class LFastAllocate final : public LTemplateInstruction<1, 1, 2> { - public: - LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) { - inputs_[0] = size; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* size() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate") - DECLARE_HYDROGEN_ACCESSOR(Allocate) -}; - -class LTypeof final : public LTemplateInstruction<1, 2, 0> { - public: - LTypeof(LOperand* context, LOperand* value) { - inputs_[0] = context; - inputs_[1] = value; - } - - LOperand* context() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof") -}; - -class LTypeofIsAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LTypeofIsAndBranch(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch") - DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch) - - Handle type_literal() { return hydrogen()->type_literal(); } - - void PrintDataTo(StringStream* stream) override; -}; - -class LOsrEntry final : public LTemplateInstruction<0, 0, 0> { - public: - LOsrEntry() {} - - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry") -}; - -class LStackCheck final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LStackCheck(LOperand* context) { inputs_[0] = context; } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check") - DECLARE_HYDROGEN_ACCESSOR(StackCheck) - - Label* done_label() { return &done_label_; } - - private: - Label done_label_; -}; - -class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> { - public: - LForInPrepareMap(LOperand* context, LOperand* object) { - inputs_[0] = context; - inputs_[1] = object; - } - - LOperand* context() { return inputs_[0]; } - LOperand* object() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map") -}; - -class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LForInCacheArray(LOperand* map) { inputs_[0] = map; } - - LOperand* map() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array") - - int idx() { return HForInCacheArray::cast(this->hydrogen_value())->idx(); } -}; - -class LCheckMapValue final : public LTemplateInstruction<0, 2, 0> { - public: - LCheckMapValue(LOperand* value, LOperand* map) { - inputs_[0] = value; - inputs_[1] = map; - } - - LOperand* value() { return inputs_[0]; } - LOperand* map() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value") -}; - -class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> { - public: - LLoadFieldByIndex(LOperand* object, LOperand* index) { - inputs_[0] = object; - inputs_[1] = index; - } - - LOperand* object() { return inputs_[0]; } - LOperand* index() { return inputs_[1]; } - - 
DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index") -}; - -class LChunkBuilder; -class LPlatformChunk final : public LChunk { - public: - LPlatformChunk(CompilationInfo* info, HGraph* graph) : LChunk(info, graph) {} - - int GetNextSpillIndex(RegisterKind kind); - LOperand* GetNextSpillSlot(RegisterKind kind); -}; - -class LChunkBuilder final : public LChunkBuilderBase { - public: - LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator) - : LChunkBuilderBase(info, graph), - current_instruction_(NULL), - current_block_(NULL), - next_block_(NULL), - allocator_(allocator) {} - - // Build the sequence for the graph. - LPlatformChunk* Build(); - -// Declare methods that deal with the individual node types. -#define DECLARE_DO(type) LInstruction* Do##type(H##type* node); - HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) -#undef DECLARE_DO - - LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend); - LInstruction* DoMultiplySub(HValue* minuend, HMul* mul); - - static bool HasMagicNumberForDivisor(int32_t divisor); - - LInstruction* DoMathFloor(HUnaryMathOperation* instr); - LInstruction* DoMathRound(HUnaryMathOperation* instr); - LInstruction* DoMathFround(HUnaryMathOperation* instr); - LInstruction* DoMathAbs(HUnaryMathOperation* instr); - LInstruction* DoMathLog(HUnaryMathOperation* instr); - LInstruction* DoMathCos(HUnaryMathOperation* instr); - LInstruction* DoMathSin(HUnaryMathOperation* instr); - LInstruction* DoMathExp(HUnaryMathOperation* instr); - LInstruction* DoMathSqrt(HUnaryMathOperation* instr); - LInstruction* DoMathPowHalf(HUnaryMathOperation* instr); - LInstruction* DoMathClz32(HUnaryMathOperation* instr); - LInstruction* DoDivByPowerOf2I(HDiv* instr); - LInstruction* DoDivByConstI(HDiv* instr); - LInstruction* DoDivI(HDiv* instr); - LInstruction* DoModByPowerOf2I(HMod* instr); - LInstruction* DoModByConstI(HMod* instr); - LInstruction* DoModI(HMod* instr); - LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr); - LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr); - LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr); - - private: - // Methods for getting operands for Use / Define / Temp. - LUnallocated* ToUnallocated(Register reg); - LUnallocated* ToUnallocated(DoubleRegister reg); - - // Methods for setting up define-use relationships. - MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand); - MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register); - MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value, - DoubleRegister fixed_register); - - // A value that is guaranteed to be allocated to a register. - // Operand created by UseRegister is guaranteed to be live until the end of - // instruction. This means that register allocator will not reuse it's - // register for any other operand inside instruction. - // Operand created by UseRegisterAtStart is guaranteed to be live only at - // instruction start. Register allocator is free to assign the same register - // to some other operand used inside instruction (i.e. temporary or - // output). - MUST_USE_RESULT LOperand* UseRegister(HValue* value); - MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value); - - // An input operand in a register that may be trashed. - MUST_USE_RESULT LOperand* UseTempRegister(HValue* value); - - // An input operand in a register or stack slot. 
- MUST_USE_RESULT LOperand* Use(HValue* value); - MUST_USE_RESULT LOperand* UseAtStart(HValue* value); - - // An input operand in a register, stack slot or a constant operand. - MUST_USE_RESULT LOperand* UseOrConstant(HValue* value); - MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value); - - // An input operand in a register or a constant operand. - MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value); - MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value); - - // An input operand in a constant operand. - MUST_USE_RESULT LOperand* UseConstant(HValue* value); - - // An input operand in register, stack slot or a constant operand. - // Will not be moved to a register even if one is freely available. - MUST_USE_RESULT LOperand* UseAny(HValue* value) override; - - // Temporary operand that must be in a register. - MUST_USE_RESULT LUnallocated* TempRegister(); - MUST_USE_RESULT LUnallocated* TempDoubleRegister(); - MUST_USE_RESULT LOperand* FixedTemp(Register reg); - MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg); - - // Methods for setting up define-use relationships. - // Return the same instruction that they are passed. - LInstruction* Define(LTemplateResultInstruction<1>* instr, - LUnallocated* result); - LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr); - LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr, - int index); - LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr); - LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr, Register reg); - LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr, - DoubleRegister reg); - LInstruction* AssignEnvironment(LInstruction* instr); - LInstruction* AssignPointerMap(LInstruction* instr); - - enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY }; - - // By default we assume that instruction sequences generated for calls - // cannot deoptimize eagerly and we do not attach environment to this - // instruction. - LInstruction* MarkAsCall( - LInstruction* instr, HInstruction* hinstr, - CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY); - - void VisitInstruction(HInstruction* current); - void AddInstruction(LInstruction* instr, HInstruction* current); - - void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block); - LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr); - LInstruction* DoArithmeticD(Token::Value op, - HArithmeticBinaryOperation* instr); - LInstruction* DoArithmeticT(Token::Value op, HBinaryOperation* instr); - - HInstruction* current_instruction_; - HBasicBlock* current_block_; - HBasicBlock* next_block_; - LAllocator* allocator_; - - DISALLOW_COPY_AND_ASSIGN(LChunkBuilder); -}; - -#undef DECLARE_HYDROGEN_ACCESSOR -#undef DECLARE_CONCRETE_INSTRUCTION -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_S390_LITHIUM_S390_H_ diff --git a/src/crankshaft/unique.h b/src/crankshaft/unique.h deleted file mode 100644 index 4c6a0976f8..0000000000 --- a/src/crankshaft/unique.h +++ /dev/null @@ -1,362 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#ifndef V8_CRANKSHAFT_UNIQUE_H_ -#define V8_CRANKSHAFT_UNIQUE_H_ - -#include // NOLINT(readability/streams) - -#include "src/assert-scope.h" -#include "src/base/functional.h" -#include "src/handles.h" -#include "src/utils.h" -#include "src/zone/zone.h" - -namespace v8 { -namespace internal { - - -template -class UniqueSet; - - -// Represents a handle to an object on the heap, but with the additional -// ability of checking for equality and hashing without accessing the heap. -// -// Creating a Unique requires first dereferencing the handle to obtain -// the address of the object, which is used as the hashcode and the basis for -// comparison. The object can be moved later by the GC, but comparison -// and hashing use the old address of the object, without dereferencing it. -// -// Careful! Comparison of two Uniques is only correct if both were created -// in the same "era" of GC or if at least one is a non-movable object. -template -class Unique final { - public: - Unique() : raw_address_(NULL) {} - - // TODO(titzer): make private and introduce a uniqueness scope. - explicit Unique(Handle handle) { - if (handle.is_null()) { - raw_address_ = NULL; - } else { - // This is a best-effort check to prevent comparing Unique's created - // in different GC eras; we require heap allocation to be disallowed at - // creation time. - // NOTE: we currently consider maps to be non-movable, so no special - // assurance is required for creating a Unique. - // TODO(titzer): other immortable immovable objects are also fine. - DCHECK(!AllowHeapAllocation::IsAllowed() || handle->IsMap()); - raw_address_ = reinterpret_cast
(*handle); - DCHECK_NOT_NULL(raw_address_); // Non-null should imply non-zero address. - } - handle_ = handle; - } - - // Constructor for handling automatic up casting. - // Eg. Unique can be passed when Unique is expected. - template Unique(Unique uniq) { -#ifdef DEBUG - T* a = NULL; - S* b = NULL; - a = b; // Fake assignment to enforce type checks. - USE(a); -#endif - raw_address_ = uniq.raw_address_; - handle_ = uniq.handle_; - } - - template - inline bool operator==(const Unique& other) const { - DCHECK(IsInitialized() && other.IsInitialized()); - return raw_address_ == other.raw_address_; - } - - template - inline bool operator!=(const Unique& other) const { - DCHECK(IsInitialized() && other.IsInitialized()); - return raw_address_ != other.raw_address_; - } - - friend inline size_t hash_value(Unique const& unique) { - DCHECK(unique.IsInitialized()); - return base::hash()(unique.raw_address_); - } - - inline intptr_t Hashcode() const { - DCHECK(IsInitialized()); - return reinterpret_cast(raw_address_); - } - - inline bool IsNull() const { - DCHECK(IsInitialized()); - return raw_address_ == NULL; - } - - inline bool IsKnownGlobal(void* global) const { - DCHECK(IsInitialized()); - return raw_address_ == reinterpret_cast
(global); - } - - inline Handle handle() const { - return handle_; - } - - template static Unique cast(Unique that) { - // Allow fetching location() to unsafe-cast the handle. This is necessary - // since we can't concurrently safe-cast. Safe-casting requires looking at - // the heap which may be moving concurrently to the compiler thread. - AllowHandleDereference allow_deref; - return Unique(that.raw_address_, - Handle(reinterpret_cast(that.handle_.location()))); - } - - inline bool IsInitialized() const { - return raw_address_ != NULL || handle_.is_null(); - } - - // TODO(titzer): this is a hack to migrate to Unique incrementally. - static Unique CreateUninitialized(Handle handle) { - return Unique(NULL, handle); - } - - static Unique CreateImmovable(Handle handle) { - return Unique(reinterpret_cast
(*handle), handle); - } - - private: - Unique(Address raw_address, Handle handle) - : raw_address_(raw_address), handle_(handle) {} - - Address raw_address_; - Handle handle_; - - friend class UniqueSet; // Uses internal details for speed. - template - friend class Unique; // For comparing raw_address values. -}; - -template -inline std::ostream& operator<<(std::ostream& os, Unique uniq) { - return os << Brief(*uniq.handle()); -} - - -template -class UniqueSet final : public ZoneObject { - public: - // Constructor. A new set will be empty. - UniqueSet() : size_(0), capacity_(0), array_(NULL) { } - - // Capacity constructor. A new set will be empty. - UniqueSet(int capacity, Zone* zone) - : size_(0), capacity_(capacity), - array_(zone->NewArray >(capacity)) { - DCHECK(capacity <= kMaxCapacity); - } - - // Singleton constructor. - UniqueSet(Unique uniq, Zone* zone) - : size_(1), capacity_(1), array_(zone->NewArray >(1)) { - array_[0] = uniq; - } - - // Add a new element to this unique set. Mutates this set. O(|this|). - void Add(Unique uniq, Zone* zone) { - DCHECK(uniq.IsInitialized()); - // Keep the set sorted by the {raw_address} of the unique elements. - for (int i = 0; i < size_; i++) { - if (array_[i] == uniq) return; - if (array_[i].raw_address_ > uniq.raw_address_) { - // Insert in the middle. - Grow(size_ + 1, zone); - for (int j = size_ - 1; j >= i; j--) array_[j + 1] = array_[j]; - array_[i] = uniq; - size_++; - return; - } - } - // Append the element to the the end. - Grow(size_ + 1, zone); - array_[size_++] = uniq; - } - - // Remove an element from this set. Mutates this set. O(|this|) - void Remove(Unique uniq) { - for (int i = 0; i < size_; i++) { - if (array_[i] == uniq) { - while (++i < size_) array_[i - 1] = array_[i]; - size_--; - return; - } - } - } - - // Compare this set against another set. O(|this|). - bool Equals(const UniqueSet* that) const { - if (that->size_ != this->size_) return false; - for (int i = 0; i < this->size_; i++) { - if (this->array_[i] != that->array_[i]) return false; - } - return true; - } - - // Check whether this set contains the given element. O(|this|) - // TODO(titzer): use binary search for large sets to make this O(log|this|) - template - bool Contains(const Unique elem) const { - for (int i = 0; i < this->size_; ++i) { - Unique cand = this->array_[i]; - if (cand.raw_address_ >= elem.raw_address_) { - return cand.raw_address_ == elem.raw_address_; - } - } - return false; - } - - // Check if this set is a subset of the given set. O(|this| + |that|). - bool IsSubset(const UniqueSet* that) const { - if (that->size_ < this->size_) return false; - int j = 0; - for (int i = 0; i < this->size_; i++) { - Unique sought = this->array_[i]; - while (true) { - if (sought == that->array_[j++]) break; - // Fail whenever there are more elements in {this} than {that}. - if ((this->size_ - i) > (that->size_ - j)) return false; - } - } - return true; - } - - // Returns a new set representing the intersection of this set and the other. - // O(|this| + |that|). 
- UniqueSet* Intersect(const UniqueSet* that, Zone* zone) const { - if (that->size_ == 0 || this->size_ == 0) return new(zone) UniqueSet(); - - UniqueSet* out = new(zone) UniqueSet( - Min(this->size_, that->size_), zone); - - int i = 0, j = 0, k = 0; - while (i < this->size_ && j < that->size_) { - Unique a = this->array_[i]; - Unique b = that->array_[j]; - if (a == b) { - out->array_[k++] = a; - i++; - j++; - } else if (a.raw_address_ < b.raw_address_) { - i++; - } else { - j++; - } - } - - out->size_ = k; - return out; - } - - // Returns a new set representing the union of this set and the other. - // O(|this| + |that|). - UniqueSet* Union(const UniqueSet* that, Zone* zone) const { - if (that->size_ == 0) return this->Copy(zone); - if (this->size_ == 0) return that->Copy(zone); - - UniqueSet* out = new(zone) UniqueSet( - this->size_ + that->size_, zone); - - int i = 0, j = 0, k = 0; - while (i < this->size_ && j < that->size_) { - Unique a = this->array_[i]; - Unique b = that->array_[j]; - if (a == b) { - out->array_[k++] = a; - i++; - j++; - } else if (a.raw_address_ < b.raw_address_) { - out->array_[k++] = a; - i++; - } else { - out->array_[k++] = b; - j++; - } - } - - while (i < this->size_) out->array_[k++] = this->array_[i++]; - while (j < that->size_) out->array_[k++] = that->array_[j++]; - - out->size_ = k; - return out; - } - - // Returns a new set representing all elements from this set which are not in - // that set. O(|this| * |that|). - UniqueSet* Subtract(const UniqueSet* that, Zone* zone) const { - if (that->size_ == 0) return this->Copy(zone); - - UniqueSet* out = new(zone) UniqueSet(this->size_, zone); - - int i = 0, j = 0; - while (i < this->size_) { - Unique cand = this->array_[i]; - if (!that->Contains(cand)) { - out->array_[j++] = cand; - } - i++; - } - - out->size_ = j; - return out; - } - - // Makes an exact copy of this set. O(|this|). - UniqueSet* Copy(Zone* zone) const { - UniqueSet* copy = new(zone) UniqueSet(this->size_, zone); - copy->size_ = this->size_; - memcpy(copy->array_, this->array_, this->size_ * sizeof(Unique)); - return copy; - } - - void Clear() { - size_ = 0; - } - - inline int size() const { - return size_; - } - - inline Unique at(int index) const { - DCHECK(index >= 0 && index < size_); - return array_[index]; - } - - private: - // These sets should be small, since operations are implemented with simple - // linear algorithms. Enforce a maximum size. - static const int kMaxCapacity = 65535; - - uint16_t size_; - uint16_t capacity_; - Unique* array_; - - // Grow the size of internal storage to be at least {size} elements. - void Grow(int size, Zone* zone) { - CHECK(size < kMaxCapacity); // Enforce maximum size. - if (capacity_ < size) { - int new_capacity = 2 * capacity_ + size; - if (new_capacity > kMaxCapacity) new_capacity = kMaxCapacity; - Unique* new_array = zone->NewArray >(new_capacity); - if (size_ > 0) { - memcpy(new_array, array_, size_ * sizeof(Unique)); - } - capacity_ = new_capacity; - array_ = new_array; - } - } -}; - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_UNIQUE_H_ diff --git a/src/crankshaft/x64/lithium-codegen-x64.cc b/src/crankshaft/x64/lithium-codegen-x64.cc deleted file mode 100644 index 772526d374..0000000000 --- a/src/crankshaft/x64/lithium-codegen-x64.cc +++ /dev/null @@ -1,5386 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
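The Unique/UniqueSet machinery deleted above boils down to two ideas: identity is a raw address captured once (so equality and hashing never dereference the handle again), and sets are kept sorted by that address so the binary operations are linear merges. A small self-contained sketch of both ideas, not the deleted classes themselves:

#include <cstddef>
#include <cstdint>
#include <functional>
#include <vector>

// Identity captured once as an address; comparison and hashing never touch
// the object again, which is why it is only valid within one "GC era".
struct UniqueRef {
  uintptr_t raw_address;
  bool operator==(const UniqueRef& other) const {
    return raw_address == other.raw_address;
  }
  size_t Hash() const { return std::hash<uintptr_t>()(raw_address); }
};

// Keeping the elements sorted by address turns Intersect into a linear merge,
// mirroring the deleted UniqueSet::Intersect.
std::vector<UniqueRef> Intersect(const std::vector<UniqueRef>& a,
                                 const std::vector<UniqueRef>& b) {
  std::vector<UniqueRef> out;
  size_t i = 0, j = 0;
  while (i < a.size() && j < b.size()) {
    if (a[i].raw_address == b[j].raw_address) {
      out.push_back(a[i]);
      ++i;
      ++j;
    } else if (a[i].raw_address < b[j].raw_address) {
      ++i;
    } else {
      ++j;
    }
  }
  return out;
}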
- -#if V8_TARGET_ARCH_X64 - -#include "src/crankshaft/x64/lithium-codegen-x64.h" - -#include "src/base/bits.h" -#include "src/builtins/builtins-constructor.h" -#include "src/code-factory.h" -#include "src/code-stubs.h" -#include "src/ic/ic.h" -#include "src/ic/stub-cache.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - - -// When invoking builtins, we need to record the safepoint in the middle of -// the invoke instruction sequence generated by the macro assembler. -class SafepointGenerator final : public CallWrapper { - public: - SafepointGenerator(LCodeGen* codegen, - LPointerMap* pointers, - Safepoint::DeoptMode mode) - : codegen_(codegen), - pointers_(pointers), - deopt_mode_(mode) { } - virtual ~SafepointGenerator() {} - - void BeforeCall(int call_size) const override {} - - void AfterCall() const override { - codegen_->RecordSafepoint(pointers_, deopt_mode_); - } - - private: - LCodeGen* codegen_; - LPointerMap* pointers_; - Safepoint::DeoptMode deopt_mode_; -}; - - -#define __ masm()-> - -bool LCodeGen::GenerateCode() { - LPhase phase("Z_Code generation", chunk()); - DCHECK(is_unused()); - status_ = GENERATING; - - // Open a frame scope to indicate that there is a frame on the stack. The - // MANUAL indicates that the scope shouldn't actually generate code to set up - // the frame (that is done in GeneratePrologue). - FrameScope frame_scope(masm_, StackFrame::MANUAL); - - return GeneratePrologue() && - GenerateBody() && - GenerateDeferredCode() && - GenerateJumpTable() && - GenerateSafepointTable(); -} - - -void LCodeGen::FinishCode(Handle code) { - DCHECK(is_done()); - code->set_stack_slots(GetTotalFrameSlotCount()); - code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); - PopulateDeoptimizationData(code); -} - - -#ifdef _MSC_VER -void LCodeGen::MakeSureStackPagesMapped(int offset) { - const int kPageSize = 4 * KB; - for (offset -= kPageSize; offset > 0; offset -= kPageSize) { - __ movp(Operand(rsp, offset), rax); - } -} -#endif - - -void LCodeGen::SaveCallerDoubles() { - DCHECK(info()->saves_caller_doubles()); - DCHECK(NeedsEagerFrame()); - Comment(";;; Save clobbered callee double registers"); - int count = 0; - BitVector* doubles = chunk()->allocated_double_registers(); - BitVector::Iterator save_iterator(doubles); - while (!save_iterator.Done()) { - __ Movsd(MemOperand(rsp, count * kDoubleSize), - XMMRegister::from_code(save_iterator.Current())); - save_iterator.Advance(); - count++; - } -} - - -void LCodeGen::RestoreCallerDoubles() { - DCHECK(info()->saves_caller_doubles()); - DCHECK(NeedsEagerFrame()); - Comment(";;; Restore clobbered callee double registers"); - BitVector* doubles = chunk()->allocated_double_registers(); - BitVector::Iterator save_iterator(doubles); - int count = 0; - while (!save_iterator.Done()) { - __ Movsd(XMMRegister::from_code(save_iterator.Current()), - MemOperand(rsp, count * kDoubleSize)); - save_iterator.Advance(); - count++; - } -} - - -bool LCodeGen::GeneratePrologue() { - DCHECK(is_generating()); - - if (info()->IsOptimizing()) { - ProfileEntryHookStub::MaybeCallEntryHook(masm_); - } - - info()->set_prologue_offset(masm_->pc_offset()); - if (NeedsEagerFrame()) { - DCHECK(!frame_is_built_); - frame_is_built_ = true; - if (info()->IsStub()) { - __ StubPrologue(StackFrame::STUB); - } else { - __ Prologue(info()->GeneratePreagedPrologue()); - } - } - - // Reserve space for the stack slots needed by the code. 
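On Windows the prologue cannot simply subtract a large amount from rsp: the stack is committed through a guard page, so MakeSureStackPagesMapped above touches one word per 4 KB page so the pages fault in order before the slots are used. A sketch of the same probing pattern over a stand-in buffer (the page size mirrors the deleted constant; the rest is illustrative):

#include <cstddef>

// Touch one byte per page, walking from the high-address end of the newly
// reserved area down toward the new stack top, so each guard-page fault
// happens in order and the whole range ends up committed.
void TouchEveryPage(volatile char* reserved_base, size_t reserved_size) {
  const size_t kPageSize = 4 * 1024;
  for (size_t offset = kPageSize; offset <= reserved_size; offset += kPageSize) {
    reserved_base[reserved_size - offset] = 0;
  }
}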
- int slots = GetStackSlotCount(); - if (slots > 0) { - if (FLAG_debug_code) { - __ subp(rsp, Immediate(slots * kPointerSize)); -#ifdef _MSC_VER - MakeSureStackPagesMapped(slots * kPointerSize); -#endif - __ Push(rax); - __ Set(rax, slots); - __ Set(kScratchRegister, kSlotsZapValue); - Label loop; - __ bind(&loop); - __ movp(MemOperand(rsp, rax, times_pointer_size, 0), - kScratchRegister); - __ decl(rax); - __ j(not_zero, &loop); - __ Pop(rax); - } else { - __ subp(rsp, Immediate(slots * kPointerSize)); -#ifdef _MSC_VER - MakeSureStackPagesMapped(slots * kPointerSize); -#endif - } - - if (info()->saves_caller_doubles()) { - SaveCallerDoubles(); - } - } - return !is_aborted(); -} - - -void LCodeGen::DoPrologue(LPrologue* instr) { - Comment(";;; Prologue begin"); - - // Possibly allocate a local context. - if (info_->scope()->NeedsContext()) { - Comment(";;; Allocate local context"); - bool need_write_barrier = true; - // Argument to NewContext is the function, which is still in rdi. - int slots = info_->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; - Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt; - if (info()->scope()->is_script_scope()) { - __ Push(rdi); - __ Push(info()->scope()->scope_info()); - __ CallRuntime(Runtime::kNewScriptContext); - deopt_mode = Safepoint::kLazyDeopt; - } else { - if (slots <= ConstructorBuiltins::MaximumFunctionContextSlots()) { - Callable callable = CodeFactory::FastNewFunctionContext( - isolate(), info()->scope()->scope_type()); - __ Set(FastNewFunctionContextDescriptor::SlotsRegister(), slots); - __ Call(callable.code(), RelocInfo::CODE_TARGET); - // Result of FastNewFunctionContextStub is always in new space. - need_write_barrier = false; - } else { - __ Push(rdi); - __ Push(Smi::FromInt(info()->scope()->scope_type())); - __ CallRuntime(Runtime::kNewFunctionContext); - } - } - RecordSafepoint(deopt_mode); - - // Context is returned in rax. It replaces the context passed to us. - // It's saved in the stack and kept live in rsi. - __ movp(rsi, rax); - __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rax); - - // Copy any necessary parameters into the context. - int num_parameters = info()->scope()->num_parameters(); - int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0; - for (int i = first_parameter; i < num_parameters; i++) { - Variable* var = (i == -1) ? info()->scope()->receiver() - : info()->scope()->parameter(i); - if (var->IsContextSlot()) { - int parameter_offset = StandardFrameConstants::kCallerSPOffset + - (num_parameters - 1 - i) * kPointerSize; - // Load parameter from stack. - __ movp(rax, Operand(rbp, parameter_offset)); - // Store it in the context. - int context_offset = Context::SlotOffset(var->index()); - __ movp(Operand(rsi, context_offset), rax); - // Update the write barrier. This clobbers rax and rbx. 
- if (need_write_barrier) { - __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs); - } else if (FLAG_debug_code) { - Label done; - __ JumpIfInNewSpace(rsi, rax, &done, Label::kNear); - __ Abort(kExpectedNewSpaceObject); - __ bind(&done); - } - } - } - Comment(";;; End allocate local context"); - } - - Comment(";;; Prologue end"); -} - -void LCodeGen::GenerateOsrPrologue() { UNREACHABLE(); } - -void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { - if (instr->IsCall()) { - EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); - } - if (!instr->IsLazyBailout() && !instr->IsGap()) { - safepoints_.BumpLastLazySafepointIndex(); - } -} - - -void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { - if (FLAG_debug_code && FLAG_enable_slow_asserts && instr->HasResult() && - instr->hydrogen_value()->representation().IsInteger32() && - instr->result()->IsRegister()) { - __ AssertZeroExtended(ToRegister(instr->result())); - } - - if (instr->HasResult() && instr->MustSignExtendResult(chunk())) { - // We sign extend the dehoisted key at the definition point when the pointer - // size is 64-bit. For x32 port, we sign extend the dehoisted key at the use - // points and MustSignExtendResult is always false. We can't use - // STATIC_ASSERT here as the pointer size is 32-bit for x32. - DCHECK(kPointerSize == kInt64Size); - if (instr->result()->IsRegister()) { - Register result_reg = ToRegister(instr->result()); - __ movsxlq(result_reg, result_reg); - } else { - // Sign extend the 32bit result in the stack slots. - DCHECK(instr->result()->IsStackSlot()); - Operand src = ToOperand(instr->result()); - __ movsxlq(kScratchRegister, src); - __ movq(src, kScratchRegister); - } - } -} - - -bool LCodeGen::GenerateJumpTable() { - if (jump_table_.length() == 0) return !is_aborted(); - - Label needs_frame; - Comment(";;; -------------------- Jump table --------------------"); - for (int i = 0; i < jump_table_.length(); i++) { - Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i]; - __ bind(&table_entry->label); - Address entry = table_entry->address; - DeoptComment(table_entry->deopt_info); - if (table_entry->needs_frame) { - DCHECK(!info()->saves_caller_doubles()); - __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry)); - __ call(&needs_frame); - } else { - if (info()->saves_caller_doubles()) { - DCHECK(info()->IsStub()); - RestoreCallerDoubles(); - } - __ call(entry, RelocInfo::RUNTIME_ENTRY); - } - } - - if (needs_frame.is_linked()) { - __ bind(&needs_frame); - /* stack layout - 3: return address <-- rsp - 2: garbage - 1: garbage - 0: garbage - */ - // Reserve space for stub marker. - __ subp(rsp, Immediate(TypedFrameConstants::kFrameTypeSize)); - __ Push(MemOperand( - rsp, TypedFrameConstants::kFrameTypeSize)); // Copy return address. - __ Push(kScratchRegister); - - /* stack layout - 3: return address - 2: garbage - 1: return address - 0: entry address <-- rsp - */ - - // Create a stack frame. - __ movp(MemOperand(rsp, 3 * kPointerSize), rbp); - __ leap(rbp, MemOperand(rsp, 3 * kPointerSize)); - - // This variant of deopt can only be used with stubs. Since we don't - // have a function pointer to install in the stack frame that we're - // building, install a special marker there instead. 
- DCHECK(info()->IsStub()); - __ movp(MemOperand(rsp, 2 * kPointerSize), - Immediate(StackFrame::TypeToMarker(StackFrame::STUB))); - - /* stack layout - 3: old rbp - 2: stub marker - 1: return address - 0: entry address <-- rsp - */ - __ ret(0); - } - - return !is_aborted(); -} - - -bool LCodeGen::GenerateDeferredCode() { - DCHECK(is_generating()); - if (deferred_.length() > 0) { - for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { - LDeferredCode* code = deferred_[i]; - - HValue* value = - instructions_->at(code->instruction_index())->hydrogen_value(); - RecordAndWritePosition(value->position()); - - Comment(";;; <@%d,#%d> " - "-------------------- Deferred %s --------------------", - code->instruction_index(), - code->instr()->hydrogen_value()->id(), - code->instr()->Mnemonic()); - __ bind(code->entry()); - if (NeedsDeferredFrame()) { - Comment(";;; Build frame"); - DCHECK(!frame_is_built_); - DCHECK(info()->IsStub()); - frame_is_built_ = true; - // Build the frame in such a way that esi isn't trashed. - __ pushq(rbp); // Caller's frame pointer. - __ Push(Immediate(StackFrame::TypeToMarker(StackFrame::STUB))); - __ leap(rbp, Operand(rsp, TypedFrameConstants::kFixedFrameSizeFromFp)); - Comment(";;; Deferred code"); - } - code->Generate(); - if (NeedsDeferredFrame()) { - __ bind(code->done()); - Comment(";;; Destroy frame"); - DCHECK(frame_is_built_); - frame_is_built_ = false; - __ movp(rsp, rbp); - __ popq(rbp); - } - __ jmp(code->exit()); - } - } - - // Deferred code is the last part of the instruction sequence. Mark - // the generated code as done unless we bailed out. - if (!is_aborted()) status_ = DONE; - return !is_aborted(); -} - - -bool LCodeGen::GenerateSafepointTable() { - DCHECK(is_done()); - safepoints_.Emit(masm(), GetTotalFrameSlotCount()); - return !is_aborted(); -} - - -Register LCodeGen::ToRegister(int index) const { - return Register::from_code(index); -} - - -XMMRegister LCodeGen::ToDoubleRegister(int index) const { - return XMMRegister::from_code(index); -} - - -Register LCodeGen::ToRegister(LOperand* op) const { - DCHECK(op->IsRegister()); - return ToRegister(op->index()); -} - - -XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const { - DCHECK(op->IsDoubleRegister()); - return ToDoubleRegister(op->index()); -} - - -bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const { - return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32(); -} - - -bool LCodeGen::IsExternalConstant(LConstantOperand* op) const { - return chunk_->LookupLiteralRepresentation(op).IsExternal(); -} - - -bool LCodeGen::IsDehoistedKeyConstant(LConstantOperand* op) const { - return op->IsConstantOperand() && - chunk_->IsDehoistedKey(chunk_->LookupConstant(op)); -} - - -bool LCodeGen::IsSmiConstant(LConstantOperand* op) const { - return chunk_->LookupLiteralRepresentation(op).IsSmi(); -} - - -int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { - return ToRepresentation(op, Representation::Integer32()); -} - - -int32_t LCodeGen::ToRepresentation(LConstantOperand* op, - const Representation& r) const { - HConstant* constant = chunk_->LookupConstant(op); - int32_t value = constant->Integer32Value(); - if (r.IsInteger32()) return value; - DCHECK(SmiValuesAre31Bits() && r.IsSmiOrTagged()); - return static_cast(reinterpret_cast(Smi::FromInt(value))); -} - - -Smi* LCodeGen::ToSmi(LConstantOperand* op) const { - HConstant* constant = chunk_->LookupConstant(op); - return Smi::FromInt(constant->Integer32Value()); -} - - -double LCodeGen::ToDouble(LConstantOperand* op) 
const { - HConstant* constant = chunk_->LookupConstant(op); - DCHECK(constant->HasDoubleValue()); - return constant->DoubleValue(); -} - - -ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const { - HConstant* constant = chunk_->LookupConstant(op); - DCHECK(constant->HasExternalReferenceValue()); - return constant->ExternalReferenceValue(); -} - - -Handle LCodeGen::ToHandle(LConstantOperand* op) const { - HConstant* constant = chunk_->LookupConstant(op); - DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); - return constant->handle(isolate()); -} - - -static int ArgumentsOffsetWithoutFrame(int index) { - DCHECK(index < 0); - return -(index + 1) * kPointerSize + kPCOnStackSize; -} - - -Operand LCodeGen::ToOperand(LOperand* op) const { - // Does not handle registers. In X64 assembler, plain registers are not - // representable as an Operand. - DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); - if (NeedsEagerFrame()) { - return Operand(rbp, FrameSlotToFPOffset(op->index())); - } else { - // Retrieve parameter without eager stack-frame relative to the - // stack-pointer. - return Operand(rsp, ArgumentsOffsetWithoutFrame(op->index())); - } -} - - -void LCodeGen::WriteTranslation(LEnvironment* environment, - Translation* translation) { - if (environment == NULL) return; - - // The translation includes one command per value in the environment. - int translation_size = environment->translation_size(); - - WriteTranslation(environment->outer(), translation); - WriteTranslationFrame(environment, translation); - - int object_index = 0; - int dematerialized_index = 0; - for (int i = 0; i < translation_size; ++i) { - LOperand* value = environment->values()->at(i); - AddToTranslation( - environment, translation, value, environment->HasTaggedValueAt(i), - environment->HasUint32ValueAt(i), &object_index, &dematerialized_index); - } -} - - -void LCodeGen::AddToTranslation(LEnvironment* environment, - Translation* translation, - LOperand* op, - bool is_tagged, - bool is_uint32, - int* object_index_pointer, - int* dematerialized_index_pointer) { - if (op == LEnvironment::materialization_marker()) { - int object_index = (*object_index_pointer)++; - if (environment->ObjectIsDuplicateAt(object_index)) { - int dupe_of = environment->ObjectDuplicateOfAt(object_index); - translation->DuplicateObject(dupe_of); - return; - } - int object_length = environment->ObjectLengthAt(object_index); - if (environment->ObjectIsArgumentsAt(object_index)) { - translation->BeginArgumentsObject(object_length); - } else { - translation->BeginCapturedObject(object_length); - } - int dematerialized_index = *dematerialized_index_pointer; - int env_offset = environment->translation_size() + dematerialized_index; - *dematerialized_index_pointer += object_length; - for (int i = 0; i < object_length; ++i) { - LOperand* value = environment->values()->at(env_offset + i); - AddToTranslation(environment, - translation, - value, - environment->HasTaggedValueAt(env_offset + i), - environment->HasUint32ValueAt(env_offset + i), - object_index_pointer, - dematerialized_index_pointer); - } - return; - } - - if (op->IsStackSlot()) { - int index = op->index(); - if (is_tagged) { - translation->StoreStackSlot(index); - } else if (is_uint32) { - translation->StoreUint32StackSlot(index); - } else { - translation->StoreInt32StackSlot(index); - } - } else if (op->IsDoubleStackSlot()) { - int index = op->index(); - translation->StoreDoubleStackSlot(index); - } else if (op->IsRegister()) { - Register reg = 
ToRegister(op); - if (is_tagged) { - translation->StoreRegister(reg); - } else if (is_uint32) { - translation->StoreUint32Register(reg); - } else { - translation->StoreInt32Register(reg); - } - } else if (op->IsDoubleRegister()) { - XMMRegister reg = ToDoubleRegister(op); - translation->StoreDoubleRegister(reg); - } else if (op->IsConstantOperand()) { - HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op)); - int src_index = DefineDeoptimizationLiteral(constant->handle(isolate())); - translation->StoreLiteral(src_index); - } else { - UNREACHABLE(); - } -} - - -void LCodeGen::CallCodeGeneric(Handle code, - RelocInfo::Mode mode, - LInstruction* instr, - SafepointMode safepoint_mode, - int argc) { - DCHECK(instr != NULL); - __ call(code, mode); - RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc); - - // Signal that we don't inline smi code before these stubs in the - // optimizing code generator. - if (code->kind() == Code::COMPARE_IC) { - __ nop(); - } -} - - -void LCodeGen::CallCode(Handle code, - RelocInfo::Mode mode, - LInstruction* instr) { - CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0); -} - - -void LCodeGen::CallRuntime(const Runtime::Function* function, - int num_arguments, - LInstruction* instr, - SaveFPRegsMode save_doubles) { - DCHECK(instr != NULL); - DCHECK(instr->HasPointerMap()); - - __ CallRuntime(function, num_arguments, save_doubles); - - RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0); -} - - -void LCodeGen::LoadContextFromDeferred(LOperand* context) { - if (context->IsRegister()) { - if (!ToRegister(context).is(rsi)) { - __ movp(rsi, ToRegister(context)); - } - } else if (context->IsStackSlot()) { - __ movp(rsi, ToOperand(context)); - } else if (context->IsConstantOperand()) { - HConstant* constant = - chunk_->LookupConstant(LConstantOperand::cast(context)); - __ Move(rsi, Handle::cast(constant->handle(isolate()))); - } else { - UNREACHABLE(); - } -} - - - -void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, - int argc, - LInstruction* instr, - LOperand* context) { - LoadContextFromDeferred(context); - - __ CallRuntimeSaveDoubles(id); - RecordSafepointWithRegisters( - instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); -} - - -void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment, - Safepoint::DeoptMode mode) { - environment->set_has_been_used(); - if (!environment->HasBeenRegistered()) { - // Physical stack frame layout: - // -x ............. -4 0 ..................................... y - // [incoming arguments] [spill slots] [pushed outgoing arguments] - - // Layout of the environment: - // 0 ..................................................... size-1 - // [parameters] [locals] [expression stack including arguments] - - // Layout of the translation: - // 0 ........................................................ size - 1 + 4 - // [expression stack including arguments] [locals] [4 words] [parameters] - // |>------------ translation_size ------------<| - - int frame_count = 0; - int jsframe_count = 0; - for (LEnvironment* e = environment; e != NULL; e = e->outer()) { - ++frame_count; - if (e->frame_type() == JS_FUNCTION) { - ++jsframe_count; - } - } - Translation translation(&translations_, frame_count, jsframe_count, zone()); - WriteTranslation(environment, &translation); - int deoptimization_index = deoptimizations_.length(); - int pc_offset = masm()->pc_offset(); - environment->Register(deoptimization_index, - translation.index(), - (mode == Safepoint::kLazyDeopt) ? 
pc_offset : -1); - deoptimizations_.Add(environment, environment->zone()); - } -} - -void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, - DeoptimizeReason deopt_reason, - Deoptimizer::BailoutType bailout_type) { - LEnvironment* environment = instr->environment(); - RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); - DCHECK(environment->HasBeenRegistered()); - int id = environment->deoptimization_index(); - Address entry = - Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); - if (entry == NULL) { - Abort(kBailoutWasNotPrepared); - return; - } - - if (DeoptEveryNTimes()) { - ExternalReference count = ExternalReference::stress_deopt_count(isolate()); - Label no_deopt; - __ pushfq(); - __ pushq(rax); - Operand count_operand = masm()->ExternalOperand(count, kScratchRegister); - __ movl(rax, count_operand); - __ subl(rax, Immediate(1)); - __ j(not_zero, &no_deopt, Label::kNear); - if (FLAG_trap_on_deopt) __ int3(); - __ movl(rax, Immediate(FLAG_deopt_every_n_times)); - __ movl(count_operand, rax); - __ popq(rax); - __ popfq(); - DCHECK(frame_is_built_); - __ call(entry, RelocInfo::RUNTIME_ENTRY); - __ bind(&no_deopt); - __ movl(count_operand, rax); - __ popq(rax); - __ popfq(); - } - - if (info()->ShouldTrapOnDeopt()) { - Label done; - if (cc != no_condition) { - __ j(NegateCondition(cc), &done, Label::kNear); - } - __ int3(); - __ bind(&done); - } - - Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id); - - DCHECK(info()->IsStub() || frame_is_built_); - // Go through jump table if we need to handle condition, build frame, or - // restore caller doubles. - if (cc == no_condition && frame_is_built_ && - !info()->saves_caller_doubles()) { - DeoptComment(deopt_info); - __ call(entry, RelocInfo::RUNTIME_ENTRY); - } else { - Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type, - !frame_is_built_); - // We often have several deopts to the same entry, reuse the last - // jump entry if this is the case. - if (FLAG_trace_deopt || isolate()->is_profiling() || - jump_table_.is_empty() || - !table_entry.IsEquivalentTo(jump_table_.last())) { - jump_table_.Add(table_entry, zone()); - } - if (cc == no_condition) { - __ jmp(&jump_table_.last().label); - } else { - __ j(cc, &jump_table_.last().label); - } - } -} - -void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, - DeoptimizeReason deopt_reason) { - Deoptimizer::BailoutType bailout_type = info()->IsStub() - ? 
Deoptimizer::LAZY - : Deoptimizer::EAGER; - DeoptimizeIf(cc, instr, deopt_reason, bailout_type); -} - - -void LCodeGen::RecordSafepointWithLazyDeopt( - LInstruction* instr, SafepointMode safepoint_mode, int argc) { - if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { - RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); - } else { - DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS); - RecordSafepointWithRegisters( - instr->pointer_map(), argc, Safepoint::kLazyDeopt); - } -} - - -void LCodeGen::RecordSafepoint( - LPointerMap* pointers, - Safepoint::Kind kind, - int arguments, - Safepoint::DeoptMode deopt_mode) { - DCHECK(kind == expected_safepoint_kind_); - - const ZoneList* operands = pointers->GetNormalizedOperands(); - - Safepoint safepoint = safepoints_.DefineSafepoint(masm(), - kind, arguments, deopt_mode); - for (int i = 0; i < operands->length(); i++) { - LOperand* pointer = operands->at(i); - if (pointer->IsStackSlot()) { - safepoint.DefinePointerSlot(pointer->index(), zone()); - } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { - safepoint.DefinePointerRegister(ToRegister(pointer), zone()); - } - } -} - - -void LCodeGen::RecordSafepoint(LPointerMap* pointers, - Safepoint::DeoptMode deopt_mode) { - RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode); -} - - -void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) { - LPointerMap empty_pointers(zone()); - RecordSafepoint(&empty_pointers, deopt_mode); -} - - -void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, - int arguments, - Safepoint::DeoptMode deopt_mode) { - RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode); -} - - -static const char* LabelType(LLabel* label) { - if (label->is_loop_header()) return " (loop header)"; - if (label->is_osr_entry()) return " (OSR entry)"; - return ""; -} - - -void LCodeGen::DoLabel(LLabel* label) { - Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------", - current_instruction_, - label->hydrogen_value()->id(), - label->block_id(), - LabelType(label)); - __ bind(label->label()); - current_block_ = label->block_id(); - DoGap(label); -} - - -void LCodeGen::DoParallelMove(LParallelMove* move) { - resolver_.Resolve(move); -} - - -void LCodeGen::DoGap(LGap* gap) { - for (int i = LGap::FIRST_INNER_POSITION; - i <= LGap::LAST_INNER_POSITION; - i++) { - LGap::InnerPosition inner_pos = static_cast(i); - LParallelMove* move = gap->GetParallelMove(inner_pos); - if (move != NULL) DoParallelMove(move); - } -} - - -void LCodeGen::DoInstructionGap(LInstructionGap* instr) { - DoGap(instr); -} - - -void LCodeGen::DoParameter(LParameter* instr) { - // Nothing to do. -} - - -void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { - GenerateOsrPrologue(); -} - - -void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - DCHECK(dividend.is(ToRegister(instr->result()))); - - // Theoretically, a variation of the branch-free code for integer division by - // a power of 2 (calculating the remainder via an additional multiplication - // (which gets simplified to an 'and') and subtraction) should be faster, and - // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to - // indicate that positive dividends are heavily favored, so the branching - // version performs better. - HMod* hmod = instr->hydrogen(); - int32_t mask = divisor < 0 ? 
-(divisor + 1) : (divisor - 1); - Label dividend_is_not_negative, done; - if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { - __ testl(dividend, dividend); - __ j(not_sign, ÷nd_is_not_negative, Label::kNear); - // Note that this is correct even for kMinInt operands. - __ negl(dividend); - __ andl(dividend, Immediate(mask)); - __ negl(dividend); - if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { - DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero); - } - __ jmp(&done, Label::kNear); - } - - __ bind(÷nd_is_not_negative); - __ andl(dividend, Immediate(mask)); - __ bind(&done); -} - - -void LCodeGen::DoModByConstI(LModByConstI* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - DCHECK(ToRegister(instr->result()).is(rax)); - - if (divisor == 0) { - DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero); - return; - } - - __ TruncatingDiv(dividend, Abs(divisor)); - __ imull(rdx, rdx, Immediate(Abs(divisor))); - __ movl(rax, dividend); - __ subl(rax, rdx); - - // Check for negative zero. - HMod* hmod = instr->hydrogen(); - if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label remainder_not_zero; - __ j(not_zero, &remainder_not_zero, Label::kNear); - __ cmpl(dividend, Immediate(0)); - DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero); - __ bind(&remainder_not_zero); - } -} - - -void LCodeGen::DoModI(LModI* instr) { - HMod* hmod = instr->hydrogen(); - - Register left_reg = ToRegister(instr->left()); - DCHECK(left_reg.is(rax)); - Register right_reg = ToRegister(instr->right()); - DCHECK(!right_reg.is(rax)); - DCHECK(!right_reg.is(rdx)); - Register result_reg = ToRegister(instr->result()); - DCHECK(result_reg.is(rdx)); - - Label done; - // Check for x % 0, idiv would signal a divide error. We have to - // deopt in this case because we can't return a NaN. - if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { - __ testl(right_reg, right_reg); - DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero); - } - - // Check for kMinInt % -1, idiv would signal a divide error. We - // have to deopt if we care about -0, because we can't return that. - if (hmod->CheckFlag(HValue::kCanOverflow)) { - Label no_overflow_possible; - __ cmpl(left_reg, Immediate(kMinInt)); - __ j(not_zero, &no_overflow_possible, Label::kNear); - __ cmpl(right_reg, Immediate(-1)); - if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { - DeoptimizeIf(equal, instr, DeoptimizeReason::kMinusZero); - } else { - __ j(not_equal, &no_overflow_possible, Label::kNear); - __ Set(result_reg, 0); - __ jmp(&done, Label::kNear); - } - __ bind(&no_overflow_possible); - } - - // Sign extend dividend in eax into edx:eax, since we are using only the low - // 32 bits of the values. - __ cdq(); - - // If we care about -0, test if the dividend is <0 and the result is 0. 
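The DoModByPowerOf2I sequence above relies on the remainder for |divisor| = 2^k being a simple bit mask, with negative dividends negated around the mask so the result keeps the dividend's sign (and kMinInt still works). A C++ sketch of the same computation:

#include <cstdint>

// mask is |divisor| - 1 for a power-of-two divisor. Negative dividends are
// negated (in 64 bits, so INT32_MIN is safe), masked, and negated back; the
// original deopts instead of returning 0 when the result would be -0.
int32_t ModByPowerOf2(int32_t dividend, int32_t mask) {
  if (dividend < 0) {
    uint32_t magnitude = static_cast<uint32_t>(-static_cast<int64_t>(dividend));
    return -static_cast<int32_t>(magnitude & static_cast<uint32_t>(mask));
  }
  return dividend & mask;
}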
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label positive_left; - __ testl(left_reg, left_reg); - __ j(not_sign, &positive_left, Label::kNear); - __ idivl(right_reg); - __ testl(result_reg, result_reg); - DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero); - __ jmp(&done, Label::kNear); - __ bind(&positive_left); - } - __ idivl(right_reg); - __ bind(&done); -} - - -void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - DCHECK(dividend.is(ToRegister(instr->result()))); - - // If the divisor is positive, things are easy: There can be no deopts and we - // can simply do an arithmetic right shift. - if (divisor == 1) return; - int32_t shift = WhichPowerOf2Abs(divisor); - if (divisor > 1) { - __ sarl(dividend, Immediate(shift)); - return; - } - - // If the divisor is negative, we have to negate and handle edge cases. - __ negl(dividend); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero); - } - - // Dividing by -1 is basically negation, unless we overflow. - if (divisor == -1) { - if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); - } - return; - } - - // If the negation could not overflow, simply shifting is OK. - if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { - __ sarl(dividend, Immediate(shift)); - return; - } - - Label not_kmin_int, done; - __ j(no_overflow, ¬_kmin_int, Label::kNear); - __ movl(dividend, Immediate(kMinInt / divisor)); - __ jmp(&done, Label::kNear); - __ bind(¬_kmin_int); - __ sarl(dividend, Immediate(shift)); - __ bind(&done); -} - - -void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - DCHECK(ToRegister(instr->result()).is(rdx)); - - if (divisor == 0) { - DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero); - return; - } - - // Check for (0 / -x) that will produce negative zero. - HMathFloorOfDiv* hdiv = instr->hydrogen(); - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { - __ testl(dividend, dividend); - DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero); - } - - // Easy case: We need no dynamic check for the dividend and the flooring - // division is the same as the truncating division. - if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || - (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { - __ TruncatingDiv(dividend, Abs(divisor)); - if (divisor < 0) __ negl(rdx); - return; - } - - // In the general case we may need to adjust before and after the truncating - // division to get a flooring division. - Register temp = ToRegister(instr->temp3()); - DCHECK(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx)); - Label needs_adjustment, done; - __ cmpl(dividend, Immediate(0)); - __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear); - __ TruncatingDiv(dividend, Abs(divisor)); - if (divisor < 0) __ negl(rdx); - __ jmp(&done, Label::kNear); - __ bind(&needs_adjustment); - __ leal(temp, Operand(dividend, divisor > 0 ? 1 : -1)); - __ TruncatingDiv(temp, Abs(divisor)); - if (divisor < 0) __ negl(rdx); - __ decl(rdx); - __ bind(&done); -} - - -// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. 
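DoFlooringDivByConstI above turns a truncating division into a flooring one by decrementing the quotient when the operands have opposite signs and the division is inexact. The same adjustment in plain C++ (divisor != 0 and the kMinInt / -1 case are already excluded by the deopts in the original):

#include <cstdint>

int32_t FlooringDiv(int32_t dividend, int32_t divisor) {
  int32_t quotient = dividend / divisor;   // C++ division truncates toward zero
  int32_t remainder = dividend % divisor;
  // Rounding toward negative infinity differs from truncation by exactly one
  // when the signs differ and the division was inexact.
  if (remainder != 0 && ((dividend < 0) != (divisor < 0))) --quotient;
  return quotient;
}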
-void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { - HBinaryOperation* hdiv = instr->hydrogen(); - Register dividend = ToRegister(instr->dividend()); - Register divisor = ToRegister(instr->divisor()); - Register remainder = ToRegister(instr->temp()); - Register result = ToRegister(instr->result()); - DCHECK(dividend.is(rax)); - DCHECK(remainder.is(rdx)); - DCHECK(result.is(rax)); - DCHECK(!divisor.is(rax)); - DCHECK(!divisor.is(rdx)); - - // Check for x / 0. - if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { - __ testl(divisor, divisor); - DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero); - } - - // Check for (0 / -x) that will produce negative zero. - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label dividend_not_zero; - __ testl(dividend, dividend); - __ j(not_zero, ÷nd_not_zero, Label::kNear); - __ testl(divisor, divisor); - DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero); - __ bind(÷nd_not_zero); - } - - // Check for (kMinInt / -1). - if (hdiv->CheckFlag(HValue::kCanOverflow)) { - Label dividend_not_min_int; - __ cmpl(dividend, Immediate(kMinInt)); - __ j(not_zero, ÷nd_not_min_int, Label::kNear); - __ cmpl(divisor, Immediate(-1)); - DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow); - __ bind(÷nd_not_min_int); - } - - // Sign extend to rdx (= remainder). - __ cdq(); - __ idivl(divisor); - - Label done; - __ testl(remainder, remainder); - __ j(zero, &done, Label::kNear); - __ xorl(remainder, divisor); - __ sarl(remainder, Immediate(31)); - __ addl(result, remainder); - __ bind(&done); -} - - -void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - Register result = ToRegister(instr->result()); - DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); - DCHECK(!result.is(dividend)); - - // Check for (0 / -x) that will produce negative zero. - HDiv* hdiv = instr->hydrogen(); - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { - __ testl(dividend, dividend); - DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero); - } - // Check for (kMinInt / -1). - if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { - __ cmpl(dividend, Immediate(kMinInt)); - DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow); - } - // Deoptimize if remainder will not be 0. - if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && - divisor != 1 && divisor != -1) { - int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); - __ testl(dividend, Immediate(mask)); - DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision); - } - __ Move(result, dividend); - int32_t shift = WhichPowerOf2Abs(divisor); - if (shift > 0) { - // The arithmetic shift is always OK, the 'if' is an optimization only. - if (shift > 1) __ sarl(result, Immediate(31)); - __ shrl(result, Immediate(32 - shift)); - __ addl(result, dividend); - __ sarl(result, Immediate(shift)); - } - if (divisor < 0) __ negl(result); -} - - -void LCodeGen::DoDivByConstI(LDivByConstI* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - DCHECK(ToRegister(instr->result()).is(rdx)); - - if (divisor == 0) { - DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero); - return; - } - - // Check for (0 / -x) that will produce negative zero. 
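DoDivByPowerOf2I above uses the classic bias trick: adding 2^shift - 1 to negative dividends makes the subsequent arithmetic right shift round toward zero, matching idiv. A compact C++ equivalent (edge cases such as kMinInt / -1 are deopt-guarded in the original):

#include <cstdint>

int32_t DivByPowerOf2(int32_t dividend, int32_t shift, bool divisor_negative) {
  // The generated code derives the bias branch-free from the sign bit; a
  // conditional expresses the same value more readably here.
  int32_t bias = (dividend < 0 && shift > 0)
                     ? static_cast<int32_t>((1u << shift) - 1)
                     : 0;
  int32_t result = (dividend + bias) >> shift;  // arithmetic shift
  return divisor_negative ? -result : result;
}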
- HDiv* hdiv = instr->hydrogen(); - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { - __ testl(dividend, dividend); - DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero); - } - - __ TruncatingDiv(dividend, Abs(divisor)); - if (divisor < 0) __ negl(rdx); - - if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { - __ movl(rax, rdx); - __ imull(rax, rax, Immediate(divisor)); - __ subl(rax, dividend); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision); - } -} - - -// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. -void LCodeGen::DoDivI(LDivI* instr) { - HBinaryOperation* hdiv = instr->hydrogen(); - Register dividend = ToRegister(instr->dividend()); - Register divisor = ToRegister(instr->divisor()); - Register remainder = ToRegister(instr->temp()); - DCHECK(dividend.is(rax)); - DCHECK(remainder.is(rdx)); - DCHECK(ToRegister(instr->result()).is(rax)); - DCHECK(!divisor.is(rax)); - DCHECK(!divisor.is(rdx)); - - // Check for x / 0. - if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { - __ testl(divisor, divisor); - DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero); - } - - // Check for (0 / -x) that will produce negative zero. - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label dividend_not_zero; - __ testl(dividend, dividend); - __ j(not_zero, ÷nd_not_zero, Label::kNear); - __ testl(divisor, divisor); - DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero); - __ bind(÷nd_not_zero); - } - - // Check for (kMinInt / -1). - if (hdiv->CheckFlag(HValue::kCanOverflow)) { - Label dividend_not_min_int; - __ cmpl(dividend, Immediate(kMinInt)); - __ j(not_zero, ÷nd_not_min_int, Label::kNear); - __ cmpl(divisor, Immediate(-1)); - DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow); - __ bind(÷nd_not_min_int); - } - - // Sign extend to rdx (= remainder). - __ cdq(); - __ idivl(divisor); - - if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { - // Deoptimize if remainder is not 0. - __ testl(remainder, remainder); - DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision); - } -} - - -void LCodeGen::DoMulI(LMulI* instr) { - Register left = ToRegister(instr->left()); - LOperand* right = instr->right(); - - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - if (instr->hydrogen_value()->representation().IsSmi()) { - __ movp(kScratchRegister, left); - } else { - __ movl(kScratchRegister, left); - } - } - - bool can_overflow = - instr->hydrogen()->CheckFlag(HValue::kCanOverflow); - if (right->IsConstantOperand()) { - int32_t right_value = ToInteger32(LConstantOperand::cast(right)); - if (right_value == -1) { - __ negl(left); - } else if (right_value == 0) { - __ xorl(left, left); - } else if (right_value == 2) { - __ addl(left, left); - } else if (!can_overflow) { - // If the multiplication is known to not overflow, we - // can use operations that don't set the overflow flag - // correctly. - switch (right_value) { - case 1: - // Do nothing. 
- break; - case 3: - __ leal(left, Operand(left, left, times_2, 0)); - break; - case 4: - __ shll(left, Immediate(2)); - break; - case 5: - __ leal(left, Operand(left, left, times_4, 0)); - break; - case 8: - __ shll(left, Immediate(3)); - break; - case 9: - __ leal(left, Operand(left, left, times_8, 0)); - break; - case 16: - __ shll(left, Immediate(4)); - break; - default: - __ imull(left, left, Immediate(right_value)); - break; - } - } else { - __ imull(left, left, Immediate(right_value)); - } - } else if (right->IsStackSlot()) { - if (instr->hydrogen_value()->representation().IsSmi()) { - __ SmiToInteger64(left, left); - __ imulp(left, ToOperand(right)); - } else { - __ imull(left, ToOperand(right)); - } - } else { - if (instr->hydrogen_value()->representation().IsSmi()) { - __ SmiToInteger64(left, left); - __ imulp(left, ToRegister(right)); - } else { - __ imull(left, ToRegister(right)); - } - } - - if (can_overflow) { - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); - } - - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // Bail out if the result is supposed to be negative zero. - Label done; - if (instr->hydrogen_value()->representation().IsSmi()) { - __ testp(left, left); - } else { - __ testl(left, left); - } - __ j(not_zero, &done, Label::kNear); - if (right->IsConstantOperand()) { - // Constant can't be represented as 32-bit Smi due to immediate size - // limit. - DCHECK(SmiValuesAre32Bits() - ? !instr->hydrogen_value()->representation().IsSmi() - : SmiValuesAre31Bits()); - if (ToInteger32(LConstantOperand::cast(right)) < 0) { - DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero); - } else if (ToInteger32(LConstantOperand::cast(right)) == 0) { - __ cmpl(kScratchRegister, Immediate(0)); - DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero); - } - } else if (right->IsStackSlot()) { - if (instr->hydrogen_value()->representation().IsSmi()) { - __ orp(kScratchRegister, ToOperand(right)); - } else { - __ orl(kScratchRegister, ToOperand(right)); - } - DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero); - } else { - // Test the non-zero operand for negative sign. 
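The kBailoutOnMinusZero path in DoMulI keeps a copy of the left operand from before the multiply: if the 32-bit product is zero but either original operand was negative, the mathematically correct result is -0, which an int32 cannot represent, so the code deopts. The check reduces to a single sign test, sketched here:

#include <cstdint>

// left_before is the left operand saved before the multiply overwrote it
// (the scratch-register copy in the original sequence).
bool MultiplyNeedsMinusZeroDeopt(int32_t left_before, int32_t right,
                                 int32_t product) {
  // 0 * negative (or negative * 0) is -0 under JavaScript number semantics.
  return product == 0 && ((left_before | right) < 0);
}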
- if (instr->hydrogen_value()->representation().IsSmi()) { - __ orp(kScratchRegister, ToRegister(right)); - } else { - __ orl(kScratchRegister, ToRegister(right)); - } - DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero); - } - __ bind(&done); - } -} - - -void LCodeGen::DoBitI(LBitI* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - DCHECK(left->Equals(instr->result())); - DCHECK(left->IsRegister()); - - if (right->IsConstantOperand()) { - int32_t right_operand = - ToRepresentation(LConstantOperand::cast(right), - instr->hydrogen()->right()->representation()); - switch (instr->op()) { - case Token::BIT_AND: - __ andl(ToRegister(left), Immediate(right_operand)); - break; - case Token::BIT_OR: - __ orl(ToRegister(left), Immediate(right_operand)); - break; - case Token::BIT_XOR: - if (right_operand == int32_t(~0)) { - __ notl(ToRegister(left)); - } else { - __ xorl(ToRegister(left), Immediate(right_operand)); - } - break; - default: - UNREACHABLE(); - break; - } - } else if (right->IsStackSlot()) { - switch (instr->op()) { - case Token::BIT_AND: - if (instr->IsInteger32()) { - __ andl(ToRegister(left), ToOperand(right)); - } else { - __ andp(ToRegister(left), ToOperand(right)); - } - break; - case Token::BIT_OR: - if (instr->IsInteger32()) { - __ orl(ToRegister(left), ToOperand(right)); - } else { - __ orp(ToRegister(left), ToOperand(right)); - } - break; - case Token::BIT_XOR: - if (instr->IsInteger32()) { - __ xorl(ToRegister(left), ToOperand(right)); - } else { - __ xorp(ToRegister(left), ToOperand(right)); - } - break; - default: - UNREACHABLE(); - break; - } - } else { - DCHECK(right->IsRegister()); - switch (instr->op()) { - case Token::BIT_AND: - if (instr->IsInteger32()) { - __ andl(ToRegister(left), ToRegister(right)); - } else { - __ andp(ToRegister(left), ToRegister(right)); - } - break; - case Token::BIT_OR: - if (instr->IsInteger32()) { - __ orl(ToRegister(left), ToRegister(right)); - } else { - __ orp(ToRegister(left), ToRegister(right)); - } - break; - case Token::BIT_XOR: - if (instr->IsInteger32()) { - __ xorl(ToRegister(left), ToRegister(right)); - } else { - __ xorp(ToRegister(left), ToRegister(right)); - } - break; - default: - UNREACHABLE(); - break; - } - } -} - - -void LCodeGen::DoShiftI(LShiftI* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - DCHECK(left->Equals(instr->result())); - DCHECK(left->IsRegister()); - if (right->IsRegister()) { - DCHECK(ToRegister(right).is(rcx)); - - switch (instr->op()) { - case Token::ROR: - __ rorl_cl(ToRegister(left)); - break; - case Token::SAR: - __ sarl_cl(ToRegister(left)); - break; - case Token::SHR: - __ shrl_cl(ToRegister(left)); - if (instr->can_deopt()) { - __ testl(ToRegister(left), ToRegister(left)); - DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue); - } - break; - case Token::SHL: - __ shll_cl(ToRegister(left)); - break; - default: - UNREACHABLE(); - break; - } - } else { - int32_t value = ToInteger32(LConstantOperand::cast(right)); - uint8_t shift_count = static_cast(value & 0x1F); - switch (instr->op()) { - case Token::ROR: - if (shift_count != 0) { - __ rorl(ToRegister(left), Immediate(shift_count)); - } - break; - case Token::SAR: - if (shift_count != 0) { - __ sarl(ToRegister(left), Immediate(shift_count)); - } - break; - case Token::SHR: - if (shift_count != 0) { - __ shrl(ToRegister(left), Immediate(shift_count)); - } else if (instr->can_deopt()) { - __ testl(ToRegister(left), ToRegister(left)); - DeoptimizeIf(negative, instr, 
DeoptimizeReason::kNegativeValue); - } - break; - case Token::SHL: - if (shift_count != 0) { - if (instr->hydrogen_value()->representation().IsSmi()) { - if (SmiValuesAre32Bits()) { - __ shlp(ToRegister(left), Immediate(shift_count)); - } else { - DCHECK(SmiValuesAre31Bits()); - if (instr->can_deopt()) { - if (shift_count != 1) { - __ shll(ToRegister(left), Immediate(shift_count - 1)); - } - __ Integer32ToSmi(ToRegister(left), ToRegister(left)); - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); - } else { - __ shll(ToRegister(left), Immediate(shift_count)); - } - } - } else { - __ shll(ToRegister(left), Immediate(shift_count)); - } - } - break; - default: - UNREACHABLE(); - break; - } - } -} - - -void LCodeGen::DoSubI(LSubI* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - DCHECK(left->Equals(instr->result())); - - if (right->IsConstantOperand()) { - int32_t right_operand = - ToRepresentation(LConstantOperand::cast(right), - instr->hydrogen()->right()->representation()); - __ subl(ToRegister(left), Immediate(right_operand)); - } else if (right->IsRegister()) { - if (instr->hydrogen_value()->representation().IsSmi()) { - __ subp(ToRegister(left), ToRegister(right)); - } else { - __ subl(ToRegister(left), ToRegister(right)); - } - } else { - if (instr->hydrogen_value()->representation().IsSmi()) { - __ subp(ToRegister(left), ToOperand(right)); - } else { - __ subl(ToRegister(left), ToOperand(right)); - } - } - - if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); - } -} - - -void LCodeGen::DoConstantI(LConstantI* instr) { - Register dst = ToRegister(instr->result()); - if (instr->value() == 0) { - __ xorl(dst, dst); - } else { - __ movl(dst, Immediate(instr->value())); - } -} - - -void LCodeGen::DoConstantS(LConstantS* instr) { - __ Move(ToRegister(instr->result()), instr->value()); -} - - -void LCodeGen::DoConstantD(LConstantD* instr) { - __ Move(ToDoubleRegister(instr->result()), instr->bits()); -} - - -void LCodeGen::DoConstantE(LConstantE* instr) { - __ LoadAddress(ToRegister(instr->result()), instr->value()); -} - - -void LCodeGen::DoConstantT(LConstantT* instr) { - Handle object = instr->value(isolate()); - AllowDeferredHandleDereference smi_check; - __ Move(ToRegister(instr->result()), object); -} - - -Operand LCodeGen::BuildSeqStringOperand(Register string, - LOperand* index, - String::Encoding encoding) { - if (index->IsConstantOperand()) { - int offset = ToInteger32(LConstantOperand::cast(index)); - if (encoding == String::TWO_BYTE_ENCODING) { - offset *= kUC16Size; - } - STATIC_ASSERT(kCharSize == 1); - return FieldOperand(string, SeqString::kHeaderSize + offset); - } - return FieldOperand( - string, ToRegister(index), - encoding == String::ONE_BYTE_ENCODING ? 
times_1 : times_2, - SeqString::kHeaderSize); -} - - -void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { - String::Encoding encoding = instr->hydrogen()->encoding(); - Register result = ToRegister(instr->result()); - Register string = ToRegister(instr->string()); - - if (FLAG_debug_code) { - __ Push(string); - __ movp(string, FieldOperand(string, HeapObject::kMapOffset)); - __ movzxbp(string, FieldOperand(string, Map::kInstanceTypeOffset)); - - __ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask)); - static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; - static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; - __ cmpp(string, Immediate(encoding == String::ONE_BYTE_ENCODING - ? one_byte_seq_type : two_byte_seq_type)); - __ Check(equal, kUnexpectedStringType); - __ Pop(string); - } - - Operand operand = BuildSeqStringOperand(string, instr->index(), encoding); - if (encoding == String::ONE_BYTE_ENCODING) { - __ movzxbl(result, operand); - } else { - __ movzxwl(result, operand); - } -} - - -void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { - String::Encoding encoding = instr->hydrogen()->encoding(); - Register string = ToRegister(instr->string()); - - if (FLAG_debug_code) { - Register value = ToRegister(instr->value()); - Register index = ToRegister(instr->index()); - static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; - static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; - int encoding_mask = - instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING - ? one_byte_seq_type : two_byte_seq_type; - __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask); - } - - Operand operand = BuildSeqStringOperand(string, instr->index(), encoding); - if (instr->value()->IsConstantOperand()) { - int value = ToInteger32(LConstantOperand::cast(instr->value())); - DCHECK_LE(0, value); - if (encoding == String::ONE_BYTE_ENCODING) { - DCHECK_LE(value, String::kMaxOneByteCharCode); - __ movb(operand, Immediate(value)); - } else { - DCHECK_LE(value, String::kMaxUtf16CodeUnit); - __ movw(operand, Immediate(value)); - } - } else { - Register value = ToRegister(instr->value()); - if (encoding == String::ONE_BYTE_ENCODING) { - __ movb(operand, value); - } else { - __ movw(operand, value); - } - } -} - - -void LCodeGen::DoAddI(LAddI* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - - Representation target_rep = instr->hydrogen()->representation(); - bool is_p = target_rep.IsSmi() || target_rep.IsExternal(); - - if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) { - if (right->IsConstantOperand()) { - // No support for smi-immediates for 32-bit SMI. - DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits()); - int32_t offset = - ToRepresentation(LConstantOperand::cast(right), - instr->hydrogen()->right()->representation()); - if (is_p) { - __ leap(ToRegister(instr->result()), - MemOperand(ToRegister(left), offset)); - } else { - __ leal(ToRegister(instr->result()), - MemOperand(ToRegister(left), offset)); - } - } else { - Operand address(ToRegister(left), ToRegister(right), times_1, 0); - if (is_p) { - __ leap(ToRegister(instr->result()), address); - } else { - __ leal(ToRegister(instr->result()), address); - } - } - } else { - if (right->IsConstantOperand()) { - // No support for smi-immediates for 32-bit SMI. - DCHECK(SmiValuesAre32Bits() ? 
!target_rep.IsSmi() : SmiValuesAre31Bits()); - int32_t right_operand = - ToRepresentation(LConstantOperand::cast(right), - instr->hydrogen()->right()->representation()); - if (is_p) { - __ addp(ToRegister(left), Immediate(right_operand)); - } else { - __ addl(ToRegister(left), Immediate(right_operand)); - } - } else if (right->IsRegister()) { - if (is_p) { - __ addp(ToRegister(left), ToRegister(right)); - } else { - __ addl(ToRegister(left), ToRegister(right)); - } - } else { - if (is_p) { - __ addp(ToRegister(left), ToOperand(right)); - } else { - __ addl(ToRegister(left), ToOperand(right)); - } - } - if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); - } - } -} - - -void LCodeGen::DoMathMinMax(LMathMinMax* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - DCHECK(left->Equals(instr->result())); - HMathMinMax::Operation operation = instr->hydrogen()->operation(); - if (instr->hydrogen()->representation().IsSmiOrInteger32()) { - Label return_left; - Condition condition = (operation == HMathMinMax::kMathMin) - ? less_equal - : greater_equal; - Register left_reg = ToRegister(left); - if (right->IsConstantOperand()) { - Immediate right_imm = Immediate( - ToRepresentation(LConstantOperand::cast(right), - instr->hydrogen()->right()->representation())); - DCHECK(SmiValuesAre32Bits() - ? !instr->hydrogen()->representation().IsSmi() - : SmiValuesAre31Bits()); - __ cmpl(left_reg, right_imm); - __ j(condition, &return_left, Label::kNear); - __ movl(left_reg, right_imm); - } else if (right->IsRegister()) { - Register right_reg = ToRegister(right); - if (instr->hydrogen_value()->representation().IsSmi()) { - __ cmpp(left_reg, right_reg); - } else { - __ cmpl(left_reg, right_reg); - } - __ j(condition, &return_left, Label::kNear); - __ movp(left_reg, right_reg); - } else { - Operand right_op = ToOperand(right); - if (instr->hydrogen_value()->representation().IsSmi()) { - __ cmpp(left_reg, right_op); - } else { - __ cmpl(left_reg, right_op); - } - __ j(condition, &return_left, Label::kNear); - __ movp(left_reg, right_op); - } - __ bind(&return_left); - } else { - DCHECK(instr->hydrogen()->representation().IsDouble()); - Label not_nan, distinct, return_left, return_right; - Condition condition = (operation == HMathMinMax::kMathMin) ? below : above; - XMMRegister left_reg = ToDoubleRegister(left); - XMMRegister right_reg = ToDoubleRegister(right); - __ Ucomisd(left_reg, right_reg); - __ j(parity_odd, ¬_nan, Label::kNear); // Both are not NaN. - - // One of the numbers is NaN. Find which one and return it. - __ Ucomisd(left_reg, left_reg); - __ j(parity_even, &return_left, Label::kNear); // left is NaN. - __ jmp(&return_right, Label::kNear); // right is NaN. - - __ bind(¬_nan); - __ j(not_equal, &distinct, Label::kNear); // left != right. - - // left == right - XMMRegister xmm_scratch = double_scratch0(); - __ Xorpd(xmm_scratch, xmm_scratch); - __ Ucomisd(left_reg, xmm_scratch); - __ j(not_equal, &return_left, Label::kNear); // left == right != 0. - - // At this point, both left and right are either +0 or -0. 
- if (operation == HMathMinMax::kMathMin) { - __ Orpd(left_reg, right_reg); - } else { - __ Andpd(left_reg, right_reg); - } - __ jmp(&return_left, Label::kNear); - - __ bind(&distinct); - __ j(condition, &return_left, Label::kNear); - - __ bind(&return_right); - __ Movapd(left_reg, right_reg); - - __ bind(&return_left); - } -} - - -void LCodeGen::DoArithmeticD(LArithmeticD* instr) { - XMMRegister left = ToDoubleRegister(instr->left()); - XMMRegister right = ToDoubleRegister(instr->right()); - XMMRegister result = ToDoubleRegister(instr->result()); - switch (instr->op()) { - case Token::ADD: - if (CpuFeatures::IsSupported(AVX)) { - CpuFeatureScope scope(masm(), AVX); - __ vaddsd(result, left, right); - } else { - DCHECK(result.is(left)); - __ addsd(left, right); - } - break; - case Token::SUB: - if (CpuFeatures::IsSupported(AVX)) { - CpuFeatureScope scope(masm(), AVX); - __ vsubsd(result, left, right); - } else { - DCHECK(result.is(left)); - __ subsd(left, right); - } - break; - case Token::MUL: - if (CpuFeatures::IsSupported(AVX)) { - CpuFeatureScope scope(masm(), AVX); - __ vmulsd(result, left, right); - } else { - DCHECK(result.is(left)); - __ mulsd(left, right); - } - break; - case Token::DIV: - if (CpuFeatures::IsSupported(AVX)) { - CpuFeatureScope scope(masm(), AVX); - __ vdivsd(result, left, right); - } else { - DCHECK(result.is(left)); - __ divsd(left, right); - } - // Don't delete this mov. It may improve performance on some CPUs, - // when there is a (v)mulsd depending on the result - __ Movapd(result, result); - break; - case Token::MOD: { - DCHECK(left.is(xmm0)); - DCHECK(right.is(xmm1)); - DCHECK(result.is(xmm0)); - __ PrepareCallCFunction(2); - __ CallCFunction( - ExternalReference::mod_two_doubles_operation(isolate()), 2); - break; - } - default: - UNREACHABLE(); - break; - } -} - - -void LCodeGen::DoArithmeticT(LArithmeticT* instr) { - DCHECK(ToRegister(instr->context()).is(rsi)); - DCHECK(ToRegister(instr->left()).is(rdx)); - DCHECK(ToRegister(instr->right()).is(rax)); - DCHECK(ToRegister(instr->result()).is(rax)); - - UNREACHABLE(); -} - - -template -void LCodeGen::EmitBranch(InstrType instr, Condition cc) { - int left_block = instr->TrueDestination(chunk_); - int right_block = instr->FalseDestination(chunk_); - - int next_block = GetNextEmittedBlock(); - - if (right_block == left_block || cc == no_condition) { - EmitGoto(left_block); - } else if (left_block == next_block) { - __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block)); - } else if (right_block == next_block) { - __ j(cc, chunk_->GetAssemblyLabel(left_block)); - } else { - __ j(cc, chunk_->GetAssemblyLabel(left_block)); - if (cc != always) { - __ jmp(chunk_->GetAssemblyLabel(right_block)); - } - } -} - - -template -void LCodeGen::EmitTrueBranch(InstrType instr, Condition cc) { - int true_block = instr->TrueDestination(chunk_); - __ j(cc, chunk_->GetAssemblyLabel(true_block)); -} - - -template -void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) { - int false_block = instr->FalseDestination(chunk_); - __ j(cc, chunk_->GetAssemblyLabel(false_block)); -} - - -void LCodeGen::DoDebugBreak(LDebugBreak* instr) { - __ int3(); -} - - -void LCodeGen::DoBranch(LBranch* instr) { - Representation r = instr->hydrogen()->value()->representation(); - if (r.IsInteger32()) { - DCHECK(!info()->IsStub()); - Register reg = ToRegister(instr->value()); - __ testl(reg, reg); - EmitBranch(instr, not_zero); - } else if (r.IsSmi()) { - DCHECK(!info()->IsStub()); - Register reg = ToRegister(instr->value()); - __ 
testp(reg, reg); - EmitBranch(instr, not_zero); - } else if (r.IsDouble()) { - DCHECK(!info()->IsStub()); - XMMRegister reg = ToDoubleRegister(instr->value()); - XMMRegister xmm_scratch = double_scratch0(); - __ Xorpd(xmm_scratch, xmm_scratch); - __ Ucomisd(reg, xmm_scratch); - EmitBranch(instr, not_equal); - } else { - DCHECK(r.IsTagged()); - Register reg = ToRegister(instr->value()); - HType type = instr->hydrogen()->value()->type(); - if (type.IsBoolean()) { - DCHECK(!info()->IsStub()); - __ CompareRoot(reg, Heap::kTrueValueRootIndex); - EmitBranch(instr, equal); - } else if (type.IsSmi()) { - DCHECK(!info()->IsStub()); - __ SmiCompare(reg, Smi::kZero); - EmitBranch(instr, not_equal); - } else if (type.IsJSArray()) { - DCHECK(!info()->IsStub()); - EmitBranch(instr, no_condition); - } else if (type.IsHeapNumber()) { - DCHECK(!info()->IsStub()); - XMMRegister xmm_scratch = double_scratch0(); - __ Xorpd(xmm_scratch, xmm_scratch); - __ Ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); - EmitBranch(instr, not_equal); - } else if (type.IsString()) { - DCHECK(!info()->IsStub()); - __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0)); - EmitBranch(instr, not_equal); - } else { - ToBooleanHints expected = instr->hydrogen()->expected_input_types(); - // Avoid deopts in the case where we've never executed this path before. - if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny; - - if (expected & ToBooleanHint::kUndefined) { - // undefined -> false. - __ CompareRoot(reg, Heap::kUndefinedValueRootIndex); - __ j(equal, instr->FalseLabel(chunk_)); - } - if (expected & ToBooleanHint::kBoolean) { - // true -> true. - __ CompareRoot(reg, Heap::kTrueValueRootIndex); - __ j(equal, instr->TrueLabel(chunk_)); - // false -> false. - __ CompareRoot(reg, Heap::kFalseValueRootIndex); - __ j(equal, instr->FalseLabel(chunk_)); - } - if (expected & ToBooleanHint::kNull) { - // 'null' -> false. - __ CompareRoot(reg, Heap::kNullValueRootIndex); - __ j(equal, instr->FalseLabel(chunk_)); - } - - if (expected & ToBooleanHint::kSmallInteger) { - // Smis: 0 -> false, all other -> true. - __ Cmp(reg, Smi::kZero); - __ j(equal, instr->FalseLabel(chunk_)); - __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); - } else if (expected & ToBooleanHint::kNeedsMap) { - // If we need a map later and have a Smi -> deopt. - __ testb(reg, Immediate(kSmiTagMask)); - DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi); - } - - const Register map = kScratchRegister; - if (expected & ToBooleanHint::kNeedsMap) { - __ movp(map, FieldOperand(reg, HeapObject::kMapOffset)); - - if (expected & ToBooleanHint::kCanBeUndetectable) { - // Undetectable -> false. - __ testb(FieldOperand(map, Map::kBitFieldOffset), - Immediate(1 << Map::kIsUndetectable)); - __ j(not_zero, instr->FalseLabel(chunk_)); - } - } - - if (expected & ToBooleanHint::kReceiver) { - // spec object -> true. - __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE); - __ j(above_equal, instr->TrueLabel(chunk_)); - } - - if (expected & ToBooleanHint::kString) { - // String value -> false iff empty. - Label not_string; - __ CmpInstanceType(map, FIRST_NONSTRING_TYPE); - __ j(above_equal, ¬_string, Label::kNear); - __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0)); - __ j(not_zero, instr->TrueLabel(chunk_)); - __ jmp(instr->FalseLabel(chunk_)); - __ bind(¬_string); - } - - if (expected & ToBooleanHint::kSymbol) { - // Symbol value -> true. 
- __ CmpInstanceType(map, SYMBOL_TYPE); - __ j(equal, instr->TrueLabel(chunk_)); - } - - if (expected & ToBooleanHint::kHeapNumber) { - // heap number -> false iff +0, -0, or NaN. - Label not_heap_number; - __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); - __ j(not_equal, ¬_heap_number, Label::kNear); - XMMRegister xmm_scratch = double_scratch0(); - __ Xorpd(xmm_scratch, xmm_scratch); - __ Ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); - __ j(zero, instr->FalseLabel(chunk_)); - __ jmp(instr->TrueLabel(chunk_)); - __ bind(¬_heap_number); - } - - if (expected != ToBooleanHint::kAny) { - // We've seen something for the first time -> deopt. - // This can only happen if we are not generic already. - DeoptimizeIf(no_condition, instr, DeoptimizeReason::kUnexpectedObject); - } - } - } -} - - -void LCodeGen::EmitGoto(int block) { - if (!IsNextEmittedBlock(block)) { - __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block))); - } -} - - -void LCodeGen::DoGoto(LGoto* instr) { - EmitGoto(instr->block_id()); -} - - -inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { - Condition cond = no_condition; - switch (op) { - case Token::EQ: - case Token::EQ_STRICT: - cond = equal; - break; - case Token::NE: - case Token::NE_STRICT: - cond = not_equal; - break; - case Token::LT: - cond = is_unsigned ? below : less; - break; - case Token::GT: - cond = is_unsigned ? above : greater; - break; - case Token::LTE: - cond = is_unsigned ? below_equal : less_equal; - break; - case Token::GTE: - cond = is_unsigned ? above_equal : greater_equal; - break; - case Token::IN: - case Token::INSTANCEOF: - default: - UNREACHABLE(); - } - return cond; -} - - -void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - bool is_unsigned = - instr->is_double() || - instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) || - instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32); - Condition cc = TokenToCondition(instr->op(), is_unsigned); - - if (left->IsConstantOperand() && right->IsConstantOperand()) { - // We can statically evaluate the comparison. - double left_val = ToDouble(LConstantOperand::cast(left)); - double right_val = ToDouble(LConstantOperand::cast(right)); - int next_block = Token::EvalComparison(instr->op(), left_val, right_val) - ? instr->TrueDestination(chunk_) - : instr->FalseDestination(chunk_); - EmitGoto(next_block); - } else { - if (instr->is_double()) { - // Don't base result on EFLAGS when a NaN is involved. Instead - // jump to the false block. - __ Ucomisd(ToDoubleRegister(left), ToDoubleRegister(right)); - __ j(parity_even, instr->FalseLabel(chunk_)); - } else { - int32_t value; - if (right->IsConstantOperand()) { - value = ToInteger32(LConstantOperand::cast(right)); - if (instr->hydrogen_value()->representation().IsSmi()) { - __ Cmp(ToRegister(left), Smi::FromInt(value)); - } else { - __ cmpl(ToRegister(left), Immediate(value)); - } - } else if (left->IsConstantOperand()) { - value = ToInteger32(LConstantOperand::cast(left)); - if (instr->hydrogen_value()->representation().IsSmi()) { - if (right->IsRegister()) { - __ Cmp(ToRegister(right), Smi::FromInt(value)); - } else { - __ Cmp(ToOperand(right), Smi::FromInt(value)); - } - } else if (right->IsRegister()) { - __ cmpl(ToRegister(right), Immediate(value)); - } else { - __ cmpl(ToOperand(right), Immediate(value)); - } - // We commuted the operands, so commute the condition. 
- cc = CommuteCondition(cc); - } else if (instr->hydrogen_value()->representation().IsSmi()) { - if (right->IsRegister()) { - __ cmpp(ToRegister(left), ToRegister(right)); - } else { - __ cmpp(ToRegister(left), ToOperand(right)); - } - } else { - if (right->IsRegister()) { - __ cmpl(ToRegister(left), ToRegister(right)); - } else { - __ cmpl(ToRegister(left), ToOperand(right)); - } - } - } - EmitBranch(instr, cc); - } -} - - -void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { - Register left = ToRegister(instr->left()); - - if (instr->right()->IsConstantOperand()) { - Handle right = ToHandle(LConstantOperand::cast(instr->right())); - __ Cmp(left, right); - } else { - Register right = ToRegister(instr->right()); - __ cmpp(left, right); - } - EmitBranch(instr, equal); -} - - -void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { - if (instr->hydrogen()->representation().IsTagged()) { - Register input_reg = ToRegister(instr->object()); - __ Cmp(input_reg, factory()->the_hole_value()); - EmitBranch(instr, equal); - return; - } - - XMMRegister input_reg = ToDoubleRegister(instr->object()); - __ Ucomisd(input_reg, input_reg); - EmitFalseBranch(instr, parity_odd); - - __ subp(rsp, Immediate(kDoubleSize)); - __ Movsd(MemOperand(rsp, 0), input_reg); - __ addp(rsp, Immediate(kDoubleSize)); - - int offset = sizeof(kHoleNanUpper32); - __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32)); - EmitBranch(instr, equal); -} - - -Condition LCodeGen::EmitIsString(Register input, - Register temp1, - Label* is_not_string, - SmiCheck check_needed = INLINE_SMI_CHECK) { - if (check_needed == INLINE_SMI_CHECK) { - __ JumpIfSmi(input, is_not_string); - } - - Condition cond = masm_->IsObjectStringType(input, temp1, temp1); - - return cond; -} - - -void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { - Register reg = ToRegister(instr->value()); - Register temp = ToRegister(instr->temp()); - - SmiCheck check_needed = - instr->hydrogen()->value()->type().IsHeapObject() - ? 
OMIT_SMI_CHECK : INLINE_SMI_CHECK; - - Condition true_cond = EmitIsString( - reg, temp, instr->FalseLabel(chunk_), check_needed); - - EmitBranch(instr, true_cond); -} - - -void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { - Condition is_smi; - if (instr->value()->IsRegister()) { - Register input = ToRegister(instr->value()); - is_smi = masm()->CheckSmi(input); - } else { - Operand input = ToOperand(instr->value()); - is_smi = masm()->CheckSmi(input); - } - EmitBranch(instr, is_smi); -} - - -void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { - Register input = ToRegister(instr->value()); - Register temp = ToRegister(instr->temp()); - - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - __ JumpIfSmi(input, instr->FalseLabel(chunk_)); - } - __ movp(temp, FieldOperand(input, HeapObject::kMapOffset)); - __ testb(FieldOperand(temp, Map::kBitFieldOffset), - Immediate(1 << Map::kIsUndetectable)); - EmitBranch(instr, not_zero); -} - - -void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { - DCHECK(ToRegister(instr->context()).is(rsi)); - DCHECK(ToRegister(instr->left()).is(rdx)); - DCHECK(ToRegister(instr->right()).is(rax)); - - Handle code = CodeFactory::StringCompare(isolate(), instr->op()).code(); - CallCode(code, RelocInfo::CODE_TARGET, instr); - __ CompareRoot(rax, Heap::kTrueValueRootIndex); - EmitBranch(instr, equal); -} - - -static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { - InstanceType from = instr->from(); - InstanceType to = instr->to(); - if (from == FIRST_TYPE) return to; - DCHECK(from == to || to == LAST_TYPE); - return from; -} - - -static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { - InstanceType from = instr->from(); - InstanceType to = instr->to(); - if (from == to) return equal; - if (to == LAST_TYPE) return above_equal; - if (from == FIRST_TYPE) return below_equal; - UNREACHABLE(); -} - - -void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { - Register input = ToRegister(instr->value()); - - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - __ JumpIfSmi(input, instr->FalseLabel(chunk_)); - } - - __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister); - EmitBranch(instr, BranchCondition(instr->hydrogen())); -} - -// Branches to a label or falls through with the answer in the z flag. -// Trashes the temp register. -void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false, - Handle class_name, Register input, - Register temp, Register temp2) { - DCHECK(!input.is(temp)); - DCHECK(!input.is(temp2)); - DCHECK(!temp.is(temp2)); - - __ JumpIfSmi(input, is_false); - - __ CmpObjectType(input, FIRST_FUNCTION_TYPE, temp); - STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE); - if (String::Equals(isolate()->factory()->Function_string(), class_name)) { - __ j(above_equal, is_true); - } else { - __ j(above_equal, is_false); - } - - // Check if the constructor in the map is a function. - __ GetMapConstructor(temp, temp, kScratchRegister); - - // Objects with a non-function constructor have class 'Object'. - __ CmpInstanceType(kScratchRegister, JS_FUNCTION_TYPE); - if (String::Equals(class_name, isolate()->factory()->Object_string())) { - __ j(not_equal, is_true); - } else { - __ j(not_equal, is_false); - } - - // temp now contains the constructor function. Grab the - // instance class name from there. 
- __ movp(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset)); - __ movp(temp, - FieldOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset)); - // The class name we are testing against is internalized since it's a literal. - // The name in the constructor is internalized because of the way the context - // is booted. This routine isn't expected to work for random API-created - // classes and it doesn't have to because you can't access it with natives - // syntax. Since both sides are internalized it is sufficient to use an - // identity comparison. - DCHECK(class_name->IsInternalizedString()); - __ Cmp(temp, class_name); - // End with the answer in the z flag. -} - -void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { - Register input = ToRegister(instr->value()); - Register temp = ToRegister(instr->temp()); - Register temp2 = ToRegister(instr->temp2()); - Handle class_name = instr->hydrogen()->class_name(); - - EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), - class_name, input, temp, temp2); - - EmitBranch(instr, equal); -} - -void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { - Register reg = ToRegister(instr->value()); - - __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map()); - EmitBranch(instr, equal); -} - - -void LCodeGen::DoHasInPrototypeChainAndBranch( - LHasInPrototypeChainAndBranch* instr) { - Register const object = ToRegister(instr->object()); - Register const object_map = kScratchRegister; - Register const object_prototype = object_map; - Register const prototype = ToRegister(instr->prototype()); - - // The {object} must be a spec object. It's sufficient to know that {object} - // is not a smi, since all other non-spec objects have {null} prototypes and - // will be ruled out below. - if (instr->hydrogen()->ObjectNeedsSmiCheck()) { - Condition is_smi = __ CheckSmi(object); - EmitFalseBranch(instr, is_smi); - } - - // Loop through the {object}s prototype chain looking for the {prototype}. - __ movp(object_map, FieldOperand(object, HeapObject::kMapOffset)); - Label loop; - __ bind(&loop); - - // Deoptimize if the object needs to be access checked. - __ testb(FieldOperand(object_map, Map::kBitFieldOffset), - Immediate(1 << Map::kIsAccessCheckNeeded)); - DeoptimizeIf(not_zero, instr, DeoptimizeReason::kAccessCheck); - // Deoptimize for proxies. 
- __ CmpInstanceType(object_map, JS_PROXY_TYPE); - DeoptimizeIf(equal, instr, DeoptimizeReason::kProxy); - - __ movp(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset)); - __ CompareRoot(object_prototype, Heap::kNullValueRootIndex); - EmitFalseBranch(instr, equal); - __ cmpp(object_prototype, prototype); - EmitTrueBranch(instr, equal); - __ movp(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset)); - __ jmp(&loop); -} - - -void LCodeGen::DoCmpT(LCmpT* instr) { - DCHECK(ToRegister(instr->context()).is(rsi)); - Token::Value op = instr->op(); - - Handle ic = CodeFactory::CompareIC(isolate(), op).code(); - CallCode(ic, RelocInfo::CODE_TARGET, instr); - - Condition condition = TokenToCondition(op, false); - Label true_value, done; - __ testp(rax, rax); - __ j(condition, &true_value, Label::kNear); - __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); - __ jmp(&done, Label::kNear); - __ bind(&true_value); - __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex); - __ bind(&done); -} - - -void LCodeGen::DoReturn(LReturn* instr) { - if (FLAG_trace && info()->IsOptimizing()) { - // Preserve the return value on the stack and rely on the runtime call - // to return the value in the same register. We're leaving the code - // managed by the register allocator and tearing down the frame, it's - // safe to write to the context register. - __ Push(rax); - __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); - __ CallRuntime(Runtime::kTraceExit); - } - if (info()->saves_caller_doubles()) { - RestoreCallerDoubles(); - } - if (NeedsEagerFrame()) { - __ movp(rsp, rbp); - __ popq(rbp); - } - if (instr->has_constant_parameter_count()) { - __ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize, - rcx); - } else { - DCHECK(info()->IsStub()); // Functions would need to drop one more value. - Register reg = ToRegister(instr->parameter_count()); - // The argument count parameter is a smi - __ SmiToInteger32(reg, reg); - Register return_addr_reg = reg.is(rcx) ? rbx : rcx; - __ PopReturnAddressTo(return_addr_reg); - __ shlp(reg, Immediate(kPointerSizeLog2)); - __ addp(rsp, reg); - __ jmp(return_addr_reg); - } -} - - -void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { - Register context = ToRegister(instr->context()); - Register result = ToRegister(instr->result()); - __ movp(result, ContextOperand(context, instr->slot_index())); - if (instr->hydrogen()->RequiresHoleCheck()) { - __ CompareRoot(result, Heap::kTheHoleValueRootIndex); - if (instr->hydrogen()->DeoptimizesOnHole()) { - DeoptimizeIf(equal, instr, DeoptimizeReason::kHole); - } else { - Label is_not_hole; - __ j(not_equal, &is_not_hole, Label::kNear); - __ LoadRoot(result, Heap::kUndefinedValueRootIndex); - __ bind(&is_not_hole); - } - } -} - - -void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { - Register context = ToRegister(instr->context()); - Register value = ToRegister(instr->value()); - - Operand target = ContextOperand(context, instr->slot_index()); - - Label skip_assignment; - if (instr->hydrogen()->RequiresHoleCheck()) { - __ CompareRoot(target, Heap::kTheHoleValueRootIndex); - if (instr->hydrogen()->DeoptimizesOnHole()) { - DeoptimizeIf(equal, instr, DeoptimizeReason::kHole); - } else { - __ j(not_equal, &skip_assignment); - } - } - __ movp(target, value); - - if (instr->hydrogen()->NeedsWriteBarrier()) { - SmiCheck check_needed = - instr->hydrogen()->value()->type().IsHeapObject() - ? 
OMIT_SMI_CHECK : INLINE_SMI_CHECK; - int offset = Context::SlotOffset(instr->slot_index()); - Register scratch = ToRegister(instr->temp()); - __ RecordWriteContextSlot(context, - offset, - value, - scratch, - kSaveFPRegs, - EMIT_REMEMBERED_SET, - check_needed); - } - - __ bind(&skip_assignment); -} - - -void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { - HObjectAccess access = instr->hydrogen()->access(); - int offset = access.offset(); - - if (access.IsExternalMemory()) { - Register result = ToRegister(instr->result()); - if (instr->object()->IsConstantOperand()) { - DCHECK(result.is(rax)); - __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object()))); - } else { - Register object = ToRegister(instr->object()); - __ Load(result, MemOperand(object, offset), access.representation()); - } - return; - } - - Register object = ToRegister(instr->object()); - if (instr->hydrogen()->representation().IsDouble()) { - DCHECK(access.IsInobject()); - XMMRegister result = ToDoubleRegister(instr->result()); - __ Movsd(result, FieldOperand(object, offset)); - return; - } - - Register result = ToRegister(instr->result()); - if (!access.IsInobject()) { - __ movp(result, FieldOperand(object, JSObject::kPropertiesOffset)); - object = result; - } - - Representation representation = access.representation(); - if (representation.IsSmi() && SmiValuesAre32Bits() && - instr->hydrogen()->representation().IsInteger32()) { - if (FLAG_debug_code) { - Register scratch = kScratchRegister; - __ Load(scratch, FieldOperand(object, offset), representation); - __ AssertSmi(scratch); - } - - // Read int value directly from upper half of the smi. - STATIC_ASSERT(kSmiTag == 0); - DCHECK(kSmiTagSize + kSmiShiftSize == 32); - offset += kPointerSize / 2; - representation = Representation::Integer32(); - } - __ Load(result, FieldOperand(object, offset), representation); -} - - -void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { - Register function = ToRegister(instr->function()); - Register result = ToRegister(instr->result()); - - // Get the prototype or initial map from the function. - __ movp(result, - FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); - - // Check that the function has a prototype or an initial map. - __ CompareRoot(result, Heap::kTheHoleValueRootIndex); - DeoptimizeIf(equal, instr, DeoptimizeReason::kHole); - - // If the function does not have an initial map, we're done. - Label done; - __ CmpObjectType(result, MAP_TYPE, kScratchRegister); - __ j(not_equal, &done, Label::kNear); - - // Get the prototype from the initial map. - __ movp(result, FieldOperand(result, Map::kPrototypeOffset)); - - // All done. 
- __ bind(&done); -} - - -void LCodeGen::DoLoadRoot(LLoadRoot* instr) { - Register result = ToRegister(instr->result()); - __ LoadRoot(result, instr->index()); -} - - -void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { - Register arguments = ToRegister(instr->arguments()); - Register result = ToRegister(instr->result()); - - if (instr->length()->IsConstantOperand() && - instr->index()->IsConstantOperand()) { - int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index())); - int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length())); - if (const_index >= 0 && const_index < const_length) { - StackArgumentsAccessor args(arguments, const_length, - ARGUMENTS_DONT_CONTAIN_RECEIVER); - __ movp(result, args.GetArgumentOperand(const_index)); - } else if (FLAG_debug_code) { - __ int3(); - } - } else { - Register length = ToRegister(instr->length()); - // There are two words between the frame pointer and the last argument. - // Subtracting from length accounts for one of them add one more. - if (instr->index()->IsRegister()) { - __ subl(length, ToRegister(instr->index())); - } else { - __ subl(length, ToOperand(instr->index())); - } - StackArgumentsAccessor args(arguments, length, - ARGUMENTS_DONT_CONTAIN_RECEIVER); - __ movp(result, args.GetArgumentOperand(0)); - } -} - - -void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { - ElementsKind elements_kind = instr->elements_kind(); - LOperand* key = instr->key(); - if (kPointerSize == kInt32Size && !key->IsConstantOperand()) { - Register key_reg = ToRegister(key); - Representation key_representation = - instr->hydrogen()->key()->representation(); - if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) { - __ SmiToInteger64(key_reg, key_reg); - } else if (instr->hydrogen()->IsDehoisted()) { - // Sign extend key because it could be a 32 bit negative value - // and the dehoisted address computation happens in 64 bits - __ movsxlq(key_reg, key_reg); - } - } - Operand operand(BuildFastArrayOperand( - instr->elements(), - key, - instr->hydrogen()->key()->representation(), - elements_kind, - instr->base_offset())); - - if (elements_kind == FLOAT32_ELEMENTS) { - XMMRegister result(ToDoubleRegister(instr->result())); - __ Cvtss2sd(result, operand); - } else if (elements_kind == FLOAT64_ELEMENTS) { - __ Movsd(ToDoubleRegister(instr->result()), operand); - } else { - Register result(ToRegister(instr->result())); - switch (elements_kind) { - case INT8_ELEMENTS: - __ movsxbl(result, operand); - break; - case UINT8_ELEMENTS: - case UINT8_CLAMPED_ELEMENTS: - __ movzxbl(result, operand); - break; - case INT16_ELEMENTS: - __ movsxwl(result, operand); - break; - case UINT16_ELEMENTS: - __ movzxwl(result, operand); - break; - case INT32_ELEMENTS: - __ movl(result, operand); - break; - case UINT32_ELEMENTS: - __ movl(result, operand); - if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { - __ testl(result, result); - DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue); - } - break; - case FLOAT32_ELEMENTS: - case FLOAT64_ELEMENTS: - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case DICTIONARY_ELEMENTS: - case FAST_SLOPPY_ARGUMENTS_ELEMENTS: - case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: - case FAST_STRING_WRAPPER_ELEMENTS: - case SLOW_STRING_WRAPPER_ELEMENTS: - case NO_ELEMENTS: - UNREACHABLE(); - break; - } - } -} - - -void 
LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { - XMMRegister result(ToDoubleRegister(instr->result())); - LOperand* key = instr->key(); - if (kPointerSize == kInt32Size && !key->IsConstantOperand() && - instr->hydrogen()->IsDehoisted()) { - // Sign extend key because it could be a 32 bit negative value - // and the dehoisted address computation happens in 64 bits - __ movsxlq(ToRegister(key), ToRegister(key)); - } - if (instr->hydrogen()->RequiresHoleCheck()) { - Operand hole_check_operand = BuildFastArrayOperand( - instr->elements(), - key, - instr->hydrogen()->key()->representation(), - FAST_DOUBLE_ELEMENTS, - instr->base_offset() + sizeof(kHoleNanLower32)); - __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32)); - DeoptimizeIf(equal, instr, DeoptimizeReason::kHole); - } - - Operand double_load_operand = BuildFastArrayOperand( - instr->elements(), - key, - instr->hydrogen()->key()->representation(), - FAST_DOUBLE_ELEMENTS, - instr->base_offset()); - __ Movsd(result, double_load_operand); -} - - -void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { - HLoadKeyed* hinstr = instr->hydrogen(); - Register result = ToRegister(instr->result()); - LOperand* key = instr->key(); - bool requires_hole_check = hinstr->RequiresHoleCheck(); - Representation representation = hinstr->representation(); - int offset = instr->base_offset(); - - if (kPointerSize == kInt32Size && !key->IsConstantOperand() && - instr->hydrogen()->IsDehoisted()) { - // Sign extend key because it could be a 32 bit negative value - // and the dehoisted address computation happens in 64 bits - __ movsxlq(ToRegister(key), ToRegister(key)); - } - if (representation.IsInteger32() && SmiValuesAre32Bits() && - hinstr->elements_kind() == FAST_SMI_ELEMENTS) { - DCHECK(!requires_hole_check); - if (FLAG_debug_code) { - Register scratch = kScratchRegister; - __ Load(scratch, - BuildFastArrayOperand(instr->elements(), - key, - instr->hydrogen()->key()->representation(), - FAST_ELEMENTS, - offset), - Representation::Smi()); - __ AssertSmi(scratch); - } - // Read int value directly from upper half of the smi. - STATIC_ASSERT(kSmiTag == 0); - DCHECK(kSmiTagSize + kSmiShiftSize == 32); - offset += kPointerSize / 2; - } - - __ Load(result, - BuildFastArrayOperand(instr->elements(), key, - instr->hydrogen()->key()->representation(), - FAST_ELEMENTS, offset), - representation); - - // Check for the hole value. - if (requires_hole_check) { - if (IsFastSmiElementsKind(hinstr->elements_kind())) { - Condition smi = __ CheckSmi(result); - DeoptimizeIf(NegateCondition(smi), instr, DeoptimizeReason::kNotASmi); - } else { - __ CompareRoot(result, Heap::kTheHoleValueRootIndex); - DeoptimizeIf(equal, instr, DeoptimizeReason::kHole); - } - } else if (hinstr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) { - DCHECK(hinstr->elements_kind() == FAST_HOLEY_ELEMENTS); - Label done; - __ CompareRoot(result, Heap::kTheHoleValueRootIndex); - __ j(not_equal, &done); - if (info()->IsStub()) { - // A stub can safely convert the hole to undefined only if the array - // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise - // it needs to bail out. 
- __ LoadRoot(result, Heap::kArrayProtectorRootIndex); - __ Cmp(FieldOperand(result, PropertyCell::kValueOffset), - Smi::FromInt(Isolate::kProtectorValid)); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kHole); - } - __ Move(result, isolate()->factory()->undefined_value()); - __ bind(&done); - } -} - - -void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { - if (instr->is_fixed_typed_array()) { - DoLoadKeyedExternalArray(instr); - } else if (instr->hydrogen()->representation().IsDouble()) { - DoLoadKeyedFixedDoubleArray(instr); - } else { - DoLoadKeyedFixedArray(instr); - } -} - - -Operand LCodeGen::BuildFastArrayOperand( - LOperand* elements_pointer, - LOperand* key, - Representation key_representation, - ElementsKind elements_kind, - uint32_t offset) { - Register elements_pointer_reg = ToRegister(elements_pointer); - int shift_size = ElementsKindToShiftSize(elements_kind); - if (key->IsConstantOperand()) { - int32_t constant_value = ToInteger32(LConstantOperand::cast(key)); - if (constant_value & 0xF0000000) { - Abort(kArrayIndexConstantValueTooBig); - } - return Operand(elements_pointer_reg, - (constant_value << shift_size) + offset); - } else { - // Guaranteed by ArrayInstructionInterface::KeyedAccessIndexRequirement(). - DCHECK(key_representation.IsInteger32()); - - ScaleFactor scale_factor = static_cast(shift_size); - return Operand(elements_pointer_reg, - ToRegister(key), - scale_factor, - offset); - } -} - - -void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { - Register result = ToRegister(instr->result()); - - if (instr->hydrogen()->from_inlined()) { - __ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize)); - } else if (instr->hydrogen()->arguments_adaptor()) { - // Check for arguments adapter frame. - Label done, adapted; - __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); - __ cmpp(Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset), - Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); - __ j(equal, &adapted, Label::kNear); - - // No arguments adaptor frame. - __ movp(result, rbp); - __ jmp(&done, Label::kNear); - - // Arguments adaptor frame present. - __ bind(&adapted); - __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); - - // Result is the frame pointer for the frame if not adapted and for the real - // frame below the adaptor frame if adapted. - __ bind(&done); - } else { - __ movp(result, rbp); - } -} - - -void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { - Register result = ToRegister(instr->result()); - - Label done; - - // If no arguments adaptor frame the number of arguments is fixed. - if (instr->elements()->IsRegister()) { - __ cmpp(rbp, ToRegister(instr->elements())); - } else { - __ cmpp(rbp, ToOperand(instr->elements())); - } - __ movl(result, Immediate(scope()->num_parameters())); - __ j(equal, &done, Label::kNear); - - // Arguments adaptor frame present. Get argument length from there. - __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); - __ SmiToInteger32(result, - Operand(result, - ArgumentsAdaptorFrameConstants::kLengthOffset)); - - // Argument length is in result register. - __ bind(&done); -} - - -void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { - Register receiver = ToRegister(instr->receiver()); - Register function = ToRegister(instr->function()); - - // If the receiver is null or undefined, we have to pass the global - // object as a receiver to normal functions. 
Values have to be - // passed unchanged to builtins and strict-mode functions. - Label global_object, receiver_ok; - Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; - - if (!instr->hydrogen()->known_function()) { - // Do not transform the receiver to object for strict mode functions or - // builtins. - __ movp(kScratchRegister, - FieldOperand(function, JSFunction::kSharedFunctionInfoOffset)); - __ testl(FieldOperand(kScratchRegister, - SharedFunctionInfo::kCompilerHintsOffset), - Immediate(SharedFunctionInfo::IsStrictBit::kMask | - SharedFunctionInfo::IsNativeBit::kMask)); - __ j(not_equal, &receiver_ok, dist); - } - - // Normal function. Replace undefined or null with global receiver. - __ CompareRoot(receiver, Heap::kNullValueRootIndex); - __ j(equal, &global_object, dist); - __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex); - __ j(equal, &global_object, dist); - - // The receiver should be a JS object. - Condition is_smi = __ CheckSmi(receiver); - DeoptimizeIf(is_smi, instr, DeoptimizeReason::kSmi); - __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, kScratchRegister); - DeoptimizeIf(below, instr, DeoptimizeReason::kNotAJavaScriptObject); - - __ jmp(&receiver_ok, dist); - __ bind(&global_object); - __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset)); - __ movp(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX)); - __ movp(receiver, ContextOperand(receiver, Context::GLOBAL_PROXY_INDEX)); - - __ bind(&receiver_ok); -} - - -void LCodeGen::DoApplyArguments(LApplyArguments* instr) { - Register receiver = ToRegister(instr->receiver()); - Register function = ToRegister(instr->function()); - Register length = ToRegister(instr->length()); - Register elements = ToRegister(instr->elements()); - DCHECK(receiver.is(rax)); // Used for parameter count. - DCHECK(function.is(rdi)); // Required by InvokeFunction. - DCHECK(ToRegister(instr->result()).is(rax)); - - // Copy the arguments to this function possibly from the - // adaptor frame below it. - const uint32_t kArgumentsLimit = 1 * KB; - __ cmpp(length, Immediate(kArgumentsLimit)); - DeoptimizeIf(above, instr, DeoptimizeReason::kTooManyArguments); - - __ Push(receiver); - __ movp(receiver, length); - - // Loop through the arguments pushing them onto the execution - // stack. - Label invoke, loop; - // length is a small non-negative integer, due to the test above. - __ testl(length, length); - __ j(zero, &invoke, Label::kNear); - __ bind(&loop); - StackArgumentsAccessor args(elements, length, - ARGUMENTS_DONT_CONTAIN_RECEIVER); - __ Push(args.GetArgumentOperand(0)); - __ decl(length); - __ j(not_zero, &loop); - - // Invoke the function. - __ bind(&invoke); - - InvokeFlag flag = CALL_FUNCTION; - if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) { - DCHECK(!info()->saves_caller_doubles()); - // TODO(ishell): drop current frame before pushing arguments to the stack. - flag = JUMP_FUNCTION; - ParameterCount actual(rax); - // It is safe to use rbx, rcx and r8 as scratch registers here given that - // 1) we are not going to return to caller function anyway, - // 2) rbx (expected number of arguments) will be initialized below. 
- PrepareForTailCall(actual, rbx, rcx, r8); - } - - DCHECK(instr->HasPointerMap()); - LPointerMap* pointers = instr->pointer_map(); - SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt); - ParameterCount actual(rax); - __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator); -} - - -void LCodeGen::DoPushArgument(LPushArgument* instr) { - LOperand* argument = instr->value(); - EmitPushTaggedOperand(argument); -} - - -void LCodeGen::DoDrop(LDrop* instr) { - __ Drop(instr->count()); -} - - -void LCodeGen::DoThisFunction(LThisFunction* instr) { - Register result = ToRegister(instr->result()); - __ movp(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); -} - - -void LCodeGen::DoContext(LContext* instr) { - Register result = ToRegister(instr->result()); - if (info()->IsOptimizing()) { - __ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset)); - } else { - // If there is no frame, the context must be in rsi. - DCHECK(result.is(rsi)); - } -} - - -void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { - DCHECK(ToRegister(instr->context()).is(rsi)); - __ Push(instr->hydrogen()->declarations()); - __ Push(Smi::FromInt(instr->hydrogen()->flags())); - __ Push(instr->hydrogen()->feedback_vector()); - CallRuntime(Runtime::kDeclareGlobals, instr); -} - -void LCodeGen::CallKnownFunction(Handle function, - int formal_parameter_count, int arity, - bool is_tail_call, LInstruction* instr) { - bool dont_adapt_arguments = - formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; - bool can_invoke_directly = - dont_adapt_arguments || formal_parameter_count == arity; - - Register function_reg = rdi; - LPointerMap* pointers = instr->pointer_map(); - - if (can_invoke_directly) { - // Change context. - __ movp(rsi, FieldOperand(function_reg, JSFunction::kContextOffset)); - - // Always initialize new target and number of actual arguments. - __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex); - __ Set(rax, arity); - - bool is_self_call = function.is_identical_to(info()->closure()); - - // Invoke function. - if (is_self_call) { - Handle self(reinterpret_cast(__ CodeObject().location())); - if (is_tail_call) { - __ Jump(self, RelocInfo::CODE_TARGET); - } else { - __ Call(self, RelocInfo::CODE_TARGET); - } - } else { - Operand target = FieldOperand(function_reg, JSFunction::kCodeEntryOffset); - if (is_tail_call) { - __ Jump(target); - } else { - __ Call(target); - } - } - - if (!is_tail_call) { - // Set up deoptimization. - RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0); - } - } else { - // We need to adapt arguments. - SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); - ParameterCount actual(arity); - ParameterCount expected(formal_parameter_count); - InvokeFlag flag = is_tail_call ? 
JUMP_FUNCTION : CALL_FUNCTION; - __ InvokeFunction(function_reg, no_reg, expected, actual, flag, generator); - } -} - - -void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { - DCHECK(ToRegister(instr->result()).is(rax)); - - if (instr->hydrogen()->IsTailCall()) { - if (NeedsEagerFrame()) __ leave(); - - if (instr->target()->IsConstantOperand()) { - LConstantOperand* target = LConstantOperand::cast(instr->target()); - Handle code = Handle::cast(ToHandle(target)); - __ jmp(code, RelocInfo::CODE_TARGET); - } else { - DCHECK(instr->target()->IsRegister()); - Register target = ToRegister(instr->target()); - __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag)); - __ jmp(target); - } - } else { - LPointerMap* pointers = instr->pointer_map(); - SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); - - if (instr->target()->IsConstantOperand()) { - LConstantOperand* target = LConstantOperand::cast(instr->target()); - Handle code = Handle::cast(ToHandle(target)); - generator.BeforeCall(__ CallSize(code)); - __ call(code, RelocInfo::CODE_TARGET); - } else { - DCHECK(instr->target()->IsRegister()); - Register target = ToRegister(instr->target()); - generator.BeforeCall(__ CallSize(target)); - __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag)); - __ call(target); - } - generator.AfterCall(); - } -} - - -void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { - Register input_reg = ToRegister(instr->value()); - __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), - Heap::kHeapNumberMapRootIndex); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber); - - Label slow, allocated, done; - uint32_t available_regs = rax.bit() | rcx.bit() | rdx.bit() | rbx.bit(); - available_regs &= ~input_reg.bit(); - if (instr->context()->IsRegister()) { - // Make sure that the context isn't overwritten in the AllocateHeapNumber - // macro below. - available_regs &= ~ToRegister(instr->context()).bit(); - } - - Register tmp = - Register::from_code(base::bits::CountTrailingZeros32(available_regs)); - available_regs &= ~tmp.bit(); - Register tmp2 = - Register::from_code(base::bits::CountTrailingZeros32(available_regs)); - - // Preserve the value of all registers. - PushSafepointRegistersScope scope(this); - - __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset)); - // Check the sign of the argument. If the argument is positive, just - // return it. We do not need to patch the stack since |input| and - // |result| are the same register and |input| will be restored - // unchanged by popping safepoint registers. - __ testl(tmp, Immediate(HeapNumber::kSignMask)); - __ j(zero, &done); - - __ AllocateHeapNumber(tmp, tmp2, &slow); - __ jmp(&allocated, Label::kNear); - - // Slow case: Call the runtime system to do the number allocation. - __ bind(&slow); - CallRuntimeFromDeferred( - Runtime::kAllocateHeapNumber, 0, instr, instr->context()); - // Set the pointer to the new heap number in tmp. - if (!tmp.is(rax)) __ movp(tmp, rax); - // Restore input_reg after call to runtime. 
- __ LoadFromSafepointRegisterSlot(input_reg, input_reg); - - __ bind(&allocated); - __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset)); - __ shlq(tmp2, Immediate(1)); - __ shrq(tmp2, Immediate(1)); - __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2); - __ StoreToSafepointRegisterSlot(input_reg, tmp); - - __ bind(&done); -} - - -void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { - Register input_reg = ToRegister(instr->value()); - __ testl(input_reg, input_reg); - Label is_positive; - __ j(not_sign, &is_positive, Label::kNear); - __ negl(input_reg); // Sets flags. - DeoptimizeIf(negative, instr, DeoptimizeReason::kOverflow); - __ bind(&is_positive); -} - - -void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) { - Register input_reg = ToRegister(instr->value()); - __ testp(input_reg, input_reg); - Label is_positive; - __ j(not_sign, &is_positive, Label::kNear); - __ negp(input_reg); // Sets flags. - DeoptimizeIf(negative, instr, DeoptimizeReason::kOverflow); - __ bind(&is_positive); -} - - -void LCodeGen::DoMathAbs(LMathAbs* instr) { - // Class for deferred case. - class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode { - public: - DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { - codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); - } - LInstruction* instr() override { return instr_; } - - private: - LMathAbs* instr_; - }; - - DCHECK(instr->value()->Equals(instr->result())); - Representation r = instr->hydrogen()->value()->representation(); - - if (r.IsDouble()) { - XMMRegister scratch = double_scratch0(); - XMMRegister input_reg = ToDoubleRegister(instr->value()); - __ Xorpd(scratch, scratch); - __ Subsd(scratch, input_reg); - __ Andpd(input_reg, scratch); - } else if (r.IsInteger32()) { - EmitIntegerMathAbs(instr); - } else if (r.IsSmi()) { - EmitSmiMathAbs(instr); - } else { // Tagged case. - DeferredMathAbsTaggedHeapNumber* deferred = - new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); - Register input_reg = ToRegister(instr->value()); - // Smi check. - __ JumpIfNotSmi(input_reg, deferred->entry()); - EmitSmiMathAbs(instr); - __ bind(deferred->exit()); - } -} - -void LCodeGen::DoMathFloorD(LMathFloorD* instr) { - XMMRegister output_reg = ToDoubleRegister(instr->result()); - XMMRegister input_reg = ToDoubleRegister(instr->value()); - CpuFeatureScope scope(masm(), SSE4_1); - __ Roundsd(output_reg, input_reg, kRoundDown); -} - -void LCodeGen::DoMathFloorI(LMathFloorI* instr) { - XMMRegister xmm_scratch = double_scratch0(); - Register output_reg = ToRegister(instr->result()); - XMMRegister input_reg = ToDoubleRegister(instr->value()); - - if (CpuFeatures::IsSupported(SSE4_1)) { - CpuFeatureScope scope(masm(), SSE4_1); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // Deoptimize if minus zero. - __ Movq(output_reg, input_reg); - __ subq(output_reg, Immediate(1)); - DeoptimizeIf(overflow, instr, DeoptimizeReason::kMinusZero); - } - __ Roundsd(xmm_scratch, input_reg, kRoundDown); - __ Cvttsd2si(output_reg, xmm_scratch); - __ cmpl(output_reg, Immediate(0x1)); - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); - } else { - Label negative_sign, done; - // Deoptimize on unordered. - __ Xorpd(xmm_scratch, xmm_scratch); // Zero the register. 
- __ Ucomisd(input_reg, xmm_scratch); - DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN); - __ j(below, &negative_sign, Label::kNear); - - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // Check for negative zero. - Label positive_sign; - __ j(above, &positive_sign, Label::kNear); - __ Movmskpd(output_reg, input_reg); - __ testl(output_reg, Immediate(1)); - DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero); - __ Set(output_reg, 0); - __ jmp(&done); - __ bind(&positive_sign); - } - - // Use truncating instruction (OK because input is positive). - __ Cvttsd2si(output_reg, input_reg); - // Overflow is signalled with minint. - __ cmpl(output_reg, Immediate(0x1)); - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); - __ jmp(&done, Label::kNear); - - // Non-zero negative reaches here. - __ bind(&negative_sign); - // Truncate, then compare and compensate. - __ Cvttsd2si(output_reg, input_reg); - __ Cvtlsi2sd(xmm_scratch, output_reg); - __ Ucomisd(input_reg, xmm_scratch); - __ j(equal, &done, Label::kNear); - __ subl(output_reg, Immediate(1)); - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); - - __ bind(&done); - } -} - -void LCodeGen::DoMathRoundD(LMathRoundD* instr) { - XMMRegister xmm_scratch = double_scratch0(); - XMMRegister output_reg = ToDoubleRegister(instr->result()); - XMMRegister input_reg = ToDoubleRegister(instr->value()); - CpuFeatureScope scope(masm(), SSE4_1); - Label done; - __ Roundsd(output_reg, input_reg, kRoundUp); - __ Move(xmm_scratch, -0.5); - __ Addsd(xmm_scratch, output_reg); - __ Ucomisd(xmm_scratch, input_reg); - __ j(below_equal, &done, Label::kNear); - __ Move(xmm_scratch, 1.0); - __ Subsd(output_reg, xmm_scratch); - __ bind(&done); -} - -void LCodeGen::DoMathRoundI(LMathRoundI* instr) { - const XMMRegister xmm_scratch = double_scratch0(); - Register output_reg = ToRegister(instr->result()); - XMMRegister input_reg = ToDoubleRegister(instr->value()); - XMMRegister input_temp = ToDoubleRegister(instr->temp()); - static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5 - static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5 - - Label done, round_to_zero, below_one_half; - Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; - __ movq(kScratchRegister, one_half); - __ Movq(xmm_scratch, kScratchRegister); - __ Ucomisd(xmm_scratch, input_reg); - __ j(above, &below_one_half, Label::kNear); - - // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x). - __ Addsd(xmm_scratch, input_reg); - __ Cvttsd2si(output_reg, xmm_scratch); - // Overflow is signalled with minint. - __ cmpl(output_reg, Immediate(0x1)); - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); - __ jmp(&done, dist); - - __ bind(&below_one_half); - __ movq(kScratchRegister, minus_one_half); - __ Movq(xmm_scratch, kScratchRegister); - __ Ucomisd(xmm_scratch, input_reg); - __ j(below_equal, &round_to_zero, Label::kNear); - - // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then - // compare and compensate. - __ Movapd(input_temp, input_reg); // Do not alter input_reg. - __ Subsd(input_temp, xmm_scratch); - __ Cvttsd2si(output_reg, input_temp); - // Catch minint due to overflow, and to prevent overflow when compensating. 
- __ cmpl(output_reg, Immediate(0x1)); - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); - - __ Cvtlsi2sd(xmm_scratch, output_reg); - __ Ucomisd(xmm_scratch, input_temp); - __ j(equal, &done, dist); - __ subl(output_reg, Immediate(1)); - // No overflow because we already ruled out minint. - __ jmp(&done, dist); - - __ bind(&round_to_zero); - // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if - // we can ignore the difference between a result of -0 and +0. - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - __ Movq(output_reg, input_reg); - __ testq(output_reg, output_reg); - DeoptimizeIf(negative, instr, DeoptimizeReason::kMinusZero); - } - __ Set(output_reg, 0); - __ bind(&done); -} - - -void LCodeGen::DoMathFround(LMathFround* instr) { - XMMRegister input_reg = ToDoubleRegister(instr->value()); - XMMRegister output_reg = ToDoubleRegister(instr->result()); - __ Cvtsd2ss(output_reg, input_reg); - __ Cvtss2sd(output_reg, output_reg); -} - - -void LCodeGen::DoMathSqrt(LMathSqrt* instr) { - XMMRegister output = ToDoubleRegister(instr->result()); - if (instr->value()->IsDoubleRegister()) { - XMMRegister input = ToDoubleRegister(instr->value()); - __ Sqrtsd(output, input); - } else { - Operand input = ToOperand(instr->value()); - __ Sqrtsd(output, input); - } -} - - -void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { - XMMRegister xmm_scratch = double_scratch0(); - XMMRegister input_reg = ToDoubleRegister(instr->value()); - DCHECK(ToDoubleRegister(instr->result()).is(input_reg)); - - // Note that according to ECMA-262 15.8.2.13: - // Math.pow(-Infinity, 0.5) == Infinity - // Math.sqrt(-Infinity) == NaN - Label done, sqrt; - // Check base for -Infinity. According to IEEE-754, double-precision - // -Infinity has the highest 12 bits set and the lowest 52 bits cleared. - __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000)); - __ Movq(xmm_scratch, kScratchRegister); - __ Ucomisd(xmm_scratch, input_reg); - // Comparing -Infinity with NaN results in "unordered", which sets the - // zero flag as if both were equal. However, it also sets the carry flag. - __ j(not_equal, &sqrt, Label::kNear); - __ j(carry, &sqrt, Label::kNear); - // If input is -Infinity, return Infinity. - __ Xorpd(input_reg, input_reg); - __ Subsd(input_reg, xmm_scratch); - __ jmp(&done, Label::kNear); - - // Square root. - __ bind(&sqrt); - __ Xorpd(xmm_scratch, xmm_scratch); - __ Addsd(input_reg, xmm_scratch); // Convert -0 to +0. - __ Sqrtsd(input_reg, input_reg); - __ bind(&done); -} - - -void LCodeGen::DoPower(LPower* instr) { - Representation exponent_type = instr->hydrogen()->right()->representation(); - // Having marked this as a call, we can use any registers. - // Just make sure that the input/output registers are the expected ones. 
- - Register tagged_exponent = MathPowTaggedDescriptor::exponent(); - DCHECK(!instr->right()->IsRegister() || - ToRegister(instr->right()).is(tagged_exponent)); - DCHECK(!instr->right()->IsDoubleRegister() || - ToDoubleRegister(instr->right()).is(xmm1)); - DCHECK(ToDoubleRegister(instr->left()).is(xmm2)); - DCHECK(ToDoubleRegister(instr->result()).is(xmm3)); - - if (exponent_type.IsSmi()) { - MathPowStub stub(isolate(), MathPowStub::TAGGED); - __ CallStub(&stub); - } else if (exponent_type.IsTagged()) { - Label no_deopt; - __ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear); - __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber); - __ bind(&no_deopt); - MathPowStub stub(isolate(), MathPowStub::TAGGED); - __ CallStub(&stub); - } else if (exponent_type.IsInteger32()) { - MathPowStub stub(isolate(), MathPowStub::INTEGER); - __ CallStub(&stub); - } else { - DCHECK(exponent_type.IsDouble()); - MathPowStub stub(isolate(), MathPowStub::DOUBLE); - __ CallStub(&stub); - } -} - -void LCodeGen::DoMathCos(LMathCos* instr) { - DCHECK(ToDoubleRegister(instr->value()).is(xmm0)); - DCHECK(ToDoubleRegister(instr->result()).is(xmm0)); - __ PrepareCallCFunction(1); - __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 1); -} - -void LCodeGen::DoMathExp(LMathExp* instr) { - DCHECK(ToDoubleRegister(instr->value()).is(xmm0)); - DCHECK(ToDoubleRegister(instr->result()).is(xmm0)); - __ PrepareCallCFunction(1); - __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 1); -} - -void LCodeGen::DoMathSin(LMathSin* instr) { - DCHECK(ToDoubleRegister(instr->value()).is(xmm0)); - DCHECK(ToDoubleRegister(instr->result()).is(xmm0)); - __ PrepareCallCFunction(1); - __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 1); -} - -void LCodeGen::DoMathLog(LMathLog* instr) { - DCHECK(ToDoubleRegister(instr->value()).is(xmm0)); - DCHECK(ToDoubleRegister(instr->result()).is(xmm0)); - __ PrepareCallCFunction(1); - __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 1); -} - - -void LCodeGen::DoMathClz32(LMathClz32* instr) { - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - - __ Lzcntl(result, input); -} - -void LCodeGen::PrepareForTailCall(const ParameterCount& actual, - Register scratch1, Register scratch2, - Register scratch3) { -#if DEBUG - if (actual.is_reg()) { - DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3)); - } else { - DCHECK(!AreAliased(scratch1, scratch2, scratch3)); - } -#endif - if (FLAG_code_comments) { - if (actual.is_reg()) { - Comment(";;; PrepareForTailCall, actual: %s {", - RegisterConfiguration::Crankshaft()->GetGeneralRegisterName( - actual.reg().code())); - } else { - Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate()); - } - } - - // Check if next frame is an arguments adaptor frame. - Register caller_args_count_reg = scratch1; - Label no_arguments_adaptor, formal_parameter_count_loaded; - __ movp(scratch2, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); - __ cmpp(Operand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset), - Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); - __ j(not_equal, &no_arguments_adaptor, Label::kNear); - - // Drop current frame and load arguments count from arguments adaptor frame. 
-  __ movp(rbp, scratch2);
-  __ SmiToInteger32(
-      caller_args_count_reg,
-      Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ jmp(&formal_parameter_count_loaded, Label::kNear);
-
-  __ bind(&no_arguments_adaptor);
-  // Load caller's formal parameter count.
-  __ movp(caller_args_count_reg,
-          Immediate(info()->literal()->parameter_count()));
-
-  __ bind(&formal_parameter_count_loaded);
-  __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3,
-                        ReturnAddressState::kNotOnStack);
-  Comment(";;; }");
-}
-
-void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
-  HInvokeFunction* hinstr = instr->hydrogen();
-  DCHECK(ToRegister(instr->context()).is(rsi));
-  DCHECK(ToRegister(instr->function()).is(rdi));
-  DCHECK(instr->HasPointerMap());
-
-  bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
-
-  if (is_tail_call) {
-    DCHECK(!info()->saves_caller_doubles());
-    ParameterCount actual(instr->arity());
-    // It is safe to use rbx, rcx and r8 as scratch registers here given that
-    // 1) we are not going to return to caller function anyway,
-    // 2) rbx (expected number of arguments) will be initialized below.
-    PrepareForTailCall(actual, rbx, rcx, r8);
-  }
-
-  Handle<JSFunction> known_function = hinstr->known_function();
-  if (known_function.is_null()) {
-    LPointerMap* pointers = instr->pointer_map();
-    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-    ParameterCount actual(instr->arity());
-    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
-    __ InvokeFunction(rdi, no_reg, actual, flag, generator);
-  } else {
-    CallKnownFunction(known_function, hinstr->formal_parameter_count(),
-                      instr->arity(), is_tail_call, instr);
-  }
-}
-
-
-void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
-  DCHECK(ToRegister(instr->context()).is(rsi));
-  DCHECK(ToRegister(instr->constructor()).is(rdi));
-  DCHECK(ToRegister(instr->result()).is(rax));
-
-  __ Set(rax, instr->arity());
-  __ Move(rbx, instr->hydrogen()->site());
-
-  ElementsKind kind = instr->hydrogen()->elements_kind();
-  AllocationSiteOverrideMode override_mode = AllocationSite::ShouldTrack(kind) ?
DISABLE_ALLOCATION_SITES - : DONT_OVERRIDE; - - if (instr->arity() == 0) { - ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - } else if (instr->arity() == 1) { - Label done; - if (IsFastPackedElementsKind(kind)) { - Label packed_case; - // We might need a change here - // look at the first argument - __ movp(rcx, Operand(rsp, 0)); - __ testp(rcx, rcx); - __ j(zero, &packed_case, Label::kNear); - - ElementsKind holey_kind = GetHoleyElementsKind(kind); - ArraySingleArgumentConstructorStub stub(isolate(), - holey_kind, - override_mode); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - __ jmp(&done, Label::kNear); - __ bind(&packed_case); - } - - ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - __ bind(&done); - } else { - ArrayNArgumentsConstructorStub stub(isolate()); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - } -} - - -void LCodeGen::DoCallRuntime(LCallRuntime* instr) { - DCHECK(ToRegister(instr->context()).is(rsi)); - CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles()); -} - - -void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { - Register function = ToRegister(instr->function()); - Register code_object = ToRegister(instr->code_object()); - __ leap(code_object, FieldOperand(code_object, Code::kHeaderSize)); - __ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object); -} - - -void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { - Register result = ToRegister(instr->result()); - Register base = ToRegister(instr->base_object()); - if (instr->offset()->IsConstantOperand()) { - LConstantOperand* offset = LConstantOperand::cast(instr->offset()); - __ leap(result, Operand(base, ToInteger32(offset))); - } else { - Register offset = ToRegister(instr->offset()); - __ leap(result, Operand(base, offset, times_1, 0)); - } -} - - -void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { - HStoreNamedField* hinstr = instr->hydrogen(); - Representation representation = instr->representation(); - - HObjectAccess access = hinstr->access(); - int offset = access.offset(); - - if (access.IsExternalMemory()) { - DCHECK(!hinstr->NeedsWriteBarrier()); - Register value = ToRegister(instr->value()); - if (instr->object()->IsConstantOperand()) { - DCHECK(value.is(rax)); - LConstantOperand* object = LConstantOperand::cast(instr->object()); - __ store_rax(ToExternalReference(object)); - } else { - Register object = ToRegister(instr->object()); - __ Store(MemOperand(object, offset), value, representation); - } - return; - } - - Register object = ToRegister(instr->object()); - __ AssertNotSmi(object); - - DCHECK(!representation.IsSmi() || - !instr->value()->IsConstantOperand() || - IsInteger32Constant(LConstantOperand::cast(instr->value()))); - if (!FLAG_unbox_double_fields && representation.IsDouble()) { - DCHECK(access.IsInobject()); - DCHECK(!hinstr->has_transition()); - DCHECK(!hinstr->NeedsWriteBarrier()); - XMMRegister value = ToDoubleRegister(instr->value()); - __ Movsd(FieldOperand(object, offset), value); - return; - } - - if (hinstr->has_transition()) { - Handle transition = hinstr->transition_map(); - AddDeprecationDependency(transition); - if (!hinstr->NeedsWriteBarrierForMap()) { - __ Move(FieldOperand(object, HeapObject::kMapOffset), transition); - } else { - Register temp = ToRegister(instr->temp()); - __ Move(kScratchRegister, transition); - __ 
movp(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister); - // Update the write barrier for the map field. - __ RecordWriteForMap(object, - kScratchRegister, - temp, - kSaveFPRegs); - } - } - - // Do the store. - Register write_register = object; - if (!access.IsInobject()) { - write_register = ToRegister(instr->temp()); - __ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset)); - } - - if (representation.IsSmi() && SmiValuesAre32Bits() && - hinstr->value()->representation().IsInteger32()) { - DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY); - if (FLAG_debug_code) { - Register scratch = kScratchRegister; - __ Load(scratch, FieldOperand(write_register, offset), representation); - __ AssertSmi(scratch); - } - // Store int value directly to upper half of the smi. - STATIC_ASSERT(kSmiTag == 0); - DCHECK(kSmiTagSize + kSmiShiftSize == 32); - offset += kPointerSize / 2; - representation = Representation::Integer32(); - } - - Operand operand = FieldOperand(write_register, offset); - - if (FLAG_unbox_double_fields && representation.IsDouble()) { - DCHECK(access.IsInobject()); - XMMRegister value = ToDoubleRegister(instr->value()); - __ Movsd(operand, value); - - } else if (instr->value()->IsRegister()) { - Register value = ToRegister(instr->value()); - __ Store(operand, value, representation); - } else { - LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); - if (IsInteger32Constant(operand_value)) { - DCHECK(!hinstr->NeedsWriteBarrier()); - int32_t value = ToInteger32(operand_value); - if (representation.IsSmi()) { - __ Move(operand, Smi::FromInt(value)); - - } else { - __ movl(operand, Immediate(value)); - } - - } else if (IsExternalConstant(operand_value)) { - DCHECK(!hinstr->NeedsWriteBarrier()); - ExternalReference ptr = ToExternalReference(operand_value); - __ Move(kScratchRegister, ptr); - __ movp(operand, kScratchRegister); - } else { - Handle handle_value = ToHandle(operand_value); - DCHECK(!hinstr->NeedsWriteBarrier()); - __ Move(operand, handle_value); - } - } - - if (hinstr->NeedsWriteBarrier()) { - Register value = ToRegister(instr->value()); - Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object; - // Update the write barrier for the object for in-object properties. - __ RecordWriteField(write_register, - offset, - value, - temp, - kSaveFPRegs, - EMIT_REMEMBERED_SET, - hinstr->SmiCheckForWriteBarrier(), - hinstr->PointersToHereCheckForValue()); - } -} - - -void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { - Representation representation = instr->hydrogen()->length()->representation(); - DCHECK(representation.Equals(instr->hydrogen()->index()->representation())); - DCHECK(representation.IsSmiOrInteger32()); - - Condition cc = instr->hydrogen()->allow_equality() ? 
below : below_equal; - if (instr->length()->IsConstantOperand()) { - int32_t length = ToInteger32(LConstantOperand::cast(instr->length())); - Register index = ToRegister(instr->index()); - if (representation.IsSmi()) { - __ Cmp(index, Smi::FromInt(length)); - } else { - __ cmpl(index, Immediate(length)); - } - cc = CommuteCondition(cc); - } else if (instr->index()->IsConstantOperand()) { - int32_t index = ToInteger32(LConstantOperand::cast(instr->index())); - if (instr->length()->IsRegister()) { - Register length = ToRegister(instr->length()); - if (representation.IsSmi()) { - __ Cmp(length, Smi::FromInt(index)); - } else { - __ cmpl(length, Immediate(index)); - } - } else { - Operand length = ToOperand(instr->length()); - if (representation.IsSmi()) { - __ Cmp(length, Smi::FromInt(index)); - } else { - __ cmpl(length, Immediate(index)); - } - } - } else { - Register index = ToRegister(instr->index()); - if (instr->length()->IsRegister()) { - Register length = ToRegister(instr->length()); - if (representation.IsSmi()) { - __ cmpp(length, index); - } else { - __ cmpl(length, index); - } - } else { - Operand length = ToOperand(instr->length()); - if (representation.IsSmi()) { - __ cmpp(length, index); - } else { - __ cmpl(length, index); - } - } - } - if (FLAG_debug_code && instr->hydrogen()->skip_check()) { - Label done; - __ j(NegateCondition(cc), &done, Label::kNear); - __ int3(); - __ bind(&done); - } else { - DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds); - } -} - - -void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { - ElementsKind elements_kind = instr->elements_kind(); - LOperand* key = instr->key(); - if (kPointerSize == kInt32Size && !key->IsConstantOperand()) { - Register key_reg = ToRegister(key); - Representation key_representation = - instr->hydrogen()->key()->representation(); - if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) { - __ SmiToInteger64(key_reg, key_reg); - } else if (instr->hydrogen()->IsDehoisted()) { - // Sign extend key because it could be a 32 bit negative value - // and the dehoisted address computation happens in 64 bits - __ movsxlq(key_reg, key_reg); - } - } - Operand operand(BuildFastArrayOperand( - instr->elements(), - key, - instr->hydrogen()->key()->representation(), - elements_kind, - instr->base_offset())); - - if (elements_kind == FLOAT32_ELEMENTS) { - XMMRegister value(ToDoubleRegister(instr->value())); - __ Cvtsd2ss(value, value); - __ Movss(operand, value); - } else if (elements_kind == FLOAT64_ELEMENTS) { - __ Movsd(operand, ToDoubleRegister(instr->value())); - } else { - Register value(ToRegister(instr->value())); - switch (elements_kind) { - case INT8_ELEMENTS: - case UINT8_ELEMENTS: - case UINT8_CLAMPED_ELEMENTS: - __ movb(operand, value); - break; - case INT16_ELEMENTS: - case UINT16_ELEMENTS: - __ movw(operand, value); - break; - case INT32_ELEMENTS: - case UINT32_ELEMENTS: - __ movl(operand, value); - break; - case FLOAT32_ELEMENTS: - case FLOAT64_ELEMENTS: - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case DICTIONARY_ELEMENTS: - case FAST_SLOPPY_ARGUMENTS_ELEMENTS: - case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: - case FAST_STRING_WRAPPER_ELEMENTS: - case SLOW_STRING_WRAPPER_ELEMENTS: - case NO_ELEMENTS: - UNREACHABLE(); - break; - } - } -} - - -void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { - XMMRegister value = ToDoubleRegister(instr->value()); - LOperand* key = 
instr->key(); - if (kPointerSize == kInt32Size && !key->IsConstantOperand() && - instr->hydrogen()->IsDehoisted()) { - // Sign extend key because it could be a 32 bit negative value - // and the dehoisted address computation happens in 64 bits - __ movsxlq(ToRegister(key), ToRegister(key)); - } - if (instr->NeedsCanonicalization()) { - XMMRegister xmm_scratch = double_scratch0(); - // Turn potential sNaN value into qNaN. - __ Xorpd(xmm_scratch, xmm_scratch); - __ Subsd(value, xmm_scratch); - } - - Operand double_store_operand = BuildFastArrayOperand( - instr->elements(), - key, - instr->hydrogen()->key()->representation(), - FAST_DOUBLE_ELEMENTS, - instr->base_offset()); - - __ Movsd(double_store_operand, value); -} - - -void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { - HStoreKeyed* hinstr = instr->hydrogen(); - LOperand* key = instr->key(); - int offset = instr->base_offset(); - Representation representation = hinstr->value()->representation(); - - if (kPointerSize == kInt32Size && !key->IsConstantOperand() && - instr->hydrogen()->IsDehoisted()) { - // Sign extend key because it could be a 32 bit negative value - // and the dehoisted address computation happens in 64 bits - __ movsxlq(ToRegister(key), ToRegister(key)); - } - if (representation.IsInteger32() && SmiValuesAre32Bits()) { - DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY); - DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS); - if (FLAG_debug_code) { - Register scratch = kScratchRegister; - __ Load(scratch, - BuildFastArrayOperand(instr->elements(), - key, - instr->hydrogen()->key()->representation(), - FAST_ELEMENTS, - offset), - Representation::Smi()); - __ AssertSmi(scratch); - } - // Store int value directly to upper half of the smi. - STATIC_ASSERT(kSmiTag == 0); - DCHECK(kSmiTagSize + kSmiShiftSize == 32); - offset += kPointerSize / 2; - } - - Operand operand = - BuildFastArrayOperand(instr->elements(), - key, - instr->hydrogen()->key()->representation(), - FAST_ELEMENTS, - offset); - if (instr->value()->IsRegister()) { - __ Store(operand, ToRegister(instr->value()), representation); - } else { - LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); - if (IsInteger32Constant(operand_value)) { - int32_t value = ToInteger32(operand_value); - if (representation.IsSmi()) { - __ Move(operand, Smi::FromInt(value)); - - } else { - __ movl(operand, Immediate(value)); - } - } else { - Handle handle_value = ToHandle(operand_value); - __ Move(operand, handle_value); - } - } - - if (hinstr->NeedsWriteBarrier()) { - Register elements = ToRegister(instr->elements()); - DCHECK(instr->value()->IsRegister()); - Register value = ToRegister(instr->value()); - DCHECK(!key->IsConstantOperand()); - SmiCheck check_needed = hinstr->value()->type().IsHeapObject() - ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; - // Compute address of modified element and store it into key register. 
- Register key_reg(ToRegister(key)); - __ leap(key_reg, operand); - __ RecordWrite(elements, - key_reg, - value, - kSaveFPRegs, - EMIT_REMEMBERED_SET, - check_needed, - hinstr->PointersToHereCheckForValue()); - } -} - - -void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { - if (instr->is_fixed_typed_array()) { - DoStoreKeyedExternalArray(instr); - } else if (instr->hydrogen()->value()->representation().IsDouble()) { - DoStoreKeyedFixedDoubleArray(instr); - } else { - DoStoreKeyedFixedArray(instr); - } -} - - -void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) { - class DeferredMaybeGrowElements final : public LDeferredCode { - public: - DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr) - : LDeferredCode(codegen), instr_(instr) {} - void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LMaybeGrowElements* instr_; - }; - - Register result = rax; - DeferredMaybeGrowElements* deferred = - new (zone()) DeferredMaybeGrowElements(this, instr); - LOperand* key = instr->key(); - LOperand* current_capacity = instr->current_capacity(); - - DCHECK(instr->hydrogen()->key()->representation().IsInteger32()); - DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32()); - DCHECK(key->IsConstantOperand() || key->IsRegister()); - DCHECK(current_capacity->IsConstantOperand() || - current_capacity->IsRegister()); - - if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) { - int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); - int32_t constant_capacity = - ToInteger32(LConstantOperand::cast(current_capacity)); - if (constant_key >= constant_capacity) { - // Deferred case. - __ jmp(deferred->entry()); - } - } else if (key->IsConstantOperand()) { - int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); - __ cmpl(ToRegister(current_capacity), Immediate(constant_key)); - __ j(less_equal, deferred->entry()); - } else if (current_capacity->IsConstantOperand()) { - int32_t constant_capacity = - ToInteger32(LConstantOperand::cast(current_capacity)); - __ cmpl(ToRegister(key), Immediate(constant_capacity)); - __ j(greater_equal, deferred->entry()); - } else { - __ cmpl(ToRegister(key), ToRegister(current_capacity)); - __ j(greater_equal, deferred->entry()); - } - - if (instr->elements()->IsRegister()) { - __ movp(result, ToRegister(instr->elements())); - } else { - __ movp(result, ToOperand(instr->elements())); - } - - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) { - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - Register result = rax; - __ Move(result, Smi::kZero); - - // We have to call a stub. 
- { - PushSafepointRegistersScope scope(this); - if (instr->object()->IsConstantOperand()) { - LConstantOperand* constant_object = - LConstantOperand::cast(instr->object()); - if (IsSmiConstant(constant_object)) { - Smi* immediate = ToSmi(constant_object); - __ Move(result, immediate); - } else { - Handle handle_value = ToHandle(constant_object); - __ Move(result, handle_value); - } - } else if (instr->object()->IsRegister()) { - __ Move(result, ToRegister(instr->object())); - } else { - __ movp(result, ToOperand(instr->object())); - } - - LOperand* key = instr->key(); - if (key->IsConstantOperand()) { - __ Move(rbx, ToSmi(LConstantOperand::cast(key))); - } else { - __ Move(rbx, ToRegister(key)); - __ Integer32ToSmi(rbx, rbx); - } - - GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind()); - __ CallStub(&stub); - RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0); - __ StoreToSafepointRegisterSlot(result, result); - } - - // Deopt on smi, which means the elements array changed to dictionary mode. - Condition is_smi = __ CheckSmi(result); - DeoptimizeIf(is_smi, instr, DeoptimizeReason::kSmi); -} - - -void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { - UNREACHABLE(); -} - - -void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { - Register object = ToRegister(instr->object()); - Register temp = ToRegister(instr->temp()); - Label no_memento_found; - __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); - DeoptimizeIf(equal, instr, DeoptimizeReason::kMementoFound); - __ bind(&no_memento_found); -} - - -void LCodeGen::DoStringAdd(LStringAdd* instr) { - DCHECK(ToRegister(instr->context()).is(rsi)); - DCHECK(ToRegister(instr->left()).is(rdx)); - DCHECK(ToRegister(instr->right()).is(rax)); - StringAddStub stub(isolate(), - instr->hydrogen()->flags(), - instr->hydrogen()->pretenure_flag()); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); -} - - -void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { - class DeferredStringCharCodeAt final : public LDeferredCode { - public: - DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LStringCharCodeAt* instr_; - }; - - DeferredStringCharCodeAt* deferred = - new(zone()) DeferredStringCharCodeAt(this, instr); - - StringCharLoadGenerator::Generate(masm(), - ToRegister(instr->string()), - ToRegister(instr->index()), - ToRegister(instr->result()), - deferred->entry()); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { - Register string = ToRegister(instr->string()); - Register result = ToRegister(instr->result()); - - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - __ Set(result, 0); - - PushSafepointRegistersScope scope(this); - __ Push(string); - // Push the index as a smi. This is safe because of the checks in - // DoStringCharCodeAt above. 
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue); - if (instr->index()->IsConstantOperand()) { - int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index())); - __ Push(Smi::FromInt(const_index)); - } else { - Register index = ToRegister(instr->index()); - __ Integer32ToSmi(index, index); - __ Push(index); - } - CallRuntimeFromDeferred( - Runtime::kStringCharCodeAtRT, 2, instr, instr->context()); - __ AssertSmi(rax); - __ SmiToInteger32(rax, rax); - __ StoreToSafepointRegisterSlot(result, rax); -} - - -void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { - class DeferredStringCharFromCode final : public LDeferredCode { - public: - DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { - codegen()->DoDeferredStringCharFromCode(instr_); - } - LInstruction* instr() override { return instr_; } - - private: - LStringCharFromCode* instr_; - }; - - DeferredStringCharFromCode* deferred = - new(zone()) DeferredStringCharFromCode(this, instr); - - DCHECK(instr->hydrogen()->value()->representation().IsInteger32()); - Register char_code = ToRegister(instr->char_code()); - Register result = ToRegister(instr->result()); - DCHECK(!char_code.is(result)); - - __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode)); - __ j(above, deferred->entry()); - __ movsxlq(char_code, char_code); - __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); - __ movp(result, FieldOperand(result, - char_code, times_pointer_size, - FixedArray::kHeaderSize)); - __ CompareRoot(result, Heap::kUndefinedValueRootIndex); - __ j(equal, deferred->entry()); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { - Register char_code = ToRegister(instr->char_code()); - Register result = ToRegister(instr->result()); - - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. 
- __ Set(result, 0); - - PushSafepointRegistersScope scope(this); - __ Integer32ToSmi(char_code, char_code); - __ Push(char_code); - CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr, - instr->context()); - __ StoreToSafepointRegisterSlot(result, rax); -} - - -void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { - LOperand* input = instr->value(); - DCHECK(input->IsRegister() || input->IsStackSlot()); - LOperand* output = instr->result(); - DCHECK(output->IsDoubleRegister()); - if (input->IsRegister()) { - __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input)); - } else { - __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input)); - } -} - - -void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { - LOperand* input = instr->value(); - LOperand* output = instr->result(); - - __ LoadUint32(ToDoubleRegister(output), ToRegister(input)); -} - - -void LCodeGen::DoNumberTagI(LNumberTagI* instr) { - class DeferredNumberTagI final : public LDeferredCode { - public: - DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { - codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(), - instr_->temp2(), SIGNED_INT32); - } - LInstruction* instr() override { return instr_; } - - private: - LNumberTagI* instr_; - }; - - LOperand* input = instr->value(); - DCHECK(input->IsRegister() && input->Equals(instr->result())); - Register reg = ToRegister(input); - - if (SmiValuesAre32Bits()) { - __ Integer32ToSmi(reg, reg); - } else { - DCHECK(SmiValuesAre31Bits()); - DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr); - __ Integer32ToSmi(reg, reg); - __ j(overflow, deferred->entry()); - __ bind(deferred->exit()); - } -} - - -void LCodeGen::DoNumberTagU(LNumberTagU* instr) { - class DeferredNumberTagU final : public LDeferredCode { - public: - DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { - codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(), - instr_->temp2(), UNSIGNED_INT32); - } - LInstruction* instr() override { return instr_; } - - private: - LNumberTagU* instr_; - }; - - LOperand* input = instr->value(); - DCHECK(input->IsRegister() && input->Equals(instr->result())); - Register reg = ToRegister(input); - - DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr); - __ cmpl(reg, Immediate(Smi::kMaxValue)); - __ j(above, deferred->entry()); - __ Integer32ToSmi(reg, reg); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, - LOperand* value, - LOperand* temp1, - LOperand* temp2, - IntegerSignedness signedness) { - Label done, slow; - Register reg = ToRegister(value); - Register tmp = ToRegister(temp1); - XMMRegister temp_xmm = ToDoubleRegister(temp2); - - // Load value into temp_xmm which will be preserved across potential call to - // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable - // XMM registers on x64). - if (signedness == SIGNED_INT32) { - DCHECK(SmiValuesAre31Bits()); - // There was overflow, so bits 30 and 31 of the original integer - // disagree. Try to allocate a heap number in new space and store - // the value in there. If that fails, call the runtime system. 
- __ SmiToInteger32(reg, reg); - __ xorl(reg, Immediate(0x80000000)); - __ Cvtlsi2sd(temp_xmm, reg); - } else { - DCHECK(signedness == UNSIGNED_INT32); - __ LoadUint32(temp_xmm, reg); - } - - if (FLAG_inline_new) { - __ AllocateHeapNumber(reg, tmp, &slow); - __ jmp(&done, kPointerSize == kInt64Size ? Label::kNear : Label::kFar); - } - - // Slow case: Call the runtime system to do the number allocation. - __ bind(&slow); - { - // Put a valid pointer value in the stack slot where the result - // register is stored, as this register is in the pointer map, but contains - // an integer value. - __ Set(reg, 0); - - // Preserve the value of all registers. - PushSafepointRegistersScope scope(this); - // Reset the context register. - if (!reg.is(rsi)) { - __ Set(rsi, 0); - } - __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); - RecordSafepointWithRegisters( - instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(reg, rax); - } - - // Done. Put the value in temp_xmm into the value of the allocated heap - // number. - __ bind(&done); - __ Movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm); -} - - -void LCodeGen::DoNumberTagD(LNumberTagD* instr) { - class DeferredNumberTagD final : public LDeferredCode { - public: - DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { codegen()->DoDeferredNumberTagD(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LNumberTagD* instr_; - }; - - XMMRegister input_reg = ToDoubleRegister(instr->value()); - Register reg = ToRegister(instr->result()); - Register tmp = ToRegister(instr->temp()); - - DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); - if (FLAG_inline_new) { - __ AllocateHeapNumber(reg, tmp, deferred->entry()); - } else { - __ jmp(deferred->entry()); - } - __ bind(deferred->exit()); - __ Movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); -} - - -void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - Register reg = ToRegister(instr->result()); - __ Move(reg, Smi::kZero); - - { - PushSafepointRegistersScope scope(this); - // Reset the context register. 
- if (!reg.is(rsi)) { - __ Move(rsi, 0); - } - __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); - RecordSafepointWithRegisters( - instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); - __ movp(kScratchRegister, rax); - } - __ movp(reg, kScratchRegister); -} - - -void LCodeGen::DoSmiTag(LSmiTag* instr) { - HChange* hchange = instr->hydrogen(); - Register input = ToRegister(instr->value()); - Register output = ToRegister(instr->result()); - if (hchange->CheckFlag(HValue::kCanOverflow) && - hchange->value()->CheckFlag(HValue::kUint32)) { - Condition is_smi = __ CheckUInteger32ValidSmiValue(input); - DeoptimizeIf(NegateCondition(is_smi), instr, DeoptimizeReason::kOverflow); - } - __ Integer32ToSmi(output, input); - if (hchange->CheckFlag(HValue::kCanOverflow) && - !hchange->value()->CheckFlag(HValue::kUint32)) { - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); - } -} - - -void LCodeGen::DoSmiUntag(LSmiUntag* instr) { - DCHECK(instr->value()->Equals(instr->result())); - Register input = ToRegister(instr->value()); - if (instr->needs_check()) { - Condition is_smi = __ CheckSmi(input); - DeoptimizeIf(NegateCondition(is_smi), instr, DeoptimizeReason::kNotASmi); - } else { - __ AssertSmi(input); - } - __ SmiToInteger32(input, input); -} - - -void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, - XMMRegister result_reg, NumberUntagDMode mode) { - bool can_convert_undefined_to_nan = instr->truncating(); - bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); - - Label convert, load_smi, done; - - if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { - // Smi check. - __ JumpIfSmi(input_reg, &load_smi, Label::kNear); - - // Heap number map check. - __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), - Heap::kHeapNumberMapRootIndex); - - // On x64 it is safe to load at heap number offset before evaluating the map - // check, since all heap objects are at least two words long. - __ Movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); - - if (can_convert_undefined_to_nan) { - __ j(not_equal, &convert, Label::kNear); - } else { - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber); - } - - if (deoptimize_on_minus_zero) { - XMMRegister xmm_scratch = double_scratch0(); - __ Xorpd(xmm_scratch, xmm_scratch); - __ Ucomisd(xmm_scratch, result_reg); - __ j(not_equal, &done, Label::kNear); - __ Movmskpd(kScratchRegister, result_reg); - __ testl(kScratchRegister, Immediate(1)); - DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero); - } - __ jmp(&done, Label::kNear); - - if (can_convert_undefined_to_nan) { - __ bind(&convert); - - // Convert undefined (and hole) to NaN. Compute NaN as 0/0. - __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex); - DeoptimizeIf(not_equal, instr, - DeoptimizeReason::kNotAHeapNumberUndefined); - - __ Xorpd(result_reg, result_reg); - __ Divsd(result_reg, result_reg); - __ jmp(&done, Label::kNear); - } - } else { - DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); - } - - // Smi to XMM conversion - __ bind(&load_smi); - __ SmiToInteger32(kScratchRegister, input_reg); - __ Cvtlsi2sd(result_reg, kScratchRegister); - __ bind(&done); -} - - -void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) { - Register input_reg = ToRegister(instr->value()); - - if (instr->truncating()) { - Register input_map_reg = kScratchRegister; - Label truncate; - Label::Distance truncate_distance = - DeoptEveryNTimes() ? 
Label::kFar : Label::kNear; - __ movp(input_map_reg, FieldOperand(input_reg, HeapObject::kMapOffset)); - __ JumpIfRoot(input_map_reg, Heap::kHeapNumberMapRootIndex, &truncate, - truncate_distance); - __ CmpInstanceType(input_map_reg, ODDBALL_TYPE); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotANumberOrOddball); - __ bind(&truncate); - __ TruncateHeapNumberToI(input_reg, input_reg); - } else { - XMMRegister scratch = ToDoubleRegister(instr->temp()); - DCHECK(!scratch.is(double_scratch0())); - __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), - Heap::kHeapNumberMapRootIndex); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber); - __ Movsd(double_scratch0(), - FieldOperand(input_reg, HeapNumber::kValueOffset)); - __ Cvttsd2si(input_reg, double_scratch0()); - __ Cvtlsi2sd(scratch, input_reg); - __ Ucomisd(double_scratch0(), scratch); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision); - DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN); - if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) { - __ testl(input_reg, input_reg); - __ j(not_zero, done); - __ Movmskpd(input_reg, double_scratch0()); - __ andl(input_reg, Immediate(1)); - DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero); - } - } -} - - -void LCodeGen::DoTaggedToI(LTaggedToI* instr) { - class DeferredTaggedToI final : public LDeferredCode { - public: - DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { codegen()->DoDeferredTaggedToI(instr_, done()); } - LInstruction* instr() override { return instr_; } - - private: - LTaggedToI* instr_; - }; - - LOperand* input = instr->value(); - DCHECK(input->IsRegister()); - DCHECK(input->Equals(instr->result())); - Register input_reg = ToRegister(input); - - if (instr->hydrogen()->value()->representation().IsSmi()) { - __ SmiToInteger32(input_reg, input_reg); - } else { - DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); - __ JumpIfNotSmi(input_reg, deferred->entry()); - __ SmiToInteger32(input_reg, input_reg); - __ bind(deferred->exit()); - } -} - - -void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { - LOperand* input = instr->value(); - DCHECK(input->IsRegister()); - LOperand* result = instr->result(); - DCHECK(result->IsDoubleRegister()); - - Register input_reg = ToRegister(input); - XMMRegister result_reg = ToDoubleRegister(result); - - HValue* value = instr->hydrogen()->value(); - NumberUntagDMode mode = value->representation().IsSmi() - ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; - - EmitNumberUntagD(instr, input_reg, result_reg, mode); -} - - -void LCodeGen::DoDoubleToI(LDoubleToI* instr) { - LOperand* input = instr->value(); - DCHECK(input->IsDoubleRegister()); - LOperand* result = instr->result(); - DCHECK(result->IsRegister()); - - XMMRegister input_reg = ToDoubleRegister(input); - Register result_reg = ToRegister(result); - - if (instr->truncating()) { - __ TruncateDoubleToI(result_reg, input_reg); - } else { - Label lost_precision, is_nan, minus_zero, done; - XMMRegister xmm_scratch = double_scratch0(); - Label::Distance dist = DeoptEveryNTimes() ? 
Label::kFar : Label::kNear;
-    __ DoubleToI(result_reg, input_reg, xmm_scratch,
-                 instr->hydrogen()->GetMinusZeroMode(), &lost_precision,
-                 &is_nan, &minus_zero, dist);
-    __ jmp(&done, dist);
-    __ bind(&lost_precision);
-    DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
-    __ bind(&is_nan);
-    DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
-    __ bind(&minus_zero);
-    DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
-    __ bind(&done);
-  }
-}
-
-
-void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
-  LOperand* input = instr->value();
-  DCHECK(input->IsDoubleRegister());
-  LOperand* result = instr->result();
-  DCHECK(result->IsRegister());
-
-  XMMRegister input_reg = ToDoubleRegister(input);
-  Register result_reg = ToRegister(result);
-
-  Label lost_precision, is_nan, minus_zero, done;
-  XMMRegister xmm_scratch = double_scratch0();
-  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
-  __ DoubleToI(result_reg, input_reg, xmm_scratch,
-               instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan,
-               &minus_zero, dist);
-  __ jmp(&done, dist);
-  __ bind(&lost_precision);
-  DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
-  __ bind(&is_nan);
-  DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
-  __ bind(&minus_zero);
-  DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
-  __ bind(&done);
-  __ Integer32ToSmi(result_reg, result_reg);
-  DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
-}
-
-
-void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
-  LOperand* input = instr->value();
-  Condition cc = masm()->CheckSmi(ToRegister(input));
-  DeoptimizeIf(NegateCondition(cc), instr, DeoptimizeReason::kNotASmi);
-}
-
-
-void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
-  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
-    LOperand* input = instr->value();
-    Condition cc = masm()->CheckSmi(ToRegister(input));
-    DeoptimizeIf(cc, instr, DeoptimizeReason::kSmi);
-  }
-}
-
-
-void LCodeGen::DoCheckArrayBufferNotNeutered(
-    LCheckArrayBufferNotNeutered* instr) {
-  Register view = ToRegister(instr->view());
-
-  __ movp(kScratchRegister,
-          FieldOperand(view, JSArrayBufferView::kBufferOffset));
-  __ testb(FieldOperand(kScratchRegister, JSArrayBuffer::kBitFieldOffset),
-           Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
-  DeoptimizeIf(not_zero, instr, DeoptimizeReason::kOutOfBounds);
-}
-
-
-void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
-  Register input = ToRegister(instr->value());
-
-  __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
-
-  if (instr->hydrogen()->is_interval_check()) {
-    InstanceType first;
-    InstanceType last;
-    instr->hydrogen()->GetCheckInterval(&first, &last);
-
-    __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
-            Immediate(static_cast<int8_t>(first)));
-
-    // If there is only one type in the interval check for equality.
-    if (first == last) {
-      DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
-    } else {
-      DeoptimizeIf(below, instr, DeoptimizeReason::kWrongInstanceType);
-      // Omit check for the last type.
-      if (last != LAST_TYPE) {
-        __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
-                Immediate(static_cast<int8_t>(last)));
-        DeoptimizeIf(above, instr, DeoptimizeReason::kWrongInstanceType);
-      }
-    }
-  } else {
-    uint8_t mask;
-    uint8_t tag;
-    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
-
-    if (base::bits::IsPowerOfTwo32(mask)) {
-      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
-      __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
-               Immediate(mask));
-      DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
-                   DeoptimizeReason::kWrongInstanceType);
-    } else {
-      __ movzxbl(kScratchRegister,
-                 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
-      __ andb(kScratchRegister, Immediate(mask));
-      __ cmpb(kScratchRegister, Immediate(tag));
-      DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
-    }
-  }
-}
-
-
-void LCodeGen::DoCheckValue(LCheckValue* instr) {
-  Register reg = ToRegister(instr->value());
-  __ Cmp(reg, instr->hydrogen()->object().handle());
-  DeoptimizeIf(not_equal, instr, DeoptimizeReason::kValueMismatch);
-}
-
-
-void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
-  Label deopt, done;
-  // If the map is not deprecated the migration attempt does not make sense.
-  __ Push(object);
-  __ movp(object, FieldOperand(object, HeapObject::kMapOffset));
-  __ testl(FieldOperand(object, Map::kBitField3Offset),
-           Immediate(Map::Deprecated::kMask));
-  __ Pop(object);
-  __ j(zero, &deopt);
-
-  {
-    PushSafepointRegistersScope scope(this);
-    __ Push(object);
-
-    __ Set(rsi, 0);
-    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
-    RecordSafepointWithRegisters(
-        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
-
-    __ testp(rax, Immediate(kSmiTagMask));
-  }
-  __ j(not_zero, &done);
-
-  __ bind(&deopt);
-  DeoptimizeIf(always, instr, DeoptimizeReason::kInstanceMigrationFailed);
-
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
-  class DeferredCheckMaps final : public LDeferredCode {
-   public:
-    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
-        : LDeferredCode(codegen), instr_(instr), object_(object) {
-      SetExit(check_maps());
-    }
-    void Generate() override {
-      codegen()->DoDeferredInstanceMigration(instr_, object_);
-    }
-    Label* check_maps() { return &check_maps_; }
-    LInstruction* instr() override { return instr_; }
-
-   private:
-    LCheckMaps* instr_;
-    Label check_maps_;
-    Register object_;
-  };
-
-  if (instr->hydrogen()->IsStabilityCheck()) {
-    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
-    for (int i = 0; i < maps->size(); ++i) {
-      AddStabilityDependency(maps->at(i).handle());
-    }
-    return;
-  }
-
-  LOperand* input = instr->value();
-  DCHECK(input->IsRegister());
-  Register reg = ToRegister(input);
-
-  DeferredCheckMaps* deferred = NULL;
-  if (instr->hydrogen()->HasMigrationTarget()) {
-    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
-    __ bind(deferred->check_maps());
-  }
-
-  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
-  Label success;
-  for (int i = 0; i < maps->size() - 1; i++) {
-    Handle<Map> map = maps->at(i).handle();
-    __ CompareMap(reg, map);
-    __ j(equal, &success, Label::kNear);
-  }
-
-  Handle<Map> map = maps->at(maps->size() - 1).handle();
-  __ CompareMap(reg, map);
-  if (instr->hydrogen()->HasMigrationTarget()) {
-    __ j(not_equal, deferred->entry());
-  } else {
-    DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
-  }
-
-  __ bind(&success);
-}
-
-
-void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
-  XMMRegister value_reg =
ToDoubleRegister(instr->unclamped()); - XMMRegister xmm_scratch = double_scratch0(); - Register result_reg = ToRegister(instr->result()); - __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg); -} - - -void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { - DCHECK(instr->unclamped()->Equals(instr->result())); - Register value_reg = ToRegister(instr->result()); - __ ClampUint8(value_reg); -} - - -void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { - DCHECK(instr->unclamped()->Equals(instr->result())); - Register input_reg = ToRegister(instr->unclamped()); - XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm()); - XMMRegister xmm_scratch = double_scratch0(); - Label is_smi, done, heap_number; - Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; - __ JumpIfSmi(input_reg, &is_smi, dist); - - // Check for heap number - __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset), - factory()->heap_number_map()); - __ j(equal, &heap_number, Label::kNear); - - // Check for undefined. Undefined is converted to zero for clamping - // conversions. - __ Cmp(input_reg, factory()->undefined_value()); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumberUndefined); - __ xorl(input_reg, input_reg); - __ jmp(&done, Label::kNear); - - // Heap number - __ bind(&heap_number); - __ Movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset)); - __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg); - __ jmp(&done, Label::kNear); - - // smi - __ bind(&is_smi); - __ SmiToInteger32(input_reg, input_reg); - __ ClampUint8(input_reg); - - __ bind(&done); -} - - -void LCodeGen::DoAllocate(LAllocate* instr) { - class DeferredAllocate final : public LDeferredCode { - public: - DeferredAllocate(LCodeGen* codegen, LAllocate* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { codegen()->DoDeferredAllocate(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LAllocate* instr_; - }; - - DeferredAllocate* deferred = - new(zone()) DeferredAllocate(this, instr); - - Register result = ToRegister(instr->result()); - Register temp = ToRegister(instr->temp()); - - // Allocate memory for the object. 
-  AllocationFlags flags = NO_ALLOCATION_FLAGS;
-  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
-    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
-  }
-  if (instr->hydrogen()->IsOldSpaceAllocation()) {
-    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
-    flags = static_cast<AllocationFlags>(flags | PRETENURE);
-  }
-
-  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
-    flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
-  }
-  DCHECK(!instr->hydrogen()->IsAllocationFolded());
-
-  if (instr->size()->IsConstantOperand()) {
-    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-    CHECK(size <= kMaxRegularHeapObjectSize);
-    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
-  } else {
-    Register size = ToRegister(instr->size());
-    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
-  }
-
-  __ bind(deferred->exit());
-
-  if (instr->hydrogen()->MustPrefillWithFiller()) {
-    if (instr->size()->IsConstantOperand()) {
-      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-      __ movl(temp, Immediate((size / kPointerSize) - 1));
-    } else {
-      temp = ToRegister(instr->size());
-      __ sarp(temp, Immediate(kPointerSizeLog2));
-      __ decl(temp);
-    }
-    Label loop;
-    __ bind(&loop);
-    __ Move(FieldOperand(result, temp, times_pointer_size, 0),
-            isolate()->factory()->one_pointer_filler_map());
-    __ decl(temp);
-    __ j(not_zero, &loop);
-  }
-}
-
-void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
-  DCHECK(instr->hydrogen()->IsAllocationFolded());
-  DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
-  Register result = ToRegister(instr->result());
-  Register temp = ToRegister(instr->temp());
-
-  AllocationFlags flags = ALLOCATION_FOLDED;
-  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
-    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
-  }
-  if (instr->hydrogen()->IsOldSpaceAllocation()) {
-    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
-    flags = static_cast<AllocationFlags>(flags | PRETENURE);
-  }
-  if (instr->size()->IsConstantOperand()) {
-    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-    CHECK(size <= kMaxRegularHeapObjectSize);
-    __ FastAllocate(size, result, temp, flags);
-  } else {
-    Register size = ToRegister(instr->size());
-    __ FastAllocate(size, result, temp, flags);
-  }
-}
-
-void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
-  Register result = ToRegister(instr->result());
-
-  // TODO(3095996): Get rid of this. For now, we need to make the
-  // result register contain a valid pointer because it is already
-  // contained in the register pointer map.
-  __ Move(result, Smi::kZero);
-
-  PushSafepointRegistersScope scope(this);
-  if (instr->size()->IsRegister()) {
-    Register size = ToRegister(instr->size());
-    DCHECK(!size.is(result));
-    __ Integer32ToSmi(size, size);
-    __ Push(size);
-  } else {
-    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-    __ Push(Smi::FromInt(size));
-  }
-
-  int flags = 0;
-  if (instr->hydrogen()->IsOldSpaceAllocation()) {
-    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
-    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
-  } else {
-    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
-  }
-  __ Push(Smi::FromInt(flags));
-
-  CallRuntimeFromDeferred(
-      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
-  __ StoreToSafepointRegisterSlot(result, rax);
-
-  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
-    AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
-    if (instr->hydrogen()->IsOldSpaceAllocation()) {
-      DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
-      allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
-    }
-    // If the allocation folding dominator allocate triggered a GC, allocation
-    // happened in the runtime. We have to reset the top pointer to virtually
-    // undo the allocation.
-    ExternalReference allocation_top =
-        AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
-    __ subp(rax, Immediate(kHeapObjectTag));
-    __ Store(allocation_top, rax);
-    __ addp(rax, Immediate(kHeapObjectTag));
-  }
-}
-
-
-void LCodeGen::DoTypeof(LTypeof* instr) {
-  DCHECK(ToRegister(instr->context()).is(rsi));
-  DCHECK(ToRegister(instr->value()).is(rbx));
-  Label end, do_call;
-  Register value_register = ToRegister(instr->value());
-  __ JumpIfNotSmi(value_register, &do_call);
-  __ Move(rax, isolate()->factory()->number_string());
-  __ jmp(&end);
-  __ bind(&do_call);
-  Callable callable = Builtins::CallableFor(isolate(), Builtins::kTypeof);
-  CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
-  __ bind(&end);
-}
-
-
-void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
-  DCHECK(!operand->IsDoubleRegister());
-  if (operand->IsConstantOperand()) {
-    __ Push(ToHandle(LConstantOperand::cast(operand)));
-  } else if (operand->IsRegister()) {
-    __ Push(ToRegister(operand));
-  } else {
-    __ Push(ToOperand(operand));
-  }
-}
-
-
-void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
-  Register input = ToRegister(instr->value());
-  Condition final_branch_condition = EmitTypeofIs(instr, input);
-  if (final_branch_condition != no_condition) {
-    EmitBranch(instr, final_branch_condition);
-  }
-}
-
-
-Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
-  Label* true_label = instr->TrueLabel(chunk_);
-  Label* false_label = instr->FalseLabel(chunk_);
-  Handle<String> type_name = instr->type_literal();
-  int left_block = instr->TrueDestination(chunk_);
-  int right_block = instr->FalseDestination(chunk_);
-  int next_block = GetNextEmittedBlock();
-
-  Label::Distance true_distance = left_block == next_block ? Label::kNear
-                                                            : Label::kFar;
-  Label::Distance false_distance = right_block == next_block ?
Label::kNear - : Label::kFar; - Condition final_branch_condition = no_condition; - Factory* factory = isolate()->factory(); - if (String::Equals(type_name, factory->number_string())) { - __ JumpIfSmi(input, true_label, true_distance); - __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset), - Heap::kHeapNumberMapRootIndex); - - final_branch_condition = equal; - - } else if (String::Equals(type_name, factory->string_string())) { - __ JumpIfSmi(input, false_label, false_distance); - __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input); - final_branch_condition = below; - - } else if (String::Equals(type_name, factory->symbol_string())) { - __ JumpIfSmi(input, false_label, false_distance); - __ CmpObjectType(input, SYMBOL_TYPE, input); - final_branch_condition = equal; - - } else if (String::Equals(type_name, factory->boolean_string())) { - __ CompareRoot(input, Heap::kTrueValueRootIndex); - __ j(equal, true_label, true_distance); - __ CompareRoot(input, Heap::kFalseValueRootIndex); - final_branch_condition = equal; - - } else if (String::Equals(type_name, factory->undefined_string())) { - __ CompareRoot(input, Heap::kNullValueRootIndex); - __ j(equal, false_label, false_distance); - __ JumpIfSmi(input, false_label, false_distance); - // Check for undetectable objects => true. - __ movp(input, FieldOperand(input, HeapObject::kMapOffset)); - __ testb(FieldOperand(input, Map::kBitFieldOffset), - Immediate(1 << Map::kIsUndetectable)); - final_branch_condition = not_zero; - - } else if (String::Equals(type_name, factory->function_string())) { - __ JumpIfSmi(input, false_label, false_distance); - // Check for callable and not undetectable objects => true. - __ movp(input, FieldOperand(input, HeapObject::kMapOffset)); - __ movzxbl(input, FieldOperand(input, Map::kBitFieldOffset)); - __ andb(input, - Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable))); - __ cmpb(input, Immediate(1 << Map::kIsCallable)); - final_branch_condition = equal; - - } else if (String::Equals(type_name, factory->object_string())) { - __ JumpIfSmi(input, false_label, false_distance); - __ CompareRoot(input, Heap::kNullValueRootIndex); - __ j(equal, true_label, true_distance); - STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); - __ CmpObjectType(input, FIRST_JS_RECEIVER_TYPE, input); - __ j(below, false_label, false_distance); - // Check for callable or undetectable objects => false. - __ testb(FieldOperand(input, Map::kBitFieldOffset), - Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable))); - final_branch_condition = zero; - - } else { - __ jmp(false_label, false_distance); - } - - return final_branch_condition; -} - - -void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { - if (info()->ShouldEnsureSpaceForLazyDeopt()) { - // Ensure that we have enough space after the previous lazy-bailout - // instruction for patching the code here. 
- int current_pc = masm()->pc_offset(); - if (current_pc < last_lazy_deopt_pc_ + space_needed) { - int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; - __ Nop(padding_size); - } - } - last_lazy_deopt_pc_ = masm()->pc_offset(); -} - - -void LCodeGen::DoLazyBailout(LLazyBailout* instr) { - last_lazy_deopt_pc_ = masm()->pc_offset(); - DCHECK(instr->HasEnvironment()); - LEnvironment* env = instr->environment(); - RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); - safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); -} - - -void LCodeGen::DoDeoptimize(LDeoptimize* instr) { - Deoptimizer::BailoutType type = instr->hydrogen()->type(); - // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the - // needed return address), even though the implementation of LAZY and EAGER is - // now identical. When LAZY is eventually completely folded into EAGER, remove - // the special case below. - if (info()->IsStub() && type == Deoptimizer::EAGER) { - type = Deoptimizer::LAZY; - } - DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type); -} - - -void LCodeGen::DoDummy(LDummy* instr) { - // Nothing to see here, move on! -} - - -void LCodeGen::DoDummyUse(LDummyUse* instr) { - // Nothing to see here, move on! -} - - -void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { - PushSafepointRegistersScope scope(this); - __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); - __ CallRuntimeSaveDoubles(Runtime::kStackGuard); - RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0); - DCHECK(instr->HasEnvironment()); - LEnvironment* env = instr->environment(); - safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); -} - - -void LCodeGen::DoStackCheck(LStackCheck* instr) { - class DeferredStackCheck final : public LDeferredCode { - public: - DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) - : LDeferredCode(codegen), instr_(instr) { } - void Generate() override { codegen()->DoDeferredStackCheck(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LStackCheck* instr_; - }; - - DCHECK(instr->HasEnvironment()); - LEnvironment* env = instr->environment(); - // There is no LLazyBailout instruction for stack-checks. We have to - // prepare for lazy deoptimization explicitly here. - if (instr->hydrogen()->is_function_entry()) { - // Perform stack overflow check. - Label done; - __ CompareRoot(rsp, Heap::kStackLimitRootIndex); - __ j(above_equal, &done, Label::kNear); - - DCHECK(instr->context()->IsRegister()); - DCHECK(ToRegister(instr->context()).is(rsi)); - CallCode(isolate()->builtins()->StackCheck(), - RelocInfo::CODE_TARGET, - instr); - __ bind(&done); - } else { - DCHECK(instr->hydrogen()->is_backwards_branch()); - // Perform stack overflow check if this goto needs it before jumping. - DeferredStackCheck* deferred_stack_check = - new(zone()) DeferredStackCheck(this, instr); - __ CompareRoot(rsp, Heap::kStackLimitRootIndex); - __ j(below, deferred_stack_check->entry()); - EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); - __ bind(instr->done_label()); - deferred_stack_check->SetExit(instr->done_label()); - RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); - // Don't record a deoptimization index for the safepoint here. - // This will be done explicitly when emitting call and the safepoint in - // the deferred code. 
- } -} - - -void LCodeGen::DoOsrEntry(LOsrEntry* instr) { - // This is a pseudo-instruction that ensures that the environment here is - // properly registered for deoptimization and records the assembler's PC - // offset. - LEnvironment* environment = instr->environment(); - - // If the environment were already registered, we would have no way of - // backpatching it with the spill slot operands. - DCHECK(!environment->HasBeenRegistered()); - RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); - - GenerateOsrPrologue(); -} - - -void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { - DCHECK(ToRegister(instr->context()).is(rsi)); - - Label use_cache, call_runtime; - __ CheckEnumCache(&call_runtime); - - __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset)); - __ jmp(&use_cache, Label::kNear); - - // Get the set of properties to enumerate. - __ bind(&call_runtime); - __ Push(rax); - CallRuntime(Runtime::kForInEnumerate, instr); - __ bind(&use_cache); -} - - -void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { - Register map = ToRegister(instr->map()); - Register result = ToRegister(instr->result()); - Label load_cache, done; - __ EnumLength(result, map); - __ Cmp(result, Smi::kZero); - __ j(not_equal, &load_cache, Label::kNear); - __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex); - __ jmp(&done, Label::kNear); - __ bind(&load_cache); - __ LoadInstanceDescriptors(map, result); - __ movp(result, - FieldOperand(result, DescriptorArray::kEnumCacheBridgeOffset)); - __ movp(result, - FieldOperand(result, FixedArray::SizeFor(instr->idx()))); - __ bind(&done); - Condition cc = masm()->CheckSmi(result); - DeoptimizeIf(cc, instr, DeoptimizeReason::kNoCache); -} - - -void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { - Register object = ToRegister(instr->value()); - __ cmpp(ToRegister(instr->map()), - FieldOperand(object, HeapObject::kMapOffset)); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap); -} - - -void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, - Register object, - Register index) { - PushSafepointRegistersScope scope(this); - __ Push(object); - __ Push(index); - __ xorp(rsi, rsi); - __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); - RecordSafepointWithRegisters( - instr->pointer_map(), 2, Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(object, rax); -} - - -void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { - class DeferredLoadMutableDouble final : public LDeferredCode { - public: - DeferredLoadMutableDouble(LCodeGen* codegen, - LLoadFieldByIndex* instr, - Register object, - Register index) - : LDeferredCode(codegen), - instr_(instr), - object_(object), - index_(index) { - } - void Generate() override { - codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_); - } - LInstruction* instr() override { return instr_; } - - private: - LLoadFieldByIndex* instr_; - Register object_; - Register index_; - }; - - Register object = ToRegister(instr->object()); - Register index = ToRegister(instr->index()); - - DeferredLoadMutableDouble* deferred; - deferred = new(zone()) DeferredLoadMutableDouble(this, instr, object, index); - - Label out_of_object, done; - __ Move(kScratchRegister, Smi::FromInt(1)); - __ testp(index, kScratchRegister); - __ j(not_zero, deferred->entry()); - - __ sarp(index, Immediate(1)); - - __ SmiToInteger32(index, index); - __ cmpl(index, Immediate(0)); - __ j(less, &out_of_object, Label::kNear); - __ movp(object, FieldOperand(object, - index, - times_pointer_size, 
- JSObject::kHeaderSize)); - __ jmp(&done, Label::kNear); - - __ bind(&out_of_object); - __ movp(object, FieldOperand(object, JSObject::kPropertiesOffset)); - __ negl(index); - // Index is now equal to out of object property index plus 1. - __ movp(object, FieldOperand(object, - index, - times_pointer_size, - FixedArray::kHeaderSize - kPointerSize)); - __ bind(deferred->exit()); - __ bind(&done); -} - -#undef __ - -} // namespace internal -} // namespace v8 - -#endif // V8_TARGET_ARCH_X64 diff --git a/src/crankshaft/x64/lithium-codegen-x64.h b/src/crankshaft/x64/lithium-codegen-x64.h deleted file mode 100644 index 7a8c84d7b2..0000000000 --- a/src/crankshaft/x64/lithium-codegen-x64.h +++ /dev/null @@ -1,382 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_X64_LITHIUM_CODEGEN_X64_H_ -#define V8_CRANKSHAFT_X64_LITHIUM_CODEGEN_X64_H_ - - -#include "src/ast/scopes.h" -#include "src/base/logging.h" -#include "src/crankshaft/lithium-codegen.h" -#include "src/crankshaft/x64/lithium-gap-resolver-x64.h" -#include "src/crankshaft/x64/lithium-x64.h" -#include "src/deoptimizer.h" -#include "src/safepoint-table.h" -#include "src/utils.h" - -namespace v8 { -namespace internal { - -// Forward declarations. -class LDeferredCode; -class SafepointGenerator; - -class LCodeGen: public LCodeGenBase { - public: - LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) - : LCodeGenBase(chunk, assembler, info), - jump_table_(4, info->zone()), - scope_(info->scope()), - deferred_(8, info->zone()), - frame_is_built_(false), - safepoints_(info->zone()), - resolver_(this), - expected_safepoint_kind_(Safepoint::kSimple) { - PopulateDeoptimizationLiteralsWithInlinedFunctions(); - } - - int LookupDestination(int block_id) const { - return chunk()->LookupDestination(block_id); - } - - bool IsNextEmittedBlock(int block_id) const { - return LookupDestination(block_id) == GetNextEmittedBlock(); - } - - bool NeedsEagerFrame() const { - return HasAllocatedStackSlots() || info()->is_non_deferred_calling() || - !info()->IsStub() || info()->requires_frame(); - } - bool NeedsDeferredFrame() const { - return !NeedsEagerFrame() && info()->is_deferred_calling(); - } - - // Support for converting LOperands to assembler types. - Register ToRegister(LOperand* op) const; - XMMRegister ToDoubleRegister(LOperand* op) const; - bool IsInteger32Constant(LConstantOperand* op) const; - bool IsExternalConstant(LConstantOperand* op) const; - bool IsDehoistedKeyConstant(LConstantOperand* op) const; - bool IsSmiConstant(LConstantOperand* op) const; - int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const; - int32_t ToInteger32(LConstantOperand* op) const; - Smi* ToSmi(LConstantOperand* op) const; - double ToDouble(LConstantOperand* op) const; - ExternalReference ToExternalReference(LConstantOperand* op) const; - Handle ToHandle(LConstantOperand* op) const; - Operand ToOperand(LOperand* op) const; - - // Try to generate code for the entire chunk, but it may fail if the - // chunk contains constructs we cannot handle. Returns true if the - // code generation attempt succeeded. - bool GenerateCode(); - - // Finish the code by setting stack height, safepoint, and bailout - // information on it. - void FinishCode(Handle code); - - // Deferred code support. 
- void DoDeferredNumberTagD(LNumberTagD* instr); - - enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 }; - void DoDeferredNumberTagIU(LInstruction* instr, - LOperand* value, - LOperand* temp1, - LOperand* temp2, - IntegerSignedness signedness); - - void DoDeferredTaggedToI(LTaggedToI* instr, Label* done); - void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr); - void DoDeferredStackCheck(LStackCheck* instr); - void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr); - void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr); - void DoDeferredStringCharFromCode(LStringCharFromCode* instr); - void DoDeferredAllocate(LAllocate* instr); - void DoDeferredInstanceMigration(LCheckMaps* instr, Register object); - void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, - Register object, - Register index); - -// Parallel move support. - void DoParallelMove(LParallelMove* move); - void DoGap(LGap* instr); - - // Emit frame translation commands for an environment. - void WriteTranslation(LEnvironment* environment, Translation* translation); - - // Declare methods that deal with the individual node types. -#define DECLARE_DO(type) void Do##type(L##type* node); - LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) -#undef DECLARE_DO - - private: - LPlatformChunk* chunk() const { return chunk_; } - Scope* scope() const { return scope_; } - HGraph* graph() const { return chunk()->graph(); } - - XMMRegister double_scratch0() const { return kScratchDoubleReg; } - - void EmitClassOfTest(Label* if_true, Label* if_false, - Handle class_name, Register input, - Register temporary, Register scratch); - - bool HasAllocatedStackSlots() const { - return chunk()->HasAllocatedStackSlots(); - } - int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); } - int GetTotalFrameSlotCount() const { - return chunk()->GetTotalFrameSlotCount(); - } - - void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); } - - - void SaveCallerDoubles(); - void RestoreCallerDoubles(); - - // Code generation passes. Returns true if code generation should - // continue. - void GenerateBodyInstructionPre(LInstruction* instr) override; - void GenerateBodyInstructionPost(LInstruction* instr) override; - bool GeneratePrologue(); - bool GenerateDeferredCode(); - bool GenerateJumpTable(); - bool GenerateSafepointTable(); - - // Generates the custom OSR entrypoint and sets the osr_pc_offset. 
- void GenerateOsrPrologue(); - - enum SafepointMode { - RECORD_SIMPLE_SAFEPOINT, - RECORD_SAFEPOINT_WITH_REGISTERS - }; - - void CallCodeGeneric(Handle<Code> code, - RelocInfo::Mode mode, - LInstruction* instr, - SafepointMode safepoint_mode, - int argc); - - - void CallCode(Handle<Code> code, - RelocInfo::Mode mode, - LInstruction* instr); - - void CallRuntime(const Runtime::Function* function, - int num_arguments, - LInstruction* instr, - SaveFPRegsMode save_doubles = kDontSaveFPRegs); - - void CallRuntime(Runtime::FunctionId id, - int num_arguments, - LInstruction* instr) { - const Runtime::Function* function = Runtime::FunctionForId(id); - CallRuntime(function, num_arguments, instr); - } - - void CallRuntime(Runtime::FunctionId id, LInstruction* instr) { - const Runtime::Function* function = Runtime::FunctionForId(id); - CallRuntime(function, function->nargs, instr); - } - - void CallRuntimeFromDeferred(Runtime::FunctionId id, - int argc, - LInstruction* instr, - LOperand* context); - - void LoadContextFromDeferred(LOperand* context); - - void PrepareForTailCall(const ParameterCount& actual, Register scratch1, - Register scratch2, Register scratch3); - - // Generate a direct call to a known function. Expects the function - // to be in rdi. - void CallKnownFunction(Handle<JSFunction> function, - int formal_parameter_count, int arity, - bool is_tail_call, LInstruction* instr); - - void RecordSafepointWithLazyDeopt(LInstruction* instr, - SafepointMode safepoint_mode, - int argc); - void RegisterEnvironmentForDeoptimization(LEnvironment* environment, - Safepoint::DeoptMode mode); - void DeoptimizeIf(Condition cc, LInstruction* instr, - DeoptimizeReason deopt_reason, - Deoptimizer::BailoutType bailout_type); - void DeoptimizeIf(Condition cc, LInstruction* instr, - DeoptimizeReason deopt_reason); - - bool DeoptEveryNTimes() { - return FLAG_deopt_every_n_times != 0 && !info()->IsStub(); - } - - void AddToTranslation(LEnvironment* environment, - Translation* translation, - LOperand* op, - bool is_tagged, - bool is_uint32, - int* object_index_pointer, - int* dematerialized_index_pointer); - - Register ToRegister(int index) const; - XMMRegister ToDoubleRegister(int index) const; - Operand BuildFastArrayOperand( - LOperand* elements_pointer, - LOperand* key, - Representation key_representation, - ElementsKind elements_kind, - uint32_t base_offset); - - Operand BuildSeqStringOperand(Register string, - LOperand* index, - String::Encoding encoding); - - void EmitIntegerMathAbs(LMathAbs* instr); - void EmitSmiMathAbs(LMathAbs* instr); - - // Support for recording safepoint information. - void RecordSafepoint(LPointerMap* pointers, - Safepoint::Kind kind, - int arguments, - Safepoint::DeoptMode mode); - void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode); - void RecordSafepoint(Safepoint::DeoptMode mode); - void RecordSafepointWithRegisters(LPointerMap* pointers, - int arguments, - Safepoint::DeoptMode mode); - - static Condition TokenToCondition(Token::Value op, bool is_unsigned); - void EmitGoto(int block); - - // EmitBranch expects to be the last instruction of a block. - template <class InstrType> - void EmitBranch(InstrType instr, Condition cc); - template <class InstrType> - void EmitTrueBranch(InstrType instr, Condition cc); - template <class InstrType> - void EmitFalseBranch(InstrType instr, Condition cc); - void EmitNumberUntagD(LNumberUntagD* instr, Register input, - XMMRegister result, NumberUntagDMode mode); - - // Emits optimized code for typeof x == "y". Modifies input register.
- // Returns the condition on which a final split to - // true and false label should be made, to optimize fallthrough. - Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input); - - // Emits optimized code for %_IsString(x). Preserves input register. - // Returns the condition on which a final split to - // true and false label should be made, to optimize fallthrough. - Condition EmitIsString(Register input, - Register temp1, - Label* is_not_string, - SmiCheck check_needed); - - // Emits code for pushing either a tagged constant, a (non-double) - // register, or a stack slot operand. - void EmitPushTaggedOperand(LOperand* operand); - - // Emits optimized code to deep-copy the contents of statically known - // object graphs (e.g. object literal boilerplate). - void EmitDeepCopy(Handle<JSObject> object, - Register result, - Register source, - int* offset, - AllocationSiteMode mode); - - void EnsureSpaceForLazyDeopt(int space_needed) override; - void DoLoadKeyedExternalArray(LLoadKeyed* instr); - void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr); - void DoLoadKeyedFixedArray(LLoadKeyed* instr); - void DoStoreKeyedExternalArray(LStoreKeyed* instr); - void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr); - void DoStoreKeyedFixedArray(LStoreKeyed* instr); - - template <class T> - void EmitVectorLoadICRegisters(T* instr); - -#ifdef _MSC_VER - // On windows, you may not access the stack more than one page below - // the most recently mapped page. To make the allocated area randomly - // accessible, we write an arbitrary value to each page in range - // rsp + offset - page_size .. rsp in turn. - void MakeSureStackPagesMapped(int offset); -#endif - - ZoneList<Deoptimizer::JumpTableEntry> jump_table_; - Scope* const scope_; - ZoneList<LDeferredCode*> deferred_; - bool frame_is_built_; - - // Builder that keeps track of safepoints in the code. The table - // itself is emitted at the end of the generated code. - SafepointTableBuilder safepoints_; - - // Compiles a set of parallel moves into a sequential list of moves. - LGapResolver resolver_; - - Safepoint::Kind expected_safepoint_kind_; - - class PushSafepointRegistersScope final BASE_EMBEDDED { - public: - explicit PushSafepointRegistersScope(LCodeGen* codegen) - : codegen_(codegen) { - DCHECK(codegen_->info()->is_calling()); - DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); - codegen_->masm_->PushSafepointRegisters(); - codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters; - } - - ~PushSafepointRegistersScope() { - DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters); - codegen_->masm_->PopSafepointRegisters(); - codegen_->expected_safepoint_kind_ = Safepoint::kSimple; - } - - private: - LCodeGen* codegen_; - }; - - friend class LDeferredCode; - friend class LEnvironment; - friend class SafepointGenerator; - DISALLOW_COPY_AND_ASSIGN(LCodeGen); -}; - - -class LDeferredCode: public ZoneObject { - public: - explicit LDeferredCode(LCodeGen* codegen) - : codegen_(codegen), - external_exit_(NULL), - instruction_index_(codegen->current_instruction_) { - codegen->AddDeferredCode(this); - } - - virtual ~LDeferredCode() {} - virtual void Generate() = 0; - virtual LInstruction* instr() = 0; - - void SetExit(Label* exit) { external_exit_ = exit; } - Label* entry() { return &entry_; } - Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; } - Label* done() { return codegen_->NeedsDeferredFrame() ?
&done_ : exit(); } - int instruction_index() const { return instruction_index_; } - - protected: - LCodeGen* codegen() const { return codegen_; } - MacroAssembler* masm() const { return codegen_->masm(); } - - private: - LCodeGen* codegen_; - Label entry_; - Label exit_; - Label done_; - Label* external_exit_; - int instruction_index_; -}; - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_X64_LITHIUM_CODEGEN_X64_H_ diff --git a/src/crankshaft/x64/lithium-gap-resolver-x64.cc b/src/crankshaft/x64/lithium-gap-resolver-x64.cc deleted file mode 100644 index 38b7d4525a..0000000000 --- a/src/crankshaft/x64/lithium-gap-resolver-x64.cc +++ /dev/null @@ -1,322 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#if V8_TARGET_ARCH_X64 - -#include "src/crankshaft/x64/lithium-gap-resolver-x64.h" - -#include "src/crankshaft/x64/lithium-codegen-x64.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - -LGapResolver::LGapResolver(LCodeGen* owner) - : cgen_(owner), moves_(32, owner->zone()) {} - - -void LGapResolver::Resolve(LParallelMove* parallel_move) { - DCHECK(moves_.is_empty()); - // Build up a worklist of moves. - BuildInitialMoveList(parallel_move); - - for (int i = 0; i < moves_.length(); ++i) { - LMoveOperands move = moves_[i]; - // Skip constants to perform them last. They don't block other moves - // and skipping such moves with register destinations keeps those - // registers free for the whole algorithm. - if (!move.IsEliminated() && !move.source()->IsConstantOperand()) { - PerformMove(i); - } - } - - // Perform the moves with constant sources. - for (int i = 0; i < moves_.length(); ++i) { - if (!moves_[i].IsEliminated()) { - DCHECK(moves_[i].source()->IsConstantOperand()); - EmitMove(i); - } - } - - moves_.Rewind(0); -} - - -void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) { - // Perform a linear sweep of the moves to add them to the initial list of - // moves to perform, ignoring any move that is redundant (the source is - // the same as the destination, the destination is ignored and - // unallocated, or the move was already eliminated). - const ZoneList* moves = parallel_move->move_operands(); - for (int i = 0; i < moves->length(); ++i) { - LMoveOperands move = moves->at(i); - if (!move.IsRedundant()) moves_.Add(move, cgen_->zone()); - } - Verify(); -} - - -void LGapResolver::PerformMove(int index) { - // Each call to this function performs a move and deletes it from the move - // graph. We first recursively perform any move blocking this one. We - // mark a move as "pending" on entry to PerformMove in order to detect - // cycles in the move graph. We use operand swaps to resolve cycles, - // which means that a call to PerformMove could change any source operand - // in the move graph. - - DCHECK(!moves_[index].IsPending()); - DCHECK(!moves_[index].IsRedundant()); - - // Clear this move's destination to indicate a pending move. The actual - // destination is saved in a stack-allocated local. Recursion may allow - // multiple moves to be pending. - DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated. - LOperand* destination = moves_[index].destination(); - moves_[index].set_destination(NULL); - - // Perform a depth-first traversal of the move graph to resolve - // dependencies. 
Any unperformed, unpending move with a source the same - // as this one's destination blocks this one so recursively perform all - // such moves. - for (int i = 0; i < moves_.length(); ++i) { - LMoveOperands other_move = moves_[i]; - if (other_move.Blocks(destination) && !other_move.IsPending()) { - // Though PerformMove can change any source operand in the move graph, - // this call cannot create a blocking move via a swap (this loop does - // not miss any). Assume there is a non-blocking move with source A - // and this move is blocked on source B and there is a swap of A and - // B. Then A and B must be involved in the same cycle (or they would - // not be swapped). Since this move's destination is B and there is - // only a single incoming edge to an operand, this move must also be - // involved in the same cycle. In that case, the blocking move will - // be created but will be "pending" when we return from PerformMove. - PerformMove(i); - } - } - - // We are about to resolve this move and don't need it marked as - // pending, so restore its destination. - moves_[index].set_destination(destination); - - // This move's source may have changed due to swaps to resolve cycles and - // so it may now be the last move in the cycle. If so remove it. - if (moves_[index].source()->Equals(destination)) { - moves_[index].Eliminate(); - return; - } - - // The move may be blocked on a (at most one) pending move, in which case - // we have a cycle. Search for such a blocking move and perform a swap to - // resolve it. - for (int i = 0; i < moves_.length(); ++i) { - LMoveOperands other_move = moves_[i]; - if (other_move.Blocks(destination)) { - DCHECK(other_move.IsPending()); - EmitSwap(index); - return; - } - } - - // This move is not blocked. - EmitMove(index); -} - - -void LGapResolver::Verify() { -#ifdef ENABLE_SLOW_DCHECKS - // No operand should be the destination for more than one move. - for (int i = 0; i < moves_.length(); ++i) { - LOperand* destination = moves_[i].destination(); - for (int j = i + 1; j < moves_.length(); ++j) { - SLOW_DCHECK(!destination->Equals(moves_[j].destination())); - } - } -#endif -} - - -#define __ ACCESS_MASM(cgen_->masm()) - - -void LGapResolver::EmitMove(int index) { - LOperand* source = moves_[index].source(); - LOperand* destination = moves_[index].destination(); - - // Dispatch on the source and destination operand kinds. Not all - // combinations are possible. 
- if (source->IsRegister()) { - Register src = cgen_->ToRegister(source); - if (destination->IsRegister()) { - Register dst = cgen_->ToRegister(destination); - __ movp(dst, src); - } else { - DCHECK(destination->IsStackSlot()); - Operand dst = cgen_->ToOperand(destination); - __ movp(dst, src); - } - - } else if (source->IsStackSlot()) { - Operand src = cgen_->ToOperand(source); - if (destination->IsRegister()) { - Register dst = cgen_->ToRegister(destination); - __ movp(dst, src); - } else { - DCHECK(destination->IsStackSlot()); - Operand dst = cgen_->ToOperand(destination); - __ movp(kScratchRegister, src); - __ movp(dst, kScratchRegister); - } - - } else if (source->IsConstantOperand()) { - LConstantOperand* constant_source = LConstantOperand::cast(source); - if (destination->IsRegister()) { - Register dst = cgen_->ToRegister(destination); - if (cgen_->IsSmiConstant(constant_source)) { - __ Move(dst, cgen_->ToSmi(constant_source)); - } else if (cgen_->IsInteger32Constant(constant_source)) { - int32_t constant = cgen_->ToInteger32(constant_source); - // Do sign extension only for constant used as de-hoisted array key. - // Others only need zero extension, which saves 2 bytes. - if (cgen_->IsDehoistedKeyConstant(constant_source)) { - __ Set(dst, constant); - } else { - __ Set(dst, static_cast<uint32_t>(constant)); - } - } else { - __ Move(dst, cgen_->ToHandle(constant_source)); - } - } else if (destination->IsDoubleRegister()) { - double v = cgen_->ToDouble(constant_source); - uint64_t int_val = bit_cast<uint64_t>(v); - XMMRegister dst = cgen_->ToDoubleRegister(destination); - if (int_val == 0) { - __ Xorpd(dst, dst); - } else { - __ Set(kScratchRegister, int_val); - __ Movq(dst, kScratchRegister); - } - } else { - DCHECK(destination->IsStackSlot()); - Operand dst = cgen_->ToOperand(destination); - if (cgen_->IsSmiConstant(constant_source)) { - __ Move(dst, cgen_->ToSmi(constant_source)); - } else if (cgen_->IsInteger32Constant(constant_source)) { - // Do sign extension to 64 bits when stored into stack slot. - __ movp(dst, Immediate(cgen_->ToInteger32(constant_source))); - } else { - __ Move(kScratchRegister, cgen_->ToHandle(constant_source)); - __ movp(dst, kScratchRegister); - } - } - - } else if (source->IsDoubleRegister()) { - XMMRegister src = cgen_->ToDoubleRegister(source); - if (destination->IsDoubleRegister()) { - __ Movapd(cgen_->ToDoubleRegister(destination), src); - } else { - DCHECK(destination->IsDoubleStackSlot()); - __ Movsd(cgen_->ToOperand(destination), src); - } - } else if (source->IsDoubleStackSlot()) { - Operand src = cgen_->ToOperand(source); - if (destination->IsDoubleRegister()) { - __ Movsd(cgen_->ToDoubleRegister(destination), src); - } else { - DCHECK(destination->IsDoubleStackSlot()); - __ Movsd(kScratchDoubleReg, src); - __ Movsd(cgen_->ToOperand(destination), kScratchDoubleReg); - } - } else { - UNREACHABLE(); - } - - moves_[index].Eliminate(); -} - - -void LGapResolver::EmitSwap(int index) { - LOperand* source = moves_[index].source(); - LOperand* destination = moves_[index].destination(); - - // Dispatch on the source and destination operand kinds. Not all - // combinations are possible. - if (source->IsRegister() && destination->IsRegister()) { - // Swap two general-purpose registers.
- Register src = cgen_->ToRegister(source); - Register dst = cgen_->ToRegister(destination); - __ movp(kScratchRegister, src); - __ movp(src, dst); - __ movp(dst, kScratchRegister); - - } else if ((source->IsRegister() && destination->IsStackSlot()) || - (source->IsStackSlot() && destination->IsRegister())) { - // Swap a general-purpose register and a stack slot. - Register reg = - cgen_->ToRegister(source->IsRegister() ? source : destination); - Operand mem = - cgen_->ToOperand(source->IsRegister() ? destination : source); - __ movp(kScratchRegister, mem); - __ movp(mem, reg); - __ movp(reg, kScratchRegister); - - } else if ((source->IsStackSlot() && destination->IsStackSlot()) || - (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot())) { - // Swap two stack slots or two double stack slots. - Operand src = cgen_->ToOperand(source); - Operand dst = cgen_->ToOperand(destination); - __ Movsd(kScratchDoubleReg, src); - __ movp(kScratchRegister, dst); - __ Movsd(dst, kScratchDoubleReg); - __ movp(src, kScratchRegister); - - } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) { - // Swap two double registers. - XMMRegister source_reg = cgen_->ToDoubleRegister(source); - XMMRegister destination_reg = cgen_->ToDoubleRegister(destination); - __ Movapd(kScratchDoubleReg, source_reg); - __ Movapd(source_reg, destination_reg); - __ Movapd(destination_reg, kScratchDoubleReg); - - } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) { - // Swap a double register and a double stack slot. - DCHECK((source->IsDoubleRegister() && destination->IsDoubleStackSlot()) || - (source->IsDoubleStackSlot() && destination->IsDoubleRegister())); - XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister() - ? source - : destination); - LOperand* other = source->IsDoubleRegister() ? destination : source; - DCHECK(other->IsDoubleStackSlot()); - Operand other_operand = cgen_->ToOperand(other); - __ Movapd(kScratchDoubleReg, reg); - __ Movsd(reg, other_operand); - __ Movsd(other_operand, kScratchDoubleReg); - - } else { - // No other combinations are possible. - UNREACHABLE(); - } - - // The swap of source and destination has executed a move from source to - // destination. - moves_[index].Eliminate(); - - // Any unperformed (including pending) move with a source of either - // this move's source or destination needs to have their source - // changed to reflect the state of affairs after the swap. - for (int i = 0; i < moves_.length(); ++i) { - LMoveOperands other_move = moves_[i]; - if (other_move.Blocks(source)) { - moves_[i].set_source(destination); - } else if (other_move.Blocks(destination)) { - moves_[i].set_source(source); - } - } -} - -#undef __ - -} // namespace internal -} // namespace v8 - -#endif // V8_TARGET_ARCH_X64 diff --git a/src/crankshaft/x64/lithium-gap-resolver-x64.h b/src/crankshaft/x64/lithium-gap-resolver-x64.h deleted file mode 100644 index 641f0ee69f..0000000000 --- a/src/crankshaft/x64/lithium-gap-resolver-x64.h +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#ifndef V8_CRANKSHAFT_X64_LITHIUM_GAP_RESOLVER_X64_H_ -#define V8_CRANKSHAFT_X64_LITHIUM_GAP_RESOLVER_X64_H_ - -#include "src/crankshaft/lithium.h" - -namespace v8 { -namespace internal { - -class LCodeGen; -class LGapResolver; - -class LGapResolver final BASE_EMBEDDED { - public: - explicit LGapResolver(LCodeGen* owner); - - // Resolve a set of parallel moves, emitting assembler instructions. - void Resolve(LParallelMove* parallel_move); - - private: - // Build the initial list of moves. - void BuildInitialMoveList(LParallelMove* parallel_move); - - // Perform the move at the moves_ index in question (possibly requiring - // other moves to satisfy dependencies). - void PerformMove(int index); - - // Emit a move and remove it from the move graph. - void EmitMove(int index); - - // Execute a move by emitting a swap of two operands. The move from - // source to destination is removed from the move graph. - void EmitSwap(int index); - - // Verify the move list before performing moves. - void Verify(); - - LCodeGen* cgen_; - - // List of moves not yet resolved. - ZoneList moves_; -}; - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_X64_LITHIUM_GAP_RESOLVER_X64_H_ diff --git a/src/crankshaft/x64/lithium-x64.cc b/src/crankshaft/x64/lithium-x64.cc deleted file mode 100644 index 3c8ef1ff38..0000000000 --- a/src/crankshaft/x64/lithium-x64.cc +++ /dev/null @@ -1,2454 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/x64/lithium-x64.h" - -#include - -#if V8_TARGET_ARCH_X64 - -#include "src/crankshaft/lithium-inl.h" -#include "src/crankshaft/x64/lithium-codegen-x64.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - -#define DEFINE_COMPILE(type) \ - void L##type::CompileToNative(LCodeGen* generator) { \ - generator->Do##type(this); \ - } -LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE) -#undef DEFINE_COMPILE - - -#ifdef DEBUG -void LInstruction::VerifyCall() { - // Call instructions can use only fixed registers as temporaries and - // outputs because all registers are blocked by the calling convention. - // Inputs operands must use a fixed register or use-at-start policy or - // a non-register policy. 
- DCHECK(Output() == NULL || - LUnallocated::cast(Output())->HasFixedPolicy() || - !LUnallocated::cast(Output())->HasRegisterPolicy()); - for (UseIterator it(this); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - DCHECK(operand->HasFixedPolicy() || - operand->IsUsedAtStart()); - } - for (TempIterator it(this); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy()); - } -} -#endif - - -void LInstruction::PrintTo(StringStream* stream) { - stream->Add("%s ", this->Mnemonic()); - - PrintOutputOperandTo(stream); - - PrintDataTo(stream); - - if (HasEnvironment()) { - stream->Add(" "); - environment()->PrintTo(stream); - } - - if (HasPointerMap()) { - stream->Add(" "); - pointer_map()->PrintTo(stream); - } -} - - -void LInstruction::PrintDataTo(StringStream* stream) { - stream->Add("= "); - for (int i = 0; i < InputCount(); i++) { - if (i > 0) stream->Add(" "); - if (InputAt(i) == NULL) { - stream->Add("NULL"); - } else { - InputAt(i)->PrintTo(stream); - } - } -} - - -void LInstruction::PrintOutputOperandTo(StringStream* stream) { - if (HasResult()) result()->PrintTo(stream); -} - - -void LLabel::PrintDataTo(StringStream* stream) { - LGap::PrintDataTo(stream); - LLabel* rep = replacement(); - if (rep != NULL) { - stream->Add(" Dead block replaced with B%d", rep->block_id()); - } -} - - -bool LGap::IsRedundant() const { - for (int i = 0; i < 4; i++) { - if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) { - return false; - } - } - - return true; -} - - -void LGap::PrintDataTo(StringStream* stream) { - for (int i = 0; i < 4; i++) { - stream->Add("("); - if (parallel_moves_[i] != NULL) { - parallel_moves_[i]->PrintDataTo(stream); - } - stream->Add(") "); - } -} - - -const char* LArithmeticD::Mnemonic() const { - switch (op()) { - case Token::ADD: return "add-d"; - case Token::SUB: return "sub-d"; - case Token::MUL: return "mul-d"; - case Token::DIV: return "div-d"; - case Token::MOD: return "mod-d"; - default: - UNREACHABLE(); - } -} - - -const char* LArithmeticT::Mnemonic() const { - switch (op()) { - case Token::ADD: return "add-t"; - case Token::SUB: return "sub-t"; - case Token::MUL: return "mul-t"; - case Token::MOD: return "mod-t"; - case Token::DIV: return "div-t"; - case Token::BIT_AND: return "bit-and-t"; - case Token::BIT_OR: return "bit-or-t"; - case Token::BIT_XOR: return "bit-xor-t"; - case Token::ROR: return "ror-t"; - case Token::SHL: return "sal-t"; - case Token::SAR: return "sar-t"; - case Token::SHR: return "shr-t"; - default: - UNREACHABLE(); - } -} - - -bool LGoto::HasInterestingComment(LCodeGen* gen) const { - return !gen->IsNextEmittedBlock(block_id()); -} - - -template -bool LTemplateResultInstruction::MustSignExtendResult( - LPlatformChunk* chunk) const { - HValue* hvalue = this->hydrogen_value(); - return hvalue != NULL && - hvalue->representation().IsInteger32() && - chunk->GetDehoistedKeyIds()->Contains(hvalue->id()); -} - - -void LGoto::PrintDataTo(StringStream* stream) { - stream->Add("B%d", block_id()); -} - - -void LBranch::PrintDataTo(StringStream* stream) { - stream->Add("B%d | B%d on ", true_block_id(), false_block_id()); - value()->PrintTo(stream); -} - - -void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if "); - left()->PrintTo(stream); - stream->Add(" %s ", Token::String(op())); - right()->PrintTo(stream); - stream->Add(" then B%d else B%d", true_block_id(), 
false_block_id()); -} - - -void LIsStringAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_string("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LIsSmiAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_smi("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_undetectable("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LStringCompareAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if string_compare("); - left()->PrintTo(stream); - right()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if has_instance_type("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - -void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if class_of_test("); - value()->PrintTo(stream); - stream->Add(", \"%o\") then B%d else B%d", *hydrogen()->class_name(), - true_block_id(), false_block_id()); -} - -void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if typeof "); - value()->PrintTo(stream); - stream->Add(" == \"%s\" then B%d else B%d", - hydrogen()->type_literal()->ToCString().get(), - true_block_id(), false_block_id()); -} - - -void LStoreCodeEntry::PrintDataTo(StringStream* stream) { - stream->Add(" = "); - function()->PrintTo(stream); - stream->Add(".code_entry = "); - code_object()->PrintTo(stream); -} - - -void LInnerAllocatedObject::PrintDataTo(StringStream* stream) { - stream->Add(" = "); - base_object()->PrintTo(stream); - stream->Add(" + "); - offset()->PrintTo(stream); -} - - -void LCallWithDescriptor::PrintDataTo(StringStream* stream) { - for (int i = 0; i < InputCount(); i++) { - InputAt(i)->PrintTo(stream); - stream->Add(" "); - } - stream->Add("#%d / ", arity()); -} - - -void LLoadContextSlot::PrintDataTo(StringStream* stream) { - context()->PrintTo(stream); - stream->Add("[%d]", slot_index()); -} - - -void LStoreContextSlot::PrintDataTo(StringStream* stream) { - context()->PrintTo(stream); - stream->Add("[%d] <- ", slot_index()); - value()->PrintTo(stream); -} - - -void LInvokeFunction::PrintDataTo(StringStream* stream) { - stream->Add("= "); - function()->PrintTo(stream); - stream->Add(" #%d / ", arity()); -} - - -void LCallNewArray::PrintDataTo(StringStream* stream) { - stream->Add("= "); - constructor()->PrintTo(stream); - stream->Add(" #%d / ", arity()); - ElementsKind kind = hydrogen()->elements_kind(); - stream->Add(" (%s) ", ElementsKindToString(kind)); -} - - -void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { - arguments()->PrintTo(stream); - - stream->Add(" length "); - length()->PrintTo(stream); - - stream->Add(" index "); - index()->PrintTo(stream); -} - - -int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) { - if (kind == DOUBLE_REGISTERS && kDoubleSize == 2 * kPointerSize) { - // Skip a slot if for a double-width slot for x32 port. - current_frame_slots_++; - // The spill slot's address is at rbp - (index + 1) * kPointerSize - - // StandardFrameConstants::kFixedFrameSizeFromFp. 
kFixedFrameSizeFromFp is - 2 * kPointerSize, if rbp is aligned at 8-byte boundary, the below "|= 1" - will make sure the spilled doubles are aligned at 8-byte boundary. - // TODO(haitao): make sure rbp is aligned at 8-byte boundary for x32 port. - current_frame_slots_ |= 1; - } - return current_frame_slots_++; -} - - -LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) { - // All stack slots are Double stack slots on x64. - // Alternatively, at some point, start using half-size - // stack slots for int32 values. - int index = GetNextSpillIndex(kind); - if (kind == DOUBLE_REGISTERS) { - return LDoubleStackSlot::Create(index, zone()); - } else { - DCHECK(kind == GENERAL_REGISTERS); - return LStackSlot::Create(index, zone()); - } -} - - -void LStoreNamedField::PrintDataTo(StringStream* stream) { - object()->PrintTo(stream); - std::ostringstream os; - os << hydrogen()->access() << " <- "; - stream->Add(os.str().c_str()); - value()->PrintTo(stream); -} - - -void LLoadKeyed::PrintDataTo(StringStream* stream) { - elements()->PrintTo(stream); - stream->Add("["); - key()->PrintTo(stream); - if (hydrogen()->IsDehoisted()) { - stream->Add(" + %d]", base_offset()); - } else { - stream->Add("]"); - } -} - - -void LStoreKeyed::PrintDataTo(StringStream* stream) { - elements()->PrintTo(stream); - stream->Add("["); - key()->PrintTo(stream); - if (hydrogen()->IsDehoisted()) { - stream->Add(" + %d] <-", base_offset()); - } else { - stream->Add("] <- "); - } - - if (value() == NULL) { - DCHECK(hydrogen()->IsConstantHoleStore() && - hydrogen()->value()->representation().IsDouble()); - stream->Add("<the hole>"); - } else { - value()->PrintTo(stream); - } -} - - -void LTransitionElementsKind::PrintDataTo(StringStream* stream) { - object()->PrintTo(stream); - stream->Add(" %p -> %p", *original_map(), *transitioned_map()); -} - - -LPlatformChunk* LChunkBuilder::Build() { - DCHECK(is_unused()); - chunk_ = new(zone()) LPlatformChunk(info(), graph()); - LPhase phase("L_Building chunk", chunk_); - status_ = BUILDING; - - const ZoneList<HBasicBlock*>* blocks = graph()->blocks(); - for (int i = 0; i < blocks->length(); i++) { - HBasicBlock* next = NULL; - if (i < blocks->length() - 1) next = blocks->at(i + 1); - DoBasicBlock(blocks->at(i), next); - if (is_aborted()) return NULL; - } - status_ = DONE; - return chunk_; -} - - -LUnallocated* LChunkBuilder::ToUnallocated(Register reg) { - return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code()); -} - - -LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) { - return new (zone()) - LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code()); -} - - -LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) { - return Use(value, ToUnallocated(fixed_register)); -} - - -LOperand* LChunkBuilder::UseFixedDouble(HValue* value, XMMRegister reg) { - return Use(value, ToUnallocated(reg)); -} - - -LOperand* LChunkBuilder::UseRegister(HValue* value) { - return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER)); -} - - -LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) { - return Use(value, - new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER, - LUnallocated::USED_AT_START)); -} - - -LOperand* LChunkBuilder::UseTempRegister(HValue* value) { - return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER)); -} - - -LOperand* LChunkBuilder::UseTempRegisterOrConstant(HValue* value) { - return value->IsConstant() - ?
chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseTempRegister(value); -} - - -LOperand* LChunkBuilder::Use(HValue* value) { - return Use(value, new(zone()) LUnallocated(LUnallocated::NONE)); -} - - -LOperand* LChunkBuilder::UseAtStart(HValue* value) { - return Use(value, new(zone()) LUnallocated(LUnallocated::NONE, - LUnallocated::USED_AT_START)); -} - - -LOperand* LChunkBuilder::UseOrConstant(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : Use(value); -} - - -LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseAtStart(value); -} - - -LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseRegister(value); -} - - -LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseRegisterAtStart(value); -} - - -LOperand* LChunkBuilder::UseConstant(HValue* value) { - return chunk_->DefineConstantOperand(HConstant::cast(value)); -} - - -LOperand* LChunkBuilder::UseAny(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : Use(value, new(zone()) LUnallocated(LUnallocated::ANY)); -} - - -LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) { - if (value->EmitAtUses()) { - HInstruction* instr = HInstruction::cast(value); - VisitInstruction(instr); - } - operand->set_virtual_register(value->id()); - return operand; -} - - -LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr, - LUnallocated* result) { - result->set_virtual_register(current_instruction_->id()); - instr->set_result(result); - return instr; -} - - -LInstruction* LChunkBuilder::DefineAsRegister( - LTemplateResultInstruction<1>* instr) { - return Define(instr, - new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER)); -} - - -LInstruction* LChunkBuilder::DefineAsSpilled( - LTemplateResultInstruction<1>* instr, - int index) { - return Define(instr, - new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index)); -} - - -LInstruction* LChunkBuilder::DefineSameAsFirst( - LTemplateResultInstruction<1>* instr) { - return Define(instr, - new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT)); -} - - -LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr, - Register reg) { - return Define(instr, ToUnallocated(reg)); -} - - -LInstruction* LChunkBuilder::DefineFixedDouble( - LTemplateResultInstruction<1>* instr, - XMMRegister reg) { - return Define(instr, ToUnallocated(reg)); -} - - -LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { - HEnvironment* hydrogen_env = current_block_->last_environment(); - return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env); -} - - -LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, - HInstruction* hinstr, - CanDeoptimize can_deoptimize) { - info()->MarkAsNonDeferredCalling(); - -#ifdef DEBUG - instr->VerifyCall(); -#endif - instr->MarkAsCall(); - instr = AssignPointerMap(instr); - - // If instruction does not have side-effects lazy deoptimization - // after the call will try to deoptimize to the point before the call. - // Thus we still need to attach environment to this call even if - // call sequence can not deoptimize eagerly. 
- bool needs_environment = - (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || - !hinstr->HasObservableSideEffects(); - if (needs_environment && !instr->HasEnvironment()) { - instr = AssignEnvironment(instr); - // We can't really figure out if the environment is needed or not. - instr->environment()->set_has_been_used(); - } - - return instr; -} - - -LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { - DCHECK(!instr->HasPointerMap()); - instr->set_pointer_map(new(zone()) LPointerMap(zone())); - return instr; -} - - -LUnallocated* LChunkBuilder::TempRegister() { - LUnallocated* operand = - new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER); - int vreg = allocator_->GetVirtualRegister(); - if (!allocator_->AllocationOk()) { - Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister); - vreg = 0; - } - operand->set_virtual_register(vreg); - return operand; -} - - -LOperand* LChunkBuilder::FixedTemp(Register reg) { - LUnallocated* operand = ToUnallocated(reg); - DCHECK(operand->HasFixedPolicy()); - return operand; -} - - -LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) { - LUnallocated* operand = ToUnallocated(reg); - DCHECK(operand->HasFixedPolicy()); - return operand; -} - - -LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) { - return new(zone()) LLabel(instr->block()); -} - - -LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) { - return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value()))); -} - - -LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) { - UNREACHABLE(); -} - - -LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) { - return AssignEnvironment(new(zone()) LDeoptimize); -} - - -LInstruction* LChunkBuilder::DoShift(Token::Value op, - HBitwiseBinaryOperation* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* left = UseRegisterAtStart(instr->left()); - - HValue* right_value = instr->right(); - LOperand* right = NULL; - int constant_value = 0; - bool does_deopt = false; - if (right_value->IsConstant()) { - HConstant* constant = HConstant::cast(right_value); - right = chunk_->DefineConstantOperand(constant); - constant_value = constant->Integer32Value() & 0x1f; - if (SmiValuesAre31Bits() && instr->representation().IsSmi() && - constant_value > 0) { - // Left shift can deoptimize if we shift by > 0 and the result - // cannot be truncated to smi. - does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi); - } - } else { - right = UseFixed(right_value, rcx); - } - - // Shift operations can only deoptimize if we do a logical shift by 0 and - // the result cannot be truncated to int32. - if (op == Token::SHR && constant_value == 0) { - does_deopt = !instr->CheckFlag(HInstruction::kUint32); - } - - LInstruction* result = - DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt)); - return does_deopt ? 
AssignEnvironment(result) : result; - } else { - return DoArithmeticT(op, instr); - } -} - - -LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op, - HArithmeticBinaryOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - DCHECK(instr->right()->representation().IsDouble()); - if (op == Token::MOD) { - LOperand* left = UseFixedDouble(instr->BetterLeftOperand(), xmm0); - LOperand* right = UseFixedDouble(instr->BetterRightOperand(), xmm1); - LArithmeticD* result = new(zone()) LArithmeticD(op, left, right); - return MarkAsCall(DefineFixedDouble(result, xmm0), instr); - } else { - LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); - LOperand* right = UseRegisterAtStart(instr->BetterRightOperand()); - LArithmeticD* result = new(zone()) LArithmeticD(op, left, right); - return CpuFeatures::IsSupported(AVX) ? DefineAsRegister(result) - : DefineSameAsFirst(result); - } -} - - -LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op, - HBinaryOperation* instr) { - HValue* left = instr->left(); - HValue* right = instr->right(); - DCHECK(left->representation().IsTagged()); - DCHECK(right->representation().IsTagged()); - LOperand* context = UseFixed(instr->context(), rsi); - LOperand* left_operand = UseFixed(left, rdx); - LOperand* right_operand = UseFixed(right, rax); - LArithmeticT* result = - new(zone()) LArithmeticT(op, context, left_operand, right_operand); - return MarkAsCall(DefineFixed(result, rax), instr); -} - - -void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { - DCHECK(is_building()); - current_block_ = block; - next_block_ = next_block; - if (block->IsStartBlock()) { - block->UpdateEnvironment(graph_->start_environment()); - argument_count_ = 0; - } else if (block->predecessors()->length() == 1) { - // We have a single predecessor => copy environment and outgoing - // argument count from the predecessor. - DCHECK(block->phis()->length() == 0); - HBasicBlock* pred = block->predecessors()->at(0); - HEnvironment* last_environment = pred->last_environment(); - DCHECK(last_environment != NULL); - // Only copy the environment, if it is later used again. - if (pred->end()->SecondSuccessor() == NULL) { - DCHECK(pred->end()->FirstSuccessor() == block); - } else { - if (pred->end()->FirstSuccessor()->block_id() > block->block_id() || - pred->end()->SecondSuccessor()->block_id() > block->block_id()) { - last_environment = last_environment->Copy(); - } - } - block->UpdateEnvironment(last_environment); - DCHECK(pred->argument_count() >= 0); - argument_count_ = pred->argument_count(); - } else { - // We are at a state join => process phis. - HBasicBlock* pred = block->predecessors()->at(0); - // No need to copy the environment, it cannot be used later. - HEnvironment* last_environment = pred->last_environment(); - for (int i = 0; i < block->phis()->length(); ++i) { - HPhi* phi = block->phis()->at(i); - if (phi->HasMergedIndex()) { - last_environment->SetValueAt(phi->merged_index(), phi); - } - } - for (int i = 0; i < block->deleted_phis()->length(); ++i) { - if (block->deleted_phis()->at(i) < last_environment->length()) { - last_environment->SetValueAt(block->deleted_phis()->at(i), - graph_->GetConstantUndefined()); - } - } - block->UpdateEnvironment(last_environment); - // Pick up the outgoing argument count of one of the predecessors. 
- argument_count_ = pred->argument_count(); - } - HInstruction* current = block->first(); - int start = chunk_->instructions()->length(); - while (current != NULL && !is_aborted()) { - // Code for constants in registers is generated lazily. - if (!current->EmitAtUses()) { - VisitInstruction(current); - } - current = current->next(); - } - int end = chunk_->instructions()->length() - 1; - if (end >= start) { - block->set_first_instruction_index(start); - block->set_last_instruction_index(end); - } - block->set_argument_count(argument_count_); - next_block_ = NULL; - current_block_ = NULL; -} - - -void LChunkBuilder::VisitInstruction(HInstruction* current) { - HInstruction* old_current = current_instruction_; - current_instruction_ = current; - - LInstruction* instr = NULL; - if (current->CanReplaceWithDummyUses()) { - if (current->OperandCount() == 0) { - instr = DefineAsRegister(new(zone()) LDummy()); - } else { - DCHECK(!current->OperandAt(0)->IsControlInstruction()); - instr = DefineAsRegister(new(zone()) - LDummyUse(UseAny(current->OperandAt(0)))); - } - for (int i = 1; i < current->OperandCount(); ++i) { - if (current->OperandAt(i)->IsControlInstruction()) continue; - LInstruction* dummy = - new(zone()) LDummyUse(UseAny(current->OperandAt(i))); - dummy->set_hydrogen_value(current); - chunk_->AddInstruction(dummy, current_block_); - } - } else { - HBasicBlock* successor; - if (current->IsControlInstruction() && - HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) && - successor != NULL) { - instr = new(zone()) LGoto(successor); - } else { - instr = current->CompileToLithium(this); - } - } - - argument_count_ += current->argument_delta(); - DCHECK(argument_count_ >= 0); - - if (instr != NULL) { - AddInstruction(instr, current); - } - - current_instruction_ = old_current; -} - - -void LChunkBuilder::AddInstruction(LInstruction* instr, - HInstruction* hydrogen_val) { - // Associate the hydrogen instruction first, since we may need it for - // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below. - instr->set_hydrogen_value(hydrogen_val); - -#if DEBUG - // Make sure that the lithium instruction has either no fixed register - // constraints in temps or the result OR no uses that are only used at - // start. If this invariant doesn't hold, the register allocator can decide - // to insert a split of a range immediately before the instruction due to an - // already allocated register needing to be used for the instruction's fixed - // register constraint. In this case, The register allocator won't see an - // interference between the split child and the use-at-start (it would if - // the it was just a plain use), so it is free to move the split child into - // the same register that is used for the use-at-start. 
- // See https://code.google.com/p/chromium/issues/detail?id=201590 - if (!(instr->ClobbersRegisters() && - instr->ClobbersDoubleRegisters(isolate()))) { - int fixed = 0; - int used_at_start = 0; - for (UseIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->IsUsedAtStart()) ++used_at_start; - } - if (instr->Output() != NULL) { - if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed; - } - for (TempIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->HasFixedPolicy()) ++fixed; - } - DCHECK(fixed == 0 || used_at_start == 0); - } -#endif - - if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) { - instr = AssignPointerMap(instr); - } - if (FLAG_stress_environments && !instr->HasEnvironment()) { - instr = AssignEnvironment(instr); - } - chunk_->AddInstruction(instr, current_block_); - - CreateLazyBailoutForCall(current_block_, instr, hydrogen_val); -} - - -LInstruction* LChunkBuilder::DoGoto(HGoto* instr) { - return new(zone()) LGoto(instr->FirstSuccessor()); -} - - -LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) { - LInstruction* result = new (zone()) LPrologue(); - if (info_->scope()->NeedsContext()) { - result = MarkAsCall(result, instr); - } - return result; -} - - -LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) { - return new(zone()) LDebugBreak(); -} - - -LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { - HValue* value = instr->value(); - Representation r = value->representation(); - HType type = value->type(); - ToBooleanHints expected = instr->expected_input_types(); - if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny; - - bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() || - type.IsJSArray() || type.IsHeapNumber() || type.IsString(); - LInstruction* branch = new(zone()) LBranch(UseRegister(value)); - if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) && - (expected & ToBooleanHint::kNeedsMap)) || - expected != ToBooleanHint::kAny)) { - branch = AssignEnvironment(branch); - } - return branch; -} - - -LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - return new(zone()) LCmpMapAndBranch(value); -} - - -LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) { - info()->MarkAsRequiresFrame(); - return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value()))); -} - - -LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) { - info()->MarkAsRequiresFrame(); - return DefineAsRegister(new(zone()) LArgumentsElements); -} - - -LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch( - HHasInPrototypeChainAndBranch* instr) { - LOperand* object = UseRegister(instr->object()); - LOperand* prototype = UseRegister(instr->prototype()); - LHasInPrototypeChainAndBranch* result = - new (zone()) LHasInPrototypeChainAndBranch(object, prototype); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) { - LOperand* receiver = UseRegister(instr->receiver()); - LOperand* function = UseRegisterAtStart(instr->function()); - LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function); - return AssignEnvironment(DefineSameAsFirst(result)); -} - - -LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) { - LOperand* 
function = UseFixed(instr->function(), rdi); - LOperand* receiver = UseFixed(instr->receiver(), rax); - LOperand* length = UseFixed(instr->length(), rbx); - LOperand* elements = UseFixed(instr->elements(), rcx); - LApplyArguments* result = new(zone()) LApplyArguments(function, - receiver, - length, - elements); - return MarkAsCall(DefineFixed(result, rax), instr, CAN_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) { - int argc = instr->OperandCount(); - for (int i = 0; i < argc; ++i) { - LOperand* argument = UseOrConstant(instr->argument(i)); - AddInstruction(new(zone()) LPushArgument(argument), instr); - } - return NULL; -} - - -LInstruction* LChunkBuilder::DoStoreCodeEntry( - HStoreCodeEntry* store_code_entry) { - LOperand* function = UseRegister(store_code_entry->function()); - LOperand* code_object = UseTempRegister(store_code_entry->code_object()); - return new(zone()) LStoreCodeEntry(function, code_object); -} - - -LInstruction* LChunkBuilder::DoInnerAllocatedObject( - HInnerAllocatedObject* instr) { - LOperand* base_object = UseRegisterAtStart(instr->base_object()); - LOperand* offset = UseRegisterOrConstantAtStart(instr->offset()); - return DefineAsRegister( - new(zone()) LInnerAllocatedObject(base_object, offset)); -} - - -LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) { - return instr->HasNoUses() - ? NULL - : DefineAsRegister(new(zone()) LThisFunction); -} - - -LInstruction* LChunkBuilder::DoContext(HContext* instr) { - if (instr->HasNoUses()) return NULL; - - if (info()->IsStub()) { - return DefineFixed(new(zone()) LContext, rsi); - } - - return DefineAsRegister(new(zone()) LContext); -} - - -LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) { - LOperand* context = UseFixed(instr->context(), rsi); - return MarkAsCall(new(zone()) LDeclareGlobals(context), instr); -} - - -LInstruction* LChunkBuilder::DoCallWithDescriptor( - HCallWithDescriptor* instr) { - CallInterfaceDescriptor descriptor = instr->descriptor(); - DCHECK_EQ(descriptor.GetParameterCount() + - LCallWithDescriptor::kImplicitRegisterParameterCount, - instr->OperandCount()); - - LOperand* target = UseRegisterOrConstantAtStart(instr->target()); - ZoneList<LOperand*> ops(instr->OperandCount(), zone()); - // Target - ops.Add(target, zone()); - // Context - LOperand* op = UseFixed(instr->OperandAt(1), rsi); - ops.Add(op, zone()); - // Load register parameters. - int i = 0; - for (; i < descriptor.GetRegisterParameterCount(); i++) { - op = UseFixed(instr->OperandAt( - i + LCallWithDescriptor::kImplicitRegisterParameterCount), - descriptor.GetRegisterParameter(i)); - ops.Add(op, zone()); - } - // Push stack parameters. 
- for (; i < descriptor.GetParameterCount(); i++) { - op = UseAny(instr->OperandAt( - i + LCallWithDescriptor::kImplicitRegisterParameterCount)); - AddInstruction(new (zone()) LPushArgument(op), instr); - } - - LCallWithDescriptor* result = new(zone()) LCallWithDescriptor( - descriptor, ops, zone()); - if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) { - result->MarkAsSyntacticTailCall(); - } - return MarkAsCall(DefineFixed(result, rax), instr); -} - - -LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) { - LOperand* context = UseFixed(instr->context(), rsi); - LOperand* function = UseFixed(instr->function(), rdi); - LInvokeFunction* result = new(zone()) LInvokeFunction(context, function); - if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) { - result->MarkAsSyntacticTailCall(); - } - return MarkAsCall(DefineFixed(result, rax), instr, CANNOT_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { - switch (instr->op()) { - case kMathFloor: - return DoMathFloor(instr); - case kMathRound: - return DoMathRound(instr); - case kMathFround: - return DoMathFround(instr); - case kMathAbs: - return DoMathAbs(instr); - case kMathCos: - return DoMathCos(instr); - case kMathLog: - return DoMathLog(instr); - case kMathExp: - return DoMathExp(instr); - case kMathSin: - return DoMathSin(instr); - case kMathSqrt: - return DoMathSqrt(instr); - case kMathPowHalf: - return DoMathPowHalf(instr); - case kMathClz32: - return DoMathClz32(instr); - default: - UNREACHABLE(); - } -} - -LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) { - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseRegisterAtStart(instr->value()); - if (instr->representation().IsInteger32()) { - LMathFloorI* result = new (zone()) LMathFloorI(input); - return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); - } else { - DCHECK(instr->representation().IsDouble()); - LMathFloorD* result = new (zone()) LMathFloorD(input); - return DefineAsRegister(result); - } -} - -LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) { - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseRegister(instr->value()); - if (instr->representation().IsInteger32()) { - LOperand* temp = FixedTemp(xmm4); - LMathRoundI* result = new (zone()) LMathRoundI(input, temp); - return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); - } else { - DCHECK(instr->representation().IsDouble()); - LMathRoundD* result = new (zone()) LMathRoundD(input); - return DefineAsRegister(result); - } -} - -LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) { - LOperand* input = UseRegister(instr->value()); - LMathFround* result = new (zone()) LMathFround(input); - return DefineAsRegister(result); -} - - -LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) { - LOperand* context = UseAny(instr->context()); - LOperand* input = UseRegisterAtStart(instr->value()); - LInstruction* result = - DefineSameAsFirst(new(zone()) LMathAbs(context, input)); - Representation r = instr->value()->representation(); - if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result); - if (!r.IsDouble()) result = AssignEnvironment(result); - return result; -} - - -LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = 
UseFixedDouble(instr->value(), xmm0); - return MarkAsCall(DefineFixedDouble(new (zone()) LMathLog(input), xmm0), - instr); -} - - -LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) { - LOperand* input = UseRegisterAtStart(instr->value()); - LMathClz32* result = new(zone()) LMathClz32(input); - return DefineAsRegister(result); -} - -LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseFixedDouble(instr->value(), xmm0); - return MarkAsCall(DefineFixedDouble(new (zone()) LMathCos(input), xmm0), - instr); -} - -LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseFixedDouble(instr->value(), xmm0); - return MarkAsCall(DefineFixedDouble(new (zone()) LMathExp(input), xmm0), - instr); -} - -LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseFixedDouble(instr->value(), xmm0); - return MarkAsCall(DefineFixedDouble(new (zone()) LMathSin(input), xmm0), - instr); -} - -LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) { - LOperand* input = UseAtStart(instr->value()); - return DefineAsRegister(new(zone()) LMathSqrt(input)); -} - - -LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) { - LOperand* input = UseRegisterAtStart(instr->value()); - LMathPowHalf* result = new(zone()) LMathPowHalf(input); - return DefineSameAsFirst(result); -} - - -LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) { - LOperand* context = UseFixed(instr->context(), rsi); - LOperand* constructor = UseFixed(instr->constructor(), rdi); - LCallNewArray* result = new(zone()) LCallNewArray(context, constructor); - return MarkAsCall(DefineFixed(result, rax), instr); -} - - -LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) { - LOperand* context = UseFixed(instr->context(), rsi); - LCallRuntime* result = new(zone()) LCallRuntime(context); - return MarkAsCall(DefineFixed(result, rax), instr); -} - - -LInstruction* LChunkBuilder::DoRor(HRor* instr) { - return DoShift(Token::ROR, instr); -} - - -LInstruction* LChunkBuilder::DoShr(HShr* instr) { - return DoShift(Token::SHR, instr); -} - - -LInstruction* LChunkBuilder::DoSar(HSar* instr) { - return DoShift(Token::SAR, instr); -} - - -LInstruction* LChunkBuilder::DoShl(HShl* instr) { - return DoShift(Token::SHL, instr); -} - - -LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32)); - - LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); - LOperand* right; - if (SmiValuesAre32Bits() && instr->representation().IsSmi()) { - // We don't support tagged immediates, so we request it in a register. 
- right = UseRegisterAtStart(instr->BetterRightOperand()); - } else { - right = UseOrConstantAtStart(instr->BetterRightOperand()); - } - return DefineSameAsFirst(new(zone()) LBitI(left, right)); - } else { - return DoArithmeticT(instr->op(), instr); - } -} - - -LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I( - dividend, divisor)); - if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || - (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) || - (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && - divisor != 1 && divisor != -1)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) { - DCHECK(instr->representation().IsInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LOperand* temp1 = FixedTemp(rax); - LOperand* temp2 = FixedTemp(rdx); - LInstruction* result = DefineFixed(new(zone()) LDivByConstI( - dividend, divisor, temp1, temp2), rdx); - if (divisor == 0 || - (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || - !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoDivI(HDiv* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseFixed(instr->left(), rax); - LOperand* divisor = UseRegister(instr->right()); - LOperand* temp = FixedTemp(rdx); - LInstruction* result = DefineFixed(new(zone()) LDivI( - dividend, divisor, temp), rax); - if (instr->CheckFlag(HValue::kCanBeDivByZero) || - instr->CheckFlag(HValue::kBailoutOnMinusZero) || - instr->CheckFlag(HValue::kCanOverflow) || - !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoDiv(HDiv* instr) { - if (instr->representation().IsSmiOrInteger32()) { - if (instr->RightIsPowerOf2()) { - return DoDivByPowerOf2I(instr); - } else if (instr->right()->IsConstant()) { - return DoDivByConstI(instr); - } else { - return DoDivI(instr); - } - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::DIV, instr); - } else { - return DoArithmeticT(Token::DIV, instr); - } -} - - -LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) { - LOperand* dividend = UseRegisterAtStart(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = DefineSameAsFirst(new(zone()) LFlooringDivByPowerOf2I( - dividend, divisor)); - if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || - (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* 
LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) { - DCHECK(instr->representation().IsInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LOperand* temp1 = FixedTemp(rax); - LOperand* temp2 = FixedTemp(rdx); - LOperand* temp3 = - ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) || - (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ? - NULL : TempRegister(); - LInstruction* result = - DefineFixed(new(zone()) LFlooringDivByConstI(dividend, - divisor, - temp1, - temp2, - temp3), - rdx); - if (divisor == 0 || - (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseFixed(instr->left(), rax); - LOperand* divisor = UseRegister(instr->right()); - LOperand* temp = FixedTemp(rdx); - LInstruction* result = DefineFixed(new(zone()) LFlooringDivI( - dividend, divisor, temp), rax); - if (instr->CheckFlag(HValue::kCanBeDivByZero) || - instr->CheckFlag(HValue::kBailoutOnMinusZero) || - instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) { - if (instr->RightIsPowerOf2()) { - return DoFlooringDivByPowerOf2I(instr); - } else if (instr->right()->IsConstant()) { - return DoFlooringDivByConstI(instr); - } else { - return DoFlooringDivI(instr); - } -} - - -LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegisterAtStart(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I( - dividend, divisor)); - if (instr->CheckFlag(HValue::kLeftCanBeNegative) && - instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LOperand* temp1 = FixedTemp(rax); - LOperand* temp2 = FixedTemp(rdx); - LInstruction* result = DefineFixed(new(zone()) LModByConstI( - dividend, divisor, temp1, temp2), rax); - if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoModI(HMod* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = 
UseFixed(instr->left(), rax); - LOperand* divisor = UseRegister(instr->right()); - LOperand* temp = FixedTemp(rdx); - LInstruction* result = DefineFixed(new(zone()) LModI( - dividend, divisor, temp), rdx); - if (instr->CheckFlag(HValue::kCanBeDivByZero) || - instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoMod(HMod* instr) { - if (instr->representation().IsSmiOrInteger32()) { - if (instr->RightIsPowerOf2()) { - return DoModByPowerOf2I(instr); - } else if (instr->right()->IsConstant()) { - return DoModByConstI(instr); - } else { - return DoModI(instr); - } - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::MOD, instr); - } else { - return DoArithmeticT(Token::MOD, instr); - } -} - - -LInstruction* LChunkBuilder::DoMul(HMul* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); - HValue* h_right = instr->BetterRightOperand(); - LOperand* right = UseOrConstant(h_right); - LMulI* mul = new(zone()) LMulI(left, right); - int constant_value = - h_right->IsConstant() ? HConstant::cast(h_right)->Integer32Value() : 0; - // |needs_environment| must mirror the cases where LCodeGen::DoMulI calls - // |DeoptimizeIf|. - bool needs_environment = - instr->CheckFlag(HValue::kCanOverflow) || - (instr->CheckFlag(HValue::kBailoutOnMinusZero) && - (!right->IsConstantOperand() || constant_value <= 0)); - if (needs_environment) { - AssignEnvironment(mul); - } - return DefineSameAsFirst(mul); - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::MUL, instr); - } else { - return DoArithmeticT(Token::MUL, instr); - } -} - - -LInstruction* LChunkBuilder::DoSub(HSub* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right; - if (SmiValuesAre32Bits() && instr->representation().IsSmi()) { - // We don't support tagged immediates, so we request it in a register. - right = UseRegisterAtStart(instr->right()); - } else { - right = UseOrConstantAtStart(instr->right()); - } - LSubI* sub = new(zone()) LSubI(left, right); - LInstruction* result = DefineSameAsFirst(sub); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::SUB, instr); - } else { - return DoArithmeticT(Token::SUB, instr); - } -} - - -LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { - if (instr->representation().IsSmiOrInteger32()) { - // Check to see if it would be advantageous to use an lea instruction rather - // than an add. This is the case when no overflow check is needed and there - // are multiple uses of the add's inputs, so using a 3-register add will - // preserve all input values for later uses. 
- bool use_lea = LAddI::UseLea(instr); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); - HValue* right_candidate = instr->BetterRightOperand(); - LOperand* right; - if (SmiValuesAre32Bits() && instr->representation().IsSmi()) { - // We cannot add a tagged immediate to a tagged value, - // so we request it in a register. - right = UseRegisterAtStart(right_candidate); - } else { - right = use_lea ? UseRegisterOrConstantAtStart(right_candidate) - : UseOrConstantAtStart(right_candidate); - } - LAddI* add = new(zone()) LAddI(left, right); - bool can_overflow = instr->CheckFlag(HValue::kCanOverflow); - LInstruction* result = use_lea ? DefineAsRegister(add) - : DefineSameAsFirst(add); - if (can_overflow) { - result = AssignEnvironment(result); - } - return result; - } else if (instr->representation().IsExternal()) { - DCHECK(instr->IsConsistentExternalRepresentation()); - DCHECK(!instr->CheckFlag(HValue::kCanOverflow)); - bool use_lea = LAddI::UseLea(instr); - LOperand* left = UseRegisterAtStart(instr->left()); - HValue* right_candidate = instr->right(); - LOperand* right = use_lea - ? UseRegisterOrConstantAtStart(right_candidate) - : UseOrConstantAtStart(right_candidate); - LAddI* add = new(zone()) LAddI(left, right); - LInstruction* result = use_lea - ? DefineAsRegister(add) - : DefineSameAsFirst(add); - return result; - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::ADD, instr); - } else { - return DoArithmeticT(Token::ADD, instr); - } - return NULL; -} - - -LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) { - LOperand* left = NULL; - LOperand* right = NULL; - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - if (instr->representation().IsSmi()) { - left = UseRegisterAtStart(instr->BetterLeftOperand()); - right = UseAtStart(instr->BetterRightOperand()); - } else if (instr->representation().IsInteger32()) { - left = UseRegisterAtStart(instr->BetterLeftOperand()); - right = UseOrConstantAtStart(instr->BetterRightOperand()); - } else { - DCHECK(instr->representation().IsDouble()); - left = UseRegisterAtStart(instr->left()); - right = UseRegisterAtStart(instr->right()); - } - LMathMinMax* minmax = new(zone()) LMathMinMax(left, right); - return DefineSameAsFirst(minmax); -} - - -LInstruction* LChunkBuilder::DoPower(HPower* instr) { - DCHECK(instr->representation().IsDouble()); - // We call a C function for double power. It can't trigger a GC. - // We need to use fixed result register for the call. - Representation exponent_type = instr->right()->representation(); - DCHECK(instr->left()->representation().IsDouble()); - LOperand* left = UseFixedDouble(instr->left(), xmm2); - LOperand* right = - exponent_type.IsDouble() - ? 
UseFixedDouble(instr->right(), xmm1) - : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent()); - LPower* result = new(zone()) LPower(left, right); - return MarkAsCall(DefineFixedDouble(result, xmm3), instr, - CAN_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { - DCHECK(instr->left()->representation().IsTagged()); - DCHECK(instr->right()->representation().IsTagged()); - LOperand* context = UseFixed(instr->context(), rsi); - LOperand* left = UseFixed(instr->left(), rdx); - LOperand* right = UseFixed(instr->right(), rax); - LCmpT* result = new(zone()) LCmpT(context, left, right); - return MarkAsCall(DefineFixed(result, rax), instr); -} - - -LInstruction* LChunkBuilder::DoCompareNumericAndBranch( - HCompareNumericAndBranch* instr) { - Representation r = instr->representation(); - if (r.IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(r)); - DCHECK(instr->right()->representation().Equals(r)); - LOperand* left = UseRegisterOrConstantAtStart(instr->left()); - LOperand* right = UseOrConstantAtStart(instr->right()); - return new(zone()) LCompareNumericAndBranch(left, right); - } else { - DCHECK(r.IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - DCHECK(instr->right()->representation().IsDouble()); - LOperand* left; - LOperand* right; - if (instr->left()->IsConstant() && instr->right()->IsConstant()) { - left = UseRegisterOrConstantAtStart(instr->left()); - right = UseRegisterOrConstantAtStart(instr->right()); - } else { - left = UseRegisterAtStart(instr->left()); - right = UseRegisterAtStart(instr->right()); - } - return new(zone()) LCompareNumericAndBranch(left, right); - } -} - - -LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch( - HCompareObjectEqAndBranch* instr) { - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterOrConstantAtStart(instr->right()); - return new(zone()) LCmpObjectEqAndBranch(left, right); -} - - -LInstruction* LChunkBuilder::DoCompareHoleAndBranch( - HCompareHoleAndBranch* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - return new(zone()) LCmpHoleAndBranch(value); -} - - -LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* temp = TempRegister(); - return new(zone()) LIsStringAndBranch(value, temp); -} - - -LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - return new(zone()) LIsSmiAndBranch(Use(instr->value())); -} - - -LInstruction* LChunkBuilder::DoIsUndetectableAndBranch( - HIsUndetectableAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* temp = TempRegister(); - return new(zone()) LIsUndetectableAndBranch(value, temp); -} - - -LInstruction* LChunkBuilder::DoStringCompareAndBranch( - HStringCompareAndBranch* instr) { - - DCHECK(instr->left()->representation().IsTagged()); - DCHECK(instr->right()->representation().IsTagged()); - LOperand* context = UseFixed(instr->context(), rsi); - LOperand* left = UseFixed(instr->left(), rdx); - LOperand* right = UseFixed(instr->right(), rax); - LStringCompareAndBranch* result = - new(zone()) LStringCompareAndBranch(context, left, right); - - return MarkAsCall(result, instr); -} - - -LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch( - HHasInstanceTypeAndBranch* 
instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - return new(zone()) LHasInstanceTypeAndBranch(value); -} - -LInstruction* LChunkBuilder::DoClassOfTestAndBranch( - HClassOfTestAndBranch* instr) { - LOperand* value = UseRegister(instr->value()); - return new (zone()) - LClassOfTestAndBranch(value, TempRegister(), TempRegister()); -} - -LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) { - LOperand* string = UseRegisterAtStart(instr->string()); - LOperand* index = UseRegisterOrConstantAtStart(instr->index()); - return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index)); -} - - -LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) { - LOperand* string = UseRegisterAtStart(instr->string()); - LOperand* index = FLAG_debug_code - ? UseRegisterAtStart(instr->index()) - : UseRegisterOrConstantAtStart(instr->index()); - LOperand* value = FLAG_debug_code - ? UseRegisterAtStart(instr->value()) - : UseRegisterOrConstantAtStart(instr->value()); - LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), rsi) : NULL; - LInstruction* result = new(zone()) LSeqStringSetChar(context, string, - index, value); - if (FLAG_debug_code) { - result = MarkAsCall(result, instr); - } - return result; -} - - -LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { - if (!FLAG_debug_code && instr->skip_check()) return NULL; - LOperand* index = UseRegisterOrConstantAtStart(instr->index()); - LOperand* length = !index->IsConstantOperand() - ? UseOrConstantAtStart(instr->length()) - : UseAtStart(instr->length()); - LInstruction* result = new(zone()) LBoundsCheck(index, length); - if (!FLAG_debug_code || !instr->skip_check()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) { - // The control instruction marking the end of a block that completed - // abruptly (e.g., threw an exception). There is nothing specific to do. - return NULL; -} - - -LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) { - return NULL; -} - - -LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) { - // All HForceRepresentation instructions should be eliminated in the - // representation change phase of Hydrogen. - UNREACHABLE(); -} - - -LInstruction* LChunkBuilder::DoChange(HChange* instr) { - Representation from = instr->from(); - Representation to = instr->to(); - HValue* val = instr->value(); - if (from.IsSmi()) { - if (to.IsTagged()) { - LOperand* value = UseRegister(val); - return DefineSameAsFirst(new(zone()) LDummyUse(value)); - } - from = Representation::Tagged(); - } - if (from.IsTagged()) { - if (to.IsDouble()) { - LOperand* value = UseRegister(val); - LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value)); - if (!val->representation().IsSmi()) result = AssignEnvironment(result); - return result; - } else if (to.IsSmi()) { - LOperand* value = UseRegister(val); - if (val->type().IsSmi()) { - return DefineSameAsFirst(new(zone()) LDummyUse(value)); - } - return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value))); - } else { - DCHECK(to.IsInteger32()); - if (val->type().IsSmi() || val->representation().IsSmi()) { - LOperand* value = UseRegister(val); - return DefineSameAsFirst(new(zone()) LSmiUntag(value, false)); - } else { - LOperand* value = UseRegister(val); - bool truncating = instr->CanTruncateToInt32(); - LOperand* xmm_temp = truncating ? 
NULL : FixedTemp(xmm1); - LInstruction* result = - DefineSameAsFirst(new(zone()) LTaggedToI(value, xmm_temp)); - if (!val->representation().IsSmi()) result = AssignEnvironment(result); - return result; - } - } - } else if (from.IsDouble()) { - if (to.IsTagged()) { - info()->MarkAsDeferredCalling(); - LOperand* value = UseRegister(val); - LOperand* temp = TempRegister(); - LUnallocated* result_temp = TempRegister(); - LNumberTagD* result = new(zone()) LNumberTagD(value, temp); - return AssignPointerMap(Define(result, result_temp)); - } else if (to.IsSmi()) { - LOperand* value = UseRegister(val); - return AssignEnvironment( - DefineAsRegister(new(zone()) LDoubleToSmi(value))); - } else { - DCHECK(to.IsInteger32()); - LOperand* value = UseRegister(val); - LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value)); - if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result); - return result; - } - } else if (from.IsInteger32()) { - info()->MarkAsDeferredCalling(); - if (to.IsTagged()) { - if (!instr->CheckFlag(HValue::kCanOverflow)) { - LOperand* value = UseRegister(val); - return DefineAsRegister(new(zone()) LSmiTag(value)); - } else if (val->CheckFlag(HInstruction::kUint32)) { - LOperand* value = UseRegister(val); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = FixedTemp(xmm1); - LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2); - return AssignPointerMap(DefineSameAsFirst(result)); - } else { - LOperand* value = UseRegister(val); - LOperand* temp1 = SmiValuesAre32Bits() ? NULL : TempRegister(); - LOperand* temp2 = SmiValuesAre32Bits() ? NULL : FixedTemp(xmm1); - LNumberTagI* result = new(zone()) LNumberTagI(value, temp1, temp2); - return AssignPointerMap(DefineSameAsFirst(result)); - } - } else if (to.IsSmi()) { - LOperand* value = UseRegister(val); - LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value)); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; - } else { - DCHECK(to.IsDouble()); - if (val->CheckFlag(HInstruction::kUint32)) { - return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val))); - } else { - LOperand* value = Use(val); - return DefineAsRegister(new(zone()) LInteger32ToDouble(value)); - } - } - } - UNREACHABLE(); -} - - -LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - LInstruction* result = new(zone()) LCheckNonSmi(value); - if (!instr->value()->type().IsHeapObject()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - return AssignEnvironment(new(zone()) LCheckSmi(value)); -} - - -LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered( - HCheckArrayBufferNotNeutered* instr) { - LOperand* view = UseRegisterAtStart(instr->value()); - LCheckArrayBufferNotNeutered* result = - new (zone()) LCheckArrayBufferNotNeutered(view); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - LCheckInstanceType* result = new(zone()) LCheckInstanceType(value); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - return AssignEnvironment(new(zone()) LCheckValue(value)); -} - - -LInstruction* 
LChunkBuilder::DoCheckMaps(HCheckMaps* instr) { - if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps; - LOperand* value = UseRegisterAtStart(instr->value()); - LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value)); - if (instr->HasMigrationTarget()) { - info()->MarkAsDeferredCalling(); - result = AssignPointerMap(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) { - HValue* value = instr->value(); - Representation input_rep = value->representation(); - LOperand* reg = UseRegister(value); - if (input_rep.IsDouble()) { - return DefineAsRegister(new(zone()) LClampDToUint8(reg)); - } else if (input_rep.IsInteger32()) { - return DefineSameAsFirst(new(zone()) LClampIToUint8(reg)); - } else { - DCHECK(input_rep.IsSmiOrTagged()); - // Register allocator doesn't (yet) support allocation of double - // temps. Reserve xmm1 explicitly. - LClampTToUint8* result = new(zone()) LClampTToUint8(reg, - FixedTemp(xmm1)); - return AssignEnvironment(DefineSameAsFirst(result)); - } -} - - -LInstruction* LChunkBuilder::DoReturn(HReturn* instr) { - LOperand* context = info()->IsStub() ? UseFixed(instr->context(), rsi) : NULL; - LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count()); - return new(zone()) LReturn( - UseFixed(instr->value(), rax), context, parameter_count); -} - - -LInstruction* LChunkBuilder::DoConstant(HConstant* instr) { - Representation r = instr->representation(); - if (r.IsSmi()) { - return DefineAsRegister(new(zone()) LConstantS); - } else if (r.IsInteger32()) { - return DefineAsRegister(new(zone()) LConstantI); - } else if (r.IsDouble()) { - return DefineAsRegister(new (zone()) LConstantD); - } else if (r.IsExternal()) { - return DefineAsRegister(new(zone()) LConstantE); - } else if (r.IsTagged()) { - return DefineAsRegister(new(zone()) LConstantT); - } else { - UNREACHABLE(); - } -} - - -LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) { - LOperand* context = UseRegisterAtStart(instr->value()); - LInstruction* result = - DefineAsRegister(new(zone()) LLoadContextSlot(context)); - if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) { - LOperand* context; - LOperand* value; - LOperand* temp; - context = UseRegister(instr->context()); - if (instr->NeedsWriteBarrier()) { - value = UseTempRegister(instr->value()); - temp = TempRegister(); - } else { - value = UseRegister(instr->value()); - temp = NULL; - } - LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp); - if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) { - // Use the special mov rax, moffs64 encoding for external - // memory accesses with 64-bit word-sized values. 
- if (instr->access().IsExternalMemory() && - instr->access().offset() == 0 && - (instr->access().representation().IsSmi() || - instr->access().representation().IsTagged() || - instr->access().representation().IsHeapObject() || - instr->access().representation().IsExternal())) { - LOperand* obj = UseRegisterOrConstantAtStart(instr->object()); - return DefineFixed(new(zone()) LLoadNamedField(obj), rax); - } - LOperand* obj = UseRegisterAtStart(instr->object()); - return DefineAsRegister(new(zone()) LLoadNamedField(obj)); -} - - -LInstruction* LChunkBuilder::DoLoadFunctionPrototype( - HLoadFunctionPrototype* instr) { - return AssignEnvironment(DefineAsRegister( - new(zone()) LLoadFunctionPrototype(UseRegister(instr->function())))); -} - - -LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) { - return DefineAsRegister(new(zone()) LLoadRoot); -} - - -void LChunkBuilder::FindDehoistedKeyDefinitions(HValue* candidate) { - // We sign extend the dehoisted key at the definition point when the pointer - // size is 64-bit. For x32 port, we sign extend the dehoisted key at the use - // points and should not invoke this function. We can't use STATIC_ASSERT - // here as the pointer size is 32-bit for x32. - DCHECK(kPointerSize == kInt64Size); - BitVector* dehoisted_key_ids = chunk_->GetDehoistedKeyIds(); - if (dehoisted_key_ids->Contains(candidate->id())) return; - dehoisted_key_ids->Add(candidate->id()); - if (!candidate->IsPhi()) return; - for (int i = 0; i < candidate->OperandCount(); ++i) { - FindDehoistedKeyDefinitions(candidate->OperandAt(i)); - } -} - - -LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { - DCHECK((kPointerSize == kInt64Size && - instr->key()->representation().IsInteger32()) || - (kPointerSize == kInt32Size && - instr->key()->representation().IsSmiOrInteger32())); - ElementsKind elements_kind = instr->elements_kind(); - LOperand* key = NULL; - LInstruction* result = NULL; - - if (kPointerSize == kInt64Size) { - key = UseRegisterOrConstantAtStart(instr->key()); - } else { - bool clobbers_key = ExternalArrayOpRequiresTemp( - instr->key()->representation(), elements_kind); - key = clobbers_key - ? 
UseTempRegister(instr->key()) - : UseRegisterOrConstantAtStart(instr->key()); - } - - if ((kPointerSize == kInt64Size) && instr->IsDehoisted()) { - FindDehoistedKeyDefinitions(instr->key()); - } - - if (!instr->is_fixed_typed_array()) { - LOperand* obj = UseRegisterAtStart(instr->elements()); - result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr)); - } else { - DCHECK( - (instr->representation().IsInteger32() && - !(IsDoubleOrFloatElementsKind(elements_kind))) || - (instr->representation().IsDouble() && - (IsDoubleOrFloatElementsKind(elements_kind)))); - LOperand* backing_store = UseRegister(instr->elements()); - LOperand* backing_store_owner = UseAny(instr->backing_store_owner()); - result = DefineAsRegister( - new (zone()) LLoadKeyed(backing_store, key, backing_store_owner)); - } - - bool needs_environment; - if (instr->is_fixed_typed_array()) { - // see LCodeGen::DoLoadKeyedExternalArray - needs_environment = elements_kind == UINT32_ELEMENTS && - !instr->CheckFlag(HInstruction::kUint32); - } else { - // see LCodeGen::DoLoadKeyedFixedDoubleArray and - // LCodeGen::DoLoadKeyedFixedArray - needs_environment = - instr->RequiresHoleCheck() || - (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && info()->IsStub()); - } - - if (needs_environment) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { - ElementsKind elements_kind = instr->elements_kind(); - - if ((kPointerSize == kInt64Size) && instr->IsDehoisted()) { - FindDehoistedKeyDefinitions(instr->key()); - } - - if (!instr->is_fixed_typed_array()) { - DCHECK(instr->elements()->representation().IsTagged()); - bool needs_write_barrier = instr->NeedsWriteBarrier(); - LOperand* object = NULL; - LOperand* key = NULL; - LOperand* val = NULL; - - Representation value_representation = instr->value()->representation(); - if (value_representation.IsDouble()) { - object = UseRegisterAtStart(instr->elements()); - val = UseRegisterAtStart(instr->value()); - key = UseRegisterOrConstantAtStart(instr->key()); - } else { - DCHECK(value_representation.IsSmiOrTagged() || - value_representation.IsInteger32()); - if (needs_write_barrier) { - object = UseTempRegister(instr->elements()); - val = UseTempRegister(instr->value()); - key = UseTempRegister(instr->key()); - } else { - object = UseRegisterAtStart(instr->elements()); - val = UseRegisterOrConstantAtStart(instr->value()); - key = UseRegisterOrConstantAtStart(instr->key()); - } - } - - return new (zone()) LStoreKeyed(object, key, val, nullptr); - } - - DCHECK( - (instr->value()->representation().IsInteger32() && - !IsDoubleOrFloatElementsKind(elements_kind)) || - (instr->value()->representation().IsDouble() && - IsDoubleOrFloatElementsKind(elements_kind))); - DCHECK(instr->elements()->representation().IsExternal()); - bool val_is_temp_register = elements_kind == UINT8_CLAMPED_ELEMENTS || - elements_kind == FLOAT32_ELEMENTS; - LOperand* val = val_is_temp_register ? UseTempRegister(instr->value()) - : UseRegister(instr->value()); - LOperand* key = NULL; - if (kPointerSize == kInt64Size) { - key = UseRegisterOrConstantAtStart(instr->key()); - } else { - bool clobbers_key = ExternalArrayOpRequiresTemp( - instr->key()->representation(), elements_kind); - key = clobbers_key - ? 
UseTempRegister(instr->key()) - : UseRegisterOrConstantAtStart(instr->key()); - } - LOperand* backing_store = UseRegister(instr->elements()); - LOperand* backing_store_owner = UseAny(instr->backing_store_owner()); - return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner); -} - - -LInstruction* LChunkBuilder::DoTransitionElementsKind( - HTransitionElementsKind* instr) { - if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) { - LOperand* object = UseRegister(instr->object()); - LOperand* new_map_reg = TempRegister(); - LOperand* temp_reg = TempRegister(); - LTransitionElementsKind* result = new(zone()) LTransitionElementsKind( - object, NULL, new_map_reg, temp_reg); - return result; - } else { - LOperand* object = UseFixed(instr->object(), rax); - LOperand* context = UseFixed(instr->context(), rsi); - LTransitionElementsKind* result = - new(zone()) LTransitionElementsKind(object, context, NULL, NULL); - return MarkAsCall(result, instr); - } -} - - -LInstruction* LChunkBuilder::DoTrapAllocationMemento( - HTrapAllocationMemento* instr) { - LOperand* object = UseRegister(instr->object()); - LOperand* temp = TempRegister(); - LTrapAllocationMemento* result = - new(zone()) LTrapAllocationMemento(object, temp); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) { - info()->MarkAsDeferredCalling(); - LOperand* context = UseFixed(instr->context(), rsi); - LOperand* object = Use(instr->object()); - LOperand* elements = Use(instr->elements()); - LOperand* key = UseRegisterOrConstant(instr->key()); - LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity()); - - LMaybeGrowElements* result = new (zone()) - LMaybeGrowElements(context, object, elements, key, current_capacity); - DefineFixed(result, rax); - return AssignPointerMap(AssignEnvironment(result)); -} - - -LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { - bool is_in_object = instr->access().IsInobject(); - bool is_external_location = instr->access().IsExternalMemory() && - instr->access().offset() == 0; - bool needs_write_barrier = instr->NeedsWriteBarrier(); - bool needs_write_barrier_for_map = instr->has_transition() && - instr->NeedsWriteBarrierForMap(); - - LOperand* obj; - if (needs_write_barrier) { - obj = is_in_object - ? UseRegister(instr->object()) - : UseTempRegister(instr->object()); - } else if (is_external_location) { - DCHECK(!is_in_object); - DCHECK(!needs_write_barrier); - DCHECK(!needs_write_barrier_for_map); - obj = UseRegisterOrConstant(instr->object()); - } else { - obj = needs_write_barrier_for_map - ? UseRegister(instr->object()) - : UseRegisterAtStart(instr->object()); - } - - bool can_be_constant = instr->value()->IsConstant() && - HConstant::cast(instr->value())->NotInNewSpace() && - !instr->field_representation().IsDouble(); - - LOperand* val; - if (needs_write_barrier) { - val = UseTempRegister(instr->value()); - } else if (is_external_location) { - val = UseFixed(instr->value(), rax); - } else if (can_be_constant) { - val = UseRegisterOrConstant(instr->value()); - } else if (instr->field_representation().IsDouble()) { - val = UseRegisterAtStart(instr->value()); - } else { - val = UseRegister(instr->value()); - } - - // We only need a scratch register if we have a write barrier or we - // have a store into the properties array (not in-object-property). - LOperand* temp = (!is_in_object || needs_write_barrier || - needs_write_barrier_for_map) ? 
TempRegister() : NULL; - - return new(zone()) LStoreNamedField(obj, val, temp); -} - - -LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) { - LOperand* context = UseFixed(instr->context(), rsi); - LOperand* left = UseFixed(instr->left(), rdx); - LOperand* right = UseFixed(instr->right(), rax); - return MarkAsCall( - DefineFixed(new(zone()) LStringAdd(context, left, right), rax), instr); -} - - -LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) { - LOperand* string = UseTempRegister(instr->string()); - LOperand* index = UseTempRegister(instr->index()); - LOperand* context = UseAny(instr->context()); - LStringCharCodeAt* result = - new(zone()) LStringCharCodeAt(context, string, index); - return AssignPointerMap(DefineAsRegister(result)); -} - - -LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) { - LOperand* char_code = UseRegister(instr->value()); - LOperand* context = UseAny(instr->context()); - LStringCharFromCode* result = - new(zone()) LStringCharFromCode(context, char_code); - return AssignPointerMap(DefineAsRegister(result)); -} - - -LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) { - LOperand* size = instr->size()->IsConstant() ? UseConstant(instr->size()) - : UseRegister(instr->size()); - if (instr->IsAllocationFolded()) { - LOperand* temp = TempRegister(); - LFastAllocate* result = new (zone()) LFastAllocate(size, temp); - return DefineAsRegister(result); - } else { - info()->MarkAsDeferredCalling(); - LOperand* context = UseAny(instr->context()); - LOperand* temp = TempRegister(); - LAllocate* result = new (zone()) LAllocate(context, size, temp); - return AssignPointerMap(DefineAsRegister(result)); - } -} - - -LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) { - DCHECK(argument_count_ == 0); - allocator_->MarkAsOsrEntry(); - current_block_->last_environment()->set_ast_id(instr->ast_id()); - return AssignEnvironment(new(zone()) LOsrEntry); -} - - -LInstruction* LChunkBuilder::DoParameter(HParameter* instr) { - LParameter* result = new(zone()) LParameter; - if (instr->kind() == HParameter::STACK_PARAMETER) { - int spill_index = chunk()->GetParameterStackSlot(instr->index()); - return DefineAsSpilled(result, spill_index); - } else { - DCHECK(info()->IsStub()); - CallInterfaceDescriptor descriptor = graph()->descriptor(); - int index = static_cast<int>(instr->index()); - Register reg = descriptor.GetRegisterParameter(index); - return DefineFixed(result, reg); - } -} - - -LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) { - // Use an index that corresponds to the location in the unoptimized frame, - // which the optimized frame will subsume. - int env_index = instr->index(); - int spill_index = 0; - if (instr->environment()->is_parameter_index(env_index)) { - spill_index = chunk()->GetParameterStackSlot(env_index); - } else { - spill_index = env_index - instr->environment()->first_local_index(); - if (spill_index > LUnallocated::kMaxFixedSlotIndex) { - Retry(kTooManySpillSlotsNeededForOSR); - spill_index = 0; - } - spill_index += StandardFrameConstants::kFixedSlotCount; - } - return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index); -} - - -LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) { - // There are no real uses of the arguments object. - // arguments.length and element access are supported directly on - // stack arguments, and any real arguments object use causes a bailout. - // So this value is never used. 
- return NULL; -} - - -LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) { - instr->ReplayEnvironment(current_block_->last_environment()); - - // There are no real uses of a captured object. - return NULL; -} - - -LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) { - info()->MarkAsRequiresFrame(); - LOperand* args = UseRegister(instr->arguments()); - LOperand* length; - LOperand* index; - if (instr->length()->IsConstant() && instr->index()->IsConstant()) { - length = UseRegisterOrConstant(instr->length()); - index = UseOrConstant(instr->index()); - } else { - length = UseTempRegister(instr->length()); - index = Use(instr->index()); - } - return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index)); -} - - -LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) { - LOperand* context = UseFixed(instr->context(), rsi); - LOperand* value = UseFixed(instr->value(), rbx); - LTypeof* result = new(zone()) LTypeof(context, value); - return MarkAsCall(DefineFixed(result, rax), instr); -} - - -LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) { - return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value())); -} - - -LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) { - instr->ReplayEnvironment(current_block_->last_environment()); - return NULL; -} - - -LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) { - info()->MarkAsDeferredCalling(); - if (instr->is_function_entry()) { - LOperand* context = UseFixed(instr->context(), rsi); - return MarkAsCall(new(zone()) LStackCheck(context), instr); - } else { - DCHECK(instr->is_backwards_branch()); - LOperand* context = UseAny(instr->context()); - return AssignEnvironment( - AssignPointerMap(new(zone()) LStackCheck(context))); - } -} - - -LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { - HEnvironment* outer = current_block_->last_environment(); - outer->set_ast_id(instr->ReturnId()); - HConstant* undefined = graph()->GetConstantUndefined(); - HEnvironment* inner = outer->CopyForInlining( - instr->closure(), instr->arguments_count(), instr->function(), undefined, - instr->inlining_kind(), instr->syntactic_tail_call_mode()); - // Only replay binding of arguments object if it wasn't removed from graph. 
- if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) { - inner->Bind(instr->arguments_var(), instr->arguments_object()); - } - inner->BindContext(instr->closure_context()); - inner->set_entry(instr); - current_block_->UpdateEnvironment(inner); - return NULL; -} - - -LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) { - LInstruction* pop = NULL; - - HEnvironment* env = current_block_->last_environment(); - - if (env->entry()->arguments_pushed()) { - int argument_count = env->arguments_environment()->parameter_count(); - pop = new(zone()) LDrop(argument_count); - DCHECK(instr->argument_delta() == -argument_count); - } - - HEnvironment* outer = current_block_->last_environment()-> - DiscardInlined(false); - current_block_->UpdateEnvironment(outer); - - return pop; -} - - -LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) { - LOperand* context = UseFixed(instr->context(), rsi); - LOperand* object = UseFixed(instr->enumerable(), rax); - LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object); - return MarkAsCall(DefineFixed(result, rax), instr, CAN_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) { - LOperand* map = UseRegister(instr->map()); - return AssignEnvironment(DefineAsRegister( - new(zone()) LForInCacheArray(map))); -} - - -LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* map = UseRegisterAtStart(instr->map()); - return AssignEnvironment(new(zone()) LCheckMapValue(value, map)); -} - - -LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) { - LOperand* object = UseRegister(instr->object()); - LOperand* index = UseTempRegister(instr->index()); - LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index); - LInstruction* result = DefineSameAsFirst(load); - return AssignPointerMap(result); -} - -} // namespace internal -} // namespace v8 - -#endif // V8_TARGET_ARCH_X64 diff --git a/src/crankshaft/x64/lithium-x64.h b/src/crankshaft/x64/lithium-x64.h deleted file mode 100644 index 591ab47c46..0000000000 --- a/src/crankshaft/x64/lithium-x64.h +++ /dev/null @@ -1,2496 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_X64_LITHIUM_X64_H_ -#define V8_CRANKSHAFT_X64_LITHIUM_X64_H_ - -#include "src/crankshaft/hydrogen.h" -#include "src/crankshaft/lithium.h" -#include "src/crankshaft/lithium-allocator.h" -#include "src/safepoint-table.h" -#include "src/utils.h" - -namespace v8 { -namespace internal { - -// Forward declarations. 
-class LCodeGen; - -#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \ - V(AccessArgumentsAt) \ - V(AddI) \ - V(Allocate) \ - V(ApplyArguments) \ - V(ArgumentsElements) \ - V(ArgumentsLength) \ - V(ArithmeticD) \ - V(ArithmeticT) \ - V(BitI) \ - V(BoundsCheck) \ - V(Branch) \ - V(CallWithDescriptor) \ - V(CallNewArray) \ - V(CallRuntime) \ - V(CheckArrayBufferNotNeutered) \ - V(CheckInstanceType) \ - V(CheckMaps) \ - V(CheckMapValue) \ - V(CheckNonSmi) \ - V(CheckSmi) \ - V(CheckValue) \ - V(ClampDToUint8) \ - V(ClampIToUint8) \ - V(ClampTToUint8) \ - V(ClassOfTestAndBranch) \ - V(CompareNumericAndBranch) \ - V(CmpObjectEqAndBranch) \ - V(CmpHoleAndBranch) \ - V(CmpMapAndBranch) \ - V(CmpT) \ - V(ConstantD) \ - V(ConstantE) \ - V(ConstantI) \ - V(ConstantS) \ - V(ConstantT) \ - V(Context) \ - V(DebugBreak) \ - V(DeclareGlobals) \ - V(Deoptimize) \ - V(DivByConstI) \ - V(DivByPowerOf2I) \ - V(DivI) \ - V(DoubleToI) \ - V(DoubleToSmi) \ - V(Drop) \ - V(DummyUse) \ - V(Dummy) \ - V(FastAllocate) \ - V(FlooringDivByConstI) \ - V(FlooringDivByPowerOf2I) \ - V(FlooringDivI) \ - V(ForInCacheArray) \ - V(ForInPrepareMap) \ - V(Goto) \ - V(HasInPrototypeChainAndBranch) \ - V(HasInstanceTypeAndBranch) \ - V(InnerAllocatedObject) \ - V(InstructionGap) \ - V(Integer32ToDouble) \ - V(InvokeFunction) \ - V(IsStringAndBranch) \ - V(IsSmiAndBranch) \ - V(IsUndetectableAndBranch) \ - V(Label) \ - V(LazyBailout) \ - V(LoadContextSlot) \ - V(LoadRoot) \ - V(LoadFieldByIndex) \ - V(LoadFunctionPrototype) \ - V(LoadKeyed) \ - V(LoadNamedField) \ - V(MathAbs) \ - V(MathClz32) \ - V(MathCos) \ - V(MathExp) \ - V(MathFloorD) \ - V(MathFloorI) \ - V(MathFround) \ - V(MathLog) \ - V(MathMinMax) \ - V(MathPowHalf) \ - V(MathRoundD) \ - V(MathRoundI) \ - V(MathSin) \ - V(MathSqrt) \ - V(MaybeGrowElements) \ - V(ModByConstI) \ - V(ModByPowerOf2I) \ - V(ModI) \ - V(MulI) \ - V(NumberTagD) \ - V(NumberTagI) \ - V(NumberTagU) \ - V(NumberUntagD) \ - V(OsrEntry) \ - V(Parameter) \ - V(Power) \ - V(Prologue) \ - V(PushArgument) \ - V(Return) \ - V(SeqStringGetChar) \ - V(SeqStringSetChar) \ - V(ShiftI) \ - V(SmiTag) \ - V(SmiUntag) \ - V(StackCheck) \ - V(StoreCodeEntry) \ - V(StoreContextSlot) \ - V(StoreKeyed) \ - V(StoreNamedField) \ - V(StringAdd) \ - V(StringCharCodeAt) \ - V(StringCharFromCode) \ - V(StringCompareAndBranch) \ - V(SubI) \ - V(TaggedToI) \ - V(ThisFunction) \ - V(TransitionElementsKind) \ - V(TrapAllocationMemento) \ - V(Typeof) \ - V(TypeofIsAndBranch) \ - V(Uint32ToDouble) \ - V(UnknownOSRValue) \ - V(WrapReceiver) - -#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \ - Opcode opcode() const final { return LInstruction::k##type; } \ - void CompileToNative(LCodeGen* generator) final; \ - const char* Mnemonic() const final { return mnemonic; } \ - static L##type* cast(LInstruction* instr) { \ - DCHECK(instr->Is##type()); \ - return reinterpret_cast(instr); \ - } - - -#define DECLARE_HYDROGEN_ACCESSOR(type) \ - H##type* hydrogen() const { \ - return H##type::cast(hydrogen_value()); \ - } - - -class LInstruction : public ZoneObject { - public: - LInstruction() - : environment_(NULL), - hydrogen_value_(NULL), - bit_field_(IsCallBits::encode(false)) { - } - - virtual ~LInstruction() {} - - virtual void CompileToNative(LCodeGen* generator) = 0; - virtual const char* Mnemonic() const = 0; - virtual void PrintTo(StringStream* stream); - virtual void PrintDataTo(StringStream* stream); - virtual void PrintOutputOperandTo(StringStream* stream); - - enum Opcode { - // Declare a unique enum value for each 
instruction.
-#define DECLARE_OPCODE(type) k##type,
-    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
-    kNumberOfInstructions
-#undef DECLARE_OPCODE
-  };
-
-  virtual Opcode opcode() const = 0;
-
-  // Declare non-virtual type testers for all leaf IR classes.
-#define DECLARE_PREDICATE(type) \
-  bool Is##type() const { return opcode() == k##type; }
-  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
-#undef DECLARE_PREDICATE
-
-  // Declare virtual predicates for instructions that don't have
-  // an opcode.
-  virtual bool IsGap() const { return false; }
-
-  virtual bool IsControl() const { return false; }
-
-  // Try deleting this instruction if possible.
-  virtual bool TryDelete() { return false; }
-
-  void set_environment(LEnvironment* env) { environment_ = env; }
-  LEnvironment* environment() const { return environment_; }
-  bool HasEnvironment() const { return environment_ != NULL; }
-
-  void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
-  LPointerMap* pointer_map() const { return pointer_map_.get(); }
-  bool HasPointerMap() const { return pointer_map_.is_set(); }
-
-  void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
-  HValue* hydrogen_value() const { return hydrogen_value_; }
-
-  void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
-  bool IsCall() const { return IsCallBits::decode(bit_field_); }
-
-  void MarkAsSyntacticTailCall() {
-    bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
-  }
-  bool IsSyntacticTailCall() const {
-    return IsSyntacticTailCallBits::decode(bit_field_);
-  }
-
-  // Interface to the register allocator and iterators.
-  bool ClobbersTemps() const { return IsCall(); }
-  bool ClobbersRegisters() const { return IsCall(); }
-  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
-    return IsCall();
-  }
-
-  // Interface to the register allocator and iterators.
-  bool IsMarkedAsCall() const { return IsCall(); }
-
-  virtual bool HasResult() const = 0;
-  virtual LOperand* result() const = 0;
-
-  LOperand* FirstInput() { return InputAt(0); }
-  LOperand* Output() { return HasResult() ? result() : NULL; }
-
-  virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
-
-  virtual bool MustSignExtendResult(LPlatformChunk* chunk) const {
-    return false;
-  }
-
-#ifdef DEBUG
-  void VerifyCall();
-#endif
-
-  virtual int InputCount() = 0;
-  virtual LOperand* InputAt(int i) = 0;
-
- private:
-  // Iterator support.
-  friend class InputIterator;
-
-  friend class TempIterator;
-  virtual int TempCount() = 0;
-  virtual LOperand* TempAt(int i) = 0;
-
-  class IsCallBits: public BitField<bool, 0, 1> {};
-  class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
-  };
-
-  LEnvironment* environment_;
-  SetOncePointer<LPointerMap> pointer_map_;
-  HValue* hydrogen_value_;
-  int bit_field_;
-};
-
-
-// R = number of result operands (0 or 1).
-template <int R>
-class LTemplateResultInstruction : public LInstruction {
- public:
-  // Allow 0 or 1 output operands.
-  STATIC_ASSERT(R == 0 || R == 1);
-  bool HasResult() const final { return R != 0 && result() != NULL; }
-  void set_result(LOperand* operand) { results_[0] = operand; }
-  LOperand* result() const override { return results_[0]; }
-
-  bool MustSignExtendResult(LPlatformChunk* chunk) const final;
-
- protected:
-  EmbeddedContainer<LOperand*, R> results_;
-};
-
-
-// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template <int R, int I, int T>
-class LTemplateInstruction : public LTemplateResultInstruction<R> {
- protected:
-  EmbeddedContainer<LOperand*, I> inputs_;
-  EmbeddedContainer<LOperand*, T> temps_;
-
- private:
-  // Iterator support.
-  int InputCount() final { return I; }
-  LOperand* InputAt(int i) final { return inputs_[i]; }
-
-  int TempCount() final { return T; }
-  LOperand* TempAt(int i) final { return temps_[i]; }
-};
-
-
-class LGap : public LTemplateInstruction<0, 0, 0> {
- public:
-  explicit LGap(HBasicBlock* block)
-      : block_(block) {
-    parallel_moves_[BEFORE] = NULL;
-    parallel_moves_[START] = NULL;
-    parallel_moves_[END] = NULL;
-    parallel_moves_[AFTER] = NULL;
-  }
-
-  // Can't use the DECLARE-macro here because of sub-classes.
-  bool IsGap() const final { return true; }
-  void PrintDataTo(StringStream* stream) override;
-  static LGap* cast(LInstruction* instr) {
-    DCHECK(instr->IsGap());
-    return reinterpret_cast<LGap*>(instr);
-  }
-
-  bool IsRedundant() const;
-
-  HBasicBlock* block() const { return block_; }
-
-  enum InnerPosition {
-    BEFORE,
-    START,
-    END,
-    AFTER,
-    FIRST_INNER_POSITION = BEFORE,
-    LAST_INNER_POSITION = AFTER
-  };
-
-  LParallelMove* GetOrCreateParallelMove(InnerPosition pos,
-                                         Zone* zone) {
-    if (parallel_moves_[pos] == NULL) {
-      parallel_moves_[pos] = new(zone) LParallelMove(zone);
-    }
-    return parallel_moves_[pos];
-  }
-
-  LParallelMove* GetParallelMove(InnerPosition pos) {
-    return parallel_moves_[pos];
-  }
-
- private:
-  LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
-  HBasicBlock* block_;
-};
-
-
-class LInstructionGap final : public LGap {
- public:
-  explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
-
-  bool HasInterestingComment(LCodeGen* gen) const override {
-    return !IsRedundant();
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
-};
-
-
-class LGoto final : public LTemplateInstruction<0, 0, 0> {
- public:
-  explicit LGoto(HBasicBlock* block) : block_(block) { }
-
-  bool HasInterestingComment(LCodeGen* gen) const override;
-  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
-  void PrintDataTo(StringStream* stream) override;
-  bool IsControl() const override { return true; }
-
-  int block_id() const { return block_->block_id(); }
-
- private:
-  HBasicBlock* block_;
-};
-
-
-class LPrologue final : public LTemplateInstruction<0, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
-};
-
-
-class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
- public:
-  LLazyBailout() : gap_instructions_size_(0) { }
-
-  DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
-
-  void set_gap_instructions_size(int gap_instructions_size) {
-    gap_instructions_size_ = gap_instructions_size;
-  }
-  int gap_instructions_size() { return gap_instructions_size_; }
-
- private:
-  int gap_instructions_size_;
-};
-
-
-class LDummy final : public LTemplateInstruction<1, 0, 0> {
- public:
-  LDummy() {}
-  DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
-};
-
-
-class LDummyUse final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LDummyUse(LOperand* value) {
-    inputs_[0] = value;
-  }
-  DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
-};
-
-
-class LDeoptimize final : public LTemplateInstruction<0, 0, 0> {
- public:
-  bool IsControl() const override { return true; }
-  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
-  DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
-};
-
-
-class LLabel final : public LGap {
- public:
-  explicit LLabel(HBasicBlock* block)
-      : LGap(block), replacement_(NULL) { }
-
-  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
-  DECLARE_CONCRETE_INSTRUCTION(Label, "label")
-
-  void PrintDataTo(StringStream* stream) override;
-
-  int block_id() const { return block()->block_id(); }
-  bool is_loop_header() const { return block()->IsLoopHeader(); }
-  bool is_osr_entry() const { return block()->is_osr_entry(); }
-  Label* label() { return &label_; }
-  LLabel* replacement() const { return replacement_; }
-  void set_replacement(LLabel* label) { replacement_ = label; }
-  bool HasReplacement() const { return replacement_ != NULL; }
-
- private:
-  Label label_;
-  LLabel* replacement_;
-};
-
-
-class LParameter final : public LTemplateInstruction<1, 0, 0> {
- public:
-  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
-  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
-};
-
-
-class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
- public:
-  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
-  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
-};
-
-
-template <int I, int T>
-class LControlInstruction : public LTemplateInstruction<0, I, T> {
- public:
-  LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
-
-  bool IsControl() const final { return true; }
-
-  int SuccessorCount() { return hydrogen()->SuccessorCount(); }
-  HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
-
-  int TrueDestination(LChunk* chunk) {
-    return chunk->LookupDestination(true_block_id());
-  }
-  int FalseDestination(LChunk* chunk) {
-    return chunk->LookupDestination(false_block_id());
-  }
-
-  Label* TrueLabel(LChunk* chunk) {
-    if (true_label_ == NULL) {
-      true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
-    }
-    return true_label_;
-  }
-  Label* FalseLabel(LChunk* chunk) {
-    if (false_label_ == NULL) {
-      false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
-    }
-    return false_label_;
-  }
-
- protected:
-  int true_block_id() { return SuccessorAt(0)->block_id(); }
-  int false_block_id() { return SuccessorAt(1)->block_id(); }
-
- private:
-  HControlInstruction* hydrogen() {
-    return HControlInstruction::cast(this->hydrogen_value());
-  }
-
-  Label* false_label_;
-  Label* true_label_;
-};
-
-
-class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> {
- public:
-  LWrapReceiver(LOperand* receiver, LOperand* function) {
-    inputs_[0] = receiver;
-    inputs_[1] = function;
-  }
-
-  LOperand* receiver() { return inputs_[0]; }
-  LOperand* function() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
-  DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
-};
-
-
-class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
- public:
-  LApplyArguments(LOperand* function,
-                  LOperand* receiver,
-                  LOperand* length,
-                  LOperand* elements) {
-    inputs_[0] = function;
-    inputs_[1] = receiver;
-    inputs_[2] = length;
-    inputs_[3] = elements;
-  }
-
-  LOperand* function() { return inputs_[0]; }
-  LOperand* receiver() { return inputs_[1]; }
-  LOperand* length() { return inputs_[2]; }
-  LOperand* elements() { return inputs_[3]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
-  DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
-};
-
-
-class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> {
- public:
-  LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
-    inputs_[0] = arguments;
-    inputs_[1] = length;
-    inputs_[2] = index;
-  }
-
-  LOperand* arguments() { return inputs_[0]; }
-  LOperand* length() { return inputs_[1]; }
-  LOperand* index() { return
inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at") - - void PrintDataTo(StringStream* stream) override; -}; - - -class LArgumentsLength final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LArgumentsLength(LOperand* elements) { - inputs_[0] = elements; - } - - LOperand* elements() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length") -}; - - -class LArgumentsElements final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements") - DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements) -}; - - -class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> { - public: - LModByPowerOf2I(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) - - private: - int32_t divisor_; -}; - - -class LModByConstI final : public LTemplateInstruction<1, 1, 2> { - public: - LModByConstI(LOperand* dividend, - int32_t divisor, - LOperand* temp1, - LOperand* temp2) { - inputs_[0] = dividend; - divisor_ = divisor; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) - - private: - int32_t divisor_; -}; - - -class LModI final : public LTemplateInstruction<1, 2, 1> { - public: - LModI(LOperand* left, LOperand* right, LOperand* temp) { - inputs_[0] = left; - inputs_[1] = right; - temps_[0] = temp; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) -}; - - -class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> { - public: - LDivByPowerOf2I(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i") - DECLARE_HYDROGEN_ACCESSOR(Div) - - private: - int32_t divisor_; -}; - - -class LDivByConstI final : public LTemplateInstruction<1, 1, 2> { - public: - LDivByConstI(LOperand* dividend, - int32_t divisor, - LOperand* temp1, - LOperand* temp2) { - inputs_[0] = dividend; - divisor_ = divisor; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i") - DECLARE_HYDROGEN_ACCESSOR(Div) - - private: - int32_t divisor_; -}; - - -class LDivI final : public LTemplateInstruction<1, 2, 1> { - public: - LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) { - inputs_[0] = dividend; - inputs_[1] = divisor; - temps_[0] = temp; - } - - LOperand* dividend() { return inputs_[0]; } - LOperand* divisor() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i") - DECLARE_HYDROGEN_ACCESSOR(BinaryOperation) -}; - - -class LFlooringDivByPowerOf2I 
final : public LTemplateInstruction<1, 1, 0> { - public: - LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I, - "flooring-div-by-power-of-2-i") - DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) - - private: - int32_t divisor_; -}; - - -class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 3> { - public: - LFlooringDivByConstI(LOperand* dividend, - int32_t divisor, - LOperand* temp1, - LOperand* temp2, - LOperand* temp3) { - inputs_[0] = dividend; - divisor_ = divisor; - temps_[0] = temp1; - temps_[1] = temp2; - temps_[2] = temp3; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - LOperand* temp3() { return temps_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i") - DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) - - private: - int32_t divisor_; -}; - - -class LFlooringDivI final : public LTemplateInstruction<1, 2, 1> { - public: - LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) { - inputs_[0] = dividend; - inputs_[1] = divisor; - temps_[0] = temp; - } - - LOperand* dividend() { return inputs_[0]; } - LOperand* divisor() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i") - DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) -}; - - -class LMulI final : public LTemplateInstruction<1, 2, 0> { - public: - LMulI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i") - DECLARE_HYDROGEN_ACCESSOR(Mul) -}; - - -class LCompareNumericAndBranch final : public LControlInstruction<2, 0> { - public: - LCompareNumericAndBranch(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch, - "compare-numeric-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch) - - Token::Value op() const { return hydrogen()->token(); } - bool is_double() const { - return hydrogen()->representation().IsDouble(); - } - - void PrintDataTo(StringStream* stream) override; -}; - -// Math.floor with a double result. -class LMathFloorD final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathFloorD(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathFloorD, "math-floor-d") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - -// Math.floor with an integer result. -class LMathFloorI final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathFloorI(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathFloorI, "math-floor-i") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - -// Math.round with a double result. 
-class LMathRoundD final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathRoundD(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathRoundD, "math-round-d") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - -// Math.round with an integer result. -class LMathRoundI final : public LTemplateInstruction<1, 1, 1> { - public: - LMathRoundI(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathRoundI, "math-round-i") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - - -class LMathFround final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathFround(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround") -}; - - -class LMathAbs final : public LTemplateInstruction<1, 2, 0> { - public: - explicit LMathAbs(LOperand* context, LOperand* value) { - inputs_[1] = context; - inputs_[0] = value; - } - - LOperand* context() { return inputs_[1]; } - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - - -class LMathLog final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathLog(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log") -}; - - -class LMathClz32 final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathClz32(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32") -}; - -class LMathCos final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathCos(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos") -}; - -class LMathExp final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathExp(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp") -}; - -class LMathSin final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathSin(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin") -}; - -class LMathSqrt final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathSqrt(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt") -}; - - -class LMathPowHalf final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathPowHalf(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half") -}; - - -class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> { - public: - LCmpObjectEqAndBranch(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch") -}; - - -class LCmpHoleAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LCmpHoleAndBranch(LOperand* 
object) { - inputs_[0] = object; - } - - LOperand* object() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch) -}; - - -class LIsStringAndBranch final : public LControlInstruction<1, 1> { - public: - explicit LIsStringAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LIsSmiAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LIsSmiAndBranch(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> { - public: - explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch, - "is-undetectable-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LStringCompareAndBranch final : public LControlInstruction<3, 0> { - public: - explicit LStringCompareAndBranch(LOperand* context, - LOperand* left, - LOperand* right) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch, - "string-compare-and-branch") - DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch) - - void PrintDataTo(StringStream* stream) override; - - Token::Value op() const { return hydrogen()->token(); } -}; - - -class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LHasInstanceTypeAndBranch(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch, - "has-instance-type-and-branch") - DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - -class LClassOfTestAndBranch final : public LControlInstruction<1, 2> { - public: - LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, "class-of-test-and-branch") - DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - -class LCmpT final : public LTemplateInstruction<1, 3, 0> { - public: - LCmpT(LOperand* context, LOperand* left, LOperand* right) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t") - 
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric) - - Token::Value op() const { return hydrogen()->token(); } -}; - - -class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> { - public: - LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) { - inputs_[0] = object; - inputs_[1] = prototype; - } - - LOperand* object() const { return inputs_[0]; } - LOperand* prototype() const { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch, - "has-in-prototype-chain-and-branch") - DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch) -}; - - -class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> { - public: - LBoundsCheck(LOperand* index, LOperand* length) { - inputs_[0] = index; - inputs_[1] = length; - } - - LOperand* index() { return inputs_[0]; } - LOperand* length() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check") - DECLARE_HYDROGEN_ACCESSOR(BoundsCheck) -}; - - -class LBitI final : public LTemplateInstruction<1, 2, 0> { - public: - LBitI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - Token::Value op() const { return hydrogen()->op(); } - bool IsInteger32() const { - return hydrogen()->representation().IsInteger32(); - } - - DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i") - DECLARE_HYDROGEN_ACCESSOR(Bitwise) -}; - - -class LShiftI final : public LTemplateInstruction<1, 2, 0> { - public: - LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt) - : op_(op), can_deopt_(can_deopt) { - inputs_[0] = left; - inputs_[1] = right; - } - - Token::Value op() const { return op_; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - bool can_deopt() const { return can_deopt_; } - - DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i") - - private: - Token::Value op_; - bool can_deopt_; -}; - - -class LSubI final : public LTemplateInstruction<1, 2, 0> { - public: - LSubI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i") - DECLARE_HYDROGEN_ACCESSOR(Sub) -}; - - -class LConstantI final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - int32_t value() const { return hydrogen()->Integer32Value(); } -}; - - -class LConstantS final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); } -}; - - -class LConstantD final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); } -}; - - -class LConstantE final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - ExternalReference value() const { - return hydrogen()->ExternalReferenceValue(); - } -}; - - -class LConstantT final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - Handle value(Isolate* isolate) const { - return 
hydrogen()->handle(isolate); - } -}; - - -class LBranch final : public LControlInstruction<1, 0> { - public: - explicit LBranch(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(Branch, "branch") - DECLARE_HYDROGEN_ACCESSOR(Branch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LDebugBreak final : public LTemplateInstruction<0, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break") -}; - - -class LCmpMapAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LCmpMapAndBranch(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareMap) - - Handle map() const { return hydrogen()->map().handle(); } -}; - - -class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> { - public: - LSeqStringGetChar(LOperand* string, LOperand* index) { - inputs_[0] = string; - inputs_[1] = index; - } - - LOperand* string() const { return inputs_[0]; } - LOperand* index() const { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char") - DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar) -}; - - -class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 0> { - public: - LSeqStringSetChar(LOperand* context, - LOperand* string, - LOperand* index, - LOperand* value) { - inputs_[0] = context; - inputs_[1] = string; - inputs_[2] = index; - inputs_[3] = value; - } - - LOperand* string() { return inputs_[1]; } - LOperand* index() { return inputs_[2]; } - LOperand* value() { return inputs_[3]; } - - DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char") - DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar) -}; - - -class LAddI final : public LTemplateInstruction<1, 2, 0> { - public: - LAddI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - static bool UseLea(HAdd* add) { - return !add->CheckFlag(HValue::kCanOverflow) && - add->BetterLeftOperand()->UseCount() > 1; - } - - DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i") - DECLARE_HYDROGEN_ACCESSOR(Add) -}; - - -class LMathMinMax final : public LTemplateInstruction<1, 2, 0> { - public: - LMathMinMax(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max") - DECLARE_HYDROGEN_ACCESSOR(MathMinMax) -}; - - -class LPower final : public LTemplateInstruction<1, 2, 0> { - public: - LPower(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(Power, "power") - DECLARE_HYDROGEN_ACCESSOR(Power) -}; - - -class LArithmeticD final : public LTemplateInstruction<1, 2, 0> { - public: - LArithmeticD(Token::Value op, LOperand* left, LOperand* right) - : op_(op) { - inputs_[0] = left; - inputs_[1] = right; - } - - Token::Value op() const { return op_; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - Opcode opcode() const override { return LInstruction::kArithmeticD; } - void CompileToNative(LCodeGen* generator) override; - const char* Mnemonic() const override; - - private: - Token::Value op_; -}; - - 
-class LArithmeticT final : public LTemplateInstruction<1, 3, 0> { - public: - LArithmeticT(Token::Value op, - LOperand* context, - LOperand* left, - LOperand* right) - : op_(op) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - Token::Value op() const { return op_; } - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - - Opcode opcode() const override { return LInstruction::kArithmeticT; } - void CompileToNative(LCodeGen* generator) override; - const char* Mnemonic() const override; - - DECLARE_HYDROGEN_ACCESSOR(BinaryOperation) - - private: - Token::Value op_; -}; - - -class LReturn final : public LTemplateInstruction<0, 3, 0> { - public: - explicit LReturn(LOperand* value, - LOperand* context, - LOperand* parameter_count) { - inputs_[0] = value; - inputs_[1] = context; - inputs_[2] = parameter_count; - } - - LOperand* value() { return inputs_[0]; } - LOperand* context() { return inputs_[1]; } - - bool has_constant_parameter_count() { - return parameter_count()->IsConstantOperand(); - } - LConstantOperand* constant_parameter_count() { - DCHECK(has_constant_parameter_count()); - return LConstantOperand::cast(parameter_count()); - } - LOperand* parameter_count() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(Return, "return") - DECLARE_HYDROGEN_ACCESSOR(Return) -}; - - -class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LLoadNamedField(LOperand* object) { - inputs_[0] = object; - } - - LOperand* object() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field") - DECLARE_HYDROGEN_ACCESSOR(LoadNamedField) -}; - - -class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LLoadFunctionPrototype(LOperand* function) { - inputs_[0] = function; - } - - DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype") - DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype) - - LOperand* function() { return inputs_[0]; } -}; - - -class LLoadRoot final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root") - DECLARE_HYDROGEN_ACCESSOR(LoadRoot) - - Heap::RootListIndex index() const { return hydrogen()->index(); } -}; - - -inline static bool ExternalArrayOpRequiresTemp( - Representation key_representation, - ElementsKind elements_kind) { - // Operations that require the key to be divided by two to be converted into - // an index cannot fold the scale operation into a load and need an extra - // temp register to do the work. 
- return SmiValuesAre31Bits() && key_representation.IsSmi() && - (elements_kind == UINT8_ELEMENTS || elements_kind == INT8_ELEMENTS || - elements_kind == UINT8_CLAMPED_ELEMENTS); -} - - -class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> { - public: - LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) { - inputs_[0] = elements; - inputs_[1] = key; - inputs_[2] = backing_store_owner; - } - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed") - DECLARE_HYDROGEN_ACCESSOR(LoadKeyed) - - bool is_fixed_typed_array() const { - return hydrogen()->is_fixed_typed_array(); - } - LOperand* elements() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - LOperand* backing_store_owner() { return inputs_[2]; } - void PrintDataTo(StringStream* stream) override; - uint32_t base_offset() const { return hydrogen()->base_offset(); } - ElementsKind elements_kind() const { - return hydrogen()->elements_kind(); - } -}; - - -class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LLoadContextSlot(LOperand* context) { - inputs_[0] = context; - } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot") - DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot) - - int slot_index() { return hydrogen()->slot_index(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LStoreContextSlot final : public LTemplateInstruction<0, 2, 1> { - public: - LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) { - inputs_[0] = context; - inputs_[1] = value; - temps_[0] = temp; - } - - LOperand* context() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot") - DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot) - - int slot_index() { return hydrogen()->slot_index(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LPushArgument final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LPushArgument(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument") -}; - - -class LDrop final : public LTemplateInstruction<0, 0, 0> { - public: - explicit LDrop(int count) : count_(count) { } - - int count() const { return count_; } - - DECLARE_CONCRETE_INSTRUCTION(Drop, "drop") - - private: - int count_; -}; - - -class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 0> { - public: - LStoreCodeEntry(LOperand* function, LOperand* code_object) { - inputs_[0] = function; - inputs_[1] = code_object; - } - - LOperand* function() { return inputs_[0]; } - LOperand* code_object() { return inputs_[1]; } - - void PrintDataTo(StringStream* stream) override; - - DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry") - DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry) -}; - - -class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> { - public: - LInnerAllocatedObject(LOperand* base_object, LOperand* offset) { - inputs_[0] = base_object; - inputs_[1] = offset; - } - - LOperand* base_object() const { return inputs_[0]; } - LOperand* offset() const { return inputs_[1]; } - - void PrintDataTo(StringStream* stream) override; - - DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object") -}; - - -class LThisFunction final : public LTemplateInstruction<1, 0, 0> { - public: - 
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
-  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
-};
-
-
-class LContext final : public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(Context, "context")
-  DECLARE_HYDROGEN_ACCESSOR(Context)
-};
-
-
-class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LDeclareGlobals(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
-  DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
-};
-
-
-class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
- public:
-  LCallWithDescriptor(CallInterfaceDescriptor descriptor,
-                      const ZoneList<LOperand*>& operands, Zone* zone)
-      : inputs_(descriptor.GetRegisterParameterCount() +
-                    kImplicitRegisterParameterCount,
-                zone) {
-    DCHECK(descriptor.GetRegisterParameterCount() +
-               kImplicitRegisterParameterCount ==
-           operands.length());
-    inputs_.AddAll(operands, zone);
-  }
-
-  LOperand* target() const { return inputs_[0]; }
-
-  DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
-
-  // The target and context are passed as implicit parameters that are not
-  // explicitly listed in the descriptor.
-  static const int kImplicitRegisterParameterCount = 2;
-
- private:
-  DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
-
-  void PrintDataTo(StringStream* stream) override;
-
-  int arity() const { return hydrogen()->argument_count() - 1; }
-
-  ZoneList<LOperand*> inputs_;
-
-  // Iterator support.
-  int InputCount() final { return inputs_.length(); }
-  LOperand* InputAt(int i) final { return inputs_[i]; }
-
-  int TempCount() final { return 0; }
-  LOperand* TempAt(int i) final { return NULL; }
-};
-
-
-class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
- public:
-  LInvokeFunction(LOperand* context, LOperand* function) {
-    inputs_[0] = context;
-    inputs_[1] = function;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* function() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
-  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
-
-  void PrintDataTo(StringStream* stream) override;
-
-  int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
- public:
-  LCallNewArray(LOperand* context, LOperand* constructor) {
-    inputs_[0] = context;
-    inputs_[1] = constructor;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* constructor() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
-  DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
-
-  void PrintDataTo(StringStream* stream) override;
-
-  int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallRuntime final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LCallRuntime(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
-  DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
-
-  bool ClobbersDoubleRegisters(Isolate* isolate) const override {
-    return save_doubles() == kDontSaveFPRegs;
-  }
-
-  const Runtime::Function* function() const { return hydrogen()->function(); }
-  int arity() const { return hydrogen()->argument_count(); }
-  SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
-};
-
-
-class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit
LInteger32ToDouble(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double") -}; - - -class LUint32ToDouble final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LUint32ToDouble(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double") -}; - - -class LNumberTagI final : public LTemplateInstruction<1, 1, 2> { - public: - LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i") -}; - - -class LNumberTagU final : public LTemplateInstruction<1, 1, 2> { - public: - LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u") -}; - - -class LNumberTagD final : public LTemplateInstruction<1, 1, 1> { - public: - explicit LNumberTagD(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d") - DECLARE_HYDROGEN_ACCESSOR(Change) -}; - - -// Sometimes truncating conversion from a tagged value to an int32. -class LDoubleToI final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LDoubleToI(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i") - DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) - - bool truncating() { return hydrogen()->CanTruncateToInt32(); } -}; - - -class LDoubleToSmi final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LDoubleToSmi(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi") - DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) -}; - - -// Truncating conversion from a tagged value to an int32. 
-class LTaggedToI final : public LTemplateInstruction<1, 1, 1> { - public: - LTaggedToI(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i") - DECLARE_HYDROGEN_ACCESSOR(Change) - - bool truncating() { return hydrogen()->CanTruncateToInt32(); } -}; - - -class LSmiTag final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LSmiTag(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag") - DECLARE_HYDROGEN_ACCESSOR(Change) -}; - - -class LNumberUntagD final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LNumberUntagD(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag") - DECLARE_HYDROGEN_ACCESSOR(Change); - - bool truncating() { return hydrogen()->CanTruncateToNumber(); } -}; - - -class LSmiUntag final : public LTemplateInstruction<1, 1, 0> { - public: - LSmiUntag(LOperand* value, bool needs_check) - : needs_check_(needs_check) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - bool needs_check() const { return needs_check_; } - - DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag") - - private: - bool needs_check_; -}; - - -class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> { - public: - LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) { - inputs_[0] = object; - inputs_[1] = value; - temps_[0] = temp; - } - - LOperand* object() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") - DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) - - void PrintDataTo(StringStream* stream) override; - - Representation representation() const { - return hydrogen()->field_representation(); - } -}; - - -class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> { - public: - LStoreKeyed(LOperand* object, LOperand* key, LOperand* value, - LOperand* backing_store_owner) { - inputs_[0] = object; - inputs_[1] = key; - inputs_[2] = value; - inputs_[3] = backing_store_owner; - } - - bool is_fixed_typed_array() const { - return hydrogen()->is_fixed_typed_array(); - } - LOperand* elements() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - LOperand* value() { return inputs_[2]; } - LOperand* backing_store_owner() { return inputs_[3]; } - ElementsKind elements_kind() const { return hydrogen()->elements_kind(); } - - DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyed) - - void PrintDataTo(StringStream* stream) override; - bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); } - uint32_t base_offset() const { return hydrogen()->base_offset(); } -}; - - -class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 2> { - public: - LTransitionElementsKind(LOperand* object, - LOperand* context, - LOperand* new_map_temp, - LOperand* temp) { - inputs_[0] = object; - inputs_[1] = context; - temps_[0] = new_map_temp; - temps_[1] = temp; - } - - LOperand* object() { return inputs_[0]; } - LOperand* context() { return inputs_[1]; } - LOperand* new_map_temp() { return temps_[0]; } - LOperand* temp() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind, - 
"transition-elements-kind") - DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind) - - void PrintDataTo(StringStream* stream) override; - - Handle original_map() { return hydrogen()->original_map().handle(); } - Handle transitioned_map() { - return hydrogen()->transitioned_map().handle(); - } - ElementsKind from_kind() { return hydrogen()->from_kind(); } - ElementsKind to_kind() { return hydrogen()->to_kind(); } -}; - - -class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> { - public: - LTrapAllocationMemento(LOperand* object, - LOperand* temp) { - inputs_[0] = object; - temps_[0] = temp; - } - - LOperand* object() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, - "trap-allocation-memento") -}; - - -class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> { - public: - LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements, - LOperand* key, LOperand* current_capacity) { - inputs_[0] = context; - inputs_[1] = object; - inputs_[2] = elements; - inputs_[3] = key; - inputs_[4] = current_capacity; - } - - LOperand* context() { return inputs_[0]; } - LOperand* object() { return inputs_[1]; } - LOperand* elements() { return inputs_[2]; } - LOperand* key() { return inputs_[3]; } - LOperand* current_capacity() { return inputs_[4]; } - - bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; } - - DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements) - DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements") -}; - - -class LStringAdd final : public LTemplateInstruction<1, 3, 0> { - public: - LStringAdd(LOperand* context, LOperand* left, LOperand* right) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add") - DECLARE_HYDROGEN_ACCESSOR(StringAdd) -}; - - -class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> { - public: - LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) { - inputs_[0] = context; - inputs_[1] = string; - inputs_[2] = index; - } - - LOperand* context() { return inputs_[0]; } - LOperand* string() { return inputs_[1]; } - LOperand* index() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at") - DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt) -}; - - -class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> { - public: - explicit LStringCharFromCode(LOperand* context, LOperand* char_code) { - inputs_[0] = context; - inputs_[1] = char_code; - } - - LOperand* context() { return inputs_[0]; } - LOperand* char_code() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code") - DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode) -}; - - -class LCheckValue final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckValue(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value") - DECLARE_HYDROGEN_ACCESSOR(CheckValue) -}; - - -class LCheckArrayBufferNotNeutered final - : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckArrayBufferNotNeutered(LOperand* view) { inputs_[0] = view; } - - LOperand* view() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered, - 
"check-array-buffer-not-neutered") - DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered) -}; - - -class LCheckInstanceType final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckInstanceType(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type") - DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType) -}; - - -class LCheckMaps final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckMaps(LOperand* value = NULL) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps") - DECLARE_HYDROGEN_ACCESSOR(CheckMaps) -}; - - -class LCheckSmi final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LCheckSmi(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi") -}; - - -class LClampDToUint8 final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LClampDToUint8(LOperand* unclamped) { - inputs_[0] = unclamped; - } - - LOperand* unclamped() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8") -}; - - -class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LClampIToUint8(LOperand* unclamped) { - inputs_[0] = unclamped; - } - - LOperand* unclamped() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8") -}; - - -class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> { - public: - LClampTToUint8(LOperand* unclamped, - LOperand* temp_xmm) { - inputs_[0] = unclamped; - temps_[0] = temp_xmm; - } - - LOperand* unclamped() { return inputs_[0]; } - LOperand* temp_xmm() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8") -}; - - -class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckNonSmi(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi") - DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject) -}; - - -class LAllocate final : public LTemplateInstruction<1, 2, 1> { - public: - LAllocate(LOperand* context, LOperand* size, LOperand* temp) { - inputs_[0] = context; - inputs_[1] = size; - temps_[0] = temp; - } - - LOperand* context() { return inputs_[0]; } - LOperand* size() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate") - DECLARE_HYDROGEN_ACCESSOR(Allocate) -}; - -class LFastAllocate final : public LTemplateInstruction<1, 1, 1> { - public: - LFastAllocate(LOperand* size, LOperand* temp) { - inputs_[0] = size; - temps_[0] = temp; - } - - LOperand* size() const { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate") - DECLARE_HYDROGEN_ACCESSOR(Allocate) -}; - -class LTypeof final : public LTemplateInstruction<1, 2, 0> { - public: - LTypeof(LOperand* context, LOperand* value) { - inputs_[0] = context; - inputs_[1] = value; - } - - LOperand* context() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof") -}; - - -class LTypeofIsAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LTypeofIsAndBranch(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - 
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch") - DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch) - - Handle type_literal() { return hydrogen()->type_literal(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LOsrEntry final : public LTemplateInstruction<0, 0, 0> { - public: - LOsrEntry() {} - - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry") -}; - - -class LStackCheck final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LStackCheck(LOperand* context) { - inputs_[0] = context; - } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check") - DECLARE_HYDROGEN_ACCESSOR(StackCheck) - - Label* done_label() { return &done_label_; } - - private: - Label done_label_; -}; - - -class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> { - public: - LForInPrepareMap(LOperand* context, LOperand* object) { - inputs_[0] = context; - inputs_[1] = object; - } - - LOperand* context() { return inputs_[0]; } - LOperand* object() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map") -}; - - -class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LForInCacheArray(LOperand* map) { - inputs_[0] = map; - } - - LOperand* map() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array") - - int idx() { - return HForInCacheArray::cast(this->hydrogen_value())->idx(); - } -}; - - -class LCheckMapValue final : public LTemplateInstruction<0, 2, 0> { - public: - LCheckMapValue(LOperand* value, LOperand* map) { - inputs_[0] = value; - inputs_[1] = map; - } - - LOperand* value() { return inputs_[0]; } - LOperand* map() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value") -}; - - -class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> { - public: - LLoadFieldByIndex(LOperand* object, LOperand* index) { - inputs_[0] = object; - inputs_[1] = index; - } - - LOperand* object() { return inputs_[0]; } - LOperand* index() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index") -}; - - -class LChunkBuilder; -class LPlatformChunk final : public LChunk { - public: - LPlatformChunk(CompilationInfo* info, HGraph* graph) - : LChunk(info, graph), - dehoisted_key_ids_(graph->GetMaximumValueID(), graph->zone()) { } - - int GetNextSpillIndex(RegisterKind kind); - LOperand* GetNextSpillSlot(RegisterKind kind); - BitVector* GetDehoistedKeyIds() { return &dehoisted_key_ids_; } - bool IsDehoistedKey(HValue* value) { - return dehoisted_key_ids_.Contains(value->id()); - } - - private: - BitVector dehoisted_key_ids_; -}; - - -class LChunkBuilder final : public LChunkBuilderBase { - public: - LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator) - : LChunkBuilderBase(info, graph), - current_instruction_(NULL), - current_block_(NULL), - next_block_(NULL), - allocator_(allocator) {} - - // Build the sequence for the graph. - LPlatformChunk* Build(); - - // Declare methods that deal with the individual node types. 
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node); - HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) -#undef DECLARE_DO - - LInstruction* DoMathFloor(HUnaryMathOperation* instr); - LInstruction* DoMathRound(HUnaryMathOperation* instr); - LInstruction* DoMathFround(HUnaryMathOperation* instr); - LInstruction* DoMathAbs(HUnaryMathOperation* instr); - LInstruction* DoMathCos(HUnaryMathOperation* instr); - LInstruction* DoMathLog(HUnaryMathOperation* instr); - LInstruction* DoMathExp(HUnaryMathOperation* instr); - LInstruction* DoMathSin(HUnaryMathOperation* instr); - LInstruction* DoMathSqrt(HUnaryMathOperation* instr); - LInstruction* DoMathPowHalf(HUnaryMathOperation* instr); - LInstruction* DoMathClz32(HUnaryMathOperation* instr); - LInstruction* DoDivByPowerOf2I(HDiv* instr); - LInstruction* DoDivByConstI(HDiv* instr); - LInstruction* DoDivI(HDiv* instr); - LInstruction* DoModByPowerOf2I(HMod* instr); - LInstruction* DoModByConstI(HMod* instr); - LInstruction* DoModI(HMod* instr); - LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr); - LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr); - LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr); - - private: - // Methods for getting operands for Use / Define / Temp. - LUnallocated* ToUnallocated(Register reg); - LUnallocated* ToUnallocated(XMMRegister reg); - - // Methods for setting up define-use relationships. - MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand); - MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register); - MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value, - XMMRegister fixed_register); - - // A value that is guaranteed to be allocated to a register. - // Operand created by UseRegister is guaranteed to be live until the end of - // instruction. This means that register allocator will not reuse it's - // register for any other operand inside instruction. - // Operand created by UseRegisterAtStart is guaranteed to be live only at - // instruction start. Register allocator is free to assign the same register - // to some other operand used inside instruction (i.e. temporary or - // output). - MUST_USE_RESULT LOperand* UseRegister(HValue* value); - MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value); - - // An input operand in a register that may be trashed. - MUST_USE_RESULT LOperand* UseTempRegister(HValue* value); - - // An input operand in a register that may be trashed or a constant operand. - MUST_USE_RESULT LOperand* UseTempRegisterOrConstant(HValue* value); - - // An input operand in a register or stack slot. - MUST_USE_RESULT LOperand* Use(HValue* value); - MUST_USE_RESULT LOperand* UseAtStart(HValue* value); - - // An input operand in a register, stack slot or a constant operand. - MUST_USE_RESULT LOperand* UseOrConstant(HValue* value); - MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value); - - // An input operand in a register or a constant operand. - MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value); - MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value); - - // An input operand in a constant operand. - MUST_USE_RESULT LOperand* UseConstant(HValue* value); - - // An input operand in register, stack slot or a constant operand. - // Will not be moved to a register even if one is freely available. - MUST_USE_RESULT LOperand* UseAny(HValue* value) override; - - // Temporary operand that must be in a register. 
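// HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) above is the classic X-macro
// pattern: one list macro is expanded with different per-entry macros to stamp
// out declarations, definitions, dispatch tables, and so on. A self-contained
// sketch of the idea (the node names and the list itself are made up for
// illustration):
#include <cstdio>

#define SKETCH_NODE_LIST(V) \
  V(Add)                    \
  V(Mul)                    \
  V(Branch)

// Expansion 1: declare one handler per node type.
#define DECLARE_HANDLER(type) void Do##type();
SKETCH_NODE_LIST(DECLARE_HANDLER)
#undef DECLARE_HANDLER

// Expansion 2: define them (here they just print their own name).
#define DEFINE_HANDLER(type) \
  void Do##type() { std::printf("Do%s\n", #type); }
SKETCH_NODE_LIST(DEFINE_HANDLER)
#undef DEFINE_HANDLER

int main() {
  DoAdd();
  DoMul();
  DoBranch();
  return 0;
}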
- MUST_USE_RESULT LUnallocated* TempRegister(); - MUST_USE_RESULT LOperand* FixedTemp(Register reg); - MUST_USE_RESULT LOperand* FixedTemp(XMMRegister reg); - - // Methods for setting up define-use relationships. - // Return the same instruction that they are passed. - LInstruction* Define(LTemplateResultInstruction<1>* instr, - LUnallocated* result); - LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr); - LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr, - int index); - LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr); - LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr, - Register reg); - LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr, - XMMRegister reg); - // Assigns an environment to an instruction. An instruction which can - // deoptimize must have an environment. - LInstruction* AssignEnvironment(LInstruction* instr); - // Assigns a pointer map to an instruction. An instruction which can - // trigger a GC or a lazy deoptimization must have a pointer map. - LInstruction* AssignPointerMap(LInstruction* instr); - - enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY }; - - // Marks a call for the register allocator. Assigns a pointer map to - // support GC and lazy deoptimization. Assigns an environment to support - // eager deoptimization if CAN_DEOPTIMIZE_EAGERLY. - LInstruction* MarkAsCall( - LInstruction* instr, - HInstruction* hinstr, - CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY); - - void VisitInstruction(HInstruction* current); - void AddInstruction(LInstruction* instr, HInstruction* current); - - void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block); - LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr); - LInstruction* DoArithmeticD(Token::Value op, - HArithmeticBinaryOperation* instr); - LInstruction* DoArithmeticT(Token::Value op, - HBinaryOperation* instr); - void FindDehoistedKeyDefinitions(HValue* candidate); - - HInstruction* current_instruction_; - HBasicBlock* current_block_; - HBasicBlock* next_block_; - LAllocator* allocator_; - - DISALLOW_COPY_AND_ASSIGN(LChunkBuilder); -}; - -#undef DECLARE_HYDROGEN_ACCESSOR -#undef DECLARE_CONCRETE_INSTRUCTION - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_X64_LITHIUM_X64_H_ diff --git a/src/crankshaft/x87/OWNERS b/src/crankshaft/x87/OWNERS deleted file mode 100644 index 61245ae8e2..0000000000 --- a/src/crankshaft/x87/OWNERS +++ /dev/null @@ -1,2 +0,0 @@ -weiliang.lin@intel.com -chunyang.dai@intel.com diff --git a/src/crankshaft/x87/lithium-codegen-x87.cc b/src/crankshaft/x87/lithium-codegen-x87.cc deleted file mode 100644 index 95ba345fe5..0000000000 --- a/src/crankshaft/x87/lithium-codegen-x87.cc +++ /dev/null @@ -1,5587 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#if V8_TARGET_ARCH_X87 - -#include "src/crankshaft/x87/lithium-codegen-x87.h" - -#include "src/base/bits.h" -#include "src/builtins/builtins-constructor.h" -#include "src/code-factory.h" -#include "src/code-stubs.h" -#include "src/codegen.h" -#include "src/deoptimizer.h" -#include "src/ic/ic.h" -#include "src/ic/stub-cache.h" -#include "src/x87/frames-x87.h" - -namespace v8 { -namespace internal { - -// When invoking builtins, we need to record the safepoint in the middle of -// the invoke instruction sequence generated by the macro assembler. 
-class SafepointGenerator final : public CallWrapper { - public: - SafepointGenerator(LCodeGen* codegen, - LPointerMap* pointers, - Safepoint::DeoptMode mode) - : codegen_(codegen), - pointers_(pointers), - deopt_mode_(mode) {} - virtual ~SafepointGenerator() {} - - void BeforeCall(int call_size) const override {} - - void AfterCall() const override { - codegen_->RecordSafepoint(pointers_, deopt_mode_); - } - - private: - LCodeGen* codegen_; - LPointerMap* pointers_; - Safepoint::DeoptMode deopt_mode_; -}; - - -#define __ masm()-> - -bool LCodeGen::GenerateCode() { - LPhase phase("Z_Code generation", chunk()); - DCHECK(is_unused()); - status_ = GENERATING; - - // Open a frame scope to indicate that there is a frame on the stack. The - // MANUAL indicates that the scope shouldn't actually generate code to set up - // the frame (that is done in GeneratePrologue). - FrameScope frame_scope(masm_, StackFrame::MANUAL); - - return GeneratePrologue() && - GenerateBody() && - GenerateDeferredCode() && - GenerateJumpTable() && - GenerateSafepointTable(); -} - - -void LCodeGen::FinishCode(Handle code) { - DCHECK(is_done()); - code->set_stack_slots(GetTotalFrameSlotCount()); - code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); - PopulateDeoptimizationData(code); - if (info()->ShouldEnsureSpaceForLazyDeopt()) { - Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code); - } -} - - -#ifdef _MSC_VER -void LCodeGen::MakeSureStackPagesMapped(int offset) { - const int kPageSize = 4 * KB; - for (offset -= kPageSize; offset > 0; offset -= kPageSize) { - __ mov(Operand(esp, offset), eax); - } -} -#endif - - -bool LCodeGen::GeneratePrologue() { - DCHECK(is_generating()); - - if (info()->IsOptimizing()) { - ProfileEntryHookStub::MaybeCallEntryHook(masm_); - } - - info()->set_prologue_offset(masm_->pc_offset()); - if (NeedsEagerFrame()) { - DCHECK(!frame_is_built_); - frame_is_built_ = true; - if (info()->IsStub()) { - __ StubPrologue(StackFrame::STUB); - } else { - __ Prologue(info()->GeneratePreagedPrologue()); - } - } - - // Reserve space for the stack slots needed by the code. - int slots = GetStackSlotCount(); - DCHECK(slots != 0 || !info()->IsOptimizing()); - if (slots > 0) { - __ sub(Operand(esp), Immediate(slots * kPointerSize)); -#ifdef _MSC_VER - MakeSureStackPagesMapped(slots * kPointerSize); -#endif - if (FLAG_debug_code) { - __ push(eax); - __ mov(Operand(eax), Immediate(slots)); - Label loop; - __ bind(&loop); - __ mov(MemOperand(esp, eax, times_4, 0), Immediate(kSlotsZapValue)); - __ dec(eax); - __ j(not_zero, &loop); - __ pop(eax); - } - } - - // Initailize FPU state. - __ fninit(); - - return !is_aborted(); -} - - -void LCodeGen::DoPrologue(LPrologue* instr) { - Comment(";;; Prologue begin"); - - // Possibly allocate a local context. - if (info_->scope()->NeedsContext()) { - Comment(";;; Allocate local context"); - bool need_write_barrier = true; - // Argument to NewContext is the function, which is still in edi. 
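// MakeSureStackPagesMapped() above (MSVC-only, called from GeneratePrologue
// right after the frame is reserved with "sub esp") stores one word into every
// 4 KB page of the new frame, highest offset first, so the OS guard page is
// committed one page at a time. A small sketch that just computes which
// offsets such a loop would touch (sizes here are illustrative):
#include <cstdio>
#include <vector>

std::vector<int> TouchedOffsets(int frame_size, int page_size = 4 * 1024) {
  std::vector<int> offsets;
  // Mirrors: for (offset -= kPageSize; offset > 0; offset -= kPageSize)
  for (int offset = frame_size - page_size; offset > 0; offset -= page_size) {
    offsets.push_back(offset);  // a store to [esp + offset] would go here
  }
  return offsets;
}

int main() {
  for (int off : TouchedOffsets(10 * 1024)) {
    std::printf("touch [esp + %d]\n", off);  // prints 6144, then 2048
  }
  return 0;
}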
- int slots = info_->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; - Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt; - if (info()->scope()->is_script_scope()) { - __ push(edi); - __ Push(info()->scope()->scope_info()); - __ CallRuntime(Runtime::kNewScriptContext); - deopt_mode = Safepoint::kLazyDeopt; - } else { - if (slots <= ConstructorBuiltins::MaximumFunctionContextSlots()) { - Callable callable = CodeFactory::FastNewFunctionContext( - isolate(), info()->scope()->scope_type()); - __ mov(FastNewFunctionContextDescriptor::SlotsRegister(), - Immediate(slots)); - __ Call(callable.code(), RelocInfo::CODE_TARGET); - // Result of the FastNewFunctionContext builtin is always in new space. - need_write_barrier = false; - } else { - __ Push(edi); - __ Push(Smi::FromInt(info()->scope()->scope_type())); - __ CallRuntime(Runtime::kNewFunctionContext); - } - } - RecordSafepoint(deopt_mode); - - // Context is returned in eax. It replaces the context passed to us. - // It's saved in the stack and kept live in esi. - __ mov(esi, eax); - __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax); - - // Copy parameters into context if necessary. - int num_parameters = info()->scope()->num_parameters(); - int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0; - for (int i = first_parameter; i < num_parameters; i++) { - Variable* var = (i == -1) ? info()->scope()->receiver() - : info()->scope()->parameter(i); - if (var->IsContextSlot()) { - int parameter_offset = StandardFrameConstants::kCallerSPOffset + - (num_parameters - 1 - i) * kPointerSize; - // Load parameter from stack. - __ mov(eax, Operand(ebp, parameter_offset)); - // Store it in the context. - int context_offset = Context::SlotOffset(var->index()); - __ mov(Operand(esi, context_offset), eax); - // Update the write barrier. This clobbers eax and ebx. - if (need_write_barrier) { - __ RecordWriteContextSlot(esi, context_offset, eax, ebx, - kDontSaveFPRegs); - } else if (FLAG_debug_code) { - Label done; - __ JumpIfInNewSpace(esi, eax, &done, Label::kNear); - __ Abort(kExpectedNewSpaceObject); - __ bind(&done); - } - } - } - Comment(";;; End allocate local context"); - } - - Comment(";;; Prologue end"); -} - -void LCodeGen::GenerateOsrPrologue() { UNREACHABLE(); } - -void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { - if (instr->IsCall()) { - EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); - } - if (!instr->IsLazyBailout() && !instr->IsGap()) { - safepoints_.BumpLastLazySafepointIndex(); - } - FlushX87StackIfNecessary(instr); -} - - -void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { - // When return from function call, FPU should be initialized again. 
- if (instr->IsCall() && instr->ClobbersDoubleRegisters(isolate())) { - bool double_result = instr->HasDoubleRegisterResult(); - if (double_result) { - __ lea(esp, Operand(esp, -kDoubleSize)); - __ fstp_d(Operand(esp, 0)); - } - __ fninit(); - if (double_result) { - __ fld_d(Operand(esp, 0)); - __ lea(esp, Operand(esp, kDoubleSize)); - } - } - if (instr->IsGoto()) { - x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr), this); - } else if (FLAG_debug_code && FLAG_enable_slow_asserts && - !instr->IsGap() && !instr->IsReturn()) { - if (instr->ClobbersDoubleRegisters(isolate())) { - if (instr->HasDoubleRegisterResult()) { - DCHECK_EQ(1, x87_stack_.depth()); - } else { - DCHECK_EQ(0, x87_stack_.depth()); - } - } - __ VerifyX87StackDepth(x87_stack_.depth()); - } -} - - -bool LCodeGen::GenerateJumpTable() { - if (!jump_table_.length()) return !is_aborted(); - - Label needs_frame; - Comment(";;; -------------------- Jump table --------------------"); - - for (int i = 0; i < jump_table_.length(); i++) { - Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i]; - __ bind(&table_entry->label); - Address entry = table_entry->address; - DeoptComment(table_entry->deopt_info); - if (table_entry->needs_frame) { - DCHECK(!info()->saves_caller_doubles()); - __ push(Immediate(ExternalReference::ForDeoptEntry(entry))); - __ call(&needs_frame); - } else { - __ call(entry, RelocInfo::RUNTIME_ENTRY); - } - } - if (needs_frame.is_linked()) { - __ bind(&needs_frame); - /* stack layout - 3: entry address - 2: return address <-- esp - 1: garbage - 0: garbage - */ - __ push(MemOperand(esp, 0)); // Copy return address. - __ push(MemOperand(esp, 2 * kPointerSize)); // Copy entry address. - - /* stack layout - 4: entry address - 3: return address - 1: return address - 0: entry address <-- esp - */ - __ mov(MemOperand(esp, 3 * kPointerSize), ebp); // Save ebp. - // Fill ebp with the right stack frame address. - __ lea(ebp, MemOperand(esp, 3 * kPointerSize)); - - // This variant of deopt can only be used with stubs. Since we don't - // have a function pointer to install in the stack frame that we're - // building, install a special marker there instead. - DCHECK(info()->IsStub()); - __ mov(MemOperand(esp, 2 * kPointerSize), - Immediate(Smi::FromInt(StackFrame::STUB))); - - /* stack layout - 3: old ebp - 2: stub marker - 1: return address - 0: entry address <-- esp - */ - __ ret(0); // Call the continuation without clobbering registers. - } - return !is_aborted(); -} - - -bool LCodeGen::GenerateDeferredCode() { - DCHECK(is_generating()); - if (deferred_.length() > 0) { - for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { - LDeferredCode* code = deferred_[i]; - X87Stack copy(code->x87_stack()); - x87_stack_ = copy; - - HValue* value = - instructions_->at(code->instruction_index())->hydrogen_value(); - RecordAndWritePosition(value->position()); - - Comment(";;; <@%d,#%d> " - "-------------------- Deferred %s --------------------", - code->instruction_index(), - code->instr()->hydrogen_value()->id(), - code->instr()->Mnemonic()); - __ bind(code->entry()); - if (NeedsDeferredFrame()) { - Comment(";;; Build frame"); - DCHECK(!frame_is_built_); - DCHECK(info()->IsStub()); - frame_is_built_ = true; - // Build the frame in such a way that esi isn't trashed. - __ push(ebp); // Caller's frame pointer. 
- __ push(Immediate(Smi::FromInt(StackFrame::STUB))); - __ lea(ebp, Operand(esp, TypedFrameConstants::kFixedFrameSizeFromFp)); - Comment(";;; Deferred code"); - } - code->Generate(); - if (NeedsDeferredFrame()) { - __ bind(code->done()); - Comment(";;; Destroy frame"); - DCHECK(frame_is_built_); - frame_is_built_ = false; - __ mov(esp, ebp); - __ pop(ebp); - } - __ jmp(code->exit()); - } - } - - // Deferred code is the last part of the instruction sequence. Mark - // the generated code as done unless we bailed out. - if (!is_aborted()) status_ = DONE; - return !is_aborted(); -} - - -bool LCodeGen::GenerateSafepointTable() { - DCHECK(is_done()); - if (info()->ShouldEnsureSpaceForLazyDeopt()) { - // For lazy deoptimization we need space to patch a call after every call. - // Ensure there is always space for such patching, even if the code ends - // in a call. - int target_offset = masm()->pc_offset() + Deoptimizer::patch_size(); - while (masm()->pc_offset() < target_offset) { - masm()->nop(); - } - } - safepoints_.Emit(masm(), GetTotalFrameSlotCount()); - return !is_aborted(); -} - - -Register LCodeGen::ToRegister(int code) const { - return Register::from_code(code); -} - - -X87Register LCodeGen::ToX87Register(int code) const { - return X87Register::from_code(code); -} - - -void LCodeGen::X87LoadForUsage(X87Register reg) { - DCHECK(x87_stack_.Contains(reg)); - x87_stack_.Fxch(reg); - x87_stack_.pop(); -} - - -void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) { - DCHECK(x87_stack_.Contains(reg1)); - DCHECK(x87_stack_.Contains(reg2)); - if (reg1.is(reg2) && x87_stack_.depth() == 1) { - __ fld(x87_stack_.st(reg1)); - x87_stack_.push(reg1); - x87_stack_.pop(); - x87_stack_.pop(); - } else { - x87_stack_.Fxch(reg1, 1); - x87_stack_.Fxch(reg2); - x87_stack_.pop(); - x87_stack_.pop(); - } -} - - -int LCodeGen::X87Stack::GetLayout() { - int layout = stack_depth_; - for (int i = 0; i < stack_depth_; i++) { - layout |= (stack_[stack_depth_ - 1 - i].code() << ((i + 1) * 3)); - } - - return layout; -} - - -void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) { - DCHECK(is_mutable_); - DCHECK(Contains(reg) && stack_depth_ > other_slot); - int i = ArrayIndex(reg); - int st = st2idx(i); - if (st != other_slot) { - int other_i = st2idx(other_slot); - X87Register other = stack_[other_i]; - stack_[other_i] = reg; - stack_[i] = other; - if (st == 0) { - __ fxch(other_slot); - } else if (other_slot == 0) { - __ fxch(st); - } else { - __ fxch(st); - __ fxch(other_slot); - __ fxch(st); - } - } -} - - -int LCodeGen::X87Stack::st2idx(int pos) { - return stack_depth_ - pos - 1; -} - - -int LCodeGen::X87Stack::ArrayIndex(X87Register reg) { - for (int i = 0; i < stack_depth_; i++) { - if (stack_[i].is(reg)) return i; - } - UNREACHABLE(); -} - - -bool LCodeGen::X87Stack::Contains(X87Register reg) { - for (int i = 0; i < stack_depth_; i++) { - if (stack_[i].is(reg)) return true; - } - return false; -} - - -void LCodeGen::X87Stack::Free(X87Register reg) { - DCHECK(is_mutable_); - DCHECK(Contains(reg)); - int i = ArrayIndex(reg); - int st = st2idx(i); - if (st > 0) { - // keep track of how fstp(i) changes the order of elements - int tos_i = st2idx(0); - stack_[i] = stack_[tos_i]; - } - pop(); - __ fstp(st); -} - - -void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) { - if (x87_stack_.Contains(dst)) { - x87_stack_.Fxch(dst); - __ fstp(0); - } else { - x87_stack_.push(dst); - } - X87Fld(src, opts); -} - - -void LCodeGen::X87Mov(X87Register dst, X87Register src, X87OperandType 
opts) { - if (x87_stack_.Contains(dst)) { - x87_stack_.Fxch(dst); - __ fstp(0); - x87_stack_.pop(); - // Push ST(i) onto the FPU register stack - __ fld(x87_stack_.st(src)); - x87_stack_.push(dst); - } else { - // Push ST(i) onto the FPU register stack - __ fld(x87_stack_.st(src)); - x87_stack_.push(dst); - } -} - - -void LCodeGen::X87Fld(Operand src, X87OperandType opts) { - DCHECK(!src.is_reg_only()); - switch (opts) { - case kX87DoubleOperand: - __ fld_d(src); - break; - case kX87FloatOperand: - __ fld_s(src); - break; - case kX87IntOperand: - __ fild_s(src); - break; - default: - UNREACHABLE(); - } -} - - -void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) { - DCHECK(!dst.is_reg_only()); - x87_stack_.Fxch(src); - switch (opts) { - case kX87DoubleOperand: - __ fst_d(dst); - break; - case kX87FloatOperand: - __ fst_s(dst); - break; - case kX87IntOperand: - __ fist_s(dst); - break; - default: - UNREACHABLE(); - } -} - - -void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) { - DCHECK(is_mutable_); - if (Contains(reg)) { - Free(reg); - } - // Mark this register as the next register to write to - stack_[stack_depth_] = reg; -} - - -void LCodeGen::X87Stack::CommitWrite(X87Register reg) { - DCHECK(is_mutable_); - // Assert the reg is prepared to write, but not on the virtual stack yet - DCHECK(!Contains(reg) && stack_[stack_depth_].is(reg) && - stack_depth_ < X87Register::kMaxNumAllocatableRegisters); - stack_depth_++; -} - - -void LCodeGen::X87PrepareBinaryOp( - X87Register left, X87Register right, X87Register result) { - // You need to use DefineSameAsFirst for x87 instructions - DCHECK(result.is(left)); - x87_stack_.Fxch(right, 1); - x87_stack_.Fxch(left); -} - - -void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) { - if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters(isolate())) { - bool double_inputs = instr->HasDoubleRegisterInput(); - - // Flush stack from tos down, since FreeX87() will mess with tos - for (int i = stack_depth_-1; i >= 0; i--) { - X87Register reg = stack_[i]; - // Skip registers which contain the inputs for the next instruction - // when flushing the stack - if (double_inputs && instr->IsDoubleInput(reg, cgen)) { - continue; - } - Free(reg); - if (i < stack_depth_-1) i++; - } - } - if (instr->IsReturn()) { - while (stack_depth_ > 0) { - __ fstp(0); - stack_depth_--; - } - if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0); - } -} - - -void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr, - LCodeGen* cgen) { - // For going to a joined block, an explicit LClobberDoubles is inserted before - // LGoto. Because all used x87 registers are spilled to stack slots. The - // ResolvePhis phase of register allocator could guarantee the two input's x87 - // stacks have the same layout. So don't check stack_depth_ <= 1 here. - int goto_block_id = goto_instr->block_id(); - if (current_block_id + 1 != goto_block_id) { - // If we have a value on the x87 stack on leaving a block, it must be a - // phi input. If the next block we compile is not the join block, we have - // to discard the stack state. - // Before discarding the stack state, we need to save it if the "goto block" - // has unreachable last predecessor when FLAG_unreachable_code_elimination. 
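// The X87Stack helper above shadows the physical x87 register stack in
// software: st2idx() maps an FPU stack position to an index in stack_[],
// Fxch() records the effect of an emitted fxch, and Free() accounts for how
// "fstp st(i)" moves the old top of stack into slot i before popping. A tiny
// model of that bookkeeping, using plain ints as virtual register ids:
#include <cassert>

class VirtualX87Stack {
 public:
  void Push(int reg) { stack_[depth_++] = reg; }

  // Position counted from the top of the FPU stack -> index into stack_[].
  int St2Idx(int pos) const { return depth_ - pos - 1; }

  // Model of "fxch st(pos)": swap top of stack with position pos.
  void Fxch(int pos) {
    int top = St2Idx(0), other = St2Idx(pos);
    int tmp = stack_[top];
    stack_[top] = stack_[other];
    stack_[other] = tmp;
  }

  // Model of "fstp st(pos)": the popped top lands in slot pos.
  void Fstp(int pos) {
    if (pos > 0) stack_[St2Idx(pos)] = stack_[St2Idx(0)];
    --depth_;
  }

  int Top() const { return stack_[St2Idx(0)]; }

 private:
  int stack_[8] = {0};
  int depth_ = 0;
};

int main() {
  VirtualX87Stack s;
  s.Push(5);  // becomes st(1) after the next push
  s.Push(7);  // st(0)
  s.Fxch(1);  // 5 is now on top, 7 below
  assert(s.Top() == 5);
  s.Fstp(1);  // store top into st(1), pop: 5 remains as the new st(0)
  assert(s.Top() == 5);
  return 0;
}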
- if (FLAG_unreachable_code_elimination) { - int length = goto_instr->block()->predecessors()->length(); - bool has_unreachable_last_predecessor = false; - for (int i = 0; i < length; i++) { - HBasicBlock* block = goto_instr->block()->predecessors()->at(i); - if (block->IsUnreachable() && - (block->block_id() + 1) == goto_block_id) { - has_unreachable_last_predecessor = true; - } - } - if (has_unreachable_last_predecessor) { - if (cgen->x87_stack_map_.find(goto_block_id) == - cgen->x87_stack_map_.end()) { - X87Stack* stack = new (cgen->zone()) X87Stack(*this); - cgen->x87_stack_map_.insert(std::make_pair(goto_block_id, stack)); - } - } - } - - // Discard the stack state. - stack_depth_ = 0; - } -} - - -void LCodeGen::EmitFlushX87ForDeopt() { - // The deoptimizer does not support X87 Registers. But as long as we - // deopt from a stub its not a problem, since we will re-materialize the - // original stub inputs, which can't be double registers. - // DCHECK(info()->IsStub()); - if (FLAG_debug_code && FLAG_enable_slow_asserts) { - __ pushfd(); - __ VerifyX87StackDepth(x87_stack_.depth()); - __ popfd(); - } - - // Flush X87 stack in the deoptimizer entry. -} - - -Register LCodeGen::ToRegister(LOperand* op) const { - DCHECK(op->IsRegister()); - return ToRegister(op->index()); -} - - -X87Register LCodeGen::ToX87Register(LOperand* op) const { - DCHECK(op->IsDoubleRegister()); - return ToX87Register(op->index()); -} - - -int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { - return ToRepresentation(op, Representation::Integer32()); -} - - -int32_t LCodeGen::ToRepresentation(LConstantOperand* op, - const Representation& r) const { - HConstant* constant = chunk_->LookupConstant(op); - if (r.IsExternal()) { - return reinterpret_cast( - constant->ExternalReferenceValue().address()); - } - int32_t value = constant->Integer32Value(); - if (r.IsInteger32()) return value; - DCHECK(r.IsSmiOrTagged()); - return reinterpret_cast(Smi::FromInt(value)); -} - - -Handle LCodeGen::ToHandle(LConstantOperand* op) const { - HConstant* constant = chunk_->LookupConstant(op); - DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); - return constant->handle(isolate()); -} - - -double LCodeGen::ToDouble(LConstantOperand* op) const { - HConstant* constant = chunk_->LookupConstant(op); - DCHECK(constant->HasDoubleValue()); - return constant->DoubleValue(); -} - - -ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const { - HConstant* constant = chunk_->LookupConstant(op); - DCHECK(constant->HasExternalReferenceValue()); - return constant->ExternalReferenceValue(); -} - - -bool LCodeGen::IsInteger32(LConstantOperand* op) const { - return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32(); -} - - -bool LCodeGen::IsSmi(LConstantOperand* op) const { - return chunk_->LookupLiteralRepresentation(op).IsSmi(); -} - - -static int ArgumentsOffsetWithoutFrame(int index) { - DCHECK(index < 0); - return -(index + 1) * kPointerSize + kPCOnStackSize; -} - - -Operand LCodeGen::ToOperand(LOperand* op) const { - if (op->IsRegister()) return Operand(ToRegister(op)); - DCHECK(!op->IsDoubleRegister()); - DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); - if (NeedsEagerFrame()) { - return Operand(ebp, FrameSlotToFPOffset(op->index())); - } else { - // Retrieve parameter without eager stack-frame relative to the - // stack-pointer. 
- return Operand(esp, ArgumentsOffsetWithoutFrame(op->index())); - } -} - - -Operand LCodeGen::HighOperand(LOperand* op) { - DCHECK(op->IsDoubleStackSlot()); - if (NeedsEagerFrame()) { - return Operand(ebp, FrameSlotToFPOffset(op->index()) + kPointerSize); - } else { - // Retrieve parameter without eager stack-frame relative to the - // stack-pointer. - return Operand( - esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize); - } -} - - -void LCodeGen::WriteTranslation(LEnvironment* environment, - Translation* translation) { - if (environment == NULL) return; - - // The translation includes one command per value in the environment. - int translation_size = environment->translation_size(); - - WriteTranslation(environment->outer(), translation); - WriteTranslationFrame(environment, translation); - - int object_index = 0; - int dematerialized_index = 0; - for (int i = 0; i < translation_size; ++i) { - LOperand* value = environment->values()->at(i); - AddToTranslation(environment, - translation, - value, - environment->HasTaggedValueAt(i), - environment->HasUint32ValueAt(i), - &object_index, - &dematerialized_index); - } -} - - -void LCodeGen::AddToTranslation(LEnvironment* environment, - Translation* translation, - LOperand* op, - bool is_tagged, - bool is_uint32, - int* object_index_pointer, - int* dematerialized_index_pointer) { - if (op == LEnvironment::materialization_marker()) { - int object_index = (*object_index_pointer)++; - if (environment->ObjectIsDuplicateAt(object_index)) { - int dupe_of = environment->ObjectDuplicateOfAt(object_index); - translation->DuplicateObject(dupe_of); - return; - } - int object_length = environment->ObjectLengthAt(object_index); - if (environment->ObjectIsArgumentsAt(object_index)) { - translation->BeginArgumentsObject(object_length); - } else { - translation->BeginCapturedObject(object_length); - } - int dematerialized_index = *dematerialized_index_pointer; - int env_offset = environment->translation_size() + dematerialized_index; - *dematerialized_index_pointer += object_length; - for (int i = 0; i < object_length; ++i) { - LOperand* value = environment->values()->at(env_offset + i); - AddToTranslation(environment, - translation, - value, - environment->HasTaggedValueAt(env_offset + i), - environment->HasUint32ValueAt(env_offset + i), - object_index_pointer, - dematerialized_index_pointer); - } - return; - } - - if (op->IsStackSlot()) { - int index = op->index(); - if (is_tagged) { - translation->StoreStackSlot(index); - } else if (is_uint32) { - translation->StoreUint32StackSlot(index); - } else { - translation->StoreInt32StackSlot(index); - } - } else if (op->IsDoubleStackSlot()) { - int index = op->index(); - translation->StoreDoubleStackSlot(index); - } else if (op->IsRegister()) { - Register reg = ToRegister(op); - if (is_tagged) { - translation->StoreRegister(reg); - } else if (is_uint32) { - translation->StoreUint32Register(reg); - } else { - translation->StoreInt32Register(reg); - } - } else if (op->IsDoubleRegister()) { - X87Register reg = ToX87Register(op); - translation->StoreDoubleRegister(reg); - } else if (op->IsConstantOperand()) { - HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op)); - int src_index = DefineDeoptimizationLiteral(constant->handle(isolate())); - translation->StoreLiteral(src_index); - } else { - UNREACHABLE(); - } -} - - -void LCodeGen::CallCodeGeneric(Handle code, - RelocInfo::Mode mode, - LInstruction* instr, - SafepointMode safepoint_mode) { - DCHECK(instr != NULL); - __ call(code, mode); 
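// CallCodeGeneric above records a safepoint immediately after the emitted
// call; the RecordSafepoint machinery (defined later in this file) then walks
// the instruction's pointer map and marks every stack slot -- and, for
// kWithRegisters safepoints, every register -- that holds a tagged pointer at
// that return address, so the GC knows exactly which locations to visit. A
// minimal sketch of that bookkeeping with a plain bit set (the operand
// encoding here is invented for illustration):
#include <bitset>
#include <vector>

struct PointerOperand {
  bool is_stack_slot;  // otherwise a register
  int index;           // slot index or register code
};

struct SafepointEntry {
  std::bitset<64> tagged_slots;
  std::bitset<16> tagged_registers;
};

SafepointEntry DefineSafepoint(const std::vector<PointerOperand>& pointers,
                               bool with_registers) {
  SafepointEntry entry;
  for (const PointerOperand& op : pointers) {
    if (op.is_stack_slot) {
      entry.tagged_slots.set(op.index);      // GC will visit this frame slot
    } else if (with_registers) {
      entry.tagged_registers.set(op.index);  // GC will visit this register
    }
  }
  return entry;
}

int main() {
  SafepointEntry e = DefineSafepoint({{true, 3}, {true, 7}, {false, 1}}, false);
  return e.tagged_slots.test(3) && e.tagged_slots.test(7) ? 0 : 1;
}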
- RecordSafepointWithLazyDeopt(instr, safepoint_mode); - - // Signal that we don't inline smi code before these stubs in the - // optimizing code generator. - if (code->kind() == Code::COMPARE_IC) { - __ nop(); - } -} - - -void LCodeGen::CallCode(Handle code, - RelocInfo::Mode mode, - LInstruction* instr) { - CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT); -} - - -void LCodeGen::CallRuntime(const Runtime::Function* fun, int argc, - LInstruction* instr, SaveFPRegsMode save_doubles) { - DCHECK(instr != NULL); - DCHECK(instr->HasPointerMap()); - - __ CallRuntime(fun, argc, save_doubles); - - RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); - - DCHECK(info()->is_calling()); -} - - -void LCodeGen::LoadContextFromDeferred(LOperand* context) { - if (context->IsRegister()) { - if (!ToRegister(context).is(esi)) { - __ mov(esi, ToRegister(context)); - } - } else if (context->IsStackSlot()) { - __ mov(esi, ToOperand(context)); - } else if (context->IsConstantOperand()) { - HConstant* constant = - chunk_->LookupConstant(LConstantOperand::cast(context)); - __ LoadObject(esi, Handle::cast(constant->handle(isolate()))); - } else { - UNREACHABLE(); - } -} - -void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, - int argc, - LInstruction* instr, - LOperand* context) { - LoadContextFromDeferred(context); - - __ CallRuntimeSaveDoubles(id); - RecordSafepointWithRegisters( - instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); - - DCHECK(info()->is_calling()); -} - - -void LCodeGen::RegisterEnvironmentForDeoptimization( - LEnvironment* environment, Safepoint::DeoptMode mode) { - environment->set_has_been_used(); - if (!environment->HasBeenRegistered()) { - // Physical stack frame layout: - // -x ............. -4 0 ..................................... y - // [incoming arguments] [spill slots] [pushed outgoing arguments] - - // Layout of the environment: - // 0 ..................................................... size-1 - // [parameters] [locals] [expression stack including arguments] - - // Layout of the translation: - // 0 ........................................................ size - 1 + 4 - // [expression stack including arguments] [locals] [4 words] [parameters] - // |>------------ translation_size ------------<| - - int frame_count = 0; - int jsframe_count = 0; - for (LEnvironment* e = environment; e != NULL; e = e->outer()) { - ++frame_count; - if (e->frame_type() == JS_FUNCTION) { - ++jsframe_count; - } - } - Translation translation(&translations_, frame_count, jsframe_count, zone()); - WriteTranslation(environment, &translation); - int deoptimization_index = deoptimizations_.length(); - int pc_offset = masm()->pc_offset(); - environment->Register(deoptimization_index, - translation.index(), - (mode == Safepoint::kLazyDeopt) ? 
pc_offset : -1); - deoptimizations_.Add(environment, zone()); - } -} - -void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, - DeoptimizeReason deopt_reason, - Deoptimizer::BailoutType bailout_type) { - LEnvironment* environment = instr->environment(); - RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); - DCHECK(environment->HasBeenRegistered()); - int id = environment->deoptimization_index(); - Address entry = - Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); - if (entry == NULL) { - Abort(kBailoutWasNotPrepared); - return; - } - - if (DeoptEveryNTimes()) { - ExternalReference count = ExternalReference::stress_deopt_count(isolate()); - Label no_deopt; - __ pushfd(); - __ push(eax); - __ mov(eax, Operand::StaticVariable(count)); - __ sub(eax, Immediate(1)); - __ j(not_zero, &no_deopt, Label::kNear); - if (FLAG_trap_on_deopt) __ int3(); - __ mov(eax, Immediate(FLAG_deopt_every_n_times)); - __ mov(Operand::StaticVariable(count), eax); - __ pop(eax); - __ popfd(); - DCHECK(frame_is_built_); - // Put the x87 stack layout in TOS. - if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt(); - __ push(Immediate(x87_stack_.GetLayout())); - __ fild_s(MemOperand(esp, 0)); - // Don't touch eflags. - __ lea(esp, Operand(esp, kPointerSize)); - __ call(entry, RelocInfo::RUNTIME_ENTRY); - __ bind(&no_deopt); - __ mov(Operand::StaticVariable(count), eax); - __ pop(eax); - __ popfd(); - } - - // Put the x87 stack layout in TOS, so that we can save x87 fp registers in - // the correct location. - { - Label done; - if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); - if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt(); - - int x87_stack_layout = x87_stack_.GetLayout(); - __ push(Immediate(x87_stack_layout)); - __ fild_s(MemOperand(esp, 0)); - // Don't touch eflags. - __ lea(esp, Operand(esp, kPointerSize)); - __ bind(&done); - } - - if (info()->ShouldTrapOnDeopt()) { - Label done; - if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); - __ int3(); - __ bind(&done); - } - - Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id); - - DCHECK(info()->IsStub() || frame_is_built_); - if (cc == no_condition && frame_is_built_) { - DeoptComment(deopt_info); - __ call(entry, RelocInfo::RUNTIME_ENTRY); - } else { - Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type, - !frame_is_built_); - // We often have several deopts to the same entry, reuse the last - // jump entry if this is the case. - if (FLAG_trace_deopt || isolate()->is_profiling() || - jump_table_.is_empty() || - !table_entry.IsEquivalentTo(jump_table_.last())) { - jump_table_.Add(table_entry, zone()); - } - if (cc == no_condition) { - __ jmp(&jump_table_.last().label); - } else { - __ j(cc, &jump_table_.last().label); - } - } -} - -void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, - DeoptimizeReason deopt_reason) { - Deoptimizer::BailoutType bailout_type = info()->IsStub() - ? 
Deoptimizer::LAZY - : Deoptimizer::EAGER; - DeoptimizeIf(cc, instr, deopt_reason, bailout_type); -} - - -void LCodeGen::RecordSafepointWithLazyDeopt( - LInstruction* instr, SafepointMode safepoint_mode) { - if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { - RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); - } else { - DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - RecordSafepointWithRegisters( - instr->pointer_map(), 0, Safepoint::kLazyDeopt); - } -} - - -void LCodeGen::RecordSafepoint( - LPointerMap* pointers, - Safepoint::Kind kind, - int arguments, - Safepoint::DeoptMode deopt_mode) { - DCHECK(kind == expected_safepoint_kind_); - const ZoneList* operands = pointers->GetNormalizedOperands(); - Safepoint safepoint = - safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode); - for (int i = 0; i < operands->length(); i++) { - LOperand* pointer = operands->at(i); - if (pointer->IsStackSlot()) { - safepoint.DefinePointerSlot(pointer->index(), zone()); - } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { - safepoint.DefinePointerRegister(ToRegister(pointer), zone()); - } - } -} - - -void LCodeGen::RecordSafepoint(LPointerMap* pointers, - Safepoint::DeoptMode mode) { - RecordSafepoint(pointers, Safepoint::kSimple, 0, mode); -} - - -void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) { - LPointerMap empty_pointers(zone()); - RecordSafepoint(&empty_pointers, mode); -} - - -void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, - int arguments, - Safepoint::DeoptMode mode) { - RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode); -} - - -static const char* LabelType(LLabel* label) { - if (label->is_loop_header()) return " (loop header)"; - if (label->is_osr_entry()) return " (OSR entry)"; - return ""; -} - - -void LCodeGen::DoLabel(LLabel* label) { - Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------", - current_instruction_, - label->hydrogen_value()->id(), - label->block_id(), - LabelType(label)); - __ bind(label->label()); - current_block_ = label->block_id(); - if (label->block()->predecessors()->length() > 1) { - // A join block's x87 stack is that of its last visited predecessor. - // If the last visited predecessor block is unreachable, the stack state - // will be wrong. In such case, use the x87 stack of reachable predecessor. - X87StackMap::const_iterator it = x87_stack_map_.find(current_block_); - // Restore x87 stack. - if (it != x87_stack_map_.end()) { - x87_stack_ = *(it->second); - } - } - DoGap(label); -} - - -void LCodeGen::DoParallelMove(LParallelMove* move) { - resolver_.Resolve(move); -} - - -void LCodeGen::DoGap(LGap* gap) { - for (int i = LGap::FIRST_INNER_POSITION; - i <= LGap::LAST_INNER_POSITION; - i++) { - LGap::InnerPosition inner_pos = static_cast(i); - LParallelMove* move = gap->GetParallelMove(inner_pos); - if (move != NULL) DoParallelMove(move); - } -} - - -void LCodeGen::DoInstructionGap(LInstructionGap* instr) { - DoGap(instr); -} - - -void LCodeGen::DoParameter(LParameter* instr) { - // Nothing to do. 
-}
-
-
-void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
-  GenerateOsrPrologue();
-}
-
-
-void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
-  Register dividend = ToRegister(instr->dividend());
-  int32_t divisor = instr->divisor();
-  DCHECK(dividend.is(ToRegister(instr->result())));
-
-  // Theoretically, a variation of the branch-free code for integer division by
-  // a power of 2 (calculating the remainder via an additional multiplication
-  // (which gets simplified to an 'and') and subtraction) should be faster, and
-  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
-  // indicate that positive dividends are heavily favored, so the branching
-  // version performs better.
-  HMod* hmod = instr->hydrogen();
-  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
-  Label dividend_is_not_negative, done;
-  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
-    __ test(dividend, dividend);
-    __ j(not_sign, &dividend_is_not_negative, Label::kNear);
-    // Note that this is correct even for kMinInt operands.
-    __ neg(dividend);
-    __ and_(dividend, mask);
-    __ neg(dividend);
-    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
-    }
-    __ jmp(&done, Label::kNear);
-  }
-
-  __ bind(&dividend_is_not_negative);
-  __ and_(dividend, mask);
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoModByConstI(LModByConstI* instr) {
-  Register dividend = ToRegister(instr->dividend());
-  int32_t divisor = instr->divisor();
-  DCHECK(ToRegister(instr->result()).is(eax));
-
-  if (divisor == 0) {
-    DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
-    return;
-  }
-
-  __ TruncatingDiv(dividend, Abs(divisor));
-  __ imul(edx, edx, Abs(divisor));
-  __ mov(eax, dividend);
-  __ sub(eax, edx);
-
-  // Check for negative zero.
-  HMod* hmod = instr->hydrogen();
-  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    Label remainder_not_zero;
-    __ j(not_zero, &remainder_not_zero, Label::kNear);
-    __ cmp(dividend, Immediate(0));
-    DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
-    __ bind(&remainder_not_zero);
-  }
-}
-
-
-void LCodeGen::DoModI(LModI* instr) {
-  HMod* hmod = instr->hydrogen();
-
-  Register left_reg = ToRegister(instr->left());
-  DCHECK(left_reg.is(eax));
-  Register right_reg = ToRegister(instr->right());
-  DCHECK(!right_reg.is(eax));
-  DCHECK(!right_reg.is(edx));
-  Register result_reg = ToRegister(instr->result());
-  DCHECK(result_reg.is(edx));
-
-  Label done;
-  // Check for x % 0, idiv would signal a divide error. We have to
-  // deopt in this case because we can't return a NaN.
-  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
-    __ test(right_reg, Operand(right_reg));
-    DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
-  }
-
-  // Check for kMinInt % -1, idiv would signal a divide error. We
-  // have to deopt if we care about -0, because we can't return that.
-  if (hmod->CheckFlag(HValue::kCanOverflow)) {
-    Label no_overflow_possible;
-    __ cmp(left_reg, kMinInt);
-    __ j(not_equal, &no_overflow_possible, Label::kNear);
-    __ cmp(right_reg, -1);
-    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      DeoptimizeIf(equal, instr, DeoptimizeReason::kMinusZero);
-    } else {
-      __ j(not_equal, &no_overflow_possible, Label::kNear);
-      __ Move(result_reg, Immediate(0));
-      __ jmp(&done, Label::kNear);
-    }
-    __ bind(&no_overflow_possible);
-  }
-
-  // Sign extend dividend in eax into edx:eax.
-  __ cdq();
-
-  // If we care about -0, test if the dividend is <0 and the result is 0.
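// The comment in DoModByPowerOf2I above weighs a branch-free remainder against
// the branching neg/and/neg sequence that is actually emitted. A sketch of
// both variants for divisors whose absolute value is a power of two; the
// branch-free version here is bias-based and only one of several possible
// formulations, and uint32_t is used where the assembly relies on
// two's-complement wrap-around (e.g. for kMinInt):
#include <cassert>
#include <cstdint>

int32_t ModBranching(int32_t dividend, int32_t divisor) {
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);  // |divisor|-1
  if (dividend >= 0) return dividend & mask;
  // neg / and / neg, done in uint32_t so that kMinInt does not overflow.
  uint32_t negated = 0u - static_cast<uint32_t>(dividend);
  return -static_cast<int32_t>(negated & static_cast<uint32_t>(mask));
}

int32_t ModBranchFree(int32_t dividend, int32_t divisor) {
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  // bias is mask for negative dividends, 0 otherwise (sign bit times mask).
  int32_t bias =
      static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31) * mask;
  return ((dividend + bias) & mask) - bias;
}

int main() {
  for (int32_t d : {8, -8, 16}) {
    for (int32_t x : {13, -13, 0, -1, INT32_MIN}) {
      assert(ModBranching(x, d) == x % d);
      assert(ModBranchFree(x, d) == x % d);
    }
  }
  return 0;
}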
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label positive_left; - __ test(left_reg, Operand(left_reg)); - __ j(not_sign, &positive_left, Label::kNear); - __ idiv(right_reg); - __ test(result_reg, Operand(result_reg)); - DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero); - __ jmp(&done, Label::kNear); - __ bind(&positive_left); - } - __ idiv(right_reg); - __ bind(&done); -} - - -void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - Register result = ToRegister(instr->result()); - DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); - DCHECK(!result.is(dividend)); - - // Check for (0 / -x) that will produce negative zero. - HDiv* hdiv = instr->hydrogen(); - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { - __ test(dividend, dividend); - DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero); - } - // Check for (kMinInt / -1). - if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { - __ cmp(dividend, kMinInt); - DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow); - } - // Deoptimize if remainder will not be 0. - if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && - divisor != 1 && divisor != -1) { - int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); - __ test(dividend, Immediate(mask)); - DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision); - } - __ Move(result, dividend); - int32_t shift = WhichPowerOf2Abs(divisor); - if (shift > 0) { - // The arithmetic shift is always OK, the 'if' is an optimization only. - if (shift > 1) __ sar(result, 31); - __ shr(result, 32 - shift); - __ add(result, dividend); - __ sar(result, shift); - } - if (divisor < 0) __ neg(result); -} - - -void LCodeGen::DoDivByConstI(LDivByConstI* instr) { - Register dividend = ToRegister(instr->dividend()); - int32_t divisor = instr->divisor(); - DCHECK(ToRegister(instr->result()).is(edx)); - - if (divisor == 0) { - DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero); - return; - } - - // Check for (0 / -x) that will produce negative zero. - HDiv* hdiv = instr->hydrogen(); - if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { - __ test(dividend, dividend); - DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero); - } - - __ TruncatingDiv(dividend, Abs(divisor)); - if (divisor < 0) __ neg(edx); - - if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { - __ mov(eax, edx); - __ imul(eax, eax, divisor); - __ sub(eax, dividend); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision); - } -} - - -// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. -void LCodeGen::DoDivI(LDivI* instr) { - HBinaryOperation* hdiv = instr->hydrogen(); - Register dividend = ToRegister(instr->dividend()); - Register divisor = ToRegister(instr->divisor()); - Register remainder = ToRegister(instr->temp()); - DCHECK(dividend.is(eax)); - DCHECK(remainder.is(edx)); - DCHECK(ToRegister(instr->result()).is(eax)); - DCHECK(!divisor.is(eax)); - DCHECK(!divisor.is(edx)); - - // Check for x / 0. - if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { - __ test(divisor, divisor); - DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero); - } - - // Check for (0 / -x) that will produce negative zero. 
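// DoDivByPowerOf2I above emits: copy, sar 31, shr (32 - shift), add, sar
// shift, plus an optional neg. The shr of the sign-extended copy produces the
// bias (2^shift - 1 for negative dividends, 0 otherwise) that turns the
// flooring arithmetic shift into a truncating division. The same computation
// in C++ (unsigned arithmetic is used where the asm relies on
// two's-complement wrap-around and arithmetic shifts of negative values):
#include <cassert>
#include <cstdint>

int32_t DivByPowerOf2(int32_t dividend, int32_t divisor, int shift) {
  // Precondition: |divisor| == 1 << shift, shift >= 1, and no kMinInt / -1.
  uint32_t sign_mask = dividend < 0 ? 0xFFFFFFFFu : 0u;  // sar result, 31
  uint32_t bias = sign_mask >> (32 - shift);              // shr result, 32-shift
  int32_t result =
      static_cast<int32_t>(static_cast<uint32_t>(dividend) + bias) >> shift;
  return divisor < 0 ? -result : result;                  // neg if needed
}

int main() {
  assert(DivByPowerOf2(13, 4, 2) == 3);
  assert(DivByPowerOf2(-13, 4, 2) == -3);  // truncates toward zero, not to -4
  assert(DivByPowerOf2(-13, -4, 2) == 3);
  assert(DivByPowerOf2(INT32_MIN, 16, 4) == INT32_MIN / 16);
  return 0;
}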
-  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    Label dividend_not_zero;
-    __ test(dividend, dividend);
-    __ j(not_zero, &dividend_not_zero, Label::kNear);
-    __ test(divisor, divisor);
-    DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
-    __ bind(&dividend_not_zero);
-  }
-
-  // Check for (kMinInt / -1).
-  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
-    Label dividend_not_min_int;
-    __ cmp(dividend, kMinInt);
-    __ j(not_zero, &dividend_not_min_int, Label::kNear);
-    __ cmp(divisor, -1);
-    DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
-    __ bind(&dividend_not_min_int);
-  }
-
-  // Sign extend to edx (= remainder).
-  __ cdq();
-  __ idiv(divisor);
-
-  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
-    // Deoptimize if remainder is not 0.
-    __ test(remainder, remainder);
-    DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
-  }
-}
-
-
-void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
-  Register dividend = ToRegister(instr->dividend());
-  int32_t divisor = instr->divisor();
-  DCHECK(dividend.is(ToRegister(instr->result())));
-
-  // If the divisor is positive, things are easy: There can be no deopts and we
-  // can simply do an arithmetic right shift.
-  if (divisor == 1) return;
-  int32_t shift = WhichPowerOf2Abs(divisor);
-  if (divisor > 1) {
-    __ sar(dividend, shift);
-    return;
-  }
-
-  // If the divisor is negative, we have to negate and handle edge cases.
-  __ neg(dividend);
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
-  }
-
-  // Dividing by -1 is basically negation, unless we overflow.
-  if (divisor == -1) {
-    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
-      DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
-    }
-    return;
-  }
-
-  // If the negation could not overflow, simply shifting is OK.
-  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
-    __ sar(dividend, shift);
-    return;
-  }
-
-  Label not_kmin_int, done;
-  __ j(no_overflow, &not_kmin_int, Label::kNear);
-  __ mov(dividend, Immediate(kMinInt / divisor));
-  __ jmp(&done, Label::kNear);
-  __ bind(&not_kmin_int);
-  __ sar(dividend, shift);
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
-  Register dividend = ToRegister(instr->dividend());
-  int32_t divisor = instr->divisor();
-  DCHECK(ToRegister(instr->result()).is(edx));
-
-  if (divisor == 0) {
-    DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
-    return;
-  }
-
-  // Check for (0 / -x) that will produce negative zero.
-  HMathFloorOfDiv* hdiv = instr->hydrogen();
-  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
-    __ test(dividend, dividend);
-    DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
-  }
-
-  // Easy case: We need no dynamic check for the dividend and the flooring
-  // division is the same as the truncating division.
-  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
-      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
-    __ TruncatingDiv(dividend, Abs(divisor));
-    if (divisor < 0) __ neg(edx);
-    return;
-  }
-
-  // In the general case we may need to adjust before and after the truncating
-  // division to get a flooring division.
-  Register temp = ToRegister(instr->temp3());
-  DCHECK(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
-  Label needs_adjustment, done;
-  __ cmp(dividend, Immediate(0));
-  __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
-  __ TruncatingDiv(dividend, Abs(divisor));
-  if (divisor < 0) __ neg(edx);
-  __ jmp(&done, Label::kNear);
-  __ bind(&needs_adjustment);
-  __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1));
-  __ TruncatingDiv(temp, Abs(divisor));
-  if (divisor < 0) __ neg(edx);
-  __ dec(edx);
-  __ bind(&done);
-}
-
-
-// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
-void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
-  HBinaryOperation* hdiv = instr->hydrogen();
-  Register dividend = ToRegister(instr->dividend());
-  Register divisor = ToRegister(instr->divisor());
-  Register remainder = ToRegister(instr->temp());
-  Register result = ToRegister(instr->result());
-  DCHECK(dividend.is(eax));
-  DCHECK(remainder.is(edx));
-  DCHECK(result.is(eax));
-  DCHECK(!divisor.is(eax));
-  DCHECK(!divisor.is(edx));
-
-  // Check for x / 0.
-  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
-    __ test(divisor, divisor);
-    DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
-  }
-
-  // Check for (0 / -x) that will produce negative zero.
-  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    Label dividend_not_zero;
-    __ test(dividend, dividend);
-    __ j(not_zero, &dividend_not_zero, Label::kNear);
-    __ test(divisor, divisor);
-    DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
-    __ bind(&dividend_not_zero);
-  }
-
-  // Check for (kMinInt / -1).
-  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
-    Label dividend_not_min_int;
-    __ cmp(dividend, kMinInt);
-    __ j(not_zero, &dividend_not_min_int, Label::kNear);
-    __ cmp(divisor, -1);
-    DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
-    __ bind(&dividend_not_min_int);
-  }
-
-  // Sign extend to edx (= remainder).
-  __ cdq();
-  __ idiv(divisor);
-
-  Label done;
-  __ test(remainder, remainder);
-  __ j(zero, &done, Label::kNear);
-  __ xor_(remainder, divisor);
-  __ sar(remainder, 31);
-  __ add(result, remainder);
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoMulI(LMulI* instr) {
-  Register left = ToRegister(instr->left());
-  LOperand* right = instr->right();
-
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    __ mov(ToRegister(instr->temp()), left);
-  }
-
-  if (right->IsConstantOperand()) {
-    // Try strength reductions on the multiplication.
-    // All replacement instructions are at most as long as the imul
-    // and have better latency.
-    int constant = ToInteger32(LConstantOperand::cast(right));
-    if (constant == -1) {
-      __ neg(left);
-    } else if (constant == 0) {
-      __ xor_(left, Operand(left));
-    } else if (constant == 2) {
-      __ add(left, Operand(left));
-    } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-      // If we know that the multiplication can't overflow, it's safe to
-      // use instructions that don't set the overflow flag for the
-      // multiplication.
-      switch (constant) {
-        case 1:
-          // Do nothing.
- break; - case 3: - __ lea(left, Operand(left, left, times_2, 0)); - break; - case 4: - __ shl(left, 2); - break; - case 5: - __ lea(left, Operand(left, left, times_4, 0)); - break; - case 8: - __ shl(left, 3); - break; - case 9: - __ lea(left, Operand(left, left, times_8, 0)); - break; - case 16: - __ shl(left, 4); - break; - default: - __ imul(left, left, constant); - break; - } - } else { - __ imul(left, left, constant); - } - } else { - if (instr->hydrogen()->representation().IsSmi()) { - __ SmiUntag(left); - } - __ imul(left, ToOperand(right)); - } - - if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); - } - - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // Bail out if the result is supposed to be negative zero. - Label done; - __ test(left, Operand(left)); - __ j(not_zero, &done); - if (right->IsConstantOperand()) { - if (ToInteger32(LConstantOperand::cast(right)) < 0) { - DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero); - } else if (ToInteger32(LConstantOperand::cast(right)) == 0) { - __ cmp(ToRegister(instr->temp()), Immediate(0)); - DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero); - } - } else { - // Test the non-zero operand for negative sign. - __ or_(ToRegister(instr->temp()), ToOperand(right)); - DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero); - } - __ bind(&done); - } -} - - -void LCodeGen::DoBitI(LBitI* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - DCHECK(left->Equals(instr->result())); - DCHECK(left->IsRegister()); - - if (right->IsConstantOperand()) { - int32_t right_operand = - ToRepresentation(LConstantOperand::cast(right), - instr->hydrogen()->representation()); - switch (instr->op()) { - case Token::BIT_AND: - __ and_(ToRegister(left), right_operand); - break; - case Token::BIT_OR: - __ or_(ToRegister(left), right_operand); - break; - case Token::BIT_XOR: - if (right_operand == int32_t(~0)) { - __ not_(ToRegister(left)); - } else { - __ xor_(ToRegister(left), right_operand); - } - break; - default: - UNREACHABLE(); - break; - } - } else { - switch (instr->op()) { - case Token::BIT_AND: - __ and_(ToRegister(left), ToOperand(right)); - break; - case Token::BIT_OR: - __ or_(ToRegister(left), ToOperand(right)); - break; - case Token::BIT_XOR: - __ xor_(ToRegister(left), ToOperand(right)); - break; - default: - UNREACHABLE(); - break; - } - } -} - - -void LCodeGen::DoShiftI(LShiftI* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - DCHECK(left->Equals(instr->result())); - DCHECK(left->IsRegister()); - if (right->IsRegister()) { - DCHECK(ToRegister(right).is(ecx)); - - switch (instr->op()) { - case Token::ROR: - __ ror_cl(ToRegister(left)); - break; - case Token::SAR: - __ sar_cl(ToRegister(left)); - break; - case Token::SHR: - __ shr_cl(ToRegister(left)); - if (instr->can_deopt()) { - __ test(ToRegister(left), ToRegister(left)); - DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue); - } - break; - case Token::SHL: - __ shl_cl(ToRegister(left)); - break; - default: - UNREACHABLE(); - break; - } - } else { - int value = ToInteger32(LConstantOperand::cast(right)); - uint8_t shift_count = static_cast(value & 0x1F); - switch (instr->op()) { - case Token::ROR: - if (shift_count == 0 && instr->can_deopt()) { - __ test(ToRegister(left), ToRegister(left)); - DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue); - } else { - __ ror(ToRegister(left), shift_count); - } - break; - 
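// DoMulI above strength-reduces multiplications by small constants into
// neg/xor/add/lea/shl forms when no overflow check is needed. The same
// replacements written out in C++ (uint32_t so the wrap-around behaviour
// matches the flag-less instructions; this is a sketch, not the emitted
// code itself):
#include <cassert>
#include <cstdint>

uint32_t MulByConstant(uint32_t x, int constant) {
  switch (constant) {
    case -1: return 0u - x;          // neg
    case 0:  return 0;               // xor left, left
    case 1:  return x;               // do nothing
    case 2:  return x + x;           // add left, left
    case 3:  return x + (x << 1);    // lea (left, left, times_2)
    case 4:  return x << 2;          // shl left, 2
    case 5:  return x + (x << 2);    // lea (left, left, times_4)
    case 8:  return x << 3;          // shl left, 3
    case 9:  return x + (x << 3);    // lea (left, left, times_8)
    case 16: return x << 4;          // shl left, 4
    default: return x * static_cast<uint32_t>(constant);  // imul
  }
}

int main() {
  for (int c : {-1, 0, 1, 2, 3, 4, 5, 8, 9, 16, 7}) {
    assert(MulByConstant(123456789u, c) ==
           123456789u * static_cast<uint32_t>(c));
  }
  return 0;
}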
case Token::SAR: - if (shift_count != 0) { - __ sar(ToRegister(left), shift_count); - } - break; - case Token::SHR: - if (shift_count != 0) { - __ shr(ToRegister(left), shift_count); - } else if (instr->can_deopt()) { - __ test(ToRegister(left), ToRegister(left)); - DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue); - } - break; - case Token::SHL: - if (shift_count != 0) { - if (instr->hydrogen_value()->representation().IsSmi() && - instr->can_deopt()) { - if (shift_count != 1) { - __ shl(ToRegister(left), shift_count - 1); - } - __ SmiTag(ToRegister(left)); - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); - } else { - __ shl(ToRegister(left), shift_count); - } - } - break; - default: - UNREACHABLE(); - break; - } - } -} - - -void LCodeGen::DoSubI(LSubI* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - DCHECK(left->Equals(instr->result())); - - if (right->IsConstantOperand()) { - __ sub(ToOperand(left), - ToImmediate(right, instr->hydrogen()->representation())); - } else { - __ sub(ToRegister(left), ToOperand(right)); - } - if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); - } -} - - -void LCodeGen::DoConstantI(LConstantI* instr) { - __ Move(ToRegister(instr->result()), Immediate(instr->value())); -} - - -void LCodeGen::DoConstantS(LConstantS* instr) { - __ Move(ToRegister(instr->result()), Immediate(instr->value())); -} - - -void LCodeGen::DoConstantD(LConstantD* instr) { - uint64_t const bits = instr->bits(); - uint32_t const lower = static_cast(bits); - uint32_t const upper = static_cast(bits >> 32); - DCHECK(instr->result()->IsDoubleRegister()); - - __ push(Immediate(upper)); - __ push(Immediate(lower)); - X87Register reg = ToX87Register(instr->result()); - X87Mov(reg, Operand(esp, 0)); - __ add(Operand(esp), Immediate(kDoubleSize)); -} - - -void LCodeGen::DoConstantE(LConstantE* instr) { - __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value())); -} - - -void LCodeGen::DoConstantT(LConstantT* instr) { - Register reg = ToRegister(instr->result()); - Handle object = instr->value(isolate()); - AllowDeferredHandleDereference smi_check; - __ LoadObject(reg, object); -} - - -Operand LCodeGen::BuildSeqStringOperand(Register string, - LOperand* index, - String::Encoding encoding) { - if (index->IsConstantOperand()) { - int offset = ToRepresentation(LConstantOperand::cast(index), - Representation::Integer32()); - if (encoding == String::TWO_BYTE_ENCODING) { - offset *= kUC16Size; - } - STATIC_ASSERT(kCharSize == 1); - return FieldOperand(string, SeqString::kHeaderSize + offset); - } - return FieldOperand( - string, ToRegister(index), - encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2, - SeqString::kHeaderSize); -} - - -void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { - String::Encoding encoding = instr->hydrogen()->encoding(); - Register result = ToRegister(instr->result()); - Register string = ToRegister(instr->string()); - - if (FLAG_debug_code) { - __ push(string); - __ mov(string, FieldOperand(string, HeapObject::kMapOffset)); - __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset)); - - __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask)); - static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; - static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; - __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING - ? 
one_byte_seq_type : two_byte_seq_type)); - __ Check(equal, kUnexpectedStringType); - __ pop(string); - } - - Operand operand = BuildSeqStringOperand(string, instr->index(), encoding); - if (encoding == String::ONE_BYTE_ENCODING) { - __ movzx_b(result, operand); - } else { - __ movzx_w(result, operand); - } -} - - -void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { - String::Encoding encoding = instr->hydrogen()->encoding(); - Register string = ToRegister(instr->string()); - - if (FLAG_debug_code) { - Register value = ToRegister(instr->value()); - Register index = ToRegister(instr->index()); - static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; - static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; - int encoding_mask = - instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING - ? one_byte_seq_type : two_byte_seq_type; - __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask); - } - - Operand operand = BuildSeqStringOperand(string, instr->index(), encoding); - if (instr->value()->IsConstantOperand()) { - int value = ToRepresentation(LConstantOperand::cast(instr->value()), - Representation::Integer32()); - DCHECK_LE(0, value); - if (encoding == String::ONE_BYTE_ENCODING) { - DCHECK_LE(value, String::kMaxOneByteCharCode); - __ mov_b(operand, static_cast(value)); - } else { - DCHECK_LE(value, String::kMaxUtf16CodeUnit); - __ mov_w(operand, static_cast(value)); - } - } else { - Register value = ToRegister(instr->value()); - if (encoding == String::ONE_BYTE_ENCODING) { - __ mov_b(operand, value); - } else { - __ mov_w(operand, value); - } - } -} - - -void LCodeGen::DoAddI(LAddI* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - - if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) { - if (right->IsConstantOperand()) { - int32_t offset = ToRepresentation(LConstantOperand::cast(right), - instr->hydrogen()->representation()); - __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset)); - } else { - Operand address(ToRegister(left), ToRegister(right), times_1, 0); - __ lea(ToRegister(instr->result()), address); - } - } else { - if (right->IsConstantOperand()) { - __ add(ToOperand(left), - ToImmediate(right, instr->hydrogen()->representation())); - } else { - __ add(ToRegister(left), ToOperand(right)); - } - if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); - } - } -} - - -void LCodeGen::DoMathMinMax(LMathMinMax* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - DCHECK(left->Equals(instr->result())); - HMathMinMax::Operation operation = instr->hydrogen()->operation(); - if (instr->hydrogen()->representation().IsSmiOrInteger32()) { - Label return_left; - Condition condition = (operation == HMathMinMax::kMathMin) - ? 
less_equal - : greater_equal; - if (right->IsConstantOperand()) { - Operand left_op = ToOperand(left); - Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()), - instr->hydrogen()->representation()); - __ cmp(left_op, immediate); - __ j(condition, &return_left, Label::kNear); - __ mov(left_op, immediate); - } else { - Register left_reg = ToRegister(left); - Operand right_op = ToOperand(right); - __ cmp(left_reg, right_op); - __ j(condition, &return_left, Label::kNear); - __ mov(left_reg, right_op); - } - __ bind(&return_left); - } else { - DCHECK(instr->hydrogen()->representation().IsDouble()); - Label check_nan_left, check_zero, return_left, return_right; - Condition condition = (operation == HMathMinMax::kMathMin) ? below : above; - X87Register left_reg = ToX87Register(left); - X87Register right_reg = ToX87Register(right); - - X87PrepareBinaryOp(left_reg, right_reg, ToX87Register(instr->result())); - __ fld(1); - __ fld(1); - __ FCmp(); - __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN. - __ j(equal, &check_zero, Label::kNear); // left == right. - __ j(condition, &return_left, Label::kNear); - __ jmp(&return_right, Label::kNear); - - __ bind(&check_zero); - __ fld(0); - __ fldz(); - __ FCmp(); - __ j(not_equal, &return_left, Label::kNear); // left == right != 0. - // At this point, both left and right are either 0 or -0. - if (operation == HMathMinMax::kMathMin) { - // Push st0 and st1 to stack, then pop them to temp registers and OR them, - // load it to left. - Register scratch_reg = ToRegister(instr->temp()); - __ fld(1); - __ fld(1); - __ sub(esp, Immediate(2 * kPointerSize)); - __ fstp_s(MemOperand(esp, 0)); - __ fstp_s(MemOperand(esp, kPointerSize)); - __ pop(scratch_reg); - __ or_(MemOperand(esp, 0), scratch_reg); - X87Mov(left_reg, MemOperand(esp, 0), kX87FloatOperand); - __ pop(scratch_reg); // restore esp - } else { - // Since we operate on +0 and/or -0, addsd and andsd have the same effect. - // Should put the result in stX0 - __ fadd_i(1); - } - __ jmp(&return_left, Label::kNear); - - __ bind(&check_nan_left); - __ fld(0); - __ fld(0); - __ FCmp(); // NaN check. - __ j(parity_even, &return_left, Label::kNear); // left == NaN. - - __ bind(&return_right); - X87Mov(left_reg, right_reg); - - __ bind(&return_left); - } -} - - -void LCodeGen::DoArithmeticD(LArithmeticD* instr) { - X87Register left = ToX87Register(instr->left()); - X87Register right = ToX87Register(instr->right()); - X87Register result = ToX87Register(instr->result()); - if (instr->op() != Token::MOD) { - X87PrepareBinaryOp(left, right, result); - } - // Set the precision control to double-precision. - __ X87SetFPUCW(0x027F); - switch (instr->op()) { - case Token::ADD: - __ fadd_i(1); - break; - case Token::SUB: - __ fsub_i(1); - break; - case Token::MUL: - __ fmul_i(1); - break; - case Token::DIV: - __ fdiv_i(1); - break; - case Token::MOD: { - // Pass two doubles as arguments on the stack. - __ PrepareCallCFunction(4, eax); - X87Mov(Operand(esp, 1 * kDoubleSize), right); - X87Mov(Operand(esp, 0), left); - X87Free(right); - DCHECK(left.is(result)); - X87PrepareToWrite(result); - __ CallCFunction( - ExternalReference::mod_two_doubles_operation(isolate()), - 4); - - // Return value is in st(0) on ia32. - X87CommitWrite(result); - break; - } - default: - UNREACHABLE(); - break; - } - - // Restore the default value of control word. 
- __ X87SetFPUCW(0x037F); -} - - -void LCodeGen::DoArithmeticT(LArithmeticT* instr) { - DCHECK(ToRegister(instr->context()).is(esi)); - DCHECK(ToRegister(instr->left()).is(edx)); - DCHECK(ToRegister(instr->right()).is(eax)); - DCHECK(ToRegister(instr->result()).is(eax)); - - UNREACHABLE(); -} - - -template -void LCodeGen::EmitBranch(InstrType instr, Condition cc) { - int left_block = instr->TrueDestination(chunk_); - int right_block = instr->FalseDestination(chunk_); - - int next_block = GetNextEmittedBlock(); - - if (right_block == left_block || cc == no_condition) { - EmitGoto(left_block); - } else if (left_block == next_block) { - __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block)); - } else if (right_block == next_block) { - __ j(cc, chunk_->GetAssemblyLabel(left_block)); - } else { - __ j(cc, chunk_->GetAssemblyLabel(left_block)); - __ jmp(chunk_->GetAssemblyLabel(right_block)); - } -} - - -template -void LCodeGen::EmitTrueBranch(InstrType instr, Condition cc) { - int true_block = instr->TrueDestination(chunk_); - if (cc == no_condition) { - __ jmp(chunk_->GetAssemblyLabel(true_block)); - } else { - __ j(cc, chunk_->GetAssemblyLabel(true_block)); - } -} - - -template -void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) { - int false_block = instr->FalseDestination(chunk_); - if (cc == no_condition) { - __ jmp(chunk_->GetAssemblyLabel(false_block)); - } else { - __ j(cc, chunk_->GetAssemblyLabel(false_block)); - } -} - - -void LCodeGen::DoBranch(LBranch* instr) { - Representation r = instr->hydrogen()->value()->representation(); - if (r.IsSmiOrInteger32()) { - Register reg = ToRegister(instr->value()); - __ test(reg, Operand(reg)); - EmitBranch(instr, not_zero); - } else if (r.IsDouble()) { - X87Register reg = ToX87Register(instr->value()); - X87LoadForUsage(reg); - __ fldz(); - __ FCmp(); - EmitBranch(instr, not_zero); - } else { - DCHECK(r.IsTagged()); - Register reg = ToRegister(instr->value()); - HType type = instr->hydrogen()->value()->type(); - if (type.IsBoolean()) { - DCHECK(!info()->IsStub()); - __ cmp(reg, factory()->true_value()); - EmitBranch(instr, equal); - } else if (type.IsSmi()) { - DCHECK(!info()->IsStub()); - __ test(reg, Operand(reg)); - EmitBranch(instr, not_equal); - } else if (type.IsJSArray()) { - DCHECK(!info()->IsStub()); - EmitBranch(instr, no_condition); - } else if (type.IsHeapNumber()) { - UNREACHABLE(); - } else if (type.IsString()) { - DCHECK(!info()->IsStub()); - __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0)); - EmitBranch(instr, not_equal); - } else { - ToBooleanHints expected = instr->hydrogen()->expected_input_types(); - if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny; - - if (expected & ToBooleanHint::kUndefined) { - // undefined -> false. - __ cmp(reg, factory()->undefined_value()); - __ j(equal, instr->FalseLabel(chunk_)); - } - if (expected & ToBooleanHint::kBoolean) { - // true -> true. - __ cmp(reg, factory()->true_value()); - __ j(equal, instr->TrueLabel(chunk_)); - // false -> false. - __ cmp(reg, factory()->false_value()); - __ j(equal, instr->FalseLabel(chunk_)); - } - if (expected & ToBooleanHint::kNull) { - // 'null' -> false. - __ cmp(reg, factory()->null_value()); - __ j(equal, instr->FalseLabel(chunk_)); - } - - if (expected & ToBooleanHint::kSmallInteger) { - // Smis: 0 -> false, all other -> true. 
- __ test(reg, Operand(reg)); - __ j(equal, instr->FalseLabel(chunk_)); - __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); - } else if (expected & ToBooleanHint::kNeedsMap) { - // If we need a map later and have a Smi -> deopt. - __ test(reg, Immediate(kSmiTagMask)); - DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi); - } - - Register map = no_reg; // Keep the compiler happy. - if (expected & ToBooleanHint::kNeedsMap) { - map = ToRegister(instr->temp()); - DCHECK(!map.is(reg)); - __ mov(map, FieldOperand(reg, HeapObject::kMapOffset)); - - if (expected & ToBooleanHint::kCanBeUndetectable) { - // Undetectable -> false. - __ test_b(FieldOperand(map, Map::kBitFieldOffset), - Immediate(1 << Map::kIsUndetectable)); - __ j(not_zero, instr->FalseLabel(chunk_)); - } - } - - if (expected & ToBooleanHint::kReceiver) { - // spec object -> true. - __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE); - __ j(above_equal, instr->TrueLabel(chunk_)); - } - - if (expected & ToBooleanHint::kString) { - // String value -> false iff empty. - Label not_string; - __ CmpInstanceType(map, FIRST_NONSTRING_TYPE); - __ j(above_equal, &not_string, Label::kNear); - __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0)); - __ j(not_zero, instr->TrueLabel(chunk_)); - __ jmp(instr->FalseLabel(chunk_)); - __ bind(&not_string); - } - - if (expected & ToBooleanHint::kSymbol) { - // Symbol value -> true. - __ CmpInstanceType(map, SYMBOL_TYPE); - __ j(equal, instr->TrueLabel(chunk_)); - } - - if (expected & ToBooleanHint::kHeapNumber) { - // heap number -> false iff +0, -0, or NaN. - Label not_heap_number; - __ cmp(FieldOperand(reg, HeapObject::kMapOffset), - factory()->heap_number_map()); - __ j(not_equal, &not_heap_number, Label::kNear); - __ fldz(); - __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset)); - __ FCmp(); - __ j(zero, instr->FalseLabel(chunk_)); - __ jmp(instr->TrueLabel(chunk_)); - __ bind(&not_heap_number); - } - - if (expected != ToBooleanHint::kAny) { - // We've seen something for the first time -> deopt. - // This can only happen if we are not generic already. - DeoptimizeIf(no_condition, instr, DeoptimizeReason::kUnexpectedObject); - } - } - } -} - - -void LCodeGen::EmitGoto(int block) { - if (!IsNextEmittedBlock(block)) { - __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); - } -} - - -void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) { -} - - -void LCodeGen::DoGoto(LGoto* instr) { - EmitGoto(instr->block_id()); -} - - -Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { - Condition cond = no_condition; - switch (op) { - case Token::EQ: - case Token::EQ_STRICT: - cond = equal; - break; - case Token::NE: - case Token::NE_STRICT: - cond = not_equal; - break; - case Token::LT: - cond = is_unsigned ? below : less; - break; - case Token::GT: - cond = is_unsigned ? above : greater; - break; - case Token::LTE: - cond = is_unsigned ? below_equal : less_equal; - break; - case Token::GTE: - cond = is_unsigned ?
above_equal : greater_equal; - break; - case Token::IN: - case Token::INSTANCEOF: - default: - UNREACHABLE(); - } - return cond; -} - - -void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - bool is_unsigned = - instr->is_double() || - instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) || - instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32); - Condition cc = TokenToCondition(instr->op(), is_unsigned); - - if (left->IsConstantOperand() && right->IsConstantOperand()) { - // We can statically evaluate the comparison. - double left_val = ToDouble(LConstantOperand::cast(left)); - double right_val = ToDouble(LConstantOperand::cast(right)); - int next_block = Token::EvalComparison(instr->op(), left_val, right_val) - ? instr->TrueDestination(chunk_) - : instr->FalseDestination(chunk_); - EmitGoto(next_block); - } else { - if (instr->is_double()) { - X87LoadForUsage(ToX87Register(right), ToX87Register(left)); - __ FCmp(); - // Don't base result on EFLAGS when a NaN is involved. Instead - // jump to the false block. - __ j(parity_even, instr->FalseLabel(chunk_)); - } else { - if (right->IsConstantOperand()) { - __ cmp(ToOperand(left), - ToImmediate(right, instr->hydrogen()->representation())); - } else if (left->IsConstantOperand()) { - __ cmp(ToOperand(right), - ToImmediate(left, instr->hydrogen()->representation())); - // We commuted the operands, so commute the condition. - cc = CommuteCondition(cc); - } else { - __ cmp(ToRegister(left), ToOperand(right)); - } - } - EmitBranch(instr, cc); - } -} - - -void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { - Register left = ToRegister(instr->left()); - - if (instr->right()->IsConstantOperand()) { - Handle right = ToHandle(LConstantOperand::cast(instr->right())); - __ CmpObject(left, right); - } else { - Operand right = ToOperand(instr->right()); - __ cmp(left, right); - } - EmitBranch(instr, equal); -} - - -void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { - if (instr->hydrogen()->representation().IsTagged()) { - Register input_reg = ToRegister(instr->object()); - __ cmp(input_reg, factory()->the_hole_value()); - EmitBranch(instr, equal); - return; - } - - // Put the value to the top of stack - X87Register src = ToX87Register(instr->object()); - X87LoadForUsage(src); - __ fld(0); - __ fld(0); - __ FCmp(); - Label ok; - __ j(parity_even, &ok, Label::kNear); - __ fstp(0); - EmitFalseBranch(instr, no_condition); - __ bind(&ok); - - - __ sub(esp, Immediate(kDoubleSize)); - __ fstp_d(MemOperand(esp, 0)); - - __ add(esp, Immediate(kDoubleSize)); - int offset = sizeof(kHoleNanUpper32); - __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32)); - EmitBranch(instr, equal); -} - - -Condition LCodeGen::EmitIsString(Register input, - Register temp1, - Label* is_not_string, - SmiCheck check_needed = INLINE_SMI_CHECK) { - if (check_needed == INLINE_SMI_CHECK) { - __ JumpIfSmi(input, is_not_string); - } - - Condition cond = masm_->IsObjectStringType(input, temp1, temp1); - - return cond; -} - - -void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { - Register reg = ToRegister(instr->value()); - Register temp = ToRegister(instr->temp()); - - SmiCheck check_needed = - instr->hydrogen()->value()->type().IsHeapObject() - ? 
OMIT_SMI_CHECK : INLINE_SMI_CHECK; - - Condition true_cond = EmitIsString( - reg, temp, instr->FalseLabel(chunk_), check_needed); - - EmitBranch(instr, true_cond); -} - - -void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { - Operand input = ToOperand(instr->value()); - - __ test(input, Immediate(kSmiTagMask)); - EmitBranch(instr, zero); -} - - -void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { - Register input = ToRegister(instr->value()); - Register temp = ToRegister(instr->temp()); - - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - STATIC_ASSERT(kSmiTag == 0); - __ JumpIfSmi(input, instr->FalseLabel(chunk_)); - } - __ mov(temp, FieldOperand(input, HeapObject::kMapOffset)); - __ test_b(FieldOperand(temp, Map::kBitFieldOffset), - Immediate(1 << Map::kIsUndetectable)); - EmitBranch(instr, not_zero); -} - - -static Condition ComputeCompareCondition(Token::Value op) { - switch (op) { - case Token::EQ_STRICT: - case Token::EQ: - return equal; - case Token::LT: - return less; - case Token::GT: - return greater; - case Token::LTE: - return less_equal; - case Token::GTE: - return greater_equal; - default: - UNREACHABLE(); - } -} - - -void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { - DCHECK(ToRegister(instr->context()).is(esi)); - DCHECK(ToRegister(instr->left()).is(edx)); - DCHECK(ToRegister(instr->right()).is(eax)); - - Handle code = CodeFactory::StringCompare(isolate(), instr->op()).code(); - CallCode(code, RelocInfo::CODE_TARGET, instr); - __ CompareRoot(eax, Heap::kTrueValueRootIndex); - EmitBranch(instr, equal); -} - - -static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { - InstanceType from = instr->from(); - InstanceType to = instr->to(); - if (from == FIRST_TYPE) return to; - DCHECK(from == to || to == LAST_TYPE); - return from; -} - - -static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { - InstanceType from = instr->from(); - InstanceType to = instr->to(); - if (from == to) return equal; - if (to == LAST_TYPE) return above_equal; - if (from == FIRST_TYPE) return below_equal; - UNREACHABLE(); -} - - -void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { - Register input = ToRegister(instr->value()); - Register temp = ToRegister(instr->temp()); - - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - __ JumpIfSmi(input, instr->FalseLabel(chunk_)); - } - - __ CmpObjectType(input, TestType(instr->hydrogen()), temp); - EmitBranch(instr, BranchCondition(instr->hydrogen())); -} - -// Branches to a label or falls through with the answer in the z flag. Trashes -// the temp registers, but not the input. -void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false, - Handle class_name, Register input, - Register temp, Register temp2) { - DCHECK(!input.is(temp)); - DCHECK(!input.is(temp2)); - DCHECK(!temp.is(temp2)); - __ JumpIfSmi(input, is_false); - - __ CmpObjectType(input, FIRST_FUNCTION_TYPE, temp); - STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE); - if (String::Equals(isolate()->factory()->Function_string(), class_name)) { - __ j(above_equal, is_true); - } else { - __ j(above_equal, is_false); - } - - // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. - // Check if the constructor in the map is a function. - __ GetMapConstructor(temp, temp, temp2); - // Objects with a non-function constructor have class 'Object'. 
- __ CmpInstanceType(temp2, JS_FUNCTION_TYPE); - if (String::Equals(class_name, isolate()->factory()->Object_string())) { - __ j(not_equal, is_true); - } else { - __ j(not_equal, is_false); - } - - // temp now contains the constructor function. Grab the - // instance class name from there. - __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset)); - __ mov(temp, - FieldOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset)); - // The class name we are testing against is internalized since it's a literal. - // The name in the constructor is internalized because of the way the context - // is booted. This routine isn't expected to work for random API-created - // classes and it doesn't have to because you can't access it with natives - // syntax. Since both sides are internalized it is sufficient to use an - // identity comparison. - __ cmp(temp, class_name); - // End with the answer in the z flag. -} - -void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { - Register input = ToRegister(instr->value()); - Register temp = ToRegister(instr->temp()); - Register temp2 = ToRegister(instr->temp2()); - - Handle class_name = instr->hydrogen()->class_name(); - - EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), - class_name, input, temp, temp2); - - EmitBranch(instr, equal); -} - -void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { - Register reg = ToRegister(instr->value()); - __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map()); - EmitBranch(instr, equal); -} - - -void LCodeGen::DoHasInPrototypeChainAndBranch( - LHasInPrototypeChainAndBranch* instr) { - Register const object = ToRegister(instr->object()); - Register const object_map = ToRegister(instr->scratch()); - Register const object_prototype = object_map; - Register const prototype = ToRegister(instr->prototype()); - - // The {object} must be a spec object. It's sufficient to know that {object} - // is not a smi, since all other non-spec objects have {null} prototypes and - // will be ruled out below. - if (instr->hydrogen()->ObjectNeedsSmiCheck()) { - __ test(object, Immediate(kSmiTagMask)); - EmitFalseBranch(instr, zero); - } - - // Loop through the {object}s prototype chain looking for the {prototype}. - __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset)); - Label loop; - __ bind(&loop); - - // Deoptimize if the object needs to be access checked. - __ test_b(FieldOperand(object_map, Map::kBitFieldOffset), - Immediate(1 << Map::kIsAccessCheckNeeded)); - DeoptimizeIf(not_zero, instr, DeoptimizeReason::kAccessCheck); - // Deoptimize for proxies. 
- __ CmpInstanceType(object_map, JS_PROXY_TYPE); - DeoptimizeIf(equal, instr, DeoptimizeReason::kProxy); - - __ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset)); - __ cmp(object_prototype, factory()->null_value()); - EmitFalseBranch(instr, equal); - __ cmp(object_prototype, prototype); - EmitTrueBranch(instr, equal); - __ mov(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset)); - __ jmp(&loop); -} - - -void LCodeGen::DoCmpT(LCmpT* instr) { - Token::Value op = instr->op(); - - Handle ic = CodeFactory::CompareIC(isolate(), op).code(); - CallCode(ic, RelocInfo::CODE_TARGET, instr); - - Condition condition = ComputeCompareCondition(op); - Label true_value, done; - __ test(eax, Operand(eax)); - __ j(condition, &true_value, Label::kNear); - __ mov(ToRegister(instr->result()), factory()->false_value()); - __ jmp(&done, Label::kNear); - __ bind(&true_value); - __ mov(ToRegister(instr->result()), factory()->true_value()); - __ bind(&done); -} - -void LCodeGen::EmitReturn(LReturn* instr) { - int extra_value_count = 1; - - if (instr->has_constant_parameter_count()) { - int parameter_count = ToInteger32(instr->constant_parameter_count()); - __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx); - } else { - DCHECK(info()->IsStub()); // Functions would need to drop one more value. - Register reg = ToRegister(instr->parameter_count()); - // The argument count parameter is a smi - __ SmiUntag(reg); - Register return_addr_reg = reg.is(ecx) ? ebx : ecx; - - // emit code to restore stack based on instr->parameter_count() - __ pop(return_addr_reg); // save return address - __ shl(reg, kPointerSizeLog2); - __ add(esp, reg); - __ jmp(return_addr_reg); - } -} - - -void LCodeGen::DoReturn(LReturn* instr) { - if (FLAG_trace && info()->IsOptimizing()) { - // Preserve the return value on the stack and rely on the runtime call - // to return the value in the same register. We're leaving the code - // managed by the register allocator and tearing down the frame, it's - // safe to write to the context register. 
- __ push(eax); - __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); - __ CallRuntime(Runtime::kTraceExit); - } - if (NeedsEagerFrame()) { - __ mov(esp, ebp); - __ pop(ebp); - } - - EmitReturn(instr); -} - - -void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { - Register context = ToRegister(instr->context()); - Register result = ToRegister(instr->result()); - __ mov(result, ContextOperand(context, instr->slot_index())); - - if (instr->hydrogen()->RequiresHoleCheck()) { - __ cmp(result, factory()->the_hole_value()); - if (instr->hydrogen()->DeoptimizesOnHole()) { - DeoptimizeIf(equal, instr, DeoptimizeReason::kHole); - } else { - Label is_not_hole; - __ j(not_equal, &is_not_hole, Label::kNear); - __ mov(result, factory()->undefined_value()); - __ bind(&is_not_hole); - } - } -} - - -void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { - Register context = ToRegister(instr->context()); - Register value = ToRegister(instr->value()); - - Label skip_assignment; - - Operand target = ContextOperand(context, instr->slot_index()); - if (instr->hydrogen()->RequiresHoleCheck()) { - __ cmp(target, factory()->the_hole_value()); - if (instr->hydrogen()->DeoptimizesOnHole()) { - DeoptimizeIf(equal, instr, DeoptimizeReason::kHole); - } else { - __ j(not_equal, &skip_assignment, Label::kNear); - } - } - - __ mov(target, value); - if (instr->hydrogen()->NeedsWriteBarrier()) { - SmiCheck check_needed = - instr->hydrogen()->value()->type().IsHeapObject() - ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; - Register temp = ToRegister(instr->temp()); - int offset = Context::SlotOffset(instr->slot_index()); - __ RecordWriteContextSlot(context, offset, value, temp, kSaveFPRegs, - EMIT_REMEMBERED_SET, check_needed); - } - - __ bind(&skip_assignment); -} - - -void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { - HObjectAccess access = instr->hydrogen()->access(); - int offset = access.offset(); - - if (access.IsExternalMemory()) { - Register result = ToRegister(instr->result()); - MemOperand operand = instr->object()->IsConstantOperand() - ? MemOperand::StaticVariable(ToExternalReference( - LConstantOperand::cast(instr->object()))) - : MemOperand(ToRegister(instr->object()), offset); - __ Load(result, operand, access.representation()); - return; - } - - Register object = ToRegister(instr->object()); - if (instr->hydrogen()->representation().IsDouble()) { - X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset)); - return; - } - - Register result = ToRegister(instr->result()); - if (!access.IsInobject()) { - __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset)); - object = result; - } - __ Load(result, FieldOperand(object, offset), access.representation()); -} - - -void LCodeGen::EmitPushTaggedOperand(LOperand* operand) { - DCHECK(!operand->IsDoubleRegister()); - if (operand->IsConstantOperand()) { - Handle object = ToHandle(LConstantOperand::cast(operand)); - AllowDeferredHandleDereference smi_check; - if (object->IsSmi()) { - __ Push(Handle::cast(object)); - } else { - __ PushHeapObject(Handle::cast(object)); - } - } else if (operand->IsRegister()) { - __ push(ToRegister(operand)); - } else { - __ push(ToOperand(operand)); - } -} - - -void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { - Register function = ToRegister(instr->function()); - Register temp = ToRegister(instr->temp()); - Register result = ToRegister(instr->result()); - - // Get the prototype or initial map from the function. 
- __ mov(result, - FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); - - // Check that the function has a prototype or an initial map. - __ cmp(Operand(result), Immediate(factory()->the_hole_value())); - DeoptimizeIf(equal, instr, DeoptimizeReason::kHole); - - // If the function does not have an initial map, we're done. - Label done; - __ CmpObjectType(result, MAP_TYPE, temp); - __ j(not_equal, &done, Label::kNear); - - // Get the prototype from the initial map. - __ mov(result, FieldOperand(result, Map::kPrototypeOffset)); - - // All done. - __ bind(&done); -} - - -void LCodeGen::DoLoadRoot(LLoadRoot* instr) { - Register result = ToRegister(instr->result()); - __ LoadRoot(result, instr->index()); -} - - -void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { - Register arguments = ToRegister(instr->arguments()); - Register result = ToRegister(instr->result()); - if (instr->length()->IsConstantOperand() && - instr->index()->IsConstantOperand()) { - int const_index = ToInteger32(LConstantOperand::cast(instr->index())); - int const_length = ToInteger32(LConstantOperand::cast(instr->length())); - int index = (const_length - const_index) + 1; - __ mov(result, Operand(arguments, index * kPointerSize)); - } else { - Register length = ToRegister(instr->length()); - Operand index = ToOperand(instr->index()); - // There are two words between the frame pointer and the last argument. - // Subtracting from length accounts for one of them add one more. - __ sub(length, index); - __ mov(result, Operand(arguments, length, times_4, kPointerSize)); - } -} - - -void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { - ElementsKind elements_kind = instr->elements_kind(); - LOperand* key = instr->key(); - if (!key->IsConstantOperand() && - ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(), - elements_kind)) { - __ SmiUntag(ToRegister(key)); - } - Operand operand(BuildFastArrayOperand( - instr->elements(), - key, - instr->hydrogen()->key()->representation(), - elements_kind, - instr->base_offset())); - if (elements_kind == FLOAT32_ELEMENTS) { - X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand); - } else if (elements_kind == FLOAT64_ELEMENTS) { - X87Mov(ToX87Register(instr->result()), operand); - } else { - Register result(ToRegister(instr->result())); - switch (elements_kind) { - case INT8_ELEMENTS: - __ movsx_b(result, operand); - break; - case UINT8_ELEMENTS: - case UINT8_CLAMPED_ELEMENTS: - __ movzx_b(result, operand); - break; - case INT16_ELEMENTS: - __ movsx_w(result, operand); - break; - case UINT16_ELEMENTS: - __ movzx_w(result, operand); - break; - case INT32_ELEMENTS: - __ mov(result, operand); - break; - case UINT32_ELEMENTS: - __ mov(result, operand); - if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { - __ test(result, Operand(result)); - DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue); - } - break; - case FLOAT32_ELEMENTS: - case FLOAT64_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case DICTIONARY_ELEMENTS: - case FAST_SLOPPY_ARGUMENTS_ELEMENTS: - case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: - case FAST_STRING_WRAPPER_ELEMENTS: - case SLOW_STRING_WRAPPER_ELEMENTS: - case NO_ELEMENTS: - UNREACHABLE(); - break; - } - } -} - - -void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { - if (instr->hydrogen()->RequiresHoleCheck()) { - Operand hole_check_operand = 
BuildFastArrayOperand( - instr->elements(), instr->key(), - instr->hydrogen()->key()->representation(), - FAST_DOUBLE_ELEMENTS, - instr->base_offset() + sizeof(kHoleNanLower32)); - __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); - DeoptimizeIf(equal, instr, DeoptimizeReason::kHole); - } - - Operand double_load_operand = BuildFastArrayOperand( - instr->elements(), - instr->key(), - instr->hydrogen()->key()->representation(), - FAST_DOUBLE_ELEMENTS, - instr->base_offset()); - X87Mov(ToX87Register(instr->result()), double_load_operand); -} - - -void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { - Register result = ToRegister(instr->result()); - - // Load the result. - __ mov(result, - BuildFastArrayOperand(instr->elements(), instr->key(), - instr->hydrogen()->key()->representation(), - FAST_ELEMENTS, instr->base_offset())); - - // Check for the hole value. - if (instr->hydrogen()->RequiresHoleCheck()) { - if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { - __ test(result, Immediate(kSmiTagMask)); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotASmi); - } else { - __ cmp(result, factory()->the_hole_value()); - DeoptimizeIf(equal, instr, DeoptimizeReason::kHole); - } - } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) { - DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS); - Label done; - __ cmp(result, factory()->the_hole_value()); - __ j(not_equal, &done); - if (info()->IsStub()) { - // A stub can safely convert the hole to undefined only if the array - // protector cell contains (Smi) Isolate::kProtectorValid. - // Otherwise it needs to bail out. - __ LoadRoot(result, Heap::kArrayProtectorRootIndex); - __ cmp(FieldOperand(result, PropertyCell::kValueOffset), - Immediate(Smi::FromInt(Isolate::kProtectorValid))); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kHole); - } - __ mov(result, isolate()->factory()->undefined_value()); - __ bind(&done); - } -} - - -void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { - if (instr->is_fixed_typed_array()) { - DoLoadKeyedExternalArray(instr); - } else if (instr->hydrogen()->representation().IsDouble()) { - DoLoadKeyedFixedDoubleArray(instr); - } else { - DoLoadKeyedFixedArray(instr); - } -} - - -Operand LCodeGen::BuildFastArrayOperand( - LOperand* elements_pointer, - LOperand* key, - Representation key_representation, - ElementsKind elements_kind, - uint32_t base_offset) { - Register elements_pointer_reg = ToRegister(elements_pointer); - int element_shift_size = ElementsKindToShiftSize(elements_kind); - int shift_size = element_shift_size; - if (key->IsConstantOperand()) { - int constant_value = ToInteger32(LConstantOperand::cast(key)); - if (constant_value & 0xF0000000) { - Abort(kArrayIndexConstantValueTooBig); - } - return Operand(elements_pointer_reg, - ((constant_value) << shift_size) - + base_offset); - } else { - // Take the tag bit into account while computing the shift size. - if (key_representation.IsSmi() && (shift_size >= 1)) { - shift_size -= kSmiTagSize; - } - ScaleFactor scale_factor = static_cast(shift_size); - return Operand(elements_pointer_reg, - ToRegister(key), - scale_factor, - base_offset); - } -} - - -void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { - Register result = ToRegister(instr->result()); - - if (instr->hydrogen()->from_inlined()) { - __ lea(result, Operand(esp, -2 * kPointerSize)); - } else if (instr->hydrogen()->arguments_adaptor()) { - // Check for arguments adapter frame. 
- Label done, adapted; - __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); - __ mov(result, - Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset)); - __ cmp(Operand(result), - Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); - __ j(equal, &adapted, Label::kNear); - - // No arguments adaptor frame. - __ mov(result, Operand(ebp)); - __ jmp(&done, Label::kNear); - - // Arguments adaptor frame present. - __ bind(&adapted); - __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); - - // Result is the frame pointer for the frame if not adapted and for the real - // frame below the adaptor frame if adapted. - __ bind(&done); - } else { - __ mov(result, Operand(ebp)); - } -} - - -void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { - Operand elem = ToOperand(instr->elements()); - Register result = ToRegister(instr->result()); - - Label done; - - // If no arguments adaptor frame the number of arguments is fixed. - __ cmp(ebp, elem); - __ mov(result, Immediate(scope()->num_parameters())); - __ j(equal, &done, Label::kNear); - - // Arguments adaptor frame present. Get argument length from there. - __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); - __ mov(result, Operand(result, - ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ SmiUntag(result); - - // Argument length is in result register. - __ bind(&done); -} - - -void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { - Register receiver = ToRegister(instr->receiver()); - Register function = ToRegister(instr->function()); - - // If the receiver is null or undefined, we have to pass the global - // object as a receiver to normal functions. Values have to be - // passed unchanged to builtins and strict-mode functions. - Label receiver_ok, global_object; - Label::Distance dist; - - // For x87 debug version jitted code's size exceeds 128 bytes whether - // FLAG_deopt_every_n_times - // is set or not. Always use Label:kFar for label distance for debug mode. - if (FLAG_debug_code) - dist = Label::kFar; - else - dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; - - Register scratch = ToRegister(instr->temp()); - - if (!instr->hydrogen()->known_function()) { - // Do not transform the receiver to object for strict mode functions or - // builtins. - __ mov(scratch, - FieldOperand(function, JSFunction::kSharedFunctionInfoOffset)); - __ test(FieldOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset), - Immediate(SharedFunctionInfo::IsStrictBit::kMask | - SharedFunctionInfo::IsNativeBit::kMask)); - __ j(not_equal, &receiver_ok, dist); - } - - // Normal function. Replace undefined or null with global receiver. - __ cmp(receiver, factory()->null_value()); - __ j(equal, &global_object, dist); - __ cmp(receiver, factory()->undefined_value()); - __ j(equal, &global_object, dist); - - // The receiver should be a JS object. 
- __ test(receiver, Immediate(kSmiTagMask)); - DeoptimizeIf(equal, instr, DeoptimizeReason::kSmi); - __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, scratch); - DeoptimizeIf(below, instr, DeoptimizeReason::kNotAJavaScriptObject); - - __ jmp(&receiver_ok, dist); - __ bind(&global_object); - __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset)); - __ mov(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX)); - __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_PROXY_INDEX)); - __ bind(&receiver_ok); -} - - -void LCodeGen::DoApplyArguments(LApplyArguments* instr) { - Register receiver = ToRegister(instr->receiver()); - Register function = ToRegister(instr->function()); - Register length = ToRegister(instr->length()); - Register elements = ToRegister(instr->elements()); - DCHECK(receiver.is(eax)); // Used for parameter count. - DCHECK(function.is(edi)); // Required by InvokeFunction. - DCHECK(ToRegister(instr->result()).is(eax)); - - // Copy the arguments to this function possibly from the - // adaptor frame below it. - const uint32_t kArgumentsLimit = 1 * KB; - __ cmp(length, kArgumentsLimit); - DeoptimizeIf(above, instr, DeoptimizeReason::kTooManyArguments); - - __ push(receiver); - __ mov(receiver, length); - - // Loop through the arguments pushing them onto the execution - // stack. - Label invoke, loop; - // length is a small non-negative integer, due to the test above. - __ test(length, Operand(length)); - __ j(zero, &invoke, Label::kNear); - __ bind(&loop); - __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize)); - __ dec(length); - __ j(not_zero, &loop); - - // Invoke the function. - __ bind(&invoke); - - InvokeFlag flag = CALL_FUNCTION; - if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) { - DCHECK(!info()->saves_caller_doubles()); - // TODO(ishell): drop current frame before pushing arguments to the stack. - flag = JUMP_FUNCTION; - ParameterCount actual(eax); - // It is safe to use ebx, ecx and edx as scratch registers here given that - // 1) we are not going to return to caller function anyway, - // 2) ebx (expected arguments count) and edx (new.target) will be - // initialized below. - PrepareForTailCall(actual, ebx, ecx, edx); - } - - DCHECK(instr->HasPointerMap()); - LPointerMap* pointers = instr->pointer_map(); - SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt); - ParameterCount actual(eax); - __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator); -} - - -void LCodeGen::DoDebugBreak(LDebugBreak* instr) { - __ int3(); -} - - -void LCodeGen::DoPushArgument(LPushArgument* instr) { - LOperand* argument = instr->value(); - EmitPushTaggedOperand(argument); -} - - -void LCodeGen::DoDrop(LDrop* instr) { - __ Drop(instr->count()); -} - - -void LCodeGen::DoThisFunction(LThisFunction* instr) { - Register result = ToRegister(instr->result()); - __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); -} - - -void LCodeGen::DoContext(LContext* instr) { - Register result = ToRegister(instr->result()); - if (info()->IsOptimizing()) { - __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset)); - } else { - // If there is no frame, the context must be in esi. 
- DCHECK(result.is(esi)); - } -} - - -void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { - DCHECK(ToRegister(instr->context()).is(esi)); - __ push(Immediate(instr->hydrogen()->declarations())); - __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags()))); - __ push(Immediate(instr->hydrogen()->feedback_vector())); - CallRuntime(Runtime::kDeclareGlobals, instr); -} - -void LCodeGen::CallKnownFunction(Handle function, - int formal_parameter_count, int arity, - bool is_tail_call, LInstruction* instr) { - bool dont_adapt_arguments = - formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; - bool can_invoke_directly = - dont_adapt_arguments || formal_parameter_count == arity; - - Register function_reg = edi; - - if (can_invoke_directly) { - // Change context. - __ mov(esi, FieldOperand(function_reg, JSFunction::kContextOffset)); - - // Always initialize new target and number of actual arguments. - __ mov(edx, factory()->undefined_value()); - __ mov(eax, arity); - - bool is_self_call = function.is_identical_to(info()->closure()); - - // Invoke function directly. - if (is_self_call) { - Handle self(reinterpret_cast(__ CodeObject().location())); - if (is_tail_call) { - __ Jump(self, RelocInfo::CODE_TARGET); - } else { - __ Call(self, RelocInfo::CODE_TARGET); - } - } else { - Operand target = FieldOperand(function_reg, JSFunction::kCodeEntryOffset); - if (is_tail_call) { - __ jmp(target); - } else { - __ call(target); - } - } - - if (!is_tail_call) { - // Set up deoptimization. - RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); - } - } else { - // We need to adapt arguments. - LPointerMap* pointers = instr->pointer_map(); - SafepointGenerator generator( - this, pointers, Safepoint::kLazyDeopt); - ParameterCount actual(arity); - ParameterCount expected(formal_parameter_count); - InvokeFlag flag = is_tail_call ? 
JUMP_FUNCTION : CALL_FUNCTION; - __ InvokeFunction(function_reg, expected, actual, flag, generator); - } -} - - -void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { - DCHECK(ToRegister(instr->result()).is(eax)); - - if (instr->hydrogen()->IsTailCall()) { - if (NeedsEagerFrame()) __ leave(); - - if (instr->target()->IsConstantOperand()) { - LConstantOperand* target = LConstantOperand::cast(instr->target()); - Handle code = Handle::cast(ToHandle(target)); - __ jmp(code, RelocInfo::CODE_TARGET); - } else { - DCHECK(instr->target()->IsRegister()); - Register target = ToRegister(instr->target()); - __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag)); - __ jmp(target); - } - } else { - LPointerMap* pointers = instr->pointer_map(); - SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); - - if (instr->target()->IsConstantOperand()) { - LConstantOperand* target = LConstantOperand::cast(instr->target()); - Handle code = Handle::cast(ToHandle(target)); - generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET)); - __ call(code, RelocInfo::CODE_TARGET); - } else { - DCHECK(instr->target()->IsRegister()); - Register target = ToRegister(instr->target()); - generator.BeforeCall(__ CallSize(Operand(target))); - __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag)); - __ call(target); - } - generator.AfterCall(); - } -} - - -void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { - Register input_reg = ToRegister(instr->value()); - __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), - factory()->heap_number_map()); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber); - - Label slow, allocated, done; - uint32_t available_regs = eax.bit() | ecx.bit() | edx.bit() | ebx.bit(); - available_regs &= ~input_reg.bit(); - if (instr->context()->IsRegister()) { - // Make sure that the context isn't overwritten in the AllocateHeapNumber - // macro below. - available_regs &= ~ToRegister(instr->context()).bit(); - } - - Register tmp = - Register::from_code(base::bits::CountTrailingZeros32(available_regs)); - available_regs &= ~tmp.bit(); - Register tmp2 = - Register::from_code(base::bits::CountTrailingZeros32(available_regs)); - - // Preserve the value of all registers. - PushSafepointRegistersScope scope(this); - - __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset)); - // Check the sign of the argument. If the argument is positive, just - // return it. We do not need to patch the stack since |input| and - // |result| are the same register and |input| will be restored - // unchanged by popping safepoint registers. - __ test(tmp, Immediate(HeapNumber::kSignMask)); - __ j(zero, &done, Label::kNear); - - __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow); - __ jmp(&allocated, Label::kNear); - - // Slow case: Call the runtime system to do the number allocation. - __ bind(&slow); - CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, - instr, instr->context()); - // Set the pointer to the new heap number in tmp. - if (!tmp.is(eax)) __ mov(tmp, eax); - // Restore input_reg after call to runtime. 
- __ LoadFromSafepointRegisterSlot(input_reg, input_reg); - - __ bind(&allocated); - __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset)); - __ and_(tmp2, ~HeapNumber::kSignMask); - __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2); - __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); - __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2); - __ StoreToSafepointRegisterSlot(input_reg, tmp); - - __ bind(&done); -} - - -void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { - Register input_reg = ToRegister(instr->value()); - __ test(input_reg, Operand(input_reg)); - Label is_positive; - __ j(not_sign, &is_positive, Label::kNear); - __ neg(input_reg); // Sets flags. - DeoptimizeIf(negative, instr, DeoptimizeReason::kOverflow); - __ bind(&is_positive); -} - - -void LCodeGen::DoMathAbs(LMathAbs* instr) { - // Class for deferred case. - class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode { - public: - DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, - LMathAbs* instr, - const X87Stack& x87_stack) - : LDeferredCode(codegen, x87_stack), instr_(instr) { } - void Generate() override { - codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); - } - LInstruction* instr() override { return instr_; } - - private: - LMathAbs* instr_; - }; - - DCHECK(instr->value()->Equals(instr->result())); - Representation r = instr->hydrogen()->value()->representation(); - - if (r.IsDouble()) { - X87Register value = ToX87Register(instr->value()); - X87Fxch(value); - __ fabs(); - } else if (r.IsSmiOrInteger32()) { - EmitIntegerMathAbs(instr); - } else { // Tagged case. - DeferredMathAbsTaggedHeapNumber* deferred = - new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_); - Register input_reg = ToRegister(instr->value()); - // Smi check. - __ JumpIfNotSmi(input_reg, deferred->entry()); - EmitIntegerMathAbs(instr); - __ bind(deferred->exit()); - } -} - - -void LCodeGen::DoMathFloor(LMathFloor* instr) { - Register output_reg = ToRegister(instr->result()); - X87Register input_reg = ToX87Register(instr->value()); - X87Fxch(input_reg); - - Label not_minus_zero, done; - // Deoptimize on unordered. - __ fldz(); - __ fld(1); - __ FCmp(); - DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN); - __ j(below, &not_minus_zero, Label::kNear); - - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // Check for negative zero. - __ j(not_equal, &not_minus_zero, Label::kNear); - // +- 0.0. - __ fld(0); - __ FXamSign(); - DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero); - __ Move(output_reg, Immediate(0)); - __ jmp(&done, Label::kFar); - } - - // Positive input. - // rc=01B, round down.
- __ bind(&not_minus_zero); - __ fnclex(); - __ X87SetRC(0x0400); - __ sub(esp, Immediate(kPointerSize)); - __ fist_s(Operand(esp, 0)); - __ pop(output_reg); - __ X87SetRC(0x0000); - __ X87CheckIA(); - DeoptimizeIf(equal, instr, DeoptimizeReason::kOverflow); - __ fnclex(); - __ X87SetRC(0x0000); - __ bind(&done); -} - - -void LCodeGen::DoMathRound(LMathRound* instr) { - X87Register input_reg = ToX87Register(instr->value()); - Register result = ToRegister(instr->result()); - X87Fxch(input_reg); - Label below_one_half, below_minus_one_half, done; - - ExternalReference one_half = ExternalReference::address_of_one_half(); - ExternalReference minus_one_half = - ExternalReference::address_of_minus_one_half(); - - __ fld_d(Operand::StaticVariable(one_half)); - __ fld(1); - __ FCmp(); - __ j(carry, &below_one_half); - - // Use rounds towards zero, since 0.5 <= x, we use floor(0.5 + x) - __ fld(0); - __ fadd_d(Operand::StaticVariable(one_half)); - // rc=11B, round toward zero. - __ X87SetRC(0x0c00); - __ sub(esp, Immediate(kPointerSize)); - // Clear exception bits. - __ fnclex(); - __ fistp_s(MemOperand(esp, 0)); - // Restore round mode. - __ X87SetRC(0x0000); - // Check overflow. - __ X87CheckIA(); - __ pop(result); - DeoptimizeIf(equal, instr, DeoptimizeReason::kConversionOverflow); - __ fnclex(); - // Restore round mode. - __ X87SetRC(0x0000); - __ jmp(&done); - - __ bind(&below_one_half); - __ fld_d(Operand::StaticVariable(minus_one_half)); - __ fld(1); - __ FCmp(); - __ j(carry, &below_minus_one_half); - // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if - // we can ignore the difference between a result of -0 and +0. - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // If the sign is positive, we return +0. - __ fld(0); - __ FXamSign(); - DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero); - } - __ Move(result, Immediate(0)); - __ jmp(&done); - - __ bind(&below_minus_one_half); - __ fld(0); - __ fadd_d(Operand::StaticVariable(one_half)); - // rc=01B, round down. - __ X87SetRC(0x0400); - __ sub(esp, Immediate(kPointerSize)); - // Clear exception bits. - __ fnclex(); - __ fistp_s(MemOperand(esp, 0)); - // Restore round mode. - __ X87SetRC(0x0000); - // Check overflow. - __ X87CheckIA(); - __ pop(result); - DeoptimizeIf(equal, instr, DeoptimizeReason::kConversionOverflow); - __ fnclex(); - // Restore round mode. - __ X87SetRC(0x0000); - - __ bind(&done); -} - - -void LCodeGen::DoMathFround(LMathFround* instr) { - X87Register input_reg = ToX87Register(instr->value()); - X87Fxch(input_reg); - __ sub(esp, Immediate(kPointerSize)); - __ fstp_s(MemOperand(esp, 0)); - X87Fld(MemOperand(esp, 0), kX87FloatOperand); - __ add(esp, Immediate(kPointerSize)); -} - - -void LCodeGen::DoMathSqrt(LMathSqrt* instr) { - X87Register input_reg = ToX87Register(instr->value()); - __ X87SetFPUCW(0x027F); - X87Fxch(input_reg); - __ fsqrt(); - __ X87SetFPUCW(0x037F); -} - - -void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { - X87Register input_reg = ToX87Register(instr->value()); - DCHECK(ToX87Register(instr->result()).is(input_reg)); - X87Fxch(input_reg); - // Note that according to ECMA-262 15.8.2.13: - // Math.pow(-Infinity, 0.5) == Infinity - // Math.sqrt(-Infinity) == NaN - Label done, sqrt; - // Check base for -Infinity. C3 == 0, C2 == 1, C1 == 1 and C0 == 1 - __ fxam(); - __ push(eax); - __ fnstsw_ax(); - __ and_(eax, Immediate(0x4700)); - __ cmp(eax, Immediate(0x0700)); - __ j(not_equal, &sqrt, Label::kNear); - // If input is -Infinity, return Infinity.
- __ fchs(); - __ jmp(&done, Label::kNear); - - // Square root. - __ bind(&sqrt); - __ fldz(); - __ faddp(); // Convert -0 to +0. - __ fsqrt(); - __ bind(&done); - __ pop(eax); -} - - -void LCodeGen::DoPower(LPower* instr) { - Representation exponent_type = instr->hydrogen()->right()->representation(); - X87Register result = ToX87Register(instr->result()); - // Having marked this as a call, we can use any registers. - X87Register base = ToX87Register(instr->left()); - ExternalReference one_half = ExternalReference::address_of_one_half(); - - if (exponent_type.IsSmi()) { - Register exponent = ToRegister(instr->right()); - X87LoadForUsage(base); - __ SmiUntag(exponent); - __ push(exponent); - __ fild_s(MemOperand(esp, 0)); - __ pop(exponent); - } else if (exponent_type.IsTagged()) { - Register exponent = ToRegister(instr->right()); - Register temp = exponent.is(ecx) ? eax : ecx; - Label no_deopt, done; - X87LoadForUsage(base); - __ JumpIfSmi(exponent, &no_deopt); - __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, temp); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber); - // Heap number(double) - __ fld_d(FieldOperand(exponent, HeapNumber::kValueOffset)); - __ jmp(&done); - // SMI - __ bind(&no_deopt); - __ SmiUntag(exponent); - __ push(exponent); - __ fild_s(MemOperand(esp, 0)); - __ pop(exponent); - __ bind(&done); - } else if (exponent_type.IsInteger32()) { - Register exponent = ToRegister(instr->right()); - X87LoadForUsage(base); - __ push(exponent); - __ fild_s(MemOperand(esp, 0)); - __ pop(exponent); - } else { - DCHECK(exponent_type.IsDouble()); - X87Register exponent_double = ToX87Register(instr->right()); - X87LoadForUsage(base, exponent_double); - } - - // FP data stack {base, exponent(TOS)}. - // Handle (exponent==+-0.5 && base == -0). - Label not_plus_0; - __ fld(0); - __ fabs(); - X87Fld(Operand::StaticVariable(one_half), kX87DoubleOperand); - __ FCmp(); - __ j(parity_even, &not_plus_0, Label::kNear); // NaN. - __ j(not_equal, &not_plus_0, Label::kNear); - __ fldz(); - // FP data stack {base, exponent(TOS), zero}. - __ faddp(2); - __ bind(&not_plus_0); - - { - __ PrepareCallCFunction(4, eax); - __ fstp_d(MemOperand(esp, kDoubleSize)); // Exponent value. - __ fstp_d(MemOperand(esp, 0)); // Base value. - X87PrepareToWrite(result); - __ CallCFunction(ExternalReference::power_double_double_function(isolate()), - 4); - // Return value is in st(0) on ia32. - X87CommitWrite(result); - } -} - - -void LCodeGen::DoMathLog(LMathLog* instr) { - DCHECK(instr->value()->Equals(instr->result())); - X87Register result = ToX87Register(instr->result()); - X87Register input_reg = ToX87Register(instr->value()); - X87Fxch(input_reg); - - // Pass one double as argument on the stack. - __ PrepareCallCFunction(2, eax); - __ fstp_d(MemOperand(esp, 0)); - X87PrepareToWrite(result); - __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 2); - // Return value is in st(0) on ia32. - X87CommitWrite(result); -} - - -void LCodeGen::DoMathClz32(LMathClz32* instr) { - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - - __ Lzcnt(result, input); -} - -void LCodeGen::DoMathCos(LMathCos* instr) { - X87Register result = ToX87Register(instr->result()); - X87Register input_reg = ToX87Register(instr->value()); - __ fld(x87_stack_.st(input_reg)); - - // Pass one double as argument on the stack.
- __ PrepareCallCFunction(2, eax); - __ fstp_d(MemOperand(esp, 0)); - X87PrepareToWrite(result); - __ X87SetFPUCW(0x027F); - __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 2); - __ X87SetFPUCW(0x037F); - // Return value is in st(0) on ia32. - X87CommitWrite(result); -} - -void LCodeGen::DoMathSin(LMathSin* instr) { - X87Register result = ToX87Register(instr->result()); - X87Register input_reg = ToX87Register(instr->value()); - __ fld(x87_stack_.st(input_reg)); - - // Pass one double as argument on the stack. - __ PrepareCallCFunction(2, eax); - __ fstp_d(MemOperand(esp, 0)); - X87PrepareToWrite(result); - __ X87SetFPUCW(0x027F); - __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 2); - __ X87SetFPUCW(0x037F); - // Return value is in st(0) on ia32. - X87CommitWrite(result); -} - -void LCodeGen::DoMathExp(LMathExp* instr) { - X87Register result = ToX87Register(instr->result()); - X87Register input_reg = ToX87Register(instr->value()); - __ fld(x87_stack_.st(input_reg)); - - // Pass one double as argument on the stack. - __ PrepareCallCFunction(2, eax); - __ fstp_d(MemOperand(esp, 0)); - X87PrepareToWrite(result); - __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 2); - // Return value is in st(0) on ia32. - X87CommitWrite(result); -} - -void LCodeGen::PrepareForTailCall(const ParameterCount& actual, - Register scratch1, Register scratch2, - Register scratch3) { -#if DEBUG - if (actual.is_reg()) { - DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3)); - } else { - DCHECK(!AreAliased(scratch1, scratch2, scratch3)); - } -#endif - if (FLAG_code_comments) { - if (actual.is_reg()) { - Comment(";;; PrepareForTailCall, actual: %s {", - RegisterConfiguration::Crankshaft()->GetGeneralRegisterName( - actual.reg().code())); - } else { - Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate()); - } - } - - // Check if next frame is an arguments adaptor frame. - Register caller_args_count_reg = scratch1; - Label no_arguments_adaptor, formal_parameter_count_loaded; - __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); - __ cmp(Operand(scratch2, StandardFrameConstants::kContextOffset), - Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); - __ j(not_equal, &no_arguments_adaptor, Label::kNear); - - // Drop current frame and load arguments count from arguments adaptor frame. - __ mov(ebp, scratch2); - __ mov(caller_args_count_reg, - Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ SmiUntag(caller_args_count_reg); - __ jmp(&formal_parameter_count_loaded, Label::kNear); - - __ bind(&no_arguments_adaptor); - // Load caller's formal parameter count. 
- __ mov(caller_args_count_reg, - Immediate(info()->literal()->parameter_count())); - - __ bind(&formal_parameter_count_loaded); - __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3, - ReturnAddressState::kNotOnStack, 0); - Comment(";;; }"); -} - -void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { - HInvokeFunction* hinstr = instr->hydrogen(); - DCHECK(ToRegister(instr->context()).is(esi)); - DCHECK(ToRegister(instr->function()).is(edi)); - DCHECK(instr->HasPointerMap()); - - bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow; - - if (is_tail_call) { - DCHECK(!info()->saves_caller_doubles()); - ParameterCount actual(instr->arity()); - // It is safe to use ebx, ecx and edx as scratch registers here given that - // 1) we are not going to return to caller function anyway, - // 2) ebx (expected arguments count) and edx (new.target) will be - // initialized below. - PrepareForTailCall(actual, ebx, ecx, edx); - } - - Handle known_function = hinstr->known_function(); - if (known_function.is_null()) { - LPointerMap* pointers = instr->pointer_map(); - SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); - ParameterCount actual(instr->arity()); - InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION; - __ InvokeFunction(edi, no_reg, actual, flag, generator); - } else { - CallKnownFunction(known_function, hinstr->formal_parameter_count(), - instr->arity(), is_tail_call, instr); - } -} - - -void LCodeGen::DoCallNewArray(LCallNewArray* instr) { - DCHECK(ToRegister(instr->context()).is(esi)); - DCHECK(ToRegister(instr->constructor()).is(edi)); - DCHECK(ToRegister(instr->result()).is(eax)); - - __ Move(eax, Immediate(instr->arity())); - __ mov(ebx, instr->hydrogen()->site()); - - ElementsKind kind = instr->hydrogen()->elements_kind(); - AllocationSiteOverrideMode override_mode = AllocationSite::ShouldTrack(kind) - ? 
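PrepareForTailCall above decides where the caller's argument count comes from: if the frame below is an arguments-adaptor frame it is dropped and its stored length is used, otherwise the function's formal parameter count is taken. The same decision stripped of the frame-walking details, as a sketch (the types and field names below are invented for illustration):

```cpp
// Illustrative only: which argument count a tail call should assume.
struct CallerFrame {
  bool is_arguments_adaptor;  // does an adaptor frame sit below us?
  int adaptor_length;         // argument count stored in that adaptor frame
};

int ArgumentCountForTailCall(const CallerFrame& frame,
                             int formal_parameter_count) {
  return frame.is_arguments_adaptor ? frame.adaptor_length
                                    : formal_parameter_count;
}
```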
DISABLE_ALLOCATION_SITES - : DONT_OVERRIDE; - - if (instr->arity() == 0) { - ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - } else if (instr->arity() == 1) { - Label done; - if (IsFastPackedElementsKind(kind)) { - Label packed_case; - // We might need a change here - // look at the first argument - __ mov(ecx, Operand(esp, 0)); - __ test(ecx, ecx); - __ j(zero, &packed_case, Label::kNear); - - ElementsKind holey_kind = GetHoleyElementsKind(kind); - ArraySingleArgumentConstructorStub stub(isolate(), - holey_kind, - override_mode); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - __ jmp(&done, Label::kNear); - __ bind(&packed_case); - } - - ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - __ bind(&done); - } else { - ArrayNArgumentsConstructorStub stub(isolate()); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - } -} - - -void LCodeGen::DoCallRuntime(LCallRuntime* instr) { - DCHECK(ToRegister(instr->context()).is(esi)); - CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles()); -} - - -void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { - Register function = ToRegister(instr->function()); - Register code_object = ToRegister(instr->code_object()); - __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize)); - __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object); -} - - -void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { - Register result = ToRegister(instr->result()); - Register base = ToRegister(instr->base_object()); - if (instr->offset()->IsConstantOperand()) { - LConstantOperand* offset = LConstantOperand::cast(instr->offset()); - __ lea(result, Operand(base, ToInteger32(offset))); - } else { - Register offset = ToRegister(instr->offset()); - __ lea(result, Operand(base, offset, times_1, 0)); - } -} - - -void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { - Representation representation = instr->hydrogen()->field_representation(); - - HObjectAccess access = instr->hydrogen()->access(); - int offset = access.offset(); - - if (access.IsExternalMemory()) { - DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); - MemOperand operand = instr->object()->IsConstantOperand() - ? 
MemOperand::StaticVariable( - ToExternalReference(LConstantOperand::cast(instr->object()))) - : MemOperand(ToRegister(instr->object()), offset); - if (instr->value()->IsConstantOperand()) { - LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); - __ mov(operand, Immediate(ToInteger32(operand_value))); - } else { - Register value = ToRegister(instr->value()); - __ Store(value, operand, representation); - } - return; - } - - Register object = ToRegister(instr->object()); - __ AssertNotSmi(object); - DCHECK(!representation.IsSmi() || - !instr->value()->IsConstantOperand() || - IsSmi(LConstantOperand::cast(instr->value()))); - if (representation.IsDouble()) { - DCHECK(access.IsInobject()); - DCHECK(!instr->hydrogen()->has_transition()); - DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); - X87Register value = ToX87Register(instr->value()); - X87Mov(FieldOperand(object, offset), value); - return; - } - - if (instr->hydrogen()->has_transition()) { - Handle transition = instr->hydrogen()->transition_map(); - AddDeprecationDependency(transition); - __ mov(FieldOperand(object, HeapObject::kMapOffset), transition); - if (instr->hydrogen()->NeedsWriteBarrierForMap()) { - Register temp = ToRegister(instr->temp()); - Register temp_map = ToRegister(instr->temp_map()); - __ mov(temp_map, transition); - __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map); - // Update the write barrier for the map field. - __ RecordWriteForMap(object, transition, temp_map, temp, kSaveFPRegs); - } - } - - // Do the store. - Register write_register = object; - if (!access.IsInobject()) { - write_register = ToRegister(instr->temp()); - __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset)); - } - - MemOperand operand = FieldOperand(write_register, offset); - if (instr->value()->IsConstantOperand()) { - LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); - if (operand_value->IsRegister()) { - Register value = ToRegister(operand_value); - __ Store(value, operand, representation); - } else if (representation.IsInteger32() || representation.IsExternal()) { - Immediate immediate = ToImmediate(operand_value, representation); - DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); - __ mov(operand, immediate); - } else { - Handle handle_value = ToHandle(operand_value); - DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); - __ mov(operand, handle_value); - } - } else { - Register value = ToRegister(instr->value()); - __ Store(value, operand, representation); - } - - if (instr->hydrogen()->NeedsWriteBarrier()) { - Register value = ToRegister(instr->value()); - Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object; - // Update the write barrier for the object for in-object properties. - __ RecordWriteField(write_register, offset, value, temp, kSaveFPRegs, - EMIT_REMEMBERED_SET, - instr->hydrogen()->SmiCheckForWriteBarrier(), - instr->hydrogen()->PointersToHereCheckForValue()); - } -} - - -void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { - Condition cc = instr->hydrogen()->allow_equality() ? 
above : above_equal; - if (instr->index()->IsConstantOperand()) { - __ cmp(ToOperand(instr->length()), - ToImmediate(LConstantOperand::cast(instr->index()), - instr->hydrogen()->length()->representation())); - cc = CommuteCondition(cc); - } else if (instr->length()->IsConstantOperand()) { - __ cmp(ToOperand(instr->index()), - ToImmediate(LConstantOperand::cast(instr->length()), - instr->hydrogen()->index()->representation())); - } else { - __ cmp(ToRegister(instr->index()), ToOperand(instr->length())); - } - if (FLAG_debug_code && instr->hydrogen()->skip_check()) { - Label done; - __ j(NegateCondition(cc), &done, Label::kNear); - __ int3(); - __ bind(&done); - } else { - DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds); - } -} - - -void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { - ElementsKind elements_kind = instr->elements_kind(); - LOperand* key = instr->key(); - if (!key->IsConstantOperand() && - ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(), - elements_kind)) { - __ SmiUntag(ToRegister(key)); - } - Operand operand(BuildFastArrayOperand( - instr->elements(), - key, - instr->hydrogen()->key()->representation(), - elements_kind, - instr->base_offset())); - if (elements_kind == FLOAT32_ELEMENTS) { - X87Mov(operand, ToX87Register(instr->value()), kX87FloatOperand); - } else if (elements_kind == FLOAT64_ELEMENTS) { - uint64_t int_val = kHoleNanInt64; - int32_t lower = static_cast(int_val); - int32_t upper = static_cast(int_val >> (kBitsPerInt)); - Operand operand2 = BuildFastArrayOperand( - instr->elements(), instr->key(), - instr->hydrogen()->key()->representation(), elements_kind, - instr->base_offset() + kPointerSize); - - Label no_special_nan_handling, done; - X87Register value = ToX87Register(instr->value()); - X87Fxch(value); - __ lea(esp, Operand(esp, -kDoubleSize)); - __ fst_d(MemOperand(esp, 0)); - __ lea(esp, Operand(esp, kDoubleSize)); - int offset = sizeof(kHoleNanUpper32); - __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32)); - __ j(not_equal, &no_special_nan_handling, Label::kNear); - __ mov(operand, Immediate(lower)); - __ mov(operand2, Immediate(upper)); - __ jmp(&done, Label::kNear); - - __ bind(&no_special_nan_handling); - __ fst_d(operand); - __ bind(&done); - } else { - Register value = ToRegister(instr->value()); - switch (elements_kind) { - case UINT8_ELEMENTS: - case INT8_ELEMENTS: - case UINT8_CLAMPED_ELEMENTS: - __ mov_b(operand, value); - break; - case UINT16_ELEMENTS: - case INT16_ELEMENTS: - __ mov_w(operand, value); - break; - case UINT32_ELEMENTS: - case INT32_ELEMENTS: - __ mov(operand, value); - break; - case FLOAT32_ELEMENTS: - case FLOAT64_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case DICTIONARY_ELEMENTS: - case FAST_SLOPPY_ARGUMENTS_ELEMENTS: - case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: - case FAST_STRING_WRAPPER_ELEMENTS: - case SLOW_STRING_WRAPPER_ELEMENTS: - case NO_ELEMENTS: - UNREACHABLE(); - break; - } - } -} - - -void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { - Operand double_store_operand = BuildFastArrayOperand( - instr->elements(), - instr->key(), - instr->hydrogen()->key()->representation(), - FAST_DOUBLE_ELEMENTS, - instr->base_offset()); - - uint64_t int_val = kHoleNanInt64; - int32_t lower = static_cast(int_val); - int32_t upper = static_cast(int_val >> (kBitsPerInt)); - Operand double_store_operand2 = BuildFastArrayOperand( - 
instr->elements(), instr->key(), - instr->hydrogen()->key()->representation(), FAST_DOUBLE_ELEMENTS, - instr->base_offset() + kPointerSize); - - if (instr->hydrogen()->IsConstantHoleStore()) { - // This means we should store the (double) hole. No floating point - // registers required. - __ mov(double_store_operand, Immediate(lower)); - __ mov(double_store_operand2, Immediate(upper)); - } else { - Label no_special_nan_handling, done; - X87Register value = ToX87Register(instr->value()); - X87Fxch(value); - - if (instr->NeedsCanonicalization()) { - __ fld(0); - __ fld(0); - __ FCmp(); - __ j(parity_odd, &no_special_nan_handling, Label::kNear); - // All NaNs are Canonicalized to 0x7fffffffffffffff - __ mov(double_store_operand, Immediate(0xffffffff)); - __ mov(double_store_operand2, Immediate(0x7fffffff)); - __ jmp(&done, Label::kNear); - } else { - __ lea(esp, Operand(esp, -kDoubleSize)); - __ fst_d(MemOperand(esp, 0)); - __ lea(esp, Operand(esp, kDoubleSize)); - int offset = sizeof(kHoleNanUpper32); - __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32)); - __ j(not_equal, &no_special_nan_handling, Label::kNear); - __ mov(double_store_operand, Immediate(lower)); - __ mov(double_store_operand2, Immediate(upper)); - __ jmp(&done, Label::kNear); - } - __ bind(&no_special_nan_handling); - __ fst_d(double_store_operand); - __ bind(&done); - } -} - - -void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { - Register elements = ToRegister(instr->elements()); - Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; - - Operand operand = BuildFastArrayOperand( - instr->elements(), - instr->key(), - instr->hydrogen()->key()->representation(), - FAST_ELEMENTS, - instr->base_offset()); - if (instr->value()->IsRegister()) { - __ mov(operand, ToRegister(instr->value())); - } else { - LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); - if (IsSmi(operand_value)) { - Immediate immediate = ToImmediate(operand_value, Representation::Smi()); - __ mov(operand, immediate); - } else { - DCHECK(!IsInteger32(operand_value)); - Handle handle_value = ToHandle(operand_value); - __ mov(operand, handle_value); - } - } - - if (instr->hydrogen()->NeedsWriteBarrier()) { - DCHECK(instr->value()->IsRegister()); - Register value = ToRegister(instr->value()); - DCHECK(!instr->key()->IsConstantOperand()); - SmiCheck check_needed = - instr->hydrogen()->value()->type().IsHeapObject() - ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; - // Compute address of modified element and store it into key register. 
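DoStoreKeyedFixedDoubleArray above stores the hole as its exact 64-bit pattern and rewrites every other NaN to the canonical value 0x7fffffffffffffff before the fst_d. A portable sketch of that store policy follows; the hole bit pattern below is an assumption made for the sketch (the real constant is kHoleNanInt64 inside V8), and only the canonical-NaN value is taken from the code above.

```cpp
#include <cstdint>
#include <cstring>

// Canonical NaN written by the deleted code: low word 0xffffffff,
// high word 0x7fffffff.
constexpr uint64_t kCanonicalNaN = 0x7FFFFFFFFFFFFFFFull;
// Placeholder for the engine's hole marker; value assumed for illustration.
constexpr uint64_t kHoleNaN = 0xFFF7FFFFFFF7FFFFull;

// Store into a FAST_DOUBLE-style slot: keep the hole bit-exact, canonicalize
// every other NaN, and pass ordinary doubles through unchanged.
void StoreDoubleElement(double value, uint64_t* slot) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  if (bits == kHoleNaN) {
    *slot = kHoleNaN;             // the hole must survive verbatim
  } else if (value != value) {    // any other NaN compares unequal to itself
    *slot = kCanonicalNaN;
  } else {
    *slot = bits;
  }
}
```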
- __ lea(key, operand); - __ RecordWrite(elements, key, value, kSaveFPRegs, EMIT_REMEMBERED_SET, - check_needed, - instr->hydrogen()->PointersToHereCheckForValue()); - } -} - - -void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { - // By cases...external, fast-double, fast - if (instr->is_fixed_typed_array()) { - DoStoreKeyedExternalArray(instr); - } else if (instr->hydrogen()->value()->representation().IsDouble()) { - DoStoreKeyedFixedDoubleArray(instr); - } else { - DoStoreKeyedFixedArray(instr); - } -} - - -void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { - Register object = ToRegister(instr->object()); - Register temp = ToRegister(instr->temp()); - Label no_memento_found; - __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); - DeoptimizeIf(equal, instr, DeoptimizeReason::kMementoFound); - __ bind(&no_memento_found); -} - - -void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) { - class DeferredMaybeGrowElements final : public LDeferredCode { - public: - DeferredMaybeGrowElements(LCodeGen* codegen, - LMaybeGrowElements* instr, - const X87Stack& x87_stack) - : LDeferredCode(codegen, x87_stack), instr_(instr) {} - void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LMaybeGrowElements* instr_; - }; - - Register result = eax; - DeferredMaybeGrowElements* deferred = - new (zone()) DeferredMaybeGrowElements(this, instr, x87_stack_); - LOperand* key = instr->key(); - LOperand* current_capacity = instr->current_capacity(); - - DCHECK(instr->hydrogen()->key()->representation().IsInteger32()); - DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32()); - DCHECK(key->IsConstantOperand() || key->IsRegister()); - DCHECK(current_capacity->IsConstantOperand() || - current_capacity->IsRegister()); - - if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) { - int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); - int32_t constant_capacity = - ToInteger32(LConstantOperand::cast(current_capacity)); - if (constant_key >= constant_capacity) { - // Deferred case. - __ jmp(deferred->entry()); - } - } else if (key->IsConstantOperand()) { - int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); - __ cmp(ToOperand(current_capacity), Immediate(constant_key)); - __ j(less_equal, deferred->entry()); - } else if (current_capacity->IsConstantOperand()) { - int32_t constant_capacity = - ToInteger32(LConstantOperand::cast(current_capacity)); - __ cmp(ToRegister(key), Immediate(constant_capacity)); - __ j(greater_equal, deferred->entry()); - } else { - __ cmp(ToRegister(key), ToRegister(current_capacity)); - __ j(greater_equal, deferred->entry()); - } - - __ mov(result, ToOperand(instr->elements())); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) { - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - Register result = eax; - __ Move(result, Immediate(0)); - - // We have to call a stub. 
- { - PushSafepointRegistersScope scope(this); - if (instr->object()->IsRegister()) { - __ Move(result, ToRegister(instr->object())); - } else { - __ mov(result, ToOperand(instr->object())); - } - - LOperand* key = instr->key(); - if (key->IsConstantOperand()) { - LConstantOperand* constant_key = LConstantOperand::cast(key); - int32_t int_key = ToInteger32(constant_key); - if (Smi::IsValid(int_key)) { - __ mov(ebx, Immediate(Smi::FromInt(int_key))); - } else { - // We should never get here at runtime because there is a smi check on - // the key before this point. - __ int3(); - } - } else { - __ Move(ebx, ToRegister(key)); - __ SmiTag(ebx); - } - - GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind()); - __ CallStub(&stub); - RecordSafepointWithLazyDeopt( - instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - __ StoreToSafepointRegisterSlot(result, result); - } - - // Deopt on smi, which means the elements array changed to dictionary mode. - __ test(result, Immediate(kSmiTagMask)); - DeoptimizeIf(equal, instr, DeoptimizeReason::kSmi); -} - - -void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { - UNREACHABLE(); -} - - -void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { - class DeferredStringCharCodeAt final : public LDeferredCode { - public: - DeferredStringCharCodeAt(LCodeGen* codegen, - LStringCharCodeAt* instr, - const X87Stack& x87_stack) - : LDeferredCode(codegen, x87_stack), instr_(instr) { } - void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LStringCharCodeAt* instr_; - }; - - DeferredStringCharCodeAt* deferred = - new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_); - - StringCharLoadGenerator::Generate(masm(), - factory(), - ToRegister(instr->string()), - ToRegister(instr->index()), - ToRegister(instr->result()), - deferred->entry()); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { - Register string = ToRegister(instr->string()); - Register result = ToRegister(instr->result()); - - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - __ Move(result, Immediate(0)); - - PushSafepointRegistersScope scope(this); - __ push(string); - // Push the index as a smi. This is safe because of the checks in - // DoStringCharCodeAt above. 
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue); - if (instr->index()->IsConstantOperand()) { - Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()), - Representation::Smi()); - __ push(immediate); - } else { - Register index = ToRegister(instr->index()); - __ SmiTag(index); - __ push(index); - } - CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, - instr, instr->context()); - __ AssertSmi(eax); - __ SmiUntag(eax); - __ StoreToSafepointRegisterSlot(result, eax); -} - - -void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { - class DeferredStringCharFromCode final : public LDeferredCode { - public: - DeferredStringCharFromCode(LCodeGen* codegen, - LStringCharFromCode* instr, - const X87Stack& x87_stack) - : LDeferredCode(codegen, x87_stack), instr_(instr) { } - void Generate() override { - codegen()->DoDeferredStringCharFromCode(instr_); - } - LInstruction* instr() override { return instr_; } - - private: - LStringCharFromCode* instr_; - }; - - DeferredStringCharFromCode* deferred = - new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_); - - DCHECK(instr->hydrogen()->value()->representation().IsInteger32()); - Register char_code = ToRegister(instr->char_code()); - Register result = ToRegister(instr->result()); - DCHECK(!char_code.is(result)); - - __ cmp(char_code, String::kMaxOneByteCharCode); - __ j(above, deferred->entry()); - __ Move(result, Immediate(factory()->single_character_string_cache())); - __ mov(result, FieldOperand(result, - char_code, times_pointer_size, - FixedArray::kHeaderSize)); - __ cmp(result, factory()->undefined_value()); - __ j(equal, deferred->entry()); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { - Register char_code = ToRegister(instr->char_code()); - Register result = ToRegister(instr->result()); - - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. 
- __ Move(result, Immediate(0)); - - PushSafepointRegistersScope scope(this); - __ SmiTag(char_code); - __ push(char_code); - CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr, - instr->context()); - __ StoreToSafepointRegisterSlot(result, eax); -} - - -void LCodeGen::DoStringAdd(LStringAdd* instr) { - DCHECK(ToRegister(instr->context()).is(esi)); - DCHECK(ToRegister(instr->left()).is(edx)); - DCHECK(ToRegister(instr->right()).is(eax)); - StringAddStub stub(isolate(), - instr->hydrogen()->flags(), - instr->hydrogen()->pretenure_flag()); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); -} - - -void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { - LOperand* input = instr->value(); - LOperand* output = instr->result(); - DCHECK(input->IsRegister() || input->IsStackSlot()); - DCHECK(output->IsDoubleRegister()); - if (input->IsRegister()) { - Register input_reg = ToRegister(input); - __ push(input_reg); - X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand); - __ pop(input_reg); - } else { - X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand); - } -} - - -void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { - LOperand* input = instr->value(); - LOperand* output = instr->result(); - X87Register res = ToX87Register(output); - X87PrepareToWrite(res); - __ LoadUint32NoSSE2(ToRegister(input)); - X87CommitWrite(res); -} - - -void LCodeGen::DoNumberTagI(LNumberTagI* instr) { - class DeferredNumberTagI final : public LDeferredCode { - public: - DeferredNumberTagI(LCodeGen* codegen, - LNumberTagI* instr, - const X87Stack& x87_stack) - : LDeferredCode(codegen, x87_stack), instr_(instr) { } - void Generate() override { - codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(), - SIGNED_INT32); - } - LInstruction* instr() override { return instr_; } - - private: - LNumberTagI* instr_; - }; - - LOperand* input = instr->value(); - DCHECK(input->IsRegister() && input->Equals(instr->result())); - Register reg = ToRegister(input); - - DeferredNumberTagI* deferred = - new(zone()) DeferredNumberTagI(this, instr, x87_stack_); - __ SmiTag(reg); - __ j(overflow, deferred->entry()); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoNumberTagU(LNumberTagU* instr) { - class DeferredNumberTagU final : public LDeferredCode { - public: - DeferredNumberTagU(LCodeGen* codegen, - LNumberTagU* instr, - const X87Stack& x87_stack) - : LDeferredCode(codegen, x87_stack), instr_(instr) { } - void Generate() override { - codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(), - UNSIGNED_INT32); - } - LInstruction* instr() override { return instr_; } - - private: - LNumberTagU* instr_; - }; - - LOperand* input = instr->value(); - DCHECK(input->IsRegister() && input->Equals(instr->result())); - Register reg = ToRegister(input); - - DeferredNumberTagU* deferred = - new(zone()) DeferredNumberTagU(this, instr, x87_stack_); - __ cmp(reg, Immediate(Smi::kMaxValue)); - __ j(above, deferred->entry()); - __ SmiTag(reg); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, - LOperand* value, - LOperand* temp, - IntegerSignedness signedness) { - Label done, slow; - Register reg = ToRegister(value); - Register tmp = ToRegister(temp); - - if (signedness == SIGNED_INT32) { - // There was overflow, so bits 30 and 31 of the original integer - // disagree. Try to allocate a heap number in new space and store - // the value in there. If that fails, call the runtime system. 
- __ SmiUntag(reg); - __ xor_(reg, 0x80000000); - __ push(reg); - __ fild_s(Operand(esp, 0)); - __ pop(reg); - } else { - // There's no fild variant for unsigned values, so zero-extend to a 64-bit - // int manually. - __ push(Immediate(0)); - __ push(reg); - __ fild_d(Operand(esp, 0)); - __ pop(reg); - __ pop(reg); - } - - if (FLAG_inline_new) { - __ AllocateHeapNumber(reg, tmp, no_reg, &slow); - __ jmp(&done, Label::kNear); - } - - // Slow case: Call the runtime system to do the number allocation. - __ bind(&slow); - { - // TODO(3095996): Put a valid pointer value in the stack slot where the - // result register is stored, as this register is in the pointer map, but - // contains an integer value. - __ Move(reg, Immediate(0)); - - // Preserve the value of all registers. - PushSafepointRegistersScope scope(this); - // Reset the context register. - if (!reg.is(esi)) { - __ Move(esi, Immediate(0)); - } - __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); - RecordSafepointWithRegisters( - instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(reg, eax); - } - - __ bind(&done); - __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); -} - - -void LCodeGen::DoNumberTagD(LNumberTagD* instr) { - class DeferredNumberTagD final : public LDeferredCode { - public: - DeferredNumberTagD(LCodeGen* codegen, - LNumberTagD* instr, - const X87Stack& x87_stack) - : LDeferredCode(codegen, x87_stack), instr_(instr) { } - void Generate() override { codegen()->DoDeferredNumberTagD(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LNumberTagD* instr_; - }; - - Register reg = ToRegister(instr->result()); - - // Put the value to the top of stack - X87Register src = ToX87Register(instr->value()); - // Don't use X87LoadForUsage here, which is only used by Instruction which - // clobbers fp registers. - x87_stack_.Fxch(src); - - DeferredNumberTagD* deferred = - new(zone()) DeferredNumberTagD(this, instr, x87_stack_); - if (FLAG_inline_new) { - Register tmp = ToRegister(instr->temp()); - __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry()); - } else { - __ jmp(deferred->entry()); - } - __ bind(deferred->exit()); - __ fst_d(FieldOperand(reg, HeapNumber::kValueOffset)); -} - - -void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - Register reg = ToRegister(instr->result()); - __ Move(reg, Immediate(0)); - - PushSafepointRegistersScope scope(this); - // Reset the context register. 
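DoDeferredNumberTagIU above notes that there is no fild variant for unsigned values, so it pushes a zero high word and loads the value with fild_d as a 64-bit integer. The same widening expressed in plain C++, as a sketch rather than engine code:

```cpp
#include <cstdint>

// x87 fild only loads signed integers, so a uint32 is zero-extended to a
// signed 64-bit value first; this is what the push(0)/push(reg)/fild_d
// sequence in the deleted code builds on the stack.
double Uint32ToDouble(uint32_t value) {
  return static_cast<double>(static_cast<int64_t>(value));
}
```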
- if (!reg.is(esi)) { - __ Move(esi, Immediate(0)); - } - __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); - RecordSafepointWithRegisters( - instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(reg, eax); -} - - -void LCodeGen::DoSmiTag(LSmiTag* instr) { - HChange* hchange = instr->hydrogen(); - Register input = ToRegister(instr->value()); - if (hchange->CheckFlag(HValue::kCanOverflow) && - hchange->value()->CheckFlag(HValue::kUint32)) { - __ test(input, Immediate(0xc0000000)); - DeoptimizeIf(not_zero, instr, DeoptimizeReason::kOverflow); - } - __ SmiTag(input); - if (hchange->CheckFlag(HValue::kCanOverflow) && - !hchange->value()->CheckFlag(HValue::kUint32)) { - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); - } -} - - -void LCodeGen::DoSmiUntag(LSmiUntag* instr) { - LOperand* input = instr->value(); - Register result = ToRegister(input); - DCHECK(input->IsRegister() && input->Equals(instr->result())); - if (instr->needs_check()) { - __ test(result, Immediate(kSmiTagMask)); - DeoptimizeIf(not_zero, instr, DeoptimizeReason::kNotASmi); - } else { - __ AssertSmi(result); - } - __ SmiUntag(result); -} - - -void LCodeGen::EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input_reg, - Register temp_reg, X87Register res_reg, - NumberUntagDMode mode) { - bool can_convert_undefined_to_nan = instr->truncating(); - bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); - - Label load_smi, done; - - X87PrepareToWrite(res_reg); - if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { - // Smi check. - __ JumpIfSmi(input_reg, &load_smi); - - // Heap number map check. - __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), - factory()->heap_number_map()); - if (!can_convert_undefined_to_nan) { - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber); - } else { - Label heap_number, convert; - __ j(equal, &heap_number); - - // Convert undefined (or hole) to NaN. - __ cmp(input_reg, factory()->undefined_value()); - DeoptimizeIf(not_equal, instr, - DeoptimizeReason::kNotAHeapNumberUndefined); - - __ bind(&convert); - __ push(Immediate(0xfff80000)); - __ push(Immediate(0x00000000)); - __ fld_d(MemOperand(esp, 0)); - __ lea(esp, Operand(esp, kDoubleSize)); - __ jmp(&done, Label::kNear); - - __ bind(&heap_number); - } - // Heap number to x87 conversion. - __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset)); - if (deoptimize_on_minus_zero) { - __ fldz(); - __ FCmp(); - __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset)); - __ j(not_zero, &done, Label::kNear); - - // Use general purpose registers to check if we have -0.0 - __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset)); - __ test(temp_reg, Immediate(HeapNumber::kSignMask)); - __ j(zero, &done, Label::kNear); - - // Pop FPU stack before deoptimizing. - __ fstp(0); - DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero); - } - __ jmp(&done, Label::kNear); - } else { - DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); - } - - __ bind(&load_smi); - // Clobbering a temp is faster than re-tagging the - // input register since we avoid dependencies. - __ mov(temp_reg, input_reg); - __ SmiUntag(temp_reg); // Untag smi before converting to float. 
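DoSmiTag above tests an untagged uint32 against 0xc0000000 before shifting, because a 32-bit Smi only carries a 31-bit signed payload. A sketch of the same fit-check and tag, assuming the 32-bit layout with kSmiTagSize == 1 and a zero tag (the helper name and optional return are illustrative):

```cpp
#include <cstdint>
#include <optional>

// Tag an untagged uint32 as a 32-bit Smi, or report overflow. Testing the
// top two bits is the check the deleted code performs with
// test(input, Immediate(0xc0000000)).
std::optional<int32_t> TagUint32AsSmi(uint32_t value) {
  if (value & 0xC0000000u) return std::nullopt;  // does not fit in 31 bits
  return static_cast<int32_t>(value << 1);       // shift in the zero tag bit
}
```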
- __ push(temp_reg); - __ fild_s(Operand(esp, 0)); - __ add(esp, Immediate(kPointerSize)); - __ bind(&done); - X87CommitWrite(res_reg); -} - - -void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) { - Register input_reg = ToRegister(instr->value()); - - // The input was optimistically untagged; revert it. - STATIC_ASSERT(kSmiTagSize == 1); - __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag)); - - if (instr->truncating()) { - Label truncate; - Label::Distance truncate_distance = - DeoptEveryNTimes() ? Label::kFar : Label::kNear; - __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), - factory()->heap_number_map()); - __ j(equal, &truncate, truncate_distance); - __ push(input_reg); - __ CmpObjectType(input_reg, ODDBALL_TYPE, input_reg); - __ pop(input_reg); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotANumberOrOddball); - __ bind(&truncate); - __ TruncateHeapNumberToI(input_reg, input_reg); - } else { - // TODO(olivf) Converting a number on the fpu is actually quite slow. We - // should first try a fast conversion and then bailout to this slow case. - __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), - isolate()->factory()->heap_number_map()); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber); - - __ sub(esp, Immediate(kPointerSize)); - __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset)); - - if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) { - Label no_precision_lost, not_nan, zero_check; - __ fld(0); - - __ fist_s(MemOperand(esp, 0)); - __ fild_s(MemOperand(esp, 0)); - __ FCmp(); - __ pop(input_reg); - - __ j(equal, &no_precision_lost, Label::kNear); - __ fstp(0); - DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision); - __ bind(&no_precision_lost); - - __ j(parity_odd, ¬_nan); - __ fstp(0); - DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN); - __ bind(¬_nan); - - __ test(input_reg, Operand(input_reg)); - __ j(zero, &zero_check, Label::kNear); - __ fstp(0); - __ jmp(done); - - __ bind(&zero_check); - // To check for minus zero, we load the value again as float, and check - // if that is still 0. - __ sub(esp, Immediate(kPointerSize)); - __ fstp_s(Operand(esp, 0)); - __ pop(input_reg); - __ test(input_reg, Operand(input_reg)); - DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero); - } else { - __ fist_s(MemOperand(esp, 0)); - __ fild_s(MemOperand(esp, 0)); - __ FCmp(); - __ pop(input_reg); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision); - DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN); - } - } -} - - -void LCodeGen::DoTaggedToI(LTaggedToI* instr) { - class DeferredTaggedToI final : public LDeferredCode { - public: - DeferredTaggedToI(LCodeGen* codegen, - LTaggedToI* instr, - const X87Stack& x87_stack) - : LDeferredCode(codegen, x87_stack), instr_(instr) { } - void Generate() override { codegen()->DoDeferredTaggedToI(instr_, done()); } - LInstruction* instr() override { return instr_; } - - private: - LTaggedToI* instr_; - }; - - LOperand* input = instr->value(); - DCHECK(input->IsRegister()); - Register input_reg = ToRegister(input); - DCHECK(input_reg.is(ToRegister(instr->result()))); - - if (instr->hydrogen()->value()->representation().IsSmi()) { - __ SmiUntag(input_reg); - } else { - DeferredTaggedToI* deferred = - new(zone()) DeferredTaggedToI(this, instr, x87_stack_); - // Optimistically untag the input. - // If the input is a HeapObject, SmiUntag will set the carry flag. 
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); - __ SmiUntag(input_reg); - // Branch to deferred code if the input was tagged. - // The deferred code will take care of restoring the tag. - __ j(carry, deferred->entry()); - __ bind(deferred->exit()); - } -} - - -void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { - LOperand* input = instr->value(); - DCHECK(input->IsRegister()); - LOperand* temp = instr->temp(); - DCHECK(temp->IsRegister()); - LOperand* result = instr->result(); - DCHECK(result->IsDoubleRegister()); - - Register input_reg = ToRegister(input); - Register temp_reg = ToRegister(temp); - - HValue* value = instr->hydrogen()->value(); - NumberUntagDMode mode = value->representation().IsSmi() - ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; - - EmitNumberUntagDNoSSE2(instr, input_reg, temp_reg, ToX87Register(result), - mode); -} - - -void LCodeGen::DoDoubleToI(LDoubleToI* instr) { - LOperand* input = instr->value(); - DCHECK(input->IsDoubleRegister()); - LOperand* result = instr->result(); - DCHECK(result->IsRegister()); - Register result_reg = ToRegister(result); - - if (instr->truncating()) { - X87Register input_reg = ToX87Register(input); - X87Fxch(input_reg); - __ TruncateX87TOSToI(result_reg); - } else { - Label lost_precision, is_nan, minus_zero, done; - X87Register input_reg = ToX87Register(input); - X87Fxch(input_reg); - __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(), - &lost_precision, &is_nan, &minus_zero); - __ jmp(&done); - __ bind(&lost_precision); - DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision); - __ bind(&is_nan); - DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN); - __ bind(&minus_zero); - DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero); - __ bind(&done); - } -} - - -void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { - LOperand* input = instr->value(); - DCHECK(input->IsDoubleRegister()); - LOperand* result = instr->result(); - DCHECK(result->IsRegister()); - Register result_reg = ToRegister(result); - - Label lost_precision, is_nan, minus_zero, done; - X87Register input_reg = ToX87Register(input); - X87Fxch(input_reg); - __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(), - &lost_precision, &is_nan, &minus_zero); - __ jmp(&done); - __ bind(&lost_precision); - DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision); - __ bind(&is_nan); - DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN); - __ bind(&minus_zero); - DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero); - __ bind(&done); - __ SmiTag(result_reg); - DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); -} - - -void LCodeGen::DoCheckSmi(LCheckSmi* instr) { - LOperand* input = instr->value(); - __ test(ToOperand(input), Immediate(kSmiTagMask)); - DeoptimizeIf(not_zero, instr, DeoptimizeReason::kNotASmi); -} - - -void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - LOperand* input = instr->value(); - __ test(ToOperand(input), Immediate(kSmiTagMask)); - DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi); - } -} - - -void LCodeGen::DoCheckArrayBufferNotNeutered( - LCheckArrayBufferNotNeutered* instr) { - Register view = ToRegister(instr->view()); - Register scratch = ToRegister(instr->scratch()); - - __ mov(scratch, FieldOperand(view, JSArrayBufferView::kBufferOffset)); - __ test_b(FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset), - Immediate(1 << JSArrayBuffer::WasNeutered::kShift)); - DeoptimizeIf(not_zero, instr, 
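DoTaggedToI above untags optimistically: SmiUntag shifts first, and the tag bit it shifts out (left in the carry flag) tells it whether the value was really a heap object so the deferred path must undo the shift. The same idea without condition flags, for a 32-bit tagged word; the helper name and boolean return are illustrative only:

```cpp
#include <cstdint>

// Assumes the 32-bit scheme stated in the deleted code: kSmiTagSize == 1,
// kSmiTag == 0, and heap object pointers carry a low 1 bit (kHeapObjectTag).
bool OptimisticSmiUntag(uint32_t tagged, int32_t* out) {
  bool was_heap_object = (tagged & 1) != 0;  // the bit SmiUntag shifts into CF
  *out = static_cast<int32_t>(tagged) >> 1;  // arithmetic shift by the tag size
  return !was_heap_object;                   // true: *out holds the Smi value
}
```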
DeoptimizeReason::kOutOfBounds); -} - - -void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { - Register input = ToRegister(instr->value()); - Register temp = ToRegister(instr->temp()); - - __ mov(temp, FieldOperand(input, HeapObject::kMapOffset)); - - if (instr->hydrogen()->is_interval_check()) { - InstanceType first; - InstanceType last; - instr->hydrogen()->GetCheckInterval(&first, &last); - - __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(first)); - - // If there is only one type in the interval check for equality. - if (first == last) { - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType); - } else { - DeoptimizeIf(below, instr, DeoptimizeReason::kWrongInstanceType); - // Omit check for the last type. - if (last != LAST_TYPE) { - __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(last)); - DeoptimizeIf(above, instr, DeoptimizeReason::kWrongInstanceType); - } - } - } else { - uint8_t mask; - uint8_t tag; - instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); - - if (base::bits::IsPowerOfTwo32(mask)) { - DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); - __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(mask)); - DeoptimizeIf(tag == 0 ? not_zero : zero, instr, - DeoptimizeReason::kWrongInstanceType); - } else { - __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset)); - __ and_(temp, mask); - __ cmp(temp, tag); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType); - } - } -} - - -void LCodeGen::DoCheckValue(LCheckValue* instr) { - Handle object = instr->hydrogen()->object().handle(); - if (instr->hydrogen()->object_in_new_space()) { - Register reg = ToRegister(instr->value()); - Handle cell = isolate()->factory()->NewCell(object); - __ cmp(reg, Operand::ForCell(cell)); - } else { - Operand operand = ToOperand(instr->value()); - __ cmp(operand, object); - } - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kValueMismatch); -} - - -void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { - Label deopt, done; - // If the map is not deprecated the migration attempt does not make sense. 
- __ push(object); - __ mov(object, FieldOperand(object, HeapObject::kMapOffset)); - __ test(FieldOperand(object, Map::kBitField3Offset), - Immediate(Map::Deprecated::kMask)); - __ pop(object); - __ j(zero, &deopt); - - { - PushSafepointRegistersScope scope(this); - __ push(object); - __ xor_(esi, esi); - __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); - RecordSafepointWithRegisters( - instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); - - __ test(eax, Immediate(kSmiTagMask)); - } - __ j(not_zero, &done); - - __ bind(&deopt); - DeoptimizeIf(no_condition, instr, DeoptimizeReason::kInstanceMigrationFailed); - - __ bind(&done); -} - - -void LCodeGen::DoCheckMaps(LCheckMaps* instr) { - class DeferredCheckMaps final : public LDeferredCode { - public: - DeferredCheckMaps(LCodeGen* codegen, - LCheckMaps* instr, - Register object, - const X87Stack& x87_stack) - : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) { - SetExit(check_maps()); - } - void Generate() override { - codegen()->DoDeferredInstanceMigration(instr_, object_); - } - Label* check_maps() { return &check_maps_; } - LInstruction* instr() override { return instr_; } - - private: - LCheckMaps* instr_; - Label check_maps_; - Register object_; - }; - - if (instr->hydrogen()->IsStabilityCheck()) { - const UniqueSet* maps = instr->hydrogen()->maps(); - for (int i = 0; i < maps->size(); ++i) { - AddStabilityDependency(maps->at(i).handle()); - } - return; - } - - LOperand* input = instr->value(); - DCHECK(input->IsRegister()); - Register reg = ToRegister(input); - - DeferredCheckMaps* deferred = NULL; - if (instr->hydrogen()->HasMigrationTarget()) { - deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_); - __ bind(deferred->check_maps()); - } - - const UniqueSet* maps = instr->hydrogen()->maps(); - Label success; - for (int i = 0; i < maps->size() - 1; i++) { - Handle map = maps->at(i).handle(); - __ CompareMap(reg, map); - __ j(equal, &success, Label::kNear); - } - - Handle map = maps->at(maps->size() - 1).handle(); - __ CompareMap(reg, map); - if (instr->hydrogen()->HasMigrationTarget()) { - __ j(not_equal, deferred->entry()); - } else { - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap); - } - - __ bind(&success); -} - - -void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { - X87Register value_reg = ToX87Register(instr->unclamped()); - Register result_reg = ToRegister(instr->result()); - X87Fxch(value_reg); - __ ClampTOSToUint8(result_reg); -} - - -void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { - DCHECK(instr->unclamped()->Equals(instr->result())); - Register value_reg = ToRegister(instr->result()); - __ ClampUint8(value_reg); -} - - -void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) { - Register input_reg = ToRegister(instr->unclamped()); - Register result_reg = ToRegister(instr->result()); - Register scratch = ToRegister(instr->scratch()); - Register scratch2 = ToRegister(instr->scratch2()); - Register scratch3 = ToRegister(instr->scratch3()); - Label is_smi, done, heap_number, valid_exponent, - largest_value, zero_result, maybe_nan_or_infinity; - - __ JumpIfSmi(input_reg, &is_smi); - - // Check for heap number - __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), - factory()->heap_number_map()); - __ j(equal, &heap_number, Label::kNear); - - // Check for undefined. Undefined is converted to zero for clamping - // conversions. 
- __ cmp(input_reg, factory()->undefined_value()); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumberUndefined); - __ jmp(&zero_result, Label::kNear); - - // Heap number - __ bind(&heap_number); - - // Surprisingly, all of the hand-crafted bit-manipulations below are much - // faster than the x86 FPU built-in instruction, especially since "banker's - // rounding" would be additionally very expensive - - // Get exponent word. - __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset)); - __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); - - // Test for negative values --> clamp to zero - __ test(scratch, scratch); - __ j(negative, &zero_result, Label::kNear); - - // Get exponent alone in scratch2. - __ mov(scratch2, scratch); - __ and_(scratch2, HeapNumber::kExponentMask); - __ shr(scratch2, HeapNumber::kExponentShift); - __ j(zero, &zero_result, Label::kNear); - __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1)); - __ j(negative, &zero_result, Label::kNear); - - const uint32_t non_int8_exponent = 7; - __ cmp(scratch2, Immediate(non_int8_exponent + 1)); - // If the exponent is too big, check for special values. - __ j(greater, &maybe_nan_or_infinity, Label::kNear); - - __ bind(&valid_exponent); - // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent - // < 7. The shift bias is the number of bits to shift the mantissa such that - // with an exponent of 7 such the that top-most one is in bit 30, allowing - // detection the rounding overflow of a 255.5 to 256 (bit 31 goes from 0 to - // 1). - int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1; - __ lea(result_reg, MemOperand(scratch2, shift_bias)); - // Here result_reg (ecx) is the shift, scratch is the exponent word. Get the - // top bits of the mantissa. - __ and_(scratch, HeapNumber::kMantissaMask); - // Put back the implicit 1 of the mantissa - __ or_(scratch, 1 << HeapNumber::kExponentShift); - // Shift up to round - __ shl_cl(scratch); - // Use "banker's rounding" to spec: If fractional part of number is 0.5, then - // use the bit in the "ones" place and add it to the "halves" place, which has - // the effect of rounding to even. - __ mov(scratch2, scratch); - const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8; - const uint32_t one_bit_shift = one_half_bit_shift + 1; - __ and_(scratch2, Immediate((1 << one_bit_shift) - 1)); - __ cmp(scratch2, Immediate(1 << one_half_bit_shift)); - Label no_round; - __ j(less, &no_round, Label::kNear); - Label round_up; - __ mov(scratch2, Immediate(1 << one_half_bit_shift)); - __ j(greater, &round_up, Label::kNear); - __ test(scratch3, scratch3); - __ j(not_zero, &round_up, Label::kNear); - __ mov(scratch2, scratch); - __ and_(scratch2, Immediate(1 << one_bit_shift)); - __ shr(scratch2, 1); - __ bind(&round_up); - __ add(scratch, scratch2); - __ j(overflow, &largest_value, Label::kNear); - __ bind(&no_round); - __ shr(scratch, 23); - __ mov(result_reg, scratch); - __ jmp(&done, Label::kNear); - - __ bind(&maybe_nan_or_infinity); - // Check for NaN/Infinity, all other values map to 255 - __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1)); - __ j(not_equal, &largest_value, Label::kNear); - - // Check for NaN, which differs from Infinity in that at least one mantissa - // bit is set. 
- __ and_(scratch, HeapNumber::kMantissaMask); - __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); - __ j(not_zero, &zero_result, Label::kNear); // M!=0 --> NaN - // Infinity -> Fall through to map to 255. - - __ bind(&largest_value); - __ mov(result_reg, Immediate(255)); - __ jmp(&done, Label::kNear); - - __ bind(&zero_result); - __ xor_(result_reg, result_reg); - __ jmp(&done, Label::kNear); - - // smi - __ bind(&is_smi); - if (!input_reg.is(result_reg)) { - __ mov(result_reg, input_reg); - } - __ SmiUntag(result_reg); - __ ClampUint8(result_reg); - __ bind(&done); -} - - -void LCodeGen::DoAllocate(LAllocate* instr) { - class DeferredAllocate final : public LDeferredCode { - public: - DeferredAllocate(LCodeGen* codegen, - LAllocate* instr, - const X87Stack& x87_stack) - : LDeferredCode(codegen, x87_stack), instr_(instr) { } - void Generate() override { codegen()->DoDeferredAllocate(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LAllocate* instr_; - }; - - DeferredAllocate* deferred = - new(zone()) DeferredAllocate(this, instr, x87_stack_); - - Register result = ToRegister(instr->result()); - Register temp = ToRegister(instr->temp()); - - // Allocate memory for the object. - AllocationFlags flags = NO_ALLOCATION_FLAGS; - if (instr->hydrogen()->MustAllocateDoubleAligned()) { - flags = static_cast(flags | DOUBLE_ALIGNMENT); - } - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - flags = static_cast(flags | PRETENURE); - } - - if (instr->hydrogen()->IsAllocationFoldingDominator()) { - flags = static_cast(flags | ALLOCATION_FOLDING_DOMINATOR); - } - DCHECK(!instr->hydrogen()->IsAllocationFolded()); - - if (instr->size()->IsConstantOperand()) { - int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - CHECK(size <= kMaxRegularHeapObjectSize); - __ Allocate(size, result, temp, no_reg, deferred->entry(), flags); - } else { - Register size = ToRegister(instr->size()); - __ Allocate(size, result, temp, no_reg, deferred->entry(), flags); - } - - __ bind(deferred->exit()); - - if (instr->hydrogen()->MustPrefillWithFiller()) { - if (instr->size()->IsConstantOperand()) { - int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - __ mov(temp, (size / kPointerSize) - 1); - } else { - temp = ToRegister(instr->size()); - __ shr(temp, kPointerSizeLog2); - __ dec(temp); - } - Label loop; - __ bind(&loop); - __ mov(FieldOperand(result, temp, times_pointer_size, 0), - isolate()->factory()->one_pointer_filler_map()); - __ dec(temp); - __ j(not_zero, &loop); - } -} - -void LCodeGen::DoFastAllocate(LFastAllocate* instr) { - DCHECK(instr->hydrogen()->IsAllocationFolded()); - DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator()); - Register result = ToRegister(instr->result()); - Register temp = ToRegister(instr->temp()); - - AllocationFlags flags = ALLOCATION_FOLDED; - if (instr->hydrogen()->MustAllocateDoubleAligned()) { - flags = static_cast(flags | DOUBLE_ALIGNMENT); - } - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - flags = static_cast(flags | PRETENURE); - } - if (instr->size()->IsConstantOperand()) { - int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - CHECK(size <= kMaxRegularHeapObjectSize); - __ FastAllocate(size, result, temp, flags); - } else { - Register size = ToRegister(instr->size()); - __ FastAllocate(size, result, temp, flags); - } -} - -void LCodeGen::DoDeferredAllocate(LAllocate* 
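DoClampTToUint8NoSSE2 above clamps a heap number to [0, 255] with hand-rolled exponent and mantissa arithmetic, rounding halves to even (the "banker's rounding" its comments call out as expensive via the FPU built-ins). Its observable behaviour, as a portable sketch with illustrative names:

```cpp
#include <cmath>
#include <cstdint>

// Clamp a double to [0, 255], rounding halves to even. nearbyint() in the
// default FE_TONEAREST mode gives exactly that tie-breaking rule, matching
// the bit-twiddling in the deleted code.
uint8_t ClampDoubleToUint8(double value) {
  if (!(value > 0.0)) return 0;     // NaN, negatives and -0 clamp to zero
  if (value >= 255.0) return 255;   // includes +Infinity
  return static_cast<uint8_t>(std::nearbyint(value));
}
```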
instr) { - Register result = ToRegister(instr->result()); - - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - __ Move(result, Immediate(Smi::kZero)); - - PushSafepointRegistersScope scope(this); - if (instr->size()->IsRegister()) { - Register size = ToRegister(instr->size()); - DCHECK(!size.is(result)); - __ SmiTag(ToRegister(instr->size())); - __ push(size); - } else { - int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - if (size >= 0 && size <= Smi::kMaxValue) { - __ push(Immediate(Smi::FromInt(size))); - } else { - // We should never get here at runtime => abort - __ int3(); - return; - } - } - - int flags = AllocateDoubleAlignFlag::encode( - instr->hydrogen()->MustAllocateDoubleAligned()); - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - flags = AllocateTargetSpace::update(flags, OLD_SPACE); - } else { - flags = AllocateTargetSpace::update(flags, NEW_SPACE); - } - __ push(Immediate(Smi::FromInt(flags))); - - CallRuntimeFromDeferred( - Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); - __ StoreToSafepointRegisterSlot(result, eax); - - if (instr->hydrogen()->IsAllocationFoldingDominator()) { - AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS; - if (instr->hydrogen()->IsOldSpaceAllocation()) { - DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); - allocation_flags = static_cast(flags | PRETENURE); - } - // If the allocation folding dominator allocate triggered a GC, allocation - // happend in the runtime. We have to reset the top pointer to virtually - // undo the allocation. - ExternalReference allocation_top = - AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags); - __ sub(eax, Immediate(kHeapObjectTag)); - __ mov(Operand::StaticVariable(allocation_top), eax); - __ add(eax, Immediate(kHeapObjectTag)); - } -} - - -void LCodeGen::DoTypeof(LTypeof* instr) { - DCHECK(ToRegister(instr->context()).is(esi)); - DCHECK(ToRegister(instr->value()).is(ebx)); - Label end, do_call; - Register value_register = ToRegister(instr->value()); - __ JumpIfNotSmi(value_register, &do_call); - __ mov(eax, Immediate(isolate()->factory()->number_string())); - __ jmp(&end); - __ bind(&do_call); - Callable callable = Builtins::CallableFor(isolate(), Builtins::kTypeof); - CallCode(callable.code(), RelocInfo::CODE_TARGET, instr); - __ bind(&end); -} - - -void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { - Register input = ToRegister(instr->value()); - Condition final_branch_condition = EmitTypeofIs(instr, input); - if (final_branch_condition != no_condition) { - EmitBranch(instr, final_branch_condition); - } -} - - -Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) { - Label* true_label = instr->TrueLabel(chunk_); - Label* false_label = instr->FalseLabel(chunk_); - Handle type_name = instr->type_literal(); - int left_block = instr->TrueDestination(chunk_); - int right_block = instr->FalseDestination(chunk_); - int next_block = GetNextEmittedBlock(); - - Label::Distance true_distance = left_block == next_block ? Label::kNear - : Label::kFar; - Label::Distance false_distance = right_block == next_block ? 
Label::kNear - : Label::kFar; - Condition final_branch_condition = no_condition; - if (String::Equals(type_name, factory()->number_string())) { - __ JumpIfSmi(input, true_label, true_distance); - __ cmp(FieldOperand(input, HeapObject::kMapOffset), - factory()->heap_number_map()); - final_branch_condition = equal; - - } else if (String::Equals(type_name, factory()->string_string())) { - __ JumpIfSmi(input, false_label, false_distance); - __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input); - final_branch_condition = below; - - } else if (String::Equals(type_name, factory()->symbol_string())) { - __ JumpIfSmi(input, false_label, false_distance); - __ CmpObjectType(input, SYMBOL_TYPE, input); - final_branch_condition = equal; - - } else if (String::Equals(type_name, factory()->boolean_string())) { - __ cmp(input, factory()->true_value()); - __ j(equal, true_label, true_distance); - __ cmp(input, factory()->false_value()); - final_branch_condition = equal; - - } else if (String::Equals(type_name, factory()->undefined_string())) { - __ cmp(input, factory()->null_value()); - __ j(equal, false_label, false_distance); - __ JumpIfSmi(input, false_label, false_distance); - // Check for undetectable objects => true. - __ mov(input, FieldOperand(input, HeapObject::kMapOffset)); - __ test_b(FieldOperand(input, Map::kBitFieldOffset), - Immediate(1 << Map::kIsUndetectable)); - final_branch_condition = not_zero; - - } else if (String::Equals(type_name, factory()->function_string())) { - __ JumpIfSmi(input, false_label, false_distance); - // Check for callable and not undetectable objects => true. - __ mov(input, FieldOperand(input, HeapObject::kMapOffset)); - __ movzx_b(input, FieldOperand(input, Map::kBitFieldOffset)); - __ and_(input, (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)); - __ cmp(input, 1 << Map::kIsCallable); - final_branch_condition = equal; - - } else if (String::Equals(type_name, factory()->object_string())) { - __ JumpIfSmi(input, false_label, false_distance); - __ cmp(input, factory()->null_value()); - __ j(equal, true_label, true_distance); - STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); - __ CmpObjectType(input, FIRST_JS_RECEIVER_TYPE, input); - __ j(below, false_label, false_distance); - // Check for callable or undetectable objects => false. - __ test_b(FieldOperand(input, Map::kBitFieldOffset), - Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable))); - final_branch_condition = zero; - - } else { - __ jmp(false_label, false_distance); - } - return final_branch_condition; -} - - -void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { - if (info()->ShouldEnsureSpaceForLazyDeopt()) { - // Ensure that we have enough space after the previous lazy-bailout - // instruction for patching the code here. 
- int current_pc = masm()->pc_offset(); - if (current_pc < last_lazy_deopt_pc_ + space_needed) { - int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; - __ Nop(padding_size); - } - } - last_lazy_deopt_pc_ = masm()->pc_offset(); -} - - -void LCodeGen::DoLazyBailout(LLazyBailout* instr) { - last_lazy_deopt_pc_ = masm()->pc_offset(); - DCHECK(instr->HasEnvironment()); - LEnvironment* env = instr->environment(); - RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); - safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); -} - - -void LCodeGen::DoDeoptimize(LDeoptimize* instr) { - Deoptimizer::BailoutType type = instr->hydrogen()->type(); - // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the - // needed return address), even though the implementation of LAZY and EAGER is - // now identical. When LAZY is eventually completely folded into EAGER, remove - // the special case below. - if (info()->IsStub() && type == Deoptimizer::EAGER) { - type = Deoptimizer::LAZY; - } - DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type); -} - - -void LCodeGen::DoDummy(LDummy* instr) { - // Nothing to see here, move on! -} - - -void LCodeGen::DoDummyUse(LDummyUse* instr) { - // Nothing to see here, move on! -} - - -void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { - PushSafepointRegistersScope scope(this); - __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); - __ CallRuntimeSaveDoubles(Runtime::kStackGuard); - RecordSafepointWithLazyDeopt( - instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - DCHECK(instr->HasEnvironment()); - LEnvironment* env = instr->environment(); - safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); -} - - -void LCodeGen::DoStackCheck(LStackCheck* instr) { - class DeferredStackCheck final : public LDeferredCode { - public: - DeferredStackCheck(LCodeGen* codegen, - LStackCheck* instr, - const X87Stack& x87_stack) - : LDeferredCode(codegen, x87_stack), instr_(instr) { } - void Generate() override { codegen()->DoDeferredStackCheck(instr_); } - LInstruction* instr() override { return instr_; } - - private: - LStackCheck* instr_; - }; - - DCHECK(instr->HasEnvironment()); - LEnvironment* env = instr->environment(); - // There is no LLazyBailout instruction for stack-checks. We have to - // prepare for lazy deoptimization explicitly here. - if (instr->hydrogen()->is_function_entry()) { - // Perform stack overflow check. - Label done; - ExternalReference stack_limit = - ExternalReference::address_of_stack_limit(isolate()); - __ cmp(esp, Operand::StaticVariable(stack_limit)); - __ j(above_equal, &done, Label::kNear); - - DCHECK(instr->context()->IsRegister()); - DCHECK(ToRegister(instr->context()).is(esi)); - CallCode(isolate()->builtins()->StackCheck(), - RelocInfo::CODE_TARGET, - instr); - __ bind(&done); - } else { - DCHECK(instr->hydrogen()->is_backwards_branch()); - // Perform stack overflow check if this goto needs it before jumping. 
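// Illustrative sketch, not part of the deleted sources: EnsureSpaceForLazyDeopt
// above pads the instruction stream with single-byte nops so that the previous
// lazy-bailout point can later be patched with a call sequence of
// space_needed bytes without clobbering the following instruction.
#include <algorithm>

int PaddingForLazyDeopt(int last_lazy_deopt_pc, int space_needed,
                        int current_pc) {
  // Pad only when fewer than space_needed bytes were emitted since the last
  // lazy-deopt point; the caller then emits Nop(padding) and records the new
  // pc offset as last_lazy_deopt_pc.
  return std::max(0, last_lazy_deopt_pc + space_needed - current_pc);
}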
- DeferredStackCheck* deferred_stack_check = - new(zone()) DeferredStackCheck(this, instr, x87_stack_); - ExternalReference stack_limit = - ExternalReference::address_of_stack_limit(isolate()); - __ cmp(esp, Operand::StaticVariable(stack_limit)); - __ j(below, deferred_stack_check->entry()); - EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); - __ bind(instr->done_label()); - deferred_stack_check->SetExit(instr->done_label()); - RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); - // Don't record a deoptimization index for the safepoint here. - // This will be done explicitly when emitting call and the safepoint in - // the deferred code. - } -} - - -void LCodeGen::DoOsrEntry(LOsrEntry* instr) { - // This is a pseudo-instruction that ensures that the environment here is - // properly registered for deoptimization and records the assembler's PC - // offset. - LEnvironment* environment = instr->environment(); - - // If the environment were already registered, we would have no way of - // backpatching it with the spill slot operands. - DCHECK(!environment->HasBeenRegistered()); - RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); - - GenerateOsrPrologue(); -} - - -void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { - DCHECK(ToRegister(instr->context()).is(esi)); - - Label use_cache, call_runtime; - __ CheckEnumCache(&call_runtime); - - __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset)); - __ jmp(&use_cache, Label::kNear); - - // Get the set of properties to enumerate. - __ bind(&call_runtime); - __ push(eax); - CallRuntime(Runtime::kForInEnumerate, instr); - __ bind(&use_cache); -} - - -void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { - Register map = ToRegister(instr->map()); - Register result = ToRegister(instr->result()); - Label load_cache, done; - __ EnumLength(result, map); - __ cmp(result, Immediate(Smi::kZero)); - __ j(not_equal, &load_cache, Label::kNear); - __ mov(result, isolate()->factory()->empty_fixed_array()); - __ jmp(&done, Label::kNear); - - __ bind(&load_cache); - __ LoadInstanceDescriptors(map, result); - __ mov(result, FieldOperand(result, DescriptorArray::kEnumCacheBridgeOffset)); - __ mov(result, - FieldOperand(result, FixedArray::SizeFor(instr->idx()))); - __ bind(&done); - __ test(result, result); - DeoptimizeIf(equal, instr, DeoptimizeReason::kNoCache); -} - - -void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { - Register object = ToRegister(instr->value()); - __ cmp(ToRegister(instr->map()), - FieldOperand(object, HeapObject::kMapOffset)); - DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap); -} - - -void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, - Register object, - Register index) { - PushSafepointRegistersScope scope(this); - __ push(object); - __ push(index); - __ xor_(esi, esi); - __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); - RecordSafepointWithRegisters( - instr->pointer_map(), 2, Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(object, eax); -} - - -void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { - class DeferredLoadMutableDouble final : public LDeferredCode { - public: - DeferredLoadMutableDouble(LCodeGen* codegen, - LLoadFieldByIndex* instr, - Register object, - Register index, - const X87Stack& x87_stack) - : LDeferredCode(codegen, x87_stack), - instr_(instr), - object_(object), - index_(index) { - } - void Generate() override { - codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_); - } - LInstruction* 
instr() override { return instr_; } - - private: - LLoadFieldByIndex* instr_; - Register object_; - Register index_; - }; - - Register object = ToRegister(instr->object()); - Register index = ToRegister(instr->index()); - - DeferredLoadMutableDouble* deferred; - deferred = new(zone()) DeferredLoadMutableDouble( - this, instr, object, index, x87_stack_); - - Label out_of_object, done; - __ test(index, Immediate(Smi::FromInt(1))); - __ j(not_zero, deferred->entry()); - - __ sar(index, 1); - - __ cmp(index, Immediate(0)); - __ j(less, &out_of_object, Label::kNear); - __ mov(object, FieldOperand(object, - index, - times_half_pointer_size, - JSObject::kHeaderSize)); - __ jmp(&done, Label::kNear); - - __ bind(&out_of_object); - __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset)); - __ neg(index); - // Index is now equal to out of object property index plus 1. - __ mov(object, FieldOperand(object, - index, - times_half_pointer_size, - FixedArray::kHeaderSize - kPointerSize)); - __ bind(deferred->exit()); - __ bind(&done); -} - -#undef __ - -} // namespace internal -} // namespace v8 - -#endif // V8_TARGET_ARCH_X87 diff --git a/src/crankshaft/x87/lithium-codegen-x87.h b/src/crankshaft/x87/lithium-codegen-x87.h deleted file mode 100644 index e183fab963..0000000000 --- a/src/crankshaft/x87/lithium-codegen-x87.h +++ /dev/null @@ -1,489 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_X87_LITHIUM_CODEGEN_X87_H_ -#define V8_CRANKSHAFT_X87_LITHIUM_CODEGEN_X87_H_ - -#include - -#include "src/ast/scopes.h" -#include "src/base/logging.h" -#include "src/crankshaft/lithium-codegen.h" -#include "src/crankshaft/x87/lithium-gap-resolver-x87.h" -#include "src/crankshaft/x87/lithium-x87.h" -#include "src/deoptimizer.h" -#include "src/safepoint-table.h" -#include "src/utils.h" - -namespace v8 { -namespace internal { - -// Forward declarations. -class LDeferredCode; -class LGapNode; -class SafepointGenerator; - -class LCodeGen: public LCodeGenBase { - public: - LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) - : LCodeGenBase(chunk, assembler, info), - jump_table_(4, info->zone()), - scope_(info->scope()), - deferred_(8, info->zone()), - frame_is_built_(false), - x87_stack_(assembler), - safepoints_(info->zone()), - resolver_(this), - expected_safepoint_kind_(Safepoint::kSimple) { - PopulateDeoptimizationLiteralsWithInlinedFunctions(); - } - - int LookupDestination(int block_id) const { - return chunk()->LookupDestination(block_id); - } - - bool IsNextEmittedBlock(int block_id) const { - return LookupDestination(block_id) == GetNextEmittedBlock(); - } - - bool NeedsEagerFrame() const { - return HasAllocatedStackSlots() || info()->is_non_deferred_calling() || - !info()->IsStub() || info()->requires_frame(); - } - bool NeedsDeferredFrame() const { - return !NeedsEagerFrame() && info()->is_deferred_calling(); - } - - // Support for converting LOperands to assembler types. 
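// Illustrative sketch, not part of the deleted sources: DoLoadFieldByIndex
// above decodes a small-integer field index whose low bit and sign select
// where the property lives. The same decoding as straight-line C++:
struct FieldLocation {
  bool needs_runtime;  // low bit set: mutable heap number, loaded via runtime
  bool in_object;      // non-negative index: stored inside the object itself
  int word_offset;     // word offset within the object or the backing store
};

FieldLocation DecodeFieldIndex(int encoded) {
  FieldLocation loc;
  loc.needs_runtime = (encoded & 1) != 0;  // the `test index, 1` above
  int index = encoded >> 1;                // the `sar index, 1` above
  loc.in_object = index >= 0;
  // Negative values address the out-of-object properties array; `neg` turns
  // -k into k, and one word is subtracted from the header offset.
  loc.word_offset = loc.in_object ? index : (-index - 1);
  return loc;
}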
- Operand ToOperand(LOperand* op) const; - Register ToRegister(LOperand* op) const; - X87Register ToX87Register(LOperand* op) const; - - bool IsInteger32(LConstantOperand* op) const; - bool IsSmi(LConstantOperand* op) const; - Immediate ToImmediate(LOperand* op, const Representation& r) const { - return Immediate(ToRepresentation(LConstantOperand::cast(op), r)); - } - double ToDouble(LConstantOperand* op) const; - - // Support for non-sse2 (x87) floating point stack handling. - // These functions maintain the mapping of physical stack registers to our - // virtual registers between instructions. - enum X87OperandType { kX87DoubleOperand, kX87FloatOperand, kX87IntOperand }; - - void X87Mov(X87Register reg, Operand src, - X87OperandType operand = kX87DoubleOperand); - void X87Mov(Operand src, X87Register reg, - X87OperandType operand = kX87DoubleOperand); - void X87Mov(X87Register reg, X87Register src, - X87OperandType operand = kX87DoubleOperand); - - void X87PrepareBinaryOp( - X87Register left, X87Register right, X87Register result); - - void X87LoadForUsage(X87Register reg); - void X87LoadForUsage(X87Register reg1, X87Register reg2); - void X87PrepareToWrite(X87Register reg) { x87_stack_.PrepareToWrite(reg); } - void X87CommitWrite(X87Register reg) { x87_stack_.CommitWrite(reg); } - - void X87Fxch(X87Register reg, int other_slot = 0) { - x87_stack_.Fxch(reg, other_slot); - } - void X87Free(X87Register reg) { - x87_stack_.Free(reg); - } - - - bool X87StackEmpty() { - return x87_stack_.depth() == 0; - } - - Handle ToHandle(LConstantOperand* op) const; - - // The operand denoting the second word (the one with a higher address) of - // a double stack slot. - Operand HighOperand(LOperand* op); - - // Try to generate code for the entire chunk, but it may fail if the - // chunk contains constructs we cannot handle. Returns true if the - // code generation attempt succeeded. - bool GenerateCode(); - - // Finish the code by setting stack height, safepoint, and bailout - // information on it. - void FinishCode(Handle code); - - // Deferred code support. - void DoDeferredNumberTagD(LNumberTagD* instr); - - enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 }; - void DoDeferredNumberTagIU(LInstruction* instr, - LOperand* value, - LOperand* temp, - IntegerSignedness signedness); - - void DoDeferredTaggedToI(LTaggedToI* instr, Label* done); - void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr); - void DoDeferredStackCheck(LStackCheck* instr); - void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr); - void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr); - void DoDeferredStringCharFromCode(LStringCharFromCode* instr); - void DoDeferredAllocate(LAllocate* instr); - void DoDeferredInstanceMigration(LCheckMaps* instr, Register object); - void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, - Register object, - Register index); - - // Parallel move support. - void DoParallelMove(LParallelMove* move); - void DoGap(LGap* instr); - - // Emit frame translation commands for an environment. - void WriteTranslation(LEnvironment* environment, Translation* translation); - - void EnsureRelocSpaceForDeoptimization(); - - // Declare methods that deal with the individual node types. 
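// Illustrative sketch, not part of the deleted sources: ToImmediate() above
// emits a constant either as a raw 32-bit integer or in Smi form. On this
// 32-bit port a Smi is the value shifted left by one with tag bit 0 clear,
// which is why sizes are SmiTag'ed before being pushed for the runtime.
#include <cassert>
#include <cstdint>

int32_t SmiFromInt(int32_t value) {
  assert(value >= -(1 << 30) && value < (1 << 30));  // 31-bit payload only
  return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);  // tag = 0
}

int32_t SmiToInt(int32_t smi) { return smi >> 1; }  // arithmetic shift back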
-#define DECLARE_DO(type) void Do##type(L##type* node); - LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) -#undef DECLARE_DO - - private: - Scope* scope() const { return scope_; } - - void EmitClassOfTest(Label* if_true, Label* if_false, - Handle class_name, Register input, - Register temporary, Register temporary2); - - bool HasAllocatedStackSlots() const { - return chunk()->HasAllocatedStackSlots(); - } - int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); } - int GetTotalFrameSlotCount() const { - return chunk()->GetTotalFrameSlotCount(); - } - - void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); } - - // Code generation passes. Returns true if code generation should - // continue. - void GenerateBodyInstructionPre(LInstruction* instr) override; - void GenerateBodyInstructionPost(LInstruction* instr) override; - bool GeneratePrologue(); - bool GenerateDeferredCode(); - bool GenerateJumpTable(); - bool GenerateSafepointTable(); - - // Generates the custom OSR entrypoint and sets the osr_pc_offset. - void GenerateOsrPrologue(); - - enum SafepointMode { - RECORD_SIMPLE_SAFEPOINT, - RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS - }; - - void CallCode(Handle code, - RelocInfo::Mode mode, - LInstruction* instr); - - void CallCodeGeneric(Handle code, - RelocInfo::Mode mode, - LInstruction* instr, - SafepointMode safepoint_mode); - - void CallRuntime(const Runtime::Function* fun, int argc, LInstruction* instr, - SaveFPRegsMode save_doubles = kDontSaveFPRegs); - - void CallRuntime(Runtime::FunctionId id, - int argc, - LInstruction* instr) { - const Runtime::Function* function = Runtime::FunctionForId(id); - CallRuntime(function, argc, instr); - } - - void CallRuntime(Runtime::FunctionId id, LInstruction* instr) { - const Runtime::Function* function = Runtime::FunctionForId(id); - CallRuntime(function, function->nargs, instr); - } - - void CallRuntimeFromDeferred(Runtime::FunctionId id, - int argc, - LInstruction* instr, - LOperand* context); - - void LoadContextFromDeferred(LOperand* context); - - void PrepareForTailCall(const ParameterCount& actual, Register scratch1, - Register scratch2, Register scratch3); - - // Generate a direct call to a known function. Expects the function - // to be in edi. 
- void CallKnownFunction(Handle function, - int formal_parameter_count, int arity, - bool is_tail_call, LInstruction* instr); - - void RecordSafepointWithLazyDeopt(LInstruction* instr, - SafepointMode safepoint_mode); - - void RegisterEnvironmentForDeoptimization(LEnvironment* environment, - Safepoint::DeoptMode mode); - void DeoptimizeIf(Condition cc, LInstruction* instr, - DeoptimizeReason deopt_reason, - Deoptimizer::BailoutType bailout_type); - void DeoptimizeIf(Condition cc, LInstruction* instr, - DeoptimizeReason deopt_reason); - - bool DeoptEveryNTimes() { - return FLAG_deopt_every_n_times != 0 && !info()->IsStub(); - } - - void AddToTranslation(LEnvironment* environment, - Translation* translation, - LOperand* op, - bool is_tagged, - bool is_uint32, - int* object_index_pointer, - int* dematerialized_index_pointer); - - Register ToRegister(int index) const; - X87Register ToX87Register(int index) const; - int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const; - int32_t ToInteger32(LConstantOperand* op) const; - ExternalReference ToExternalReference(LConstantOperand* op) const; - - Operand BuildFastArrayOperand(LOperand* elements_pointer, - LOperand* key, - Representation key_representation, - ElementsKind elements_kind, - uint32_t base_offset); - - Operand BuildSeqStringOperand(Register string, - LOperand* index, - String::Encoding encoding); - - void EmitIntegerMathAbs(LMathAbs* instr); - - // Support for recording safepoint information. - void RecordSafepoint(LPointerMap* pointers, - Safepoint::Kind kind, - int arguments, - Safepoint::DeoptMode mode); - void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode); - void RecordSafepoint(Safepoint::DeoptMode mode); - void RecordSafepointWithRegisters(LPointerMap* pointers, - int arguments, - Safepoint::DeoptMode mode); - - static Condition TokenToCondition(Token::Value op, bool is_unsigned); - void EmitGoto(int block); - - // EmitBranch expects to be the last instruction of a block. - template - void EmitBranch(InstrType instr, Condition cc); - template - void EmitTrueBranch(InstrType instr, Condition cc); - template - void EmitFalseBranch(InstrType instr, Condition cc); - void EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input, - Register temp, X87Register res_reg, - NumberUntagDMode mode); - - // Emits optimized code for typeof x == "y". Modifies input register. - // Returns the condition on which a final split to - // true and false label should be made, to optimize fallthrough. - Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input); - - // Emits optimized code for %_IsString(x). Preserves input register. - // Returns the condition on which a final split to - // true and false label should be made, to optimize fallthrough. - Condition EmitIsString(Register input, - Register temp1, - Label* is_not_string, - SmiCheck check_needed); - - // Emits optimized code to deep-copy the contents of statically known - // object graphs (e.g. object literal boilerplate). 
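// Illustrative sketch, not part of the deleted sources: TokenToCondition maps
// a comparison token onto the x86 condition code that EmitBranch tests,
// switching to the unsigned variants when the inputs are known unsigned:
#include <string>

enum Cond { kEqual, kNotEqual, kLess, kGreater, kLessEqual, kGreaterEqual,
            kBelow, kAbove, kBelowEqual, kAboveEqual };

Cond TokenToCond(const std::string& op, bool is_unsigned) {
  if (op == "==") return kEqual;
  if (op == "!=") return kNotEqual;
  if (op == "<")  return is_unsigned ? kBelow      : kLess;
  if (op == ">")  return is_unsigned ? kAbove      : kGreater;
  if (op == "<=") return is_unsigned ? kBelowEqual : kLessEqual;
  return is_unsigned ? kAboveEqual : kGreaterEqual;  // ">="
}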
- void EmitDeepCopy(Handle object, - Register result, - Register source, - int* offset, - AllocationSiteMode mode); - - void EnsureSpaceForLazyDeopt(int space_needed) override; - void DoLoadKeyedExternalArray(LLoadKeyed* instr); - void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr); - void DoLoadKeyedFixedArray(LLoadKeyed* instr); - void DoStoreKeyedExternalArray(LStoreKeyed* instr); - void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr); - void DoStoreKeyedFixedArray(LStoreKeyed* instr); - - template - void EmitVectorLoadICRegisters(T* instr); - - void EmitReturn(LReturn* instr); - - // Emits code for pushing either a tagged constant, a (non-double) - // register, or a stack slot operand. - void EmitPushTaggedOperand(LOperand* operand); - - void X87Fld(Operand src, X87OperandType opts); - - void EmitFlushX87ForDeopt(); - void FlushX87StackIfNecessary(LInstruction* instr) { - x87_stack_.FlushIfNecessary(instr, this); - } - friend class LGapResolver; - -#ifdef _MSC_VER - // On windows, you may not access the stack more than one page below - // the most recently mapped page. To make the allocated area randomly - // accessible, we write an arbitrary value to each page in range - // esp + offset - page_size .. esp in turn. - void MakeSureStackPagesMapped(int offset); -#endif - - ZoneList jump_table_; - Scope* const scope_; - ZoneList deferred_; - bool frame_is_built_; - - class X87Stack : public ZoneObject { - public: - explicit X87Stack(MacroAssembler* masm) - : stack_depth_(0), is_mutable_(true), masm_(masm) { } - explicit X87Stack(const X87Stack& other) - : stack_depth_(other.stack_depth_), is_mutable_(false), masm_(masm()) { - for (int i = 0; i < stack_depth_; i++) { - stack_[i] = other.stack_[i]; - } - } - bool operator==(const X87Stack& other) const { - if (stack_depth_ != other.stack_depth_) return false; - for (int i = 0; i < stack_depth_; i++) { - if (!stack_[i].is(other.stack_[i])) return false; - } - return true; - } - X87Stack& operator=(const X87Stack& other) { - stack_depth_ = other.stack_depth_; - for (int i = 0; i < stack_depth_; i++) { - stack_[i] = other.stack_[i]; - } - return *this; - } - bool Contains(X87Register reg); - void Fxch(X87Register reg, int other_slot = 0); - void Free(X87Register reg); - void PrepareToWrite(X87Register reg); - void CommitWrite(X87Register reg); - void FlushIfNecessary(LInstruction* instr, LCodeGen* cgen); - void LeavingBlock(int current_block_id, LGoto* goto_instr, LCodeGen* cgen); - int depth() const { return stack_depth_; } - int GetLayout(); - int st(X87Register reg) { return st2idx(ArrayIndex(reg)); } - void pop() { - DCHECK(is_mutable_); - USE(is_mutable_); - stack_depth_--; - } - void push(X87Register reg) { - DCHECK(is_mutable_); - DCHECK(stack_depth_ < X87Register::kMaxNumAllocatableRegisters); - stack_[stack_depth_] = reg; - stack_depth_++; - } - - MacroAssembler* masm() const { return masm_; } - Isolate* isolate() const { return masm_->isolate(); } - - private: - int ArrayIndex(X87Register reg); - int st2idx(int pos); - - X87Register stack_[X87Register::kMaxNumAllocatableRegisters]; - int stack_depth_; - bool is_mutable_; - MacroAssembler* masm_; - }; - X87Stack x87_stack_; - // block_id -> X87Stack*; - typedef std::map X87StackMap; - X87StackMap x87_stack_map_; - - // Builder that keeps track of safepoints in the code. The table - // itself is emitted at the end of the generated code. - SafepointTableBuilder safepoints_; - - // Compiler from a set of parallel moves to a sequential list of moves. 
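// Illustrative sketch, not part of the deleted sources: the X87Stack class
// above shadows the physical x87 register stack so the code generator always
// knows which virtual double register sits in which st(i) slot. A minimal
// model of the push/pop/fxch bookkeeping:
#include <cassert>

class X87StackModel {
 public:
  void Push(int reg) { assert(depth_ < 8); stack_[depth_++] = reg; }
  void Pop() { assert(depth_ > 0); --depth_; }
  // fxch exchanges st(0) with st(i); mirror the exchange in the shadow array.
  void Fxch(int reg) {
    int i = Find(reg);
    int top = depth_ - 1;
    int tmp = stack_[top];
    stack_[top] = stack_[i];
    stack_[i] = tmp;
  }
  // st(i) index of a virtual register: its distance from the stack top.
  int St(int reg) { return depth_ - 1 - Find(reg); }
  int depth() const { return depth_; }

 private:
  int Find(int reg) {
    for (int i = 0; i < depth_; ++i) {
      if (stack_[i] == reg) return i;
    }
    assert(false);
    return -1;
  }
  int stack_[8] = {0};
  int depth_ = 0;
};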
- LGapResolver resolver_; - - Safepoint::Kind expected_safepoint_kind_; - - class PushSafepointRegistersScope final BASE_EMBEDDED { - public: - explicit PushSafepointRegistersScope(LCodeGen* codegen) - : codegen_(codegen) { - DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); - codegen_->masm_->PushSafepointRegisters(); - codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters; - DCHECK(codegen_->info()->is_calling()); - } - - ~PushSafepointRegistersScope() { - DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters); - codegen_->masm_->PopSafepointRegisters(); - codegen_->expected_safepoint_kind_ = Safepoint::kSimple; - } - - private: - LCodeGen* codegen_; - }; - - friend class LDeferredCode; - friend class LEnvironment; - friend class SafepointGenerator; - friend class X87Stack; - DISALLOW_COPY_AND_ASSIGN(LCodeGen); -}; - - -class LDeferredCode : public ZoneObject { - public: - explicit LDeferredCode(LCodeGen* codegen, const LCodeGen::X87Stack& x87_stack) - : codegen_(codegen), - external_exit_(NULL), - instruction_index_(codegen->current_instruction_), - x87_stack_(x87_stack) { - codegen->AddDeferredCode(this); - } - - virtual ~LDeferredCode() {} - virtual void Generate() = 0; - virtual LInstruction* instr() = 0; - - void SetExit(Label* exit) { external_exit_ = exit; } - Label* entry() { return &entry_; } - Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; } - Label* done() { return codegen_->NeedsDeferredFrame() ? &done_ : exit(); } - int instruction_index() const { return instruction_index_; } - const LCodeGen::X87Stack& x87_stack() const { return x87_stack_; } - - protected: - LCodeGen* codegen() const { return codegen_; } - MacroAssembler* masm() const { return codegen_->masm(); } - - private: - LCodeGen* codegen_; - Label entry_; - Label exit_; - Label* external_exit_; - Label done_; - int instruction_index_; - LCodeGen::X87Stack x87_stack_; -}; - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_X87_LITHIUM_CODEGEN_X87_H_ diff --git a/src/crankshaft/x87/lithium-gap-resolver-x87.cc b/src/crankshaft/x87/lithium-gap-resolver-x87.cc deleted file mode 100644 index 6bfc2e2a07..0000000000 --- a/src/crankshaft/x87/lithium-gap-resolver-x87.cc +++ /dev/null @@ -1,457 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#if V8_TARGET_ARCH_X87 - -#include "src/crankshaft/x87/lithium-gap-resolver-x87.h" -#include "src/register-configuration.h" - -#include "src/crankshaft/x87/lithium-codegen-x87.h" - -namespace v8 { -namespace internal { - -LGapResolver::LGapResolver(LCodeGen* owner) - : cgen_(owner), - moves_(32, owner->zone()), - source_uses_(), - destination_uses_(), - spilled_register_(-1) {} - - -void LGapResolver::Resolve(LParallelMove* parallel_move) { - DCHECK(HasBeenReset()); - // Build up a worklist of moves. - BuildInitialMoveList(parallel_move); - - for (int i = 0; i < moves_.length(); ++i) { - LMoveOperands move = moves_[i]; - // Skip constants to perform them last. They don't block other moves - // and skipping such moves with register destinations keeps those - // registers free for the whole algorithm. - if (!move.IsEliminated() && !move.source()->IsConstantOperand()) { - PerformMove(i); - } - } - - // Perform the moves with constant sources. 
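// Illustrative sketch, not part of the deleted sources:
// PushSafepointRegistersScope is an ordinary RAII guard - the registers are
// pushed and the expected safepoint kind switched in the constructor, and
// both are restored in the destructor, so every exit path out of a
// deferred-code block stays balanced. The shape of the pattern:
struct SafepointRegistersScopeModel {
  explicit SafepointRegistersScopeModel(bool* with_registers)
      : with_registers_(with_registers) {
    // masm->PushSafepointRegisters() would go here.
    *with_registers_ = true;
  }
  ~SafepointRegistersScopeModel() {
    // masm->PopSafepointRegisters() would go here.
    *with_registers_ = false;
  }

 private:
  bool* with_registers_;
};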
- for (int i = 0; i < moves_.length(); ++i) { - if (!moves_[i].IsEliminated()) { - DCHECK(moves_[i].source()->IsConstantOperand()); - EmitMove(i); - } - } - - Finish(); - DCHECK(HasBeenReset()); -} - - -void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) { - // Perform a linear sweep of the moves to add them to the initial list of - // moves to perform, ignoring any move that is redundant (the source is - // the same as the destination, the destination is ignored and - // unallocated, or the move was already eliminated). - const ZoneList* moves = parallel_move->move_operands(); - for (int i = 0; i < moves->length(); ++i) { - LMoveOperands move = moves->at(i); - if (!move.IsRedundant()) AddMove(move); - } - Verify(); -} - - -void LGapResolver::PerformMove(int index) { - // Each call to this function performs a move and deletes it from the move - // graph. We first recursively perform any move blocking this one. We - // mark a move as "pending" on entry to PerformMove in order to detect - // cycles in the move graph. We use operand swaps to resolve cycles, - // which means that a call to PerformMove could change any source operand - // in the move graph. - - DCHECK(!moves_[index].IsPending()); - DCHECK(!moves_[index].IsRedundant()); - - // Clear this move's destination to indicate a pending move. The actual - // destination is saved on the side. - DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated. - LOperand* destination = moves_[index].destination(); - moves_[index].set_destination(NULL); - - // Perform a depth-first traversal of the move graph to resolve - // dependencies. Any unperformed, unpending move with a source the same - // as this one's destination blocks this one so recursively perform all - // such moves. - for (int i = 0; i < moves_.length(); ++i) { - LMoveOperands other_move = moves_[i]; - if (other_move.Blocks(destination) && !other_move.IsPending()) { - // Though PerformMove can change any source operand in the move graph, - // this call cannot create a blocking move via a swap (this loop does - // not miss any). Assume there is a non-blocking move with source A - // and this move is blocked on source B and there is a swap of A and - // B. Then A and B must be involved in the same cycle (or they would - // not be swapped). Since this move's destination is B and there is - // only a single incoming edge to an operand, this move must also be - // involved in the same cycle. In that case, the blocking move will - // be created but will be "pending" when we return from PerformMove. - PerformMove(i); - } - } - - // We are about to resolve this move and don't need it marked as - // pending, so restore its destination. - moves_[index].set_destination(destination); - - // This move's source may have changed due to swaps to resolve cycles and - // so it may now be the last move in the cycle. If so remove it. - if (moves_[index].source()->Equals(destination)) { - RemoveMove(index); - return; - } - - // The move may be blocked on a (at most one) pending move, in which case - // we have a cycle. Search for such a blocking move and perform a swap to - // resolve it. - for (int i = 0; i < moves_.length(); ++i) { - LMoveOperands other_move = moves_[i]; - if (other_move.Blocks(destination)) { - DCHECK(other_move.IsPending()); - EmitSwap(index); - return; - } - } - - // This move is not blocked. 
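// Illustrative sketch, not part of the deleted sources: Resolve/PerformMove
// above implement the classic parallel-move algorithm - perform non-constant
// moves first, recurse on any move that blocks the current destination, and
// break the one possible remaining cycle with a swap. A small standalone
// version over integer "locations":
#include <cstdio>
#include <vector>

struct Move { int src, dst; bool pending = false, done = false; };

void PerformMove(std::vector<Move>& moves, size_t index) {
  moves[index].pending = true;
  // Depth-first: first perform every move whose source is our destination.
  for (size_t i = 0; i < moves.size(); ++i) {
    if (i != index && !moves[i].done && !moves[i].pending &&
        moves[i].src == moves[index].dst) {
      PerformMove(moves, i);
    }
  }
  moves[index].pending = false;
  Move& m = moves[index];
  if (m.src == m.dst) { m.done = true; return; }  // became redundant
  for (size_t i = 0; i < moves.size(); ++i) {
    if (i == index || moves[i].done || moves[i].src != m.dst) continue;
    // A still-pending move reads our destination: we are in a cycle, so a
    // swap performs this move and retargets the sources it invalidated.
    std::printf("swap %d <-> %d\n", m.src, m.dst);
    for (size_t j = 0; j < moves.size(); ++j) {
      if (j == index || moves[j].done) continue;
      if (moves[j].src == m.src)      moves[j].src = m.dst;
      else if (moves[j].src == m.dst) moves[j].src = m.src;
    }
    m.done = true;
    return;
  }
  std::printf("mov  %d -> %d\n", m.src, m.dst);
  m.done = true;
}

int main() {
  std::vector<Move> moves = {{1, 2}, {2, 3}, {3, 1}};  // a three-cycle
  for (size_t i = 0; i < moves.size(); ++i) {
    if (!moves[i].done) PerformMove(moves, i);  // emits exactly two swaps
  }
}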
- EmitMove(index); -} - - -void LGapResolver::AddMove(LMoveOperands move) { - LOperand* source = move.source(); - if (source->IsRegister()) ++source_uses_[source->index()]; - - LOperand* destination = move.destination(); - if (destination->IsRegister()) ++destination_uses_[destination->index()]; - - moves_.Add(move, cgen_->zone()); -} - - -void LGapResolver::RemoveMove(int index) { - LOperand* source = moves_[index].source(); - if (source->IsRegister()) { - --source_uses_[source->index()]; - DCHECK(source_uses_[source->index()] >= 0); - } - - LOperand* destination = moves_[index].destination(); - if (destination->IsRegister()) { - --destination_uses_[destination->index()]; - DCHECK(destination_uses_[destination->index()] >= 0); - } - - moves_[index].Eliminate(); -} - - -int LGapResolver::CountSourceUses(LOperand* operand) { - int count = 0; - for (int i = 0; i < moves_.length(); ++i) { - if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) { - ++count; - } - } - return count; -} - - -Register LGapResolver::GetFreeRegisterNot(Register reg) { - int skip_index = reg.is(no_reg) ? -1 : reg.code(); - const RegisterConfiguration* config = RegisterConfiguration::Crankshaft(); - for (int i = 0; i < config->num_allocatable_general_registers(); ++i) { - int code = config->GetAllocatableGeneralCode(i); - if (source_uses_[code] == 0 && destination_uses_[code] > 0 && - code != skip_index) { - return Register::from_code(code); - } - } - return no_reg; -} - - -bool LGapResolver::HasBeenReset() { - if (!moves_.is_empty()) return false; - if (spilled_register_ >= 0) return false; - const RegisterConfiguration* config = RegisterConfiguration::Crankshaft(); - for (int i = 0; i < config->num_allocatable_general_registers(); ++i) { - int code = config->GetAllocatableGeneralCode(i); - if (source_uses_[code] != 0) return false; - if (destination_uses_[code] != 0) return false; - } - return true; -} - - -void LGapResolver::Verify() { -#ifdef ENABLE_SLOW_DCHECKS - // No operand should be the destination for more than one move. - for (int i = 0; i < moves_.length(); ++i) { - LOperand* destination = moves_[i].destination(); - for (int j = i + 1; j < moves_.length(); ++j) { - SLOW_DCHECK(!destination->Equals(moves_[j].destination())); - } - } -#endif -} - - -#define __ ACCESS_MASM(cgen_->masm()) - -void LGapResolver::Finish() { - if (spilled_register_ >= 0) { - __ pop(Register::from_code(spilled_register_)); - spilled_register_ = -1; - } - moves_.Rewind(0); -} - - -void LGapResolver::EnsureRestored(LOperand* operand) { - if (operand->IsRegister() && operand->index() == spilled_register_) { - __ pop(Register::from_code(spilled_register_)); - spilled_register_ = -1; - } -} - - -Register LGapResolver::EnsureTempRegister() { - // 1. We may have already spilled to create a temp register. - if (spilled_register_ >= 0) { - return Register::from_code(spilled_register_); - } - - // 2. We may have a free register that we can use without spilling. - Register free = GetFreeRegisterNot(no_reg); - if (!free.is(no_reg)) return free; - - // 3. Prefer to spill a register that is not used in any remaining move - // because it will not need to be restored until the end. 
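// Illustrative sketch, not part of the deleted sources: EnsureTempRegister
// above picks a scratch register in order of increasing cost - reuse an
// already spilled one, take a register whose current value is dead (written
// by some remaining move but read by none), spill a register no remaining
// move touches, and only as a last resort spill an arbitrary one:
#include <array>

struct TempChoice { int code; bool needs_spill; };

TempChoice PickTemp(const std::array<int, 8>& source_uses,
                    const std::array<int, 8>& destination_uses,
                    int already_spilled /* -1 if none */) {
  if (already_spilled >= 0) return {already_spilled, false};  // 1. reuse
  for (int r = 0; r < 8; ++r) {                               // 2. dead value
    if (source_uses[r] == 0 && destination_uses[r] > 0) return {r, false};
  }
  for (int r = 0; r < 8; ++r) {                               // 3. untouched
    if (source_uses[r] == 0 && destination_uses[r] == 0) return {r, true};
  }
  return {0, true};                                           // 4. arbitrary
}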
- const RegisterConfiguration* config = RegisterConfiguration::Crankshaft(); - for (int i = 0; i < config->num_allocatable_general_registers(); ++i) { - int code = config->GetAllocatableGeneralCode(i); - if (source_uses_[code] == 0 && destination_uses_[code] == 0) { - Register scratch = Register::from_code(code); - __ push(scratch); - spilled_register_ = code; - return scratch; - } - } - - // 4. Use an arbitrary register. Register 0 is as arbitrary as any other. - spilled_register_ = config->GetAllocatableGeneralCode(0); - Register scratch = Register::from_code(spilled_register_); - __ push(scratch); - return scratch; -} - - -void LGapResolver::EmitMove(int index) { - LOperand* source = moves_[index].source(); - LOperand* destination = moves_[index].destination(); - EnsureRestored(source); - EnsureRestored(destination); - - // Dispatch on the source and destination operand kinds. Not all - // combinations are possible. - if (source->IsRegister()) { - DCHECK(destination->IsRegister() || destination->IsStackSlot()); - Register src = cgen_->ToRegister(source); - Operand dst = cgen_->ToOperand(destination); - __ mov(dst, src); - - } else if (source->IsStackSlot()) { - DCHECK(destination->IsRegister() || destination->IsStackSlot()); - Operand src = cgen_->ToOperand(source); - if (destination->IsRegister()) { - Register dst = cgen_->ToRegister(destination); - __ mov(dst, src); - } else { - // Spill on demand to use a temporary register for memory-to-memory - // moves. - Register tmp = EnsureTempRegister(); - Operand dst = cgen_->ToOperand(destination); - __ mov(tmp, src); - __ mov(dst, tmp); - } - - } else if (source->IsConstantOperand()) { - LConstantOperand* constant_source = LConstantOperand::cast(source); - if (destination->IsRegister()) { - Register dst = cgen_->ToRegister(destination); - Representation r = cgen_->IsSmi(constant_source) - ? Representation::Smi() : Representation::Integer32(); - if (cgen_->IsInteger32(constant_source)) { - __ Move(dst, cgen_->ToImmediate(constant_source, r)); - } else { - __ LoadObject(dst, cgen_->ToHandle(constant_source)); - } - } else if (destination->IsDoubleRegister()) { - double v = cgen_->ToDouble(constant_source); - uint64_t int_val = bit_cast(v); - int32_t lower = static_cast(int_val); - int32_t upper = static_cast(int_val >> kBitsPerInt); - __ push(Immediate(upper)); - __ push(Immediate(lower)); - X87Register dst = cgen_->ToX87Register(destination); - cgen_->X87Mov(dst, MemOperand(esp, 0)); - __ add(esp, Immediate(kDoubleSize)); - } else { - DCHECK(destination->IsStackSlot()); - Operand dst = cgen_->ToOperand(destination); - Representation r = cgen_->IsSmi(constant_source) - ? Representation::Smi() : Representation::Integer32(); - if (cgen_->IsInteger32(constant_source)) { - __ Move(dst, cgen_->ToImmediate(constant_source, r)); - } else { - Register tmp = EnsureTempRegister(); - __ LoadObject(tmp, cgen_->ToHandle(constant_source)); - __ mov(dst, tmp); - } - } - - } else if (source->IsDoubleRegister()) { - // load from the register onto the stack, store in destination, which must - // be a double stack slot in the non-SSE2 case. 
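// Illustrative sketch, not part of the deleted sources: to materialize a
// double constant for the x87 FPU, EmitMove above pushes its upper and lower
// 32-bit halves and then loads the resulting 8-byte stack slot:
#include <cstdint>
#include <cstring>

void SplitDouble(double v, uint32_t* lower, uint32_t* upper) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof(bits));  // the bit_cast<uint64_t>(v) above
  *lower = static_cast<uint32_t>(bits);
  *upper = static_cast<uint32_t>(bits >> 32);
}
// push(upper); push(lower);  // little-endian: lower word at the lower address
// fld qword ptr [esp]; add esp, 8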
- if (destination->IsDoubleStackSlot()) { - Operand dst = cgen_->ToOperand(destination); - X87Register src = cgen_->ToX87Register(source); - cgen_->X87Mov(dst, src); - } else { - X87Register dst = cgen_->ToX87Register(destination); - X87Register src = cgen_->ToX87Register(source); - cgen_->X87Mov(dst, src); - } - } else if (source->IsDoubleStackSlot()) { - // load from the stack slot on top of the floating point stack, and then - // store in destination. If destination is a double register, then it - // represents the top of the stack and nothing needs to be done. - if (destination->IsDoubleStackSlot()) { - Register tmp = EnsureTempRegister(); - Operand src0 = cgen_->ToOperand(source); - Operand src1 = cgen_->HighOperand(source); - Operand dst0 = cgen_->ToOperand(destination); - Operand dst1 = cgen_->HighOperand(destination); - __ mov(tmp, src0); // Then use tmp to copy source to destination. - __ mov(dst0, tmp); - __ mov(tmp, src1); - __ mov(dst1, tmp); - } else { - Operand src = cgen_->ToOperand(source); - X87Register dst = cgen_->ToX87Register(destination); - cgen_->X87Mov(dst, src); - } - } else { - UNREACHABLE(); - } - - RemoveMove(index); -} - - -void LGapResolver::EmitSwap(int index) { - LOperand* source = moves_[index].source(); - LOperand* destination = moves_[index].destination(); - EnsureRestored(source); - EnsureRestored(destination); - - // Dispatch on the source and destination operand kinds. Not all - // combinations are possible. - if (source->IsRegister() && destination->IsRegister()) { - // Register-register. - Register src = cgen_->ToRegister(source); - Register dst = cgen_->ToRegister(destination); - __ xchg(dst, src); - - } else if ((source->IsRegister() && destination->IsStackSlot()) || - (source->IsStackSlot() && destination->IsRegister())) { - // Register-memory. Use a free register as a temp if possible. Do not - // spill on demand because the simple spill implementation cannot avoid - // spilling src at this point. - Register tmp = GetFreeRegisterNot(no_reg); - Register reg = - cgen_->ToRegister(source->IsRegister() ? source : destination); - Operand mem = - cgen_->ToOperand(source->IsRegister() ? destination : source); - if (tmp.is(no_reg)) { - __ xor_(reg, mem); - __ xor_(mem, reg); - __ xor_(reg, mem); - } else { - __ mov(tmp, mem); - __ mov(mem, reg); - __ mov(reg, tmp); - } - - } else if (source->IsStackSlot() && destination->IsStackSlot()) { - // Memory-memory. Spill on demand to use a temporary. If there is a - // free register after that, use it as a second temporary. - Register tmp0 = EnsureTempRegister(); - Register tmp1 = GetFreeRegisterNot(tmp0); - Operand src = cgen_->ToOperand(source); - Operand dst = cgen_->ToOperand(destination); - if (tmp1.is(no_reg)) { - // Only one temp register available to us. - __ mov(tmp0, dst); - __ xor_(tmp0, src); - __ xor_(src, tmp0); - __ xor_(tmp0, src); - __ mov(dst, tmp0); - } else { - __ mov(tmp0, dst); - __ mov(tmp1, src); - __ mov(dst, tmp1); - __ mov(src, tmp0); - } - } else { - // No other combinations are possible. - UNREACHABLE(); - } - - // The swap of source and destination has executed a move from source to - // destination. - RemoveMove(index); - - // Any unperformed (including pending) move with a source of either - // this move's source or destination needs to have their source - // changed to reflect the state of affairs after the swap. 
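// Illustrative sketch, not part of the deleted sources: when no temp register
// is free, EmitSwap above falls back on the three-xor trick, which exchanges
// a register and a memory operand in place:
#include <cassert>

void XorSwap(int* a, int* b) {
  assert(a != b);  // the gap resolver never swaps an operand with itself
  *a ^= *b;
  *b ^= *a;
  *a ^= *b;
}
// Each operand ends up holding the other's old value, at the cost of three
// read-modify-write memory operations instead of three plain moves.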
- for (int i = 0; i < moves_.length(); ++i) { - LMoveOperands other_move = moves_[i]; - if (other_move.Blocks(source)) { - moves_[i].set_source(destination); - } else if (other_move.Blocks(destination)) { - moves_[i].set_source(source); - } - } - - // In addition to swapping the actual uses as sources, we need to update - // the use counts. - if (source->IsRegister() && destination->IsRegister()) { - int temp = source_uses_[source->index()]; - source_uses_[source->index()] = source_uses_[destination->index()]; - source_uses_[destination->index()] = temp; - } else if (source->IsRegister()) { - // We don't have use counts for non-register operands like destination. - // Compute those counts now. - source_uses_[source->index()] = CountSourceUses(source); - } else if (destination->IsRegister()) { - source_uses_[destination->index()] = CountSourceUses(destination); - } -} - -#undef __ - -} // namespace internal -} // namespace v8 - -#endif // V8_TARGET_ARCH_X87 diff --git a/src/crankshaft/x87/lithium-gap-resolver-x87.h b/src/crankshaft/x87/lithium-gap-resolver-x87.h deleted file mode 100644 index 6b6e2e64b6..0000000000 --- a/src/crankshaft/x87/lithium-gap-resolver-x87.h +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_X87_LITHIUM_GAP_RESOLVER_X87_H_ -#define V8_CRANKSHAFT_X87_LITHIUM_GAP_RESOLVER_X87_H_ - -#include "src/crankshaft/lithium.h" - -namespace v8 { -namespace internal { - -class LCodeGen; -class LGapResolver; - -class LGapResolver final BASE_EMBEDDED { - public: - explicit LGapResolver(LCodeGen* owner); - - // Resolve a set of parallel moves, emitting assembler instructions. - void Resolve(LParallelMove* parallel_move); - - private: - // Build the initial list of moves. - void BuildInitialMoveList(LParallelMove* parallel_move); - - // Perform the move at the moves_ index in question (possibly requiring - // other moves to satisfy dependencies). - void PerformMove(int index); - - // Emit any code necessary at the end of a gap move. - void Finish(); - - // Add or delete a move from the move graph without emitting any code. - // Used to build up the graph and remove trivial moves. - void AddMove(LMoveOperands move); - void RemoveMove(int index); - - // Report the count of uses of operand as a source in a not-yet-performed - // move. Used to rebuild use counts. - int CountSourceUses(LOperand* operand); - - // Emit a move and remove it from the move graph. - void EmitMove(int index); - - // Execute a move by emitting a swap of two operands. The move from - // source to destination is removed from the move graph. - void EmitSwap(int index); - - // Ensure that the given operand is not spilled. - void EnsureRestored(LOperand* operand); - - // Return a register that can be used as a temp register, spilling - // something if necessary. - Register EnsureTempRegister(); - - // Return a known free register different from the given one (which could - // be no_reg---returning any free register), or no_reg if there is no such - // register. - Register GetFreeRegisterNot(Register reg); - - // Verify that the state is the initial one, ready to resolve a single - // parallel move. - bool HasBeenReset(); - - // Verify the move list before performing moves. - void Verify(); - - LCodeGen* cgen_; - - // List of moves not yet resolved. - ZoneList moves_; - - // Source and destination use counts for the general purpose registers. 
- int source_uses_[Register::kNumRegisters]; - int destination_uses_[DoubleRegister::kMaxNumRegisters]; - - // If we had to spill on demand, the currently spilled register's - // allocation index. - int spilled_register_; -}; - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_X87_LITHIUM_GAP_RESOLVER_X87_H_ diff --git a/src/crankshaft/x87/lithium-x87.cc b/src/crankshaft/x87/lithium-x87.cc deleted file mode 100644 index be0f6834a1..0000000000 --- a/src/crankshaft/x87/lithium-x87.cc +++ /dev/null @@ -1,2453 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/crankshaft/x87/lithium-x87.h" - -#include - -#if V8_TARGET_ARCH_X87 - -#include "src/crankshaft/lithium-inl.h" -#include "src/crankshaft/x87/lithium-codegen-x87.h" - -namespace v8 { -namespace internal { - -#define DEFINE_COMPILE(type) \ - void L##type::CompileToNative(LCodeGen* generator) { \ - generator->Do##type(this); \ - } -LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE) -#undef DEFINE_COMPILE - - -#ifdef DEBUG -void LInstruction::VerifyCall() { - // Call instructions can use only fixed registers as temporaries and - // outputs because all registers are blocked by the calling convention. - // Inputs operands must use a fixed register or use-at-start policy or - // a non-register policy. - DCHECK(Output() == NULL || - LUnallocated::cast(Output())->HasFixedPolicy() || - !LUnallocated::cast(Output())->HasRegisterPolicy()); - for (UseIterator it(this); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - DCHECK(operand->HasFixedPolicy() || - operand->IsUsedAtStart()); - } - for (TempIterator it(this); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy()); - } -} -#endif - - -bool LInstruction::HasDoubleRegisterResult() { - return HasResult() && result()->IsDoubleRegister(); -} - - -bool LInstruction::HasDoubleRegisterInput() { - for (int i = 0; i < InputCount(); i++) { - LOperand* op = InputAt(i); - if (op != NULL && op->IsDoubleRegister()) { - return true; - } - } - return false; -} - - -bool LInstruction::IsDoubleInput(X87Register reg, LCodeGen* cgen) { - for (int i = 0; i < InputCount(); i++) { - LOperand* op = InputAt(i); - if (op != NULL && op->IsDoubleRegister()) { - if (cgen->ToX87Register(op).is(reg)) return true; - } - } - return false; -} - - -void LInstruction::PrintTo(StringStream* stream) { - stream->Add("%s ", this->Mnemonic()); - - PrintOutputOperandTo(stream); - - PrintDataTo(stream); - - if (HasEnvironment()) { - stream->Add(" "); - environment()->PrintTo(stream); - } - - if (HasPointerMap()) { - stream->Add(" "); - pointer_map()->PrintTo(stream); - } -} - - -void LInstruction::PrintDataTo(StringStream* stream) { - stream->Add("= "); - for (int i = 0; i < InputCount(); i++) { - if (i > 0) stream->Add(" "); - if (InputAt(i) == NULL) { - stream->Add("NULL"); - } else { - InputAt(i)->PrintTo(stream); - } - } -} - - -void LInstruction::PrintOutputOperandTo(StringStream* stream) { - if (HasResult()) result()->PrintTo(stream); -} - - -void LLabel::PrintDataTo(StringStream* stream) { - LGap::PrintDataTo(stream); - LLabel* rep = replacement(); - if (rep != NULL) { - stream->Add(" Dead block replaced with B%d", rep->block_id()); - } -} - - -bool LGap::IsRedundant() const { - for (int i = 0; i < 4; i++) { - if (parallel_moves_[i] 
!= NULL && !parallel_moves_[i]->IsRedundant()) { - return false; - } - } - - return true; -} - - -void LGap::PrintDataTo(StringStream* stream) { - for (int i = 0; i < 4; i++) { - stream->Add("("); - if (parallel_moves_[i] != NULL) { - parallel_moves_[i]->PrintDataTo(stream); - } - stream->Add(") "); - } -} - - -const char* LArithmeticD::Mnemonic() const { - switch (op()) { - case Token::ADD: return "add-d"; - case Token::SUB: return "sub-d"; - case Token::MUL: return "mul-d"; - case Token::DIV: return "div-d"; - case Token::MOD: return "mod-d"; - default: - UNREACHABLE(); - } -} - - -const char* LArithmeticT::Mnemonic() const { - switch (op()) { - case Token::ADD: return "add-t"; - case Token::SUB: return "sub-t"; - case Token::MUL: return "mul-t"; - case Token::MOD: return "mod-t"; - case Token::DIV: return "div-t"; - case Token::BIT_AND: return "bit-and-t"; - case Token::BIT_OR: return "bit-or-t"; - case Token::BIT_XOR: return "bit-xor-t"; - case Token::ROR: return "ror-t"; - case Token::SHL: return "sal-t"; - case Token::SAR: return "sar-t"; - case Token::SHR: return "shr-t"; - default: - UNREACHABLE(); - } -} - - -bool LGoto::HasInterestingComment(LCodeGen* gen) const { - return !gen->IsNextEmittedBlock(block_id()); -} - - -void LGoto::PrintDataTo(StringStream* stream) { - stream->Add("B%d", block_id()); -} - - -void LBranch::PrintDataTo(StringStream* stream) { - stream->Add("B%d | B%d on ", true_block_id(), false_block_id()); - value()->PrintTo(stream); -} - - -void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if "); - left()->PrintTo(stream); - stream->Add(" %s ", Token::String(op())); - right()->PrintTo(stream); - stream->Add(" then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LIsStringAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_string("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LIsSmiAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_smi("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_undetectable("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LStringCompareAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if string_compare("); - left()->PrintTo(stream); - right()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if has_instance_type("); - value()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - -void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if class_of_test("); - value()->PrintTo(stream); - stream->Add(", \"%o\") then B%d else B%d", *hydrogen()->class_name(), - true_block_id(), false_block_id()); -} - -void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if typeof "); - value()->PrintTo(stream); - stream->Add(" == \"%s\" then B%d else B%d", - hydrogen()->type_literal()->ToCString().get(), - true_block_id(), false_block_id()); -} - - -void LStoreCodeEntry::PrintDataTo(StringStream* stream) { - stream->Add(" = "); - function()->PrintTo(stream); - stream->Add(".code_entry = "); - code_object()->PrintTo(stream); -} - - -void 
LInnerAllocatedObject::PrintDataTo(StringStream* stream) { - stream->Add(" = "); - base_object()->PrintTo(stream); - stream->Add(" + "); - offset()->PrintTo(stream); -} - - -void LCallWithDescriptor::PrintDataTo(StringStream* stream) { - for (int i = 0; i < InputCount(); i++) { - InputAt(i)->PrintTo(stream); - stream->Add(" "); - } - stream->Add("#%d / ", arity()); -} - - -void LLoadContextSlot::PrintDataTo(StringStream* stream) { - context()->PrintTo(stream); - stream->Add("[%d]", slot_index()); -} - - -void LStoreContextSlot::PrintDataTo(StringStream* stream) { - context()->PrintTo(stream); - stream->Add("[%d] <- ", slot_index()); - value()->PrintTo(stream); -} - - -void LInvokeFunction::PrintDataTo(StringStream* stream) { - stream->Add("= "); - context()->PrintTo(stream); - stream->Add(" "); - function()->PrintTo(stream); - stream->Add(" #%d / ", arity()); -} - - -void LCallNewArray::PrintDataTo(StringStream* stream) { - stream->Add("= "); - context()->PrintTo(stream); - stream->Add(" "); - constructor()->PrintTo(stream); - stream->Add(" #%d / ", arity()); - ElementsKind kind = hydrogen()->elements_kind(); - stream->Add(" (%s) ", ElementsKindToString(kind)); -} - - -void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { - arguments()->PrintTo(stream); - - stream->Add(" length "); - length()->PrintTo(stream); - - stream->Add(" index "); - index()->PrintTo(stream); -} - - -int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) { - // Skip a slot if for a double-width slot. - if (kind == DOUBLE_REGISTERS) { - current_frame_slots_++; - current_frame_slots_ |= 1; - num_double_slots_++; - } - return current_frame_slots_++; -} - - -LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) { - int index = GetNextSpillIndex(kind); - if (kind == DOUBLE_REGISTERS) { - return LDoubleStackSlot::Create(index, zone()); - } else { - DCHECK(kind == GENERAL_REGISTERS); - return LStackSlot::Create(index, zone()); - } -} - - -void LStoreNamedField::PrintDataTo(StringStream* stream) { - object()->PrintTo(stream); - std::ostringstream os; - os << hydrogen()->access() << " <- "; - stream->Add(os.str().c_str()); - value()->PrintTo(stream); -} - - -void LLoadKeyed::PrintDataTo(StringStream* stream) { - elements()->PrintTo(stream); - stream->Add("["); - key()->PrintTo(stream); - if (hydrogen()->IsDehoisted()) { - stream->Add(" + %d]", base_offset()); - } else { - stream->Add("]"); - } -} - - -void LStoreKeyed::PrintDataTo(StringStream* stream) { - elements()->PrintTo(stream); - stream->Add("["); - key()->PrintTo(stream); - if (hydrogen()->IsDehoisted()) { - stream->Add(" + %d] <-", base_offset()); - } else { - stream->Add("] <- "); - } - - if (value() == NULL) { - DCHECK(hydrogen()->IsConstantHoleStore() && - hydrogen()->value()->representation().IsDouble()); - stream->Add(""); - } else { - value()->PrintTo(stream); - } -} - - -void LTransitionElementsKind::PrintDataTo(StringStream* stream) { - object()->PrintTo(stream); - stream->Add(" %p -> %p", *original_map(), *transitioned_map()); -} - - -LPlatformChunk* LChunkBuilder::Build() { - DCHECK(is_unused()); - chunk_ = new(zone()) LPlatformChunk(info(), graph()); - LPhase phase("L_Building chunk", chunk_); - status_ = BUILDING; - - const ZoneList* blocks = graph()->blocks(); - for (int i = 0; i < blocks->length(); i++) { - HBasicBlock* next = NULL; - if (i < blocks->length() - 1) next = blocks->at(i + 1); - DoBasicBlock(blocks->at(i), next); - if (is_aborted()) return NULL; - } - status_ = DONE; - return chunk_; -} - - -LUnallocated* 
LChunkBuilder::ToUnallocated(Register reg) { - return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code()); -} - - -LUnallocated* LChunkBuilder::ToUnallocated(X87Register reg) { - return new (zone()) - LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code()); -} - - -LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) { - return Use(value, ToUnallocated(fixed_register)); -} - - -LOperand* LChunkBuilder::UseRegister(HValue* value) { - return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER)); -} - - -LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) { - return Use(value, - new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER, - LUnallocated::USED_AT_START)); -} - - -LOperand* LChunkBuilder::UseTempRegister(HValue* value) { - return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER)); -} - - -LOperand* LChunkBuilder::Use(HValue* value) { - return Use(value, new(zone()) LUnallocated(LUnallocated::NONE)); -} - - -LOperand* LChunkBuilder::UseAtStart(HValue* value) { - return Use(value, new(zone()) LUnallocated(LUnallocated::NONE, - LUnallocated::USED_AT_START)); -} - - -static inline bool CanBeImmediateConstant(HValue* value) { - return value->IsConstant() && HConstant::cast(value)->NotInNewSpace(); -} - - -LOperand* LChunkBuilder::UseOrConstant(HValue* value) { - return CanBeImmediateConstant(value) - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : Use(value); -} - - -LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) { - return CanBeImmediateConstant(value) - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseAtStart(value); -} - - -LOperand* LChunkBuilder::UseFixedOrConstant(HValue* value, - Register fixed_register) { - return CanBeImmediateConstant(value) - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseFixed(value, fixed_register); -} - - -LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) { - return CanBeImmediateConstant(value) - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseRegister(value); -} - - -LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) { - return CanBeImmediateConstant(value) - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseRegisterAtStart(value); -} - - -LOperand* LChunkBuilder::UseConstant(HValue* value) { - return chunk_->DefineConstantOperand(HConstant::cast(value)); -} - - -LOperand* LChunkBuilder::UseAny(HValue* value) { - return value->IsConstant() - ? 
chunk_->DefineConstantOperand(HConstant::cast(value)) - : Use(value, new(zone()) LUnallocated(LUnallocated::ANY)); -} - - -LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) { - if (value->EmitAtUses()) { - HInstruction* instr = HInstruction::cast(value); - VisitInstruction(instr); - } - operand->set_virtual_register(value->id()); - return operand; -} - - -LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr, - LUnallocated* result) { - result->set_virtual_register(current_instruction_->id()); - instr->set_result(result); - return instr; -} - - -LInstruction* LChunkBuilder::DefineAsRegister( - LTemplateResultInstruction<1>* instr) { - return Define(instr, - new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER)); -} - - -LInstruction* LChunkBuilder::DefineAsSpilled( - LTemplateResultInstruction<1>* instr, - int index) { - return Define(instr, - new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index)); -} - - -LInstruction* LChunkBuilder::DefineSameAsFirst( - LTemplateResultInstruction<1>* instr) { - return Define(instr, - new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT)); -} - - -LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr, - Register reg) { - return Define(instr, ToUnallocated(reg)); -} - - -LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr, - X87Register reg) { - return Define(instr, ToUnallocated(reg)); -} - - -LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { - HEnvironment* hydrogen_env = current_block_->last_environment(); - return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env); -} - - -LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, - HInstruction* hinstr, - CanDeoptimize can_deoptimize) { - info()->MarkAsNonDeferredCalling(); - -#ifdef DEBUG - instr->VerifyCall(); -#endif - instr->MarkAsCall(); - instr = AssignPointerMap(instr); - - // If instruction does not have side-effects lazy deoptimization - // after the call will try to deoptimize to the point before the call. - // Thus we still need to attach environment to this call even if - // call sequence can not deoptimize eagerly. - bool needs_environment = - (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || - !hinstr->HasObservableSideEffects(); - if (needs_environment && !instr->HasEnvironment()) { - instr = AssignEnvironment(instr); - // We can't really figure out if the environment is needed or not. 
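// Illustrative sketch, not part of the deleted sources: the Use*/Define*
// helpers above differ only in the allocation policy they attach to the
// unallocated operand. A condensed model of that policy lattice:
enum class Policy {
  kNone,              // Use(): any location the allocator likes
  kAny,               // UseAny(): also allows constants and spill slots
  kRegister,          // UseRegister(): must live in some register
  kWritableRegister,  // UseTempRegister(): a register the instr may clobber
  kFixedRegister,     // UseFixed(): one specific register, e.g. ecx or eax
  kSameAsFirstInput   // DefineSameAsFirst(): two-operand x86 instructions
};

struct OperandRequest {
  Policy policy;
  bool used_at_start;  // *AtStart(): input may share a register with the output
};

OperandRequest ModelUseRegisterAtStart() { return {Policy::kRegister, true}; }
OperandRequest ModelUseFixed()           { return {Policy::kFixedRegister, false}; }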
- instr->environment()->set_has_been_used(); - } - - return instr; -} - - -LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { - DCHECK(!instr->HasPointerMap()); - instr->set_pointer_map(new(zone()) LPointerMap(zone())); - return instr; -} - - -LUnallocated* LChunkBuilder::TempRegister() { - LUnallocated* operand = - new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER); - int vreg = allocator_->GetVirtualRegister(); - if (!allocator_->AllocationOk()) { - Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister); - vreg = 0; - } - operand->set_virtual_register(vreg); - return operand; -} - - -LOperand* LChunkBuilder::FixedTemp(Register reg) { - LUnallocated* operand = ToUnallocated(reg); - DCHECK(operand->HasFixedPolicy()); - return operand; -} - - -LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) { - return new(zone()) LLabel(instr->block()); -} - - -LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) { - return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value()))); -} - - -LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) { - UNREACHABLE(); -} - - -LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) { - return AssignEnvironment(new(zone()) LDeoptimize); -} - - -LInstruction* LChunkBuilder::DoShift(Token::Value op, - HBitwiseBinaryOperation* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* left = UseRegisterAtStart(instr->left()); - - HValue* right_value = instr->right(); - LOperand* right = NULL; - int constant_value = 0; - bool does_deopt = false; - if (right_value->IsConstant()) { - HConstant* constant = HConstant::cast(right_value); - right = chunk_->DefineConstantOperand(constant); - constant_value = constant->Integer32Value() & 0x1f; - // Left shifts can deoptimize if we shift by > 0 and the result cannot be - // truncated to smi. - if (instr->representation().IsSmi() && constant_value > 0) { - does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi); - } - } else { - right = UseFixed(right_value, ecx); - } - - // Shift operations can only deoptimize if we do a logical shift by 0 and - // the result cannot be truncated to int32. - if (op == Token::SHR && constant_value == 0) { - does_deopt = !instr->CheckFlag(HInstruction::kUint32); - } - - LInstruction* result = - DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt)); - return does_deopt ? 
AssignEnvironment(result) : result; - } else { - return DoArithmeticT(op, instr); - } -} - - -LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op, - HArithmeticBinaryOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - DCHECK(instr->right()->representation().IsDouble()); - if (op == Token::MOD) { - LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); - LOperand* right = UseRegisterAtStart(instr->BetterRightOperand()); - LArithmeticD* result = new(zone()) LArithmeticD(op, left, right); - return MarkAsCall(DefineSameAsFirst(result), instr); - } else { - LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); - LOperand* right = UseRegisterAtStart(instr->BetterRightOperand()); - LArithmeticD* result = new(zone()) LArithmeticD(op, left, right); - return DefineSameAsFirst(result); - } -} - - -LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op, - HBinaryOperation* instr) { - HValue* left = instr->left(); - HValue* right = instr->right(); - DCHECK(left->representation().IsTagged()); - DCHECK(right->representation().IsTagged()); - LOperand* context = UseFixed(instr->context(), esi); - LOperand* left_operand = UseFixed(left, edx); - LOperand* right_operand = UseFixed(right, eax); - LArithmeticT* result = - new(zone()) LArithmeticT(op, context, left_operand, right_operand); - return MarkAsCall(DefineFixed(result, eax), instr); -} - - -void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { - DCHECK(is_building()); - current_block_ = block; - next_block_ = next_block; - if (block->IsStartBlock()) { - block->UpdateEnvironment(graph_->start_environment()); - argument_count_ = 0; - } else if (block->predecessors()->length() == 1) { - // We have a single predecessor => copy environment and outgoing - // argument count from the predecessor. - DCHECK(block->phis()->length() == 0); - HBasicBlock* pred = block->predecessors()->at(0); - HEnvironment* last_environment = pred->last_environment(); - DCHECK(last_environment != NULL); - // Only copy the environment, if it is later used again. - if (pred->end()->SecondSuccessor() == NULL) { - DCHECK(pred->end()->FirstSuccessor() == block); - } else { - if (pred->end()->FirstSuccessor()->block_id() > block->block_id() || - pred->end()->SecondSuccessor()->block_id() > block->block_id()) { - last_environment = last_environment->Copy(); - } - } - block->UpdateEnvironment(last_environment); - DCHECK(pred->argument_count() >= 0); - argument_count_ = pred->argument_count(); - } else { - // We are at a state join => process phis. - HBasicBlock* pred = block->predecessors()->at(0); - // No need to copy the environment, it cannot be used later. - HEnvironment* last_environment = pred->last_environment(); - for (int i = 0; i < block->phis()->length(); ++i) { - HPhi* phi = block->phis()->at(i); - if (phi->HasMergedIndex()) { - last_environment->SetValueAt(phi->merged_index(), phi); - } - } - for (int i = 0; i < block->deleted_phis()->length(); ++i) { - if (block->deleted_phis()->at(i) < last_environment->length()) { - last_environment->SetValueAt(block->deleted_phis()->at(i), - graph_->GetConstantUndefined()); - } - } - block->UpdateEnvironment(last_environment); - // Pick up the outgoing argument count of one of the predecessors. 
- argument_count_ = pred->argument_count(); - } - HInstruction* current = block->first(); - int start = chunk_->instructions()->length(); - while (current != NULL && !is_aborted()) { - // Code for constants in registers is generated lazily. - if (!current->EmitAtUses()) { - VisitInstruction(current); - } - current = current->next(); - } - int end = chunk_->instructions()->length() - 1; - if (end >= start) { - block->set_first_instruction_index(start); - block->set_last_instruction_index(end); - } - block->set_argument_count(argument_count_); - next_block_ = NULL; - current_block_ = NULL; -} - - -void LChunkBuilder::VisitInstruction(HInstruction* current) { - HInstruction* old_current = current_instruction_; - current_instruction_ = current; - - LInstruction* instr = NULL; - if (current->CanReplaceWithDummyUses()) { - if (current->OperandCount() == 0) { - instr = DefineAsRegister(new(zone()) LDummy()); - } else { - DCHECK(!current->OperandAt(0)->IsControlInstruction()); - instr = DefineAsRegister(new(zone()) - LDummyUse(UseAny(current->OperandAt(0)))); - } - for (int i = 1; i < current->OperandCount(); ++i) { - if (current->OperandAt(i)->IsControlInstruction()) continue; - LInstruction* dummy = - new(zone()) LDummyUse(UseAny(current->OperandAt(i))); - dummy->set_hydrogen_value(current); - chunk_->AddInstruction(dummy, current_block_); - } - } else { - HBasicBlock* successor; - if (current->IsControlInstruction() && - HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) && - successor != NULL) { - // Always insert a fpu register barrier here when branch is optimized to - // be a direct goto. - // TODO(weiliang): require a better solution. - if (!current->IsGoto()) { - LClobberDoubles* clobber = new (zone()) LClobberDoubles(isolate()); - clobber->set_hydrogen_value(current); - chunk_->AddInstruction(clobber, current_block_); - } - instr = new(zone()) LGoto(successor); - } else { - instr = current->CompileToLithium(this); - } - } - - argument_count_ += current->argument_delta(); - DCHECK(argument_count_ >= 0); - - if (instr != NULL) { - AddInstruction(instr, current); - } - - current_instruction_ = old_current; -} - - -void LChunkBuilder::AddInstruction(LInstruction* instr, - HInstruction* hydrogen_val) { - // Associate the hydrogen instruction first, since we may need it for - // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below. - instr->set_hydrogen_value(hydrogen_val); - -#if DEBUG - // Make sure that the lithium instruction has either no fixed register - // constraints in temps or the result OR no uses that are only used at - // start. If this invariant doesn't hold, the register allocator can decide - // to insert a split of a range immediately before the instruction due to an - // already allocated register needing to be used for the instruction's fixed - // register constraint. In this case, The register allocator won't see an - // interference between the split child and the use-at-start (it would if - // the it was just a plain use), so it is free to move the split child into - // the same register that is used for the use-at-start. 
- // See https://code.google.com/p/chromium/issues/detail?id=201590 - if (!(instr->ClobbersRegisters() && - instr->ClobbersDoubleRegisters(isolate()))) { - int fixed = 0; - int used_at_start = 0; - for (UseIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->IsUsedAtStart()) ++used_at_start; - } - if (instr->Output() != NULL) { - if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed; - } - for (TempIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->HasFixedPolicy()) ++fixed; - } - DCHECK(fixed == 0 || used_at_start == 0); - } -#endif - - if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) { - instr = AssignPointerMap(instr); - } - if (FLAG_stress_environments && !instr->HasEnvironment()) { - instr = AssignEnvironment(instr); - } - if (instr->IsGoto() && - (LGoto::cast(instr)->jumps_to_join() || next_block_->is_osr_entry())) { - // TODO(olivf) Since phis of spilled values are joined as registers - // (not in the stack slot), we need to allow the goto gaps to keep one - // x87 register alive. To ensure all other values are still spilled, we - // insert a fpu register barrier right before. - LClobberDoubles* clobber = new(zone()) LClobberDoubles(isolate()); - clobber->set_hydrogen_value(hydrogen_val); - chunk_->AddInstruction(clobber, current_block_); - } - chunk_->AddInstruction(instr, current_block_); - - CreateLazyBailoutForCall(current_block_, instr, hydrogen_val); -} - - -LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) { - LInstruction* result = new (zone()) LPrologue(); - if (info_->scope()->NeedsContext()) { - result = MarkAsCall(result, instr); - } - return result; -} - - -LInstruction* LChunkBuilder::DoGoto(HGoto* instr) { - return new(zone()) LGoto(instr->FirstSuccessor()); -} - - -LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { - HValue* value = instr->value(); - Representation r = value->representation(); - HType type = value->type(); - ToBooleanHints expected = instr->expected_input_types(); - if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny; - - bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() || - type.IsJSArray() || type.IsHeapNumber() || type.IsString(); - LOperand* temp = !easy_case && (expected & ToBooleanHint::kNeedsMap) - ? TempRegister() - : NULL; - LInstruction* branch = - temp != NULL ? 
new (zone()) LBranch(UseRegister(value), temp) - : new (zone()) LBranch(UseRegisterAtStart(value), temp); - if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) && - (expected & ToBooleanHint::kNeedsMap)) || - expected != ToBooleanHint::kAny)) { - branch = AssignEnvironment(branch); - } - return branch; -} - - -LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) { - return new(zone()) LDebugBreak(); -} - - -LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - return new(zone()) LCmpMapAndBranch(value); -} - - -LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) { - info()->MarkAsRequiresFrame(); - return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value()))); -} - - -LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) { - info()->MarkAsRequiresFrame(); - return DefineAsRegister(new(zone()) LArgumentsElements); -} - - -LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch( - HHasInPrototypeChainAndBranch* instr) { - LOperand* object = UseRegister(instr->object()); - LOperand* prototype = UseRegister(instr->prototype()); - LOperand* temp = TempRegister(); - LHasInPrototypeChainAndBranch* result = - new (zone()) LHasInPrototypeChainAndBranch(object, prototype, temp); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) { - LOperand* receiver = UseRegister(instr->receiver()); - LOperand* function = UseRegister(instr->function()); - LOperand* temp = TempRegister(); - LWrapReceiver* result = - new(zone()) LWrapReceiver(receiver, function, temp); - return AssignEnvironment(DefineSameAsFirst(result)); -} - - -LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) { - LOperand* function = UseFixed(instr->function(), edi); - LOperand* receiver = UseFixed(instr->receiver(), eax); - LOperand* length = UseFixed(instr->length(), ebx); - LOperand* elements = UseFixed(instr->elements(), ecx); - LApplyArguments* result = new(zone()) LApplyArguments(function, - receiver, - length, - elements); - return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) { - int argc = instr->OperandCount(); - for (int i = 0; i < argc; ++i) { - LOperand* argument = UseAny(instr->argument(i)); - AddInstruction(new(zone()) LPushArgument(argument), instr); - } - return NULL; -} - - -LInstruction* LChunkBuilder::DoStoreCodeEntry( - HStoreCodeEntry* store_code_entry) { - LOperand* function = UseRegister(store_code_entry->function()); - LOperand* code_object = UseTempRegister(store_code_entry->code_object()); - return new(zone()) LStoreCodeEntry(function, code_object); -} - - -LInstruction* LChunkBuilder::DoInnerAllocatedObject( - HInnerAllocatedObject* instr) { - LOperand* base_object = UseRegisterAtStart(instr->base_object()); - LOperand* offset = UseRegisterOrConstantAtStart(instr->offset()); - return DefineAsRegister( - new(zone()) LInnerAllocatedObject(base_object, offset)); -} - - -LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) { - return instr->HasNoUses() - ? 
NULL - : DefineAsRegister(new(zone()) LThisFunction); -} - - -LInstruction* LChunkBuilder::DoContext(HContext* instr) { - if (instr->HasNoUses()) return NULL; - - if (info()->IsStub()) { - return DefineFixed(new(zone()) LContext, esi); - } - - return DefineAsRegister(new(zone()) LContext); -} - - -LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) { - LOperand* context = UseFixed(instr->context(), esi); - return MarkAsCall(new(zone()) LDeclareGlobals(context), instr); -} - - -LInstruction* LChunkBuilder::DoCallWithDescriptor( - HCallWithDescriptor* instr) { - CallInterfaceDescriptor descriptor = instr->descriptor(); - DCHECK_EQ(descriptor.GetParameterCount() + - LCallWithDescriptor::kImplicitRegisterParameterCount, - instr->OperandCount()); - - LOperand* target = UseRegisterOrConstantAtStart(instr->target()); - ZoneList<LOperand*> ops(instr->OperandCount(), zone()); - // Target - ops.Add(target, zone()); - // Context - LOperand* op = UseFixed(instr->OperandAt(1), esi); - ops.Add(op, zone()); - // Load register parameters. - int i = 0; - for (; i < descriptor.GetRegisterParameterCount(); i++) { - op = UseFixed(instr->OperandAt( - i + LCallWithDescriptor::kImplicitRegisterParameterCount), - descriptor.GetRegisterParameter(i)); - ops.Add(op, zone()); - } - // Push stack parameters. - for (; i < descriptor.GetParameterCount(); i++) { - op = UseAny(instr->OperandAt( - i + LCallWithDescriptor::kImplicitRegisterParameterCount)); - AddInstruction(new (zone()) LPushArgument(op), instr); - } - - LCallWithDescriptor* result = new(zone()) LCallWithDescriptor( - descriptor, ops, zone()); - if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) { - result->MarkAsSyntacticTailCall(); - } - return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) { - LOperand* context = UseFixed(instr->context(), esi); - LOperand* function = UseFixed(instr->function(), edi); - LInvokeFunction* result = new(zone()) LInvokeFunction(context, function); - if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) { - result->MarkAsSyntacticTailCall(); - } - return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { - switch (instr->op()) { - case kMathCos: - return DoMathCos(instr); - case kMathFloor: - return DoMathFloor(instr); - case kMathRound: - return DoMathRound(instr); - case kMathFround: - return DoMathFround(instr); - case kMathAbs: - return DoMathAbs(instr); - case kMathLog: - return DoMathLog(instr); - case kMathExp: - return DoMathExp(instr); - case kMathSqrt: - return DoMathSqrt(instr); - case kMathPowHalf: - return DoMathPowHalf(instr); - case kMathClz32: - return DoMathClz32(instr); - case kMathSin: - return DoMathSin(instr); - default: - UNREACHABLE(); - } -} - - -LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) { - LOperand* input = UseRegisterAtStart(instr->value()); - LMathFloor* result = new(zone()) LMathFloor(input); - return AssignEnvironment(DefineAsRegister(result)); -} - - -LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) { - LOperand* input = UseRegisterAtStart(instr->value()); - LInstruction* result = DefineAsRegister(new (zone()) LMathRound(input)); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) { - LOperand* input = UseRegister(instr->value()); - LMathFround* result =
new (zone()) LMathFround(input); - return DefineSameAsFirst(result); -} - - -LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) { - LOperand* context = UseAny(instr->context()); // Deferred use. - LOperand* input = UseRegisterAtStart(instr->value()); - LInstruction* result = - DefineSameAsFirst(new(zone()) LMathAbs(context, input)); - Representation r = instr->value()->representation(); - if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result); - if (!r.IsDouble()) result = AssignEnvironment(result); - return result; -} - - -LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseRegisterAtStart(instr->value()); - return MarkAsCall(DefineSameAsFirst(new(zone()) LMathLog(input)), instr); -} - - -LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) { - LOperand* input = UseRegisterAtStart(instr->value()); - LMathClz32* result = new(zone()) LMathClz32(input); - return DefineAsRegister(result); -} - -LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseRegisterAtStart(instr->value()); - return MarkAsCall(DefineSameAsFirst(new (zone()) LMathCos(input)), instr); -} - -LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseRegisterAtStart(instr->value()); - return MarkAsCall(DefineSameAsFirst(new (zone()) LMathSin(input)), instr); -} - -LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->value()->representation().IsDouble()); - LOperand* input = UseRegisterAtStart(instr->value()); - return MarkAsCall(DefineSameAsFirst(new (zone()) LMathExp(input)), instr); -} - - -LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) { - LOperand* input = UseRegisterAtStart(instr->value()); - LOperand* temp1 = FixedTemp(ecx); - LOperand* temp2 = FixedTemp(edx); - LMathSqrt* result = new(zone()) LMathSqrt(input, temp1, temp2); - return MarkAsCall(DefineSameAsFirst(result), instr); -} - - -LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) { - LOperand* input = UseRegisterAtStart(instr->value()); - LMathPowHalf* result = new (zone()) LMathPowHalf(input); - return DefineSameAsFirst(result); -} - - -LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) { - LOperand* context = UseFixed(instr->context(), esi); - LOperand* constructor = UseFixed(instr->constructor(), edi); - LCallNewArray* result = new(zone()) LCallNewArray(context, constructor); - return MarkAsCall(DefineFixed(result, eax), instr); -} - - -LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) { - LOperand* context = UseFixed(instr->context(), esi); - return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr); -} - - -LInstruction* LChunkBuilder::DoRor(HRor* instr) { - return DoShift(Token::ROR, instr); -} - - -LInstruction* LChunkBuilder::DoShr(HShr* instr) { - return DoShift(Token::SHR, instr); -} - - -LInstruction* LChunkBuilder::DoSar(HSar* instr) { - return DoShift(Token::SAR, instr); -} - - -LInstruction* LChunkBuilder::DoShl(HShl* instr) { - return DoShift(Token::SHL, instr); -} - - -LInstruction* LChunkBuilder::DoBitwise(HBitwise* 
instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32)); - - LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); - LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand()); - return DefineSameAsFirst(new(zone()) LBitI(left, right)); - } else { - return DoArithmeticT(instr->op(), instr); - } -} - - -LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I( - dividend, divisor)); - if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || - (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) || - (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && - divisor != 1 && divisor != -1)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) { - DCHECK(instr->representation().IsInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LOperand* temp1 = FixedTemp(eax); - LOperand* temp2 = FixedTemp(edx); - LInstruction* result = DefineFixed(new(zone()) LDivByConstI( - dividend, divisor, temp1, temp2), edx); - if (divisor == 0 || - (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || - !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoDivI(HDiv* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseFixed(instr->left(), eax); - LOperand* divisor = UseRegister(instr->right()); - LOperand* temp = FixedTemp(edx); - LInstruction* result = DefineFixed(new(zone()) LDivI( - dividend, divisor, temp), eax); - if (instr->CheckFlag(HValue::kCanBeDivByZero) || - instr->CheckFlag(HValue::kBailoutOnMinusZero) || - instr->CheckFlag(HValue::kCanOverflow) || - !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoDiv(HDiv* instr) { - if (instr->representation().IsSmiOrInteger32()) { - if (instr->RightIsPowerOf2()) { - return DoDivByPowerOf2I(instr); - } else if (instr->right()->IsConstant()) { - return DoDivByConstI(instr); - } else { - return DoDivI(instr); - } - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::DIV, instr); - } else { - return DoArithmeticT(Token::DIV, instr); - } -} - - -LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) { - LOperand* dividend = UseRegisterAtStart(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = DefineSameAsFirst(new(zone()) LFlooringDivByPowerOf2I( 
- dividend, divisor)); - if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || - (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) { - DCHECK(instr->representation().IsInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LOperand* temp1 = FixedTemp(eax); - LOperand* temp2 = FixedTemp(edx); - LOperand* temp3 = - ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) || - (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ? - NULL : TempRegister(); - LInstruction* result = - DefineFixed(new(zone()) LFlooringDivByConstI(dividend, - divisor, - temp1, - temp2, - temp3), - edx); - if (divisor == 0 || - (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseFixed(instr->left(), eax); - LOperand* divisor = UseRegister(instr->right()); - LOperand* temp = FixedTemp(edx); - LInstruction* result = DefineFixed(new(zone()) LFlooringDivI( - dividend, divisor, temp), eax); - if (instr->CheckFlag(HValue::kCanBeDivByZero) || - instr->CheckFlag(HValue::kBailoutOnMinusZero) || - instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) { - if (instr->RightIsPowerOf2()) { - return DoFlooringDivByPowerOf2I(instr); - } else if (instr->right()->IsConstant()) { - return DoFlooringDivByConstI(instr); - } else { - return DoFlooringDivI(instr); - } -} - - -LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegisterAtStart(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I( - dividend, divisor)); - if (instr->CheckFlag(HValue::kLeftCanBeNegative) && - instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseRegister(instr->left()); - int32_t divisor = instr->right()->GetInteger32Constant(); - LOperand* temp1 = FixedTemp(eax); - LOperand* temp2 = FixedTemp(edx); - LInstruction* result = DefineFixed(new(zone()) LModByConstI( - dividend, divisor, temp1, temp2), eax); - if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoModI(HMod* 
instr) { - DCHECK(instr->representation().IsSmiOrInteger32()); - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* dividend = UseFixed(instr->left(), eax); - LOperand* divisor = UseRegister(instr->right()); - LOperand* temp = FixedTemp(edx); - LInstruction* result = DefineFixed(new(zone()) LModI( - dividend, divisor, temp), edx); - if (instr->CheckFlag(HValue::kCanBeDivByZero) || - instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoMod(HMod* instr) { - if (instr->representation().IsSmiOrInteger32()) { - if (instr->RightIsPowerOf2()) { - return DoModByPowerOf2I(instr); - } else if (instr->right()->IsConstant()) { - return DoModByConstI(instr); - } else { - return DoModI(instr); - } - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::MOD, instr); - } else { - return DoArithmeticT(Token::MOD, instr); - } -} - - -LInstruction* LChunkBuilder::DoMul(HMul* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); - HValue* h_right = instr->BetterRightOperand(); - LOperand* right = UseOrConstant(h_right); - LOperand* temp = NULL; - if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - temp = TempRegister(); - } - LMulI* mul = new(zone()) LMulI(left, right, temp); - int constant_value = - h_right->IsConstant() ? HConstant::cast(h_right)->Integer32Value() : 0; - // |needs_environment| must mirror the cases where LCodeGen::DoMulI calls - // |DeoptimizeIf|. - bool needs_environment = - instr->CheckFlag(HValue::kCanOverflow) || - (instr->CheckFlag(HValue::kBailoutOnMinusZero) && - (!right->IsConstantOperand() || constant_value <= 0)); - if (needs_environment) { - AssignEnvironment(mul); - } - return DefineSameAsFirst(mul); - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::MUL, instr); - } else { - return DoArithmeticT(Token::MUL, instr); - } -} - - -LInstruction* LChunkBuilder::DoSub(HSub* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseOrConstantAtStart(instr->right()); - LSubI* sub = new(zone()) LSubI(left, right); - LInstruction* result = DefineSameAsFirst(sub); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::SUB, instr); - } else { - return DoArithmeticT(Token::SUB, instr); - } -} - - -LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - // Check to see if it would be advantageous to use an lea instruction rather - // than an add. This is the case when no overflow check is needed and there - // are multiple uses of the add's inputs, so using a 3-register add will - // preserve all input values for later uses. 
- bool use_lea = LAddI::UseLea(instr); - LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); - HValue* right_candidate = instr->BetterRightOperand(); - LOperand* right = use_lea - ? UseRegisterOrConstantAtStart(right_candidate) - : UseOrConstantAtStart(right_candidate); - LAddI* add = new(zone()) LAddI(left, right); - bool can_overflow = instr->CheckFlag(HValue::kCanOverflow); - LInstruction* result = use_lea - ? DefineAsRegister(add) - : DefineSameAsFirst(add); - if (can_overflow) { - result = AssignEnvironment(result); - } - return result; - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::ADD, instr); - } else if (instr->representation().IsExternal()) { - DCHECK(instr->IsConsistentExternalRepresentation()); - DCHECK(!instr->CheckFlag(HValue::kCanOverflow)); - bool use_lea = LAddI::UseLea(instr); - LOperand* left = UseRegisterAtStart(instr->left()); - HValue* right_candidate = instr->right(); - LOperand* right = use_lea - ? UseRegisterOrConstantAtStart(right_candidate) - : UseOrConstantAtStart(right_candidate); - LAddI* add = new(zone()) LAddI(left, right); - LInstruction* result = use_lea - ? DefineAsRegister(add) - : DefineSameAsFirst(add); - return result; - } else { - return DoArithmeticT(Token::ADD, instr); - } -} - - -LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) { - LOperand* left = NULL; - LOperand* right = NULL; - LOperand* scratch = TempRegister(); - - if (instr->representation().IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(instr->representation())); - DCHECK(instr->right()->representation().Equals(instr->representation())); - left = UseRegisterAtStart(instr->BetterLeftOperand()); - right = UseOrConstantAtStart(instr->BetterRightOperand()); - } else { - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - DCHECK(instr->right()->representation().IsDouble()); - left = UseRegisterAtStart(instr->left()); - right = UseRegisterAtStart(instr->right()); - } - LMathMinMax* minmax = new (zone()) LMathMinMax(left, right, scratch); - return DefineSameAsFirst(minmax); -} - - -LInstruction* LChunkBuilder::DoPower(HPower* instr) { - // Unlike ia32, we don't have a MathPowStub and directly call c function. 
- DCHECK(instr->representation().IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterAtStart(instr->right()); - LPower* result = new (zone()) LPower(left, right); - return MarkAsCall(DefineSameAsFirst(result), instr); -} - - -LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { - DCHECK(instr->left()->representation().IsSmiOrTagged()); - DCHECK(instr->right()->representation().IsSmiOrTagged()); - LOperand* context = UseFixed(instr->context(), esi); - LOperand* left = UseFixed(instr->left(), edx); - LOperand* right = UseFixed(instr->right(), eax); - LCmpT* result = new(zone()) LCmpT(context, left, right); - return MarkAsCall(DefineFixed(result, eax), instr); -} - - -LInstruction* LChunkBuilder::DoCompareNumericAndBranch( - HCompareNumericAndBranch* instr) { - Representation r = instr->representation(); - if (r.IsSmiOrInteger32()) { - DCHECK(instr->left()->representation().Equals(r)); - DCHECK(instr->right()->representation().Equals(r)); - LOperand* left = UseRegisterOrConstantAtStart(instr->left()); - LOperand* right = UseOrConstantAtStart(instr->right()); - return new(zone()) LCompareNumericAndBranch(left, right); - } else { - DCHECK(r.IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - DCHECK(instr->right()->representation().IsDouble()); - LOperand* left; - LOperand* right; - if (CanBeImmediateConstant(instr->left()) && - CanBeImmediateConstant(instr->right())) { - // The code generator requires either both inputs to be constant - // operands, or neither. - left = UseConstant(instr->left()); - right = UseConstant(instr->right()); - } else { - left = UseRegisterAtStart(instr->left()); - right = UseRegisterAtStart(instr->right()); - } - return new(zone()) LCompareNumericAndBranch(left, right); - } -} - - -LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch( - HCompareObjectEqAndBranch* instr) { - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseOrConstantAtStart(instr->right()); - return new(zone()) LCmpObjectEqAndBranch(left, right); -} - - -LInstruction* LChunkBuilder::DoCompareHoleAndBranch( - HCompareHoleAndBranch* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - return new (zone()) LCmpHoleAndBranch(value); -} - - -LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - LOperand* temp = TempRegister(); - return new(zone()) LIsStringAndBranch(UseRegister(instr->value()), temp); -} - - -LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - return new(zone()) LIsSmiAndBranch(Use(instr->value())); -} - - -LInstruction* LChunkBuilder::DoIsUndetectableAndBranch( - HIsUndetectableAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - return new(zone()) LIsUndetectableAndBranch( - UseRegisterAtStart(instr->value()), TempRegister()); -} - - -LInstruction* LChunkBuilder::DoStringCompareAndBranch( - HStringCompareAndBranch* instr) { - DCHECK(instr->left()->representation().IsTagged()); - DCHECK(instr->right()->representation().IsTagged()); - LOperand* context = UseFixed(instr->context(), esi); - LOperand* left = UseFixed(instr->left(), edx); - LOperand* right = UseFixed(instr->right(), eax); - - LStringCompareAndBranch* result = new(zone()) - LStringCompareAndBranch(context, left, right); - - return MarkAsCall(result, instr); -} - - 
-LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch( - HHasInstanceTypeAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - return new(zone()) LHasInstanceTypeAndBranch( - UseRegisterAtStart(instr->value()), - TempRegister()); -} - -LInstruction* LChunkBuilder::DoClassOfTestAndBranch( - HClassOfTestAndBranch* instr) { - DCHECK(instr->value()->representation().IsTagged()); - return new (zone()) LClassOfTestAndBranch(UseRegister(instr->value()), - TempRegister(), TempRegister()); -} - -LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) { - LOperand* string = UseRegisterAtStart(instr->string()); - LOperand* index = UseRegisterOrConstantAtStart(instr->index()); - return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index)); -} - - -LOperand* LChunkBuilder::GetSeqStringSetCharOperand(HSeqStringSetChar* instr) { - if (instr->encoding() == String::ONE_BYTE_ENCODING) { - if (FLAG_debug_code) { - return UseFixed(instr->value(), eax); - } else { - return UseFixedOrConstant(instr->value(), eax); - } - } else { - if (FLAG_debug_code) { - return UseRegisterAtStart(instr->value()); - } else { - return UseRegisterOrConstantAtStart(instr->value()); - } - } -} - - -LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) { - LOperand* string = UseRegisterAtStart(instr->string()); - LOperand* index = FLAG_debug_code - ? UseRegisterAtStart(instr->index()) - : UseRegisterOrConstantAtStart(instr->index()); - LOperand* value = GetSeqStringSetCharOperand(instr); - LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), esi) : NULL; - LInstruction* result = new(zone()) LSeqStringSetChar(context, string, - index, value); - if (FLAG_debug_code) { - result = MarkAsCall(result, instr); - } - return result; -} - - -LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { - if (!FLAG_debug_code && instr->skip_check()) return NULL; - LOperand* index = UseRegisterOrConstantAtStart(instr->index()); - LOperand* length = !index->IsConstantOperand() - ? UseOrConstantAtStart(instr->length()) - : UseAtStart(instr->length()); - LInstruction* result = new(zone()) LBoundsCheck(index, length); - if (!FLAG_debug_code || !instr->skip_check()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) { - // The control instruction marking the end of a block that completed - // abruptly (e.g., threw an exception). There is nothing specific to do. - return NULL; -} - - -LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) { - return NULL; -} - - -LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) { - // All HForceRepresentation instructions should be eliminated in the - // representation change phase of Hydrogen. 
- UNREACHABLE(); -} - - -LInstruction* LChunkBuilder::DoChange(HChange* instr) { - Representation from = instr->from(); - Representation to = instr->to(); - HValue* val = instr->value(); - if (from.IsSmi()) { - if (to.IsTagged()) { - LOperand* value = UseRegister(val); - return DefineSameAsFirst(new(zone()) LDummyUse(value)); - } - from = Representation::Tagged(); - } - if (from.IsTagged()) { - if (to.IsDouble()) { - LOperand* value = UseRegister(val); - LOperand* temp = TempRegister(); - LInstruction* result = - DefineAsRegister(new(zone()) LNumberUntagD(value, temp)); - if (!val->representation().IsSmi()) result = AssignEnvironment(result); - return result; - } else if (to.IsSmi()) { - LOperand* value = UseRegister(val); - if (val->type().IsSmi()) { - return DefineSameAsFirst(new(zone()) LDummyUse(value)); - } - return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value))); - } else { - DCHECK(to.IsInteger32()); - if (val->type().IsSmi() || val->representation().IsSmi()) { - LOperand* value = UseRegister(val); - return DefineSameAsFirst(new(zone()) LSmiUntag(value, false)); - } else { - LOperand* value = UseRegister(val); - LInstruction* result = DefineSameAsFirst(new(zone()) LTaggedToI(value)); - if (!val->representation().IsSmi()) result = AssignEnvironment(result); - return result; - } - } - } else if (from.IsDouble()) { - if (to.IsTagged()) { - info()->MarkAsDeferredCalling(); - LOperand* value = UseRegisterAtStart(val); - LOperand* temp = FLAG_inline_new ? TempRegister() : NULL; - LUnallocated* result_temp = TempRegister(); - LNumberTagD* result = new(zone()) LNumberTagD(value, temp); - return AssignPointerMap(Define(result, result_temp)); - } else if (to.IsSmi()) { - LOperand* value = UseRegister(val); - return AssignEnvironment( - DefineAsRegister(new(zone()) LDoubleToSmi(value))); - } else { - DCHECK(to.IsInteger32()); - bool truncating = instr->CanTruncateToInt32(); - LOperand* value = UseRegister(val); - LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value)); - if (!truncating) result = AssignEnvironment(result); - return result; - } - } else if (from.IsInteger32()) { - info()->MarkAsDeferredCalling(); - if (to.IsTagged()) { - if (!instr->CheckFlag(HValue::kCanOverflow)) { - LOperand* value = UseRegister(val); - return DefineSameAsFirst(new(zone()) LSmiTag(value)); - } else if (val->CheckFlag(HInstruction::kUint32)) { - LOperand* value = UseRegister(val); - LOperand* temp = TempRegister(); - LNumberTagU* result = new(zone()) LNumberTagU(value, temp); - return AssignPointerMap(DefineSameAsFirst(result)); - } else { - LOperand* value = UseRegister(val); - LOperand* temp = TempRegister(); - LNumberTagI* result = new(zone()) LNumberTagI(value, temp); - return AssignPointerMap(DefineSameAsFirst(result)); - } - } else if (to.IsSmi()) { - LOperand* value = UseRegister(val); - LInstruction* result = DefineSameAsFirst(new(zone()) LSmiTag(value)); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; - } else { - DCHECK(to.IsDouble()); - if (val->CheckFlag(HInstruction::kUint32)) { - return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val))); - } else { - return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val))); - } - } - } - UNREACHABLE(); -} - - -LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) { - LOperand* value = UseAtStart(instr->value()); - LInstruction* result = new(zone()) LCheckNonSmi(value); - if (!instr->value()->type().IsHeapObject()) { - result = 
AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - return AssignEnvironment(new(zone()) LCheckSmi(value)); -} - - -LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered( - HCheckArrayBufferNotNeutered* instr) { - LOperand* view = UseRegisterAtStart(instr->value()); - LOperand* scratch = TempRegister(); - LCheckArrayBufferNotNeutered* result = - new (zone()) LCheckArrayBufferNotNeutered(view, scratch); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* temp = TempRegister(); - LCheckInstanceType* result = new(zone()) LCheckInstanceType(value, temp); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) { - // If the object is in new space, we'll emit a global cell compare and so - // want the value in a register. If the object gets promoted before we - // emit code, we will still get the register but will do an immediate - // compare instead of the cell compare. This is safe. - LOperand* value = instr->object_in_new_space() - ? UseRegisterAtStart(instr->value()) : UseAtStart(instr->value()); - return AssignEnvironment(new(zone()) LCheckValue(value)); -} - - -LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) { - if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps; - LOperand* value = UseRegisterAtStart(instr->value()); - LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value)); - if (instr->HasMigrationTarget()) { - info()->MarkAsDeferredCalling(); - result = AssignPointerMap(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) { - HValue* value = instr->value(); - Representation input_rep = value->representation(); - if (input_rep.IsDouble()) { - LOperand* reg = UseRegister(value); - return DefineFixed(new (zone()) LClampDToUint8(reg), eax); - } else if (input_rep.IsInteger32()) { - LOperand* reg = UseFixed(value, eax); - return DefineFixed(new(zone()) LClampIToUint8(reg), eax); - } else { - DCHECK(input_rep.IsSmiOrTagged()); - LOperand* value = UseRegister(instr->value()); - LClampTToUint8NoSSE2* res = - new(zone()) LClampTToUint8NoSSE2(value, TempRegister(), - TempRegister(), TempRegister()); - return AssignEnvironment(DefineFixed(res, ecx)); - } -} - - -LInstruction* LChunkBuilder::DoReturn(HReturn* instr) { - LOperand* context = info()->IsStub() ? 
UseFixed(instr->context(), esi) : NULL; - LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count()); - return new(zone()) LReturn( - UseFixed(instr->value(), eax), context, parameter_count); -} - - -LInstruction* LChunkBuilder::DoConstant(HConstant* instr) { - Representation r = instr->representation(); - if (r.IsSmi()) { - return DefineAsRegister(new(zone()) LConstantS); - } else if (r.IsInteger32()) { - return DefineAsRegister(new(zone()) LConstantI); - } else if (r.IsDouble()) { - return DefineAsRegister(new (zone()) LConstantD); - } else if (r.IsExternal()) { - return DefineAsRegister(new(zone()) LConstantE); - } else if (r.IsTagged()) { - return DefineAsRegister(new(zone()) LConstantT); - } else { - UNREACHABLE(); - } -} - - -LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) { - LOperand* context = UseRegisterAtStart(instr->value()); - LInstruction* result = - DefineAsRegister(new(zone()) LLoadContextSlot(context)); - if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) { - LOperand* value; - LOperand* temp; - LOperand* context = UseRegister(instr->context()); - if (instr->NeedsWriteBarrier()) { - value = UseTempRegister(instr->value()); - temp = TempRegister(); - } else { - value = UseRegister(instr->value()); - temp = NULL; - } - LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp); - if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) { - LOperand* obj = (instr->access().IsExternalMemory() && - instr->access().offset() == 0) - ? UseRegisterOrConstantAtStart(instr->object()) - : UseRegisterAtStart(instr->object()); - return DefineAsRegister(new(zone()) LLoadNamedField(obj)); -} - - -LInstruction* LChunkBuilder::DoLoadFunctionPrototype( - HLoadFunctionPrototype* instr) { - return AssignEnvironment(DefineAsRegister( - new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()), - TempRegister()))); -} - - -LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) { - return DefineAsRegister(new(zone()) LLoadRoot); -} - - -LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { - DCHECK(instr->key()->representation().IsSmiOrInteger32()); - ElementsKind elements_kind = instr->elements_kind(); - bool clobbers_key = ExternalArrayOpRequiresTemp( - instr->key()->representation(), elements_kind); - LOperand* key = clobbers_key - ? 
UseTempRegister(instr->key()) - : UseRegisterOrConstantAtStart(instr->key()); - LInstruction* result = NULL; - - if (!instr->is_fixed_typed_array()) { - LOperand* obj = UseRegisterAtStart(instr->elements()); - result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr)); - } else { - DCHECK( - (instr->representation().IsInteger32() && - !(IsDoubleOrFloatElementsKind(instr->elements_kind()))) || - (instr->representation().IsDouble() && - (IsDoubleOrFloatElementsKind(instr->elements_kind())))); - LOperand* backing_store = UseRegister(instr->elements()); - LOperand* backing_store_owner = UseAny(instr->backing_store_owner()); - result = DefineAsRegister( - new (zone()) LLoadKeyed(backing_store, key, backing_store_owner)); - } - - bool needs_environment; - if (instr->is_fixed_typed_array()) { - // see LCodeGen::DoLoadKeyedExternalArray - needs_environment = elements_kind == UINT32_ELEMENTS && - !instr->CheckFlag(HInstruction::kUint32); - } else { - // see LCodeGen::DoLoadKeyedFixedDoubleArray and - // LCodeGen::DoLoadKeyedFixedArray - needs_environment = - instr->RequiresHoleCheck() || - (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && info()->IsStub()); - } - - if (needs_environment) { - result = AssignEnvironment(result); - } - return result; -} - - -LOperand* LChunkBuilder::GetStoreKeyedValueOperand(HStoreKeyed* instr) { - ElementsKind elements_kind = instr->elements_kind(); - - // Determine if we need a byte register in this case for the value. - bool val_is_fixed_register = - elements_kind == UINT8_ELEMENTS || - elements_kind == INT8_ELEMENTS || - elements_kind == UINT8_CLAMPED_ELEMENTS; - if (val_is_fixed_register) { - return UseFixed(instr->value(), eax); - } - - if (IsDoubleOrFloatElementsKind(elements_kind)) { - return UseRegisterAtStart(instr->value()); - } - - return UseRegister(instr->value()); -} - - -LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { - if (!instr->is_fixed_typed_array()) { - DCHECK(instr->elements()->representation().IsTagged()); - DCHECK(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsSmi()); - - if (instr->value()->representation().IsDouble()) { - LOperand* object = UseRegisterAtStart(instr->elements()); - // For storing double hole, no fp register required. - LOperand* val = instr->IsConstantHoleStore() - ? 
NULL - : UseRegisterAtStart(instr->value()); - LOperand* key = UseRegisterOrConstantAtStart(instr->key()); - return new (zone()) LStoreKeyed(object, key, val, nullptr); - } else { - DCHECK(instr->value()->representation().IsSmiOrTagged()); - bool needs_write_barrier = instr->NeedsWriteBarrier(); - - LOperand* obj = UseRegister(instr->elements()); - LOperand* val; - LOperand* key; - if (needs_write_barrier) { - val = UseTempRegister(instr->value()); - key = UseTempRegister(instr->key()); - } else { - val = UseRegisterOrConstantAtStart(instr->value()); - key = UseRegisterOrConstantAtStart(instr->key()); - } - return new (zone()) LStoreKeyed(obj, key, val, nullptr); - } - } - - ElementsKind elements_kind = instr->elements_kind(); - DCHECK( - (instr->value()->representation().IsInteger32() && - !IsDoubleOrFloatElementsKind(elements_kind)) || - (instr->value()->representation().IsDouble() && - IsDoubleOrFloatElementsKind(elements_kind))); - DCHECK(instr->elements()->representation().IsExternal()); - - LOperand* backing_store = UseRegister(instr->elements()); - LOperand* val = GetStoreKeyedValueOperand(instr); - bool clobbers_key = ExternalArrayOpRequiresTemp( - instr->key()->representation(), elements_kind); - LOperand* key = clobbers_key - ? UseTempRegister(instr->key()) - : UseRegisterOrConstantAtStart(instr->key()); - LOperand* backing_store_owner = UseAny(instr->backing_store_owner()); - return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner); -} - - -LInstruction* LChunkBuilder::DoTransitionElementsKind( - HTransitionElementsKind* instr) { - if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) { - LOperand* object = UseRegister(instr->object()); - LOperand* new_map_reg = TempRegister(); - LOperand* temp_reg = TempRegister(); - LTransitionElementsKind* result = - new(zone()) LTransitionElementsKind(object, NULL, - new_map_reg, temp_reg); - return result; - } else { - LOperand* object = UseFixed(instr->object(), eax); - LOperand* context = UseFixed(instr->context(), esi); - LTransitionElementsKind* result = - new(zone()) LTransitionElementsKind(object, context, NULL, NULL); - return MarkAsCall(result, instr); - } -} - - -LInstruction* LChunkBuilder::DoTrapAllocationMemento( - HTrapAllocationMemento* instr) { - LOperand* object = UseRegister(instr->object()); - LOperand* temp = TempRegister(); - LTrapAllocationMemento* result = - new(zone()) LTrapAllocationMemento(object, temp); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) { - info()->MarkAsDeferredCalling(); - LOperand* context = UseFixed(instr->context(), esi); - LOperand* object = Use(instr->object()); - LOperand* elements = Use(instr->elements()); - LOperand* key = UseRegisterOrConstant(instr->key()); - LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity()); - - LMaybeGrowElements* result = new (zone()) - LMaybeGrowElements(context, object, elements, key, current_capacity); - DefineFixed(result, eax); - return AssignPointerMap(AssignEnvironment(result)); -} - - -LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { - bool is_in_object = instr->access().IsInobject(); - bool is_external_location = instr->access().IsExternalMemory() && - instr->access().offset() == 0; - bool needs_write_barrier = instr->NeedsWriteBarrier(); - bool needs_write_barrier_for_map = instr->has_transition() && - instr->NeedsWriteBarrierForMap(); - - LOperand* obj; - if (needs_write_barrier) { - obj = 
is_in_object - ? UseRegister(instr->object()) - : UseTempRegister(instr->object()); - } else if (is_external_location) { - DCHECK(!is_in_object); - DCHECK(!needs_write_barrier); - DCHECK(!needs_write_barrier_for_map); - obj = UseRegisterOrConstant(instr->object()); - } else { - obj = needs_write_barrier_for_map - ? UseRegister(instr->object()) - : UseRegisterAtStart(instr->object()); - } - - bool can_be_constant = instr->value()->IsConstant() && - HConstant::cast(instr->value())->NotInNewSpace() && - !instr->field_representation().IsDouble(); - - LOperand* val; - if (instr->field_representation().IsInteger8() || - instr->field_representation().IsUInteger8()) { - // mov_b requires a byte register (i.e. any of eax, ebx, ecx, edx). - // Just force the value to be in eax and we're safe here. - val = UseFixed(instr->value(), eax); - } else if (needs_write_barrier) { - val = UseTempRegister(instr->value()); - } else if (can_be_constant) { - val = UseRegisterOrConstant(instr->value()); - } else if (instr->field_representation().IsDouble()) { - val = UseRegisterAtStart(instr->value()); - } else { - val = UseRegister(instr->value()); - } - - // We only need a scratch register if we have a write barrier or we - // have a store into the properties array (not in-object-property). - LOperand* temp = (!is_in_object || needs_write_barrier || - needs_write_barrier_for_map) ? TempRegister() : NULL; - - // We need a temporary register for write barrier of the map field. - LOperand* temp_map = needs_write_barrier_for_map ? TempRegister() : NULL; - - return new(zone()) LStoreNamedField(obj, val, temp, temp_map); -} - - -LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) { - LOperand* context = UseFixed(instr->context(), esi); - LOperand* left = UseFixed(instr->left(), edx); - LOperand* right = UseFixed(instr->right(), eax); - LStringAdd* string_add = new(zone()) LStringAdd(context, left, right); - return MarkAsCall(DefineFixed(string_add, eax), instr); -} - - -LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) { - LOperand* string = UseTempRegister(instr->string()); - LOperand* index = UseTempRegister(instr->index()); - LOperand* context = UseAny(instr->context()); - LStringCharCodeAt* result = - new(zone()) LStringCharCodeAt(context, string, index); - return AssignPointerMap(DefineAsRegister(result)); -} - - -LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) { - LOperand* char_code = UseRegister(instr->value()); - LOperand* context = UseAny(instr->context()); - LStringCharFromCode* result = - new(zone()) LStringCharFromCode(context, char_code); - return AssignPointerMap(DefineAsRegister(result)); -} - - -LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) { - LOperand* size = instr->size()->IsConstant() ? 
UseConstant(instr->size()) - : UseRegister(instr->size()); - if (instr->IsAllocationFolded()) { - LOperand* temp = TempRegister(); - LFastAllocate* result = new (zone()) LFastAllocate(size, temp); - return DefineAsRegister(result); - } else { - info()->MarkAsDeferredCalling(); - LOperand* context = UseAny(instr->context()); - LOperand* temp = TempRegister(); - LAllocate* result = new (zone()) LAllocate(context, size, temp); - return AssignPointerMap(DefineAsRegister(result)); - } -} - - -LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) { - DCHECK(argument_count_ == 0); - allocator_->MarkAsOsrEntry(); - current_block_->last_environment()->set_ast_id(instr->ast_id()); - return AssignEnvironment(new(zone()) LOsrEntry); -} - - -LInstruction* LChunkBuilder::DoParameter(HParameter* instr) { - LParameter* result = new(zone()) LParameter; - if (instr->kind() == HParameter::STACK_PARAMETER) { - int spill_index = chunk()->GetParameterStackSlot(instr->index()); - return DefineAsSpilled(result, spill_index); - } else { - DCHECK(info()->IsStub()); - CallInterfaceDescriptor descriptor = graph()->descriptor(); - int index = static_cast<int>(instr->index()); - Register reg = descriptor.GetRegisterParameter(index); - return DefineFixed(result, reg); - } -} - - -LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) { - // Use an index that corresponds to the location in the unoptimized frame, - // which the optimized frame will subsume. - int env_index = instr->index(); - int spill_index = 0; - if (instr->environment()->is_parameter_index(env_index)) { - spill_index = chunk()->GetParameterStackSlot(env_index); - } else { - spill_index = env_index - instr->environment()->first_local_index(); - if (spill_index > LUnallocated::kMaxFixedSlotIndex) { - Retry(kNotEnoughSpillSlotsForOsr); - spill_index = 0; - } - spill_index += StandardFrameConstants::kFixedSlotCount; - } - return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index); -} - - -LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) { - // There are no real uses of the arguments object. - // arguments.length and element access are supported directly on - // stack arguments, and any real arguments object use causes a bailout. - // So this value is never used. - return NULL; -} - - -LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) { - instr->ReplayEnvironment(current_block_->last_environment()); - - // There are no real uses of a captured object. 
- return NULL; -} - - -LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) { - info()->MarkAsRequiresFrame(); - LOperand* args = UseRegister(instr->arguments()); - LOperand* length; - LOperand* index; - if (instr->length()->IsConstant() && instr->index()->IsConstant()) { - length = UseRegisterOrConstant(instr->length()); - index = UseOrConstant(instr->index()); - } else { - length = UseTempRegister(instr->length()); - index = Use(instr->index()); - } - return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index)); -} - - -LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) { - LOperand* context = UseFixed(instr->context(), esi); - LOperand* value = UseFixed(instr->value(), ebx); - LTypeof* result = new(zone()) LTypeof(context, value); - return MarkAsCall(DefineFixed(result, eax), instr); -} - - -LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) { - return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value())); -} - - -LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) { - instr->ReplayEnvironment(current_block_->last_environment()); - return NULL; -} - - -LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) { - info()->MarkAsDeferredCalling(); - if (instr->is_function_entry()) { - LOperand* context = UseFixed(instr->context(), esi); - return MarkAsCall(new(zone()) LStackCheck(context), instr); - } else { - DCHECK(instr->is_backwards_branch()); - LOperand* context = UseAny(instr->context()); - return AssignEnvironment( - AssignPointerMap(new(zone()) LStackCheck(context))); - } -} - - -LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { - HEnvironment* outer = current_block_->last_environment(); - outer->set_ast_id(instr->ReturnId()); - HConstant* undefined = graph()->GetConstantUndefined(); - HEnvironment* inner = outer->CopyForInlining( - instr->closure(), instr->arguments_count(), instr->function(), undefined, - instr->inlining_kind(), instr->syntactic_tail_call_mode()); - // Only replay binding of arguments object if it wasn't removed from graph. 
- if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) { - inner->Bind(instr->arguments_var(), instr->arguments_object()); - } - inner->BindContext(instr->closure_context()); - inner->set_entry(instr); - current_block_->UpdateEnvironment(inner); - return NULL; -} - - -LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) { - LInstruction* pop = NULL; - - HEnvironment* env = current_block_->last_environment(); - - if (env->entry()->arguments_pushed()) { - int argument_count = env->arguments_environment()->parameter_count(); - pop = new(zone()) LDrop(argument_count); - DCHECK(instr->argument_delta() == -argument_count); - } - - HEnvironment* outer = current_block_->last_environment()-> - DiscardInlined(false); - current_block_->UpdateEnvironment(outer); - return pop; -} - - -LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) { - LOperand* context = UseFixed(instr->context(), esi); - LOperand* object = UseFixed(instr->enumerable(), eax); - LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object); - return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) { - LOperand* map = UseRegister(instr->map()); - return AssignEnvironment(DefineAsRegister( - new(zone()) LForInCacheArray(map))); -} - - -LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* map = UseRegisterAtStart(instr->map()); - return AssignEnvironment(new(zone()) LCheckMapValue(value, map)); -} - - -LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) { - LOperand* object = UseRegister(instr->object()); - LOperand* index = UseTempRegister(instr->index()); - LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index); - LInstruction* result = DefineSameAsFirst(load); - return AssignPointerMap(result); -} - -} // namespace internal -} // namespace v8 - -#endif // V8_TARGET_ARCH_X87 diff --git a/src/crankshaft/x87/lithium-x87.h b/src/crankshaft/x87/lithium-x87.h deleted file mode 100644 index 220f0db3bb..0000000000 --- a/src/crankshaft/x87/lithium-x87.h +++ /dev/null @@ -1,2508 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CRANKSHAFT_X87_LITHIUM_X87_H_ -#define V8_CRANKSHAFT_X87_LITHIUM_X87_H_ - -#include "src/crankshaft/hydrogen.h" -#include "src/crankshaft/lithium.h" -#include "src/crankshaft/lithium-allocator.h" -#include "src/safepoint-table.h" -#include "src/utils.h" - -namespace v8 { -namespace internal { - -namespace compiler { -class RCodeVisualizer; -} - -// Forward declarations. 
-class LCodeGen; - -#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \ - V(AccessArgumentsAt) \ - V(AddI) \ - V(Allocate) \ - V(ApplyArguments) \ - V(ArgumentsElements) \ - V(ArgumentsLength) \ - V(ArithmeticD) \ - V(ArithmeticT) \ - V(BitI) \ - V(BoundsCheck) \ - V(Branch) \ - V(CallWithDescriptor) \ - V(CallNewArray) \ - V(CallRuntime) \ - V(CheckArrayBufferNotNeutered) \ - V(CheckInstanceType) \ - V(CheckMaps) \ - V(CheckMapValue) \ - V(CheckNonSmi) \ - V(CheckSmi) \ - V(CheckValue) \ - V(ClampDToUint8) \ - V(ClampIToUint8) \ - V(ClampTToUint8NoSSE2) \ - V(ClassOfTestAndBranch) \ - V(ClobberDoubles) \ - V(CompareNumericAndBranch) \ - V(CmpObjectEqAndBranch) \ - V(CmpHoleAndBranch) \ - V(CmpMapAndBranch) \ - V(CmpT) \ - V(ConstantD) \ - V(ConstantE) \ - V(ConstantI) \ - V(ConstantS) \ - V(ConstantT) \ - V(Context) \ - V(DebugBreak) \ - V(DeclareGlobals) \ - V(Deoptimize) \ - V(DivByConstI) \ - V(DivByPowerOf2I) \ - V(DivI) \ - V(DoubleToI) \ - V(DoubleToSmi) \ - V(Drop) \ - V(Dummy) \ - V(DummyUse) \ - V(FastAllocate) \ - V(FlooringDivByConstI) \ - V(FlooringDivByPowerOf2I) \ - V(FlooringDivI) \ - V(ForInCacheArray) \ - V(ForInPrepareMap) \ - V(Goto) \ - V(HasInPrototypeChainAndBranch) \ - V(HasInstanceTypeAndBranch) \ - V(InnerAllocatedObject) \ - V(InstructionGap) \ - V(Integer32ToDouble) \ - V(InvokeFunction) \ - V(IsStringAndBranch) \ - V(IsSmiAndBranch) \ - V(IsUndetectableAndBranch) \ - V(Label) \ - V(LazyBailout) \ - V(LoadContextSlot) \ - V(LoadFieldByIndex) \ - V(LoadFunctionPrototype) \ - V(LoadKeyed) \ - V(LoadNamedField) \ - V(LoadRoot) \ - V(MathAbs) \ - V(MathClz32) \ - V(MathCos) \ - V(MathExp) \ - V(MathFloor) \ - V(MathFround) \ - V(MathLog) \ - V(MathMinMax) \ - V(MathPowHalf) \ - V(MathRound) \ - V(MathSqrt) \ - V(MaybeGrowElements) \ - V(MathSin) \ - V(ModByConstI) \ - V(ModByPowerOf2I) \ - V(ModI) \ - V(MulI) \ - V(NumberTagD) \ - V(NumberTagI) \ - V(NumberTagU) \ - V(NumberUntagD) \ - V(OsrEntry) \ - V(Parameter) \ - V(Power) \ - V(Prologue) \ - V(PushArgument) \ - V(Return) \ - V(SeqStringGetChar) \ - V(SeqStringSetChar) \ - V(ShiftI) \ - V(SmiTag) \ - V(SmiUntag) \ - V(StackCheck) \ - V(StoreCodeEntry) \ - V(StoreContextSlot) \ - V(StoreKeyed) \ - V(StoreNamedField) \ - V(StringAdd) \ - V(StringCharCodeAt) \ - V(StringCharFromCode) \ - V(StringCompareAndBranch) \ - V(SubI) \ - V(TaggedToI) \ - V(ThisFunction) \ - V(TransitionElementsKind) \ - V(TrapAllocationMemento) \ - V(Typeof) \ - V(TypeofIsAndBranch) \ - V(Uint32ToDouble) \ - V(UnknownOSRValue) \ - V(WrapReceiver) - -#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \ - Opcode opcode() const final { return LInstruction::k##type; } \ - void CompileToNative(LCodeGen* generator) final; \ - const char* Mnemonic() const final { return mnemonic; } \ - static L##type* cast(LInstruction* instr) { \ - DCHECK(instr->Is##type()); \ - return reinterpret_cast(instr); \ - } - - -#define DECLARE_HYDROGEN_ACCESSOR(type) \ - H##type* hydrogen() const { \ - return H##type::cast(hydrogen_value()); \ - } - - -class LInstruction : public ZoneObject { - public: - LInstruction() - : environment_(NULL), - hydrogen_value_(NULL), - bit_field_(IsCallBits::encode(false)) { - } - - virtual ~LInstruction() {} - - virtual void CompileToNative(LCodeGen* generator) = 0; - virtual const char* Mnemonic() const = 0; - virtual void PrintTo(StringStream* stream); - virtual void PrintDataTo(StringStream* stream); - virtual void PrintOutputOperandTo(StringStream* stream); - - enum Opcode { - // Declare a unique enum value for each instruction. 
-#define DECLARE_OPCODE(type) k##type, - LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) kAdapter, - kNumberOfInstructions -#undef DECLARE_OPCODE - }; - - virtual Opcode opcode() const = 0; - - // Declare non-virtual type testers for all leaf IR classes. -#define DECLARE_PREDICATE(type) \ - bool Is##type() const { return opcode() == k##type; } - LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE) -#undef DECLARE_PREDICATE - - // Declare virtual predicates for instructions that don't have - // an opcode. - virtual bool IsGap() const { return false; } - - virtual bool IsControl() const { return false; } - - // Try deleting this instruction if possible. - virtual bool TryDelete() { return false; } - - void set_environment(LEnvironment* env) { environment_ = env; } - LEnvironment* environment() const { return environment_; } - bool HasEnvironment() const { return environment_ != NULL; } - - void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); } - LPointerMap* pointer_map() const { return pointer_map_.get(); } - bool HasPointerMap() const { return pointer_map_.is_set(); } - - void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; } - HValue* hydrogen_value() const { return hydrogen_value_; } - - void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); } - bool IsCall() const { return IsCallBits::decode(bit_field_); } - - void MarkAsSyntacticTailCall() { - bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true); - } - bool IsSyntacticTailCall() const { - return IsSyntacticTailCallBits::decode(bit_field_); - } - - // Interface to the register allocator and iterators. - bool ClobbersTemps() const { return IsCall(); } - bool ClobbersRegisters() const { return IsCall(); } - virtual bool ClobbersDoubleRegisters(Isolate* isolate) const { - return IsCall() || - // We only have rudimentary X87Stack tracking, thus in general - // cannot handle phi-nodes. - (IsControl()); - } - - virtual bool HasResult() const = 0; - virtual LOperand* result() const = 0; - - bool HasDoubleRegisterResult(); - bool HasDoubleRegisterInput(); - bool IsDoubleInput(X87Register reg, LCodeGen* cgen); - - LOperand* FirstInput() { return InputAt(0); } - LOperand* Output() { return HasResult() ? result() : NULL; } - - virtual bool HasInterestingComment(LCodeGen* gen) const { return true; } - -#ifdef DEBUG - void VerifyCall(); -#endif - - virtual int InputCount() = 0; - virtual LOperand* InputAt(int i) = 0; - - private: - // Iterator support. - friend class InputIterator; - - friend class TempIterator; - virtual int TempCount() = 0; - virtual LOperand* TempAt(int i) = 0; - - class IsCallBits: public BitField<bool, 0, 1> {}; - class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> { - }; - - LEnvironment* environment_; - SetOncePointer<LPointerMap> pointer_map_; - HValue* hydrogen_value_; - int bit_field_; -}; - - -// R = number of result operands (0 or 1). -template <int R> -class LTemplateResultInstruction : public LInstruction { - public: - // Allow 0 or 1 output operands. - STATIC_ASSERT(R == 0 || R == 1); - bool HasResult() const final { return R != 0 && result() != NULL; } - void set_result(LOperand* operand) { results_[0] = operand; } - LOperand* result() const override { return results_[0]; } - - protected: - EmbeddedContainer<LOperand*, R> results_; -}; - - -// R = number of result operands (0 or 1). -// I = number of input operands. -// T = number of temporary operands. 
-template <int R, int I, int T> -class LTemplateInstruction : public LTemplateResultInstruction<R> { - protected: - EmbeddedContainer<LOperand*, I> inputs_; - EmbeddedContainer<LOperand*, T> temps_; - - private: - // Iterator support. - int InputCount() final { return I; } - LOperand* InputAt(int i) final { return inputs_[i]; } - - int TempCount() final { return T; } - LOperand* TempAt(int i) final { return temps_[i]; } -}; - - -class LGap : public LTemplateInstruction<0, 0, 0> { - public: - explicit LGap(HBasicBlock* block) : block_(block) { - parallel_moves_[BEFORE] = NULL; - parallel_moves_[START] = NULL; - parallel_moves_[END] = NULL; - parallel_moves_[AFTER] = NULL; - } - - // Can't use the DECLARE-macro here because of sub-classes. - bool IsGap() const final { return true; } - void PrintDataTo(StringStream* stream) override; - static LGap* cast(LInstruction* instr) { - DCHECK(instr->IsGap()); - return reinterpret_cast<LGap*>(instr); - } - - bool IsRedundant() const; - - HBasicBlock* block() const { return block_; } - - enum InnerPosition { - BEFORE, - START, - END, - AFTER, - FIRST_INNER_POSITION = BEFORE, - LAST_INNER_POSITION = AFTER - }; - - LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) { - if (parallel_moves_[pos] == NULL) { - parallel_moves_[pos] = new(zone) LParallelMove(zone); - } - return parallel_moves_[pos]; - } - - LParallelMove* GetParallelMove(InnerPosition pos) { - return parallel_moves_[pos]; - } - - private: - LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1]; - HBasicBlock* block_; -}; - - -class LInstructionGap final : public LGap { - public: - explicit LInstructionGap(HBasicBlock* block) : LGap(block) { } - - bool HasInterestingComment(LCodeGen* gen) const override { - return !IsRedundant(); - } - - DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap") -}; - - -class LClobberDoubles final : public LTemplateInstruction<0, 0, 0> { - public: - explicit LClobberDoubles(Isolate* isolate) { } - - bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; } - - DECLARE_CONCRETE_INSTRUCTION(ClobberDoubles, "clobber-d") -}; - - -class LGoto final : public LTemplateInstruction<0, 0, 0> { - public: - explicit LGoto(HBasicBlock* block) : block_(block) { } - - bool HasInterestingComment(LCodeGen* gen) const override; - DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") - void PrintDataTo(StringStream* stream) override; - bool IsControl() const override { return true; } - - int block_id() const { return block_->block_id(); } - bool ClobbersDoubleRegisters(Isolate* isolate) const override { - return false; - } - - bool jumps_to_join() const { return block_->predecessors()->length() > 1; } - HBasicBlock* block() const { return block_; } - - private: - HBasicBlock* block_; -}; - - -class LPrologue final : public LTemplateInstruction<0, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue") -}; - - -class LLazyBailout final : public LTemplateInstruction<0, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout") -}; - - -class LDummy final : public LTemplateInstruction<1, 0, 0> { - public: - LDummy() {} - DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy") -}; - - -class LDummyUse final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LDummyUse(LOperand* value) { - inputs_[0] = value; - } - DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use") -}; - - -class LDeoptimize final : public LTemplateInstruction<0, 0, 0> { - public: - bool IsControl() const override { return true; } - DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize") - 
DECLARE_HYDROGEN_ACCESSOR(Deoptimize) -}; - - -class LLabel final : public LGap { - public: - explicit LLabel(HBasicBlock* block) - : LGap(block), replacement_(NULL) { } - - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(Label, "label") - - void PrintDataTo(StringStream* stream) override; - - int block_id() const { return block()->block_id(); } - bool is_loop_header() const { return block()->IsLoopHeader(); } - bool is_osr_entry() const { return block()->is_osr_entry(); } - Label* label() { return &label_; } - LLabel* replacement() const { return replacement_; } - void set_replacement(LLabel* label) { replacement_ = label; } - bool HasReplacement() const { return replacement_ != NULL; } - - private: - Label label_; - LLabel* replacement_; -}; - - -class LParameter final : public LTemplateInstruction<1, 0, 0> { - public: - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter") -}; - - -class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> { - public: - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value") -}; - - -template <int I, int T> -class LControlInstruction: public LTemplateInstruction<0, I, T> { - public: - LControlInstruction() : false_label_(NULL), true_label_(NULL) { } - - bool IsControl() const final { return true; } - - int SuccessorCount() { return hydrogen()->SuccessorCount(); } - HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); } - - int TrueDestination(LChunk* chunk) { - return chunk->LookupDestination(true_block_id()); - } - int FalseDestination(LChunk* chunk) { - return chunk->LookupDestination(false_block_id()); - } - - Label* TrueLabel(LChunk* chunk) { - if (true_label_ == NULL) { - true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk)); - } - return true_label_; - } - Label* FalseLabel(LChunk* chunk) { - if (false_label_ == NULL) { - false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk)); - } - return false_label_; - } - - protected: - int true_block_id() { return SuccessorAt(0)->block_id(); } - int false_block_id() { return SuccessorAt(1)->block_id(); } - - private: - HControlInstruction* hydrogen() { - return HControlInstruction::cast(this->hydrogen_value()); - } - - Label* false_label_; - Label* true_label_; -}; - - -class LWrapReceiver final : public LTemplateInstruction<1, 2, 1> { - public: - LWrapReceiver(LOperand* receiver, - LOperand* function, - LOperand* temp) { - inputs_[0] = receiver; - inputs_[1] = function; - temps_[0] = temp; - } - - LOperand* receiver() { return inputs_[0]; } - LOperand* function() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver") - DECLARE_HYDROGEN_ACCESSOR(WrapReceiver) -}; - - -class LApplyArguments final : public LTemplateInstruction<1, 4, 0> { - public: - LApplyArguments(LOperand* function, - LOperand* receiver, - LOperand* length, - LOperand* elements) { - inputs_[0] = function; - inputs_[1] = receiver; - inputs_[2] = length; - inputs_[3] = elements; - } - - LOperand* function() { return inputs_[0]; } - LOperand* receiver() { return inputs_[1]; } - LOperand* length() { return inputs_[2]; } - LOperand* elements() { return inputs_[3]; } - - DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments") - DECLARE_HYDROGEN_ACCESSOR(ApplyArguments) -}; - - -class LAccessArgumentsAt final : public 
LTemplateInstruction<1, 3, 0> { - public: - LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) { - inputs_[0] = arguments; - inputs_[1] = length; - inputs_[2] = index; - } - - LOperand* arguments() { return inputs_[0]; } - LOperand* length() { return inputs_[1]; } - LOperand* index() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at") - - void PrintDataTo(StringStream* stream) override; -}; - - -class LArgumentsLength final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LArgumentsLength(LOperand* elements) { - inputs_[0] = elements; - } - - LOperand* elements() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length") -}; - - -class LArgumentsElements final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements") - DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements) -}; - - -class LDebugBreak final : public LTemplateInstruction<0, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break") -}; - - -class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> { - public: - LModByPowerOf2I(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) - - private: - int32_t divisor_; -}; - - -class LModByConstI final : public LTemplateInstruction<1, 1, 2> { - public: - LModByConstI(LOperand* dividend, - int32_t divisor, - LOperand* temp1, - LOperand* temp2) { - inputs_[0] = dividend; - divisor_ = divisor; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) - - private: - int32_t divisor_; -}; - - -class LModI final : public LTemplateInstruction<1, 2, 1> { - public: - LModI(LOperand* left, LOperand* right, LOperand* temp) { - inputs_[0] = left; - inputs_[1] = right; - temps_[0] = temp; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) -}; - - -class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> { - public: - LDivByPowerOf2I(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i") - DECLARE_HYDROGEN_ACCESSOR(Div) - - private: - int32_t divisor_; -}; - - -class LDivByConstI final : public LTemplateInstruction<1, 1, 2> { - public: - LDivByConstI(LOperand* dividend, - int32_t divisor, - LOperand* temp1, - LOperand* temp2) { - inputs_[0] = dividend; - divisor_ = divisor; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i") - DECLARE_HYDROGEN_ACCESSOR(Div) - - private: - int32_t divisor_; -}; - - -class LDivI final : 
public LTemplateInstruction<1, 2, 1> { - public: - LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) { - inputs_[0] = dividend; - inputs_[1] = divisor; - temps_[0] = temp; - } - - LOperand* dividend() { return inputs_[0]; } - LOperand* divisor() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i") - DECLARE_HYDROGEN_ACCESSOR(BinaryOperation) -}; - - -class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> { - public: - LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) { - inputs_[0] = dividend; - divisor_ = divisor; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - - DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I, - "flooring-div-by-power-of-2-i") - DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) - - private: - int32_t divisor_; -}; - - -class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 3> { - public: - LFlooringDivByConstI(LOperand* dividend, - int32_t divisor, - LOperand* temp1, - LOperand* temp2, - LOperand* temp3) { - inputs_[0] = dividend; - divisor_ = divisor; - temps_[0] = temp1; - temps_[1] = temp2; - temps_[2] = temp3; - } - - LOperand* dividend() { return inputs_[0]; } - int32_t divisor() const { return divisor_; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - LOperand* temp3() { return temps_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i") - DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) - - private: - int32_t divisor_; -}; - - -class LFlooringDivI final : public LTemplateInstruction<1, 2, 1> { - public: - LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) { - inputs_[0] = dividend; - inputs_[1] = divisor; - temps_[0] = temp; - } - - LOperand* dividend() { return inputs_[0]; } - LOperand* divisor() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i") - DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) -}; - - -class LMulI final : public LTemplateInstruction<1, 2, 1> { - public: - LMulI(LOperand* left, LOperand* right, LOperand* temp) { - inputs_[0] = left; - inputs_[1] = right; - temps_[0] = temp; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i") - DECLARE_HYDROGEN_ACCESSOR(Mul) -}; - - -class LCompareNumericAndBranch final : public LControlInstruction<2, 0> { - public: - LCompareNumericAndBranch(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch, - "compare-numeric-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch) - - Token::Value op() const { return hydrogen()->token(); } - bool is_double() const { - return hydrogen()->representation().IsDouble(); - } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LMathFloor final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathFloor(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - - -class LMathRound final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathRound(LOperand* value) { - inputs_[0] = 
value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - - -class LMathFround final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathFround(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround") -}; - - -class LMathAbs final : public LTemplateInstruction<1, 2, 0> { - public: - LMathAbs(LOperand* context, LOperand* value) { - inputs_[1] = context; - inputs_[0] = value; - } - - LOperand* context() { return inputs_[1]; } - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) -}; - - -class LMathLog final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathLog(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log") -}; - - -class LMathClz32 final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathClz32(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32") -}; - -class LMathCos final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathCos(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos") -}; - -class LMathSin final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathSin(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin") -}; - -class LMathExp final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathExp(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp") -}; - - -class LMathSqrt final : public LTemplateInstruction<1, 1, 2> { - public: - explicit LMathSqrt(LOperand* value, - LOperand* temp1, - LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp1; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt") -}; - - -class LMathPowHalf final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LMathPowHalf(LOperand* value) { inputs_[0] = value; } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half") -}; - - -class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> { - public: - LCmpObjectEqAndBranch(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch") -}; - - -class LCmpHoleAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LCmpHoleAndBranch(LOperand* object) { - inputs_[0] = object; - } - - LOperand* object() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch) -}; - - -class LIsStringAndBranch final : public LControlInstruction<1, 1> { - public: - LIsStringAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = 
temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LIsSmiAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LIsSmiAndBranch(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> { - public: - LIsUndetectableAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch, - "is-undetectable-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LStringCompareAndBranch final : public LControlInstruction<3, 0> { - public: - LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch, - "string-compare-and-branch") - DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch) - - void PrintDataTo(StringStream* stream) override; - - Token::Value op() const { return hydrogen()->token(); } -}; - - -class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 1> { - public: - LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch, - "has-instance-type-and-branch") - DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - -class LClassOfTestAndBranch final : public LControlInstruction<1, 2> { - public: - LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp; - temps_[1] = temp2; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, "class-of-test-and-branch") - DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch) - - void PrintDataTo(StringStream* stream) override; -}; - -class LCmpT final : public LTemplateInstruction<1, 3, 0> { - public: - LCmpT(LOperand* context, LOperand* left, LOperand* right) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t") - DECLARE_HYDROGEN_ACCESSOR(CompareGeneric) - - LOperand* context() { return inputs_[0]; } - Token::Value op() const { return hydrogen()->token(); } -}; - - -class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 1> { - public: - LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype, - LOperand* scratch) { - inputs_[0] = object; - inputs_[1] = prototype; - temps_[0] = scratch; - } - - LOperand* object() const { return inputs_[0]; } - LOperand* prototype() const { return 
inputs_[1]; } - LOperand* scratch() const { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch, - "has-in-prototype-chain-and-branch") - DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch) -}; - - -class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> { - public: - LBoundsCheck(LOperand* index, LOperand* length) { - inputs_[0] = index; - inputs_[1] = length; - } - - LOperand* index() { return inputs_[0]; } - LOperand* length() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check") - DECLARE_HYDROGEN_ACCESSOR(BoundsCheck) -}; - - -class LBitI final : public LTemplateInstruction<1, 2, 0> { - public: - LBitI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i") - DECLARE_HYDROGEN_ACCESSOR(Bitwise) - - Token::Value op() const { return hydrogen()->op(); } -}; - - -class LShiftI final : public LTemplateInstruction<1, 2, 0> { - public: - LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt) - : op_(op), can_deopt_(can_deopt) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i") - - Token::Value op() const { return op_; } - bool can_deopt() const { return can_deopt_; } - - private: - Token::Value op_; - bool can_deopt_; -}; - - -class LSubI final : public LTemplateInstruction<1, 2, 0> { - public: - LSubI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i") - DECLARE_HYDROGEN_ACCESSOR(Sub) -}; - - -class LConstantI final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - int32_t value() const { return hydrogen()->Integer32Value(); } -}; - - -class LConstantS final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); } -}; - - -class LConstantD final : public LTemplateInstruction<1, 0, 1> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); } -}; - - -class LConstantE final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - ExternalReference value() const { - return hydrogen()->ExternalReferenceValue(); - } -}; - - -class LConstantT final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t") - DECLARE_HYDROGEN_ACCESSOR(Constant) - - Handle value(Isolate* isolate) const { - return hydrogen()->handle(isolate); - } -}; - - -class LBranch final : public LControlInstruction<1, 1> { - public: - LBranch(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(Branch, "branch") - DECLARE_HYDROGEN_ACCESSOR(Branch) - - void PrintDataTo(StringStream* stream) override; -}; - - -class 
LCmpMapAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LCmpMapAndBranch(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareMap) - - Handle map() const { return hydrogen()->map().handle(); } -}; - - -class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> { - public: - LSeqStringGetChar(LOperand* string, LOperand* index) { - inputs_[0] = string; - inputs_[1] = index; - } - - LOperand* string() const { return inputs_[0]; } - LOperand* index() const { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char") - DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar) -}; - - -class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 0> { - public: - LSeqStringSetChar(LOperand* context, - LOperand* string, - LOperand* index, - LOperand* value) { - inputs_[0] = context; - inputs_[1] = string; - inputs_[2] = index; - inputs_[3] = value; - } - - LOperand* string() { return inputs_[1]; } - LOperand* index() { return inputs_[2]; } - LOperand* value() { return inputs_[3]; } - - DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char") - DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar) -}; - - -class LAddI final : public LTemplateInstruction<1, 2, 0> { - public: - LAddI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - static bool UseLea(HAdd* add) { - return !add->CheckFlag(HValue::kCanOverflow) && - add->BetterLeftOperand()->UseCount() > 1; - } - - DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i") - DECLARE_HYDROGEN_ACCESSOR(Add) -}; - - -class LMathMinMax final : public LTemplateInstruction<1, 2, 1> { - public: - LMathMinMax(LOperand* left, LOperand* right, LOperand* temp) { - inputs_[0] = left; - inputs_[1] = right; - temps_[0] = temp; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max") - DECLARE_HYDROGEN_ACCESSOR(MathMinMax) -}; - - -class LPower final : public LTemplateInstruction<1, 2, 0> { - public: - LPower(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(Power, "power") - DECLARE_HYDROGEN_ACCESSOR(Power) -}; - - -class LArithmeticD final : public LTemplateInstruction<1, 2, 0> { - public: - LArithmeticD(Token::Value op, LOperand* left, LOperand* right) - : op_(op) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - Token::Value op() const { return op_; } - - Opcode opcode() const override { return LInstruction::kArithmeticD; } - void CompileToNative(LCodeGen* generator) override; - const char* Mnemonic() const override; - - private: - Token::Value op_; -}; - - -class LArithmeticT final : public LTemplateInstruction<1, 3, 0> { - public: - LArithmeticT(Token::Value op, - LOperand* context, - LOperand* left, - LOperand* right) - : op_(op) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - Token::Value op() const { return op_; } - - 
Opcode opcode() const override { return LInstruction::kArithmeticT; } - void CompileToNative(LCodeGen* generator) override; - const char* Mnemonic() const override; - - DECLARE_HYDROGEN_ACCESSOR(BinaryOperation) - - private: - Token::Value op_; -}; - - -class LReturn final : public LTemplateInstruction<0, 3, 0> { - public: - explicit LReturn(LOperand* value, - LOperand* context, - LOperand* parameter_count) { - inputs_[0] = value; - inputs_[1] = context; - inputs_[2] = parameter_count; - } - - bool has_constant_parameter_count() { - return parameter_count()->IsConstantOperand(); - } - LConstantOperand* constant_parameter_count() { - DCHECK(has_constant_parameter_count()); - return LConstantOperand::cast(parameter_count()); - } - LOperand* parameter_count() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(Return, "return") - DECLARE_HYDROGEN_ACCESSOR(Return) -}; - - -class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LLoadNamedField(LOperand* object) { - inputs_[0] = object; - } - - LOperand* object() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field") - DECLARE_HYDROGEN_ACCESSOR(LoadNamedField) -}; - - -class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 1> { - public: - LLoadFunctionPrototype(LOperand* function, LOperand* temp) { - inputs_[0] = function; - temps_[0] = temp; - } - - LOperand* function() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype") - DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype) -}; - - -class LLoadRoot final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root") - DECLARE_HYDROGEN_ACCESSOR(LoadRoot) - - Heap::RootListIndex index() const { return hydrogen()->index(); } -}; - - -class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> { - public: - LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) { - inputs_[0] = elements; - inputs_[1] = key; - inputs_[2] = backing_store_owner; - } - LOperand* elements() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - LOperand* backing_store_owner() { return inputs_[2]; } - ElementsKind elements_kind() const { - return hydrogen()->elements_kind(); - } - bool is_fixed_typed_array() const { - return hydrogen()->is_fixed_typed_array(); - } - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed") - DECLARE_HYDROGEN_ACCESSOR(LoadKeyed) - - void PrintDataTo(StringStream* stream) override; - uint32_t base_offset() const { return hydrogen()->base_offset(); } - bool key_is_smi() { - return hydrogen()->key()->representation().IsTagged(); - } -}; - - -inline static bool ExternalArrayOpRequiresTemp( - Representation key_representation, - ElementsKind elements_kind) { - // Operations that require the key to be divided by two to be converted into - // an index cannot fold the scale operation into a load and need an extra - // temp register to do the work. 
- return key_representation.IsSmi() && - (elements_kind == UINT8_ELEMENTS || elements_kind == INT8_ELEMENTS || - elements_kind == UINT8_CLAMPED_ELEMENTS); -} - - -class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LLoadContextSlot(LOperand* context) { - inputs_[0] = context; - } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot") - DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot) - - int slot_index() { return hydrogen()->slot_index(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LStoreContextSlot final : public LTemplateInstruction<0, 2, 1> { - public: - LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) { - inputs_[0] = context; - inputs_[1] = value; - temps_[0] = temp; - } - - LOperand* context() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot") - DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot) - - int slot_index() { return hydrogen()->slot_index(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LPushArgument final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LPushArgument(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument") -}; - - -class LDrop final : public LTemplateInstruction<0, 0, 0> { - public: - explicit LDrop(int count) : count_(count) { } - - int count() const { return count_; } - - DECLARE_CONCRETE_INSTRUCTION(Drop, "drop") - - private: - int count_; -}; - - -class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 0> { - public: - LStoreCodeEntry(LOperand* function, LOperand* code_object) { - inputs_[0] = function; - inputs_[1] = code_object; - } - - LOperand* function() { return inputs_[0]; } - LOperand* code_object() { return inputs_[1]; } - - void PrintDataTo(StringStream* stream) override; - - DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry") - DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry) -}; - - -class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> { - public: - LInnerAllocatedObject(LOperand* base_object, LOperand* offset) { - inputs_[0] = base_object; - inputs_[1] = offset; - } - - LOperand* base_object() const { return inputs_[0]; } - LOperand* offset() const { return inputs_[1]; } - - void PrintDataTo(StringStream* stream) override; - - DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object") -}; - - -class LThisFunction final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function") - DECLARE_HYDROGEN_ACCESSOR(ThisFunction) -}; - - -class LContext final : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(Context, "context") - DECLARE_HYDROGEN_ACCESSOR(Context) -}; - - -class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LDeclareGlobals(LOperand* context) { - inputs_[0] = context; - } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals") - DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals) -}; - - -class LCallWithDescriptor final : public LTemplateResultInstruction<1> { - public: - LCallWithDescriptor(CallInterfaceDescriptor descriptor, - const ZoneList& operands, Zone* zone) - : inputs_(descriptor.GetRegisterParameterCount() + - 
kImplicitRegisterParameterCount, - zone) { - DCHECK(descriptor.GetRegisterParameterCount() + - kImplicitRegisterParameterCount == - operands.length()); - inputs_.AddAll(operands, zone); - } - - LOperand* target() const { return inputs_[0]; } - - DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor) - - // The target and context are passed as implicit parameters that are not - // explicitly listed in the descriptor. - static const int kImplicitRegisterParameterCount = 2; - - private: - DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor") - - void PrintDataTo(StringStream* stream) override; - - int arity() const { return hydrogen()->argument_count() - 1; } - - ZoneList inputs_; - - // Iterator support. - int InputCount() final { return inputs_.length(); } - LOperand* InputAt(int i) final { return inputs_[i]; } - - int TempCount() final { return 0; } - LOperand* TempAt(int i) final { return NULL; } -}; - - -class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> { - public: - LInvokeFunction(LOperand* context, LOperand* function) { - inputs_[0] = context; - inputs_[1] = function; - } - - LOperand* context() { return inputs_[0]; } - LOperand* function() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function") - DECLARE_HYDROGEN_ACCESSOR(InvokeFunction) - - void PrintDataTo(StringStream* stream) override; - - int arity() const { return hydrogen()->argument_count() - 1; } -}; - - -class LCallNewArray final : public LTemplateInstruction<1, 2, 0> { - public: - LCallNewArray(LOperand* context, LOperand* constructor) { - inputs_[0] = context; - inputs_[1] = constructor; - } - - LOperand* context() { return inputs_[0]; } - LOperand* constructor() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array") - DECLARE_HYDROGEN_ACCESSOR(CallNewArray) - - void PrintDataTo(StringStream* stream) override; - - int arity() const { return hydrogen()->argument_count() - 1; } -}; - - -class LCallRuntime final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LCallRuntime(LOperand* context) { - inputs_[0] = context; - } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime") - DECLARE_HYDROGEN_ACCESSOR(CallRuntime) - - bool ClobbersDoubleRegisters(Isolate* isolate) const override { - return save_doubles() == kDontSaveFPRegs; - } - - const Runtime::Function* function() const { return hydrogen()->function(); } - int arity() const { return hydrogen()->argument_count(); } - SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); } -}; - - -class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LInteger32ToDouble(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double") -}; - - -class LUint32ToDouble final : public LTemplateInstruction<1, 1, 1> { - public: - explicit LUint32ToDouble(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double") -}; - - -class LNumberTagI final : public LTemplateInstruction<1, 1, 1> { - public: - LNumberTagI(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i") -}; - - -class LNumberTagU final : public 
LTemplateInstruction<1, 1, 1> { - public: - LNumberTagU(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u") -}; - - -class LNumberTagD final : public LTemplateInstruction<1, 1, 1> { - public: - LNumberTagD(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d") - DECLARE_HYDROGEN_ACCESSOR(Change) -}; - - -// Sometimes truncating conversion from a tagged value to an int32. -class LDoubleToI final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LDoubleToI(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i") - DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) - - bool truncating() { return hydrogen()->CanTruncateToInt32(); } -}; - - -class LDoubleToSmi final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LDoubleToSmi(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi") - DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) -}; - - -// Truncating conversion from a tagged value to an int32. -class LTaggedToI final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LTaggedToI(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i") - DECLARE_HYDROGEN_ACCESSOR(Change) - - bool truncating() { return hydrogen()->CanTruncateToInt32(); } -}; - - -class LSmiTag final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LSmiTag(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag") - DECLARE_HYDROGEN_ACCESSOR(Change) -}; - - -class LNumberUntagD final : public LTemplateInstruction<1, 1, 1> { - public: - explicit LNumberUntagD(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag") - DECLARE_HYDROGEN_ACCESSOR(Change); - - bool truncating() { return hydrogen()->CanTruncateToNumber(); } -}; - - -class LSmiUntag final : public LTemplateInstruction<1, 1, 0> { - public: - LSmiUntag(LOperand* value, bool needs_check) - : needs_check_(needs_check) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag") - - bool needs_check() const { return needs_check_; } - - private: - bool needs_check_; -}; - - -class LStoreNamedField final : public LTemplateInstruction<0, 2, 2> { - public: - LStoreNamedField(LOperand* obj, - LOperand* val, - LOperand* temp, - LOperand* temp_map) { - inputs_[0] = obj; - inputs_[1] = val; - temps_[0] = temp; - temps_[1] = temp_map; - } - - LOperand* object() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - LOperand* temp_map() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") - DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) - - void PrintDataTo(StringStream* stream) override; -}; - - -class LStoreKeyed final : 
public LTemplateInstruction<0, 4, 0> { - public: - LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val, - LOperand* backing_store_owner) { - inputs_[0] = obj; - inputs_[1] = key; - inputs_[2] = val; - inputs_[3] = backing_store_owner; - } - - bool is_fixed_typed_array() const { - return hydrogen()->is_fixed_typed_array(); - } - LOperand* elements() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - LOperand* value() { return inputs_[2]; } - LOperand* backing_store_owner() { return inputs_[3]; } - ElementsKind elements_kind() const { - return hydrogen()->elements_kind(); - } - - DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyed) - - void PrintDataTo(StringStream* stream) override; - uint32_t base_offset() const { return hydrogen()->base_offset(); } - bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); } -}; - - -class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 2> { - public: - LTransitionElementsKind(LOperand* object, - LOperand* context, - LOperand* new_map_temp, - LOperand* temp) { - inputs_[0] = object; - inputs_[1] = context; - temps_[0] = new_map_temp; - temps_[1] = temp; - } - - LOperand* context() { return inputs_[1]; } - LOperand* object() { return inputs_[0]; } - LOperand* new_map_temp() { return temps_[0]; } - LOperand* temp() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind, - "transition-elements-kind") - DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind) - - void PrintDataTo(StringStream* stream) override; - - Handle original_map() { return hydrogen()->original_map().handle(); } - Handle transitioned_map() { - return hydrogen()->transitioned_map().handle(); - } - ElementsKind from_kind() { return hydrogen()->from_kind(); } - ElementsKind to_kind() { return hydrogen()->to_kind(); } -}; - - -class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> { - public: - LTrapAllocationMemento(LOperand* object, - LOperand* temp) { - inputs_[0] = object; - temps_[0] = temp; - } - - LOperand* object() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, - "trap-allocation-memento") -}; - - -class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> { - public: - LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements, - LOperand* key, LOperand* current_capacity) { - inputs_[0] = context; - inputs_[1] = object; - inputs_[2] = elements; - inputs_[3] = key; - inputs_[4] = current_capacity; - } - - LOperand* context() { return inputs_[0]; } - LOperand* object() { return inputs_[1]; } - LOperand* elements() { return inputs_[2]; } - LOperand* key() { return inputs_[3]; } - LOperand* current_capacity() { return inputs_[4]; } - - bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; } - - DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements) - DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements") -}; - - -class LStringAdd final : public LTemplateInstruction<1, 3, 0> { - public: - LStringAdd(LOperand* context, LOperand* left, LOperand* right) { - inputs_[0] = context; - inputs_[1] = left; - inputs_[2] = right; - } - - LOperand* context() { return inputs_[0]; } - LOperand* left() { return inputs_[1]; } - LOperand* right() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add") - DECLARE_HYDROGEN_ACCESSOR(StringAdd) -}; - - -class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> { - 
public: - LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) { - inputs_[0] = context; - inputs_[1] = string; - inputs_[2] = index; - } - - LOperand* context() { return inputs_[0]; } - LOperand* string() { return inputs_[1]; } - LOperand* index() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at") - DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt) -}; - - -class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> { - public: - LStringCharFromCode(LOperand* context, LOperand* char_code) { - inputs_[0] = context; - inputs_[1] = char_code; - } - - LOperand* context() { return inputs_[0]; } - LOperand* char_code() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code") - DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode) -}; - - -class LCheckValue final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckValue(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value") - DECLARE_HYDROGEN_ACCESSOR(CheckValue) -}; - - -class LCheckArrayBufferNotNeutered final - : public LTemplateInstruction<0, 1, 1> { - public: - explicit LCheckArrayBufferNotNeutered(LOperand* view, LOperand* scratch) { - inputs_[0] = view; - temps_[0] = scratch; - } - - LOperand* view() { return inputs_[0]; } - LOperand* scratch() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered, - "check-array-buffer-not-neutered") - DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered) -}; - - -class LCheckInstanceType final : public LTemplateInstruction<0, 1, 1> { - public: - LCheckInstanceType(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type") - DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType) -}; - - -class LCheckMaps final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckMaps(LOperand* value = NULL) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps") - DECLARE_HYDROGEN_ACCESSOR(CheckMaps) -}; - - -class LCheckSmi final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LCheckSmi(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi") -}; - - -class LClampDToUint8 final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LClampDToUint8(LOperand* value) { - inputs_[0] = value; - } - - LOperand* unclamped() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8") -}; - - -class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> { - public: - explicit LClampIToUint8(LOperand* value) { - inputs_[0] = value; - } - - LOperand* unclamped() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8") -}; - - -// Truncating conversion from a tagged value to an int32. 
-class LClampTToUint8NoSSE2 final : public LTemplateInstruction<1, 1, 3> { - public: - LClampTToUint8NoSSE2(LOperand* unclamped, - LOperand* temp1, - LOperand* temp2, - LOperand* temp3) { - inputs_[0] = unclamped; - temps_[0] = temp1; - temps_[1] = temp2; - temps_[2] = temp3; - } - - LOperand* unclamped() { return inputs_[0]; } - LOperand* scratch() { return temps_[0]; } - LOperand* scratch2() { return temps_[1]; } - LOperand* scratch3() { return temps_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8NoSSE2, - "clamp-t-to-uint8-nosse2") - DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) -}; - - -class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckNonSmi(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi") - DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject) -}; - - -class LAllocate final : public LTemplateInstruction<1, 2, 1> { - public: - LAllocate(LOperand* context, LOperand* size, LOperand* temp) { - inputs_[0] = context; - inputs_[1] = size; - temps_[0] = temp; - } - - LOperand* context() { return inputs_[0]; } - LOperand* size() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate") - DECLARE_HYDROGEN_ACCESSOR(Allocate) -}; - -class LFastAllocate final : public LTemplateInstruction<1, 1, 1> { - public: - LFastAllocate(LOperand* size, LOperand* temp) { - inputs_[0] = size; - temps_[0] = temp; - } - - LOperand* size() const { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate") - DECLARE_HYDROGEN_ACCESSOR(Allocate) -}; - -class LTypeof final : public LTemplateInstruction<1, 2, 0> { - public: - LTypeof(LOperand* context, LOperand* value) { - inputs_[0] = context; - inputs_[1] = value; - } - - LOperand* context() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof") -}; - - -class LTypeofIsAndBranch final : public LControlInstruction<1, 0> { - public: - explicit LTypeofIsAndBranch(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch") - DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch) - - Handle type_literal() { return hydrogen()->type_literal(); } - - void PrintDataTo(StringStream* stream) override; -}; - - -class LOsrEntry final : public LTemplateInstruction<0, 0, 0> { - public: - bool HasInterestingComment(LCodeGen* gen) const override { return false; } - DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry") -}; - - -class LStackCheck final : public LTemplateInstruction<0, 1, 0> { - public: - explicit LStackCheck(LOperand* context) { - inputs_[0] = context; - } - - LOperand* context() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check") - DECLARE_HYDROGEN_ACCESSOR(StackCheck) - - Label* done_label() { return &done_label_; } - - private: - Label done_label_; -}; - - -class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> { - public: - LForInPrepareMap(LOperand* context, LOperand* object) { - inputs_[0] = context; - inputs_[1] = object; - } - - LOperand* context() { return inputs_[0]; } - LOperand* object() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map") -}; - - -class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> { - public: - explicit 
LForInCacheArray(LOperand* map) { - inputs_[0] = map; - } - - LOperand* map() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array") - - int idx() { - return HForInCacheArray::cast(this->hydrogen_value())->idx(); - } -}; - - -class LCheckMapValue final : public LTemplateInstruction<0, 2, 0> { - public: - LCheckMapValue(LOperand* value, LOperand* map) { - inputs_[0] = value; - inputs_[1] = map; - } - - LOperand* value() { return inputs_[0]; } - LOperand* map() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value") -}; - - -class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> { - public: - LLoadFieldByIndex(LOperand* object, LOperand* index) { - inputs_[0] = object; - inputs_[1] = index; - } - - LOperand* object() { return inputs_[0]; } - LOperand* index() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index") -}; - - -class LChunkBuilder; -class LPlatformChunk final : public LChunk { - public: - LPlatformChunk(CompilationInfo* info, HGraph* graph) - : LChunk(info, graph), - num_double_slots_(0) { } - - int GetNextSpillIndex(RegisterKind kind); - LOperand* GetNextSpillSlot(RegisterKind kind); - - int num_double_slots() const { return num_double_slots_; } - - private: - int num_double_slots_; -}; - - -class LChunkBuilder final : public LChunkBuilderBase { - public: - LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator) - : LChunkBuilderBase(info, graph), - current_instruction_(NULL), - current_block_(NULL), - next_block_(NULL), - allocator_(allocator) {} - - // Build the sequence for the graph. - LPlatformChunk* Build(); - - // Declare methods that deal with the individual node types. -#define DECLARE_DO(type) LInstruction* Do##type(H##type* node); - HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) -#undef DECLARE_DO - - LInstruction* DoMathFloor(HUnaryMathOperation* instr); - LInstruction* DoMathRound(HUnaryMathOperation* instr); - LInstruction* DoMathFround(HUnaryMathOperation* instr); - LInstruction* DoMathAbs(HUnaryMathOperation* instr); - LInstruction* DoMathLog(HUnaryMathOperation* instr); - LInstruction* DoMathCos(HUnaryMathOperation* instr); - LInstruction* DoMathSin(HUnaryMathOperation* instr); - LInstruction* DoMathExp(HUnaryMathOperation* instr); - LInstruction* DoMathSqrt(HUnaryMathOperation* instr); - LInstruction* DoMathPowHalf(HUnaryMathOperation* instr); - LInstruction* DoMathClz32(HUnaryMathOperation* instr); - LInstruction* DoDivByPowerOf2I(HDiv* instr); - LInstruction* DoDivByConstI(HDiv* instr); - LInstruction* DoDivI(HDiv* instr); - LInstruction* DoModByPowerOf2I(HMod* instr); - LInstruction* DoModByConstI(HMod* instr); - LInstruction* DoModI(HMod* instr); - LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr); - LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr); - LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr); - - private: - // Methods for getting operands for Use / Define / Temp. - LUnallocated* ToUnallocated(Register reg); - LUnallocated* ToUnallocated(X87Register reg); - - // Methods for setting up define-use relationships. - MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand); - MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register); - - // A value that is guaranteed to be allocated to a register. - // Operand created by UseRegister is guaranteed to be live until the end of - // instruction. 
This means that register allocator will not reuse it's - // register for any other operand inside instruction. - // Operand created by UseRegisterAtStart is guaranteed to be live only at - // instruction start. Register allocator is free to assign the same register - // to some other operand used inside instruction (i.e. temporary or - // output). - MUST_USE_RESULT LOperand* UseRegister(HValue* value); - MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value); - - // An input operand in a register that may be trashed. - MUST_USE_RESULT LOperand* UseTempRegister(HValue* value); - - // An input operand in a register or stack slot. - MUST_USE_RESULT LOperand* Use(HValue* value); - MUST_USE_RESULT LOperand* UseAtStart(HValue* value); - - // An input operand in a register, stack slot or a constant operand. - MUST_USE_RESULT LOperand* UseOrConstant(HValue* value); - MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value); - - // An input operand in a fixed register or a constant operand. - MUST_USE_RESULT LOperand* UseFixedOrConstant(HValue* value, - Register fixed_register); - - // An input operand in a register or a constant operand. - MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value); - MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value); - - // An input operand in a constant operand. - MUST_USE_RESULT LOperand* UseConstant(HValue* value); - - // An input operand in register, stack slot or a constant operand. - // Will not be moved to a register even if one is freely available. - MUST_USE_RESULT LOperand* UseAny(HValue* value) override; - - // Temporary operand that must be in a register. - MUST_USE_RESULT LUnallocated* TempRegister(); - MUST_USE_RESULT LOperand* FixedTemp(Register reg); - - // Methods for setting up define-use relationships. - // Return the same instruction that they are passed. - LInstruction* Define(LTemplateResultInstruction<1>* instr, - LUnallocated* result); - LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr); - LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr, - int index); - LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr); - LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr, - Register reg); - LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr, - X87Register reg); - LInstruction* DefineX87TOS(LTemplateResultInstruction<1>* instr); - // Assigns an environment to an instruction. An instruction which can - // deoptimize must have an environment. - LInstruction* AssignEnvironment(LInstruction* instr); - // Assigns a pointer map to an instruction. An instruction which can - // trigger a GC or a lazy deoptimization must have a pointer map. - LInstruction* AssignPointerMap(LInstruction* instr); - - enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY }; - - LOperand* GetSeqStringSetCharOperand(HSeqStringSetChar* instr); - - // Marks a call for the register allocator. Assigns a pointer map to - // support GC and lazy deoptimization. Assigns an environment to support - // eager deoptimization if CAN_DEOPTIMIZE_EAGERLY. 
- LInstruction* MarkAsCall( - LInstruction* instr, - HInstruction* hinstr, - CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY); - - void VisitInstruction(HInstruction* current); - void AddInstruction(LInstruction* instr, HInstruction* current); - - void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block); - LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr); - LInstruction* DoArithmeticD(Token::Value op, - HArithmeticBinaryOperation* instr); - LInstruction* DoArithmeticT(Token::Value op, - HBinaryOperation* instr); - - LOperand* GetStoreKeyedValueOperand(HStoreKeyed* instr); - - HInstruction* current_instruction_; - HBasicBlock* current_block_; - HBasicBlock* next_block_; - LAllocator* allocator_; - - DISALLOW_COPY_AND_ASSIGN(LChunkBuilder); -}; - -#undef DECLARE_HYDROGEN_ACCESSOR -#undef DECLARE_CONCRETE_INSTRUCTION - -} // namespace internal -} // namespace v8 - -#endif // V8_CRANKSHAFT_X87_LITHIUM_X87_H_ diff --git a/src/isolate.cc b/src/isolate.cc index d7480fefe4..e4d44dc733 100644 --- a/src/isolate.cc +++ b/src/isolate.cc @@ -25,7 +25,6 @@ #include "src/compilation-statistics.h" #include "src/compiler-dispatcher/compiler-dispatcher.h" #include "src/compiler-dispatcher/optimizing-compile-dispatcher.h" -#include "src/crankshaft/hydrogen.h" #include "src/debug/debug.h" #include "src/deoptimizer.h" #include "src/elements.h" @@ -2949,11 +2948,8 @@ void Isolate::DumpAndResetStats() { os << ps << std::endl; } } - if (hstatistics() != nullptr) hstatistics()->Print(); delete turbo_statistics_; turbo_statistics_ = nullptr; - delete hstatistics_; - hstatistics_ = nullptr; if (V8_UNLIKELY(FLAG_runtime_stats == v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE)) { OFStream os(stdout); @@ -2963,12 +2959,6 @@ void Isolate::DumpAndResetStats() { } -HStatistics* Isolate::GetHStatistics() { - if (hstatistics() == NULL) set_hstatistics(new HStatistics()); - return hstatistics(); -} - - CompilationStatistics* Isolate::GetTurboStatistics() { if (turbo_statistics() == NULL) set_turbo_statistics(new CompilationStatistics()); @@ -2976,12 +2966,6 @@ CompilationStatistics* Isolate::GetTurboStatistics() { } -HTracer* Isolate::GetHTracer() { - if (htracer() == NULL) set_htracer(new HTracer(id())); - return htracer(); -} - - CodeTracer* Isolate::GetCodeTracer() { if (code_tracer() == NULL) set_code_tracer(new CodeTracer(id())); return code_tracer(); diff --git a/src/isolate.h b/src/isolate.h index 2923af3731..2b81b9239e 100644 --- a/src/isolate.h +++ b/src/isolate.h @@ -70,8 +70,6 @@ class Factory; class HandleScopeImplementer; class HeapObjectToIndexHashMap; class HeapProfiler; -class HStatistics; -class HTracer; class InlineRuntimeFunctionsTable; class InnerPointerToCodeCache; class Logger; @@ -407,9 +405,7 @@ typedef std::vector DebugObjectCache; V(AddressToIndexHashMap*, external_reference_map, nullptr) \ V(HeapObjectToIndexHashMap*, root_index_map, nullptr) \ V(int, pending_microtask_count, 0) \ - V(HStatistics*, hstatistics, nullptr) \ V(CompilationStatistics*, turbo_statistics, nullptr) \ - V(HTracer*, htracer, nullptr) \ V(CodeTracer*, code_tracer, nullptr) \ V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu) \ V(PromiseRejectCallback, promise_reject_callback, nullptr) \ @@ -1086,9 +1082,7 @@ class Isolate { int id() const { return static_cast(id_); } - HStatistics* GetHStatistics(); CompilationStatistics* GetTurboStatistics(); - HTracer* GetHTracer(); CodeTracer* GetCodeTracer(); void DumpAndResetStats(); diff --git a/src/v8.cc b/src/v8.cc index 
89d20c6742..b7fbb04149 100644 --- a/src/v8.cc +++ b/src/v8.cc @@ -10,7 +10,6 @@ #include "src/base/once.h" #include "src/base/platform/platform.h" #include "src/bootstrapper.h" -#include "src/crankshaft/lithium-allocator.h" #include "src/debug/debug.h" #include "src/deoptimizer.h" #include "src/elements.h" @@ -46,7 +45,6 @@ bool V8::Initialize() { void V8::TearDown() { Bootstrapper::TearDownExtensions(); ElementsAccessor::TearDown(); - LOperand::TearDownCaches(); RegisteredExtension::UnregisterAll(); Isolate::GlobalTearDown(); sampler::Sampler::TearDown(); @@ -80,7 +78,6 @@ void V8::InitializeOncePerProcessImpl() { sampler::Sampler::SetUp(); CpuFeatures::Probe(false); ElementsAccessor::InitializeOncePerProcess(); - LOperand::SetUpCaches(); SetUpJSCallerSavedCodeData(); ExternalReference::SetUp(); Bootstrapper::InitializeOncePerProcess(); diff --git a/src/v8.gyp b/src/v8.gyp index 12f90ade57..8217d934c4 100644 --- a/src/v8.gyp +++ b/src/v8.gyp @@ -912,63 +912,6 @@ 'counters-inl.h', 'counters.cc', 'counters.h', - 'crankshaft/compilation-phase.cc', - 'crankshaft/compilation-phase.h', - 'crankshaft/hydrogen-alias-analysis.h', - 'crankshaft/hydrogen-bce.cc', - 'crankshaft/hydrogen-bce.h', - 'crankshaft/hydrogen-canonicalize.cc', - 'crankshaft/hydrogen-canonicalize.h', - 'crankshaft/hydrogen-check-elimination.cc', - 'crankshaft/hydrogen-check-elimination.h', - 'crankshaft/hydrogen-dce.cc', - 'crankshaft/hydrogen-dce.h', - 'crankshaft/hydrogen-dehoist.cc', - 'crankshaft/hydrogen-dehoist.h', - 'crankshaft/hydrogen-environment-liveness.cc', - 'crankshaft/hydrogen-environment-liveness.h', - 'crankshaft/hydrogen-escape-analysis.cc', - 'crankshaft/hydrogen-escape-analysis.h', - 'crankshaft/hydrogen-flow-engine.h', - 'crankshaft/hydrogen-gvn.cc', - 'crankshaft/hydrogen-gvn.h', - 'crankshaft/hydrogen-infer-representation.cc', - 'crankshaft/hydrogen-infer-representation.h', - 'crankshaft/hydrogen-infer-types.cc', - 'crankshaft/hydrogen-infer-types.h', - 'crankshaft/hydrogen-instructions.cc', - 'crankshaft/hydrogen-instructions.h', - 'crankshaft/hydrogen-load-elimination.cc', - 'crankshaft/hydrogen-load-elimination.h', - 'crankshaft/hydrogen-mark-unreachable.cc', - 'crankshaft/hydrogen-mark-unreachable.h', - 'crankshaft/hydrogen-range-analysis.cc', - 'crankshaft/hydrogen-range-analysis.h', - 'crankshaft/hydrogen-redundant-phi.cc', - 'crankshaft/hydrogen-redundant-phi.h', - 'crankshaft/hydrogen-removable-simulates.cc', - 'crankshaft/hydrogen-removable-simulates.h', - 'crankshaft/hydrogen-representation-changes.cc', - 'crankshaft/hydrogen-representation-changes.h', - 'crankshaft/hydrogen-sce.cc', - 'crankshaft/hydrogen-sce.h', - 'crankshaft/hydrogen-store-elimination.cc', - 'crankshaft/hydrogen-store-elimination.h', - 'crankshaft/hydrogen-types.cc', - 'crankshaft/hydrogen-types.h', - 'crankshaft/hydrogen-uint32-analysis.cc', - 'crankshaft/hydrogen-uint32-analysis.h', - 'crankshaft/hydrogen.cc', - 'crankshaft/hydrogen.h', - 'crankshaft/lithium-allocator-inl.h', - 'crankshaft/lithium-allocator.cc', - 'crankshaft/lithium-allocator.h', - 'crankshaft/lithium-codegen.cc', - 'crankshaft/lithium-codegen.h', - 'crankshaft/lithium.cc', - 'crankshaft/lithium.h', - 'crankshaft/lithium-inl.h', - 'crankshaft/unique.h', 'date.cc', 'date.h', 'dateparser-inl.h', @@ -1552,12 +1495,6 @@ 'compiler/arm/instruction-selector-arm.cc', 'compiler/arm/unwinding-info-writer-arm.h', 'compiler/arm/unwinding-info-writer-arm.cc', - 'crankshaft/arm/lithium-arm.cc', - 'crankshaft/arm/lithium-arm.h', - 
'crankshaft/arm/lithium-codegen-arm.cc', - 'crankshaft/arm/lithium-codegen-arm.h', - 'crankshaft/arm/lithium-gap-resolver-arm.cc', - 'crankshaft/arm/lithium-gap-resolver-arm.h', 'debug/arm/debug-arm.cc', 'full-codegen/arm/full-codegen-arm.cc', 'ic/arm/access-compiler-arm.cc', @@ -1607,15 +1544,6 @@ 'compiler/arm64/instruction-selector-arm64.cc', 'compiler/arm64/unwinding-info-writer-arm64.h', 'compiler/arm64/unwinding-info-writer-arm64.cc', - 'crankshaft/arm64/delayed-masm-arm64.cc', - 'crankshaft/arm64/delayed-masm-arm64.h', - 'crankshaft/arm64/delayed-masm-arm64-inl.h', - 'crankshaft/arm64/lithium-arm64.cc', - 'crankshaft/arm64/lithium-arm64.h', - 'crankshaft/arm64/lithium-codegen-arm64.cc', - 'crankshaft/arm64/lithium-codegen-arm64.h', - 'crankshaft/arm64/lithium-gap-resolver-arm64.cc', - 'crankshaft/arm64/lithium-gap-resolver-arm64.h', 'debug/arm64/debug-arm64.cc', 'full-codegen/arm64/full-codegen-arm64.cc', 'ic/arm64/access-compiler-arm64.cc', @@ -1649,12 +1577,6 @@ 'compiler/ia32/instruction-codes-ia32.h', 'compiler/ia32/instruction-scheduler-ia32.cc', 'compiler/ia32/instruction-selector-ia32.cc', - 'crankshaft/ia32/lithium-codegen-ia32.cc', - 'crankshaft/ia32/lithium-codegen-ia32.h', - 'crankshaft/ia32/lithium-gap-resolver-ia32.cc', - 'crankshaft/ia32/lithium-gap-resolver-ia32.h', - 'crankshaft/ia32/lithium-ia32.cc', - 'crankshaft/ia32/lithium-ia32.h', 'debug/ia32/debug-ia32.cc', 'full-codegen/ia32/full-codegen-ia32.cc', 'ic/ia32/access-compiler-ia32.cc', @@ -1687,12 +1609,6 @@ 'compiler/x87/instruction-codes-x87.h', 'compiler/x87/instruction-scheduler-x87.cc', 'compiler/x87/instruction-selector-x87.cc', - 'crankshaft/x87/lithium-codegen-x87.cc', - 'crankshaft/x87/lithium-codegen-x87.h', - 'crankshaft/x87/lithium-gap-resolver-x87.cc', - 'crankshaft/x87/lithium-gap-resolver-x87.h', - 'crankshaft/x87/lithium-x87.cc', - 'crankshaft/x87/lithium-x87.h', 'debug/x87/debug-x87.cc', 'full-codegen/x87/full-codegen-x87.cc', 'ic/x87/access-compiler-x87.cc', @@ -1727,12 +1643,6 @@ 'compiler/mips/instruction-codes-mips.h', 'compiler/mips/instruction-scheduler-mips.cc', 'compiler/mips/instruction-selector-mips.cc', - 'crankshaft/mips/lithium-codegen-mips.cc', - 'crankshaft/mips/lithium-codegen-mips.h', - 'crankshaft/mips/lithium-gap-resolver-mips.cc', - 'crankshaft/mips/lithium-gap-resolver-mips.h', - 'crankshaft/mips/lithium-mips.cc', - 'crankshaft/mips/lithium-mips.h', 'full-codegen/mips/full-codegen-mips.cc', 'debug/mips/debug-mips.cc', 'ic/mips/access-compiler-mips.cc', @@ -1767,12 +1677,6 @@ 'compiler/mips64/instruction-codes-mips64.h', 'compiler/mips64/instruction-scheduler-mips64.cc', 'compiler/mips64/instruction-selector-mips64.cc', - 'crankshaft/mips64/lithium-codegen-mips64.cc', - 'crankshaft/mips64/lithium-codegen-mips64.h', - 'crankshaft/mips64/lithium-gap-resolver-mips64.cc', - 'crankshaft/mips64/lithium-gap-resolver-mips64.h', - 'crankshaft/mips64/lithium-mips64.cc', - 'crankshaft/mips64/lithium-mips64.h', 'debug/mips64/debug-mips64.cc', 'full-codegen/mips64/full-codegen-mips64.cc', 'ic/mips64/access-compiler-mips64.cc', @@ -1790,12 +1694,6 @@ 'compiler/x64/instruction-selector-x64.cc', 'compiler/x64/unwinding-info-writer-x64.h', 'compiler/x64/unwinding-info-writer-x64.cc', - 'crankshaft/x64/lithium-codegen-x64.cc', - 'crankshaft/x64/lithium-codegen-x64.h', - 'crankshaft/x64/lithium-gap-resolver-x64.cc', - 'crankshaft/x64/lithium-gap-resolver-x64.h', - 'crankshaft/x64/lithium-x64.cc', - 'crankshaft/x64/lithium-x64.h', 'x64/assembler-x64-inl.h', 'x64/assembler-x64.cc', 
'x64/assembler-x64.h', @@ -1834,12 +1732,6 @@ 'compiler/ppc/instruction-codes-ppc.h', 'compiler/ppc/instruction-scheduler-ppc.cc', 'compiler/ppc/instruction-selector-ppc.cc', - 'crankshaft/ppc/lithium-ppc.cc', - 'crankshaft/ppc/lithium-ppc.h', - 'crankshaft/ppc/lithium-codegen-ppc.cc', - 'crankshaft/ppc/lithium-codegen-ppc.h', - 'crankshaft/ppc/lithium-gap-resolver-ppc.cc', - 'crankshaft/ppc/lithium-gap-resolver-ppc.h', 'debug/ppc/debug-ppc.cc', 'full-codegen/ppc/full-codegen-ppc.cc', 'ic/ppc/access-compiler-ppc.cc', @@ -1874,12 +1766,6 @@ 'compiler/s390/instruction-codes-s390.h', 'compiler/s390/instruction-scheduler-s390.cc', 'compiler/s390/instruction-selector-s390.cc', - 'crankshaft/s390/lithium-codegen-s390.cc', - 'crankshaft/s390/lithium-codegen-s390.h', - 'crankshaft/s390/lithium-gap-resolver-s390.cc', - 'crankshaft/s390/lithium-gap-resolver-s390.h', - 'crankshaft/s390/lithium-s390.cc', - 'crankshaft/s390/lithium-s390.h', 'debug/s390/debug-s390.cc', 'full-codegen/s390/full-codegen-s390.cc', 'ic/s390/access-compiler-s390.cc', diff --git a/test/cctest/BUILD.gn b/test/cctest/BUILD.gn index cfa8d58503..287979c17a 100644 --- a/test/cctest/BUILD.gn +++ b/test/cctest/BUILD.gn @@ -148,7 +148,6 @@ v8_executable("cctest") { "test-hashing.cc", "test-hashmap.cc", "test-heap-profiler.cc", - "test-hydrogen-types.cc", "test-identity-map.cc", "test-inobject-slack-tracking.cc", "test-list.cc", @@ -180,7 +179,6 @@ v8_executable("cctest") { "test-types.cc", "test-unbound-queue.cc", "test-unboxed-doubles.cc", - "test-unique.cc", "test-unscopables-hidden-prototype.cc", "test-usecounters.cc", "test-utils.cc", diff --git a/test/cctest/cctest.gyp b/test/cctest/cctest.gyp index 4b391bc8d5..b21bd2fc84 100644 --- a/test/cctest/cctest.gyp +++ b/test/cctest/cctest.gyp @@ -165,7 +165,6 @@ 'test-hashing.cc', 'test-hashmap.cc', 'test-heap-profiler.cc', - 'test-hydrogen-types.cc', 'test-identity-map.cc', 'test-inobject-slack-tracking.cc', 'test-list.cc', @@ -198,7 +197,6 @@ 'test-types.cc', 'test-unbound-queue.cc', 'test-unboxed-doubles.cc', - 'test-unique.cc', 'test-unscopables-hidden-prototype.cc', 'test-usecounters.cc', 'test-utils.cc', diff --git a/test/cctest/test-ast-types.cc b/test/cctest/test-ast-types.cc index a19b702938..53cf794fba 100644 --- a/test/cctest/test-ast-types.cc +++ b/test/cctest/test-ast-types.cc @@ -4,7 +4,6 @@ #include -#include "src/crankshaft/hydrogen-types.h" #include "src/factory.h" #include "src/heap/heap.h" #include "src/isolate.h" @@ -1833,18 +1832,6 @@ struct Tests { } } } - - void HTypeFromType() { - for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) { - for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) { - AstType* type1 = *it1; - AstType* type2 = *it2; - HType htype1 = HType::FromType(type1); - HType htype2 = HType::FromType(type2); - CHECK(!type1->Is(type2) || htype1.IsSubtypeOf(htype2)); - } - } - } }; } // namespace @@ -1900,5 +1887,3 @@ TEST(AstIntersect_zone) { Tests().Intersect(); } TEST(AstDistributivity_zone) { Tests().Distributivity(); } TEST(AstGetRange_zone) { Tests().GetRange(); } - -TEST(AstHTypeFromType_zone) { Tests().HTypeFromType(); } diff --git a/test/cctest/test-hydrogen-types.cc b/test/cctest/test-hydrogen-types.cc deleted file mode 100644 index 07e10e8082..0000000000 --- a/test/cctest/test-hydrogen-types.cc +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#include "src/crankshaft/hydrogen-types.h" - -#include "test/cctest/cctest.h" - -using namespace v8::internal; - -static const HType kTypes[] = { - #define DECLARE_TYPE(Name, mask) HType::Name(), - HTYPE_LIST(DECLARE_TYPE) - #undef DECLARE_TYPE -}; - -static const int kNumberOfTypes = sizeof(kTypes) / sizeof(kTypes[0]); - - -TEST(HTypeDistinct) { - for (int i = 0; i < kNumberOfTypes; ++i) { - for (int j = 0; j < kNumberOfTypes; ++j) { - CHECK(i == j || !kTypes[i].Equals(kTypes[j])); - } - } -} - - -TEST(HTypeReflexivity) { - // Reflexivity of = - for (int i = 0; i < kNumberOfTypes; ++i) { - CHECK(kTypes[i].Equals(kTypes[i])); - } - - // Reflexivity of < - for (int i = 0; i < kNumberOfTypes; ++i) { - CHECK(kTypes[i].IsSubtypeOf(kTypes[i])); - } -} - - -TEST(HTypeTransitivity) { - // Transitivity of = - for (int i = 0; i < kNumberOfTypes; ++i) { - for (int j = 0; j < kNumberOfTypes; ++j) { - for (int k = 0; k < kNumberOfTypes; ++k) { - HType ti = kTypes[i]; - HType tj = kTypes[j]; - HType tk = kTypes[k]; - CHECK(!ti.Equals(tj) || !tj.Equals(tk) || ti.Equals(tk)); - } - } - } - - // Transitivity of < - for (int i = 0; i < kNumberOfTypes; ++i) { - for (int j = 0; j < kNumberOfTypes; ++j) { - for (int k = 0; k < kNumberOfTypes; ++k) { - HType ti = kTypes[i]; - HType tj = kTypes[j]; - HType tk = kTypes[k]; - CHECK(!ti.IsSubtypeOf(tj) || !tj.IsSubtypeOf(tk) || ti.IsSubtypeOf(tk)); - } - } - } -} - - -TEST(HTypeCombine) { - // T < T /\ T' and T' < T /\ T' for all T,T' - for (int i = 0; i < kNumberOfTypes; ++i) { - for (int j = 0; j < kNumberOfTypes; ++j) { - HType ti = kTypes[i]; - HType tj = kTypes[j]; - CHECK(ti.IsSubtypeOf(ti.Combine(tj))); - CHECK(tj.IsSubtypeOf(ti.Combine(tj))); - } - } -} - - -TEST(HTypeAny) { - // T < Any for all T - for (int i = 0; i < kNumberOfTypes; ++i) { - HType ti = kTypes[i]; - CHECK(ti.IsAny()); - } - - // Any < T implies T = Any for all T - for (int i = 0; i < kNumberOfTypes; ++i) { - HType ti = kTypes[i]; - CHECK(!HType::Any().IsSubtypeOf(ti) || HType::Any().Equals(ti)); - } -} - - -TEST(HTypeTagged) { - // T < Tagged for all T \ {Any} - for (int i = 0; i < kNumberOfTypes; ++i) { - HType ti = kTypes[i]; - CHECK(ti.IsTagged() || HType::Any().Equals(ti)); - } - - // Tagged < T implies T = Tagged or T = Any - for (int i = 0; i < kNumberOfTypes; ++i) { - HType ti = kTypes[i]; - CHECK(!HType::Tagged().IsSubtypeOf(ti) || - HType::Tagged().Equals(ti) || - HType::Any().Equals(ti)); - } -} - - -TEST(HTypeSmi) { - // T < Smi implies T = None or T = Smi for all T - for (int i = 0; i < kNumberOfTypes; ++i) { - HType ti = kTypes[i]; - CHECK(!ti.IsSmi() || - ti.Equals(HType::Smi()) || - ti.Equals(HType::None())); - } -} - - -TEST(HTypeHeapObject) { - CHECK(!HType::TaggedPrimitive().IsHeapObject()); - CHECK(!HType::TaggedNumber().IsHeapObject()); - CHECK(!HType::Smi().IsHeapObject()); - CHECK(HType::HeapObject().IsHeapObject()); - CHECK(HType::HeapPrimitive().IsHeapObject()); - CHECK(HType::Null().IsHeapObject()); - CHECK(HType::HeapNumber().IsHeapObject()); - CHECK(HType::String().IsHeapObject()); - CHECK(HType::Boolean().IsHeapObject()); - CHECK(HType::Undefined().IsHeapObject()); - CHECK(HType::JSObject().IsHeapObject()); - CHECK(HType::JSArray().IsHeapObject()); -} - - -TEST(HTypePrimitive) { - CHECK(HType::TaggedNumber().IsTaggedPrimitive()); - CHECK(HType::Smi().IsTaggedPrimitive()); - CHECK(!HType::HeapObject().IsTaggedPrimitive()); - CHECK(HType::HeapPrimitive().IsTaggedPrimitive()); - CHECK(HType::Null().IsHeapPrimitive()); - 
CHECK(HType::HeapNumber().IsHeapPrimitive()); - CHECK(HType::String().IsHeapPrimitive()); - CHECK(HType::Boolean().IsHeapPrimitive()); - CHECK(HType::Undefined().IsHeapPrimitive()); - CHECK(!HType::JSObject().IsTaggedPrimitive()); - CHECK(!HType::JSArray().IsTaggedPrimitive()); -} - - -TEST(HTypeJSObject) { - CHECK(HType::JSArray().IsJSObject()); -} - - -TEST(HTypeNone) { - // None < T for all T - for (int i = 0; i < kNumberOfTypes; ++i) { - HType ti = kTypes[i]; - CHECK(HType::None().IsSubtypeOf(ti)); - } -} diff --git a/test/cctest/test-types.cc b/test/cctest/test-types.cc index 5d9ade5484..c832f2efe0 100644 --- a/test/cctest/test-types.cc +++ b/test/cctest/test-types.cc @@ -5,7 +5,6 @@ #include #include "src/compiler/types.h" -#include "src/crankshaft/hydrogen-types.h" #include "src/factory.h" #include "src/heap/heap.h" #include "src/isolate.h" diff --git a/test/cctest/test-unique.cc b/test/cctest/test-unique.cc deleted file mode 100644 index 0bc36bafc9..0000000000 --- a/test/cctest/test-unique.cc +++ /dev/null @@ -1,555 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -#include - -#include "src/v8.h" - -#include "src/crankshaft/unique.h" -#include "src/factory.h" -#include "src/global-handles.h" -// FIXME(mstarzinger, marja): This is weird, but required because of the missing -// (disallowed) include: src/factory.h -> src/objects-inl.h -#include "src/objects-inl.h" -// FIXME(mstarzinger, marja): This is weird, but required because of the missing -// (disallowed) include: src/feedback-vector.h -> -// src/feedback-vector-inl.h -#include "src/feedback-vector-inl.h" -#include "test/cctest/cctest.h" - -using namespace v8::internal; - -#define MAKE_HANDLES_AND_DISALLOW_ALLOCATION \ -Isolate* isolate = CcTest::i_isolate(); \ -Factory* factory = isolate->factory(); \ -HandleScope sc(isolate); \ -Handle handles[] = { \ - factory->InternalizeUtf8String("A"), \ - factory->InternalizeUtf8String("B"), \ - factory->InternalizeUtf8String("C"), \ - factory->InternalizeUtf8String("D"), \ - factory->InternalizeUtf8String("E"), \ - factory->InternalizeUtf8String("F"), \ - factory->InternalizeUtf8String("G") \ -}; \ -DisallowHeapAllocation _disable - -#define MAKE_UNIQUES_A_B_C \ - Unique A(handles[0]); \ - Unique B(handles[1]); \ - Unique C(handles[2]) - -#define MAKE_UNIQUES_A_B_C_D_E_F_G \ - Unique A(handles[0]); \ - Unique B(handles[1]); \ - Unique C(handles[2]); \ - Unique D(handles[3]); \ - Unique E(handles[4]); \ - Unique F(handles[5]); \ - Unique G(handles[6]) - -template -void CheckHashCodeEqual(Unique a, Unique b) { - int64_t hasha = static_cast(a.Hashcode()); - int64_t hashb = static_cast(b.Hashcode()); - CHECK_NE(static_cast(0), hasha); - CHECK_NE(static_cast(0), hashb); - CHECK_EQ(hasha, hashb); -} - - -template -void CheckHashCodeNotEqual(Unique a, Unique b) { - int64_t hasha = static_cast(a.Hashcode()); - int64_t hashb = static_cast(b.Hashcode()); - CHECK_NE(static_cast(0), hasha); - CHECK_NE(static_cast(0), hashb); - CHECK_NE(hasha, hashb); -} - - -TEST(UniqueCreate) { - CcTest::InitializeVM(); - MAKE_HANDLES_AND_DISALLOW_ALLOCATION; - Handle A = handles[0], B = handles[1]; - - Unique HA(A); - - CHECK(*HA.handle() == *A); - CHECK_EQ(*A, *HA.handle()); - - Unique HA2(A); - - CheckHashCodeEqual(HA, HA2); - CHECK(HA == HA2); - CHECK_EQ(*HA.handle(), *HA2.handle()); - - CHECK(HA2 == HA); - CHECK_EQ(*HA2.handle(), *HA.handle()); - - Unique HB(B); - - CheckHashCodeNotEqual(HA, HB); - CHECK(HA != HB); - CHECK_NE(*HA.handle(), *HB.handle()); - - CHECK(HB != HA); - CHECK_NE(*HB.handle(), *HA.handle()); - - // TODO(titzer): check that Unique properly survives a GC. -} - - -TEST(UniqueSubsume) { - CcTest::InitializeVM(); - MAKE_HANDLES_AND_DISALLOW_ALLOCATION; - Handle A = handles[0]; - - Unique HA(A); - - CHECK(*HA.handle() == *A); - CHECK_EQ(*A, *HA.handle()); - - Unique HO = HA; // Here comes the subsumption, boys. 
- - CheckHashCodeEqual(HA, HO); - CHECK(HA == HO); - CHECK_EQ(*HA.handle(), *HO.handle()); - - CHECK(HO == HA); - CHECK_EQ(*HO.handle(), *HA.handle()); -} - - -TEST(UniqueSet_Add) { - CcTest::InitializeVM(); - MAKE_HANDLES_AND_DISALLOW_ALLOCATION; - MAKE_UNIQUES_A_B_C; - - Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME); - - UniqueSet* set = new(&zone) UniqueSet(); - - CHECK_EQ(0, set->size()); - set->Add(A, &zone); - CHECK_EQ(1, set->size()); - set->Add(A, &zone); - CHECK_EQ(1, set->size()); - set->Add(B, &zone); - CHECK_EQ(2, set->size()); - set->Add(C, &zone); - CHECK_EQ(3, set->size()); - set->Add(C, &zone); - CHECK_EQ(3, set->size()); - set->Add(B, &zone); - CHECK_EQ(3, set->size()); - set->Add(A, &zone); - CHECK_EQ(3, set->size()); -} - - -TEST(UniqueSet_Remove) { - CcTest::InitializeVM(); - MAKE_HANDLES_AND_DISALLOW_ALLOCATION; - MAKE_UNIQUES_A_B_C; - - Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME); - - UniqueSet* set = new(&zone) UniqueSet(); - - set->Add(A, &zone); - set->Add(B, &zone); - set->Add(C, &zone); - CHECK_EQ(3, set->size()); - - set->Remove(A); - CHECK_EQ(2, set->size()); - CHECK(!set->Contains(A)); - CHECK(set->Contains(B)); - CHECK(set->Contains(C)); - - set->Remove(A); - CHECK_EQ(2, set->size()); - CHECK(!set->Contains(A)); - CHECK(set->Contains(B)); - CHECK(set->Contains(C)); - - set->Remove(B); - CHECK_EQ(1, set->size()); - CHECK(!set->Contains(A)); - CHECK(!set->Contains(B)); - CHECK(set->Contains(C)); - - set->Remove(C); - CHECK_EQ(0, set->size()); - CHECK(!set->Contains(A)); - CHECK(!set->Contains(B)); - CHECK(!set->Contains(C)); -} - - -TEST(UniqueSet_Contains) { - CcTest::InitializeVM(); - MAKE_HANDLES_AND_DISALLOW_ALLOCATION; - MAKE_UNIQUES_A_B_C; - - Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME); - - UniqueSet* set = new(&zone) UniqueSet(); - - CHECK_EQ(0, set->size()); - set->Add(A, &zone); - CHECK(set->Contains(A)); - CHECK(!set->Contains(B)); - CHECK(!set->Contains(C)); - - set->Add(A, &zone); - CHECK(set->Contains(A)); - CHECK(!set->Contains(B)); - CHECK(!set->Contains(C)); - - set->Add(B, &zone); - CHECK(set->Contains(A)); - CHECK(set->Contains(B)); - - set->Add(C, &zone); - CHECK(set->Contains(A)); - CHECK(set->Contains(B)); - CHECK(set->Contains(C)); -} - - -TEST(UniqueSet_At) { - CcTest::InitializeVM(); - MAKE_HANDLES_AND_DISALLOW_ALLOCATION; - MAKE_UNIQUES_A_B_C; - - Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME); - - UniqueSet* set = new(&zone) UniqueSet(); - - CHECK_EQ(0, set->size()); - set->Add(A, &zone); - CHECK(A == set->at(0)); - - set->Add(A, &zone); - CHECK(A == set->at(0)); - - set->Add(B, &zone); - CHECK(A == set->at(0) || B == set->at(0)); - CHECK(A == set->at(1) || B == set->at(1)); - - set->Add(C, &zone); - CHECK(A == set->at(0) || B == set->at(0) || C == set->at(0)); - CHECK(A == set->at(1) || B == set->at(1) || C == set->at(1)); - CHECK(A == set->at(2) || B == set->at(2) || C == set->at(2)); -} - - -template -static void CHECK_SETS( - UniqueSet* set1, UniqueSet* set2, bool expected) { - CHECK(set1->Equals(set1)); - CHECK(set2->Equals(set2)); - CHECK(expected == set1->Equals(set2)); - CHECK(expected == set2->Equals(set1)); -} - - -TEST(UniqueSet_Equals) { - CcTest::InitializeVM(); - MAKE_HANDLES_AND_DISALLOW_ALLOCATION; - MAKE_UNIQUES_A_B_C; - - Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME); - - UniqueSet* set1 = new(&zone) UniqueSet(); - UniqueSet* set2 = new(&zone) UniqueSet(); - - CHECK_SETS(set1, set2, true); - - set1->Add(A, &zone); - - CHECK_SETS(set1, set2, false); - - set2->Add(A, 
&zone); - - CHECK_SETS(set1, set2, true); - - set1->Add(B, &zone); - - CHECK_SETS(set1, set2, false); - - set2->Add(C, &zone); - - CHECK_SETS(set1, set2, false); - - set1->Add(C, &zone); - - CHECK_SETS(set1, set2, false); - - set2->Add(B, &zone); - - CHECK_SETS(set1, set2, true); -} - - -TEST(UniqueSet_IsSubset1) { - CcTest::InitializeVM(); - MAKE_HANDLES_AND_DISALLOW_ALLOCATION; - MAKE_UNIQUES_A_B_C; - - Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME); - - UniqueSet* set1 = new(&zone) UniqueSet(); - UniqueSet* set2 = new(&zone) UniqueSet(); - - CHECK(set1->IsSubset(set2)); - CHECK(set2->IsSubset(set1)); - - set1->Add(A, &zone); - - CHECK(!set1->IsSubset(set2)); - CHECK(set2->IsSubset(set1)); - - set2->Add(B, &zone); - - CHECK(!set1->IsSubset(set2)); - CHECK(!set2->IsSubset(set1)); - - set2->Add(A, &zone); - - CHECK(set1->IsSubset(set2)); - CHECK(!set2->IsSubset(set1)); - - set1->Add(B, &zone); - - CHECK(set1->IsSubset(set2)); - CHECK(set2->IsSubset(set1)); -} - - -TEST(UniqueSet_IsSubset2) { - CcTest::InitializeVM(); - MAKE_HANDLES_AND_DISALLOW_ALLOCATION; - MAKE_UNIQUES_A_B_C_D_E_F_G; - - Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME); - - UniqueSet* set1 = new(&zone) UniqueSet(); - UniqueSet* set2 = new(&zone) UniqueSet(); - - set1->Add(A, &zone); - set1->Add(C, &zone); - set1->Add(E, &zone); - - set2->Add(A, &zone); - set2->Add(B, &zone); - set2->Add(C, &zone); - set2->Add(D, &zone); - set2->Add(E, &zone); - set2->Add(F, &zone); - - CHECK(set1->IsSubset(set2)); - CHECK(!set2->IsSubset(set1)); - - set1->Add(G, &zone); - - CHECK(!set1->IsSubset(set2)); - CHECK(!set2->IsSubset(set1)); -} - - -template -static UniqueSet* MakeSet(Zone* zone, int which, Unique* elements) { - UniqueSet* set = new(zone) UniqueSet(); - for (int i = 0; i < 32; i++) { - if ((which & (1 << i)) != 0) set->Add(elements[i], zone); - } - return set; -} - - -TEST(UniqueSet_IsSubsetExhaustive) { - const int kSetSize = 6; - - CcTest::InitializeVM(); - MAKE_HANDLES_AND_DISALLOW_ALLOCATION; - MAKE_UNIQUES_A_B_C_D_E_F_G; - - Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME); - - Unique elements[] = { - A, B, C, D, E, F, G - }; - - // Exhaustively test all sets with <= 6 elements. 
- for (int i = 0; i < (1 << kSetSize); i++) { - for (int j = 0; j < (1 << kSetSize); j++) { - UniqueSet* set1 = MakeSet(&zone, i, elements); - UniqueSet* set2 = MakeSet(&zone, j, elements); - - CHECK(((i & j) == i) == set1->IsSubset(set2)); - } - } -} - - -TEST(UniqueSet_Intersect1) { - CcTest::InitializeVM(); - MAKE_HANDLES_AND_DISALLOW_ALLOCATION; - MAKE_UNIQUES_A_B_C; - - Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME); - - UniqueSet* set1 = new(&zone) UniqueSet(); - UniqueSet* set2 = new(&zone) UniqueSet(); - UniqueSet* result; - - CHECK(set1->IsSubset(set2)); - CHECK(set2->IsSubset(set1)); - - set1->Add(A, &zone); - - result = set1->Intersect(set2, &zone); - - CHECK_EQ(0, result->size()); - CHECK(set2->Equals(result)); - - set2->Add(A, &zone); - - result = set1->Intersect(set2, &zone); - - CHECK_EQ(1, result->size()); - CHECK(set1->Equals(result)); - CHECK(set2->Equals(result)); - - set2->Add(B, &zone); - set2->Add(C, &zone); - - result = set1->Intersect(set2, &zone); - - CHECK_EQ(1, result->size()); - CHECK(set1->Equals(result)); -} - - -TEST(UniqueSet_IntersectExhaustive) { - const int kSetSize = 6; - - CcTest::InitializeVM(); - MAKE_HANDLES_AND_DISALLOW_ALLOCATION; - MAKE_UNIQUES_A_B_C_D_E_F_G; - - Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME); - - Unique elements[] = { - A, B, C, D, E, F, G - }; - - // Exhaustively test all sets with <= 6 elements. - for (int i = 0; i < (1 << kSetSize); i++) { - for (int j = 0; j < (1 << kSetSize); j++) { - UniqueSet* set1 = MakeSet(&zone, i, elements); - UniqueSet* set2 = MakeSet(&zone, j, elements); - - UniqueSet* result = set1->Intersect(set2, &zone); - UniqueSet* expected = MakeSet(&zone, i & j, elements); - - CHECK(result->Equals(expected)); - CHECK(expected->Equals(result)); - } - } -} - - -TEST(UniqueSet_Union1) { - CcTest::InitializeVM(); - MAKE_HANDLES_AND_DISALLOW_ALLOCATION; - MAKE_UNIQUES_A_B_C; - - Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME); - - UniqueSet* set1 = new(&zone) UniqueSet(); - UniqueSet* set2 = new(&zone) UniqueSet(); - UniqueSet* result; - - CHECK(set1->IsSubset(set2)); - CHECK(set2->IsSubset(set1)); - - set1->Add(A, &zone); - - result = set1->Union(set2, &zone); - - CHECK_EQ(1, result->size()); - CHECK(set1->Equals(result)); - - set2->Add(A, &zone); - - result = set1->Union(set2, &zone); - - CHECK_EQ(1, result->size()); - CHECK(set1->Equals(result)); - CHECK(set2->Equals(result)); - - set2->Add(B, &zone); - set2->Add(C, &zone); - - result = set1->Union(set2, &zone); - - CHECK_EQ(3, result->size()); - CHECK(set2->Equals(result)); -} - - -TEST(UniqueSet_UnionExhaustive) { - const int kSetSize = 6; - - CcTest::InitializeVM(); - MAKE_HANDLES_AND_DISALLOW_ALLOCATION; - MAKE_UNIQUES_A_B_C_D_E_F_G; - - Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME); - - Unique elements[] = { - A, B, C, D, E, F, G - }; - - // Exhaustively test all sets with <= 6 elements. - for (int i = 0; i < (1 << kSetSize); i++) { - for (int j = 0; j < (1 << kSetSize); j++) { - UniqueSet* set1 = MakeSet(&zone, i, elements); - UniqueSet* set2 = MakeSet(&zone, j, elements); - - UniqueSet* result = set1->Union(set2, &zone); - UniqueSet* expected = MakeSet(&zone, i | j, elements); - - CHECK(result->Equals(expected)); - CHECK(expected->Equals(result)); - } - } -}
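
For reference, the deleted `UniqueSet_*Exhaustive` tests above all follow the same scheme: every candidate set is encoded as a bitmask over the seven handles `A`..`G`, and `IsSubset`, `Intersect`, and `Union` are asserted to agree with the corresponding bitwise relations on those masks (`(i & j) == i`, `i & j`, `i | j`). The standalone sketch below reproduces that invariant check without any V8 types; `MakeSet`, `IsSubset`, `Intersect`, and `Union` here are illustrative stand-ins built on `std::set<char>`, not the removed `Unique`/`UniqueSet` API.

```cpp
// Minimal stand-alone illustration (not V8 code) of the invariant the
// deleted exhaustive UniqueSet tests verified: set operations on sets
// built from a bitmask match the bitwise operations on that mask.
#include <cassert>
#include <set>

namespace {

// Build the set selected by the low bits of |which|, mirroring MakeSet()
// in the deleted test file.
std::set<char> MakeSet(unsigned which) {
  static const char kElements[] = {'A', 'B', 'C', 'D', 'E', 'F', 'G'};
  std::set<char> result;
  for (int i = 0; i < 7; i++) {
    if (which & (1u << i)) result.insert(kElements[i]);
  }
  return result;
}

bool IsSubset(const std::set<char>& a, const std::set<char>& b) {
  for (char e : a) {
    if (b.count(e) == 0) return false;
  }
  return true;
}

std::set<char> Intersect(const std::set<char>& a, const std::set<char>& b) {
  std::set<char> result;
  for (char e : a) {
    if (b.count(e) != 0) result.insert(e);
  }
  return result;
}

std::set<char> Union(const std::set<char>& a, const std::set<char>& b) {
  std::set<char> result = a;
  result.insert(b.begin(), b.end());
  return result;
}

}  // namespace

int main() {
  const unsigned kSetSize = 6;  // Exhaustively cover all sets of <= 6 elements.
  for (unsigned i = 0; i < (1u << kSetSize); i++) {
    for (unsigned j = 0; j < (1u << kSetSize); j++) {
      std::set<char> set1 = MakeSet(i);
      std::set<char> set2 = MakeSet(j);
      // Subset holds exactly when every bit of i is also set in j.
      assert(IsSubset(set1, set2) == ((i & j) == i));
      // Intersection and union correspond to bitwise AND and OR of the masks.
      assert(Intersect(set1, set2) == MakeSet(i & j));
      assert(Union(set1, set2) == MakeSet(i | j));
    }
  }
  return 0;
}
```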